-
Notifications
You must be signed in to change notification settings - Fork 0
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
tidy test 1 #11
tidy test 1 #11
Conversation
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
clang-tidy made some suggestions
There were too many comments to post at once. Showing the first 25 out of 28. Check the log or trigger a new build to see more.
|
||
static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0() | ||
{ | ||
static std::map<e_model, size_t> _MEM_REQ_SCRATCH0 = { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: declaration uses identifier '_MEM_REQ_SCRATCH0', which is a reserved identifier [bugprone-reserved-identifier]
static std::map<e_model, size_t> _MEM_REQ_SCRATCH0 = { | |
static std::map<e_model, size_t> MEM_REQ_SCRATCH0 = { |
llama-test.cpp:58:
- return _MEM_REQ_SCRATCH0;
+ return MEM_REQ_SCRATCH0;
|
||
static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1() | ||
{ | ||
static std::map<e_model, size_t> _MEM_REQ_SCRATCH1 = { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: declaration uses identifier '_MEM_REQ_SCRATCH1', which is a reserved identifier [bugprone-reserved-identifier]
static std::map<e_model, size_t> _MEM_REQ_SCRATCH1 = { | |
static std::map<e_model, size_t> MEM_REQ_SCRATCH1 = { |
llama-test.cpp:69:
- return _MEM_REQ_SCRATCH1;
+ return MEM_REQ_SCRATCH1;
// 2*n_embd*n_ctx*n_layer*sizeof(float16) | ||
static const std::map<e_model, size_t> & MEM_REQ_KV_SELF() | ||
{ | ||
static std::map<e_model, size_t> _MEM_REQ_KV_SELF = { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: declaration uses identifier '_MEM_REQ_KV_SELF', which is a reserved identifier [bugprone-reserved-identifier]
static std::map<e_model, size_t> _MEM_REQ_KV_SELF = { | |
static std::map<e_model, size_t> MEM_REQ_KV_SELF = { |
llama-test.cpp:81:
- return _MEM_REQ_KV_SELF;
+ return MEM_REQ_KV_SELF;
// not actually needed if BLAS is disabled | ||
static const std::map<e_model, size_t> & MEM_REQ_EVAL() | ||
{ | ||
static std::map<e_model, size_t> _MEM_REQ_EVAL = { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: declaration uses identifier '_MEM_REQ_EVAL', which is a reserved identifier [bugprone-reserved-identifier]
static std::map<e_model, size_t> _MEM_REQ_EVAL = { | |
static std::map<e_model, size_t> MEM_REQ_EVAL = { |
llama-test.cpp:94:
- return _MEM_REQ_EVAL;
+ return MEM_REQ_EVAL;
std::unique_ptr<llama_mmap> mapping; | ||
|
||
llama_model_loader(const std::string & fname_base, bool use_mmap, bool vocab_only) { | ||
auto first_file = new llama_file_loader(fname_base.c_str(), 0, tensors_map); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: 'auto first_file' can be declared as 'auto *first_file' [readability-qualified-auto]
auto first_file = new llama_file_loader(fname_base.c_str(), 0, tensors_map); | |
auto *first_file = new llama_file_loader(fname_base.c_str(), 0, tensors_map); |
} | ||
}; | ||
if (int(workers.size()) < nthread_use - 1) workers.resize(nthread_use - 1); | ||
for (int it = 0; it < nthread_use - 1; ++it) workers[it] = std::thread(compute); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: statement should be inside braces [readability-braces-around-statements]
- for (int it = 0; it < nthread_use - 1; ++it) workers[it] = std::thread(compute);
+ for (int it = 0; it < nthread_use - 1; ++it) { workers[it] = std::thread(compute); }
if (int(workers.size()) < nthread_use - 1) workers.resize(nthread_use - 1); | ||
for (int it = 0; it < nthread_use - 1; ++it) workers[it] = std::thread(compute); | ||
compute(); | ||
for (int it = 0; it < nthread_use - 1; ++it) workers[it].join(); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: statement should be inside braces [readability-braces-around-statements]
- for (int it = 0; it < nthread_use - 1; ++it) workers[it].join();
+ for (int it = 0; it < nthread_use - 1; ++it) { workers[it].join(); }
fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model); | ||
model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*vocab_only*/ false)); | ||
|
||
size_t ctx_size, mmapped_size; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: multiple declarations in a single statement reduces readability [readability-isolate-declaration]
size_t ctx_size, mmapped_size; | |
size_t ctx_size; | |
size_t mmapped_size; |
} | ||
|
||
std::string name(length, 0); | ||
fin.read(&name[0], length); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: 'data' should be used for accessing the data pointer instead of taking the address of the 0-th element [readability-container-data-pointer]
fin.read(&name[0], length); | |
fin.read(name.data(), length); |
base_name.erase(pos); | ||
// fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str()); | ||
|
||
if (model_tensors.find(base_name.data()) == model_tensors.end()) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
warning: redundant call to 'data' [readability-redundant-string-cstr]
if (model_tensors.find(base_name.data()) == model_tensors.end()) { | |
if (model_tensors.find(base_name) == model_tensors.end()) { |
No description provided.