当前位置:网站首页>ninja启动过程
ninja启动过程
2022-07-23 05:42:00 【你看那是一个舔狗】
ninja启动过程
代码中的Warning基本都是我自己打的用来测试
- 源码获取
git clone https://android.googlesource.com/platform/external/ninja
- 编译指令
python3 configure.py --bootstrap
编译列表 : build.ninja生成
文件列表 : https://github.com/zhchbin/DN/wiki/ninja%E6%BA%90%E7%A0%81%E9%98%85%E8%AF%BB%E7%AC%94%E8%AE%B0
底层的数据结构

根据这张图,我们来看Ninja的底层是如何处理的(以下数据结构只保留最简的部分)
底层的数据结构
(图片缺失:image/Ninja3.jpg — 外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来后直接上传)
回到这张图,我们来看Ninja的底层的如何处理的(以下数据结构只保留到最简的部分)
State
State保存单次运行的全局状态
/// Global state for a single ninja run: all nodes, edges, pools and scopes.
struct State {
// Built-in pools and the phony rule use this dummy built-in scope to
// initialize their relative-position fields. The scope itself contains
// nothing.
static Scope kBuiltinScope;
static Pool kDefaultPool;
static Pool kConsolePool;
static Rule kPhonyRule;
// Concurrent hash map holding every Node, keyed by its path.
typedef ConcurrentHashMap<HashedStrView, Node*> Paths;
Paths paths_;
// All pools, keyed by pool name.
std::unordered_map<HashedStrView, Pool*> pools_;
// Every edge (i.e. every `build` statement).
vector<Edge*> edges_;
// Root (file-level) scope of the manifest.
Scope root_scope_ {
ScopePosition {
} };
vector<Node*> defaults_; // default targets (from `default` statements)
private:
/// Position 0 is used for built-in decls (e.g. pools).
DeclIndex dfs_location_ = 1;
};
Scope
Scope作用域:变量的作用范围,有rule与build语句的块级,也有文件级别。包含Rule,同时保存了父Scope的位置
/// A variable scope: block-level (inside rule/build) or file-level.
/// Holds bindings and rules, plus the position of its parent scope.
struct Scope {
Scope(ScopePosition parent) : parent_(parent) {
}
private:
ScopePosition parent_; // position of the parent scope
DeclIndex pos_ = 0; // this scope's own declaration position
// Variable bindings, keyed by name; each name maps to a stack of bindings.
std::unordered_map<HashedStrView, std::vector<Binding*>> bindings_;
// Rules declared in this scope, keyed by rule name.
std::unordered_map<HashedStrView, Rule*> rules_;
};
Rule
Rule文件的构建规则,存在局部变量
/// A build rule from the manifest; may carry rule-local variable bindings.
struct Rule {
Rule() {
}
struct {
// Location of this rule's name within its source file (for diagnostics).
size_t rule_name_diag_pos = 0;
} parse_state_;
RelativePosition pos_; // relative declaration position
HashedStr name_; // rule name
std::vector<std::pair<HashedStr, std::string>> bindings_;// rule-local variables as ordered key/value pairs
};
Binding & DefaultTarget
Binding以键值对的形式存在用来变量DefaultTarget 保存默认的输出的target
/// A variable binding: a name/value pair.
struct Binding {
RelativePosition pos_; // relative declaration position
HashedStr name_; // variable name
StringPiece parsed_value_; // raw (unevaluated) value text
};
/// A target named by a `default` statement.
struct DefaultTarget {
RelativePosition pos_; // relative declaration position
LexedPath parsed_path_; // lexed target path (a StringPiece)
size_t diag_pos_ = 0; // offset used for diagnostics
};
Node
Node是最边界的数据结构,ninja语法中的input,output,target,default的底层保存都是Node
/// The leaf data structure of the graph: the ninja-syntax input, output,
/// target and default entities are all stored as Nodes.
struct Node {
Node(const HashedStrView& path, uint64_t initial_slash_bits)
: path_(path),
first_reference_({
kLastDeclIndex, initial_slash_bits }) {
}
~Node();
private:
// Path identifying this node.
const HashedStr path_;
std::atomic<NodeFirstReference> first_reference_;
// The edge that produces this node as an output (if any).
Edge* in_edge_ = nullptr;
// Linked-list cell for the edges using this Node as an input. The list
// order is indeterminate; it is re-sorted on each access.
struct EdgeList {
EdgeList(Edge* edge=nullptr, EdgeList* next=nullptr)
: edge(edge), next(next) {
}
Edge* edge = nullptr;
EdgeList* next = nullptr;
};
std::atomic<EdgeList*> out_edges_ {
nullptr };
std::atomic<EdgeList*> validation_out_edges_ {
nullptr };
std::vector<Edge*> dep_scan_out_edges_;
};
Edge
Edge是最核心的数据结构,会将Node、Rule、Binding等数据结构组合起来
/// The central graph structure; ties Nodes, a Rule, a Pool and edge-local
/// bindings together. One `build` statement corresponds to one Edge.
struct Edge {
// Fixed properties, configured via the Rule's bindings.
struct DepScanInfo {
bool valid = false;
bool restat = false;
bool generator = false;
bool deps = false;
bool depfile = false;
bool phony_output = false;
uint64_t command_hash = 0;
};
public:
struct {
StringPiece rule_name; // name of the rule this edge uses
size_t rule_name_diag_pos = 0;
size_t final_diag_pos = 0;
} parse_state_;
const Rule* rule_ = nullptr; // the rule this edge invokes
Pool* pool_ = nullptr; // the pool this edge runs in
// Inputs and outputs of this edge.
vector<Node*> inputs_;
vector<Node*> outputs_;
std::vector<std::pair<HashedStr, std::string>> unevaled_bindings_; // edge-local bindings (unevaluated)
int explicit_deps_ = 0; // number of explicit inputs
int implicit_deps_ = 0; // number of implicit inputs
int order_only_deps_ = 0; // number of implicit order-only deps
int explicit_outs_ = 0; // number of explicit outputs
int implicit_outs_ = 0; // number of implicit outputs
};
如何区分显隐式:input和output会按照 显式 -> 隐式 -> order-only(仅依赖) 的顺序进行push_back()
根据当前的值的位置与显隐式的数量做对比就可以知道
edge->outputs_.reserve(edge->explicit_outs_ + edge->implicit_outs_);
edge->inputs_.reserve(edge->explicit_deps_ + edge->implicit_deps_ +
edge->order_only_deps_);
入口函数 real_main()
ninja.cc::main() -> ninja.cc::real_main()
/// Entry point proper (called from main()); never returns normally.
NORETURN void real_main(int argc, char** argv) {
// Use exit() instead of return in this function to avoid the potentially
// expensive cleanup that destroying NinjaMain could trigger.
BuildConfig config;// build options (parallelism, verbosity, ...)
Options options = {
}; // command-line options (input file, working dir, tool, ...)
options.input_file = "build.ninja";
options.dupe_edges_should_err = true;
setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
const char* ninja_command = argv[0];
// Parse command-line flags; a non-negative result means "exit now".
int exit_code = ReadFlags(&argc, &argv, &options, &config); // return 1 exit
if (exit_code >= 0)
exit(exit_code);
// Whether depfiles with multiple targets on separate lines should warn
// or print an error.
if (options.depfile_distinct_target_lines_should_err) {
config.depfile_parser_options.depfile_distinct_target_lines_action_ =
kDepfileDistinctTargetLinesActionError;
}
// NULL unless -C was given.
if (options.working_dir) {
if (!options.tool)
Info("Entering directory `%s'", options.working_dir);
if (chdir(options.working_dir) < 0) {
Error("chdir to '%s' - %s", options.working_dir, strerror(errno));
exit(1);
}
}
// One thread if threads are disabled (-d nothreads), otherwise one per CPU.
SetThreadPoolThreadCount(g_use_threads ? GetProcessorCount() : 1);
// Only the 'urtle' tool uses Tool::RUN_AFTER_FLAGS.
if (options.tool && options.tool->when == Tool::RUN_AFTER_FLAGS) {
NinjaMain ninja(ninja_command, config);
exit((ninja.*options.tool->func)(&options, argc, argv));
}
// Tracks and reports build status.
Status* status = NULL;
// Limit number of rebuilds, to prevent infinite loops.
const int kCycleLimit = 100;
for (int cycle = 1; cycle <= kCycleLimit; ++cycle) {
// NinjaMain owns the data structures loaded for this build attempt.
NinjaMain ninja(ninja_command, config);
if (status == NULL) {
status = new StatusPrinter(config);
}
// Manifest parser options.
ManifestParserOptions parser_opts;
if (options.dupe_edges_should_err) {
parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
}
if (options.phony_cycle_should_err) {
parser_opts.phony_cycle_action_ = kPhonyCycleActionError;
}
// The manifest parser.
ManifestParser parser(&ninja.state_, &ninja.disk_interface_, parser_opts);
Warning("parser : %s",ninja.ninja_command_); // parser : ./ninja
string err;
if (!parser.Load(options.input_file, &err)) {
status->Error("%s", err.c_str());
exit(1);
}
if (options.tool && options.tool->when == Tool::RUN_AFTER_LOAD)
exit((ninja.*options.tool->func)(&options, argc, argv));
// Make sure the build directory exists.
if (!ninja.EnsureBuildDirExists())
exit(1);
if (!ninja.OpenBuildLog() || !ninja.OpenDepsLog())
exit(1);
if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS)
exit((ninja.*options.tool->func)(&options, argc, argv));
// Attempt to rebuild the manifest before building anything else.
if (ninja.RebuildManifest(options.input_file, &err, status)) {
// In dry_run mode the regeneration will succeed without changing the
// manifest forever. Better to return immediately.
if (config.dry_run)
exit(0);
// Start the build over with the new manifest.
continue;
} else if (!err.empty()) {
status->Error("rebuilding '%s': %s", options.input_file, err.c_str());
exit(1);
}
int result = 0;
do {
// A stopwatch; reports the elapsed time since Restart().
Stopwatch stopwatch;
if (options.persistent) {
WaitForInput(config);
stopwatch.Restart();
}
// Run the actual build.
result = ninja.RunBuild(argc, argv, status);
if (options.persistent) {
fprintf(stderr, "build %s in %0.3f seconds\n",
result == 0 ? "succeeded" : "failed", stopwatch.Elapsed());
ninja.state_.Reset();
}
} while (options.persistent);
if (g_metrics)
ninja.DumpMetrics(status);
delete status;
exit(result);
}
status->Error("manifest '%s' still dirty after %d tries",
options.input_file, kCycleLimit);
delete status;
exit(1);
}
} // anonymous namespace
参数处理及判断
参数处理 ReadFlag()
NORETURN void real_main(int argc, char** argv) {
BuildConfig config;//
Options options = {
}; //
options.input_file = "build.ninja";
options.dupe_edges_should_err = true;
setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
const char* ninja_command = argv[0];
// 处理参数
int exit_code = ReadFlags(&argc, &argv, &options, &config); // return 1 exit
if (exit_code >= 0)
exit(exit_code);
...
}
/// Parse ninja's command-line flags into `options` and `config`.
/// Returns an exit code (>= 0) when ninja should exit immediately,
/// or -1 to continue running.
int ReadFlags(int* argc, char*** argv,
Options* options, BuildConfig* config) {
config->parallelism = GuessParallelism(); // e.g. 18 on the author's machine
enum {
OPT_VERSION = 1,
OPT_FRONTEND = 2,
OPT_FRONTEND_FILE = 3,
};
const option kLongOptions[] = {
#ifndef _WIN32
{
"frontend", required_argument, NULL, OPT_FRONTEND },
{
"frontend_file", required_argument, NULL, OPT_FRONTEND_FILE },
#endif
{
"help", no_argument, NULL, 'h' },
{
"version", no_argument, NULL, OPT_VERSION },
{
"verbose", no_argument, NULL, 'v' },
{
NULL, 0, NULL, 0 }
};
int opt;
while (!options->tool &&
(opt = getopt_long(*argc, *argv, "d:f:j:k:l:mnt:vw:o:C:ph", kLongOptions,
NULL)) != -1) {
switch (opt) {
case 'd':
if (!DebugEnable(optarg)) // invalid flag (or `-d list`) exits with 1
return 1;
break;
case 'f':
options->input_file = optarg;
break;
// Parallel job count.
case 'j': {
char* end;
int value = strtol(optarg, &end, 10);
if (*end != 0 || value < 0)
Fatal("invalid -j parameter");
// We want to run N jobs in parallel. For N = 0, INT_MAX
// is close enough to infinite for most sane builds.
config->parallelism = value > 0 ? value : INT_MAX;
break;
}
// Number of failures to tolerate before stopping.
case 'k': {
char* end;
// strtol: convert the string to a long in the given base.
int value = strtol(optarg, &end, 10);
if (*end != 0)
Fatal("-k parameter not numeric; did you mean -k 0?");
// We want to go until N jobs fail, which means we should allow
// N failures and then stop. For N <= 0, INT_MAX is close enough
// to infinite for most sane builds.
config->failures_allowed = value > 0 ? value : INT_MAX;
break;
}
case 'l': {
char* end;
// strtod: convert the string to a floating-point value.
double value = strtod(optarg, &end);
if (end == optarg)
Fatal("-l parameter not numeric: did you mean -l 0.0?");
config->max_load_average = value;
break;
}
case 'n':
config->dry_run = true;
break;
case 't':
options->tool = ChooseTool(optarg);
if (!options->tool)
return 0;
break;
case 'v':
config->verbosity = BuildConfig::VERBOSE;
break;
case 'w':
if (!WarningEnable(optarg, options, config))
return 1;
break;
case 'o':
if (!OptionEnable(optarg, options, config))
return 1;
break;
// Change the working directory.
case 'C':
options->working_dir = optarg;
break;
case 'p':
options->persistent = true;
break;
case OPT_VERSION:
printf("%s\n", kNinjaVersion);
return 0;
case OPT_FRONTEND:
config->frontend = optarg;
break;
case OPT_FRONTEND_FILE:
config->frontend_file = optarg;
break;
case 'h':
default:
Usage(*config);
return 1;
}
}
*argv += optind;
*argc -= optind;
if (config->frontend != NULL && config->frontend_file != NULL) {
Fatal("only one of --frontend or --frontend_file may be specified.");
}
if (config->pre_remove_output_files && !config->uses_phony_outputs) {
Fatal("preremoveoutputs=yes requires usesphonyoutputs=yes.");
}
return -1;
}
BuildConfig 和 Options 主要用来保存一些配置相关的信息,常用选项中:
Options主要保存输入文件[input_file(-f 默认 build.ninja)],工作路径[working_dir(-C 默认 当期路径,打印为NULL)],以及一个工具类[Tool*(-t 默认为NULL)]以及一些打印相关的标识BuildConfig主要是一些构建选项等,一般在使用时都是使用的默认值
参数判断
接下来就是一系列的参数判断
NORETURN void real_main(int argc, char** argv) {
...
if (options.depfile_distinct_target_lines_should_err) {
config.depfile_parser_options.depfile_distinct_target_lines_action_ =
kDepfileDistinctTargetLinesActionError;
}
// NULL
if (options.working_dir) {
// The formatting of this string, complete with funny quotes, is
// so Emacs can properly identify that the cwd has changed for
// subsequent commands.
// Don't print this if a tool is being used, so that tool output
// can be piped into a file without this string showing up.
if (!options.tool)
Info("Entering directory `%s'", options.working_dir);
if (chdir(options.working_dir) < 0) {
Error("chdir to '%s' - %s", options.working_dir, strerror(errno));
exit(1);
}
}
//if (-d nothreads) 获取CPU数量
SetThreadPoolThreadCount(g_use_threads ? GetProcessorCount() : 1);
// 只有 urtle 使用Tool::RUN_AFTER_FLAGS)
if (options.tool && options.tool->when == Tool::RUN_AFTER_FLAGS) {
// None of the RUN_AFTER_FLAGS actually use a NinjaMain, but it's needed
// by other tools.
NinjaMain ninja(ninja_command, config);
exit((ninja.*options.tool->func)(&options, argc, argv));
}
Status* status = NULL;
...
}
分析文件并构建图
const int kCycleLimit = 100;
for (int cycle = 1; cycle <= kCycleLimit; ++cycle) {
NinjaMain ninja(ninja_command, config);
if (status == NULL) {
#ifndef _WIN32
if (config.frontend != NULL || config.frontend_file != NULL)
status = new StatusSerializer(config);
else
#endif
status = new StatusPrinter(config);
}
ManifestParserOptions parser_opts;
if (options.dupe_edges_should_err) {
parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
}
if (options.phony_cycle_should_err) {
parser_opts.phony_cycle_action_ = kPhonyCycleActionError;
}
// TODO: Manifest分析
ManifestParser parser(&ninja.state_, &ninja.disk_interface_, parser_opts);
Warning("parser : %s\r\n",ninja.ninja_command_); // parser : ninja
string err;
// 加载输入文件
Warning("parser.Load(options.input_file)[%s]\r\n",options.input_file);
if (!parser.Load(options.input_file, &err)) {
status->Error("%s", err.c_str());
exit(1);
}
文件分析的入口函数为parser.Load(options.input_file, &err) ,在进入函数之前会先接触两个类,NinjaMain 和 ManifestParser
NinjaMain
NinjaMain 中包含了 BuildConfig,State,RealDiskInterface,BuildLog,DepsLog 等数据结构.主函数中加载了一系列的数据结构,使用NinjaMain储存为对象上的字段
ManifestParser
解析ninja文件中的字段,并将其保存到对象中
// Manifest解析器
// Manifest (build.ninja) parser.
struct ManifestParser {
ManifestParser(State* state, FileReader* file_reader,
ManifestParserOptions options = ManifestParserOptions())
: state_(state),
file_reader_(file_reader),
options_(options) {
}
/// Load and parse a file.
bool Load(const std::string& filename, std::string* err);
/// Parse a text string of input. Used by tests.
///
/// Some tests may call ParseTest multiple times with the same State object.
/// Each call adds to the previous state; it doesn't replace it.
bool ParseTest(const std::string& input, std::string* err);
private:
State* state_ = nullptr; // global state the parse results go into
FileReader* file_reader_ = nullptr; // reads manifest files from disk
ManifestParserOptions options_;
};
/// What to do when the manifest declares the same edge output twice.
enum DupeEdgeAction {
kDupeEdgeActionWarn,
kDupeEdgeActionError,
};
/// What to do when a phony build statement references itself.
enum PhonyCycleAction {
kPhonyCycleActionWarn,
kPhonyCycleActionError,
};
// Options controlling manifest parsing.
struct ManifestParserOptions {
DupeEdgeAction dupe_edge_action_ = kDupeEdgeActionWarn;
PhonyCycleAction phony_cycle_action_ = kPhonyCycleActionWarn;
};
Load()
解析过程会按照以下的顺序进行
ninja.cc real_main() -> parser.Load()
mainfest_parser.cc ManifestParser::Load() -> loader.Load()
mainfest_parser.cc ManifestLoader::Load() -> dfs_parser.LoadManifestTree()
mainfest_parser.cc DfsParser::LoadManifestTree() -> ParseManifestChunks()
mainfest_parser.cc ParseManifestChunks() -> manifest_chunk::ParseChunk()
mainfest_chunk_parser.cc ParseChunk -> parser.ParseChunk()
ManifestParser::Load()
bool ManifestParser::Load(const string& filename, string* err) {
METRIC_RECORD(".ninja load");
ManifestFileSet file_set(file_reader_);
const LoadedFile* file = nullptr;
if (!file_set.LoadFile(filename, &file, err))
return false;
std::unique_ptr<ThreadPool> thread_pool = CreateThreadPool();
ManifestLoader loader(state_, thread_pool.get(), options_, false);
return loader.Load(&file_set, *file, err);
}
ManifestLoader::Load()
/// Recursively parse the root manifest into clumps via a depth-first walk,
/// then finish constructing the graph from the collected clumps.
bool ManifestLoader::Load(ManifestFileSet* file_set,
                          const LoadedFile& root_manifest, std::string* err) {
  std::vector<Clump*> clumps;
  DfsParser parser(file_set, state_, thread_pool_);
  const bool parsed = parser.LoadManifestTree(root_manifest,
                                              &state_->root_scope_,
                                              &clumps, err);
  return parsed ? FinishLoading(clumps, err) : false;
}
DfsParser::LoadManifestTree
/// Depth-first load of one manifest file: parse it into chunks in parallel,
/// then walk the parsed items in order, recursing into included files.
bool DfsParser::LoadManifestTree(const LoadedFile& file, Scope* scope,
std::vector<Clump*>* out_clumps,
std::string* err) {
Warning("DfsParser::LoadManifestTree()\r\n");
Warning("std::vector<ParserItem> items = ParseManifestChunks(file, thread_pool_);\r\n");
std::vector<ParserItem> items = ParseManifestChunks(file, thread_pool_);
Warning("scope中保留空间\r\n");
ReserveSpaceInScopeTables(scope, items);
// With the chunks parsed, do a depth-first parse of the ninja manifest using
// the results of the parallel parse.
for (const auto& item : items) {
switch (item.kind) {
case ParserItem::kError:
*err = item.u.error->msg_;
return false;
case ParserItem::kRequiredVersion:
HandleRequiredVersion(*item.u.required_version, scope);
break;
case ParserItem::kInclude: {
const Include& include = *item.u.include;
const LoadedFile* child_file = nullptr;
Scope* child_scope = nullptr;
// Load the included file, then recurse into it depth-first.
if (!HandleInclude(include, file, scope, &child_file, &child_scope, err))
return false;
if (!LoadManifestTree(*child_file, child_scope, out_clumps, err))
return false;
break;
}
case ParserItem::kClump:
// Register the clump's bindings/rules/pools in the current scope.
if (!HandleClump(item.u.clump, file, scope, err))
return false;
out_clumps->push_back(item.u.clump);
break;
default:
assert(false && "unrecognized kind of ParserItem");
abort();
}
}
return true;
}
ParseManifestChunks()
//将单个manifest文件分成块,并行解析块,并返回生成的解析器输出。
/// Split a single manifest file into chunks, parse the chunks in parallel,
/// and return the resulting parser output items in file order.
static std::vector<ParserItem> ParseManifestChunks(const LoadedFile& file,
    ThreadPool* thread_pool) {
  std::vector<ParserItem> result;
  const std::vector<StringPiece> chunk_views =
      manifest_chunk::SplitManifestIntoChunks(file.content_with_nul());
  METRIC_RECORD(".ninja load : parse chunks");
  for (std::vector<ParserItem>& chunk_items :
      ParallelMap(thread_pool, chunk_views, [&file](StringPiece view) {
        std::vector<ParserItem> chunk_items;
        manifest_chunk::ParseChunk(file, view, &chunk_items); // parse build.ninja chunk
        return chunk_items;
      })) {
    // Move each chunk's items into the combined result, preserving order.
    std::move(chunk_items.begin(), chunk_items.end(),
              std::back_inserter(result));
  }
  // BUG FIX: result.size() is a size_t; passing it for "%d" is undefined
  // behavior (wrong width on LP64). Use %zu instead.
  Warning("result.size(%zu)\r\n", result.size());
  return result;
}
ParseChunk()
/// Parse one manifest chunk into `out`. On failure the last item appended is
/// guaranteed to be a non-empty error item.
void ParseChunk(const LoadedFile& file, StringPiece chunk_content,
                std::vector<ParserItem>* out) {
  ChunkParser parser(file, chunk_content, out);
  Warning("manifest_chunk_parser.cc -> parser.ParseChunk()\r\n");
  // Parse this slice of build.ninja into parser items.
  const bool ok = parser.ParseChunk();
  if (ok)
    return;
  // A failed parse must have recorded an error item carrying a message.
  assert(!out->empty());
  assert(out->back().kind == ParserItem::kError);
  assert(!out->back().u.error->msg_.empty());
}
ChunkParser::ParseChunk()
此处为读取文件进行分析的主要位置,按行,循环执行lexer_.ReadToken();读取 build.ninja 的行值并根据内容返回枚举属性值,判断属性值并执行对应的函数
/// Main chunk-parsing loop: repeatedly read a token from the build.ninja
/// chunk and dispatch to the statement-specific parser for that token.
bool ChunkParser::ParseChunk() {
int i = 1;
while (true) {
Warning("while(%d)",i++);
if (lexer_.GetPos() >= chunk_end_) {
assert(lexer_.GetPos() == chunk_end_ &&
"lexer scanned beyond the end of a manifest chunk");
return true;
}
Lexer::Token token = lexer_.ReadToken();
Warning("token = (%d)\r",token);
bool success = true;
switch (token) {
case Lexer::INCLUDE: success = ParseFileInclude(false); break; // `include` statement
case Lexer::SUBNINJA: success = ParseFileInclude(true); break; // `subninja` statement
case Lexer::POOL: success = ParsePool(); break; // `pool` declaration
case Lexer::DEFAULT: success = ParseDefault(); break; // `default` targets
case Lexer::IDENT: success = ParseBinding(); break; // global variable binding
case Lexer::RULE: success = ParseRule(); break; // rule: stored in clump->rules_; rule-local vars go into rule->bindings_ as ordered key/value pairs
case Lexer::BUILD: success = ParseEdge(); break; // build statement; one build == one Edge
case Lexer::NEWLINE: break; // blank line or comment
case Lexer::ERROR: return LexerError(lexer_.DescribeLastError());
case Lexer::TNUL: return LexerError("unexpected NUL byte");
case Lexer::TEOF:
assert(false && "EOF should have been detected before reading a token");
break;
default:
return LexerError(std::string("unexpected ") + Lexer::TokenName(token));
}
if (!success) return false;
}
return false; // not reached
}
在ChunkParser中有一个Clump对象,用来保存全局变量,Rule,Edge,Pool,default_target
/// Parses one chunk of a manifest file, appending ParserItems to `out_`.
class ChunkParser {
const LoadedFile& file_; // the file this chunk belongs to
Lexer lexer_;
const char* chunk_end_ = nullptr; // one past the last byte of the chunk
std::vector<ParserItem>* out_ = nullptr; // parsed items are appended here
Clump* current_clump_ = nullptr; // clump currently being filled, if any
// Return the clump being filled, creating and emitting one if necessary.
Clump* MakeClump() {
Warning("MakeClump()\r");
if (current_clump_){
Warning("return current_clump_\r");
return current_clump_;
}
current_clump_ = new Clump {
file_ };
out_->push_back(current_clump_);
Warning("out_->size(%d)\r",out_->size());
return current_clump_;
}
// Emit a non-clump item; this also terminates the current clump.
void OutItem(ParserItem item) {
current_clump_ = nullptr;
out_->push_back(item);
}
// Emit an error item; returns false for convenient `return OutError(...)`.
bool OutError(const std::string& err) {
OutItem(new Error {
err });
return false;
}
bool LexerError(const std::string& message);
bool ExpectToken(Lexer::Token expected);
bool ParseLet(StringPiece* key, StringPiece* value);
bool ParseFileInclude(bool new_scope);
bool ParsePool();
bool ParseDefault();
bool ParseBinding();
bool ParseRule();
bool ParseEdge();
public:
ChunkParser(const LoadedFile& file,
StringPiece chunk_content,
std::vector<ParserItem>* out)
: file_(file),
lexer_(file.filename(), file.content(), chunk_content.data()),
chunk_end_(chunk_content.data() + chunk_content.size()),
out_(out) {
}
bool ParseChunk();
};
/// Ordered declarations of one contiguous region of a manifest file:
/// global bindings, rules, pools, edges and default targets.
struct Clump {
Clump(const LoadedFile& file) : file_(file) {
Warning("Clump::file_(%s)", file.filename().c_str());
}
const LoadedFile& file_;
BasePosition pos_;
std::vector<Binding*> bindings_; // global variable bindings
std::vector<Rule*> rules_; // rule declarations
std::vector<Pool*> pools_; // pool declarations
std::vector<Edge*> edges_; // build statements
std::vector<DefaultTarget*> default_targets_; // default statements
/// A count of non-implicit outputs across all edges.
size_t edge_output_count_ = 0;
DeclIndex decl_count() const {
return next_index_; }
/// Allocate an index within the clump. Once the parallelized chunk parsing is
/// finished, each clump's base position will be computed, giving every clump
/// item both a DFS "depth-first-search" position and a position within the
/// tree of scopes.
RelativePosition AllocNextPos() {
return {
&pos_, next_index_++ }; }
private:
DeclIndex next_index_ = 0;
};
- 在此处的分析中,可以获取到键,值存储的是划分符号后所有的内容,在有的数据结构中会保存一个偏移值的数据结构.,在此处函数执行结束后,再进行一次分析获取到准确的值.
ParseFileInclude(false)
保存include
bool ChunkParser::ParseFileInclude(bool new_scope) {
Include* include = new Include();
include->new_scope_ = new_scope;
std::string err;
if (!lexer_.ReadPath(&include->path_, &err))
return OutError(err);
include->diag_pos_ = lexer_.GetLastTokenOffset();
if (!ExpectToken(Lexer::NEWLINE))
return false;
OutItem(include);
return true;
}
ParseFileInclude(true)
保存subninja,同上,两者的主要区别在于,是否存在于一个作用域下,include便是一个作用域,而subninja的作用域与当前文件不同
如,在android编译时入口文件为 out/combined_xxx.ninja,此文件主要作用就是设置一个depth = 2 的Pool ,include out/build-xxx.ninja 和 out/soong/build.ninja到当前作用域下
ParsePool()
保存Pool到clump->pools_
在执行过程中会读取局部变量 depth 并设置到 pool->parse_state_.depth
/// Parse a `pool` declaration and store it in clump->pools_. The pool-local
/// `depth` variable is captured into pool->parse_state_.depth.
bool ChunkParser::ParsePool() {
Pool* pool = new Pool();
StringPiece name;
if (!lexer_.ReadIdent(&name))
return LexerError("expected pool name");
pool->name_ = name;
if (!ExpectToken(Lexer::NEWLINE))
return false;
pool->parse_state_.pool_name_diag_pos = lexer_.GetLastTokenOffset();
// Read the indented pool body line by line.
while (lexer_.PeekIndent()) {
StringPiece key;
StringPiece value;
if (!ParseLet(&key, &value))
return false;
if (key == "depth") {
pool->parse_state_.depth = value;
pool->parse_state_.depth_diag_pos = lexer_.GetLastTokenOffset();
} else {
// `depth` is the only variable a pool may define.
return LexerError("unexpected variable '" + key.AsString() + "'");
}
}
if (pool->parse_state_.depth.empty())
return LexerError("expected 'depth =' line");
Clump* clump = MakeClump();
pool->pos_ = clump->AllocNextPos();
clump->pools_.push_back(pool);
return true;
}
ParseDefault()
保存default到clump->default_targets_
/// Parse a `default` statement: read one or more target paths and store each
/// one as a DefaultTarget in clump->default_targets_.
bool ChunkParser::ParseDefault() {
  bool first = true;
  while (true) {
    LexedPath path;
    std::string err;
    if (!lexer_.ReadPath(&path, &err))
      return OutError(err);
    if (path.str_.empty()) {
      // End of the path list; an empty first path means no target was given.
      if (first)
        return LexerError("expected target name");
      else
        break;
    }
    first = false;
    DefaultTarget* target = new DefaultTarget();
    target->parsed_path_ = std::move(path);
    target->diag_pos_ = lexer_.GetLastTokenOffset();
    Clump* clump = MakeClump();
    target->pos_ = clump->AllocNextPos();
    clump->default_targets_.push_back(target);
    // BUG FIX: the original format string had no conversion specifier even
    // though size() was passed as an argument; size() is size_t, so use %zu.
    Warning("clump->default_targets_.size(%zu)\r", clump->default_targets_.size());
  }
  Warning("ExpectToken(Lexer::NEWLINE)\r\n");
  return ExpectToken(Lexer::NEWLINE);
}
ParseBinding()
保存全局变量到 clump->bindings_
使用Binding用来保存全局变量
在Binding中,可以使用name() 来读取变量名。在分析ninja文件时, parsed_value_ 中保存了 = 之后的全部原始文本,在所有文件都分析完毕后才会再进行求值,所以在 ParseBinding() 中只能读取到键(name_),而不能获取到确切的值.
/// Parse a global variable binding (`name = value`) into clump->bindings_.
/// Only the key and the raw unevaluated value text are captured here; the
/// value is evaluated later, after all files have been parsed.
bool ChunkParser::ParseBinding() {
Binding* binding = new Binding();
lexer_.UnreadToken();
StringPiece name;
Warning("ParseLet()");
if (!ParseLet(&name, &binding->parsed_value_))
return false;
// The key is the identifier before the '='.
binding->name_ = name;
if (binding->name_ == kNinjaRequiredVersion){
// `ninja_required_version` is special-cased as its own parser item.
OutItem(new RequiredVersion {
binding->parsed_value_ });}
Clump* clump = MakeClump();
binding->pos_ = clump->AllocNextPos();
clump->bindings_.push_back(binding);
return true;
}
ParseRule()
保存Rule到clump->rules_
首先会获取rule名,保存到rule->name_然后开始读取循环读取接下来的内容,以键值对的形式保存到 rule-> bindings_中,读取结束,将这个Rule保存.
/// Parse a `rule` declaration: read the rule name into rule->name_, then read
/// the indented body line by line into rule->bindings_ as ordered key/value
/// pairs, and finally store the Rule in clump->rules_.
bool ChunkParser::ParseRule() {
  Rule* rule = new Rule();
  StringPiece rule_name;
  if (!lexer_.ReadIdent(&rule_name))
    return LexerError("expected rule name");
  rule->name_ = rule_name;
  if (!ExpectToken(Lexer::NEWLINE))
    return false;
  rule->parse_state_.rule_name_diag_pos = lexer_.GetLastTokenOffset();
  int i = 1;  // BUG FIX: this trace counter was used below but never declared
  while (lexer_.PeekIndent()) {
    StringPiece key;
    StringPiece value;
    if (!ParseLet(&key, &value))
      return false;
    if (!Rule::IsReservedBinding(key)) {
      // Die on other keyvals for now; revisit if we want to add a scope here.
      // If we allow arbitrary key values here, we'll need to revisit how cycle
      // detection works when evaluating a rule variable.
      return LexerError("unexpected variable '" + key.AsString() + "'");
    }
    // std::piecewise_construct avoids an unnecessary temporary pair.
    rule->bindings_.emplace_back(std::piecewise_construct,
                                 std::tuple<StringPiece>(key),
                                 std::tuple<const char*, size_t>(value.data(),
                                                                 value.size()));
    // BUG FIX: the trace message previously named the wrong function
    // (ParseBinding instead of ParseRule).
    Warning("ChunkParser::ParseRule() while(%d) -> key[%s] : value[%s]\r",i++,rule->bindings_.back().first.data(),rule->bindings_.back().second.data());
  }
  // rspfile and rspfile_content must be specified together.
  if (static_cast<bool>(rule->GetBinding(kRspFile)) !=
      static_cast<bool>(rule->GetBinding(kRspFileContent))) {
    return LexerError("rspfile and rspfile_content need to be both specified");
  }
  // Every rule must define `command`.
  if (rule->GetBinding(kCommand) == nullptr){
    return LexerError("expected 'command =' line");
  }
  Clump* clump = MakeClump();
  rule->pos_ = clump->AllocNextPos();
  clump->rules_.push_back(rule);
  return true;
}
ParseEdge()
一个build语句就是一个Edge,保存Edge到 clump->edges_
在ParseEdge()中会按照以下顺序进行分析 :显式输出 -> 隐式输出 -> 构建规则名 -> 显式依赖 -> 隐式依赖 -> order-only依赖
保存输出到edge->outputs_ ,保存依赖到edge->inputs_ ,保存rule到edge->parse_state_.rule_name
之后逐行分析局部变量值到 edge->unevaled_bindings_
/// Parse a `build` statement; one build statement corresponds to one Edge.
/// Parsing order: explicit outputs -> implicit outputs -> rule name ->
/// explicit deps -> implicit deps -> order-only deps -> validation deps,
/// then the indented edge-local bindings into edge->unevaled_bindings_.
bool ChunkParser::ParseEdge() {
  Edge* edge = new Edge();
  // Local helper `parse_path_list`: lex a list of paths, counting them into
  // `count`. Only the lexer start position is recorded here; the actual Node
  // objects are created in a later pass (AddEdgeToGraph).
  auto parse_path_list = [this, edge](Edge::DeferredPathList::Type type, int& count) -> bool {
    const char* start_pos = lexer_.GetPos();
    int i = 1;
    while (true) {
      Warning("ChunkParser::ParseEdge()::While(%d)",i++);
      LexedPath path;
      std::string err;
      // Read one path; reading stops at a separator token or end of line.
      if (!lexer_.ReadPath(&path, &err))
        return OutError(err);
      if (path.str_.empty()) {
        // End of this list: remember where it started and how many paths.
        edge->parse_state_.deferred_path_lists.push_back({
            start_pos, type, count,
        });
        return true;
      }
      count++;
    }
  };
  // Explicit outputs.
  Warning("parse_path_list(OUTPUT) 显式输出\r");
  if (!parse_path_list(Edge::DeferredPathList::OUTPUT,
                       edge->explicit_outs_))
    return false;
  // Add all implicit outs (after `|`), counting how many as we go.
  if (lexer_.PeekToken(Lexer::PIPE)) {
    Warning("(Lexer::PIPE) -> parse_path_list(OUTPUT,edge) | 分割 隐式输出\r");
    if (!parse_path_list(Edge::DeferredPathList::OUTPUT,
                         edge->implicit_outs_))
      return false;
  }
  if (edge->explicit_outs_ + edge->implicit_outs_ == 0)
    return LexerError("expected path");
  if (!ExpectToken(Lexer::COLON))
    return false;
  Warning(" rule \r");
  StringPiece rule_name;
  if (!lexer_.ReadIdent(&rule_name))
    return LexerError("expected build command name");
  edge->parse_state_.rule_name = rule_name;
  edge->parse_state_.rule_name_diag_pos = lexer_.GetLastTokenOffset();
  // Explicit inputs (after the rule name).
  Warning("parse_path_list(INPUT) rule 后分割 显式依赖\r");
  if (!parse_path_list(Edge::DeferredPathList::INPUT,
                       edge->explicit_deps_))
    return false;
  // Add all implicit deps (after `|`), counting how many as we go.
  if (lexer_.PeekToken(Lexer::PIPE)) {
    Warning("(Lexer::PIPE) -> parse_path_list(INPUT) |后分割 隐式依赖\r");
    if (!parse_path_list(Edge::DeferredPathList::INPUT,
                         edge->implicit_deps_))
      return false;
  }
  // Add all order-only deps (after `||`), counting how many as we go.
  if (lexer_.PeekToken(Lexer::PIPE2)) {
    Warning("(Lexer::PIPE2) -> parse_path_list(INPUT) ||后分割 order-only依赖\r");
    if (!parse_path_list(Edge::DeferredPathList::INPUT,
                         edge->order_only_deps_))
      return false;
  }
  // Add all validation deps, counting how many as we go.
  if (lexer_.PeekToken(Lexer::PIPEAT)) {
    Warning("(Lexer::PIPEAT) -> parse_path_list(VALIDATION) 验证值\r");
    if (!parse_path_list(Edge::DeferredPathList::VALIDATION,
                         edge->validation_deps_))
      return false;
  }
  if (!ExpectToken(Lexer::NEWLINE))
    return false;
  int i = 1; // Read the indented edge-local bindings.
  while (lexer_.PeekIndent()) {
    StringPiece key;
    StringPiece val;
    if (!ParseLet(&key, &val))
      return false;
    std::tuple<const char*, size_t> val_ctor_params(val.data(), val.size());
    edge->unevaled_bindings_.emplace_back(std::piecewise_construct,
                                          std::tuple<StringPiece>(key),
                                          val_ctor_params);
    Warning("布局变量(%d) , key[%s] : value[%s]\r",i++,edge->unevaled_bindings_.back().first.data(),edge->unevaled_bindings_.back().second.data());
  }
  edge->parse_state_.final_diag_pos = lexer_.GetLastTokenOffset();
  Clump* clump = MakeClump();
  edge->pos_ = clump->AllocNextPos();
  clump->edges_.push_back(edge);
  // BUG FIX: vector::size() returns size_t; "%d" is the wrong conversion
  // specifier for it (undefined behavior on LP64) — use %zu.
  Warning("所有的Edge(%zu) , 此Edge的局部变量数量(%zu)\r\n",clump->edges_.size(),edge->unevaled_bindings_.size());
  clump->edge_output_count_ += edge->explicit_outs_;
  return true;
}
Lexer::NEWLINE
当读取到注释或空行时,会返回Lexer::NEWLINE
HandleClump(item.u.clump, file, scope, err)
在此处将Clump中的全局变量,Rule和Pool保存到当期的Scope下
/// Register a parsed clump in the given scope: allocate its DFS and scope
/// positions, then add its global bindings, rules and pools.
bool DfsParser::HandleClump(Clump* clump, const LoadedFile& file, Scope* scope,
std::string* err) {
METRIC_RECORD(".ninja load : scope setup");
// Allocate DFS and scope positions for the clump.
clump->pos_.scope = scope->AllocDecls(clump->decl_count());
clump->pos_.dfs_location = state_->AllocDfsLocation(clump->decl_count());
{
METRIC_RECORD(".ninja load : scope setup : bindings");
for (Binding* binding : clump->bindings_) {
scope->AddBinding(binding);
}
}
{
METRIC_RECORD(".ninja load : scope setup : rules");
for (Rule* rule : clump->rules_) {
// Two rules with the same name in one scope is an error.
if (!scope->AddRule(rule)) {
return DecorateError(file, rule->parse_state_.rule_name_diag_pos,
"duplicate rule '" + rule->name() + "'", err);
}
}
}
for (Pool* pool : clump->pools_) {
if (!HandlePool(pool, file, err)) {
return false;
}
}
return true;
}
构建Edge图
ManifestLoader::FinishLoading()
在初步加载分析后会再次分析得到准确的Edge,分为5部分
1 edge setup
构造输入/输出节点的初始图。选择一个可能保持碰撞次数较低的初始大小。Edge的非隐式输出的数量对于最终的节点的数量是一个足够好的代理。
{
METRIC_RECORD(".ninja load : edge setup");
size_t output_count = 0;
// 计算edge的数量
for (Clump* clump : clumps)
output_count += clump->edge_output_count_;
// 重新计算Node的容器大小,默认算Edge的三倍
state_->paths_.reserve(state_->paths_.size() + output_count * 3);
if (!PropagateError(err, ParallelMap(thread_pool_, clumps,
[this](Clump* clump) {
std::string err;
// 抽出Clump中的Edge,Node,Pool等数据,初步构建Edge图
FinishAddingClumpToGraph(clump, &err);
return err;
}))) {
return false;
}
}
FinishAddingClumpToGraph()
/// Finish wiring one clump into the graph: force-evaluate its bindings, then
/// add each of its edges (creating the edges' input/output nodes).
bool ManifestLoader::FinishAddingClumpToGraph(Clump* clump, std::string* err) {
  // Precompute all binding values. The evaluated strings are discarded; the
  // point is that no binding's value keeps referencing the mmap'ed manifest.
  std::string scratch;
  for (Binding* b : clump->bindings_) {
    scratch.clear();
    b->Evaluate(&scratch);
  }
  for (Edge* e : clump->edges_) {
    const bool added = AddEdgeToGraph(e, clump->file_, err);
    if (!added)
      return false;
  }
  return true;
}
Evaluate()用来解析全局变量的值,根据偏移得到准确的变量值
AddEdgeToGraph()
用来构建Edge,主要的作用是创建Node
// Finish constructing one Edge: resolve its rule and pool, then create the
// input/output/validation Nodes that were only lexed during the first pass.
// Returns false (with *err set, via DecorateError for manifest diagnostics)
// on any error.
bool ManifestLoader::AddEdgeToGraph(Edge* edge, const LoadedFile& file,
                                    std::string* err) {
  const ScopePosition edge_pos = edge->pos_.scope_pos();
  // Look the rule up along the edge's scope chain.
  edge->rule_ = Scope::LookupRuleAtPos(edge->parse_state_.rule_name, edge_pos);
  if (edge->rule_ == nullptr) {
    std::string msg = "unknown build rule '" +
        edge->parse_state_.rule_name.AsString() + "'";
    Warning("DecorateError()\r");
    return DecorateError(file, edge->parse_state_.rule_name_diag_pos, msg, err);
  }
  // BUGFIX: this trace formerly ran BEFORE the nullptr check above, so an
  // unknown rule name dereferenced a null edge->rule_ and crashed instead of
  // producing the "unknown build rule" diagnostic.
  Warning("ManifestLoader::AddEdgeToGraph() -> edge->rule_(%s)\r", edge->rule_->name().c_str());
  // Now that the edge's bindings are available, check whether the edge has a
  // pool. This check requires the full edge+rule evaluation system.
  std::string pool_name;
  if (!edge->EvaluateVariable(&pool_name, kPool, err, EdgeEval::kParseTime))
    return false;
  Warning("edge->EvaluateVariable() \r");
  // Resolve the pool; an empty name means the default (unlimited) pool.
  if (pool_name.empty()) {
    edge->pool_ = &State::kDefaultPool;
    Warning("edge->pool_ = &State::kDefaultPool \r");
  } else {
    edge->pool_ = state_->LookupPoolAtPos(pool_name, edge->pos_.dfs_location());
    if (edge->pool_ == nullptr) {
      Warning("edge->pool_ == nullptr\r");
      return DecorateError(file, edge->parse_state_.final_diag_pos,
                           "unknown pool name '" + pool_name + "'", err);
    }
    Warning("ManifestLoader::AddEdgeToGraph() -> LookupPoolAtPos()[%s]", edge->pool_->name().data());
  }
  // Reserve node-vector capacity up front (counts were gathered in pass 1).
  edge->outputs_.reserve(edge->explicit_outs_ + edge->implicit_outs_);
  edge->inputs_.reserve(edge->explicit_deps_ + edge->implicit_deps_ +
                        edge->order_only_deps_);
  edge->validations_.reserve(edge->validation_deps_);
  // BUGFIX: vector::size() returns size_t, so "%d" was undefined behavior in
  // the varargs call; also the third field is validations_, not a reserve
  // count -- label it correctly.
  Warning("outputs_(%zu) , inputs_(%zu) , validations_(%zu)\r",
          edge->outputs_.size(), edge->inputs_.size(),
          edge->validations_.size());
  // Add the input and output nodes. We already lexed them in the first pass,
  // but we couldn't add them because scope bindings weren't available. To save
  // memory, the first pass only recorded the lexer position of each category
  // of input/output nodes, rather than each path's location.
  Lexer lexer(file.filename(), file.content(), file.content().data());
  Warning("Lexer lexer(file.filename(), file.content(), file.content().data());");
  int j = 0;
  for (const Edge::DeferredPathList& path_list :
       edge->parse_state_.deferred_path_lists) {
    // Pick the destination vector for this category of deferred paths.
    std::vector<Node*>* vec =
        path_list.type == Edge::DeferredPathList::INPUT ? &edge->inputs_ :
        path_list.type == Edge::DeferredPathList::OUTPUT ? &edge->outputs_ :
        &edge->validations_;
    Warning("for(%d) -> std::vector<Node*>* type = %d\r", j++, path_list.type);
    lexer.ResetPos(path_list.lexer_pos);
    Warning("ResetPos()\r");
    for (int i = 0; i < path_list.count; ++i) {
      Warning(" for(%d) ", i);
      // Create (or look up) the Node for the next path in this list.
      if (!AddPathToEdge(state_, *edge, vec, file, lexer, err))
        return false;
    }
    // Verify that there are no more paths to parse.
    LexedPath path;
    if (!lexer.ReadPath(&path, err) || !path.str_.empty()) {
      assert(false && "manifest file apparently changed during parsing");
      abort();
    }
  }
  // This compatibility mode filters nodes from the edge->inputs_ list; do it
  // before linking the edge inputs and nodes.
  if (options_.phony_cycle_action_ == kPhonyCycleActionWarn &&
      edge->maybe_phonycycle_diagnostic()) {
    // CMake 2.8.12.x and 3.0.x incorrectly write phony build statements that
    // reference themselves. Ninja used to tolerate these in the build graph
    // but that has since been fixed. Filter them out to support users of those
    // old CMake versions.
    Node* out = edge->outputs_[0];
    std::vector<Node*>::iterator new_end =
        std::remove(edge->inputs_.begin(), edge->inputs_.end(), out);
    if (new_end != edge->inputs_.end()) {
      edge->inputs_.erase(new_end, edge->inputs_.end());
      --edge->explicit_deps_;
      if (!quiet_) {
        Warning("phony target '%s' names itself as an input; ignoring "
                "[-w phonycycle=warn]", out->path().c_str());
      }
    }
  }
  // Multiple outputs aren't (yet?) supported with depslog.
  std::string deps_type;
  if (!edge->EvaluateVariable(&deps_type, kDeps, err, EdgeEval::kParseTime))
    return false;
  if (!deps_type.empty() && edge->outputs_.size() - edge->implicit_outs_ > 1) {
    return DecorateError(file, edge->parse_state_.final_diag_pos,
                         "multiple outputs aren't (yet?) supported by depslog; "
                         "bring this up on the mailing list if it affects you",
                         err);
  }
  // Lookup, validate, and save any dyndep binding. It will be used later
  // to load generated dependency information dynamically, but it must
  // be one of our manifest-specified inputs.
  std::string dyndep;
  if (!edge->EvaluateVariable(&dyndep, kDyndep, err, EdgeEval::kParseTime))
    return false;
  if (!dyndep.empty()) {
    uint64_t slash_bits;
    if (!CanonicalizePath(&dyndep, &slash_bits, err))
      return false;
    edge->dyndep_ = state_->GetNode(dyndep, 0);
    edge->dyndep_->set_dyndep_pending(true);
    vector<Node*>::iterator dgi =
        std::find(edge->inputs_.begin(), edge->inputs_.end(), edge->dyndep_);
    if (dgi == edge->inputs_.end()) {
      return DecorateError(file, edge->parse_state_.final_diag_pos,
                           "dyndep '" + dyndep + "' is not an input", err);
    }
  }
  return true;
}
按照顺序,首先会查询Rule -> 判断Pool-> 重置容器容量 -> 循环构建Node
构建完成,根据配置进行一些设置,此部分的内容就完成了
LookupRuleAtPos()
// Find `rule_name` by walking the scope chain outward from `pos`. A rule
// only matches if it was declared before the querying position within its
// scope; otherwise the search continues in the parent scope. Returns
// nullptr when no visible rule exists.
Rule* Scope::LookupRuleAtPos(const HashedStrView& rule_name,
                             ScopePosition pos) {
  for (Scope* scope = pos.scope; scope != nullptr;
       pos = scope->parent_, scope = pos.scope) {
    auto it = scope->rules_.find(rule_name);
    if (it != scope->rules_.end()) {
      Rule* rule = it->second;
      Warning("Rule* Scope::LookupRuleAtPos() : [%s] pos = [%d] \r",rule->name_hashed().data(),pos.index);
      // Only a declaration preceding the lookup position is visible.
      if (rule->pos_.scope_index() < pos.index)
        return rule;
    }
  }
  return nullptr;
}
AddPathToEdge()
// Read the next path for `edge` from `lexer`, resolve it to a Node, and
// append that Node to `out_vec` (one of the edge's input/output/validation
// lists). Returns false (via DecorateError) only if canonicalization fails.
static inline bool AddPathToEdge(State* state, const Edge& edge,
                                 std::vector<Node*>* out_vec,
                                 const LoadedFile& file, Lexer& lexer,
                                 std::string* err) {
  Warning("AddPathToEdge()\r");
  HashedStrView key;
  uint64_t slash_bits = 0;
  // Fast path: if the lexed path is already canonical and needs no variable
  // expansion, use it directly without copying.
  StringPiece canon_path = lexer.PeekCanonicalPath();
  if (!canon_path.empty()) {
    Warning("canon_path != empty()\r");
    key = canon_path;
  } else {
    // Slow path: re-lex the path, expand edge/scope variables into a
    // thread-local scratch buffer, then canonicalize it.
    LexedPath path;
    if (!lexer.ReadPath(&path, err) || path.str_.empty()) {
      // Pass 1 already lexed this path successfully; a failure here means
      // the manifest bytes changed underneath us.
      assert(false && "manifest file apparently changed during parsing");
      abort();
    }
    // Thread-local buffer: this function runs from parallel loaders, and
    // reusing the buffer avoids an allocation per path.
    thread_local std::string tls_work_buf;
    std::string& work_buf = tls_work_buf;
    work_buf.clear();
    EvaluatePathOnEdge(&work_buf, path, edge);
    std::string path_err;
    if (!CanonicalizePath(&work_buf, &slash_bits, &path_err)) {
      return DecorateError(file, edge.parse_state_.final_diag_pos, path_err,
                           err);
    }
    key = work_buf;
    Warning("key = [%s]\r",key.data());
  }
  Warning("Node* node = state->GetNode(key, 0)\r");
  // Fetch the existing Node for this path, or create it.
  Node* node = state->GetNode(key, 0);
  // Record the first (earliest-in-manifest) reference and the slash bits.
  node->UpdateFirstReference(edge.dfs_location(), slash_bits);
  out_vec->push_back(node);
  return true;
}
GetNode()
GetNode()会先查找是否已经存在此节点,如果有就直接返回,如果没有就创建一个Node返回
// Return the Node for `path`, creating it if necessary. Safe to call from
// multiple threads: the concurrent hash map arbitrates duplicate inserts.
Node* State::GetNode(const HashedStrView& path, uint64_t slash_bits) {
  // Fast path: the node already exists.
  if (Node** existing = paths_.Lookup(path))
    return *existing;
  // Build a fresh node and race to publish it in the map.
  std::unique_ptr<Node> fresh(new Node(path, slash_bits));
  bool won = paths_.insert({ fresh->path_hashed(), fresh.get() }).second;
  if (won)
    return fresh.release();  // the map now references the node; stop owning it
  // Another thread inserted this path first; discard ours and use theirs.
  return *paths_.Lookup(path);
}
2 link edge outputs
记录由一条边构建的每个节点的内边。检测到重复的Edge。使用 dupbuild=warn(默认直到1.9.0),当两条Edge生成同一Node时,从后面的Edge的输出列表中删除重复的Node。如果删除了一条Edge的所有输出,请从graph中删除该Edge。
简单的说就是,会遍历Edge和其中output的Node,查看是否有重复值如果有就会删除掉
{
  METRIC_RECORD(".ninja load : link edge outputs");
  // Record the in-edge of every node produced by an edge, and detect
  // duplicate edges (two edges producing the same node). Depending on the
  // dupbuild option this is either a hard error or a warning that drops the
  // duplicate output; an edge whose outputs all get dropped is removed.
  for (Clump* clump : clumps) {
    for (size_t edge_idx = 0; edge_idx < clump->edges_.size(); ) {
      Edge* edge = clump->edges_[edge_idx];
      for (size_t i = 0; i < edge->outputs_.size(); ) {
        Node* output = edge->outputs_[i];
        if (output->in_edge() == nullptr) {
          // First producer of this node wins; link it and move on.
          output->set_in_edge(edge);
          ++i;
          continue;
        }
        // Two edges produce the same output node.
        if (options_.dupe_edge_action_ == kDupeEdgeActionError) {
          return DecorateError(clump->file_,
                               edge->parse_state_.final_diag_pos,
                               "multiple rules generate " + output->path() +
                               " [-w dupbuild=err]", err);
        } else {
          if (!quiet_) {
            Warning("multiple rules generate %s. "
                    "builds involving this target will not be correct; "
                    "continuing anyway [-w dupbuild=warn]",
                    output->path().c_str());
          }
          // Keep the explicit/implicit output counts consistent with the
          // list we are about to shrink.
          if (edge->is_implicit_out(i))
            --edge->implicit_outs_;
          else
            --edge->explicit_outs_;
          // erase() shifts the next output into slot i, so do not ++i here.
          edge->outputs_.erase(edge->outputs_.begin() + i);
        }
      }
      if (edge->outputs_.empty()) {
        // All outputs were duplicates; drop the edge entirely. erase()
        // shifts the next edge into edge_idx, so skip the increment.
        clump->edges_.erase(clump->edges_.begin() + edge_idx);
        continue;
      }
      ++edge_idx;
    }
  }
}
3 link edge inputs
此时所有的重复Edge已经被剔除掉了,现在开始给input添加需要自己的edge
{
  METRIC_RECORD(".ninja load : link edge inputs");
  // Duplicate edges were removed in the previous phase, so each remaining
  // edge can now register itself on the nodes it consumes. Clumps are
  // independent, so they are processed in parallel.
  ParallelMap(thread_pool_, clumps, [](Clump* clump) {
    for (Edge* edge : clump->edges_) {
      for (Node* in_node : edge->inputs_)
        in_node->AddOutEdge(edge);
      for (Node* val_node : edge->validations_)
        val_node->AddValidationOutEdge(edge);
    }
  });
}
4 default targets
添加默认的target
{
  METRIC_RECORD(".ninja load : default targets");
  for (Clump* clump : clumps) {
    // Resolve every `default` declaration recorded in this clump into a
    // Node and register it as a default build target.
    for (DefaultTarget* target : clump->default_targets_) {
      std::string path;
      // Expand scope variables in the declared path, then canonicalize it.
      EvaluatePathInScope(&path, target->parsed_path_,
                          target->pos_.scope_pos());
      uint64_t slash_bits; // Unused because this only does lookup.
      std::string path_err;
      if (!CanonicalizePath(&path, &slash_bits, &path_err))
        return DecorateError(clump->file_, target->diag_pos_, path_err, err);
      // The target must name a node declared before this `default`
      // statement (lookup only -- never creates a node).
      Node* node = state_->LookupNodeAtPos(path, target->pos_.dfs_location());
      if (node == nullptr) {
        return DecorateError(clump->file_, target->diag_pos_,
                             "unknown target '" + path + "'", err);
      }
      state_->AddDefault(node);
    }
  }
}
5 build edge table
将所有的Edge添加到全局的Edge vector容器中(*State->edges_),并对其分配id
{
  METRIC_RECORD(".ninja load : build edge table");
  // Append every clump's edges to the global edge table, reserving the full
  // capacity first so a single allocation suffices.
  const size_t first_new_idx = state_->edges_.size();
  size_t total = first_new_idx;
  for (Clump* clump : clumps)
    total += clump->edges_.size();
  state_->edges_.reserve(total);
  for (Clump* clump : clumps) {
    state_->edges_.insert(state_->edges_.end(),
                          clump->edges_.begin(), clump->edges_.end());
  }
  // Assign edge IDs (each new edge's ID is its index in the table).
  ParallelMap(thread_pool_, IntegralRange<size_t>(first_new_idx, total),
              [this](size_t idx) {
    state_->edges_[idx]->id_ = idx;
  });
}
加载日志文件
OpenBuildLog()
文件名.ninja_log保存ninja运行期间的所有日志
OpenDepsLog()
文件名.ninja_deps保存了ninja的依赖信息。在此过程中会将节点添加到构建图里，查找每个节点最后记录的输出，并统计deps记录的总数。
执行编译
RunBuild()
构建一个Builder
// Builder wraps the build process: starting commands, updating status.
struct Builder {
  Builder(State* state, const BuildConfig& config,
          BuildLog* build_log, DepsLog* deps_log,
          DiskInterface* disk_interface, Status* status,
          int64_t start_time_millis);
  ~Builder();
  /// Clean up after interrupted commands by deleting output files.
  void Cleanup();
  /// Used by tests.
  Node* AddTarget(const string& name, string* err);
  /// Add targets to the build, scanning dependencies.
  /// @return false on error.
  bool AddTargets(const std::vector<Node*>& targets, string* err);
  /// Returns true if the build targets are already up to date.
  bool AlreadyUpToDate() const;
  /// Run the build. Returns false on error.
  /// It is an error to call this function when AlreadyUpToDate() is true.
  bool Build(string* err);
  bool StartEdge(Edge* edge, string* err);
  /// Update status ninja logs following a command termination.
  /// @return false if the build can not proceed further due to a fatal error.
  bool FinishCommand(CommandRunner::Result* result, string* err);
  /// Used for tests.
  void SetBuildLog(BuildLog* log) {
    scan_.set_build_log(log);
  }
  /// Load the dyndep information provided by the given node.
  bool LoadDyndeps(Node* node, string* err);
  State* state_;               // global build state (nodes, edges, pools)
  const BuildConfig& config_;  // command-line build options (-j, -k, ...)
  Plan plan_;                  // the set of edges scheduled to run
#if __cplusplus < 201703L
  auto_ptr<CommandRunner> command_runner_;
#else
  unique_ptr<CommandRunner> command_runner_;  // auto_ptr was removed in C++17.
#endif
  Status* status_;             // progress/status reporter (console output)
private:
  bool ExtractDeps(CommandRunner::Result* result, const string& deps_type,
                   const string& deps_prefix, vector<Node*>* deps_nodes,
                   string* err);
  /// Map of running edge to time the edge started running.
  typedef map<Edge*, int> RunningEdgeMap;
  RunningEdgeMap running_edges_;
  /// Time the build started.
  int64_t start_time_millis_;
  DiskInterface* disk_interface_;
  DependencyScan scan_;
  // Unimplemented copy ctor and operator= ensure we don't copy the auto_ptr.
  Builder(const Builder &other);        // DO NOT IMPLEMENT
  void operator=(const Builder &other); // DO NOT IMPLEMENT
};
CollectTargetsFromArgs()
收集需要构建target到一个vector容器中
AddTargets()
添加target到builder中
AlreadyUpToDate()
判断文件的构建时间：如果文件的修改时间早于上一次的构建时间，就不需要重新编译，此时输出 no work to do.
Build()
执行构建
/// Run the whole build. Loops starting commands and reaping finished ones
/// until the plan is exhausted or the failure budget (-k) runs out.
/// Returns false (with *err set) on failure or user interruption; must not
/// be called when AlreadyUpToDate() is true.
bool Builder::Build(string* err) {
  assert(!AlreadyUpToDate());
  status_->PlanHasTotalEdges(plan_.command_edge_count());
  int pending_commands = 0;                         // started but not yet reaped
  int failures_allowed = config_.failures_allowed;  // remaining -k budget
  // Set up the command runner if we haven't done so already.
  if (!command_runner_.get()) {
    if (config_.dry_run)
      command_runner_.reset(new DryRunCommandRunner);
    else
      command_runner_.reset(new RealCommandRunner(config_));
  }
  // We are about to start the build process.
  status_->BuildStarted();
  // This main loop runs the entire build process.
  // It is structured like this:
  // First, we attempt to start as many commands as allowed by the
  // command runner.
  // Second, we attempt to wait for / reap the next finished command.
  while (plan_.more_to_do()) {
    // See if we can start any more commands.
    if (failures_allowed && command_runner_->CanRunMore()) {
      if (Edge* edge = plan_.FindWork()) {
        if (!StartEdge(edge, err)) {
          Cleanup();
          status_->BuildFinished();
          return false;
        }
        if (edge->is_phony()) {
          // Phony edges run no command, so they finish immediately.
          if (!plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, err)) {
            Cleanup();
            status_->BuildFinished();
            return false;
          }
        } else {
          ++pending_commands;
        }
        // We made some progress; go back to the main loop.
        continue;
      }
    }
    // See if we can reap any finished commands.
    if (pending_commands) {
      CommandRunner::Result result;
      if (!command_runner_->WaitForCommand(&result) ||
          result.status == ExitInterrupted) {
        Cleanup();
        status_->BuildFinished();
        *err = "interrupted by user";
        return false;
      }
      --pending_commands;
      if (!FinishCommand(&result, err)) {
        Cleanup();
        status_->BuildFinished();
        return false;
      }
      if (!result.success()) {
        // Spend one unit of the -k failure budget per failed command.
        if (failures_allowed)
          failures_allowed--;
      }
      // We made some progress; start the main loop over.
      continue;
    }
    // If we get here, we cannot make any more progress.
    status_->BuildFinished();
    if (failures_allowed == 0) {
      if (config_.failures_allowed > 1)
        *err = "subcommands failed";
      else
        *err = "subcommand failed";
    } else if (failures_allowed < config_.failures_allowed)
      *err = "cannot make progress due to previous errors";
    else
      *err = "stuck [this is a bug]";
    return false;
  }
  status_->BuildFinished();
  return true;
}
plan_.more_to_do()
判断是否有工作需要完成
FindWork()
从Edge队列中弹出一个Edge进行构建,如果没有就返回空
StartEdge()
/// Start running one edge: report status, prepare output directories and
/// response files, then hand the command to the command runner.
/// Phony edges are a no-op. Returns false on any disk or spawn failure.
bool Builder::StartEdge(Edge* edge, string* err) {
  Warning("StartEdge()\r");
  METRIC_RECORD("StartEdge");
  if (edge->is_phony())
    return true;
  int64_t start_time_millis = GetTimeMillis() - start_time_millis_;
  running_edges_.insert(make_pair(edge, start_time_millis));
  // Report the edge start to the status printer (console output).
  status_->BuildEdgeStarted(edge, start_time_millis);
  if (!edge->IsPhonyOutput()) {
    for (vector<Node*>::iterator o = edge->outputs_.begin();
         o != edge->outputs_.end(); ++o) {
      Warning("StartEdge() -> Node->path = %s\r",(*o)->path().c_str());
      // Create directories necessary for outputs.
      // XXX: this will block; do we care?
      if (!disk_interface_->MakeDirs((*o)->path()))
        return false;
      if (!(*o)->exists())
        continue;
      // Remove existing outputs for non-restat rules.
      // XXX: this will block; do we care?
      if (config_.pre_remove_output_files && !edge->IsRestat() && !config_.dry_run) {
        if (disk_interface_->RemoveFile((*o)->path()) < 0)
          return false;
      }
    }
  }
  // Create response file, if needed
  // XXX: this may also block; do we care?
  string rspfile = edge->GetUnescapedRspfile();
  if (!rspfile.empty()) {
    string content = edge->GetBinding("rspfile_content");
    if (!disk_interface_->WriteFile(rspfile, content))
      return false;
  }
  // start command computing and run it
  Warning("StartCommand()\r");
  if (!command_runner_->StartCommand(edge)) {
    err->assign("command '" + edge->EvaluateCommand() + "' failed.");
    return false;
  }
  return true;
}
打印日志
StatusPrinter::BuildEdgeStarted()
// Record that an edge began running and, where appropriate, print a status
// line. A console edge additionally locks the terminal until it finishes.
void StatusPrinter::BuildEdgeStarted(Edge* edge, int64_t start_time_millis) {
  Warning("StatusPrinter::BuildEdgeStarted()\r");
  ++started_edges_;
  ++running_edges_;
  time_millis_ = start_time_millis;
  // Console edges always get a status line; otherwise only smart terminals
  // show per-edge progress.
  const bool show_status = edge->use_console() || printer_.is_smart_terminal();
  if (show_status)
    PrintStatus(edge, start_time_millis);
  if (edge->use_console())
    printer_.SetConsoleLocked(true);
}
StatusPrinter::PrintStatus()
// Print one progress line for `edge`: the progress prefix (e.g. "[3/10] ")
// followed by the edge's description, or its full command when the
// description is missing or verbose mode is on. Quiet mode prints nothing.
void StatusPrinter::PrintStatus(Edge* edge, int64_t time_millis) {
  if (config_.verbosity == BuildConfig::QUIET)
    return;
  const bool force_full_command = config_.verbosity == BuildConfig::VERBOSE;
  string to_print = edge->GetBinding("description");
  if (force_full_command || to_print.empty())
    to_print = edge->GetBinding("command");
  string prefix = FormatProgressStatus(progress_status_format_, time_millis);
  to_print = prefix + to_print;
  // Verbose output must never be truncated; otherwise allow eliding.
  printer_.Print(to_print,
                 force_full_command ? LinePrinter::FULL : LinePrinter::ELIDE);
}
LinePrinter::Print()
// Print one line of build output. While the console is locked (a console
// edge is running) the line is buffered instead of printed. On smart
// terminals an ELIDE line overwrites the previous one in place, truncated
// to the terminal width; otherwise lines are printed normally.
void LinePrinter::Print(string to_print, LineType type) {
  if (console_locked_) {
    // Defer until SetConsoleLocked(false); only the latest line is kept.
    line_buffer_ = to_print;
    line_type_ = type;
    return;
  }
  if (smart_terminal_) {
    printf("\r"); // Print over previous line, if any.
    // On Windows, calling a C library function writing to stdout also handles
    // pausing the executable when the "Pause" key or Ctrl-S is pressed.
  }
  if (smart_terminal_ && type == ELIDE) {
    // Limit output to width of the terminal if provided so we don't cause
    // line-wrapping.
    // Query the terminal window size.
    winsize size;
    if ((ioctl(STDOUT_FILENO, TIOCGWINSZ, &size) == 0) && size.ws_col) {
      to_print = ElideMiddle(to_print, size.ws_col);
    }
    printf("%s", to_print.c_str());
    printf("\x1B[K"); // Clear to end of line.
    fflush(stdout);
    have_blank_line_ = false;
  } else {
    printf("%s\n", to_print.c_str());
  }
}
RealCommandRunner::StartCommand()
// Expand the edge's command line and launch it as a subprocess. Returns
// false if the subprocess could not be started.
bool RealCommandRunner::StartCommand(Edge* edge) {
  const string command = edge->EvaluateCommand();
  Subprocess* subprocess = subprocs_.Add(command, edge->use_console());
  if (subprocess == nullptr)
    return false;
  // Remember which edge the subprocess belongs to so its result can be
  // matched back when the command finishes.
  subproc_to_edge_.insert(make_pair(subprocess, edge));
  return true;
}
SubprocessSet::Add()
// Spawn `command` as a new Subprocess and track it in the running set.
// Returns null (after destroying the half-constructed object) if the
// process could not be started.
Subprocess *SubprocessSet::Add(const string& command, bool use_console,
                               int extra_fd) {
  Subprocess* child = new Subprocess(use_console);
  if (!child->Start(this, command, extra_fd)) {
    delete child;
    return 0;
  }
  running_.push_back(child);
  return child;
}
Subprocess::Start()
使用 posix_spawn() 创建一个新的进程,使用/bin/sh -c "command"来执行编译指令
// Launch `command` via posix_spawn as "/bin/sh -c command", with the
// child's stdout/stderr redirected into a pipe whose read end (fd_) the
// SubprocessSet polls. `extra_fd`, if >= 0, is dup'ed to fd 3 in the child.
// Any setup failure is fatal (the process aborts via Fatal()).
bool Subprocess::Start(SubprocessSet* set, const string& command,
                       int extra_fd) {
  int output_pipe[2];
  if (pipe(output_pipe) < 0)
    Fatal("pipe: %s", strerror(errno));
  fd_ = output_pipe[0];
#if !defined(USE_PPOLL)
  // If available, we use ppoll in DoWork(); otherwise we use pselect
  // and so must avoid overly-large FDs.
  if (fd_ >= static_cast<int>(FD_SETSIZE))
    Fatal("pipe: %s", strerror(EMFILE));
#endif // !USE_PPOLL
  // The parent's read end must not leak into future exec'd children.
  SetCloseOnExec(fd_);
  posix_spawn_file_actions_t action;
  int err = posix_spawn_file_actions_init(&action);
  if (err != 0)
    Fatal("posix_spawn_file_actions_init: %s", strerror(err));
  // The child never reads from the pipe; close its copy of the read end.
  err = posix_spawn_file_actions_addclose(&action, output_pipe[0]);
  if (err != 0)
    Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
  if (extra_fd >= 0) {
    if (posix_spawn_file_actions_adddup2(&action, extra_fd, 3) != 0)
      Fatal("posix_spawn_file_actions_adddup2: %s", strerror(errno));
  }
  posix_spawnattr_t attr;
  err = posix_spawnattr_init(&attr);
  if (err != 0)
    Fatal("posix_spawnattr_init: %s", strerror(err));
  short flags = 0;
  // Restore the pre-build signal mask in the child (ninja blocks signals
  // in set->old_mask_'s complement while running).
  flags |= POSIX_SPAWN_SETSIGMASK;
  err = posix_spawnattr_setsigmask(&attr, &set->old_mask_);
  if (err != 0)
    Fatal("posix_spawnattr_setsigmask: %s", strerror(err));
  // Signals which are set to be caught in the calling process image are set to
  // default action in the new process image, so no explicit
  // POSIX_SPAWN_SETSIGDEF parameter is needed.
  if (!use_console_) {
    // Put the child in its own process group, so ctrl-c won't reach it.
    flags |= POSIX_SPAWN_SETPGROUP;
    // No need to posix_spawnattr_setpgroup(&attr, 0), it's the default.
    // Open /dev/null over stdin.
    err = posix_spawn_file_actions_addopen(&action, 0, "/dev/null", O_RDONLY,
                                           0);
    if (err != 0) {
      Fatal("posix_spawn_file_actions_addopen: %s", strerror(err));
    }
    // Route the child's stdout and stderr into the pipe's write end.
    err = posix_spawn_file_actions_adddup2(&action, output_pipe[1], 1);
    if (err != 0)
      Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err));
    err = posix_spawn_file_actions_adddup2(&action, output_pipe[1], 2);
    if (err != 0)
      Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err));
    err = posix_spawn_file_actions_addclose(&action, output_pipe[1]);
    if (err != 0)
      Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
    // In the console case, output_pipe is still inherited by the child and
    // closed when the subprocess finishes, which then notifies ninja.
  }
#ifdef POSIX_SPAWN_USEVFORK
  flags |= POSIX_SPAWN_USEVFORK;
#endif
  err = posix_spawnattr_setflags(&attr, flags);
  if (err != 0)
    Fatal("posix_spawnattr_setflags: %s", strerror(err));
  const char* spawned_args[] = {
    "/bin/sh", "-c", command.c_str(), NULL };
  err = posix_spawn(&pid_, "/bin/sh", &action, &attr,
                    const_cast<char**>(spawned_args), environ);
  if (err != 0)
    Fatal("posix_spawn: %s", strerror(err));
  err = posix_spawnattr_destroy(&attr);
  if (err != 0)
    Fatal("posix_spawnattr_destroy: %s", strerror(err));
  err = posix_spawn_file_actions_destroy(&action);
  if (err != 0)
    Fatal("posix_spawn_file_actions_destroy: %s", strerror(err));
  // The parent keeps only the read end; closing the write end ensures EOF
  // on fd_ once the child (and any console-case inheritors) exit.
  close(output_pipe[1]);
  return true;
}
边栏推荐
- The user logs in continuously (interruption is allowed) to query SQL
- [untitled]
- Entrepôt de données 4.0 Notes - acquisition de données sur le comportement de l'utilisateur II
- 九、实用类
- [radiology] bugfix: when GLCM features: indexerror: arrays used as indexes must be of integer (or Boolean) type
- Es operation command
- 3.1、对DQL简化补充
- NFT数字藏品系统开发,数字藏品的发展趋势
- P5 interview questions
- MySQL invalid conn troubleshooting
猜你喜欢

数仓4.0笔记——用户行为数据采集二

NFT数字藏品系统开发,数字藏品的发展趋势

11、多线程

数仓4.0笔记——业务数据采集——Sqoop

Websocket long connection

Phxpaxos installation and compilation process
![[deployment] cluster deployment and startup of presto-server-0.261.tar.gz](/img/37/1185b2321b003a7793c8c37891008c.png)
[deployment] cluster deployment and startup of presto-server-0.261.tar.gz

Digital collection development / meta universe digital collection development

Cuda10.0 configuration pytorch1.7.0+monai0.9.0

蚂蚁链NFT数字藏品DAPP商城系统定制开发
随机推荐
NFT digital collection development /dapp development
11、多线程
數倉4.0筆記——業務數據采集
[flick]flick on yarn's flick conf simplest configuration
Development of digital collection system: what are the main features of NFT?
Project instances used by activiti workflow
数仓4.0笔记——用户行为数据采集三
DBA命令
MySQL用户管理
Adding environment variables and templates to systemctl service
强迫症的硬盘分区
數倉4.0筆記——用戶行為數據采集四
Es operation command
第一个FLINK程序之WordCount
数仓4.0笔记——数仓建模
Unable to negotiate with port 51732: no matching host key type found. Their offer:
Accumulate SQL by date
[untitled]
Sqli lab 1-16 notes with customs clearance
NFT digital collection system development: Shenzhen Evening News "good times travel" digital collection online seconds chime