use benchmark_repository::BenchmarkRepository;
use std::fs;
use std::path::Path;
use std::process::Command;
use yaml_rust::YamlLoader;

fn main() {
    pretty_env_logger::init();

    // Initialize a new benchmark repository
    let mut benchmark_repository = BenchmarkRepository::new(
        "gitea@git.luehne.de:patrick/benchmark-template.git",
        Path::new("cache").to_path_buf(), "gitea");

    // Load the instances.yml file from the “config” branch
    let content = benchmark_repository.read_file(Path::new("instances.yml"), "config").unwrap();
    let instances = &YamlLoader::load_from_str(&content).unwrap()[0];

    // Iterate over all instances
    for instance in instances.as_vec().unwrap() {
        // Each instance has a fruit and the time needed to collect it
        let fruit = instance["fruit"].as_str().unwrap();
        let time = instance["time"].as_i64().unwrap();

        // The key defines in which directory the results will be stored. It must be unique
        // for every job
        let job_key = format!("{}/{}", fruit, time);

        // Skip jobs that have already been completed
        if benchmark_repository.is_job_done(&job_key) {
            continue;
        }

        // Request a new job. This creates the infrastructure necessary to automatically
        // retrieve the results from each computation node
        let job = benchmark_repository.create_job(job_key);

        // Run the script that benchmarks this job. In this example, all configuration
        // options are passed to the script as environment variables. The script then passes
        // them on to the actual command we want to benchmark. Note that the sbatch options
        // must precede the script path — otherwise, sbatch would pass them to the script as
        // arguments instead of interpreting them itself
        Command::new("sbatch")
            .args(&["--nodes", "1", "--ntasks-per-node", "1", "-p", "kr",
                "/home/pluehne/test-job.sh"])
            // This is the URL the computation node needs to write the results to. All it
            // needs to do is clone this repository, and create and push a commit to the
            // “master” branch once it’s done with the benchmark job
            .env("JOB_RESULT_REPOSITORY_URL", &format!("file://{}",
                fs::canonicalize(&job.result_repository_path).unwrap().display()))
            // The job ID provides a quick way to create temporary files without conflicts
            // between the jobs
            .env("JOB_ID", format!("{}", job.id))
            .env("JOB_KEY", &job.key)
            // These two options configure the command we want to benchmark
            .env("FRUIT", fruit)
            .env("TIME", format!("{}", time))
            .output()
            .expect("could not execute command");
    }

    // Finally, we instruct the benchmark runner to wait for all jobs to finish and to join
    // the results from all jobs into the “results” branch
    benchmark_repository.join();
}
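
// For reference, the instances.yml read above is a YAML sequence of maps with the “fruit”
// and “time” fields accessed in main. A minimal file could look as follows — the concrete
// values are made up for illustration; the actual file lives on the “config” branch of
// benchmark-template:
//
//     - fruit: apple
//       time: 30
//     - fruit: banana
//       time: 60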
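
// The comments in main describe what each computation node is expected to do: clone the
// per-job result repository, store its results there, and push a commit to the “master”
// branch once it’s done. The following function is a minimal sketch of that node-side
// protocol, assuming “git” is available on the node. It is not part of the
// benchmark_repository API — the function name, the working directory under /tmp, the
// “output.txt” file name, and the stand-in computation are illustrative assumptions only
#[allow(dead_code)]
fn run_node_side_job() {
    // Read the configuration that the benchmark runner passed via environment variables
    let repository_url = std::env::var("JOB_RESULT_REPOSITORY_URL").unwrap();
    let job_id = std::env::var("JOB_ID").unwrap();
    let fruit = std::env::var("FRUIT").unwrap();
    let time = std::env::var("TIME").unwrap();

    // The job ID keeps the working directories of concurrent jobs from colliding
    let working_directory = format!("/tmp/benchmark-job-{}", job_id);

    // Clone the per-job result repository that create_job set up
    Command::new("git")
        .args(&["clone", repository_url.as_str(), working_directory.as_str()])
        .status()
        .expect("could not clone result repository");

    // Stand-in for the actual computation we want to benchmark
    let results = format!("collected fruit {} in {} seconds\n", fruit, time);
    fs::write(Path::new(&working_directory).join("output.txt"), results)
        .expect("could not write results");

    // Commit and push the results to the “master” branch, which marks the job as done
    for git_command in &[
        vec!["add", "output.txt"],
        vec!["commit", "--message", "Add benchmark results"],
        vec!["push", "origin", "master"],
    ] {
        Command::new("git")
            .args(git_command)
            .current_dir(&working_directory)
            .status()
            .expect("could not run git");
    }
}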