Skip to content

Commit

Permalink
added split and distr step, uses google cloud bucket to push data from the runner
Browse files Browse the repository at this point in the history
  • Loading branch information
saicharan0112 committed May 6, 2023
1 parent 1586966 commit 4d9efbd
Show file tree
Hide file tree
Showing 5 changed files with 55 additions and 7 deletions.
1 change: 1 addition & 0 deletions regression/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
generators_checks/__pycache__/
2 changes: 1 addition & 1 deletion regression/configs/config_ldo.yml
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
designName: ldoInst
vin: [1.8, 2.0, 0.1]
vin: [1.8, 2.1, 0.1]
imax: [1, 1.5, 0.5]
4 changes: 2 additions & 2 deletions regression/configs/config_tempsense.yml
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
designName: tempSenseInst_error
ninv: [2, 10, 2]
nhead: [5, 9, 1]
ninv: [3, 5, 1]
nhead: [5, 7, 1]
1 change: 0 additions & 1 deletion regression/generators_checks/run_checks_ldo.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
import os, sys, fileinput, datetime, math, glob
import subprocess as sp

import ltspice
import pandas as pd
import numpy as np

Expand Down
54 changes: 51 additions & 3 deletions regression/run_regression.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import yaml, sys, subprocess, os, csv
import yaml, sys, subprocess, os, csv, itertools, math
import numpy as np
import pandas as pd
from generators_checks import *
Expand Down Expand Up @@ -194,16 +194,20 @@ def ldo(data, stage):
4. build - to build the dataset using the earlier extracted results
'''

runner_results_dir = "/home/"+os.getenv("USER")+"/runner_results/ldo/"

designName = data["designName"]
vin = data["vin"]
imax = data["imax"]

reg = run_checks_ldo.regression_ldo()

fail_records = open("failed_configs.txt","w+")


# https://stackoverflow.com/questions/53580811/np-arange-does-not-work-as-expected-with-floating-point-arguments
vin_array = np.round(np.arange(vin[0], vin[1], vin[2]), 1)
imax_array = np.round(np.arange(imax[0], imax[1], imax[2]), 1)
split_strategy = 3


# generate spice netlists for simulations
Expand Down Expand Up @@ -243,7 +247,42 @@ def ldo(data, stage):
fail_records.close()

# idea is to build the total combinations list and split them by 2. Run simulations on each half.
# TODO: Create new runner machines, share the above built data with them and run each half of the simulation on each machine. After ending, get back the data to the master machine and do the postprocess
# TODO: Create new runner machines, distribute the above-built splits among them, and run each split of the simulation on a single machine. After all splits finish, gather the data back to the master machine and run the postprocess step


# split the list of dirs that are generated and create a diff folder for each split and store it on cloud bucket
if stage == "splitndist":

total_dirs = os.listdir(runner_results_dir)
total_count = len(total_dirs)

start = 0
end = math.ceil(total_count/split_strategy)
incr = end

for i in range(split_strategy):


print("MACHINE_"+str(i)+"="+str(total_dirs[start:end]))
os.system(runner_results_dir)

os.system("gsutil -m cp -r "+runner_results_dir+" gs://openfasoc_ci_bucket/"+runner_results_dir.split("/")[-1])


start = end
end += incr


# TODO: add a stage to pull data from the cloud bucket and place it in a location so that the "simulate" step can run it directly.
if stage == "pullfrombucket":

# this stage is actually run in a different job and on a different machine.
# pull the data from the bucket using the command "os.system("gsutil -m cp -r "+runner_results_dir+" gs://openfasoc_ci_bucket/"+runner_results_dir.split("/")[-1])" and place it inside the location /home/runner_results
pass



# run simulations based on the machine and the list part of that particular split
if stage == "simulate":
for i in vin_array:
for j in imax_array:
Expand All @@ -255,7 +294,16 @@ def ldo(data, stage):



# TODO: another stage to push the data back to the cloud bucket
if stage == "pushtobucket":

# this stage will just push the /home/runner_results back to the cloud bucket
pass



# process simulation logfiles and generate final data
# TODO: this stage should now contain another sub-step to pull the data from the bucket into the main runner
if stage == "process":
for i in vin_array:
for j in imax_array:
Expand Down

0 comments on commit 4d9efbd

Please sign in to comment.