Commit 2186835b authored by Ashkan Taslimi

Run tests in parallel.

parent e5cbc622
Pipeline #3914 failed with stages in 4 seconds
@@ -84,6 +84,8 @@ class HECSearch:
                     n_c * req_delay) / self.prrtSystemParameters.source_packet_interval)

     def get_k_lim(self, start, end):
+        if end - start <= 1:
+            return end
         mid_point = math.ceil((end + start) / 2)
         p_r = np.around(self.residual_packet_erasure_rate(mid_point, self.n_max, self.prrtChannelParameters.loss_rate_fwd),
                         8)  # Pr(k, n_max)
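The two added lines above give get_k_lim a termination case for its bisection over k. As a point of reference, here is a minimal standalone sketch of that search pattern, assuming (as the surrounding code suggests) that the residual erasure rate grows monotonically in k. erasure_rate below is a hypothetical stand-in for residual_packet_erasure_rate(k, n_max, loss_rate_fwd), and the target threshold is an assumed parameter:

import math

def erasure_rate(k):
    # Hypothetical stand-in, assumed monotonically increasing in k.
    return k / 100.0

def k_lim(start, end, target):
    # Largest k in [start, end] with erasure_rate(k) <= target.
    # Invariant: start is always feasible.
    if end - start <= 1:
        # Two candidates left; prefer the larger one if it qualifies.
        return end if erasure_rate(end) <= target else start
    mid = math.ceil((start + end) / 2)
    if erasure_rate(mid) <= target:
        return k_lim(mid, end, target)    # mid is feasible: search upward
    return k_lim(start, mid - 1, target)  # mid is not: search strictly below

print(k_lim(1, 100, 0.495))  # -> 49 with the stand-in rate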
@@ -40,8 +40,9 @@ def gen_repair_schedules(redundancy, positions, min, max, is_order_ascending):

 def gen_repair_schedule(redundancy, positions, min, max):
     if min > max:
-        raise Exception(
-            "Illegal input combinations. Make sure the min > max. And, number of total redundancy is greater that positions*min.")
+        return []
+        # raise Exception(
+        #     "Illegal input combinations. Make sure the min < max.")
     if positions == 0:
         return [redundancy]
     opt_schedule = [min for p in range(positions)]
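Note the behavioral change in this hunk: gen_repair_schedule now signals an infeasible min/max combination by returning an empty list instead of raising, so callers must treat [] as "no schedule". A simplified, hypothetical greedy fill illustrating that contract (not the repository's actual construction; min_r/max_r stand in for the min/max parameters, which shadow Python builtins in the original):

def gen_repair_schedule(redundancy, positions, min_r, max_r):
    # Return [] for inconsistent inputs instead of raising (as in this commit).
    if min_r > max_r or redundancy < positions * min_r:
        return []
    if positions == 0:
        return [redundancy]
    schedule = [min_r] * positions             # give every position the minimum
    remaining = redundancy - positions * min_r
    for p in range(positions):                 # greedily top up, left to right
        add = min(remaining, max_r - min_r)
        schedule[p] += add
        remaining -= add
    return schedule if remaining == 0 else []  # leftover redundancy does not fit

assert gen_repair_schedule(5, 3, 1, 3) == [3, 1, 1]
assert gen_repair_schedule(2, 3, 1, 3) == []  # each position needs at least 1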
-import pandas as pd
-import hec_search
+import os
 import prrt
 import time
+import hec_search
+import numpy as np
+import pandas as pd
+from multiprocessing import Pool

-dataset_file_path = 'documents/in_12_param_4_sz_mini_1000.csv'
+ds_basename = os.listdir("../../hecps/code/ML/data/")
+# ds_basename = ["in_12_param_4_sz_zzatql"]
+ds_rel_input_path = "../../hecps/code/ML/data/"
+ds_rel_result_path = "documents/result/"
+#['documents/1','documents/2','documents/3','documents/4','documents/5','documents/6']

 def get_n_p_max(rtt_prop_fwd, pkt_length, data_rate_btl_fwd):
@@ -14,34 +20,70 @@ def get_n_p_max(rtt_prop_fwd, pkt_length, data_rate_btl_fwd):
 def evaluate(searchAlgorithm, appParams, channelParams, systemParams):
     n_p_min = 1
     n_p_max = get_n_p_max(channelParams.rtt_prop_fwd, appParams.pkt_length, channelParams.data_rate_btl_fwd)
-    if n_p_min <= n_p_max:
-        start = time.time()
-        search = hec_search.HECSearch(searchAlgorithm, n_p_min, n_p_max, appParams, channelParams, systemParams)
-        search_result = search.search()
-        duration = time.time() - start
-        return [search_result, duration]
-    else:
-        return []
+    start = time.time()
+    search = hec_search.HECSearch(searchAlgorithm, n_p_min, n_p_max, appParams, channelParams, systemParams)
+    search_result = search.search()
+    duration = time.time() - start
+    return [search_result, duration]
-def test_case():
+def test_case(dataset_basename):
     counter = 0
     save_result_to = pd.DataFrame()
     # Load dataset in chunk
-    for df_in_chunk in pd.read_csv(dataset_file_path, sep=',', chunksize=1000):
+    for df_in_chunk in pd.read_csv(ds_rel_input_path + dataset_basename, sep=',', chunksize=100):
         for index, row in df_in_chunk.iterrows():
             appParams = prrt.PrrtApplicationParameters(row['app_max_latency'], row['app_max_residual_loss_rate'], row['app_data_rate'], row['app_pkt_length'])
             chnlParams = prrt.PrrtChannelParameters(row['ch_loss_rate'], 0, row['ch_rtt_prop_fwd'], 0, row['ch_data_rate_btl_fwd'], 0)
             sysParams = prrt.PrrtSystemParameters(row['sys_block_coding_dly'], row['sys_red_pkt_trans_dly'], row['sys_proc_dly'], row['sys_pkt_loss_detection_dly'], row['sys_src_pkt_interval'])
-            if 970 > index > 900:
-                for searchAlgorithm in ["GreedySearch"]:
-                    print(str(index))
-                    result = evaluate(searchAlgorithm, appParams, chnlParams, sysParams)
-                    config = result[0]
-                    save_result_to = save_result_to.append({'search': searchAlgorithm, 'config': [config.k, len(config.n_p), config.n_p], 'duration' : np.around(result[1],0)}, ignore_index=True, sort=False)
for searchAlgorithm in ["GreedySearch", "FullSearch"]:
print(str(index))
search_result = evaluate(searchAlgorithm, appParams, chnlParams, sysParams)
if len(search_result) != 0:
config = search_result[0]
save_result_to = save_result_to.append({'app_max_latency': row['app_max_latency'],
'app_max_residual_loss_rate': row['app_max_residual_loss_rate'],
'app_data_rate' : row['app_data_rate'],
'app_pkt_length' : row['app_pkt_length'],
'ch_loss_rate' : row['ch_loss_rate'],
'ch_rtt_prop_fwd' : row['ch_rtt_prop_fwd'],
'ch_data_rate_btl_fwd' : row['ch_data_rate_btl_fwd'],
'sys_block_coding_dly' : row['sys_block_coding_dly'],
'sys_red_pkt_trans_dly' : row['sys_red_pkt_trans_dly'],
'sys_proc_dly' : row['sys_proc_dly'],
'sys_pkt_loss_detection_dly' : row['sys_pkt_loss_detection_dly'],
'sys_src_pkt_interval' : row['sys_src_pkt_interval'],
'search': searchAlgorithm,
'config': [config.k, len(config.n_p), config.n_p],
'duration' : np.around(search_result[1],0)}, ignore_index=True, sort=False)
+                else:
+                    save_result_to = save_result_to.append({'app_max_latency': row['app_max_latency'],
+                                                            'app_max_residual_loss_rate': row['app_max_residual_loss_rate'],
+                                                            'app_data_rate' : row['app_data_rate'],
+                                                            'app_pkt_length' : row['app_pkt_length'],
+                                                            'ch_loss_rate' : row['ch_loss_rate'],
+                                                            'ch_rtt_prop_fwd' : row['ch_rtt_prop_fwd'],
+                                                            'ch_data_rate_btl_fwd' : row['ch_data_rate_btl_fwd'],
+                                                            'sys_block_coding_dly' : row['sys_block_coding_dly'],
+                                                            'sys_red_pkt_trans_dly' : row['sys_red_pkt_trans_dly'],
+                                                            'sys_proc_dly' : row['sys_proc_dly'],
+                                                            'sys_pkt_loss_detection_dly' : row['sys_pkt_loss_detection_dly'],
+                                                            'sys_src_pkt_interval' : row['sys_src_pkt_interval'],
+                                                            'search': searchAlgorithm,
+                                                            'config': ["INV_PRM_NPM"],
+                                                            'duration' : 0}, ignore_index=True, sort=False)
         counter += 1
-        print("Chunk round: " + str(counter))
-    save_result_to.to_csv('result.csv', sep=',', index = False)
+        #print("Chunk round: " + str(counter))
+    save_result_to.to_csv(ds_rel_result_path + dataset_basename, sep=',', index = False)

-test_case()
+#test_case("documents/5")
+if __name__ == '__main__':
+    pool = Pool(processes=8)
+    pool.map(test_case, ds_basename, chunksize=1)
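The new __main__ block is the core of this commit: each dataset file becomes one independent job, and a pool of eight worker processes maps test_case over the file list. A self-contained sketch of the same pattern, with a dummy workload standing in for the CSV processing:

import os
from multiprocessing import Pool

def process_dataset(basename):
    # Stand-in for test_case(dataset_basename): one independent job per file.
    print("worker %d handling %s" % (os.getpid(), basename))
    return basename

if __name__ == '__main__':
    basenames = ["ds_a.csv", "ds_b.csv", "ds_c.csv"]  # stand-in for os.listdir(...)
    # chunksize=1 hands each worker one file at a time, which balances load
    # when per-file runtimes vary widely (e.g. GreedySearch vs. FullSearch).
    with Pool(processes=8) as pool:
        results = pool.map(process_dataset, basenames, chunksize=1)
    print(results)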