Commit 8cb1e572 authored by Ashkan's avatar Ashkan
Browse files

Test and debug.

parent 2186835b
Loading
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -84,11 +84,11 @@ class HECSearch:
                          n_c * req_delay) / self.prrtSystemParameters.source_packet_interval)

    def get_k_lim(self, start, end):
        if end - start <= 1:
            return end
        if end < start:
            return start

        mid_point = math.ceil((end + start) / 2)
        p_r = np.around(self.residual_packet_erasure_rate(mid_point, self.n_max, self.prrtChannelParameters.loss_rate_fwd),
            8)  # Pr(k, n_max)
        p_r = np.around(self.residual_packet_erasure_rate(mid_point, self.n_max, self.prrtChannelParameters.loss_rate_fwd), 8)  # Pr(k, n_max)
        if p_r == self.prrtApplicationParameters.max_residual_loss_rate:
            return int(mid_point)
        elif p_r > self.prrtApplicationParameters.max_residual_loss_rate:
+52 −47
Original line number Diff line number Diff line
@@ -6,12 +6,17 @@ import numpy as np
import pandas as pd
from multiprocessing import Pool

# Dataset configuration.
# NOTE(review): this span appears to be diff residue — both the pre-change
# (ML/data) and post-change (documents/debug) assignments are present without
# diff markers. In straight-line execution the LAST assignments win, so the
# effective configuration is the "documents/debug/" block below.
ds_basename = os.listdir("../../hecps/code/ML/data/")
# ds_basename = os.listdir("../../hecps/code/ML/data/")
# ds_basename = ["in_12_param_4_sz_zzatql"]
ds_rel_input_path = "../../hecps/code/ML/data/"
ds_rel_result_path = "documents/result/"
# ds_rel_input_path = "../../hecps/code/ML/data/"
# ds_rel_result_path = "documents/result/"
    #['documents/1','documents/2','documents/3','documents/4','documents/5','documents/6']

# Effective (overriding) configuration: input CSV chunks and results both live
# under documents/debug/ — presumably a temporary setup for debugging; confirm
# before running against the real ML/data datasets.
ds_basename = os.listdir("documents/debug/")
# ds_basename = ["in_12_param_4_sz_zzatql"]
ds_rel_input_path = "documents/debug/"
ds_rel_result_path = "documents/debug/"


def get_n_p_max(rtt_prop_fwd, pkt_length, data_rate_btl_fwd):
	"""Maximum number of packets in flight.

	Computed as the bandwidth-delay product of the forward bottleneck link
	(propagation RTT times bottleneck data rate) divided by the packet
	length. Returns a float; callers are expected to round if an integer
	packet count is needed.
	"""
	bandwidth_delay_product = rtt_prop_fwd * data_rate_btl_fwd
	return bandwidth_delay_product / pkt_length
@@ -19,7 +24,7 @@ def get_n_p_max(rtt_prop_fwd, pkt_length, data_rate_btl_fwd):

def evaluate(searchAlgorithm, appParams, channelParams, systemParams):
    n_p_min = 1
    n_p_max = get_n_p_max(channelParams.rtt_prop_fwd, appParams.pkt_length, channelParams.data_rate_btl_fwd)
    n_p_max = np.round(get_n_p_max(channelParams.rtt_prop_fwd, appParams.pkt_length, channelParams.data_rate_btl_fwd), 0)
    if n_p_min <= n_p_max:
        start = time.time()
        search = hec_search.HECSearch(searchAlgorithm, n_p_min, n_p_max, appParams, channelParams, systemParams)
@@ -37,9 +42,9 @@ def test_case(dataset_basename):
        for index, row in df_in_chunk.iterrows():
            appParams = prrt.PrrtApplicationParameters(row['app_max_latency'], row['app_max_residual_loss_rate'], row['app_data_rate'], row['app_pkt_length'])
            chnlParams = prrt.PrrtChannelParameters(row['ch_loss_rate'], 0, row['ch_rtt_prop_fwd'], 0, row['ch_data_rate_btl_fwd'], 0)
            sysParams = prrt.PrrtSystemParameters(row['sys_block_coding_dly'], row['sys_red_pkt_trans_dly'], row['sys_proc_dly'], row['sys_pkt_loss_detection_dly'], row['sys_src_pkt_interval'])

            for searchAlgorithm in ["GreedySearch", "FullSearch"]:
            sysParams = prrt.PrrtSystemParameters(row['sys_block_coding_dly'], np.round(row['sys_red_pkt_trans_dly'], 5), row['sys_proc_dly'], row['sys_pkt_loss_detection_dly'], row['sys_src_pkt_interval'])
            if index == 98:
                for searchAlgorithm in ["GreedySearch"]:
                    print(str(index))
                    search_result = evaluate(searchAlgorithm, appParams, chnlParams, sysParams)
                    if len(search_result) != 0:
@@ -79,11 +84,11 @@ def test_case(dataset_basename):

        counter += 1
        #print("Chunk round: " + str(counter))
    save_result_to.to_csv(ds_rel_result_path + dataset_basename, sep=',', index = False)
    save_result_to.to_csv(ds_rel_result_path + dataset_basename.replace("in", "out", 1), sep=',', index = False)


#test_case("documents/5")
# Script entry point.
# NOTE(review): diff residue — the guarded multiprocessing dispatch below is
# the pre-change version; the post-change version is the unguarded single
# debug call plus the commented-out guard underneath. As written, BOTH run:
# the pool fans test_case out over all datasets (8 workers, one dataset per
# task), and then test_case(ds_basename[0]) runs again at import time in
# every process — confirm which variant is intended before use.
if __name__ == '__main__':
    pool = Pool(processes=8)
    pool.map(test_case, ds_basename, chunksize=1)
test_case(ds_basename[0])
# if __name__ == '__main__':
#     pool = Pool(processes=8)
#     pool.map(test_case, ds_basename, chunksize=1)