Commit c813df8f authored by Ashkan

Increase performance by ~80x by adding C types to the function parameters.

parent 874c2286
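For context: the speedup in this commit comes from giving the hot cpdef functions C-typed parameters, so Cython compiles their arithmetic down to plain C instead of dispatching on boxed Python objects, plus taking prints out of the inner search loop. A minimal sketch of the typing pattern (toy function, not taken from this repo):

    cpdef float prob_untyped(j, m, p_e):
        # Arguments are generic Python objects; every operation goes
        # through the interpreter's dynamic dispatch.
        return (p_e ** j) * ((1 - p_e) ** (m - j))

    cpdef float prob_typed(int j, int m, float p_e):
        # Arguments are C ints/floats; the same expression compiles to
        # straight C arithmetic. This is the change applied below.
        return (p_e ** j) * ((1 - p_e) ** (m - j))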
@@ -94,7 +94,7 @@ class HECSearch:
             k_opt = k
             n_opt = n
             n_p_opt = repair_schedule
-        print("k_opt=" + str(k_opt) + " n_opt=" + str(n_opt) + " n_p_opt=" + str(n_p_opt) + "RI= " + str(ri_opt))
+        # print("k_opt=" + str(k_opt) + " n_opt=" + str(n_opt) + " n_p_opt=" + str(n_p_opt) + "RI= " + str(ri_opt))
         if len(n_p_opt) != 0:
             fec_balance = self.get_balance_delay_fec(n_p_opt[0], k_opt)
         return [k_opt, n_opt, n_p_opt, fec_balance]
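A side note on the hunk above: that print ran once per candidate code configuration, so removing it also takes several str() conversions and console I/O out of the hot search loop. A common alternative (DEBUG is a hypothetical flag, not in this repo) keeps the diagnostic available without paying for it by default:

    if DEBUG:
        print("k_opt=" + str(k_opt) + " n_opt=" + str(n_opt) + " n_p_opt=" + str(n_p_opt) + " RI= " + str(ri_opt))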
...
 from scipy.special import comb
 import numpy as np
 import math
+from cython cimport cdivision
 cdef int step_size = 1  # Step size in the estimation of the optimum code word length
 # TODO: Maybe run in parallel to optimize runtime
 # Pm(e, m), Eq. 3.45, Page 91
-cpdef float get_error_prob(j, sent_packets_item, p_e):
+cpdef float get_error_prob(int j, int sent_packets_item, float p_e):
     return comb(sent_packets_item, j) * (p_e ** j) * ((1 - p_e) ** (sent_packets_item - j))
 # TODO
 # Check if it is able to fulfill application parameters given channel parameters.
-cpdef float hypergeometric_distribution(n, k, i, j):
+@cdivision(True)
+cpdef float hypergeometric_distribution(int n, int k, int i, int j):
     return (comb(k, i) * comb(n - k, j - i)) / comb(n, j)
-cpdef float residual_packet_erasure_rate(k, n, ch_loss_rate):
-    cdef float total_packet_erasure = 0
+@cdivision(True)
+cpdef float residual_packet_erasure_rate(int k, int n, float ch_loss_rate):
+    cdef:
+        float total_packet_erasure = 0
+        int i, j
     for i in range(1, k + 1):
         for j in range(max(n - k + 1, i), n - k + i + 1):
             total_packet_erasure += i * hypergeometric_distribution(n, k, i, j) \
@@ -25,12 +29,12 @@ cpdef float residual_packet_erasure_rate(k, n, ch_loss_rate):
     # Pr(k, n)
     return (1 / k) * total_packet_erasure
-cpdef int get_k_lim(smallestK, biggestK, loss_rate_fwd, max_residual_loss_rate):
+@cdivision(True)
+cpdef int get_k_lim(int smallestK, int biggestK, float loss_rate_fwd, float max_residual_loss_rate):
     if biggestK < smallestK:
         return smallestK
-    mid_point = math.ceil((biggestK + smallestK) / 2)
+    cdef int mid_point = math.ceil((biggestK + smallestK) / 2)
     cdef float p_r = np.around(residual_packet_erasure_rate(mid_point, biggestK, loss_rate_fwd), 8)  # Pr(k, n_max)
     if p_r == max_residual_loss_rate:
         return int(mid_point)
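get_k_lim above is a bisection on k: it halves [smallestK, biggestK] around a ceil midpoint and compares the rounded residual rate Pr(mid, n_max) against the target. Assuming, as that search does, that the residual rate behaves monotonically in k, the same idea in a generic standalone form (hypothetical helper, not the repo's exact logic):

    def bisect_smallest(lo, hi, pred):
        # Smallest value in [lo, hi] for which pred holds, given that
        # pred is monotone (False ... False True ... True) there.
        while lo < hi:
            mid = (lo + hi) // 2
            if pred(mid):
                hi = mid
            else:
                lo = mid + 1
        return lo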
@@ -47,7 +51,7 @@ cpdef int estimate_n_for_k(k, n_max, loss_rate_fwd, max_residual_loss_rate):
         n = n + step_size
     return n
+@cdivision(True)
 cpdef bint is_maximum_loss_fulfilled(k, n, loss_rate_fwd, max_residual_loss_rate):
     cdef float total_packet_erasure = 0
     for i in range(1, k):
...
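For reference, the math implemented by the Cython file above, reconstructed from the code itself (the continuation line cut off after hypergeometric_distribution(n, k, i, j) is assumed to multiply by get_error_prob(j, n, ch_loss_rate)): get_error_prob is the binomial loss probability of Eq. 3.45,

    P_m(j, m) = \binom{m}{j} p_e^j (1 - p_e)^{m - j},

and residual_packet_erasure_rate combines it with the hypergeometric term into the residual erasure rate

    P_r(k, n) = \frac{1}{k} \sum_{i=1}^{k} \sum_{j=\max(n-k+1,\, i)}^{n-k+i} i \, \frac{\binom{k}{i} \binom{n-k}{j-i}}{\binom{n}{j}} \, P_m(j, n).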
@@ -6,14 +6,21 @@ import numpy as np
 import pandas as pd
 from multiprocessing import Pool
-# ds_basename = os.listdir("../../hecps/code/ML/data/")
-# ds_rel_input_path = "../../hecps/code/ML/data/"
+# --------------------------------------------------------------
+# Generate dataset
+# ds_all_input = set(os.listdir("../../hecps/code/ML/dataset1/"))
+# print(len(ds_all_input))
+# ds_calculated_output = set(os.listdir("documents/bigdata/"))
+# ds_calculated_output = {s.replace("out", "in") for s in ds_calculated_output}
+# print(len(ds_calculated_output))
+# ds_basename = list(ds_all_input - ds_calculated_output)
+# print(len(ds_basename))
+# ds_rel_input_path = "../../hecps/code/ML/dataset1/"
+# ds_rel_output_path = "documents/bigdata/"
+# ['documents/1','documents/2','documents/3','documents/4','documents/5','documents/6']
+# ----------------------------------------------------------------
-# ds_basename = os.listdir("documents/input/")
-ds_basename = ["LONG_in_12_param_4_sz_zzelqt"]
+ds_basename = os.listdir("documents/input/")
+ds_basename = "LONG_in_12_param_4_sz_zzelqt"
 ds_rel_input_path = "documents/input/"
 ds_rel_output_path = "documents/output/"
@@ -48,8 +55,8 @@ def test_case(dataset_basename):
     for df_in_chunk in pd.read_csv(ds_rel_input_path + dataset_basename, sep=',', chunksize=100):
         print(dataset_basename + " started.")
         for index, row in df_in_chunk.iterrows():
-            if index == 1:
-                print(str(index))
+            # if index == 1:
+            #     print(str(index))
             appParams = prrt.PrrtApplicationParameters(row['app_max_latency'], row['app_max_residual_loss_rate'], row['app_data_rate'], row['app_pkt_length'])
             chnlParams = prrt.PrrtChannelParameters(row['ch_loss_rate'], 0, row['ch_rtt_prop_fwd'], 0, row['ch_data_rate_btl_fwd'], 0)
             sysParams = prrt.PrrtSystemParameters(0, np.round(row['sys_red_pkt_trans_dly'], 5), 0, 0, row['sys_src_pkt_interval'])
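For orientation, each CSV row is mapped onto three prrt parameter objects with the positional signatures visible above; a standalone call with made-up example values (the numbers are illustrative only, not from the dataset):

    import prrt

    appParams = prrt.PrrtApplicationParameters(0.05, 1e-3, 1e6, 1400)
    chnlParams = prrt.PrrtChannelParameters(0.01, 0, 0.02, 0, 1e7, 0)
    sysParams = prrt.PrrtSystemParameters(0, 0.001, 0, 0, 0.01)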
@@ -105,6 +112,6 @@ def test_case(dataset_basename):
 # test_case(ds_basename)
 if __name__ == '__main__':
-    pool = Pool(processes=1)
+    pool = Pool(processes=7)
     pool.map(test_case, ds_basename, chunksize=1)
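One caveat on the driver change above: Pool.map iterates its second argument, and ds_basename is now a bare string, so each of the 7 workers would receive a single character rather than a file name; presumably the earlier list form was intended for multi-file runs. A self-contained sketch of the list form (test_case stubbed out):

    from multiprocessing import Pool

    def test_case(basename):
        # Stand-in for the file's real test_case.
        print(basename + " started.")

    if __name__ == '__main__':
        ds_basename = ["LONG_in_12_param_4_sz_zzelqt"]  # a list, so map gets whole names
        pool = Pool(processes=7)
        pool.map(test_case, ds_basename, chunksize=1)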