#! /usr/libexec/platform-python
"""
Utility for converting *_clat_hist* files generated by fio into latency statistics.

Example usage:

        $ fiologparser_hist.py *_clat_hist*
        end-time, samples, min, avg, median, 90%, 95%, 99%, max
        1000, 15, 192, 1678.107, 1788.859, 1856.076, 1880.040, 1899.208, 1888.000
        2000, 43, 152, 1642.368, 1714.099, 1816.659, 1845.552, 1888.131, 1888.000
        4000, 39, 1152, 1546.962, 1545.785, 1627.192, 1640.019, 1691.204, 1744
        ...

@author Karl Cronburg <karl.cronburg@gmail.com>
"""
import os
import sys
import pandas
import re
import numpy as np

runascmd = False

err = sys.stderr.write

class HistFileRdr():
    """ Class to read a hist file line by line, buffering
        a value array for the latest line, and allowing a preview
        of the timestamp on the next line.
        Note: this does not follow a generator pattern; the caller must
        explicitly request the next bin array with nextData().
    """
    def __init__(self, file):
        self.fp = open(file, 'r')
        self.data = self.nextData()

    def close(self):
        self.fp.close()
        self.fp = None

    def nextData(self):
        self.data = None
        if self.fp:
            line = self.fp.readline()
            if line == "":
                self.close()
            else:
                self.data = [int(x) for x in line.replace(' ', '').rstrip().split(',')]

        return self.data

    @property
    def curTS(self):
        ts = None
        if self.data:
            ts = self.data[0]
        return ts

    @property
    def curDir(self):
        d = None
        if self.data:
            d = self.data[1]
        return d

    @property
    def curBins(self):
        return self.data[3:]

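# Illustrative only, not executed: a minimal sketch of driving HistFileRdr by
# hand, assuming a hypothetical log file named 'job_clat_hist.1.log':
#
#   rdr = HistFileRdr('job_clat_hist.1.log')
#   while rdr.curTS is not None:   # curTS becomes None once the file is drained
#       print(rdr.curTS, rdr.curDir, sum(rdr.curBins))
#       rdr.nextData()             # explicitly advance to the next log line
#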
def weighted_percentile(percs, vs, ws):
    """ Use linear interpolation to calculate the weighted percentile.

        Value and weight arrays are first sorted by value. The cumulative
        distribution function (cdf) is then computed, after which np.interp
        finds the two values closest to our desired weighted percentile(s)
        and linearly interpolates them.

        percs  :: List of percentiles we want to calculate
        vs     :: Array of values we are computing the percentile of
        ws     :: Array of weights for our corresponding values
        return :: Array of percentiles
    """
    idx = np.argsort(vs)
    vs, ws = vs[idx], ws[idx] # weights and values sorted by value
    cdf = 100 * (ws.cumsum() - ws / 2.0) / ws.sum()
    return np.interp(percs, cdf, vs) # linear interpolation

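# A worked example with hypothetical numbers: for vs = [1, 2, 3] and equal
# weights ws = [1, 1, 1], the cdf above evaluates to [16.67, 50.0, 83.33],
# so weighted_percentile([50.0], vs, ws) interpolates to 2.0.
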
def weights(start_ts, end_ts, start, end):
    """ Calculate weights based on fraction of sample falling in the
        given interval [start,end]. Weights computed using vector / array
        computation instead of for-loops.

        Note that samples with zero time length are effectively ignored
        (we set their weight to zero).

        start_ts :: Array of start times for a set of samples
        end_ts   :: Array of end times for a set of samples
        start    :: int
        end      :: int
        return   :: Array of weights
    """
    sbounds = np.maximum(start_ts, start).astype(float)
    ebounds = np.minimum(end_ts, end).astype(float)
    ws = (ebounds - sbounds) / (end_ts - start_ts)
    if np.any(np.isnan(ws)):
        err("WARNING: zero-length sample(s) detected. Log file corrupt"
            " / bad time values? Ignoring these samples.\n")
    ws[np.where(np.isnan(ws))] = 0.0
    return ws

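# A worked example with hypothetical numbers: a sample spanning [0, 1000] ms
# overlaps the interval [500, 1500] for half of its duration, so
# weights(np.array([0]), np.array([1000]), 500, 1500) yields [0.5].
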
def weighted_average(vs, ws):
    return np.sum(vs * ws) / np.sum(ws)


percs = None
columns = None

def gen_output_columns(ctx):
    global percs, columns
    strpercs = re.split('[,:]', ctx.percentiles)
    percs = [50.0]  # always print 50% in 'median' column
    percs.extend(list(map(float, strpercs)))
    if ctx.directions:
        columns = ["end-time", "dir", "samples", "min", "avg", "median"]
    else:
        columns = ["end-time", "samples", "min", "avg", "median"]
    columns.extend(list(map(lambda x: x + '%', strpercs)))
    columns.append("max")

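# For example, with the default ctx.percentiles of "90,95,99" and directions
# disabled, the header becomes the one shown in the module docstring:
#   end-time, samples, min, avg, median, 90%, 95%, 99%, max
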
def fmt_float_list(ctx, num=1):
    """ Return a comma separated list of float formatters to the required number
        of decimal places. For instance:

          fmt_float_list(ctx.decimals=4, num=3) == "%.4f, %.4f, %.4f"
    """
    return ', '.join(["%%.%df" % ctx.decimals] * num)

# Default values - see beginning of main() for how we detect the number of
# columns in the input files:
__HIST_COLUMNS = 1216
__NON_HIST_COLUMNS = 3
__TOTAL_COLUMNS = __HIST_COLUMNS + __NON_HIST_COLUMNS

def read_chunk(rdr, sz):
    """ Read the next chunk of size sz from the given reader. """
    try:
        """ StopIteration occurs when the pandas reader is empty, and AttributeError
            occurs if rdr is None due to the file being empty. """
        new_arr = rdr.read().values
    except (StopIteration, AttributeError):
        return None

    # Let's leave the array as is, and let later code ignore the block size
    return new_arr

#""" Extract array of the times, directions wo times, and histograms matrix without times column. """
[155] Fix | Delete
#times, rws, szs = new_arr[:,0], new_arr[:,1], new_arr[:,2]
[156] Fix | Delete
#hists = new_arr[:,__NON_HIST_COLUMNS:]
[157] Fix | Delete
#times = times.reshape((len(times),1))
[158] Fix | Delete
#dirs = rws.reshape((len(rws),1))
[159] Fix | Delete
#arr = np.append(times, hists, axis=1)
[160] Fix | Delete
#return arr
[161] Fix | Delete
[162] Fix | Delete
def get_min(fps, arrs):
    """ Find the file with the current first row with the smallest start time """
    return min([fp for fp in fps if arrs[fp] is not None], key=lambda fp: arrs.get(fp)[0][0])

def histogram_generator(ctx, fps, sz):

    # Create a chunked pandas reader for each of the files:
    rdrs = {}
    for fp in fps:
        try:
            rdrs[fp] = pandas.read_csv(fp, dtype=int, header=None, chunksize=sz)
        except ValueError as e:
            # Note: e.message only exists on Python 2; compare via str(e) so
            # this also works under Python 3.
            if str(e) == 'No columns to parse from file':
                if ctx.warn: sys.stderr.write("WARNING: Empty input file encountered.\n")
                rdrs[fp] = None
            else:
                raise(e)

    # Initial histograms from disk:
    arrs = {fp: read_chunk(rdr, sz) for fp, rdr in rdrs.items()}
    while True:

        try:
            """ ValueError occurs when nothing more to read """
            fp = get_min(fps, arrs)
        except ValueError:
            return
        arr = arrs[fp]
        # Tag the row with the index of its source file (spliced in at column 1)
        # so downstream code can attribute each sample to a file.
        arri = np.insert(arr[0], 1, fps.index(fp))
        yield arri
        arrs[fp] = arr[1:]

        if arrs[fp].shape[0] == 0:
            arrs[fp] = read_chunk(rdrs[fp], sz)

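# Each yielded row is a flat integer array of the form
#   [timestamp, file_index, direction, block_size, bin_0, ..., bin_N],
# i.e. the original log row with the source file's index inserted at column 1.
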
def _plat_idx_to_val(idx, edge=0.5, FIO_IO_U_PLAT_BITS=6, FIO_IO_U_PLAT_VAL=64):
    """ Taken from fio's stat.c for calculating the latency value of a bin
        from that bin's index.

            idx  : the value of the index into the histogram bins
            edge : fractional value in the range [0,1]** indicating how far into
                   the bin we wish to compute the latency value of.

        ** edge = 0.0 and 1.0 computes the lower and upper latency bounds
           respectively of the given bin index. """

    # MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
    # all bits of the sample as index
    if (idx < (FIO_IO_U_PLAT_VAL << 1)):
        return idx

    # Find the group and compute the minimum value of that group
    error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
    base = 1 << (error_bits + FIO_IO_U_PLAT_BITS)

    # Find the bucket number within the group
    k = idx % FIO_IO_U_PLAT_VAL

    # Return the mean (if edge=0.5) of the range of the bucket
    return base + ((k + edge) * (1 << error_bits))

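# A worked example with the default constants: indices below 128 map to
# themselves, so _plat_idx_to_val(127) == 127; for _plat_idx_to_val(128),
# error_bits = 1, base = 128 and k = 0, giving 128 + 0.5 * 2 = 129.0.
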
def plat_idx_to_val_coarse(idx, coarseness, edge=0.5):
    """ Converts the given *coarse* index into a non-coarse index as used by fio
        in stat.h:plat_idx_to_val(), subsequently computing the appropriate
        latency value for that bin.
    """

    # Multiply the index by the power of 2 coarseness to get the bin
    # index, with a max of 1536 bins (FIO_IO_U_PLAT_GROUP_NR = 24 in stat.h)
    stride = 1 << coarseness
    idx = idx * stride
    lower = _plat_idx_to_val(idx, edge=0.0)
    upper = _plat_idx_to_val(idx + stride, edge=1.0)
    return lower + (upper - lower) * edge

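# A worked example: with coarseness=1, coarse index 64 maps to non-coarse
# index 128, so lower = _plat_idx_to_val(128, edge=0.0) = 128.0 and
# upper = _plat_idx_to_val(130, edge=1.0) = 134.0, returning the midpoint 131.0.
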
def print_all_stats(ctx, end, mn, ss_cnt, vs, ws, mx, dir=None):
    ps = weighted_percentile(percs, vs, ws)

    avg = weighted_average(vs, ws)
    values = [mn, avg] + list(ps) + [mx]
    if ctx.directions:
        row = [end, dir, ss_cnt]
        fmt = "%d, %s, %d, "
    else:
        row = [end, ss_cnt]
        fmt = "%d, %d, "
    row = row + [float(x) / ctx.divisor for x in values]
    if ctx.divisor > 1:
        fmt = fmt + fmt_float_list(ctx, len(percs) + 3)
    else:
        # max and min are decimal values if no divisor
        fmt = fmt + "%d, " + fmt_float_list(ctx, len(percs) + 1) + ", %d"

    print(fmt % tuple(row))

def update_extreme(val, fncn, new_val):
    """ Calculate min / max in the presence of None values """
    if val is None: return new_val
    else: return fncn(val, new_val)

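# e.g. update_extreme(None, min, 5) -> 5, while update_extreme(3, min, 5) -> 3.
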
# See beginning of main() for how bin_vals are computed
bin_vals = []
lower_bin_vals = [] # lower edge of each bin
upper_bin_vals = [] # upper edge of each bin

def process_interval(ctx, iHist, iEnd, dir):
    """ Print estimated percentiles for the given merged sample. """
    ss_cnt = 0 # number of samples affecting this interval
    mn_bin_val, mx_bin_val = None, None

    # Update total number of samples affecting current interval histogram:
    ss_cnt += np.sum(iHist)

    # Update min and max bin values
    idxs = np.nonzero(iHist != 0)[0]
    if idxs.size > 0:
        mn_bin_val = bin_vals[idxs[0]]
        mx_bin_val = bin_vals[idxs[-1]]

    if ss_cnt > 0: print_all_stats(ctx, iEnd, mn_bin_val, ss_cnt, bin_vals, iHist, mx_bin_val, dir=dir)


dir_map = ['r', 'w', 't'] # map of directional value in log to textual representation

def process_weighted_interval(ctx, samples, iStart, iEnd, printdirs):
    """ Construct the weighted histogram for the given interval by scanning
        through all the histograms and figuring out which of their bins have
        samples with latencies which overlap with the given interval
        [iStart,iEnd].
    """

    times, files, dirs, sizes, hists = samples[:,0], samples[:,1], samples[:,2], samples[:,3], samples[:,4:]
    iHist = {}; ss_cnt = {}; mn_bin_val = {}; mx_bin_val = {}
    for dir in printdirs:
        iHist[dir] = np.zeros(__HIST_COLUMNS, dtype=float)
        ss_cnt[dir] = 0 # number of samples affecting this interval
        mn_bin_val[dir] = None
        mx_bin_val[dir] = None

    for end_time, file, dir, hist in zip(times, files, dirs, hists):

        # Only look at bins of the current histogram sample which
        # started before the end of the current time interval [start,end]
        start_times = (end_time - 0.5 * ctx.interval) - bin_vals / ctx.time_divisor
        idx = np.where(start_times < iEnd)
        s_ts, l_bvs, u_bvs, hs = start_times[idx], lower_bin_vals[idx], upper_bin_vals[idx], hist[idx]

        # Increment the current interval histogram by the weighted values of the
        # overlapping samples, updating the total sample count and the min / max
        # bin values as necessary.
        textdir = dir_map[dir]
        ws = hs * weights(s_ts, end_time, iStart, iEnd)
        mmidx = np.where(hs != 0)[0]
        if 'm' in printdirs: # mixed workload statistics
            iHist['m'][idx] += ws
            ss_cnt['m'] += np.sum(hs)
            if mmidx.size > 0:
                mn_bin_val['m'] = update_extreme(mn_bin_val['m'], min, l_bvs[max(0, mmidx[0] - 1)])
                mx_bin_val['m'] = update_extreme(mx_bin_val['m'], max, u_bvs[min(len(hs) - 1, mmidx[-1] + 1)])
        if textdir in printdirs:
            iHist[textdir][idx] += ws
            ss_cnt[textdir] += np.sum(hs)
            if mmidx.size > 0:
                mn_bin_val[textdir] = update_extreme(mn_bin_val[textdir], min, l_bvs[max(0, mmidx[0] - 1)])
                mx_bin_val[textdir] = update_extreme(mx_bin_val[textdir], max, u_bvs[min(len(hs) - 1, mmidx[-1] + 1)])

    for textdir in sorted(printdirs):
        if ss_cnt[textdir] > 0: print_all_stats(ctx, iEnd, mn_bin_val[textdir], ss_cnt[textdir], bin_vals, iHist[textdir], mx_bin_val[textdir], dir=textdir)

def guess_max_from_bins(ctx, hist_cols):
    """ Try to guess the GROUP_NR from the given number of histogram
        columns seen in an input file """
    max_coarse = 8
    if ctx.group_nr < 19 or ctx.group_nr > 26:
        bins = [ctx.group_nr * (1 << 6)]
    else:
        bins = [1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664]
    coarses = range(max_coarse + 1)
    fncn = lambda z: list(map(lambda x: z / 2**x if z % 2**x == 0 else -10, coarses))

    arr = np.transpose(list(map(fncn, bins)))
    idx = np.where(arr == hist_cols)
    if len(idx[1]) == 0:
        table = repr(arr.astype(int)).replace('-10', 'N/A').replace('array', ' ')
        errmsg = ("Unable to determine bin values from input clat_hist files. Namely\n"
                  "the first line of file '%s' " % ctx.FILE[0] + "has %d " % (__TOTAL_COLUMNS,) +
                  "columns of which we assume %d " % (hist_cols,) + "correspond to histogram bins.\n"
                  "This number needs to be equal to one of the following numbers:\n\n"
                  + table + "\n\n"
                  "Possible reasons and corresponding solutions:\n"
                  "  - Input file(s) does not contain histograms.\n"
                  "  - You recompiled fio with a different GROUP_NR. If so please specify this\n"
                  "    new GROUP_NR on the command line with --group_nr\n")
        if runascmd:
            err(errmsg)
            exit(1)
        else:
            raise RuntimeError(errmsg)

    return bins[idx[1][0]]

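# A worked example: with the default GROUP_NR range, the candidate bin counts
# are [1216, 1280, ..., 1664]; an input file whose rows carry 608 histogram
# columns matches 1216 / 2**1 (coarseness 1), so guess_max_from_bins returns 1216.
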
def output_weighted_interval_data(ctx,printdirs):

    fps = [open(f, 'r') for f in ctx.FILE]
    gen = histogram_generator(ctx, fps, ctx.buff_size)

    print(', '.join(columns))

    try:
        start, end = 0, ctx.interval
        arr = np.empty(shape=(0, __TOTAL_COLUMNS + 1), dtype=int)
        more_data = True
        while more_data or len(arr) > 0:

            # Read up to ctx.max_latency (default 20 seconds) of data from end of current interval.
            while len(arr) == 0 or arr[-1][0] < ctx.max_latency * 1000 + end:
                try:
                    new_arr = next(gen)
                except StopIteration:
                    more_data = False
                    break
                nashape = new_arr.reshape((1, __TOTAL_COLUMNS + 1))
                arr = np.append(arr, nashape, axis=0)
            #arr = arr.astype(int)

            if arr.size > 0:
                # Jump immediately to the start of the input, rounding
                # down to the nearest multiple of the interval (useful when --log_unix_epoch
                # was used to create these histograms):
                if start == 0 and arr[0][0] - ctx.max_latency > end:
                    start = arr[0][0] - ctx.max_latency
                    start = start - (start % ctx.interval)
                    end = start + ctx.interval

                process_weighted_interval(ctx, arr, start, end, printdirs)

                # Update arr to throw away samples we no longer need - samples which
                # end before the start of the next interval, i.e. the end of the
                # current interval:
                idx = np.where(arr[:,0] > end)
                arr = arr[idx]

            start += ctx.interval
            end = start + ctx.interval
    finally:
        for fp in fps:
            fp.close()

def output_interval_data(ctx,directions):
    fps = [HistFileRdr(f) for f in ctx.FILE]

    print(', '.join(columns))

    start = 0
    end = ctx.interval
    while True:

        more_data = False

        # add bins from all files in target intervals
        arr = None
        numSamples = 0
        while True:
            foundSamples = False
            for fp in fps:
                ts = fp.curTS
                if ts and ts + 10 < end: # shift sample time when very close to an end time
                    curdirect = fp.curDir
                    numSamples += 1
                    foundSamples = True
                    if arr is None:
                        arr = {}
                        for d in directions:
                            arr[d] = np.zeros(shape=(__HIST_COLUMNS), dtype=int)
                    if 'm' in arr:
                        arr['m'] = np.add(arr['m'], fp.curBins)
                    if 'r' in arr and curdirect == 0:
                        arr['r'] = np.add(arr['r'], fp.curBins)
                    if 'w' in arr and curdirect == 1:
                        arr['w'] = np.add(arr['w'], fp.curBins)
                    if 't' in arr and curdirect == 2:
                        arr['t'] = np.add(arr['t'], fp.curBins)

                    more_data = True
                    fp.nextData()
                elif ts:
                    more_data = True

            # Stop when we have reached the end of all files, or have gone
            # through every file without finding a sample in this interval.
            if not more_data or not foundSamples:
                break

        if arr is not None:
            #print("{} size({}) samples({}) nonzero({}):".format(end, arr.size, numSamples, np.count_nonzero(arr)), str(arr), )
            for d in sorted(arr.keys()):
                aval = arr[d]
                process_interval(ctx, aval, end, d)

        # reached end of all files
        if not more_data:
            break

        start += ctx.interval
        end = start + ctx.interval

def main(ctx):

    if ctx.job_file:
        try:
            from configparser import SafeConfigParser, NoOptionError
        except ImportError:
            from ConfigParser import SafeConfigParser, NoOptionError

        cp = SafeConfigParser(allow_no_value=True)
        with open(ctx.job_file, 'r') as fp:
            cp.readfp(fp)

        if ctx.interval is None:
            # Auto detect --interval value
            for s in cp.sections():
                try:
                    hist_msec = cp.get(s, 'log_hist_msec')
                    if hist_msec is not None:
                        ctx.interval = int(hist_msec)
                except NoOptionError:
                    pass

    if not hasattr(ctx, 'percentiles'):
        ctx.percentiles = "90,95,99"

    if ctx.directions:
        ctx.directions = ctx.directions.lower()

    if ctx.interval is None:
        ctx.interval = 1000