code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
class windowed_incStat:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class windowed_incStat_2D:
def __init__(self, L):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat_2D(l))
self.other_winStat = None
def getStats(self):
allstats = np.zeros(len(self.L) * 7)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats2D([self.other_winStat[0].
incStats[i]])
allstats[i * 7:i * 7 + 7] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]
.incStats[i].last_residule, decay)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def join_with_winStat(self, other_winStat):
self.other_winStat = other_winStat
other_winStat[0].other_winStat = [self]
for i in range(0, len(self.incStats)):
self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
def getMaxW(self, t):
lastIncStat = len(self.incStats)
self.incStats[lastIncStat - 1].processDecay(t)
return self.incStats[lastIncStat - 1].w
class incStatHT:
def __init__(self):
self.HT = dict()
def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):
wis = self.HT.get(key)
if wis is None:
wis = [windowed_incStat(L, isTypeJitter)]
self.HT[key] = wis
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def getHeaders_1D(self, L):
tmp_incs = windowed_incStat(L)
return tmp_incs.getHeaders()
class incStatHT_2D(incStatHT):
def updateGet_2D(self, key1, key2, val, timestamp, L):
key = key1 + key2
wis = self.HT.get(key)
if wis is None:
wis = self.create_2D_entry(key1, key2, L)
elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:
self.create_1D_entry(key1, key2, L, wis)
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def create_1D_entry(self, key1, key2, L, wis):
wis_k2_k1 = [windowed_incStat_2D(L)]
wis[0].join_with_winStat(wis_k2_k1)
self.HT[key2 + key1] = wis_k2_k1
return wis_k2_k1
def create_2D_entry(self, key1, key2, L):
wis_k1_k2 = [windowed_incStat_2D(L)]
wis_k2_k1 = [windowed_incStat_2D(L)]
wis_k1_k2[0].join_with_winStat(wis_k2_k1)
self.HT[key1 + key2] = wis_k1_k2
self.HT[key2 + key1] = wis_k2_k1
return wis_k1_k2
def getHeaders_2D(self, L):
tmp_incs = windowed_incStat_2D(L)
return tmp_incs.getHeaders()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class incStat_2D(incStat):
def __init__(self, Lambda):
self.CF1 = 0
self.CF2 = 0
self.CF3 = None
self.w = 0
self.Lambda = Lambda
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = 0
def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):
self.processDecay(t)
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = v - self.mean()
self.CF3[0] = self.CF3[0
] + self.last_residule * other_incS_lastRes * other_incS_decay
def processDecay(self, timestamp):
factor = 1
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, -self.Lambda * timeDiff)
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
if self.CF3 == None:
self.CF3 = [0]
self.CF3[0] = self.CF3[0] * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def radius(self, istat_ref):
return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].
var(), 2))
def magnitude(self, istat_ref):
return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].
mean(), 2))
<|reserved_special_token_0|>
def p_cc(self, istat_ref):
ss = self.std() * istat_ref[0].std()
if ss != 0:
return self.cov(istat_ref[0]) / ss
else:
return 0
<|reserved_special_token_0|>
def getHeaders(self):
return ('weight', 'mean', 'std', 'magnitude', 'radius',
'covariance', 'pcc')
class windowed_incStat:
def __init__(self, L, isTypeJitter=False):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat(l, isTypeJitter))
def getStats(self):
allstats = np.zeros(len(self.L) * 3)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats()
allstats[i * 3:i * 3 + 3] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
self.incStats[i].insert(val, timestamp)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def getMaxW(self, t):
mx = 0
for stat in self.incStats:
stat.processDecay(t)
if stat.w > mx:
mx = stat.w
return mx
class windowed_incStat_2D:
def __init__(self, L):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat_2D(l))
self.other_winStat = None
def getStats(self):
allstats = np.zeros(len(self.L) * 7)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats2D([self.other_winStat[0].
incStats[i]])
allstats[i * 7:i * 7 + 7] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]
.incStats[i].last_residule, decay)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def join_with_winStat(self, other_winStat):
self.other_winStat = other_winStat
other_winStat[0].other_winStat = [self]
for i in range(0, len(self.incStats)):
self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
def getMaxW(self, t):
lastIncStat = len(self.incStats)
self.incStats[lastIncStat - 1].processDecay(t)
return self.incStats[lastIncStat - 1].w
class incStatHT:
def __init__(self):
self.HT = dict()
def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):
wis = self.HT.get(key)
if wis is None:
wis = [windowed_incStat(L, isTypeJitter)]
self.HT[key] = wis
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def getHeaders_1D(self, L):
tmp_incs = windowed_incStat(L)
return tmp_incs.getHeaders()
class incStatHT_2D(incStatHT):
def updateGet_2D(self, key1, key2, val, timestamp, L):
key = key1 + key2
wis = self.HT.get(key)
if wis is None:
wis = self.create_2D_entry(key1, key2, L)
elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:
self.create_1D_entry(key1, key2, L, wis)
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def create_1D_entry(self, key1, key2, L, wis):
wis_k2_k1 = [windowed_incStat_2D(L)]
wis[0].join_with_winStat(wis_k2_k1)
self.HT[key2 + key1] = wis_k2_k1
return wis_k2_k1
def create_2D_entry(self, key1, key2, L):
wis_k1_k2 = [windowed_incStat_2D(L)]
wis_k2_k1 = [windowed_incStat_2D(L)]
wis_k1_k2[0].join_with_winStat(wis_k2_k1)
self.HT[key1 + key2] = wis_k1_k2
self.HT[key2 + key1] = wis_k2_k1
return wis_k1_k2
def getHeaders_2D(self, L):
tmp_incs = windowed_incStat_2D(L)
return tmp_incs.getHeaders()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class incStat:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class incStat_2D(incStat):
def __init__(self, Lambda):
self.CF1 = 0
self.CF2 = 0
self.CF3 = None
self.w = 0
self.Lambda = Lambda
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = 0
def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):
self.processDecay(t)
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = v - self.mean()
self.CF3[0] = self.CF3[0
] + self.last_residule * other_incS_lastRes * other_incS_decay
def processDecay(self, timestamp):
factor = 1
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, -self.Lambda * timeDiff)
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
if self.CF3 == None:
self.CF3 = [0]
self.CF3[0] = self.CF3[0] * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def radius(self, istat_ref):
return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].
var(), 2))
def magnitude(self, istat_ref):
return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].
mean(), 2))
def cov(self, istat_ref):
if math.isnan(self.cur_cov):
self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
return self.cur_cov
def p_cc(self, istat_ref):
ss = self.std() * istat_ref[0].std()
if ss != 0:
return self.cov(istat_ref[0]) / ss
else:
return 0
def allstats2D(self, istat_ref):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
self.cur_std = math.sqrt(self.cur_var)
if istat_ref[0].w != 0:
cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(
istat_ref[0].mean(), 2))
radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(
istat_ref[0].var(), 2))
ss = self.cur_std * istat_ref[0].std()
pcc = 0
if ss != 0:
pcc = cov / ss
else:
magnitude = self.cur_mean
radius = self.cur_var
cov = 0
pcc = 0
return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc
def getHeaders(self):
return ('weight', 'mean', 'std', 'magnitude', 'radius',
'covariance', 'pcc')
class windowed_incStat:
def __init__(self, L, isTypeJitter=False):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat(l, isTypeJitter))
def getStats(self):
allstats = np.zeros(len(self.L) * 3)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats()
allstats[i * 3:i * 3 + 3] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
self.incStats[i].insert(val, timestamp)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def getMaxW(self, t):
mx = 0
for stat in self.incStats:
stat.processDecay(t)
if stat.w > mx:
mx = stat.w
return mx
class windowed_incStat_2D:
def __init__(self, L):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat_2D(l))
self.other_winStat = None
def getStats(self):
allstats = np.zeros(len(self.L) * 7)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats2D([self.other_winStat[0].
incStats[i]])
allstats[i * 7:i * 7 + 7] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]
.incStats[i].last_residule, decay)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def join_with_winStat(self, other_winStat):
self.other_winStat = other_winStat
other_winStat[0].other_winStat = [self]
for i in range(0, len(self.incStats)):
self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
def getMaxW(self, t):
lastIncStat = len(self.incStats)
self.incStats[lastIncStat - 1].processDecay(t)
return self.incStats[lastIncStat - 1].w
class incStatHT:
def __init__(self):
self.HT = dict()
def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):
wis = self.HT.get(key)
if wis is None:
wis = [windowed_incStat(L, isTypeJitter)]
self.HT[key] = wis
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def getHeaders_1D(self, L):
tmp_incs = windowed_incStat(L)
return tmp_incs.getHeaders()
class incStatHT_2D(incStatHT):
def updateGet_2D(self, key1, key2, val, timestamp, L):
key = key1 + key2
wis = self.HT.get(key)
if wis is None:
wis = self.create_2D_entry(key1, key2, L)
elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:
self.create_1D_entry(key1, key2, L, wis)
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def create_1D_entry(self, key1, key2, L, wis):
wis_k2_k1 = [windowed_incStat_2D(L)]
wis[0].join_with_winStat(wis_k2_k1)
self.HT[key2 + key1] = wis_k2_k1
return wis_k2_k1
def create_2D_entry(self, key1, key2, L):
wis_k1_k2 = [windowed_incStat_2D(L)]
wis_k2_k1 = [windowed_incStat_2D(L)]
wis_k1_k2[0].join_with_winStat(wis_k2_k1)
self.HT[key1 + key2] = wis_k1_k2
self.HT[key2 + key1] = wis_k2_k1
return wis_k1_k2
def getHeaders_2D(self, L):
tmp_incs = windowed_incStat_2D(L)
return tmp_incs.getHeaders()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class incStat:
def __init__(self, Lambda, isTypeJitter=False):
self.CF1 = 0
self.CF2 = 0
self.w = 0
self.isTypeJitter = isTypeJitter
self.Lambda = Lambda
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
def insert(self, v, t=0):
if self.isTypeJitter:
if not math.isnan(self.lastTimestamp):
v = t - self.lastTimestamp
else:
v = 0
self.processDecay(t)
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
def processDecay(self, timestamp):
factor = 1
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, -self.Lambda * timeDiff)
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def weight(self):
return self.w
def mean(self):
if math.isnan(self.cur_mean):
self.cur_mean = self.CF1 / self.w
return self.cur_mean
def var(self):
if math.isnan(self.cur_var):
self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))
return self.cur_var
def std(self):
if math.isnan(self.cur_std):
self.cur_std = math.sqrt(self.var())
return self.cur_std
def allstats(self):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
return self.w, self.cur_mean, self.cur_var
def getHeaders(self):
return 'weight', 'mean', 'variance'
class incStat_2D(incStat):
def __init__(self, Lambda):
self.CF1 = 0
self.CF2 = 0
self.CF3 = None
self.w = 0
self.Lambda = Lambda
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = 0
def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):
self.processDecay(t)
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = v - self.mean()
self.CF3[0] = self.CF3[0
] + self.last_residule * other_incS_lastRes * other_incS_decay
def processDecay(self, timestamp):
factor = 1
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, -self.Lambda * timeDiff)
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
if self.CF3 == None:
self.CF3 = [0]
self.CF3[0] = self.CF3[0] * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def radius(self, istat_ref):
return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].
var(), 2))
def magnitude(self, istat_ref):
return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].
mean(), 2))
def cov(self, istat_ref):
if math.isnan(self.cur_cov):
self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
return self.cur_cov
def p_cc(self, istat_ref):
ss = self.std() * istat_ref[0].std()
if ss != 0:
return self.cov(istat_ref[0]) / ss
else:
return 0
def allstats2D(self, istat_ref):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
self.cur_std = math.sqrt(self.cur_var)
if istat_ref[0].w != 0:
cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(
istat_ref[0].mean(), 2))
radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(
istat_ref[0].var(), 2))
ss = self.cur_std * istat_ref[0].std()
pcc = 0
if ss != 0:
pcc = cov / ss
else:
magnitude = self.cur_mean
radius = self.cur_var
cov = 0
pcc = 0
return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc
def getHeaders(self):
return ('weight', 'mean', 'std', 'magnitude', 'radius',
'covariance', 'pcc')
class windowed_incStat:
def __init__(self, L, isTypeJitter=False):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat(l, isTypeJitter))
def getStats(self):
allstats = np.zeros(len(self.L) * 3)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats()
allstats[i * 3:i * 3 + 3] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
self.incStats[i].insert(val, timestamp)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def getMaxW(self, t):
mx = 0
for stat in self.incStats:
stat.processDecay(t)
if stat.w > mx:
mx = stat.w
return mx
class windowed_incStat_2D:
def __init__(self, L):
self.incStats = list()
self.L = sorted(L, reverse=True)
for l in self.L:
self.incStats.append(incStat_2D(l))
self.other_winStat = None
def getStats(self):
allstats = np.zeros(len(self.L) * 7)
for i in range(0, len(self.incStats)):
stats = self.incStats[i].allstats2D([self.other_winStat[0].
incStats[i]])
allstats[i * 7:i * 7 + 7] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0, len(self.incStats)):
headers = headers + [('L' + str(self.L[i]) + '_' + header) for
header in self.incStats[i].getHeaders()]
return headers
def updateStats(self, val, timestamp):
for i in range(0, len(self.incStats)):
decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]
.incStats[i].last_residule, decay)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def join_with_winStat(self, other_winStat):
self.other_winStat = other_winStat
other_winStat[0].other_winStat = [self]
for i in range(0, len(self.incStats)):
self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
def getMaxW(self, t):
lastIncStat = len(self.incStats)
self.incStats[lastIncStat - 1].processDecay(t)
return self.incStats[lastIncStat - 1].w
class incStatHT:
def __init__(self):
self.HT = dict()
def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):
wis = self.HT.get(key)
if wis is None:
wis = [windowed_incStat(L, isTypeJitter)]
self.HT[key] = wis
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def getHeaders_1D(self, L):
tmp_incs = windowed_incStat(L)
return tmp_incs.getHeaders()
class incStatHT_2D(incStatHT):
def updateGet_2D(self, key1, key2, val, timestamp, L):
key = key1 + key2
wis = self.HT.get(key)
if wis is None:
wis = self.create_2D_entry(key1, key2, L)
elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:
self.create_1D_entry(key1, key2, L, wis)
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def create_1D_entry(self, key1, key2, L, wis):
wis_k2_k1 = [windowed_incStat_2D(L)]
wis[0].join_with_winStat(wis_k2_k1)
self.HT[key2 + key1] = wis_k2_k1
return wis_k2_k1
def create_2D_entry(self, key1, key2, L):
wis_k1_k2 = [windowed_incStat_2D(L)]
wis_k2_k1 = [windowed_incStat_2D(L)]
wis_k1_k2[0].join_with_winStat(wis_k2_k1)
self.HT[key1 + key2] = wis_k1_k2
self.HT[key2 + key1] = wis_k2_k1
return wis_k1_k2
def getHeaders_2D(self, L):
tmp_incs = windowed_incStat_2D(L)
return tmp_incs.getHeaders()
<|reserved_special_token_1|>
import math
import numpy as np
class incStat:
    """Incremental (streaming) statistics over an exponentially damped window.

    Maintains the decayed linear sum (CF1), sum of squares (CF2) and weight
    (w) of a time-series so mean/variance/std can be queried at any moment
    without storing samples.  Decay is 2^(-Lambda * deltaT), applied lazily
    whenever a new timestamp is seen.
    """

    def __init__(self, Lambda, isTypeJitter=False):
        self.CF1 = 0              # decayed linear sum
        self.CF2 = 0              # decayed sum of squares
        self.w = 0                # decayed weight (effective sample count)
        self.isTypeJitter = isTypeJitter
        self.Lambda = Lambda      # decay rate
        self.lastTimestamp = np.nan
        # cached statistics; nan marks them stale (recomputed on demand)
        self.cur_mean = np.nan
        self.cur_var = np.nan
        self.cur_std = np.nan

    def insert(self, v, t=0):
        """Insert observation v arriving at time t.

        In jitter mode the tracked quantity is the inter-arrival time, so v
        is replaced by (t - lastTimestamp); the very first arrival counts 0.
        """
        if self.isTypeJitter:
            v = 0 if math.isnan(self.lastTimestamp) else t - self.lastTimestamp
        self.processDecay(t)
        self.CF1 = self.CF1 + v
        self.CF2 = self.CF2 + math.pow(v, 2)
        self.w = self.w + 1
        # invalidate cached stats; they are rebuilt lazily on next query
        self.cur_mean = np.nan
        self.cur_var = np.nan
        self.cur_std = np.nan

    def processDecay(self, timestamp):
        """Decay all sums by the time elapsed since the previous event,
        advance lastTimestamp, and return the decay factor used."""
        decay = 1
        if not math.isnan(self.lastTimestamp):
            elapsed = timestamp - self.lastTimestamp
            decay = math.pow(2, -self.Lambda * elapsed)
            self.CF1 *= decay
            self.CF2 *= decay
            self.w *= decay
        self.lastTimestamp = timestamp
        return decay

    def weight(self):
        return self.w

    def mean(self):
        """Decayed mean (cached until the next insert)."""
        if math.isnan(self.cur_mean):
            self.cur_mean = self.CF1 / self.w
        return self.cur_mean

    def var(self):
        """Decayed variance (cached until the next insert)."""
        if math.isnan(self.cur_var):
            self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))
        return self.cur_var

    def std(self):
        """Decayed standard deviation (cached until the next insert)."""
        if math.isnan(self.cur_std):
            self.cur_std = math.sqrt(self.var())
        return self.cur_std

    def allstats(self):
        """Recompute and return (weight, mean, variance) in one shot."""
        self.cur_mean = self.CF1 / self.w
        self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
        return self.w, self.cur_mean, self.cur_var

    def getHeaders(self):
        return "weight", "mean", "variance"
#like incStat, but maintains stats between two streams
class incStat_2D(incStat):
    """Like incStat, but additionally maintains joint statistics between two
    parallel streams (e.g. the rx and tx channels of one connection).

    CF3 is the decayed sum of residual products, held in a shared one-element
    list so that both joined partners update the same accumulator (the list is
    installed by windowed_incStat_2D.join_with_winStat).
    """

    def __init__(self, Lambda):  # timestamp is creation time
        self.CF1 = 0  # linear sum
        self.CF2 = 0  # sum of squares
        self.CF3 = None  # shared [sum of residual products]; None until joined
        self.w = 0  # weight
        self.Lambda = Lambda  # decay factor
        self.lastTimestamp = np.nan
        # cached stats; nan means stale
        self.cur_mean = np.nan
        self.cur_var = np.nan
        self.cur_std = np.nan
        self.cur_cov = np.nan
        self.last_residule = 0  # last residual (v - mean), read by the partner stream

    # other_incS_decay is the decay factor of the other incStat
    def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):
        """Insert v at time t and fold the product of this stream's residual
        with the partner's last residual into the shared CF3 (expensive)."""
        self.processDecay(t)
        # update with v
        self.CF1 = self.CF1 + v
        self.CF2 = self.CF2 + math.pow(v, 2)
        self.w = self.w + 1
        self.cur_mean = np.nan  # force recalculation when next queried
        self.cur_var = np.nan
        self.cur_std = np.nan
        self.cur_cov = np.nan
        self.last_residule = v - self.mean()
        self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay

    def processDecay(self, timestamp):
        """Decay all sums, including the shared CF3, and return the factor."""
        factor = 1
        if not math.isnan(self.lastTimestamp):
            timeDiff = timestamp - self.lastTimestamp
            factor = math.pow(2, (-self.Lambda * timeDiff))
            self.CF1 = self.CF1 * factor
            self.CF2 = self.CF2 * factor
            # fix: compare to None with 'is', not '==' (PEP 8 singleton rule)
            if self.CF3 is None:
                self.CF3 = [0]  # lazily create when this stream was never joined
            self.CF3[0] = self.CF3[0] * factor
            self.w = self.w * factor
        self.lastTimestamp = timestamp
        return factor

    def radius(self, istat_ref):  # the radius of the two streams' variances
        return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].var(), 2))

    def magnitude(self, istat_ref):  # the magnitude of the two streams' means
        return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))

    # covariance approximation using a hold-and-wait model
    def cov(self, istat_ref):  # assumes 'self.lastTimestamp' is the current time
        if math.isnan(self.cur_cov):
            self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
        return self.cur_cov

    # Pearson correlation coefficient (using a hold-and-wait model)
    def p_cc(self, istat_ref):  # assumes 'self.lastTimestamp' is the current time
        ss = self.std() * istat_ref[0].std()
        if ss != 0:
            return self.cov(istat_ref[0]) / ss
        else:
            return 0

    # calculates and pulls all stats in one pass
    def allstats2D(self, istat_ref):
        """Return (w, mean, std, magnitude, radius, cov, pcc); the 2D terms
        fall back to the 1D values when the partner has zero weight."""
        self.cur_mean = self.CF1 / self.w
        self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
        self.cur_std = math.sqrt(self.cur_var)
        if istat_ref[0].w != 0:
            cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
            magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))
            radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))
            ss = self.cur_std * istat_ref[0].std()
            pcc = 0
            if ss != 0:
                pcc = cov / ss
        else:
            magnitude = self.cur_mean
            radius = self.cur_var
            cov = 0
            pcc = 0
        return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc

    def getHeaders(self):
        return "weight", "mean", "std", "magnitude", "radius", "covariance", "pcc"
# A set of 3 incremental statistics for a 1 or 2 dimensional time-series
class windowed_incStat:
    """A bank of incStat windows (one per lambda) over a single time-series."""

    # Each lambda in L determines one incStat's decay window: 2^(-lambda*deltaT)
    def __init__(self, L, isTypeJitter=False):
        self.L = sorted(L, reverse=True)  # largest lambda first
        self.incStats = [incStat(lam, isTypeJitter) for lam in self.L]

    def getStats(self):
        """Weight, mean and variance of every window, concatenated."""
        out = np.zeros(len(self.L) * 3)  # 3 stats per lambda
        for idx, stat in enumerate(self.incStats):
            out[idx * 3:idx * 3 + 3] = stat.allstats()
        return out

    def getHeaders(self):
        """Stat names, each prefixed with its window's lambda."""
        labels = []
        for lam, stat in zip(self.L, self.incStats):
            labels += ["L" + str(lam) + "_" + h for h in stat.getHeaders()]
        return labels

    def updateStats(self, val, timestamp):
        """Feed one observation (val arriving at timestamp) into every window."""
        for stat in self.incStats:
            stat.insert(val, timestamp)

    def updateAndGetStats(self, val, timestamp):
        """Insert, then return the refreshed statistics."""
        self.updateStats(val, timestamp)
        return self.getStats()

    def getMaxW(self, t):
        """Largest decayed weight across the windows after decaying to time t."""
        best = 0
        for stat in self.incStats:
            stat.processDecay(t)
            if stat.w > best:
                best = stat.w
        return best
# A set of 3 incremental statistics for a 1 or 2 dimensional time-series
# A bank of 2D incremental statistics (one incStat_2D per lambda) for one
# direction of a pair of parallel time-series (e.g. rx vs tx).
class windowed_incStat_2D:
    # Each lambda parameter in L determines a incStat's decay window size (factor)
    def __init__(self, L):
        self.incStats = list()
        self.L = sorted(L,reverse=True) #largest lambda to smallest
        for l in self.L:
            self.incStats.append(incStat_2D(l))
        self.other_winStat = None # a mutable reference [] to the windowed_incStat monitoring the other parallel time-series; installed by join_with_winStat
    # returns the weight, mean, std, magnitude, radius, covariance and pcc of each window
    def getStats(self):
        allstats = np.zeros(len(self.L)*7) # 7 stats for each lambda (matches allstats2D's 7-tuple)
        for i in range(0,len(self.incStats)):
            stats = self.incStats[i].allstats2D([self.other_winStat[0].incStats[i]])
            allstats[i*7:(i*7+7)] = stats
        return allstats
    def getHeaders(self):
        """Stat names for every window, each prefixed with its lambda."""
        headers = []
        for i in range(0,len(self.incStats)):
            headers = headers + ["L"+str(self.L[i])+"_"+header for header in self.incStats[i].getHeaders()]
        return headers
    # updates the statistics
    # val is the new observation
    # timestamp is the arrival time of val.
    # The partner window is decayed FIRST so that its last_residule and decay
    # factor are current when folded into the shared covariance accumulator.
    def updateStats(self, val, timestamp):
        for i in range(0,len(self.incStats)):
            decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
            self.incStats[i].insert2D(val, timestamp, self.other_winStat[0].incStats[i].last_residule, decay)
    # First updates, then gets the stats (weight, mean, variance, magnitude, radius, and covariance)
    def updateAndGetStats(self, val, timestamp):
        self.updateStats(val, timestamp)
        return self.getStats()
    # Joins two windowed_incStat (e.g. rx and tx channels) together by cross-
    # referencing them and making each lambda-pair share ONE CF3 accumulator
    # (a one-element list aliased by both sides).
    # other_winStat should be a [] mutable object
    def join_with_winStat(self, other_winStat): # protect with mutexes!
        self.other_winStat = other_winStat
        other_winStat[0].other_winStat = [self]
        for i in range(0,len(self.incStats)):
            self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
    # NOTE(review): only the last window (smallest lambda) is consulted here,
    # unlike windowed_incStat.getMaxW which scans all windows — presumably the
    # slowest-decaying window always holds the max weight; confirm intent.
    def getMaxW(self,t):
        lastIncStat = len(self.incStats)
        self.incStats[lastIncStat-1].processDecay(t)
        return self.incStats[lastIncStat-1].w
class incStatHT:
    """Hash table of windowed incremental statistics, keyed by stream id.

    Minimizes the work needed to increment and retrieve statistics over many
    independent time-series in an online manner.  The library assumes the
    individual time-series are NOT sampled at the same time (i.e., fused), so
    each stream is updated individually with each corresponding value.  For
    the 1-dimensional case use key2 = '' (see incStatHT_2D for 2D); each
    lambda in L sets one decay window: 2^(-lambda*deltaT).
    """

    def __init__(self):
        self.HT = dict()

    # 1D streams only maintain weight, mean and variance
    def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):
        """Insert val into stream `key` (created on first use) and return its
        refreshed statistics."""
        entry = self.HT.get(key)
        if entry is None:
            entry = [windowed_incStat(L, isTypeJitter)]
            self.HT[key] = entry
        return entry[0].updateAndGetStats(val, timestamp)

    def getHeaders_1D(self, L):
        """Header names matching updateGet_1D's output for lambdas L."""
        return windowed_incStat(L).getHeaders()
class incStatHT_2D(incStatHT):
    """Hash table of joined 2D windowed statistics: the key1->key2 and
    key2->key1 channels of a pair are created together and share covariance
    accumulators."""

    def updateGet_2D(self, key1, key2, val, timestamp, L):  # keys should be strings
        """Insert val into the key1->key2 stream and return its statistics.

        Both direction entries are created (and joined) on first sight of the
        pair.  If the entry exists but was never joined to its reverse channel
        the join is repaired first, since updateStats would otherwise fail on
        the missing partner.
        """
        key = key1 + key2
        wis = self.HT.get(key)  # get windowed incremental stat object
        if wis is None:
            wis = self.create_2D_entry(key1, key2, L)
        elif hasattr(wis[0], 'other_winStat') and not wis[0].other_winStat:
            # fix: the original tested `== []`, which can never match the None
            # that windowed_incStat_2D.__init__ assigns, leaving an unjoined
            # entry to crash inside updateStats; treat any falsy value
            # (None or []) as "unjoined" and repair the join.
            self.create_1D_entry(key1, key2, L, wis)
        stats = wis[0].updateAndGetStats(val, timestamp)
        return stats

    def create_1D_entry(self, key1, key2, L, wis):  # protect with mutexes!
        """Create the reverse (key2->key1) entry and join it with wis."""
        wis_k2_k1 = [windowed_incStat_2D(L)]
        # connect net stats..
        wis[0].join_with_winStat(wis_k2_k1)
        # store
        self.HT[key2 + key1] = wis_k2_k1
        return wis_k2_k1

    def create_2D_entry(self, key1, key2, L):  # protect with mutexes!
        """Create and join both direction entries; return the key1->key2 one."""
        wis_k1_k2 = [windowed_incStat_2D(L)]
        wis_k2_k1 = [windowed_incStat_2D(L)]
        # connect net stats..
        wis_k1_k2[0].join_with_winStat(wis_k2_k1)
        # store
        self.HT[key1 + key2] = wis_k1_k2
        self.HT[key2 + key1] = wis_k2_k1
        return wis_k1_k2

    def getHeaders_2D(self, L):
        """Header names matching updateGet_2D's output for lambdas L."""
        tmp_incs = windowed_incStat_2D(L)
        return tmp_incs.getHeaders()
|
flexible
|
{
"blob_id": "7b2ca3db44c5f71c2975bd8af701dafca3b3d081",
"index": 5492,
"step-1": "<mask token>\n\n\nclass windowed_incStat:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def 
updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-2": "<mask token>\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n <mask token>\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n <mask token>\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n 
allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return 
self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-3": "<mask token>\n\n\nclass incStat:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n\n def cov(self, istat_ref):\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n if 
istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(\n istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(\n istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return 
allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = 
[windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-4": "<mask token>\n\n\nclass incStat:\n\n def __init__(self, Lambda, isTypeJitter=False):\n self.CF1 = 0\n self.CF2 = 0\n self.w = 0\n self.isTypeJitter = isTypeJitter\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def insert(self, v, t=0):\n if self.isTypeJitter:\n if not math.isnan(self.lastTimestamp):\n v = t - self.lastTimestamp\n else:\n v = 0\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def weight(self):\n return self.w\n\n def mean(self):\n if math.isnan(self.cur_mean):\n self.cur_mean = self.CF1 / self.w\n return self.cur_mean\n\n def var(self):\n if math.isnan(self.cur_var):\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))\n return self.cur_var\n\n def std(self):\n if math.isnan(self.cur_std):\n self.cur_std = math.sqrt(self.var())\n return self.cur_std\n\n def allstats(self):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n return self.w, self.cur_mean, self.cur_var\n\n def getHeaders(self):\n return 'weight', 'mean', 'variance'\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + 
v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n\n def cov(self, istat_ref):\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(\n istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(\n istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 
'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def 
join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-5": "import math\nimport numpy as np\n\n\nclass incStat:\n def __init__(self, Lambda, isTypeJitter=False): # timestamp is creation time\n self.CF1 = 0 # linear sum\n self.CF2 = 0 # sum of squares\n self.w = 0 # weight\n self.isTypeJitter = isTypeJitter\n self.Lambda = Lambda # Decay Factor\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def insert(self, v, t=0): # v is a scalar, t is v's arrival the timestamp\n if self.isTypeJitter:\n if not math.isnan(self.lastTimestamp):\n v = t - self.lastTimestamp\n else:\n v = 0\n self.processDecay(t)\n\n # update with v\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan # force recalculation if called\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def processDecay(self, timestamp):\n factor=1\n # check for decay\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, (-self.Lambda * timeDiff))\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def weight(self):\n return self.w\n\n def mean(self):\n if math.isnan(self.cur_mean): # calculate it only once when necessary\n self.cur_mean = self.CF1 / self.w\n return self.cur_mean\n\n def var(self):\n if math.isnan(self.cur_var): # calculate it only once when necessary\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))\n return self.cur_var\n\n def std(self):\n if math.isnan(self.cur_std): # calculate it only once when necessary\n self.cur_std = math.sqrt(self.var())\n return self.cur_std\n\n #calculates and pulls all stats\n def allstats(self):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n return self.w, self.cur_mean, self.cur_var\n\n def getHeaders(self):\n return \"weight\", \"mean\", \"variance\"\n\n#like incStat, but maintains 
stats between two streams\nclass incStat_2D(incStat):\n def __init__(self, Lambda): # timestamp is creation time\n self.CF1 = 0 # linear sum\n self.CF2 = 0 # sum of squares\n self.CF3 = None # sum of residules (A-uA)\n self.w = 0 # weight\n self.Lambda = Lambda # Decay Factor\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0 # the value of the last residule\n\n #other_incS_decay is the decay factor of the other incstat\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay = 1): # also updates covariance (expensive)\n self.processDecay(t)\n\n # update with v\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan # force recalculation if called\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n # check for decay\n factor=1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, (-self.Lambda * timeDiff))\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref): # the radius of two stats\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].var(), 2))\n\n def magnitude(self, istat_ref): # the magnitude of two stats\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))\n\n #covaince approximation using a hold-and-wait model\n def cov(self,istat_ref): # assumes that current time is the timestamp in 'self.lastTimestamp' is the current time\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return 
self.cur_cov\n\n # Pearson corl. coef (using a hold-and-wait model)\n def p_cc(self, istat_ref): # assumes that current time is the timestamp in 'self.lastTimestamp' is the current time\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n # calculates and pulls all stats\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return \"weight\", \"mean\", \"std\", \"magnitude\", \"radius\", \"covariance\", \"pcc\"\n\n\n# A set of 3 incremental statistics for a 1 or 2 dimensional time-series\nclass windowed_incStat:\n # Each lambda in the tuple L parameter determines a incStat's decay window size (factor)\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L,reverse=True) #largest lambda to smallest\n for l in self.L:\n self.incStats.append(incStat(l,isTypeJitter))\n\n # returns the weight, mean, and variance of each window\n def getStats(self):\n allstats = np.zeros(len(self.L)*3) #3 stats for each lambda\n for i in range(0,len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i*3:(i*3+3)] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0,len(self.incStats)):\n headers = headers + [\"L\"+str(self.L[i])+\"_\"+header for header in self.incStats[i].getHeaders()]\n return headers\n\n # updates 
the statistics\n # val is the new observation\n # timestamp is the arrival time of val.\n # lite only updates incrementals needed for weight, mean, variance, magnitude and radius\n def updateStats(self, val, timestamp):\n for i in range(0,len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n # First updates, then gets the stats (weight, mean, and variance only)\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self,t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n# A set of 3 incremental statistics for a 1 or 2 dimensional time-series\nclass windowed_incStat_2D:\n # Each lambda parameter in L determines a incStat's decay window size (factor)\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L,reverse=True) #largest lambda to smallest\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None # a mutable refernece [] to the windowed_incStat monitoring the other parallel time-series\n\n # returns the weight, mean, variance, radius, magnitude, and covariance and pcc of each window\n def getStats(self):\n allstats = np.zeros(len(self.L)*7) #6 stats for each lambda\n for i in range(0,len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].incStats[i]])\n allstats[i*7:(i*7+7)] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0,len(self.incStats)):\n headers = headers + [\"L\"+str(self.L[i])+\"_\"+header for header in self.incStats[i].getHeaders()]\n return headers\n\n # updates the statistics\n # val is the new observation\n # timestamp is the arrival time of val.\n def updateStats(self, val, timestamp):\n for i in range(0,len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0].incStats[i].last_residule, decay)\n\n # First 
updates, then gets the stats (weight, mean, variance, magnitude, radius, and covariance)\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n # Joins two windowed_incStat (e.g. rx and tx channels) together.\n # other_winStat should be a [] mutable object\n def join_with_winStat(self, other_winStat): # prectect with mutexes!\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0,len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self,t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat-1].processDecay(t)\n return self.incStats[lastIncStat-1].w\n\nclass incStatHT:\n # incStatHT maintains a python dictionary object (Hash Table) filled with a collection of windowed_incStats.\n # The purpose of the incStatHT is to minimize the number of operations in incrementing and retrieving statics on time-series in an online manner.\n # Note, this library is built in a manner which assumes that the individual time sereis are NOT sampled at the same time (i.e., fused), thus each stream should be updated individually with each corresponding value.\n\n # The current implementation can maintain 1-dimensional or 2-dimensional time series, and monitors three windows over each time-series.\n # If 1-dimensional, set key 2 to the empty string ''.\n # If 2-dimensional, key1 should be the target stream\n # Each lambda parameter determines a incStat's decay window size (factor): 2^(-lambda*deltaT)\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False): # 1D will only maintain the mean and variance\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L,isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self,L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\nclass 
incStatHT_2D(incStatHT):\n def updateGet_2D(self, key1, key2, val, timestamp, L): # src and dst should be strings\n key = key1 + key2\n wis = self.HT.get(key) # get windowed incrimental stat object\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0],'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1,key2,L,wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis): # prectect with mutexes!\n # create\n wis_k2_k1 = [windowed_incStat_2D(L)]\n # connect net stats..\n wis[0].join_with_winStat(wis_k2_k1)\n # store\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L): # prectect with mutexes!\n # create\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n # connect net stats..\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n # store\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self,L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-ids": [
18,
32,
35,
44,
46
]
}
|
[
18,
32,
35,
44,
46
] |
<|reserved_special_token_0|>
class BaseApplication(Shop):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseApplication(Shop):
catalogue_app = catalogue_app
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseApplication(Shop):
catalogue_app = catalogue_app
application = BaseApplication()
<|reserved_special_token_1|>
from oscar.app import Shop
from apps.catalogue.app import application as catalogue_app
# Oscar Shop subclass that swaps in the project's own catalogue application.
class BaseApplication(Shop):
    # Override the default catalogue app with the local apps.catalogue version.
    catalogue_app = catalogue_app
# Module-level application instance, typically consumed by the URL configuration.
application = BaseApplication()
|
flexible
|
{
"blob_id": "c8bb6ead7e305f466e24b47811d6ed38c8cfec0a",
"index": 2691,
"step-1": "<mask token>\n\n\nclass BaseApplication(Shop):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\napplication = BaseApplication()\n",
"step-4": "from oscar.app import Shop\nfrom apps.catalogue.app import application as catalogue_app\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\napplication = BaseApplication()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_launch_description():
cart_sdf = os.path.join(get_package_share_directory('crs_support'),
'sdf', 'cart.sdf')
cart_spawner = launch_ros.actions.Node(node_name='spawn_node', package=
'gazebo_ros', node_executable='spawn_entity.py', arguments=[
'-entity', 'cart', '-x', '0', '-y', '0.2', '-z', '0.05', '-file',
cart_sdf])
return launch.LaunchDescription([cart_spawner])
<|reserved_special_token_1|>
import os
from pathlib import Path
import shutil
from ament_index_python.packages import get_package_share_directory, get_package_prefix
import launch
import launch_ros.actions
def generate_launch_description():
    """Build a LaunchDescription that spawns the cart model in Gazebo.

    Locates ``cart.sdf`` inside the ``crs_support`` package share directory
    and creates a ``spawn_entity.py`` node that inserts the model at a
    fixed pose.
    """
    sdf_path = os.path.join(
        get_package_share_directory('crs_support'), 'sdf', 'cart.sdf')
    spawn_cart = launch_ros.actions.Node(
        node_name='spawn_node',
        package='gazebo_ros',
        node_executable='spawn_entity.py',
        arguments=['-entity', 'cart',
                   '-x', '0', '-y', '0.2', '-z', '0.05',
                   '-file', sdf_path])
    return launch.LaunchDescription([spawn_cart])
|
flexible
|
{
"blob_id": "cc74163d5dbcc2b2ca0fe5222692f6f5e45f73fe",
"index": 2377,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_launch_description():\n cart_sdf = os.path.join(get_package_share_directory('crs_support'),\n 'sdf', 'cart.sdf')\n cart_spawner = launch_ros.actions.Node(node_name='spawn_node', package=\n 'gazebo_ros', node_executable='spawn_entity.py', arguments=[\n '-entity', 'cart', '-x', '0', '-y', '0.2', '-z', '0.05', '-file',\n cart_sdf])\n return launch.LaunchDescription([cart_spawner])\n",
"step-3": "import os\nfrom pathlib import Path\nimport shutil\nfrom ament_index_python.packages import get_package_share_directory, get_package_prefix\nimport launch\nimport launch_ros.actions\n\n\ndef generate_launch_description():\n cart_sdf = os.path.join(get_package_share_directory('crs_support'),\n 'sdf', 'cart.sdf')\n cart_spawner = launch_ros.actions.Node(node_name='spawn_node', package=\n 'gazebo_ros', node_executable='spawn_entity.py', arguments=[\n '-entity', 'cart', '-x', '0', '-y', '0.2', '-z', '0.05', '-file',\n cart_sdf])\n return launch.LaunchDescription([cart_spawner])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from abc import ABC, abstractmethod, abstractproperty
from pytz import timezone
class EngageScraper(ABC):
    """Abstract base for scrapers that collect and store meeting agendas.

    Concrete subclasses decide how agendas are located, downloaded, parsed,
    and persisted; this base only tracks the list of agenda locations and
    the scraper's timezone.
    """

    def __init__(self, tz_string):
        super().__init__()
        # Locations (e.g. URLs) discovered by get_available_agendas().
        self._agenda_locations = []
        # Timezone used when interpreting meeting times.
        self._tz = timezone(tz_string)

    @property
    def agenda_locations(self):
        """Locations of the agendas this scraper will process."""
        return self._agenda_locations

    @agenda_locations.setter
    def agenda_locations(self, locations):
        self._agenda_locations = locations

    @abstractmethod
    def get_available_agendas(self):
        """Discover available agendas and set ``self._agenda_locations``.

        In a typical HTML scraper these resources would be HTTP URLs.
        """

    @abstractmethod
    def scrape(self):
        """Process every agenda in ``self._agenda_locations``.

        Calls the agenda-processing pipeline on data downloaded from each
        location; the end result is the stored agendas and agenda items.
        """

    @abstractmethod
    def _process_agenda(self, agenda_data, meeting_id):
        """Process one agenda document (e.g. one HTML document).

        Each of the agenda's items must be processed as well: if items are
        embedded in the agenda, hand their data to ``_process_agenda_item``;
        otherwise call ``_scrape_agenda_item`` with each item's location.
        Returns a dict suitable for ``_store_agenda`` /
        ``_store_agenda_items``.
        """

    @abstractmethod
    def _scrape_agenda_item(self, agenda_item_location):
        """Fetch an item's data from its location, then process it via
        ``_process_agenda_item``."""

    @abstractmethod
    def _process_agenda_item(self, agenda_item_data, agenda_item_id,
        meeting_id, meeting_time):
        """Return a dict for one agenda item, storable by
        ``_store_agenda_items``."""

    @abstractmethod
    def _store_agenda(self, processed_agenda, committee):
        """Persist the main agenda content (DB calls belong here)."""

    @abstractmethod
    def _store_agenda_items(self, agenda_dict, agenda_saved):
        """Persist the agenda item content (DB calls belong here)."""
|
normal
|
{
"blob_id": "ec224924206c41cf8203c6aa8002ddf6b0e70e9b",
"index": 1116,
"step-1": "<mask token>\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n <mask token>\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n\n @abstractmethod\n def scrape(self):\n \"\"\"\n Scrape processes all agendas in self._agenda_locations\n It calls process agenda on all items in _agenda_locations with \n data downloaded from those locations.\n The result of scrape is the stored agendas and agenda items.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. 
If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda_item(self, agenda_item_data, agenda_item_id,\n meeting_id, meeting_time):\n \"\"\"\n The result of process agenda item will be a dict that can be stored by store_agenda_item\n \"\"\"\n pass\n <mask token>\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n\n @abstractmethod\n def scrape(self):\n \"\"\"\n Scrape processes all agendas in self._agenda_locations\n It calls process agenda on all items in _agenda_locations with \n data downloaded from those locations.\n The result of scrape is the stored agendas and agenda items.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. 
If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda_item(self, agenda_item_data, agenda_item_id,\n meeting_id, meeting_time):\n \"\"\"\n The result of process agenda item will be a dict that can be stored by store_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda(self, processed_agenda, committee):\n \"\"\"\n Calls to DB should be here for the main agenda content\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-4": "from abc import ABC, abstractmethod, abstractproperty\nfrom pytz import timezone\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n\n @abstractmethod\n def scrape(self):\n \"\"\"\n Scrape processes all agendas in self._agenda_locations\n It calls process agenda on all items in _agenda_locations with \n data downloaded from those locations.\n The result of scrape is the stored agendas and agenda items.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. 
If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda_item(self, agenda_item_data, agenda_item_id,\n meeting_id, meeting_time):\n \"\"\"\n The result of process agenda item will be a dict that can be stored by store_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda(self, processed_agenda, committee):\n \"\"\"\n Calls to DB should be here for the main agenda content\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-5": null,
"step-ids": [
8,
10,
11,
12
]
}
|
[
8,
10,
11,
12
] |
<|reserved_special_token_0|>
class ProtestDataset(Dataset):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __len__(self):
return len(self.label_frame)
<|reserved_special_token_0|>
class ProtestDatasetEval(Dataset):
"""
dataset for just calculating the output (does not need an annotation file)
"""
def __init__(self, img_dir):
"""
Args:
img_dir: Directory with images
"""
self.img_dir = img_dir
self.transform = transforms.Compose([transforms.Resize(125),
transforms.CenterCrop(100), transforms.Grayscale(
num_output_channels=1), transforms.ToTensor(), transforms.
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.img_list = sorted(os.listdir(img_dir))
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.img_list[idx])
image = pil_loader(imgpath)
sample = {'imgpath': imgpath, 'image': image}
sample['image'] = self.transform(sample['image'])
return sample
class FinalLayer(nn.Module):
"""modified last layer for resnet50 for our dataset"""
def __init__(self):
super(FinalLayer, self).__init__()
self.fc = nn.Linear(2048, 12)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.fc(x)
out = self.sigmoid(out)
return out
<|reserved_special_token_0|>
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count != 0:
self.avg = self.sum / self.count
class Lighting(object):
"""
Lighting noise(AlexNet - style PCA - based noise)
https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(
3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProtestDataset(Dataset):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __len__(self):
return len(self.label_frame)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])
image = pil_loader(imgpath)
protest = self.label_frame.iloc[idx, 1:2].values.astype('float')
violence = self.label_frame.iloc[idx, 2:3].values.astype('float')
visattr = self.label_frame.iloc[idx, 3:].values.astype('float')
label = {'protest': protest, 'violence': violence, 'visattr': visattr}
sample = {'image': image, 'label': label}
if self.transform:
sample['image'] = self.transform(sample['image'])
return sample
class ProtestDatasetEval(Dataset):
"""
dataset for just calculating the output (does not need an annotation file)
"""
def __init__(self, img_dir):
"""
Args:
img_dir: Directory with images
"""
self.img_dir = img_dir
self.transform = transforms.Compose([transforms.Resize(125),
transforms.CenterCrop(100), transforms.Grayscale(
num_output_channels=1), transforms.ToTensor(), transforms.
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.img_list = sorted(os.listdir(img_dir))
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.img_list[idx])
image = pil_loader(imgpath)
sample = {'imgpath': imgpath, 'image': image}
sample['image'] = self.transform(sample['image'])
return sample
class FinalLayer(nn.Module):
"""modified last layer for resnet50 for our dataset"""
def __init__(self):
super(FinalLayer, self).__init__()
self.fc = nn.Linear(2048, 12)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.fc(x)
out = self.sigmoid(out)
return out
<|reserved_special_token_0|>
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count != 0:
self.avg = self.sum / self.count
class Lighting(object):
"""
Lighting noise(AlexNet - style PCA - based noise)
https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(
3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProtestDataset(Dataset):
<|reserved_special_token_0|>
def __init__(self, txt_file, img_dir, transform=None):
"""
Args:
txt_file: Path to txt file with annotation
img_dir: Directory with images
transform: Optional transform to be applied on a sample.
"""
self.label_frame = pd.read_csv(txt_file, delimiter='\t').replace('-', 0
)
self.img_dir = img_dir
self.transform = transform
def __len__(self):
return len(self.label_frame)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])
image = pil_loader(imgpath)
protest = self.label_frame.iloc[idx, 1:2].values.astype('float')
violence = self.label_frame.iloc[idx, 2:3].values.astype('float')
visattr = self.label_frame.iloc[idx, 3:].values.astype('float')
label = {'protest': protest, 'violence': violence, 'visattr': visattr}
sample = {'image': image, 'label': label}
if self.transform:
sample['image'] = self.transform(sample['image'])
return sample
class ProtestDatasetEval(Dataset):
"""
dataset for just calculating the output (does not need an annotation file)
"""
def __init__(self, img_dir):
"""
Args:
img_dir: Directory with images
"""
self.img_dir = img_dir
self.transform = transforms.Compose([transforms.Resize(125),
transforms.CenterCrop(100), transforms.Grayscale(
num_output_channels=1), transforms.ToTensor(), transforms.
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.img_list = sorted(os.listdir(img_dir))
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.img_list[idx])
image = pil_loader(imgpath)
sample = {'imgpath': imgpath, 'image': image}
sample['image'] = self.transform(sample['image'])
return sample
class FinalLayer(nn.Module):
"""modified last layer for resnet50 for our dataset"""
def __init__(self):
super(FinalLayer, self).__init__()
self.fc = nn.Linear(2048, 12)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.fc(x)
out = self.sigmoid(out)
return out
<|reserved_special_token_0|>
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count != 0:
self.avg = self.sum / self.count
class Lighting(object):
"""
Lighting noise(AlexNet - style PCA - based noise)
https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(
3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProtestDataset(Dataset):
"""
dataset for training and evaluation
"""
def __init__(self, txt_file, img_dir, transform=None):
"""
Args:
txt_file: Path to txt file with annotation
img_dir: Directory with images
transform: Optional transform to be applied on a sample.
"""
self.label_frame = pd.read_csv(txt_file, delimiter='\t').replace('-', 0
)
self.img_dir = img_dir
self.transform = transform
def __len__(self):
return len(self.label_frame)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])
image = pil_loader(imgpath)
protest = self.label_frame.iloc[idx, 1:2].values.astype('float')
violence = self.label_frame.iloc[idx, 2:3].values.astype('float')
visattr = self.label_frame.iloc[idx, 3:].values.astype('float')
label = {'protest': protest, 'violence': violence, 'visattr': visattr}
sample = {'image': image, 'label': label}
if self.transform:
sample['image'] = self.transform(sample['image'])
return sample
class ProtestDatasetEval(Dataset):
"""
dataset for just calculating the output (does not need an annotation file)
"""
def __init__(self, img_dir):
"""
Args:
img_dir: Directory with images
"""
self.img_dir = img_dir
self.transform = transforms.Compose([transforms.Resize(125),
transforms.CenterCrop(100), transforms.Grayscale(
num_output_channels=1), transforms.ToTensor(), transforms.
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.img_list = sorted(os.listdir(img_dir))
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
imgpath = os.path.join(self.img_dir, self.img_list[idx])
image = pil_loader(imgpath)
sample = {'imgpath': imgpath, 'image': image}
sample['image'] = self.transform(sample['image'])
return sample
class FinalLayer(nn.Module):
"""modified last layer for resnet50 for our dataset"""
def __init__(self):
super(FinalLayer, self).__init__()
self.fc = nn.Linear(2048, 12)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.fc(x)
out = self.sigmoid(out)
return out
<|reserved_special_token_0|>
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count != 0:
self.avg = self.sum / self.count
class Lighting(object):
"""
Lighting noise(AlexNet - style PCA - based noise)
https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(
3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
<|reserved_special_token_0|>
def train(train_loader, model, criterions, optimizer, epoch):
"""training the model"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
loss_protest = AverageMeter()
loss_v = AverageMeter()
protest_acc = AverageMeter()
violence_mse = AverageMeter()
visattr_acc = AverageMeter()
end = time.time()
loss_history = []
for i, sample in enumerate(train_loader):
input, target = sample['image'], sample['label']
data_time.update(time.time() - end)
if args.cuda:
input = input.cuda()
for k, v in target.items():
target[k] = v.cuda()
target_var = {}
for k, v in target.items():
target_var[k] = Variable(v)
input_var = Variable(input)
output = model(input_var)
losses, scores, N_protest = calculate_loss(output, target_var,
criterions)
optimizer.zero_grad()
loss = 0
for l in losses:
loss += l
loss.backward()
optimizer.step()
if N_protest:
loss_protest.update(losses[0].data, input.size(0))
loss_v.update(loss.data - losses[0].data, N_protest)
else:
loss_protest.update(losses[0].data, input.size(0))
loss_history.append(loss.data)
protest_acc.update(scores['protest_acc'], input.size(0))
violence_mse.update(scores['violence_mse'], N_protest)
visattr_acc.update(scores['visattr_acc'], N_protest)
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(
'Epoch: [{0}][{1}/{2}] Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
.format(epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss_val=loss_protest.val + loss_v.val,
loss_avg=loss_protest.avg + loss_v.avg, protest_acc=
protest_acc, violence_mse=violence_mse, visattr_acc=
visattr_acc))
return loss_history
def validate(val_loader, model, criterions, epoch):
    """Evaluate the model on the validation set.

    Args:
        val_loader: DataLoader yielding {'image', 'label'} samples.
        model: network whose output is consumed by calculate_loss.
        criterions: loss functions, forwarded to calculate_loss.
        epoch: epoch number, used only for progress logging.

    Returns:
        tuple of (average total loss, per-batch loss history list).
    """
    model.eval()
    # Running meters for timing and per-task metrics.
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(val_loader):
        input, target = sample['image'], sample['label']
        if args.cuda:  # NOTE: relies on the module-level `args` namespace
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        input_var = Variable(input)
        target_var = {}
        for k, v in target.items():
            target_var[k] = Variable(v)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var,
            criterions)
        # Total loss is the sum of the per-task losses (no backward pass here).
        loss = 0
        for l in losses:
            loss += l
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # NOTE(review): both branches update loss_protest identically;
            # only the loss_v update is conditional on N_protest.
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.2f} ({batch_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                .format(epoch, i, len(val_loader), batch_time=batch_time,
                loss_val=loss_protest.val + loss_v.val, loss_avg=
                loss_protest.avg + loss_v.avg, protest_acc=protest_acc,
                violence_mse=violence_mse, visattr_acc=visattr_acc))
    print(
        ' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} Violence MSE {violence_mse.avg:.5f} Vis Attr Acc {visattr_acc.avg:.3f} '
        .format(loss_avg=loss_protest.avg + loss_v.avg, protest_acc=
        protest_acc, violence_mse=violence_mse, visattr_acc=visattr_acc))
    return loss_protest.avg + loss_v.avg, loss_history
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# coding: utf-8
# In[5]:
import os
import numpy as np
import pandas as pd
from PIL import Image
import argparse
import time
import shutil
from sklearn.metrics import accuracy_score, mean_squared_error
import torch
import torch.optim
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.models as models
import matplotlib.image as mpimg
class ProtestDataset(Dataset):
    """Protest-image dataset for training and evaluation.

    Each sample is a dict with the loaded image and a label dict holding
    the binary protest flag, the violence score, and the ten
    visual-attribute flags from the annotation file.
    """
    def __init__(self, txt_file, img_dir, transform = None):
        """
        Args:
            txt_file: path to the tab-separated annotation file
            img_dir: directory containing the images
            transform: optional transform applied to each image
        """
        # '-' marks unfilled annotation fields; treat them as 0.
        self.label_frame = pd.read_csv(txt_file, delimiter="\t").replace('-', 0)
        self.img_dir = img_dir
        self.transform = transform

    def __len__(self):
        return len(self.label_frame)

    def __getitem__(self, idx):
        # Column 0 holds the image filename; columns 1+ hold the labels.
        img_path = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])
        img = pil_loader(img_path)
        label = {
            'protest': self.label_frame.iloc[idx, 1:2].values.astype('float'),
            'violence': self.label_frame.iloc[idx, 2:3].values.astype('float'),
            'visattr': self.label_frame.iloc[idx, 3:].values.astype('float'),
        }
        sample = {"image": img, "label": label}
        if self.transform:
            sample["image"] = self.transform(sample["image"])
        return sample
class ProtestDatasetEval(Dataset):
    """
    dataset for just calculating the output (does not need an annotation file)
    """
    def __init__(self, img_dir):
        """
        Args:
            img_dir: directory containing the images to score
        """
        self.img_dir = img_dir
        # NOTE(review): Grayscale(num_output_channels=1) followed by a
        # three-channel Normalize looks inconsistent -- confirm the intended
        # channel count before relying on this pipeline.
        self.transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        # Sort for a deterministic image order.
        self.img_list = sorted(os.listdir(img_dir))

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        # Keep the path in the sample so callers can match outputs to files.
        path = os.path.join(self.img_dir, self.img_list[idx])
        sample = {"imgpath": path, "image": pil_loader(path)}
        sample["image"] = self.transform(sample["image"])
        return sample
class FinalLayer(nn.Module):
    """Replacement head for resnet50: a 2048->12 linear layer with sigmoid.

    Per the index tensors defined below, column 0 is the protest output,
    column 1 the violence output, and columns 2-11 the visual attributes.
    """
    def __init__(self):
        super(FinalLayer, self).__init__()
        self.fc = nn.Linear(2048, 12)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Logits through the linear layer, squashed to (0, 1).
        return self.sigmoid(self.fc(x))
def pil_loader(path):
    """Open the image file at *path* and return it as an RGB PIL image."""
    with open(path, 'rb') as fp:
        return Image.open(fp).convert('RGB')
def modified_resnet():
    """Return a pretrained resnet50 whose final fully connected layer is
    replaced by FinalLayer (12 sigmoid outputs).

    Uses pretrained=True, so torchvision loads (and may download) the
    ImageNet weights.
    """
    # load pretrained resnet with a modified last fully connected layer
    model = models.resnet50(pretrained = True)
    model.fc = FinalLayer()
    return model
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        # Guard against division by zero when n == 0 on the first update.
        if self.count:
            self.avg = self.sum / self.count
class Lighting(object):
    """
    Lighting noise(AlexNet - style PCA - based noise)
    https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        # no-op when the noise magnitude is zero
        if self.alphastd == 0:
            return img
        # draw one alpha per principal component from N(0, alphastd)
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # per-channel RGB shift: sum_j eigvec[:, j] * alpha[j] * eigval[j]
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()
        # broadcast the shift over the (C, H, W) image
        return img.add(rgb.view(3, 1, 1).expand_as(img))
# for indexing output of the model
# column 0 of the 12-dim model output: binary protest label
protest_idx = Variable(torch.LongTensor([0]))
# column 1: violence regression target
violence_idx = Variable(torch.LongTensor([1]))
# columns 2-11: the ten visual-attribute labels
visattr_idx = Variable(torch.LongTensor(range(2,12)))
# best validation loss seen so far; updated in main()
best_loss = float("inf")
def calculate_loss(output, target, criterions, weights = (1, 10, 5)):
    """Compute weighted per-task losses and batch metrics.

    Args:
        output: model output of shape (batch, 12) -- column 0 protest,
            column 1 violence, columns 2-11 visual attributes (selected via
            the module-level protest_idx / violence_idx / visattr_idx).
        target: dict with 'protest', 'violence' and 'visattr' tensors.
        criterions: [protest, violence, visattr] loss functions.
        weights: per-task loss weights. Changed from a mutable list default
            to a tuple (same values) to avoid the shared-mutable-default
            pitfall.

    Returns:
        (losses, scores, N_protest): list of weighted task losses, dict of
        batch metrics, and the number of protest images in the batch.
    """
    # number of protest images
    N_protest = int(target['protest'].data.sum())

    if N_protest == 0:
        # no protest image in the batch: only the protest head contributes
        outputs = [None]
        # protest output
        outputs[0] = output.index_select(1, protest_idx)
        targets = [None]
        # protest target
        targets[0] = target['protest'].float()
        losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]
        scores = {}
        scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)
        # violence / visattr metrics are undefined without protest images
        scores['violence_mse'] = 0
        scores['visattr_acc'] = 0
        return losses, scores, N_protest

    # used for filling 0 for non-protest images
    not_protest_mask = (1 - target['protest']).byte()

    # was [None] * 4 -- only three task slots are ever used
    outputs = [None] * 3
    # protest output
    outputs[0] = output.index_select(1, protest_idx)
    # violence output, zeroed in place for non-protest images
    outputs[1] = output.index_select(1, violence_idx)
    outputs[1].masked_fill_(not_protest_mask, 0)
    # visual attribute output, zeroed in place for non-protest images
    outputs[2] = output.index_select(1, visattr_idx)
    outputs[2].masked_fill_(not_protest_mask.repeat(1, 10), 0)

    targets = [None] * 3
    targets[0] = target['protest'].float()
    targets[1] = target['violence'].float()
    targets[2] = target['visattr'].float()

    scores = {}
    # protest accuracy for this batch
    scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)
    # violence MSE for this batch, averaged over protest images only
    scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)
    # mean accuracy for visual attributes, counted over protest images only
    comparison = (outputs[2].data.round() == targets[2].data)
    comparison.masked_fill_(not_protest_mask.repeat(1, 10).data, 0)
    n_right = comparison.float().sum()
    mean_acc = n_right / float(N_protest * 10)
    scores['visattr_acc'] = mean_acc

    # return weighted loss
    losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]

    return losses, scores, N_protest
def train(train_loader, model, criterions, optimizer, epoch):
    """Run one training epoch and return the per-batch loss history.

    NOTE(review): reads the module-level `args` namespace (cuda, print_freq).
    """
    model.train()
    # running meters for timing and metrics over the epoch
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(train_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        data_time.update(time.time() - end)
        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)
        input_var = Variable(input)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        optimizer.zero_grad()
        # total loss is the sum of the weighted per-task losses
        loss = 0
        for l in losses:
            loss += l
        # back prop
        loss.backward()
        optimizer.step()
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            # remaining (violence + visattr) loss, averaged over protest images
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when there is no protest image in the batch
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}] '
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '
                  'Data {data_time.val:.2f} ({data_time.avg:.2f}) '
                  'Loss {loss_val:.3f} ({loss_avg:.3f}) '
                  'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '
                  'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '
                  'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time,
                      loss_val=loss_protest.val + loss_v.val,
                      loss_avg = loss_protest.avg + loss_v.avg,
                      protest_acc = protest_acc, violence_mse = violence_mse,
                      visattr_acc = visattr_acc))
    return loss_history
def validate(val_loader, model, criterions, epoch):
    """Run one validation pass; return (avg combined loss, loss history).

    NOTE(review): runs without torch.no_grad(), so autograd state is still
    tracked during validation -- confirm whether that is intended.
    """
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()  # NOTE(review): created but never updated here
    loss_protest = AverageMeter()
    loss_v = AverageMeter()
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(val_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        input_var = Variable(input)
        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        # total loss is the sum of the weighted per-task losses
        loss = 0
        for l in losses:
            loss += l
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            # remaining (violence + visattr) loss, averaged over protest images
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when no protest images
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '
                  'Loss {loss_val:.3f} ({loss_avg:.3f}) '
                  'Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '
                  'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '
                  'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                      epoch, i, len(val_loader), batch_time=batch_time,
                      loss_val =loss_protest.val + loss_v.val,
                      loss_avg = loss_protest.avg + loss_v.avg,
                      protest_acc = protest_acc,
                      violence_mse = violence_mse, visattr_acc = visattr_acc))
    print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '
          'Violence MSE {violence_mse.avg:.5f} '
          'Vis Attr Acc {visattr_acc.avg:.3f} '
          .format(loss_avg = loss_protest.avg + loss_v.avg,
                  protest_acc = protest_acc,
                  violence_mse = violence_mse, visattr_acc = visattr_acc))
    return loss_protest.avg + loss_v.avg, loss_history
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 0.4 every 4 epochs"""
    # (docstring corrected: previous text said 0.5 every 5 epochs, but the
    # formula below decays by 0.4 every 4 epochs)
    lr = args.lr * (0.4 ** (epoch // 4))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; mirror it to model_best.pth.tar
    whenever this checkpoint is the best seen so far."""
    torch.save(state, filename)
    if not is_best:
        return
    # keep a stable copy of the best-performing checkpoint
    shutil.copyfile(filename, 'model_best.pth.tar')
def main():
    """Build the model and data pipelines, optionally resume from a
    checkpoint, then run the train/validate loop and save checkpoints."""
    global best_loss
    loss_history_train = []
    loss_history_val = []
    data_dir = args.data_dir
    img_dir_train = os.path.join(data_dir, "train")
    img_dir_val = os.path.join(data_dir, "test")
    txt_file_train = os.path.join(data_dir, "annot_train.txt")
    txt_file_val = os.path.join(data_dir, "annot_test.txt")
    # load pretrained resnet50 with a modified last fully connected layer
    model = modified_resnet()
    # we need three different criterion for training
    criterion_protest = nn.BCELoss()
    criterion_violence = nn.MSELoss()
    criterion_visattr = nn.BCELoss()
    criterions = [criterion_protest, criterion_violence, criterion_visattr]
    if args.cuda and not torch.cuda.is_available():
        raise Exception("No GPU Found")
    if args.cuda:
        model = model.cuda()
        criterions = [criterion.cuda() for criterion in criterions]
    # we are not training the frozen layers
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(
        parameters, args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
        )
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            # NOTE(review): start_epoch is assigned twice (here and two lines
            # above); the second assignment is redundant
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            loss_history_train = checkpoint['loss_history_train']
            loss_history_val = checkpoint['loss_history_val']
            if args.change_lr:
                # override the checkpoint's optimizer state with a fresh lr
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            else:
                optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # PCA eigenvalues/eigenvectors for AlexNet-style lighting augmentation
    eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
    eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
    train_dataset = ProtestDataset(
        txt_file = txt_file_train,
        img_dir = img_dir_train,
        transform = transforms.Compose([
            transforms.RandomResizedCrop(100),
            transforms.RandomRotation(30),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness = 0.4,
                contrast = 0.7,
                saturation = 0.4,
                ),
            transforms.ToTensor(),
            Lighting(0.1, eigval, eigvec),
            normalize,
        ]))
    val_dataset = ProtestDataset(
        txt_file = txt_file_val,
        img_dir = img_dir_val,
        transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.ToTensor(),
            normalize,
        ]))
    train_loader = DataLoader(
        train_dataset,
        num_workers = args.workers,
        batch_size = args.batch_size,
        shuffle = True
        )
    val_loader = DataLoader(
        val_dataset,
        num_workers = args.workers,
        batch_size = args.batch_size)
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        loss_history_train_this = train(train_loader, model, criterions,
                                        optimizer, epoch)
        loss_val, loss_history_val_this = validate(val_loader, model,
                                                   criterions, epoch)
        loss_history_train.append(loss_history_train_this)
        loss_history_val.append(loss_history_val_this)
        # lower combined validation loss == better model
        is_best = loss_val < best_loss
        if is_best:
            print('best model!!')
        best_loss = min(loss_val, best_loss)
        save_checkpoint({
            'epoch' : epoch + 1,
            'state_dict' : model.state_dict(),
            'best_loss' : best_loss,
            'optimizer' : optimizer.state_dict(),
            'loss_history_train': loss_history_train,
            'loss_history_val': loss_history_val
        }, is_best)
if __name__ == "__main__":
    # CLI options; parse_known_args ignores unknown flags (e.g. notebook args)
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir",
                        type=str,
                        default = "",
                        help = "directory path to dataset",
                        )
    parser.add_argument("--cuda",
                        action = "store_true",
                        help = "use cuda?",
                        )
    parser.add_argument("--workers",
                        type = int,
                        default = 0,
                        help = "number of workers",
                        )
    parser.add_argument("--batch_size",
                        type = int,
                        default = 8,
                        help = "batch size",
                        )
    parser.add_argument("--epochs",
                        type = int,
                        default = 10,
                        help = "number of epochs",
                        )
    parser.add_argument("--weight_decay",
                        type = float,
                        default = 1e-4,
                        help = "weight decay",
                        )
    parser.add_argument("--lr",
                        type = float,
                        default = 0.01,
                        help = "learning rate",
                        )
    parser.add_argument("--momentum",
                        type = float,
                        default = 0.9,
                        help = "momentum",
                        )
    parser.add_argument("--print_freq",
                        type = int,
                        default = 10,
                        help = "print frequency",
                        )
    parser.add_argument('--resume',
                        default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--change_lr',
                        action = "store_true",
                        help = "Use this if you want to \
                        change learning rate when resuming")
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    args, unknown = parser.parse_known_args()
    # move the index tensors to GPU so index_select in calculate_loss
    # operates on same-device tensors
    if args.cuda:
        protest_idx = protest_idx.cuda()
        violence_idx = violence_idx.cuda()
        visattr_idx = visattr_idx.cuda()
    main()
|
flexible
|
{
"blob_id": "f3a3746c48617754aad5ae8d0d7a0b8908c34562",
"index": 7852,
"step-1": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.label_frame)\n <mask token>\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = 
eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n 
self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n <mask token>\n\n def __init__(self, txt_file, img_dir, transform=None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter='\\t').replace('-', 0\n )\n self.img_dir = img_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def __init__(self):\n super(FinalLayer, 
self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ProtestDataset(Dataset):\n \"\"\"\n dataset for training and evaluation\n \"\"\"\n\n def __init__(self, txt_file, img_dir, transform=None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter='\\t').replace('-', 0\n )\n self.img_dir = img_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.label_frame)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest': protest, 'violence': violence, 'visattr': visattr}\n sample = {'image': image, 'label': label}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([transforms.Resize(125),\n transforms.CenterCrop(100), transforms.Grayscale(\n num_output_channels=1), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.img_list = sorted(os.listdir(img_dir))\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir, self.img_list[idx])\n image = pil_loader(imgpath)\n sample = {'imgpath': imgpath, 'image': image}\n sample['image'] = self.transform(sample['image'])\n return sample\n\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n\n def 
__init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\n<mask token>\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone().mul(alpha.view(1, 3).expand(\n 3, 3)).mul(self.eigval.view(1, 3).expand(3, 3)).sum(1).squeeze()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\n<mask token>\n\n\ndef train(train_loader, model, criterions, optimizer, epoch):\n \"\"\"training the model\"\"\"\n model.train()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n end = time.time()\n loss_history = []\n for i, sample in enumerate(train_loader):\n input, target = sample['image'], sample['label']\n data_time.update(time.time() - end)\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n target_var = {}\n for k, v in target.items():\n target_var[k] = Variable(v)\n input_var = Variable(input)\n output = model(input_var)\n losses, scores, N_protest = 
calculate_loss(output, target_var,\n criterions)\n optimizer.zero_grad()\n loss = 0\n for l in losses:\n loss += l\n loss.backward()\n optimizer.step()\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}] Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss_val=loss_protest.val + loss_v.val,\n loss_avg=loss_protest.avg + loss_v.avg, protest_acc=\n protest_acc, violence_mse=violence_mse, visattr_acc=\n visattr_acc))\n return loss_history\n\n\ndef validate(val_loader, model, criterions, epoch):\n \"\"\"Validating\"\"\"\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n end = time.time()\n loss_history = []\n for i, sample in enumerate(val_loader):\n input, target = sample['image'], sample['label']\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n input_var = Variable(input)\n target_var = {}\n for k, v in target.items():\n target_var[k] = Variable(v)\n output = model(input_var)\n losses, scores, N_protest = calculate_loss(output, target_var,\n criterions)\n loss = 0\n 
for l in losses:\n loss += l\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\tTime {batch_time.val:.2f} ({batch_time.avg:.2f}) Loss {loss_val:.3f} ({loss_avg:.3f}) Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f}) Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(epoch, i, len(val_loader), batch_time=batch_time,\n loss_val=loss_protest.val + loss_v.val, loss_avg=\n loss_protest.avg + loss_v.avg, protest_acc=protest_acc,\n violence_mse=violence_mse, visattr_acc=visattr_acc))\n print(\n ' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} Violence MSE {violence_mse.avg:.5f} Vis Attr Acc {visattr_acc.avg:.3f} '\n .format(loss_avg=loss_protest.avg + loss_v.avg, protest_acc=\n protest_acc, violence_mse=violence_mse, visattr_acc=visattr_acc))\n return loss_protest.avg + loss_v.avg, loss_history\n\n\n<mask token>\n",
"step-5": "\n# coding: utf-8\n\n# In[5]:\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport argparse\nimport time\nimport shutil\nfrom sklearn.metrics import accuracy_score, mean_squared_error\n\nimport torch\nimport torch.optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport matplotlib.image as mpimg\n\nclass ProtestDataset(Dataset):\n \"\"\"\n dataset for training and evaluation\n \"\"\"\n def __init__(self, txt_file, img_dir, transform = None):\n \"\"\"\n Args:\n txt_file: Path to txt file with annotation\n img_dir: Directory with images\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.label_frame = pd.read_csv(txt_file, delimiter=\"\\t\").replace('-', 0)\n self.img_dir = img_dir\n self.transform = transform\n def __len__(self):\n return len(self.label_frame)\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir,\n self.label_frame.iloc[idx, 0])\n image = pil_loader(imgpath)\n \n protest = self.label_frame.iloc[idx, 1:2].values.astype('float')\n violence = self.label_frame.iloc[idx, 2:3].values.astype('float')\n visattr = self.label_frame.iloc[idx, 3:].values.astype('float')\n label = {'protest':protest, 'violence':violence, 'visattr':visattr}\n\n sample = {\"image\":image, \"label\":label}\n if self.transform:\n sample[\"image\"] = self.transform(sample[\"image\"])\n return sample\n\nclass ProtestDatasetEval(Dataset):\n \"\"\"\n dataset for just calculating the output (does not need an annotation file)\n \"\"\"\n def __init__(self, img_dir):\n \"\"\"\n Args:\n img_dir: Directory with images\n \"\"\"\n self.img_dir = img_dir\n self.transform = transforms.Compose([\n transforms.Resize(125),\n transforms.CenterCrop(100),\n transforms.Grayscale(num_output_channels=1), #testtest\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 
0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n self.img_list = sorted(os.listdir(img_dir))\n def __len__(self):\n return len(self.img_list)\n def __getitem__(self, idx):\n imgpath = os.path.join(self.img_dir,\n self.img_list[idx])\n image = pil_loader(imgpath)\n # we need this variable to check if the image is protest or not)\n sample = {\"imgpath\":imgpath, \"image\":image}\n sample[\"image\"] = self.transform(sample[\"image\"])\n return sample\n\nclass FinalLayer(nn.Module):\n \"\"\"modified last layer for resnet50 for our dataset\"\"\"\n def __init__(self):\n super(FinalLayer, self).__init__()\n self.fc = nn.Linear(2048, 12)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc(x)\n out = self.sigmoid(out)\n return out\n\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\ndef modified_resnet():\n # load pretrained resnet with a modified last fully connected layer\n model = models.resnet50(pretrained = True)\n model.fc = FinalLayer()\n return model\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count != 0:\n self.avg = self.sum / self.count\n\nclass Lighting(object):\n \"\"\"\n Lighting noise(AlexNet - style PCA - based noise)\n https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py\n \"\"\"\n \n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone() .mul(alpha.view(1, 3).expand(3, 3)) .mul(self.eigval.view(1, 3).expand(3, 3)) .sum(1).squeeze()\n\n 
return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n# for indexing output of the model\nprotest_idx = Variable(torch.LongTensor([0]))\nviolence_idx = Variable(torch.LongTensor([1]))\nvisattr_idx = Variable(torch.LongTensor(range(2,12)))\nbest_loss = float(\"inf\")\n\ndef calculate_loss(output, target, criterions, weights = [1, 10, 5]):\n \"\"\"Calculate loss\"\"\"\n # number of protest images\n N_protest = int(target['protest'].data.sum())\n batch_size = len(target['protest'])\n\n if N_protest == 0:\n # if no protest image in target\n outputs = [None]\n # protest output\n outputs[0] = output.index_select(1, protest_idx)\n targets = [None]\n # protest target\n targets[0] = target['protest'].float()\n losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]\n scores = {}\n scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)\n scores['violence_mse'] = 0\n scores['visattr_acc'] = 0\n return losses, scores, N_protest\n\n # used for filling 0 for non-protest images\n not_protest_mask = (1 - target['protest']).byte()\n\n outputs = [None] * 4\n # protest output\n outputs[0] = output.index_select(1, protest_idx)\n # violence output\n outputs[1] = output.index_select(1, violence_idx)\n outputs[1].masked_fill_(not_protest_mask, 0)\n # visual attribute output\n outputs[2] = output.index_select(1, visattr_idx)\n outputs[2].masked_fill_(not_protest_mask.repeat(1, 10),0)\n\n\n targets = [None] * 4\n\n targets[0] = target['protest'].float()\n targets[1] = target['violence'].float()\n targets[2] = target['visattr'].float()\n\n scores = {}\n # protest accuracy for this batch\n scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)\n # violence MSE for this batch\n scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)\n # mean accuracy for visual attribute for this batch\n comparison = (outputs[2].data.round() == targets[2].data)\n 
comparison.masked_fill_(not_protest_mask.repeat(1, 10).data,0)\n n_right = comparison.float().sum()\n mean_acc = n_right / float(N_protest*10)\n scores['visattr_acc'] = mean_acc\n\n # return weighted loss\n losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]\n\n return losses, scores, N_protest\n\n\n\ndef train(train_loader, model, criterions, optimizer, epoch):\n \"\"\"training the model\"\"\"\n\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n\n end = time.time()\n loss_history = []\n for i, sample in enumerate(train_loader):\n # measure data loading batch_time\n input, target = sample['image'], sample['label']\n data_time.update(time.time() - end)\n\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n target_var = {}\n for k,v in target.items():\n target_var[k] = Variable(v)\n\n input_var = Variable(input)\n output = model(input_var)\n\n losses, scores, N_protest = calculate_loss(output, target_var, criterions)\n\n optimizer.zero_grad()\n loss = 0\n for l in losses:\n loss += l\n # back prop\n loss.backward()\n optimizer.step()\n \n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n # when there is no protest image in the batch\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}] '\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '\n 'Data {data_time.val:.2f} ({data_time.avg:.2f}) '\n 'Loss 
{loss_val:.3f} ({loss_avg:.3f}) '\n 'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f}) '\n 'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '\n 'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time,\n loss_val=loss_protest.val + loss_v.val,\n loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc, violence_mse = violence_mse,\n visattr_acc = visattr_acc))\n\n return loss_history\n\ndef validate(val_loader, model, criterions, epoch):\n \"\"\"Validating\"\"\"\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_protest = AverageMeter()\n loss_v = AverageMeter()\n protest_acc = AverageMeter()\n violence_mse = AverageMeter()\n visattr_acc = AverageMeter()\n\n end = time.time()\n loss_history = []\n for i, sample in enumerate(val_loader):\n # measure data loading batch_time\n input, target = sample['image'], sample['label']\n\n if args.cuda:\n input = input.cuda()\n for k, v in target.items():\n target[k] = v.cuda()\n input_var = Variable(input)\n\n target_var = {}\n for k,v in target.items():\n target_var[k] = Variable(v)\n\n output = model(input_var)\n\n losses, scores, N_protest = calculate_loss(output, target_var, criterions)\n loss = 0\n for l in losses:\n loss += l\n\n if N_protest:\n loss_protest.update(losses[0].data, input.size(0))\n loss_v.update(loss.data - losses[0].data, N_protest)\n else:\n # when no protest images\n loss_protest.update(losses[0].data, input.size(0))\n loss_history.append(loss.data)\n protest_acc.update(scores['protest_acc'], input.size(0))\n violence_mse.update(scores['violence_mse'], N_protest)\n visattr_acc.update(scores['visattr_acc'], N_protest)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) '\n 'Loss {loss_val:.3f} ({loss_avg:.3f}) '\n 'Protest Acc 
{protest_acc.val:.3f} ({protest_acc.avg:.3f}) '\n 'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f}) '\n 'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'\n .format(\n epoch, i, len(val_loader), batch_time=batch_time,\n loss_val =loss_protest.val + loss_v.val,\n loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc,\n violence_mse = violence_mse, visattr_acc = visattr_acc))\n\n print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '\n 'Violence MSE {violence_mse.avg:.5f} '\n 'Vis Attr Acc {visattr_acc.avg:.3f} '\n .format(loss_avg = loss_protest.avg + loss_v.avg,\n protest_acc = protest_acc,\n violence_mse = violence_mse, visattr_acc = visattr_acc))\n return loss_protest.avg + loss_v.avg, loss_history\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 0.5 every 5 epochs\"\"\"\n lr = args.lr * (0.4 ** (epoch // 4))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n \"\"\"Save checkpoints\"\"\"\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\ndef main():\n global best_loss\n loss_history_train = []\n loss_history_val = []\n data_dir = args.data_dir\n img_dir_train = os.path.join(data_dir, \"train\")\n img_dir_val = os.path.join(data_dir, \"test\")\n txt_file_train = os.path.join(data_dir, \"annot_train.txt\")\n txt_file_val = os.path.join(data_dir, \"annot_test.txt\")\n\n # load pretrained resnet50 with a modified last fully connected layer\n model = modified_resnet()\n\n # we need three different criterion for training\n criterion_protest = nn.BCELoss()\n criterion_violence = nn.MSELoss()\n criterion_visattr = nn.BCELoss()\n criterions = [criterion_protest, criterion_violence, criterion_visattr]\n\n if args.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU Found\")\n if args.cuda:\n model = model.cuda()\n 
criterions = [criterion.cuda() for criterion in criterions]\n # we are not training the frozen layers\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n\n optimizer = torch.optim.SGD(\n parameters, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay\n )\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_loss = checkpoint['best_loss']\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n loss_history_train = checkpoint['loss_history_train']\n loss_history_val = checkpoint['loss_history_val']\n if args.change_lr:\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.lr\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n eigval = torch.Tensor([0.2175, 0.0188, 0.0045])\n eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n\n train_dataset = ProtestDataset(\n txt_file = txt_file_train,\n img_dir = img_dir_train,\n transform = transforms.Compose([\n transforms.RandomResizedCrop(100),\n transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(\n brightness = 0.4,\n contrast = 0.7,\n saturation = 0.4,\n ),\n transforms.ToTensor(),\n Lighting(0.1, eigval, eigvec),\n normalize,\n ]))\n val_dataset = ProtestDataset(\n txt_file = txt_file_val,\n img_dir = img_dir_val,\n transform = transforms.Compose([\n transforms.Resize(125),\n transforms.CenterCrop(100),\n transforms.ToTensor(),\n normalize,\n ]))\n train_loader = DataLoader(\n train_dataset,\n num_workers = args.workers,\n 
batch_size = args.batch_size,\n shuffle = True\n )\n val_loader = DataLoader(\n val_dataset,\n num_workers = args.workers,\n batch_size = args.batch_size)\n\n for epoch in range(args.start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch)\n loss_history_train_this = train(train_loader, model, criterions,\n optimizer, epoch)\n loss_val, loss_history_val_this = validate(val_loader, model,\n criterions, epoch)\n loss_history_train.append(loss_history_train_this)\n loss_history_val.append(loss_history_val_this)\n\n is_best = loss_val < best_loss\n if is_best:\n print('best model!!')\n best_loss = min(loss_val, best_loss)\n\n\n save_checkpoint({\n 'epoch' : epoch + 1,\n 'state_dict' : model.state_dict(),\n 'best_loss' : best_loss,\n 'optimizer' : optimizer.state_dict(),\n 'loss_history_train': loss_history_train,\n 'loss_history_val': loss_history_val\n }, is_best)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_dir\",\n type=str,\n default = \"\",\n help = \"directory path to dataset\",\n )\n parser.add_argument(\"--cuda\",\n action = \"store_true\",\n help = \"use cuda?\",\n )\n parser.add_argument(\"--workers\",\n type = int,\n default = 0,\n help = \"number of workers\",\n )\n parser.add_argument(\"--batch_size\",\n type = int,\n default = 8,\n help = \"batch size\",\n )\n parser.add_argument(\"--epochs\",\n type = int,\n default = 10,\n help = \"number of epochs\",\n )\n parser.add_argument(\"--weight_decay\",\n type = float,\n default = 1e-4,\n help = \"weight decay\",\n )\n parser.add_argument(\"--lr\",\n type = float,\n default = 0.01,\n help = \"learning rate\",\n )\n parser.add_argument(\"--momentum\",\n type = float,\n default = 0.9,\n help = \"momentum\",\n )\n parser.add_argument(\"--print_freq\",\n type = int,\n default = 10,\n help = \"print frequency\",\n )\n parser.add_argument('--resume',\n default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n 
parser.add_argument('--change_lr',\n action = \"store_true\",\n help = \"Use this if you want to \\\n change learning rate when resuming\")\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n args, unknown = parser.parse_known_args()\n\n if args.cuda:\n protest_idx = protest_idx.cuda()\n violence_idx = violence_idx.cuda()\n visattr_idx = visattr_idx.cuda()\n\n\n main()\n\n",
"step-ids": [
20,
21,
22,
25,
35
]
}
|
[
20,
21,
22,
25,
35
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('login', '0006_usermovies_img')]
operations = [migrations.AddField(model_name='moviesinfo', name=
'country', field=models.CharField(default=1, max_length=50),
preserve_default=False), migrations.AddField(model_name=
'moviesinfo', name='description', field=models.CharField(default=1,
max_length=200), preserve_default=False), migrations.AddField(
model_name='moviesinfo', name='director', field=models.CharField(
default=1, max_length=100), preserve_default=False), migrations.
AddField(model_name='moviesinfo', name='grenre', field=models.
CharField(default=1, max_length=50), preserve_default=False),
migrations.AddField(model_name='moviesinfo', name='year', field=
models.CharField(default=1, max_length=8), preserve_default=False),
migrations.AddField(model_name='usermovies', name='country', field=
models.CharField(default=1, max_length=50), preserve_default=False),
migrations.AddField(model_name='usermovies', name='description',
field=models.CharField(default=1, max_length=200), preserve_default
=False), migrations.AddField(model_name='usermovies', name=
'director', field=models.CharField(default=1, max_length=100),
preserve_default=False), migrations.AddField(model_name=
'usermovies', name='grenre', field=models.CharField(default=1,
max_length=50), preserve_default=False), migrations.AddField(
model_name='usermovies', name='year', field=models.CharField(
default=1, max_length=8), preserve_default=False)]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('login', '0006_usermovies_img')]
operations = [migrations.AddField(model_name='moviesinfo', name=
'country', field=models.CharField(default=1, max_length=50),
preserve_default=False), migrations.AddField(model_name=
'moviesinfo', name='description', field=models.CharField(default=1,
max_length=200), preserve_default=False), migrations.AddField(
model_name='moviesinfo', name='director', field=models.CharField(
default=1, max_length=100), preserve_default=False), migrations.
AddField(model_name='moviesinfo', name='grenre', field=models.
CharField(default=1, max_length=50), preserve_default=False),
migrations.AddField(model_name='moviesinfo', name='year', field=
models.CharField(default=1, max_length=8), preserve_default=False),
migrations.AddField(model_name='usermovies', name='country', field=
models.CharField(default=1, max_length=50), preserve_default=False),
migrations.AddField(model_name='usermovies', name='description',
field=models.CharField(default=1, max_length=200), preserve_default
=False), migrations.AddField(model_name='usermovies', name=
'director', field=models.CharField(default=1, max_length=100),
preserve_default=False), migrations.AddField(model_name=
'usermovies', name='grenre', field=models.CharField(default=1,
max_length=50), preserve_default=False), migrations.AddField(
model_name='usermovies', name='year', field=models.CharField(
default=1, max_length=8), preserve_default=False)]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-12-20 13:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0006_usermovies_img'),
]
operations = [
migrations.AddField(
model_name='moviesinfo',
name='country',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='moviesinfo',
name='description',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='moviesinfo',
name='director',
field=models.CharField(default=1, max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='moviesinfo',
name='grenre',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='moviesinfo',
name='year',
field=models.CharField(default=1, max_length=8),
preserve_default=False,
),
migrations.AddField(
model_name='usermovies',
name='country',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='usermovies',
name='description',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='usermovies',
name='director',
field=models.CharField(default=1, max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='usermovies',
name='grenre',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='usermovies',
name='year',
field=models.CharField(default=1, max_length=8),
preserve_default=False,
),
]
|
flexible
|
{
"blob_id": "e67cbddf10440e8a31373e05a82840677d3045f5",
"index": 4388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('login', '0006_usermovies_img')]\n operations = [migrations.AddField(model_name='moviesinfo', name=\n 'country', field=models.CharField(default=1, max_length=50),\n preserve_default=False), migrations.AddField(model_name=\n 'moviesinfo', name='description', field=models.CharField(default=1,\n max_length=200), preserve_default=False), migrations.AddField(\n model_name='moviesinfo', name='director', field=models.CharField(\n default=1, max_length=100), preserve_default=False), migrations.\n AddField(model_name='moviesinfo', name='grenre', field=models.\n CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='moviesinfo', name='year', field=\n models.CharField(default=1, max_length=8), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='country', field=\n models.CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='description',\n field=models.CharField(default=1, max_length=200), preserve_default\n =False), migrations.AddField(model_name='usermovies', name=\n 'director', field=models.CharField(default=1, max_length=100),\n preserve_default=False), migrations.AddField(model_name=\n 'usermovies', name='grenre', field=models.CharField(default=1,\n max_length=50), preserve_default=False), migrations.AddField(\n model_name='usermovies', name='year', field=models.CharField(\n default=1, max_length=8), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('login', '0006_usermovies_img')]\n operations = [migrations.AddField(model_name='moviesinfo', name=\n 'country', field=models.CharField(default=1, max_length=50),\n preserve_default=False), migrations.AddField(model_name=\n 'moviesinfo', name='description', field=models.CharField(default=1,\n max_length=200), preserve_default=False), migrations.AddField(\n model_name='moviesinfo', name='director', field=models.CharField(\n default=1, max_length=100), preserve_default=False), migrations.\n AddField(model_name='moviesinfo', name='grenre', field=models.\n CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='moviesinfo', name='year', field=\n models.CharField(default=1, max_length=8), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='country', field=\n models.CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='description',\n field=models.CharField(default=1, max_length=200), preserve_default\n =False), migrations.AddField(model_name='usermovies', name=\n 'director', field=models.CharField(default=1, max_length=100),\n preserve_default=False), migrations.AddField(model_name=\n 'usermovies', name='grenre', field=models.CharField(default=1,\n max_length=50), preserve_default=False), migrations.AddField(\n model_name='usermovies', name='year', field=models.CharField(\n default=1, max_length=8), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.3 on 2018-12-20 13:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('login', '0006_usermovies_img'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='moviesinfo',\n name='country',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='description',\n field=models.CharField(default=1, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='director',\n field=models.CharField(default=1, max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='grenre',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='year',\n field=models.CharField(default=1, max_length=8),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='country',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='description',\n field=models.CharField(default=1, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='director',\n field=models.CharField(default=1, max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='grenre',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='year',\n field=models.CharField(default=1, max_length=8),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Exercício 1: Estenda a classe Stack , que escrevemos durante as explicações do
conteúdo, adicionando uma nova função chamada min_value() que irá retornar o
menor valor inteiro presente na pilha.
'''
from stack import Stack
class Other_Operations_Stack(Stack):
def min_value(self):
min_value = self.peek()
for value in self._data:
if value < min_value:
min_value = value
self.pop()
return min_value
content_stack = Other_Operations_Stack()
content_stack.push(1)
content_stack.push(-2)
content_stack.push(3)
print(content_stack.min_value()) # saída: -2
|
normal
|
{
"blob_id": "0b2fd671b99b7012a14b132db2322318873b826c",
"index": 1345,
"step-1": "<mask token>\n\n\nclass Other_Operations_Stack(Stack):\n\n def min_value(self):\n min_value = self.peek()\n for value in self._data:\n if value < min_value:\n min_value = value\n self.pop()\n return min_value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Other_Operations_Stack(Stack):\n\n def min_value(self):\n min_value = self.peek()\n for value in self._data:\n if value < min_value:\n min_value = value\n self.pop()\n return min_value\n\n\n<mask token>\ncontent_stack.push(1)\ncontent_stack.push(-2)\ncontent_stack.push(3)\nprint(content_stack.min_value())\n",
"step-3": "<mask token>\n\n\nclass Other_Operations_Stack(Stack):\n\n def min_value(self):\n min_value = self.peek()\n for value in self._data:\n if value < min_value:\n min_value = value\n self.pop()\n return min_value\n\n\ncontent_stack = Other_Operations_Stack()\ncontent_stack.push(1)\ncontent_stack.push(-2)\ncontent_stack.push(3)\nprint(content_stack.min_value())\n",
"step-4": "<mask token>\nfrom stack import Stack\n\n\nclass Other_Operations_Stack(Stack):\n\n def min_value(self):\n min_value = self.peek()\n for value in self._data:\n if value < min_value:\n min_value = value\n self.pop()\n return min_value\n\n\ncontent_stack = Other_Operations_Stack()\ncontent_stack.push(1)\ncontent_stack.push(-2)\ncontent_stack.push(3)\nprint(content_stack.min_value())\n",
"step-5": "'''\nExercício 1: Estenda a classe Stack , que escrevemos durante as explicações do\nconteúdo, adicionando uma nova função chamada min_value() que irá retornar o\nmenor valor inteiro presente na pilha.\n'''\n\nfrom stack import Stack\n\n\nclass Other_Operations_Stack(Stack):\n def min_value(self):\n min_value = self.peek()\n for value in self._data:\n if value < min_value:\n min_value = value\n self.pop()\n return min_value\n\n\ncontent_stack = Other_Operations_Stack()\ncontent_stack.push(1)\ncontent_stack.push(-2)\ncontent_stack.push(3)\nprint(content_stack.min_value()) # saída: -2\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def is_good_response(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200 and content_type is not None and
content_type.find('html') > -1)
def log_error(e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e)
def save_json(file):
with open('gif_list.txt', 'w') as f:
f.write(file)
<|reserved_special_token_0|>
def dl_img(url, file_path, file_name):
full_path = file_path + '/' + file_name + '.gif'
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(url, full_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def is_good_response(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200 and content_type is not None and
content_type.find('html') > -1)
def log_error(e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e)
def save_json(file):
with open('gif_list.txt', 'w') as f:
f.write(file)
<|reserved_special_token_0|>
def dl_img(url, file_path, file_name):
full_path = file_path + '/' + file_name + '.gif'
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(url, full_path)
<|reserved_special_token_0|>
for i in filename:
url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)
file_name = str(i[:-4])
dl_img(url, 'files/pokemon/front', file_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def is_good_response(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200 and content_type is not None and
content_type.find('html') > -1)
def log_error(e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e)
def save_json(file):
with open('gif_list.txt', 'w') as f:
f.write(file)
<|reserved_special_token_0|>
def dl_img(url, file_path, file_name):
full_path = file_path + '/' + file_name + '.gif'
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(url, full_path)
filename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif',
'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif',
'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif',
'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif',
'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif',
'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif',
'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif',
'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif',
'alcremie-caramelswirl.gif', 'alcremie-gmax.gif',
'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif',
'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif',
'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif',
'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif',
'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif',
'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif',
'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif',
'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif',
'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif',
'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif',
'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif',
'alcremie-rainbow-swirl-clover.gif',
'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif',
'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif',
'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif',
'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif',
'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif',
'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif',
'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif',
'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif',
'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif',
'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif',
'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif',
'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif',
'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif',
'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif',
'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif',
'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif',
'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif',
'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif',
'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif',
'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif',
'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif',
'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif',
'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif',
'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif',
'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif',
'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif',
'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif',
'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif',
'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif',
'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif',
'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif',
'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif',
'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif',
'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif',
'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif',
'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif',
'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif',
'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif',
'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif',
'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif',
'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif',
'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif',
'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif',
'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif',
'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif',
'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif',
'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif',
'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif',
'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif',
'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif',
'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif',
'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif',
'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif',
'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif',
'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif',
'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif',
'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif',
'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif',
'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif',
'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif',
'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif',
'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif',
'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif',
'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif',
'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif',
'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif',
'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif',
'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif',
'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif',
'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif',
'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif',
'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif',
'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif',
'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif',
'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif',
'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif',
'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif',
'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif',
'delibird.gif', 'delphox.gif', 'deoxys-attack.gif',
'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif',
'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif',
'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif',
'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif',
'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif',
'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif',
'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif',
'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif',
'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif',
'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif',
'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif',
'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif',
'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif',
'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif',
'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif',
'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif',
'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif',
'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif',
'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif',
'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif',
'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif',
'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif',
'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif',
'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif',
'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif',
'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif',
'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif',
'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif',
'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif',
'floette-yellow.gif', 'floette.gif', 'florges-blue.gif',
'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif',
'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif',
'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif',
'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif',
'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif',
'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif',
'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif',
'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif',
'gallade-mega.gif', 'gallade.gif', 'galvantula.gif',
'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif',
'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif',
'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif',
'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif',
'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif',
'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif',
'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif',
'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif',
'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif',
'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif',
'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif',
'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif',
'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif',
'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif',
'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif',
'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif',
'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif',
'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif',
'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif',
'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif',
'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif',
'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif',
'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif',
'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif',
'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif',
'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif',
'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif',
'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif',
'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif',
'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif',
'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif',
'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif',
'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif',
'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif',
'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif',
'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif',
'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif',
'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif',
'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif',
'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif',
'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif',
'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif',
'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif',
'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif',
'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif',
'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif',
'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif',
'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif',
'landorus-therian.gif', 'landorus.gif', 'lanturn.gif',
'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif',
'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif',
'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif',
'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif',
'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif',
'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif',
'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif',
'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif',
'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif',
'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif',
'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif',
'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif',
'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif',
'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif',
'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif',
'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif',
'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif',
'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif',
'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif',
'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif',
'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif',
'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif',
'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif',
'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif',
'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif',
'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif',
'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif',
'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif',
'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif',
'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif',
'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif',
'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif',
'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif',
'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif',
'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif',
'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif',
'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif',
'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif',
'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif',
'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif',
'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif',
'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif',
'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif',
'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif',
'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif',
'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif',
'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif',
'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif',
'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif',
'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif',
'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif',
'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif',
'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif',
'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif',
'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif',
'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif',
'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif',
'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif',
'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif',
'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif',
'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif',
'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif',
'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif',
'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif',
'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif',
'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif',
'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif',
'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif',
'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif',
'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif',
'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif',
'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif',
'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif',
'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif',
'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif',
'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif',
'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif',
'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif',
'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif',
'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif',
'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif',
'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif',
'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif',
'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif',
'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif',
'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif',
'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif',
'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif',
'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif',
'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif',
'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif',
'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif',
'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif',
'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif',
'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif',
'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif',
'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif',
'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif',
'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif',
'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif',
'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif',
'runerigus.gif', 'sableye-mega.gif', 'sableye.gif',
'salamence-mega.gif', 'salamence.gif', 'salandit.gif',
'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif',
'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif',
'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif',
'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif',
'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif',
'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif',
'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif',
'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif',
'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif',
'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif',
'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif',
'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif',
'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif',
'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif',
'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif',
'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif',
'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif',
'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif',
'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif',
'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif',
'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif',
'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif',
'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif',
'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif',
'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif',
'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif',
'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif',
'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif',
'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif',
'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif',
'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif',
'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif',
'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif',
'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif',
'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif',
'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif',
'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif',
'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif',
'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif',
'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif',
'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif',
'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif',
'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif',
'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif',
'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif',
'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif',
'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif',
'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif',
'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif',
'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif',
'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif',
'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif',
'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif',
'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif',
'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif',
'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif',
'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif',
'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif',
'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif',
'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif',
'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif',
'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif',
'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif',
'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif',
'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif',
'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif',
'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif',
'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif',
'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif',
'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif',
'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif',
'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif',
'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif',
'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif',
'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif',
'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif',
'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif',
'vivillon-highplains.gif', 'vivillon-icysnow.gif',
'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif',
'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif',
'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif',
'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif',
'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif',
'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif',
'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif',
'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif',
'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif',
'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif',
'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif',
'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif',
'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif',
'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif',
'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif',
'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif',
'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif',
'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif',
'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif',
'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif',
'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif',
'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']
# Fetch every animated sprite in `filename` from Pokemon Showdown and save
# it under files/pokemon/front/<name>.gif (the list entries end in ".gif",
# so the last four characters are stripped to get the bare name).
for gif in filename:
    url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(gif)
    stem = str(gif[:-4])
    dl_img(url, 'files/pokemon/front', stem)
<|reserved_special_token_1|>
import urllib.request
from urllib.request import Request, urlopen
import json
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
<|reserved_special_token_0|>
def simple_get(url):
    """
    Attempts to get the content at `url` by making an HTTP GET request.

    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None.  Any request-level failure is
    logged and reported as None rather than raised.
    """
    try:
        # `closing` guarantees the streamed response is released even if
        # reading the body fails part-way through.
        with closing(get(url, stream=True)) as resp:
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """
    Returns True if the response seems to be HTML, False otherwise.

    A "good" response has status 200 and a Content-Type header that
    mentions 'html' (matched case-insensitively).
    """
    if resp.status_code != 200:
        return False
    ctype = resp.headers['Content-Type'].lower()
    # The None check is kept from the original contract even though
    # .lower() can never yield None here.
    return ctype is not None and 'html' in ctype
def log_error(e):
    """
    Report an error to the operator.

    Currently this simply prints the error to stdout; swap in real
    logging here if the script grows beyond a one-off tool.
    """
    print(e)
def save_json(file, path='gif_list.txt'):
    """
    Write the given text verbatim to disk.

    Despite the name, no JSON serialization happens here: the caller is
    expected to pass an already-serialized string.

    :param file: text content to write.
    :param path: destination file path.  Defaults to ``'gif_list.txt'``
        for backward compatibility with existing callers, but is now a
        parameter instead of a hard-coded constant.
    """
    # Explicit UTF-8 avoids depending on the platform default encoding.
    with open(path, 'w', encoding='utf-8') as f:
        f.write(file)
<|reserved_special_token_0|>
def dl_img(url, file_path, file_name):
    """
    Download the image at *url* to ``<file_path>/<file_name>.gif``.

    A browser-like User-Agent is sent because some hosts reject the
    default urllib agent.  The header is attached to this single request
    via a ``Request`` object instead of ``install_opener``, which the
    original used and which mutated process-wide urllib state on every
    call.

    :param url: source URL of the gif.
    :param file_path: destination directory (must already exist).
    :param file_name: destination file name without the ``.gif`` suffix.
    """
    full_path = file_path + '/' + file_name + '.gif'
    req = urllib.request.Request(url, headers={'User-agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as resp, open(full_path, 'wb') as out:
        out.write(resp.read())
filename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif',
'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif',
'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif',
'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif',
'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif',
'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif',
'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif',
'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif',
'alcremie-caramelswirl.gif', 'alcremie-gmax.gif',
'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif',
'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif',
'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif',
'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif',
'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif',
'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif',
'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif',
'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif',
'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif',
'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif',
'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif',
'alcremie-rainbow-swirl-clover.gif',
'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif',
'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif',
'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif',
'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif',
'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif',
'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif',
'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif',
'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif',
'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif',
'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif',
'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif',
'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif',
'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif',
'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif',
'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif',
'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif',
'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif',
'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif',
'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif',
'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif',
'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif',
'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif',
'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif',
'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif',
'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif',
'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif',
'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif',
'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif',
'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif',
'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif',
'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif',
'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif',
'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif',
'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif',
'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif',
'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif',
'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif',
'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif',
'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif',
'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif',
'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif',
'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif',
'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif',
'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif',
'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif',
'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif',
'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif',
'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif',
'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif',
'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif',
'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif',
'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif',
'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif',
'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif',
'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif',
'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif',
'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif',
'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif',
'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif',
'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif',
'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif',
'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif',
'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif',
'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif',
'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif',
'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif',
'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif',
'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif',
'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif',
'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif',
'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif',
'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif',
'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif',
'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif',
'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif',
'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif',
'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif',
'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif',
'delibird.gif', 'delphox.gif', 'deoxys-attack.gif',
'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif',
'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif',
'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif',
'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif',
'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif',
'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif',
'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif',
'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif',
'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif',
'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif',
'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif',
'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif',
'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif',
'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif',
'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif',
'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif',
'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif',
'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif',
'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif',
'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif',
'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif',
'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif',
'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif',
'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif',
'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif',
'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif',
'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif',
'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif',
'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif',
'floette-yellow.gif', 'floette.gif', 'florges-blue.gif',
'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif',
'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif',
'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif',
'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif',
'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif',
'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif',
'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif',
'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif',
'gallade-mega.gif', 'gallade.gif', 'galvantula.gif',
'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif',
'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif',
'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif',
'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif',
'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif',
'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif',
'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif',
'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif',
'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif',
'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif',
'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif',
'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif',
'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif',
'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif',
'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif',
'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif',
'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif',
'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif',
'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif',
'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif',
'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif',
'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif',
'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif',
'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif',
'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif',
'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif',
'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif',
'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif',
'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif',
'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif',
'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif',
'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif',
'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif',
'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif',
'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif',
'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif',
'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif',
'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif',
'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif',
'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif',
'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif',
'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif',
'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif',
'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif',
'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif',
'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif',
'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif',
'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif',
'landorus-therian.gif', 'landorus.gif', 'lanturn.gif',
'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif',
'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif',
'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif',
'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif',
'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif',
'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif',
'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif',
'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif',
'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif',
'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif',
'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif',
'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif',
'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif',
'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif',
'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif',
'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif',
'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif',
'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif',
'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif',
'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif',
'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif',
'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif',
'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif',
'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif',
'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif',
'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif',
'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif',
'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif',
'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif',
'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif',
'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif',
'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif',
'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif',
'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif',
'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif',
'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif',
'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif',
'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif',
'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif',
'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif',
'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif',
'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif',
'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif',
'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif',
'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif',
'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif',
'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif',
'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif',
'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif',
'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif',
'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif',
'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif',
'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif',
'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif',
'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif',
'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif',
'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif',
'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif',
'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif',
'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif',
'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif',
'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif',
'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif',
'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif',
'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif',
'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif',
'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif',
'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif',
'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif',
'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif',
'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif',
'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif',
'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif',
'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif',
'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif',
'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif',
'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif',
'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif',
'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif',
'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif',
'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif',
'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif',
'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif',
'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif',
'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif',
'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif',
'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif',
'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif',
'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif',
'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif',
'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif',
'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif',
'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif',
'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif',
'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif',
'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif',
'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif',
'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif',
'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif',
'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif',
'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif',
'runerigus.gif', 'sableye-mega.gif', 'sableye.gif',
'salamence-mega.gif', 'salamence.gif', 'salandit.gif',
'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif',
'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif',
'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif',
'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif',
'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif',
'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif',
'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif',
'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif',
'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif',
'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif',
'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif',
'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif',
'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif',
'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif',
'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif',
'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif',
'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif',
'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif',
'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif',
'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif',
'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif',
'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif',
'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif',
'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif',
'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif',
'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif',
'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif',
'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif',
'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif',
'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif',
'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif',
'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif',
'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif',
'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif',
'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif',
'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif',
'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif',
'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif',
'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif',
'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif',
'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif',
'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif',
'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif',
'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif',
'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif',
'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif',
'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif',
'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif',
'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif',
'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif',
'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif',
'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif',
'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif',
'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif',
'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif',
'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif',
'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif',
'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif',
'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif',
'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif',
'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif',
'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif',
'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif',
'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif',
'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif',
'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif',
'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif',
'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif',
'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif',
'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif',
'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif',
'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif',
'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif',
'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif',
'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif',
'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif',
'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif',
'vivillon-highplains.gif', 'vivillon-icysnow.gif',
'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif',
'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif',
'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif',
'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif',
'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif',
'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif',
'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif',
'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif',
'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif',
'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif',
'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif',
'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif',
'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif',
'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif',
'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif',
'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif',
'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif',
'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif',
'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif',
'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif',
'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif',
'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']
# Fetch every animated sprite in the list and store it locally under
# files/pokemon/front/<name>.gif (the trailing '.gif' is stripped from the
# list entry because dl_img re-appends it).
for gif in filename:
    sprite_url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(gif)
    dl_img(sprite_url, 'files/pokemon/front', str(gif[:-4]))
<|reserved_special_token_1|>
import json
import shutil
import urllib.request
from contextlib import closing
from urllib.request import Request, urlopen

from bs4 import BeautifulSoup
from requests import get
from requests.exceptions import RequestException
"""
Web Scraper ======================================================================
"""
def simple_get(url):
    """
    Fetch `url` with an HTTP GET request.

    Returns the raw response body when the response looks like HTML/XML
    (see is_good_response); returns None on a non-HTML response or on any
    requests-level failure, which is logged via log_error.
    """
    try:
        with closing(get(url, stream=True)) as resp:
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """
    Decide whether `resp` is a successful HTML response.

    True only when the status code is 200 and the Content-Type header
    mentions 'html' (case-insensitive).
    """
    ctype = resp.headers['Content-Type'].lower()
    if resp.status_code != 200:
        return False
    return ctype is not None and 'html' in ctype
def log_error(e):
    """
    Report a scraping error.

    Currently just prints the message; centralised here so the whole
    module can be switched to real logging in one place.
    """
    print(e)
def save_json(file):
    """Write the string `file` to the hard-coded path 'gif_list.txt'.

    NOTE: despite the name, this writes plain text (whatever string the
    caller passes) to a fixed filename — it does not serialise to JSON.
    UTF-8 is forced so the output does not depend on the platform's
    default encoding.
    """
    with open('gif_list.txt', 'w', encoding='utf-8') as f:
        f.write(file)
"""
Scraping
"""
# req = Request('https://play.pokemonshowdown.com/sprites/ani/', headers={'User-Agent': 'Mozilla/5.0'})
# url = urlopen(req).read()
# url = 'https://play.pokemonshowdown.com/sprites/ani/'
# raw_html = simple_get(url)
# soup = BeautifulSoup(url, 'lxml')
# # a = soup.find_all('td', attrs={'valign': 'top'})
# a = soup.find_all('a')
# videolist = []
# print(a)
# for v in a:
# tmp = v['href']
# videolist.append(tmp)
# filename = videolist[5:]
# print(filename)
def dl_img(url, file_path, file_name):
    """
    Download `url` and save it as `file_path`/`file_name`.gif.

    A browser-like User-Agent is sent because the sprite host rejects
    urllib's default agent. Unlike the previous implementation, this
    sends the header per-request instead of calling
    urllib.request.install_opener(), which mutated process-global
    state (affecting every other urllib user) and rebuilt the opener
    on every single download.

    Args:
        url: source URL of the GIF.
        file_path: destination directory (must already exist).
        file_name: base name without extension; '.gif' is appended.
    """
    full_path = file_path + '/' + file_name + '.gif'
    req = urllib.request.Request(url, headers={'User-agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as resp, open(full_path, 'wb') as out:
        out.write(resp.read())
filename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif', 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif', 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif', 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif', 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif', 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif', 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif', 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif', 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif', 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif', 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif', 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif', 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif', 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif', 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif', 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif', 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif', 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif', 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif', 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif', 'alcremie-rainbow-swirl-clover.gif', 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif', 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif', 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif', 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif', 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif', 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif', 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif', 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif', 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif', 'alcremie-rubyswirl.gif', 
'alcremie-salted-cream-berry.gif', 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif', 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif', 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif', 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif', 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif', 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif', 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif', 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif', 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif', 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif', 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif', 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif', 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif', 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif', 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif', 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif', 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif', 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif', 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif', 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif', 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif', 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif', 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif', 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif', 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif', 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif', 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif', 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif', 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif', 'blaziken-f.gif', 'blaziken-mega.gif', 
'blaziken.gif', 'blipbug.gif', 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif', 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif', 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif', 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif', 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif', 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif', 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif', 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif', 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif', 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif', 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif', 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif', 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif', 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif', 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif', 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif', 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif', 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif', 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif', 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif', 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif', 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif', 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif', 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif', 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif', 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif', 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif', 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif', 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif', 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif', 'croagunk.gif', 
'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif', 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif', 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif', 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif', 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif', 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif', 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif', 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif', 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif', 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif', 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif', 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif', 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif', 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif', 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif', 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif', 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif', 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif', 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif', 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif', 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif', 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif', 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif', 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif', 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif', 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif', 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif', 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif', 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif', 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif', 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif', 
'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif', 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif', 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif', 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif', 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif', 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif', 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif', 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif', 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif', 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif', 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif', 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif', 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif', 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif', 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif', 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif', 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif', 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif', 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif', 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif', 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif', 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif', 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif', 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif', 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif', 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif', 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif', 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif', 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif', 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif', 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif', 'gourgeist-small.gif', 
'gourgeist-super.gif', 'gourgeist.gif', 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif', 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif', 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif', 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif', 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif', 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif', 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif', 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif', 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif', 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif', 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif', 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif', 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif', 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif', 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif', 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif', 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif', 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif', 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif', 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif', 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif', 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif', 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif', 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif', 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif', 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif', 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif', 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif', 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif', 'kricketot-f.gif', 'kricketot.gif', 
'kricketune-f.gif', 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif', 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif', 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif', 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif', 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif', 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif', 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif', 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif', 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif', 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif', 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif', 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif', 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif', 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif', 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif', 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif', 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif', 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif', 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif', 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif', 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif', 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif', 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif', 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif', 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif', 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif', 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif', 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif', 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif', 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif', 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif', 
'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif', 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif', 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif', 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif', 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif', 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif', 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif', 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif', 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif', 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif', 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif', 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif', 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif', 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif', 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif', 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif', 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif', 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif', 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif', 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif', 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif', 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif', 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif', 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif', 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif', 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif', 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif', 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif', 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif', 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif', 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif', 
'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif', 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif', 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif', 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif', 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif', 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif', 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif', 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif', 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif', 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif', 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif', 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif', 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif', 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif', 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif', 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif', 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif', 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif', 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif', 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif', 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif', 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif', 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif', 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif', 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif', 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif', 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif', 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif', 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif', 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif', 'rapidash-galar.gif', 'rapidash.gif', 
'raticate-alola-totem.gif', 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif', 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif', 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif', 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif', 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif', 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif', 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif', 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif', 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif', 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif', 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif', 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif', 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif', 'salamence-mega.gif', 'salamence.gif', 'salandit.gif', 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif', 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif', 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif', 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif', 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif', 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif', 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif', 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif', 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif', 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif', 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif', 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif', 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif', 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif', 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif', 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif', 'silcoon.gif', 'silicobra.gif', 
'silvally-bug.gif', 'silvally-dark.gif', 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif', 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif', 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif', 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif', 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif', 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif', 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif', 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif', 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif', 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif', 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif', 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif', 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif', 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif', 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif', 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif', 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif', 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif', 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif', 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif', 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif', 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif', 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif', 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif', 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif', 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif', 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif', 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif', 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif', 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif', 'tangrowth.gif', 
'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif', 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif', 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif', 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif', 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif', 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif', 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif', 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif', 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif', 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif', 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif', 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif', 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif', 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif', 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif', 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif', 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif', 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif', 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif', 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif', 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif', 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif', 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif', 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif', 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif', 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif', 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif', 'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif', 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif', 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif', 'vivillon-highplains.gif', 'vivillon-icysnow.gif', 
'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif', 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif', 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif', 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif', 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif', 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif', 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif', 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif', 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif', 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif', 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif', 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif', 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif', 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif', 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif', 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif', 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif', 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif', 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif', 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif', 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif', 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']
# Fetch every animated front sprite named in `filename` and store it
# under files/pokemon/front/.
for gif in filename:
    gif_url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(gif)
    # Drop the '.gif' suffix; dl_img appends the extension itself.
    base_name = str(gif[:-4])
    dl_img(gif_url, 'files/pokemon/front', base_name)
|
flexible
|
{
"blob_id": "4c9a3983180cc75c39da41f7f9b595811ba0dc35",
"index": 8390,
"step-1": "<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\n<mask token>\nfor i in filename:\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\n file_name = str(i[:-4])\n dl_img(url, 'files/pokemon/front', file_name)\n",
"step-3": "<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\nfilename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif',\n 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif',\n 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif',\n 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif',\n 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif',\n 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif',\n 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif',\n 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif',\n 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif',\n 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif',\n 
'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif',\n 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif',\n 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif',\n 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif',\n 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif',\n 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif',\n 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif',\n 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif',\n 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif',\n 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif',\n 'alcremie-rainbow-swirl-clover.gif',\n 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif',\n 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif',\n 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif',\n 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif',\n 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif',\n 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif',\n 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif',\n 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif',\n 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif',\n 'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif',\n 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif',\n 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif',\n 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif',\n 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif',\n 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif',\n 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif',\n 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif',\n 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif',\n 'ampharos-mega.gif', 
'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif',\n 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif',\n 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif',\n 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif',\n 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif',\n 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif',\n 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif',\n 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif',\n 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif',\n 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif',\n 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif',\n 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif',\n 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif',\n 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif',\n 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif',\n 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif',\n 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif',\n 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif',\n 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif',\n 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif',\n 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif',\n 'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif',\n 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif',\n 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif',\n 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif',\n 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif',\n 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif',\n 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif',\n 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif',\n 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif',\n 
'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif',\n 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif',\n 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif',\n 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif',\n 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif',\n 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif',\n 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif',\n 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif',\n 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif',\n 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif',\n 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif',\n 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif',\n 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif',\n 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif',\n 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif',\n 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif',\n 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif',\n 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif',\n 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif',\n 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif',\n 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif',\n 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif',\n 'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif',\n 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif',\n 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif',\n 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif',\n 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif',\n 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif',\n 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif',\n 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 
'delcatty.gif',\n 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif',\n 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif',\n 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif',\n 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif',\n 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif',\n 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif',\n 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif',\n 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif',\n 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif',\n 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif',\n 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif',\n 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif',\n 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif',\n 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif',\n 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif',\n 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif',\n 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif',\n 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif',\n 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif',\n 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif',\n 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif',\n 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif',\n 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif',\n 'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif',\n 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif',\n 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif',\n 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif',\n 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif',\n 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif',\n 'floette-eternal.gif', 'floette-orange.gif', 
'floette-white.gif',\n 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif',\n 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif',\n 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif',\n 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif',\n 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif',\n 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif',\n 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif',\n 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif',\n 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif',\n 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif',\n 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif',\n 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif',\n 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif',\n 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif',\n 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif',\n 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif',\n 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif',\n 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif',\n 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif',\n 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif',\n 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif',\n 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif',\n 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif',\n 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif',\n 'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif',\n 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif',\n 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif',\n 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif',\n 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif',\n 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 
'gulpin-f.gif',\n 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif',\n 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif',\n 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif',\n 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif',\n 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif',\n 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif',\n 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif',\n 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif',\n 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif',\n 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif',\n 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif',\n 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif',\n 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif',\n 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif',\n 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif',\n 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif',\n 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif',\n 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif',\n 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif',\n 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif',\n 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif',\n 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif',\n 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif',\n 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif',\n 'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif',\n 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif',\n 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif',\n 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif',\n 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif',\n 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 
'larvitar.gif',\n 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif',\n 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif',\n 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif',\n 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif',\n 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif',\n 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif',\n 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif',\n 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif',\n 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif',\n 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif',\n 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif',\n 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif',\n 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif',\n 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif',\n 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif',\n 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif',\n 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif',\n 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif',\n 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif',\n 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif',\n 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif',\n 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif',\n 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif',\n 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif',\n 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif',\n 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif',\n 'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif',\n 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif',\n 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif',\n 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 
'mightyena.gif',\n 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif',\n 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif',\n 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif',\n 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif',\n 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif',\n 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif',\n 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif',\n 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif',\n 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif',\n 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif',\n 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif',\n 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif',\n 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif',\n 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif',\n 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif',\n 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif',\n 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif',\n 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif',\n 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif',\n 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif',\n 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif',\n 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif',\n 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif',\n 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif',\n 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif',\n 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif',\n 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif',\n 'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif',\n 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif',\n 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif',\n 
'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif',\n 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif',\n 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif',\n 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif',\n 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif',\n 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif',\n 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif',\n 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif',\n 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif',\n 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif',\n 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif',\n 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif',\n 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif',\n 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif',\n 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif',\n 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif',\n 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif',\n 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif',\n 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif',\n 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif',\n 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif',\n 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif',\n 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif',\n 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif',\n 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif',\n 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif',\n 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif',\n 'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif',\n 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif',\n 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif',\n 
'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif',\n 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif',\n 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif',\n 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif',\n 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif',\n 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif',\n 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif',\n 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif',\n 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif',\n 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif',\n 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif',\n 'salamence-mega.gif', 'salamence.gif', 'salandit.gif',\n 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif',\n 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif',\n 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif',\n 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif',\n 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif',\n 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif',\n 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif',\n 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif',\n 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif',\n 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif',\n 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif',\n 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif',\n 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif',\n 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif',\n 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif',\n 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif',\n 'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif',\n 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif',\n 
'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif',\n 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif',\n 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif',\n 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif',\n 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif',\n 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif',\n 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif',\n 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif',\n 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif',\n 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif',\n 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif',\n 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif',\n 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif',\n 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif',\n 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif',\n 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif',\n 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif',\n 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif',\n 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif',\n 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif',\n 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif',\n 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif',\n 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif',\n 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif',\n 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif',\n 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif',\n 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif',\n 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif',\n 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif',\n 'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif',\n 
'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif',\n 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif',\n 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif',\n 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif',\n 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif',\n 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif',\n 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif',\n 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif',\n 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif',\n 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif',\n 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif',\n 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif',\n 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif',\n 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif',\n 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif',\n 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif',\n 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif',\n 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif',\n 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif',\n 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif',\n 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif',\n 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif',\n 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif',\n 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif',\n 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif',\n 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif',\n 'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif',\n 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif',\n 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif',\n 'vivillon-highplains.gif', 
'vivillon-icysnow.gif',\n 'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif',\n 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif',\n 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif',\n 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif',\n 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif',\n 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif',\n 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif',\n 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif',\n 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif',\n 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif',\n 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif',\n 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif',\n 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif',\n 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif',\n 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif',\n 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif',\n 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif',\n 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif',\n 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif',\n 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif',\n 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif',\n 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']\nfor i in filename:\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\n file_name = str(i[:-4])\n dl_img(url, 'files/pokemon/front', file_name)\n",
"step-4": "import urllib.request\nfrom urllib.request import Request, urlopen\nimport json\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\n<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\nfilename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif',\n 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif',\n 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif',\n 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif',\n 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif',\n 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif',\n 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif',\n 
'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif',\n 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif',\n 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif',\n 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif',\n 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif',\n 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif',\n 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif',\n 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif',\n 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif',\n 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif',\n 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif',\n 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif',\n 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif',\n 'alcremie-rainbow-swirl-clover.gif',\n 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif',\n 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif',\n 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif',\n 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif',\n 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif',\n 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif',\n 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif',\n 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif',\n 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif',\n 'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif',\n 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif',\n 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif',\n 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif',\n 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif',\n 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif',\n 'alcremie-vanilla-cream-ribbon.gif', 
'alcremie-vanilla-cream-star.gif',\n 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif',\n 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif',\n 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif',\n 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif',\n 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif',\n 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif',\n 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif',\n 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif',\n 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif',\n 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif',\n 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif',\n 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif',\n 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif',\n 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif',\n 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif',\n 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif',\n 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif',\n 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif',\n 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif',\n 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif',\n 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif',\n 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif',\n 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif',\n 'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif',\n 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif',\n 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif',\n 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif',\n 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif',\n 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif',\n 
'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif',\n 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif',\n 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif',\n 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif',\n 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif',\n 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif',\n 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif',\n 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif',\n 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif',\n 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif',\n 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif',\n 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif',\n 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif',\n 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif',\n 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif',\n 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif',\n 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif',\n 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif',\n 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif',\n 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif',\n 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif',\n 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif',\n 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif',\n 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif',\n 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif',\n 'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif',\n 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif',\n 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif',\n 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif',\n 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif',\n 
'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif',\n 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif',\n 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif',\n 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif',\n 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif',\n 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif',\n 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif',\n 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif',\n 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif',\n 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif',\n 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif',\n 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif',\n 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif',\n 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif',\n 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif',\n 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif',\n 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif',\n 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif',\n 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif',\n 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif',\n 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif',\n 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif',\n 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif',\n 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif',\n 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif',\n 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif',\n 'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif',\n 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif',\n 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif',\n 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif',\n 
'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif',\n 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif',\n 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif',\n 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif',\n 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif',\n 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif',\n 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif',\n 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif',\n 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif',\n 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif',\n 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif',\n 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif',\n 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif',\n 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif',\n 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif',\n 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif',\n 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif',\n 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif',\n 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif',\n 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif',\n 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif',\n 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif',\n 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif',\n 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif',\n 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif',\n 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif',\n 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif',\n 'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif',\n 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif',\n 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif',\n 
'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif',\n 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif',\n 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif',\n 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif',\n 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif',\n 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif',\n 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif',\n 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif',\n 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif',\n 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif',\n 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif',\n 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif',\n 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif',\n 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif',\n 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif',\n 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif',\n 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif',\n 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif',\n 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif',\n 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif',\n 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif',\n 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif',\n 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif',\n 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif',\n 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif',\n 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif',\n 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif',\n 'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif',\n 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif',\n 'kyogre-primal.gif', 'kyogre.gif', 
'kyurem-black.gif',\n 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif',\n 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif',\n 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif',\n 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif',\n 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif',\n 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif',\n 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif',\n 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif',\n 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif',\n 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif',\n 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif',\n 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif',\n 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif',\n 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif',\n 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif',\n 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif',\n 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif',\n 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif',\n 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif',\n 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif',\n 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif',\n 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif',\n 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif',\n 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif',\n 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif',\n 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif',\n 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif',\n 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif',\n 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif',\n 'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif',\n 
'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif',\n 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif',\n 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif',\n 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif',\n 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif',\n 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif',\n 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif',\n 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif',\n 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif',\n 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif',\n 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif',\n 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif',\n 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif',\n 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif',\n 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif',\n 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif',\n 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif',\n 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif',\n 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif',\n 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif',\n 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif',\n 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif',\n 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif',\n 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif',\n 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif',\n 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif',\n 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif',\n 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif',\n 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif',\n 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif',\n 'pelipper.gif', 
'perrserker.gif', 'persian-alola.gif', 'persian.gif',\n 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif',\n 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif',\n 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif',\n 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif',\n 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif',\n 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif',\n 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif',\n 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif',\n 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif',\n 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif',\n 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif',\n 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif',\n 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif',\n 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif',\n 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif',\n 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif',\n 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif',\n 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif',\n 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif',\n 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif',\n 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif',\n 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif',\n 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif',\n 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif',\n 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif',\n 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif',\n 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif',\n 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif',\n 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif',\n 
'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif',\n 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif',\n 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif',\n 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif',\n 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif',\n 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif',\n 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif',\n 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif',\n 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif',\n 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif',\n 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif',\n 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif',\n 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif',\n 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif',\n 'salamence-mega.gif', 'salamence.gif', 'salandit.gif',\n 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif',\n 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif',\n 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif',\n 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif',\n 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif',\n 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif',\n 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif',\n 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif',\n 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif',\n 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif',\n 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif',\n 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif',\n 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif',\n 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif',\n 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif',\n 
'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif',\n 'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif',\n 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif',\n 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif',\n 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif',\n 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif',\n 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif',\n 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif',\n 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif',\n 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif',\n 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif',\n 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif',\n 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif',\n 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif',\n 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif',\n 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif',\n 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif',\n 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif',\n 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif',\n 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif',\n 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif',\n 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif',\n 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif',\n 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif',\n 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif',\n 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif',\n 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif',\n 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif',\n 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif',\n 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 
'swellow.gif',\n 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif',\n 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif',\n 'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif',\n 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif',\n 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif',\n 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif',\n 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif',\n 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif',\n 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif',\n 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif',\n 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif',\n 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif',\n 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif',\n 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif',\n 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif',\n 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif',\n 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif',\n 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif',\n 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif',\n 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif',\n 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif',\n 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif',\n 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif',\n 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif',\n 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif',\n 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif',\n 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif',\n 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif',\n 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif',\n 'vigoroth.gif', 'vikavolt-totem.gif', 
'vikavolt.gif', 'vileplume.gif',\n 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif',\n 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif',\n 'vivillon-highplains.gif', 'vivillon-icysnow.gif',\n 'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif',\n 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif',\n 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif',\n 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif',\n 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif',\n 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif',\n 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif',\n 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif',\n 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif',\n 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif',\n 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif',\n 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif',\n 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif',\n 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif',\n 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif',\n 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif',\n 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif',\n 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif',\n 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif',\n 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif',\n 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif',\n 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']\nfor i in filename:\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\n file_name = str(i[:-4])\n dl_img(url, 'files/pokemon/front', file_name)\n",
"step-5": "import urllib.request\r\nfrom urllib.request import Request, urlopen\r\nimport json\r\n\r\nfrom requests import get\r\nfrom requests.exceptions import RequestException\r\nfrom contextlib import closing\r\nfrom bs4 import BeautifulSoup\r\n\r\n\"\"\"\r\nWeb Scraper ======================================================================\r\n\"\"\"\r\n\r\n\r\ndef simple_get(url):\r\n \"\"\"\r\n Attempts to get the content at `url` by making an HTTP GET request.\r\n If the content-type of response is some kind of HTML/XML, return the\r\n text content, otherwise return None.\r\n \"\"\"\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n\r\n except RequestException as e:\r\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None\r\n\r\n\r\ndef is_good_response(resp):\r\n \"\"\"\r\n Returns True if the response seems to be HTML, False otherwise.\r\n \"\"\"\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)\r\n\r\n\r\ndef log_error(e):\r\n \"\"\"\r\n It is always a good idea to log errors.\r\n This function just prints them, but you can\r\n make it do anything.\r\n \"\"\"\r\n print(e)\r\n\r\n\r\n\r\ndef save_json(file):\r\n with open('gif_list.txt', 'w') as f:\r\n f.write(file)\r\n\r\n\r\n\"\"\"\r\n Scraping\r\n\"\"\"\r\n\r\n# req = Request('https://play.pokemonshowdown.com/sprites/ani/', headers={'User-Agent': 'Mozilla/5.0'})\r\n# url = urlopen(req).read()\r\n# url = 'https://play.pokemonshowdown.com/sprites/ani/'\r\n# raw_html = simple_get(url)\r\n# soup = BeautifulSoup(url, 'lxml')\r\n# # a = soup.find_all('td', attrs={'valign': 'top'})\r\n# a = soup.find_all('a')\r\n# videolist = []\r\n# print(a)\r\n# for v in a:\r\n# tmp = v['href']\r\n# videolist.append(tmp)\r\n# filename = videolist[5:]\r\n# print(filename)\r\n\r\n\r\ndef 
dl_img(url, file_path, file_name):\r\n full_path = file_path + '/' + file_name + '.gif'\r\n opener = urllib.request.build_opener()\r\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\r\n urllib.request.install_opener(opener)\r\n urllib.request.urlretrieve(url, full_path)\r\n\r\n\r\nfilename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif', 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif', 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif', 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif', 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif', 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif', 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif', 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif', 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif', 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif', 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif', 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif', 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif', 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif', 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif', 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif', 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif', 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif', 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif', 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif', 'alcremie-rainbow-swirl-clover.gif', 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif', 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif', 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif', 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif', 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif', 
'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif', 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif', 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif', 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif', 'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif', 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif', 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif', 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif', 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif', 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif', 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif', 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif', 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif', 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif', 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif', 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif', 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif', 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif', 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif', 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif', 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif', 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif', 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif', 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif', 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif', 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif', 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif', 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif', 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif', 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif', 
'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif', 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif', 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif', 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif', 'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif', 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif', 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif', 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif', 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif', 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif', 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif', 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif', 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif', 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif', 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif', 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif', 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif', 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif', 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif', 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif', 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif', 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif', 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif', 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif', 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif', 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif', 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif', 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif', 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif', 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif', 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif', 
'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif', 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif', 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif', 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif', 'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif', 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif', 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif', 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif', 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif', 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif', 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif', 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif', 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif', 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif', 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif', 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif', 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif', 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif', 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif', 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif', 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif', 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif', 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif', 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif', 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif', 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif', 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif', 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif', 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif', 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif', 'emboar.gif', 'emolga.gif', 
'empoleon.gif', 'entei.gif', 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif', 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif', 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif', 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif', 'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif', 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif', 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif', 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif', 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif', 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif', 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif', 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif', 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif', 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif', 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif', 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif', 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif', 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif', 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif', 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif', 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif', 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif', 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif', 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif', 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif', 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif', 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif', 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif', 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif', 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif', 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 
'golbat-f.gif', 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif', 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif', 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif', 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif', 'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif', 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif', 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif', 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif', 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif', 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif', 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif', 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif', 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif', 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif', 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif', 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif', 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif', 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif', 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif', 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif', 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif', 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif', 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif', 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif', 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif', 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif', 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif', 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif', 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif', 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif', 'kerfluffle-f.gif', 
'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif', 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif', 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif', 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif', 'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif', 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif', 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif', 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif', 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif', 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif', 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif', 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif', 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif', 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif', 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif', 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif', 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif', 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif', 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif', 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif', 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif', 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif', 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif', 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif', 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif', 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif', 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif', 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif', 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif', 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif', 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif', 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 
'mawile-mega.gif', 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif', 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif', 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif', 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif', 'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif', 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif', 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif', 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif', 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif', 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif', 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif', 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif', 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif', 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif', 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif', 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif', 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif', 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif', 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif', 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif', 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif', 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif', 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif', 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif', 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif', 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif', 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif', 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif', 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif', 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif', 'oricorio-pompom.gif', 'oricorio-sensu.gif', 
'oricorio.gif', 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif', 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif', 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif', 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif', 'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif', 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif', 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif', 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif', 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif', 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif', 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif', 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif', 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif', 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif', 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif', 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif', 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif', 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif', 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif', 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif', 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif', 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif', 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif', 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif', 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif', 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif', 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif', 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif', 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif', 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif', 'purugly.gif', 'pyroak.gif', 
'pyroar-f.gif', 'pyroar.gif', 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif', 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif', 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif', 'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif', 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif', 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif', 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif', 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif', 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif', 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif', 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif', 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif', 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif', 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif', 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif', 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif', 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif', 'salamence-mega.gif', 'salamence.gif', 'salandit.gif', 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif', 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif', 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif', 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif', 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif', 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif', 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif', 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif', 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif', 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif', 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif', 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif', 'shedinja.gif', 'shelgon.gif', 
'shellder.gif', 'shellos-east.gif', 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif', 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif', 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif', 'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif', 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif', 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif', 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif', 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif', 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif', 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif', 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif', 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif', 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif', 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif', 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif', 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif', 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif', 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif', 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif', 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif', 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif', 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif', 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif', 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif', 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif', 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif', 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif', 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif', 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif', 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif', 'swablu.gif', 
'swadloon.gif', 'swalot-f.gif', 'swalot.gif', 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif', 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif', 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif', 'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif', 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif', 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif', 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif', 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif', 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif', 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif', 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif', 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif', 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif', 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif', 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif', 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif', 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif', 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif', 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif', 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif', 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif', 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif', 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif', 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif', 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif', 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif', 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif', 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif', 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif', 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif', 
'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif', 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif', 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif', 'vivillon-highplains.gif', 'vivillon-icysnow.gif', 'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif', 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif', 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif', 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif', 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif', 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif', 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif', 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif', 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif', 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif', 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif', 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif', 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif', 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif', 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif', 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif', 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif', 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif', 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif', 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif', 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif', 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']\r\n\r\n\r\nfor i in filename:\r\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\r\n file_name = str(i[:-4])\r\n dl_img(url, 'files/pokemon/front', file_name)\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(sslist)
print('입력하신 날짜의 10년 후 -> ', end='')
<|reserved_special_token_0|>
print(str(year) + '년', end='')
print(sslist[1] + '월', end='')
print(sslist[2] + '일')
<|reserved_special_token_1|>
ss = input('날짜: 년/월/일 입력-> ')
sslist = ss.split('/')
print(sslist)
print('입력하신 날짜의 10년 후 -> ', end='')
year = int(sslist[0]) + 10
print(str(year) + '년', end='')
print(sslist[1] + '월', end='')
print(sslist[2] + '일')
<|reserved_special_token_1|>
# Example: input 2019/10/08 -> prints the same date ten years later.

ss = input('날짜: 년/월/일 입력-> ')

# Split the slash-separated date into [year, month, day] strings.
parts = ss.split('/')
print(parts)

print('입력하신 날짜의 10년 후 -> ', end='')
future_year = int(parts[0]) + 10
print(f'{future_year}년{parts[1]}월{parts[2]}일')
|
flexible
|
{
"blob_id": "fb2ef5a90b6e2582450726905868dd1b78e36166",
"index": 5008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(sslist)\nprint('입력하신 날짜의 10년 후 -> ', end='')\n<mask token>\nprint(str(year) + '년', end='')\nprint(sslist[1] + '월', end='')\nprint(sslist[2] + '일')\n",
"step-3": "ss = input('날짜: 년/월/일 입력-> ')\nsslist = ss.split('/')\nprint(sslist)\nprint('입력하신 날짜의 10년 후 -> ', end='')\nyear = int(sslist[0]) + 10\nprint(str(year) + '년', end='')\nprint(sslist[1] + '월', end='')\nprint(sslist[2] + '일')\n",
"step-4": "# 2019/10/08 2019년10월8일\r\n\r\nss = input('날짜: 년/월/일 입력-> ')\r\n\r\nsslist = ss.split('/')\r\nprint(sslist)\r\n\r\nprint('입력하신 날짜의 10년 후 -> ', end='')\r\nyear = int(sslist[0]) + 10\r\nprint(str(year) + \"년\", end='')\r\nprint(sslist[1] + \"월\", end='')\r\nprint(sslist[2] + \"일\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
<|reserved_special_token_0|>
def queue_tablet(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': False})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
@login_required
def queue(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': True})
def queue_tablet(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': False})
<|reserved_special_token_1|>
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from orders.models import Setting
def search(request):
    """Render the search page."""
    template_name = 'ui/search.html'
    return render(request, template_name)
def search_printed(request):
    """Render the search page with the printer URL from settings.

    Reads the ``printer`` Setting row; ``print_url`` stays blank when the
    setting is absent.
    """
    print_url = ''
    # .first() returns None when no matching Setting row exists.
    setting = Setting.objects.filter(name='printer').first()
    if setting is not None:  # PEP 8: identity comparison, not `!= None`
        print_url = setting.value
    return render(request, 'ui/search.html', {'print_url': print_url})
@login_required
def queue(request):
    """Render the queue page (with footer) for authenticated users.

    Reads the admin printer URL from the ``printer_admin`` Setting row;
    ``print_url`` stays blank when the setting is absent.
    """
    print_url = ''
    # .first() returns None when no matching Setting row exists.
    setting = Setting.objects.filter(name='printer_admin').first()
    if setting is not None:  # PEP 8: identity comparison, not `!= None`
        print_url = setting.value
    return render(request, 'ui/queue.html', {'print_url': print_url,
        'footer': True})
def queue_tablet(request):
    """Render the footer-less queue page (tablet variant, no login required).

    Reads the admin printer URL from the ``printer_admin`` Setting row;
    ``print_url`` stays blank when the setting is absent.
    """
    print_url = ''
    # .first() returns None when no matching Setting row exists.
    setting = Setting.objects.filter(name='printer_admin').first()
    if setting is not None:  # PEP 8: identity comparison, not `!= None`
        print_url = setting.value
    return render(request, 'ui/queue.html', {'print_url': print_url,
        'footer': False})
|
flexible
|
{
"blob_id": "f16d43d9dfb3e9b9589fa92eb82aaa4c73fe48cd",
"index": 1264,
"step-1": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n<mask token>\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n",
"step-3": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n@login_required\ndef queue(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': True})\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n",
"step-4": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom orders.models import Setting\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n@login_required\ndef queue(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': True})\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import numpy as np
np.random.seed(1)  # fix the global NumPy RNG so simulation runs are reproducible
class MonteCarloGameDriver():
    """Plays a game by Monte Carlo tree sampling: at every turn it runs many
    random playouts from the current state and commits to the first move with
    the highest average final score.
    """

    def __init__(self):
        # Candidate swipe directions and the sampling weights used when
        # ordering them inside a random playout.
        self.default_moves = np.array(['w', 'a', 's', 'd'])
        self.probability_distribution = np.array([.25, .25, .25, .25])

    def run_game(self, simulation_size=20):
        """Play one complete game, choosing each move from `simulation_size`
        random playouts, then record the finished game via log_game()."""
        from game import GameLayout

        game = GameLayout()

        while game.active:
            # Simulate simulation_size games starting at this point.
            game_performance = self.simulate(game, simulation_size)

            if not game_performance:
                # No playout registered a first move: nothing left to try.
                game.end_game()
                print("After {} simulations, achieved max tile {} and score {}".format(simulation_size, game.final_layout.max(), game.score))
                break

            # Commit to the first move with the highest average final score.
            recommendation = max(game_performance, key=game_performance.get)
            game.swipe(recommendation)

        # Game is over; archive its statistics.
        self.log_game(game)

    def simulate(self, game, simulation_size):
        """Run `simulation_size` random playouts from `game`'s current state.

        Returns a dict mapping each first move observed to the mean final
        score of the playouts that started with it (may be empty).
        """
        from collections import defaultdict
        from copy import deepcopy

        scores_by_first_move = defaultdict(list)

        for _ in range(simulation_size):
            # Replay a fresh copy of the game to completion with random moves.
            game_copy = deepcopy(game)
            game_copy.reset()

            while game_copy.active:
                move_order = self.weighted_shuffle(self.default_moves, self.probability_distribution)
                for move in move_order:
                    try:
                        game_copy.swipe(move)
                        break
                    except Exception:
                        # Move was rejected for this layout; try the next one.
                        continue

            # Log the final score under the playout's first move.
            try:
                first_move = self.default_moves[(game_copy.moves[0] == 1).argmax()]
                scores_by_first_move[first_move].append(game_copy.score)
            except AttributeError:
                # Playout recorded no moves at all; nothing to score.
                pass

        # Collapse each first move's score list to its average.
        return {key: np.mean(val) for key, val in scores_by_first_move.items()}

    def weighted_shuffle(self, options, weights):
        """Return the elements of `options` in random order, drawing without
        replacement with probability proportional to `weights` at each step."""
        remaining = list(options)
        shuffled = [None] * len(remaining)
        for i in range(len(shuffled)):
            win_idx = np.random.choice(range(len(remaining)), p=weights)
            shuffled[i] = remaining[win_idx]
            del remaining[win_idx]
            # Drop the winner's weight and renormalise the survivors.
            weights = np.delete(weights, win_idx)
            weights = weights / weights.sum()
        return shuffled

    def log_game(self, game):
        """Append the finished `game`'s statistics to the driver's history,
        creating the history containers on the first call.

        Uses an explicit hasattr check instead of catching AttributeError so
        that a missing attribute on `game` itself raises instead of silently
        re-initialising the history.
        """
        assert not game.active  # must be a finished game
        if hasattr(self, 'final_scores'):
            self.final_scores = np.append(self.final_scores, game.score)
            self.num_moves = np.append(self.num_moves, game.num_moves)
            self.layouts.append(game.layouts)
            self.final_layouts.append(game.final_layout)
            self.moves.append(game.moves)
            self.scores.append(game.scores)
            self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())
            self.max_tile = np.append(self.max_tile, game.final_layout.max())
            self.wins = np.append(self.wins, game.won)
        else:
            # First finished game: initialise every history container.
            self.final_scores = np.array(game.score)
            self.num_moves = np.array(game.num_moves)
            self.layouts = [game.layouts]
            self.final_layouts = [game.final_layout]
            self.moves = [game.moves]
            self.scores = [game.scores]
            self.tile_sums = np.array(game.final_layout.sum())
            self.max_tile = np.array(game.final_layout.max())
            self.wins = np.array(game.won)
|
normal
|
{
"blob_id": "aeb986360c6990f9375f2552cbdeef595af815b4",
"index": 6432,
"step-1": "<mask token>\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n <mask token>\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = 
np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-2": "<mask token>\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n game = GameLayout()\n while game.active:\n game_performance = self.simulate(game, simulation_size)\n if len(game_performance) == 0:\n game.end_game()\n print('After {} simulations, achieved max tile {} and score {}'\n .format(simulation_size, game.final_layout.max(), game.\n score))\n break\n recommendation = max(game_performance, key=game_performance.get)\n game.swipe(recommendation)\n self.log_game(game)\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n 
self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-3": "<mask token>\nnp.random.seed(1)\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n game = GameLayout()\n while game.active:\n game_performance = self.simulate(game, simulation_size)\n if len(game_performance) == 0:\n game.end_game()\n print('After {} simulations, achieved max tile {} and score {}'\n .format(simulation_size, game.final_layout.max(), game.\n score))\n break\n recommendation = max(game_performance, key=game_performance.get)\n game.swipe(recommendation)\n self.log_game(game)\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n 
self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-4": "import numpy as np\nnp.random.seed(1)\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n game = GameLayout()\n while game.active:\n game_performance = self.simulate(game, simulation_size)\n if len(game_performance) == 0:\n game.end_game()\n print('After {} simulations, achieved max tile {} and score {}'\n .format(simulation_size, game.final_layout.max(), game.\n score))\n break\n recommendation = max(game_performance, key=game_performance.get)\n game.swipe(recommendation)\n self.log_game(game)\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n 
self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-5": "import numpy as np\nnp.random.seed(1)\n\nclass MonteCarloGameDriver(): \n def __init__(self):\n self.default_moves = np.array(['w','a','s','d'])\n self.probability_distribution = np.array([.25,.25,.25,.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n \n game = GameLayout()\n \n while game.active:\n # simulate simulation_size games starting at this point\n game_performance = self.simulate(game, simulation_size)\n \n if len(game_performance)==0:\n game.end_game()\n \n print(\"After {} simulations, achieved max tile {} and score {}\".format(simulation_size, game.final_layout.max(), game.score))\n break\n\n # return the first move with highest average score\n recommendation = max(game_performance, key=game_performance.get)\n \n game.swipe(recommendation)\n \n # game is over\n self.log_game(game)\n\n \n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n \n from copy import deepcopy\n\n for i in range(simulation_size):\n # run copy game multiple times, saving final scores and first moves each time\n game_copy = deepcopy(game)\n game_copy.reset()\n\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self.probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n # move didn't work, try next move\n continue\n # log final score and first move\n try:\n game_performance[self.default_moves[(game_copy.moves[0]==1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n \n # get average score for each first move\n game_performance = {key: np.mean(val) for key, val in game_performance.items()}\n \n return game_performance\n\n \n def weighted_shuffle(self, options,weights):\n lst = list(options)\n w = [None]*len(lst) # make a copy\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del 
lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights/weights.sum()\n return w\n\n\n def log_game(self, game):\n assert not game.active # must be a finished game\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class Posts(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Posts(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
@validates('status')
def validate_status(self, key, status):
if not status:
raise AssertionError('Status is required')
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError('{} is not supported status'.format(status))
return status
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Posts(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=
'postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
@validates('status')
def validate_status(self, key, status):
if not status:
raise AssertionError('Status is required')
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError('{} is not supported status'.format(status))
return status
<|reserved_special_token_1|>
from app import db
from datetime import datetime
from sqlalchemy.orm import validates
class Posts(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=
'postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
@validates('status')
def validate_status(self, key, status):
if not status:
raise AssertionError('Status is required')
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError('{} is not supported status'.format(status))
return status
<|reserved_special_token_1|>
from app import db
from datetime import datetime
from sqlalchemy.orm import validates
class Posts(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name='postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates("title")
def validate_title(self, key, title):
if not title:
raise AssertionError("Title is required")
if len(title) < 20:
raise AssertionError("Title must be at least 20 character")
return title
@validates("content")
def validate_content(self, key, content):
if not content:
raise AssertionError("Content is required")
if len(content) < 200:
raise AssertionError("Content must be at least 200 character")
return content
@validates("category")
def validate_category(self, key, category):
if not category:
raise AssertionError("Category is required")
if len(category) < 3:
raise AssertionError("Category must be at least 3 character")
return category
@validates("status")
def validate_status(self, key, status):
if not status:
raise AssertionError("Status is required")
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError("{} is not supported status".format(status))
return status
|
flexible
|
{
"blob_id": "29298ee7ddb4e524a23000abf86854d72f49954c",
"index": 1850,
"step-1": "<mask token>\n\n\nclass Posts(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Posts(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n\n @validates('status')\n def validate_status(self, key, status):\n if not status:\n raise AssertionError('Status is required')\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError('{} is not supported status'.format(status))\n return status\n",
"step-3": "<mask token>\n\n\nclass Posts(db.Model):\n id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)\n title = db.Column(db.String(200))\n content = db.Column(db.Text)\n category = db.Column(db.String(100))\n created_date = db.Column(db.DateTime, default=datetime.utcnow)\n updated_date = db.Column(db.DateTime, default=datetime.utcnow)\n status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=\n 'postsStatus'), default='Draft')\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n\n @validates('status')\n def validate_status(self, key, status):\n if not status:\n raise AssertionError('Status is required')\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError('{} is not supported status'.format(status))\n return status\n",
"step-4": "from app import db\nfrom datetime import datetime\nfrom sqlalchemy.orm import validates\n\n\nclass Posts(db.Model):\n id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)\n title = db.Column(db.String(200))\n content = db.Column(db.Text)\n category = db.Column(db.String(100))\n created_date = db.Column(db.DateTime, default=datetime.utcnow)\n updated_date = db.Column(db.DateTime, default=datetime.utcnow)\n status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=\n 'postsStatus'), default='Draft')\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n\n @validates('status')\n def validate_status(self, key, status):\n if not status:\n raise AssertionError('Status is required')\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError('{} is not supported status'.format(status))\n return status\n",
"step-5": "from app import db\nfrom datetime import datetime\nfrom sqlalchemy.orm import validates\n\nclass Posts(db.Model):\n id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)\n title = db.Column(db.String(200))\n content = db.Column(db.Text)\n category = db.Column(db.String(100))\n created_date = db.Column(db.DateTime, default=datetime.utcnow)\n updated_date = db.Column(db.DateTime, default=datetime.utcnow)\n status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name='postsStatus'), default='Draft')\n \n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n \n @validates(\"title\")\n def validate_title(self, key, title):\n if not title: \n raise AssertionError(\"Title is required\")\n if len(title) < 20:\n raise AssertionError(\"Title must be at least 20 character\")\n return title\n \n @validates(\"content\")\n def validate_content(self, key, content):\n if not content: \n raise AssertionError(\"Content is required\")\n if len(content) < 200:\n raise AssertionError(\"Content must be at least 200 character\")\n return content\n \n @validates(\"category\")\n def validate_category(self, key, category):\n if not category: \n raise AssertionError(\"Category is required\")\n if len(category) < 3:\n raise AssertionError(\"Category must be at least 3 character\")\n return category\n \n @validates(\"status\")\n def validate_status(self, key, status):\n if not status: \n raise AssertionError(\"Status is required\")\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError(\"{} is not supported status\".format(status))\n return status\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#Aplicacion de la funcion super()
class Persona():
def __init__(self,nombre,edad,lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print("Nombre: ",self.nombre," Edad: ", self.edad," Lugar de residencia: ",self.residencia)
def hola(self):
print("Hola Mundo")
class Empleado(Persona):
def __init__(self,salario,antiguedad,nombre_empleado,edad_empleado,residencia_empleado):
super().__init__(nombre_empleado,edad_empleado,residencia_empleado)#Hace la llamada al constructor de la clase padre que esta heredando
self.salario = salario
self.antiguedad_persona=antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print("Salario: " ,self.salario, "Antiguedad: ",self.antiguedad_persona)
Antonio = Persona("Alex",23,"Merida")
Antonio.descripcion()
print(isinstance(Antonio,Empleado))
#Principio de sustitucion
#consiste en plantearse las siguientes preguntas:
#es siempre un o una
#funcion isinstance()--> nos informa si un objeto es instancia de una clase determinada devuelve verdadero o falso
|
normal
|
{
"blob_id": "92a50bcdbb4c03d1a4813a93c2e0986250516f14",
"index": 1117,
"step-1": "class Persona:\n <mask token>\n <mask token>\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\n",
"step-2": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n <mask token>\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\n",
"step-3": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n\n def descripcion(self):\n print('Nombre: ', self.nombre, ' Edad: ', self.edad,\n ' Lugar de residencia: ', self.residencia)\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\nAntonio.descripcion()\nprint(isinstance(Antonio, Empleado))\n",
"step-4": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n\n def descripcion(self):\n print('Nombre: ', self.nombre, ' Edad: ', self.edad,\n ' Lugar de residencia: ', self.residencia)\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\nAntonio = Persona('Alex', 23, 'Merida')\nAntonio.descripcion()\nprint(isinstance(Antonio, Empleado))\n",
"step-5": "\n\n#Aplicacion de la funcion super()\n\nclass Persona():\n def __init__(self,nombre,edad,lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n \n def descripcion(self):\n print(\"Nombre: \",self.nombre,\" Edad: \", self.edad,\" Lugar de residencia: \",self.residencia)\n \n def hola(self):\n print(\"Hola Mundo\")\n\nclass Empleado(Persona):\n\n def __init__(self,salario,antiguedad,nombre_empleado,edad_empleado,residencia_empleado):\n\n super().__init__(nombre_empleado,edad_empleado,residencia_empleado)#Hace la llamada al constructor de la clase padre que esta heredando\n self.salario = salario\n self.antiguedad_persona=antiguedad\n\n super().hola()\n \n def descripcion(self):\n super().descripcion()\n print(\"Salario: \" ,self.salario, \"Antiguedad: \",self.antiguedad_persona)\n\n\nAntonio = Persona(\"Alex\",23,\"Merida\")\nAntonio.descripcion()\n\nprint(isinstance(Antonio,Empleado))\n\n\n#Principio de sustitucion\n#consiste en plantearse las siguientes preguntas:\n\n#es siempre un o una\n\n#funcion isinstance()--> nos informa si un objeto es instancia de una clase determinada devuelve verdadero o falso\n\n\n\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
#!/usr/bin/env python
# Standardised set up
import RPi.GPIO as GPIO # External module imports GPIO
import time # Library to slow or give a rest to the script
import timeit # Alternative timing library for platform specific timing
import sys # Library to access program arguments and call exits
import os # Library provides functionality to clear screen
import random
import datetime
import collections
import threading
from Queue import Queue
# Pin definiton using Broadcom scheme
solenoid1 = 23 # GPIO 16
solenoid2 = 24 # GPIO 18
solenoid3 = 4 # GPIO 07
solenoid4 = 17 # GPIO 11
motor1 = 18 # GPIO 12
led1 = 25 # GPIO 22
switch1 = 6 # GPIO 31
switch2 = 13 # GPIO 33
# Pin setup
GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
GPIO.setup(solenoid1, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid2, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid3, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid4, GPIO.OUT) # set as I/O output
GPIO.setup(led1, GPIO.OUT) # set as I/O output
GPIO.setup(motor1, GPIO.OUT) # set as I/O output
motor1pwm = GPIO.PWM(motor1,100) # set pwm on motor1 pin
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\033[95m'
cyan = '\033[96m'
darkcyan = '\033[36m'
blue = '\033[94m'
green = '\033[92m'
yellow = '\033[93m'
red = '\033[91m'
bold = '\033[1m'
underline = '\033[4m'
end = '\033[0m'
|
normal
|
{
"blob_id": "4e9fd3ee2a78fae164d9f38704443ac5b2f4c11c",
"index": 1189,
"step-1": "<mask token>\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(solenoid1, GPIO.OUT)\nGPIO.setup(solenoid2, GPIO.OUT)\nGPIO.setup(solenoid3, GPIO.OUT)\nGPIO.setup(solenoid4, GPIO.OUT)\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(motor1, GPIO.OUT)\n<mask token>\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-3": "<mask token>\nsolenoid1 = 23\nsolenoid2 = 24\nsolenoid3 = 4\nsolenoid4 = 17\nmotor1 = 18\nled1 = 25\nswitch1 = 6\nswitch2 = 13\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(solenoid1, GPIO.OUT)\nGPIO.setup(solenoid2, GPIO.OUT)\nGPIO.setup(solenoid3, GPIO.OUT)\nGPIO.setup(solenoid4, GPIO.OUT)\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(motor1, GPIO.OUT)\nmotor1pwm = GPIO.PWM(motor1, 100)\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-4": "import RPi.GPIO as GPIO\nimport time\nimport timeit\nimport sys\nimport os\nimport random\nimport datetime\nimport collections\nimport threading\nfrom Queue import Queue\nsolenoid1 = 23\nsolenoid2 = 24\nsolenoid3 = 4\nsolenoid4 = 17\nmotor1 = 18\nled1 = 25\nswitch1 = 6\nswitch2 = 13\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(solenoid1, GPIO.OUT)\nGPIO.setup(solenoid2, GPIO.OUT)\nGPIO.setup(solenoid3, GPIO.OUT)\nGPIO.setup(solenoid4, GPIO.OUT)\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(motor1, GPIO.OUT)\nmotor1pwm = GPIO.PWM(motor1, 100)\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-5": "#!/usr/bin/env python\n\n# Standardised set up\nimport RPi.GPIO as GPIO # External module imports GPIO\nimport time # Library to slow or give a rest to the script\nimport timeit # Alternative timing library for platform specific timing\nimport sys # Library to access program arguments and call exits\nimport os # Library provides functionality to clear screen\nimport random\nimport datetime\nimport collections\nimport threading\nfrom Queue import Queue\n\n# Pin definiton using Broadcom scheme\nsolenoid1 = 23 # GPIO 16\nsolenoid2 = 24 # GPIO 18\nsolenoid3 = 4 # GPIO 07\nsolenoid4 = 17 # GPIO 11\nmotor1 = 18 # GPIO 12\nled1 = 25 # GPIO 22\nswitch1 = 6 # GPIO 31\nswitch2 = 13 # GPIO 33\n\n# Pin setup\nGPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme\nGPIO.setup(solenoid1, GPIO.OUT) # set as I/O output\nGPIO.setup(solenoid2, GPIO.OUT) # set as I/O output\nGPIO.setup(solenoid3, GPIO.OUT) # set as I/O output\nGPIO.setup(solenoid4, GPIO.OUT) # set as I/O output\nGPIO.setup(led1, GPIO.OUT) # set as I/O output\nGPIO.setup(motor1, GPIO.OUT) # set as I/O output\nmotor1pwm = GPIO.PWM(motor1,100) # set pwm on motor1 pin\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nclass colour:\n purple = '\\033[95m'\n cyan = '\\033[96m'\n darkcyan = '\\033[36m'\n blue = '\\033[94m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n red = '\\033[91m'\n bold = '\\033[1m'\n underline = '\\033[4m'\n end = '\\033[0m'\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Node:
def __init__ (self, val):
self.childleft = None
self.childright = None
self.nodedata = val
root = Node("Kaif")
root.childleft = Node("name")
root.childright = Node("!")
root.childleft.childleft = Node("My")
root.childleft.childright = Node("is")
message = input("Solve In order traversal, Type root: ")
print()
def trying():
if message == "root":
def InOrd(root):
if root:
InOrd(root.childleft)
print(root.nodedata)
InOrd(root.childright)
InOrd(root)
trying()
|
normal
|
{
"blob_id": "73e4346007acae769b94a55ef53a48a9d3325002",
"index": 7262,
"step-1": "class Node:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n\n<mask token>\n\n\ndef trying():\n if message == 'root':\n\n def InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n InOrd(root)\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n\n<mask token>\nprint()\n\n\ndef trying():\n if message == 'root':\n\n def InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n InOrd(root)\n\n\ntrying()\n",
"step-4": "class Node:\n\n def __init__(self, val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n\nroot = Node('Kaif')\nroot.childleft = Node('name')\nroot.childright = Node('!')\nroot.childleft.childleft = Node('My')\nroot.childleft.childright = Node('is')\nmessage = input('Solve In order traversal, Type root: ')\nprint()\n\n\ndef trying():\n if message == 'root':\n\n def InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n InOrd(root)\n\n\ntrying()\n",
"step-5": "class Node:\r\n def __init__ (self, val):\r\n self.childleft = None\r\n self.childright = None\r\n self.nodedata = val\r\n\r\nroot = Node(\"Kaif\")\r\nroot.childleft = Node(\"name\")\r\nroot.childright = Node(\"!\")\r\nroot.childleft.childleft = Node(\"My\")\r\nroot.childleft.childright = Node(\"is\")\r\n\r\n\r\n\r\nmessage = input(\"Solve In order traversal, Type root: \")\r\nprint()\r\ndef trying():\r\n if message == \"root\":\r\n def InOrd(root):\r\n if root:\r\n InOrd(root.childleft)\r\n print(root.nodedata)\r\n InOrd(root.childright)\r\n InOrd(root)\r\ntrying()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def print_results(r):
"""Checks for errors and prints the results of a request"""
print(r.text)
if r.status_code != 200:
raise RuntimeError(
'Could not obtain data from the BSE. Check the error information above'
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_results(r):
"""Checks for errors and prints the results of a request"""
print(r.text)
if r.status_code != 200:
raise RuntimeError(
'Could not obtain data from the BSE. Check the error information above'
)
<|reserved_special_token_0|>
print_results(r)
<|reserved_special_token_0|>
print_results(r)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
main_bse_url = 'http://basissetexchange.org'
base_url = os.environ.get('BSE_API_URL', main_bse_url)
def print_results(r):
"""Checks for errors and prints the results of a request"""
print(r.text)
if r.status_code != 200:
raise RuntimeError(
'Could not obtain data from the BSE. Check the error information above'
)
headers = {'User-Agent': 'BSE Example Python Script', 'From': 'bse@molssi.org'}
r = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem', headers=
headers)
print_results(r)
params = {'elements': [1, 6, 7]}
r = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4', params=params,
headers=headers)
print_results(r)
<|reserved_special_token_1|>
import os
import requests
main_bse_url = 'http://basissetexchange.org'
base_url = os.environ.get('BSE_API_URL', main_bse_url)
def print_results(r):
"""Checks for errors and prints the results of a request"""
print(r.text)
if r.status_code != 200:
raise RuntimeError(
'Could not obtain data from the BSE. Check the error information above'
)
headers = {'User-Agent': 'BSE Example Python Script', 'From': 'bse@molssi.org'}
r = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem', headers=
headers)
print_results(r)
params = {'elements': [1, 6, 7]}
r = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4', params=params,
headers=headers)
print_results(r)
<|reserved_special_token_1|>
#!/usr/bin/env python3
import os
import requests
# This is the main url of the BSE API
# THIS WILL CHANGE TO HTTPS IN THE FUTURE
# HTTPS IS RECOMMENDED
main_bse_url = "http://basissetexchange.org"
# This allows for overriding the URL via an environment variable
# Feel free to just use the base_url below
base_url = os.environ.get('BSE_API_URL', main_bse_url)
def print_results(r):
'''Checks for errors and prints the results of a request'''
# r.text will contain the formatted output as a string
print(r.text)
if r.status_code != 200:
raise RuntimeError("Could not obtain data from the BSE. Check the error information above")
############################################
# Change the user agent and 'from' headers
############################################
# Change these to something more
# descriptive if you would like. This lets us know
# how many different people/groups are using the site
# Valid email is COMPLETELY OPTIONAL. Put whatever
# you would like in there, or leave it as is. If you
# do put your email there, we will never give it
# away or email you, except in case we think errors in
# your script are causing us problems.
headers = {
'User-Agent': 'BSE Example Python Script',
'From': 'bse@molssi.org'
}
###############################################################
# Get the def2-QZVP basis for all elements in nwchem format
# Note that basis set names and formats are not case sensitive
###############################################################
r = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem',
headers=headers
)
print_results(r)
######################################################################
# Get the cc-pvqz basis for hydrogen and carbon in gaussian94 format
######################################################################
# Elements can be passed a variety of ways. Here, I'm just
# passing a list of Z numbers. See elements.py for other ways
# you can specify elements
params = {'elements': [1, 6, 7]}
r = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4',
params=params,
headers=headers
)
print_results(r)
|
flexible
|
{
"blob_id": "168a76fd3bb43afe26a6a217e90f48704b4f2042",
"index": 6738,
"step-1": "<mask token>\n\n\ndef print_results(r):\n \"\"\"Checks for errors and prints the results of a request\"\"\"\n print(r.text)\n if r.status_code != 200:\n raise RuntimeError(\n 'Could not obtain data from the BSE. Check the error information above'\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef print_results(r):\n \"\"\"Checks for errors and prints the results of a request\"\"\"\n print(r.text)\n if r.status_code != 200:\n raise RuntimeError(\n 'Could not obtain data from the BSE. Check the error information above'\n )\n\n\n<mask token>\nprint_results(r)\n<mask token>\nprint_results(r)\n",
"step-3": "<mask token>\nmain_bse_url = 'http://basissetexchange.org'\nbase_url = os.environ.get('BSE_API_URL', main_bse_url)\n\n\ndef print_results(r):\n \"\"\"Checks for errors and prints the results of a request\"\"\"\n print(r.text)\n if r.status_code != 200:\n raise RuntimeError(\n 'Could not obtain data from the BSE. Check the error information above'\n )\n\n\nheaders = {'User-Agent': 'BSE Example Python Script', 'From': 'bse@molssi.org'}\nr = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem', headers=\n headers)\nprint_results(r)\nparams = {'elements': [1, 6, 7]}\nr = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4', params=params,\n headers=headers)\nprint_results(r)\n",
"step-4": "import os\nimport requests\nmain_bse_url = 'http://basissetexchange.org'\nbase_url = os.environ.get('BSE_API_URL', main_bse_url)\n\n\ndef print_results(r):\n \"\"\"Checks for errors and prints the results of a request\"\"\"\n print(r.text)\n if r.status_code != 200:\n raise RuntimeError(\n 'Could not obtain data from the BSE. Check the error information above'\n )\n\n\nheaders = {'User-Agent': 'BSE Example Python Script', 'From': 'bse@molssi.org'}\nr = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem', headers=\n headers)\nprint_results(r)\nparams = {'elements': [1, 6, 7]}\nr = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4', params=params,\n headers=headers)\nprint_results(r)\n",
"step-5": "#!/usr/bin/env python3\n\nimport os\nimport requests\n\n# This is the main url of the BSE API\n# THIS WILL CHANGE TO HTTPS IN THE FUTURE\n# HTTPS IS RECOMMENDED\nmain_bse_url = \"http://basissetexchange.org\"\n\n# This allows for overriding the URL via an environment variable\n# Feel free to just use the base_url below\nbase_url = os.environ.get('BSE_API_URL', main_bse_url)\n\n\ndef print_results(r):\n '''Checks for errors and prints the results of a request'''\n\n # r.text will contain the formatted output as a string\n print(r.text)\n if r.status_code != 200:\n raise RuntimeError(\"Could not obtain data from the BSE. Check the error information above\")\n\n\n\n############################################\n# Change the user agent and 'from' headers\n############################################\n\n# Change these to something more\n# descriptive if you would like. This lets us know\n# how many different people/groups are using the site\n\n# Valid email is COMPLETELY OPTIONAL. Put whatever\n# you would like in there, or leave it as is. If you\n# do put your email there, we will never give it\n# away or email you, except in case we think errors in\n# your script are causing us problems.\nheaders = {\n 'User-Agent': 'BSE Example Python Script',\n 'From': 'bse@molssi.org'\n}\n\n\n###############################################################\n# Get the def2-QZVP basis for all elements in nwchem format\n# Note that basis set names and formats are not case sensitive\n###############################################################\nr = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem',\n headers=headers\n )\n\nprint_results(r)\n\n\n######################################################################\n# Get the cc-pvqz basis for hydrogen and carbon in gaussian94 format\n######################################################################\n# Elements can be passed a variety of ways. Here, I'm just\n# passing a list of Z numbers. 
See elements.py for other ways\n# you can specify elements\nparams = {'elements': [1, 6, 7]}\nr = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4',\n params=params,\n headers=headers\n )\n\nprint_results(r)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
    """Remove the minimum number of parentheses so the result is valid."""

    def minRemoveToMakeValid(self, s: str) ->str:
        """Return *s* with the fewest '(' / ')' characters deleted so that
        every remaining parenthesis is matched. Non-paren characters are
        always kept, in order.
        """
        open_stack = []  # indices of '(' that are not yet matched
        drop = set()     # indices of unmatched ')' found so far
        for idx, ch in enumerate(s):
            if ch == '(':
                open_stack.append(idx)
            elif ch == ')':
                if open_stack:
                    open_stack.pop()
                else:
                    drop.add(idx)  # stray ')': nothing to match it
        # any '(' still open at the end is unmatched as well
        drop.update(open_stack)
        return ''.join(ch for idx, ch in enumerate(s) if idx not in drop)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
    """LeetCode 1249: minimum remove to make valid parentheses."""

    def minRemoveToMakeValid(self, s: str) ->str:
        """Delete the fewest parentheses from *s* so every pair matches."""
        chars = list(s)
        pending = []  # indices of '(' with no matching ')' yet
        for pos, ch in enumerate(chars):
            if ch == '(':
                pending.append(pos)
            elif ch == ')':
                if pending:
                    pending.pop()
                else:
                    chars[pos] = ''  # stray ')': blank it out
        for pos in pending:
            chars[pos] = ''  # leftover '(': blank it out
        return ''.join(chars)


Solution().minRemoveToMakeValid('L(ee)(t(()coe')
<|reserved_special_token_1|>
class Solution:
    """Strip the minimum number of parentheses so *s* becomes valid."""

    def minRemoveToMakeValid(self, s: str) -> str:
        """Return a valid-parentheses string obtained by deleting the
        fewest '(' / ')' characters from *s* (two-pass sweep).
        """
        def strip_extra(seq, opener, closer):
            # Drop every `closer` that has no earlier unmatched `opener`.
            depth = 0
            kept = []
            for ch in seq:
                if ch == opener:
                    depth += 1
                elif ch == closer:
                    if depth == 0:
                        continue
                    depth -= 1
                kept.append(ch)
            return kept

        # Forward pass removes stray ')'; reversed pass removes stray '('.
        forward = strip_extra(s, '(', ')')
        backward = strip_extra(reversed(forward), ')', '(')
        return "".join(reversed(backward))


Solution().minRemoveToMakeValid('L(ee)(t(()coe')
|
flexible
|
{
"blob_id": "1bab6b039462bb5762aa588d5ba7c3e74362d0a7",
"index": 823,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def minRemoveToMakeValid(self, s: str) ->str:\n bracketsToRemove = set()\n stack = []\n for i, c in enumerate(s):\n if c not in '()':\n continue\n if c == '(':\n stack.append(i)\n elif not stack:\n bracketsToRemove.add(i)\n else:\n stack.pop()\n bracketsToRemove = bracketsToRemove.union(set(stack))\n stringBuilder = []\n for i, c in enumerate(s):\n if i not in bracketsToRemove:\n stringBuilder.append(c)\n return ''.join(stringBuilder)\n\n\n<mask token>\n",
"step-4": "class Solution:\n\n def minRemoveToMakeValid(self, s: str) ->str:\n bracketsToRemove = set()\n stack = []\n for i, c in enumerate(s):\n if c not in '()':\n continue\n if c == '(':\n stack.append(i)\n elif not stack:\n bracketsToRemove.add(i)\n else:\n stack.pop()\n bracketsToRemove = bracketsToRemove.union(set(stack))\n stringBuilder = []\n for i, c in enumerate(s):\n if i not in bracketsToRemove:\n stringBuilder.append(c)\n return ''.join(stringBuilder)\n\n\nSolution().minRemoveToMakeValid('L(ee)(t(()coe')\n",
"step-5": "class Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n bracketsToRemove = set()\n stack = []\n \n for i, c in enumerate(s):\n \n if c not in '()':\n continue\n if c == '(':\n stack.append(i)\n elif not stack:\n bracketsToRemove.add(i)\n else:\n stack.pop()\n \n bracketsToRemove = bracketsToRemove.union(set(stack))\n stringBuilder = []\n for i,c in enumerate(s):\n if i not in bracketsToRemove:\n stringBuilder.append(c)\n \n return \"\".join(stringBuilder)\n\n\nSolution().minRemoveToMakeValid('L(ee)(t(()coe')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
    """LeetCode 2110: count smooth descent periods of a stock."""

    def getDescentPeriods(self, prices: List[int]) ->int:
        """Count contiguous subarrays where each day's price is exactly
        one lower than the previous day's. Every single day counts as
        one period; a run of length k contributes k*(k+1)//2 in total,
        accumulated incrementally.
        """
        total = 1       # prices[0] alone is always a period
        run_length = 1  # length of the current descent run
        for prev, cur in zip(prices, prices[1:]):
            if cur == prev - 1:
                run_length += 1
            else:
                run_length = 1
            total += run_length
        return total
<|reserved_special_token_1|>
class Solution:
    """Count the number of smooth descent periods in *prices*."""

    def getDescentPeriods(self, prices: List[int]) -> int:
        """A smooth descent period is a contiguous subarray in which
        every day's price is exactly one lower than the previous day's.
        Each single day counts as a period of its own.
        """
        result, streak = 1, 1
        it = iter(prices)
        prev = next(it, None)  # first day (if any) seeds the run
        for price in it:
            streak = streak + 1 if price == prev - 1 else 1
            result += streak
            prev = price
        return result
|
flexible
|
{
"blob_id": "d10468d2d0aefa19a7d225bfffad03ec6cb6e082",
"index": 4079,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def getDescentPeriods(self, prices: List[int]) ->int:\n ans = 1\n dp = 1\n for i in range(1, len(prices)):\n if prices[i] == prices[i - 1] - 1:\n dp += 1\n else:\n dp = 1\n ans += dp\n return ans\n",
"step-4": "class Solution:\n def getDescentPeriods(self, prices: List[int]) -> int:\n ans = 1 # prices[0]\n dp = 1\n\n for i in range(1, len(prices)):\n if prices[i] == prices[i - 1] - 1:\n dp += 1\n else:\n dp = 1\n ans += dp\n\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import cv2
def optical_flow_from_video(video_path="/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi"):
    """Track Shi-Tomasi corners through a video with Lucas-Kanade optical flow.

    Draws each tracked corner's trajectory on top of the frames and shows
    the result in a window until the video ends or 'q' is pressed.

    :param video_path: path of the video file to read (default keeps the
        original hard-coded Avenue-dataset clip for backward compatibility).
    """
    cap = cv2.VideoCapture(video_path)
    # Shi-Tomasi corner detection parameters
    feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    # Lucas-Kanade parameters; maxLevel is the number of pyramid levels
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # one random colour per tracked corner
    color = np.random.randint(0, 255, (100, 3))
    # first frame: detect the corners that will be tracked
    _, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # overlay image accumulating the drawn trajectories
    mask = np.zeros_like(old_frame)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # new positions of the tracked corners
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        if p1 is None:  # every track was lost: nothing left to follow
            break
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        # draw each corner's motion segment
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = (int(v) for v in new.ravel())  # OpenCV drawing needs int coords
            c, d = (int(v) for v in old.ravel())
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        if cv2.waitKey(30) & 0xff == ord("q"):
            break
        # roll the tracking state forward to the current frame
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
    cv2.destroyAllWindows()
    cap.release()
def optical_flow_from_camera(camera_index=0):
    """Track Shi-Tomasi corners from a live camera with Lucas-Kanade flow.

    The feed is mirrored horizontally and each tracked corner's trajectory
    is drawn on top of the frames until the stream ends or 'q' is pressed.

    :param camera_index: OpenCV capture-device index (default 0 keeps the
        original behaviour).
    """
    cap = cv2.VideoCapture(camera_index)
    # Shi-Tomasi corner detection parameters
    feature_params = dict(maxCorners=100, qualityLevel=0.3,
                          minDistance=7, blockSize=7)
    # Lucas-Kanade parameters; maxLevel is the number of pyramid levels
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # one random colour per tracked corner
    color = np.random.randint(0, 255, (100, 3))
    # first frame: detect the corners that will be tracked
    _, old_frame = cap.read()
    old_frame = cv2.flip(old_frame, 1)  # mirror so motion matches the viewer
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # overlay image accumulating the drawn trajectories
    mask = np.zeros_like(old_frame)
    while True:
        ret, frame = cap.read()
        # BUG FIX: the original flipped the frame *before* checking `ret`,
        # crashing on a failed read (frame is None at end of stream).
        if not ret:
            break
        frame = cv2.flip(frame, 1)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # new positions of the tracked corners
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        if p1 is None:  # every track was lost
            break
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        # draw each corner's motion segment
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = (int(v) for v in new.ravel())  # OpenCV drawing needs int coords
            c, d = (int(v) for v in old.ravel())
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        if cv2.waitKey(30) & 0xff == ord("q"):
            break
        # roll the tracking state forward
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
    cv2.destroyAllWindows()
    cap.release()
def optical_flow_from_camera_farneback2():
    """Dense Farneback optical flow from the default camera (mirrored).

    Displays the live frame next to an HSV visualisation of the flow field
    (hue = direction, value = magnitude) until the stream ends or 'q' is
    pressed.
    """
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)  # CAP_PROP_FRAME_WIDTH
    cap.set(4, 480)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    frame1 = cv2.flip(frame1, 1)  # mirror so motion matches the viewer
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow info
    while True:
        ret, frame2 = cap.read()
        if not ret:  # end of stream / camera unplugged
            break
        frame2 = cv2.flip(frame2, 1)
        gray_next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, gray_next, None, 0.5, 3, 15, 3, 5, 1.2, 1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # radians -> OpenCV hue (0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # BUG FIX: the original compared the key code to the *string* "q"
        # (always False), so pressing 'q' could never exit the loop.
        if cv2.waitKey(1) & 0xff == ord("q"):
            break
        prvs = gray_next
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_from_camera_farneback(flip=True, resize=True):
    """Dense Farneback optical flow over a video stream.

    Shows the raw frame, the HSV-encoded flow (hue = direction,
    value = magnitude), and both side by side, until the stream ends or
    'q' is pressed.

    :param flip: mirror the frames horizontally.
    :param resize: force frames to 640x480 before processing.
    """
    # NOTE(review): despite the name this reads a dataset video file,
    # matching the original behaviour.
    cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
    width = 640
    height = 480
    cap.set(3, width)   # CAP_PROP_FRAME_WIDTH (ignored for files)
    cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    if flip:
        frame1 = cv2.flip(frame1, 1)
    if resize:
        frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow info
    while True:
        try:
            ret, frame2 = cap.read()
            if flip:
                frame2 = cv2.flip(frame2, 1)
            if resize:
                frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
            cv2.imshow('frame1', frame2)
        except Exception:
            # cap.read() returned None at end of stream: flip/resize raise
            break
        gray_next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, gray_next, None, 0.5, 3, 20, 3, 5, 1.2, 1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # radians -> OpenCV hue (0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2', rgb)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # BUG FIX: the original compared the key code to the *string* "q"
        # (always False), so the quit key never worked.
        if cv2.waitKey(1) & 0xff == ord("q"):
            break
        prvs = gray_next
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_from_camera_farneback_and_write_video():
    """Compute Farneback flow over a video file and dump frames to disk.

    For every frame the source image and its HSV flow visualisation are
    concatenated side by side, shown in a window, and written to
    ``test2/<n>.jpg``.
    """
    cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
    width = 640
    height = 480
    cap.set(3, width)   # CAP_PROP_FRAME_WIDTH (ignored for files)
    cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow info
    i = 0
    while True:
        ret, frame2 = cap.read()
        if not ret:  # end of stream (original relied on a broad except)
            break
        frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
        gray_next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, gray_next, None, 0.5, 3, 20, 3, 5, 1.2, 1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # radians -> OpenCV hue (0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        i += 1
        # NOTE(review): assumes the output directory "test2" already exists.
        cv2.imwrite("{}/{}.jpg".format("test2", str(i)), result)
        # BUG FIX: the original compared the key code to the *string* "q"
        # (always False), so the quit key never worked.
        if cv2.waitKey(1) & 0xff == ord("q"):
            break
        prvs = gray_next
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_farneback_and_write_video():
    """Farneback flow on a cropped region of a video, writing every other frame.

    Processes every second frame of ``./yaogan/chen_1.mp4``, crops a fixed
    500x500 window, shows frame + flow side by side, and saves the result
    under ``test2/``.
    """
    def crop(frame):
        # fixed region of interest inside the (large) source frame
        start_x = 800
        end_x = start_x + 500
        start_y = 1500
        end_y = start_y + 500
        return frame[start_x:end_x, start_y:end_y]

    cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
    ret, frame1 = cap.read()
    frame1 = crop(frame1)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow info
    i = 0
    while True:
        ret, frame2 = cap.read()
        if not ret:  # end of stream (original relied on a broad except)
            break
        i += 1
        if i % 2 != 0:
            continue  # process every second frame only
        frame2 = crop(frame2)
        gray_next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, gray_next, None, pyr_scale=0.5, levels=3,
                                            winsize=7, iterations=3, poly_n=5,
                                            poly_sigma=1.2, flags=1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # radians -> OpenCV hue (0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # NOTE(review): `i // 3` with i stepping by 2 produces colliding
        # filenames (i=6 and i=8 both map to 2) — probably meant `i // 2`;
        # kept as-is to preserve the original output names.
        cv2.imwrite("{}/{}.jpg".format("test2", str(i // 3)), result)
        # BUG FIX: the original compared the key code to the *string* "q"
        # (always False), so the quit key never worked.
        if cv2.waitKey(1) & 0xff == ord("q"):
            break
        prvs = gray_next
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
    """Dense Farneback flow over the ShanghaiTech training clip.

    Shows the (optionally flipped/resized) frame, the HSV flow image
    (hue = direction, value = magnitude), and both side by side; runs at
    roughly 10 fps via ``waitKey(100)``. Quits on end of stream or 'q'.

    :param flip: mirror the frames horizontally.
    :param resize: force frames to 800x500 before processing.
    """
    cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi")
    width = 800
    height = 500
    cap.set(3, width)   # CAP_PROP_FRAME_WIDTH (ignored for files)
    cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    if flip:
        frame1 = cv2.flip(frame1, 1)
    if resize:
        frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow info
    while True:
        try:
            ret, frame2 = cap.read()
            if flip:
                frame2 = cv2.flip(frame2, 1)
            if resize:
                frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
            cv2.imshow('frame1', frame2)
        except Exception:
            # cap.read() returned None at end of stream: flip/resize raise
            break
        gray_next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, gray_next, None, pyr_scale=0.5,
                                            levels=3, winsize=8, iterations=5,
                                            poly_n=5, poly_sigma=1.2, flags=1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # radians -> OpenCV hue (0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2', rgb)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # BUG FIX: the original compared the key code to the *string* "q"
        # (always False), so the quit key never worked.
        if cv2.waitKey(100) & 0xff == ord("q"):
            break
        prvs = gray_next
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Script entry point: run the cropped Farneback flow + frame-writer demo.
    optical_flow_farneback_and_write_video()
    pass
|
normal
|
{
"blob_id": "ae0547aa1af2d4dd73bb60154574e64e74107a58",
"index": 4062,
"step-1": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, 
cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\n<mask token>\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef 
optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, 
cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, 
cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\n<mask token>\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef 
optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, 
cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, 
cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n 
hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n 
cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = 
cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, 
cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n 
hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n 
cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = 
cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\nif __name__ == '__main__':\n optical_flow_farneback_and_write_video()\n pass\n",
"step-5": "import numpy as np\nimport cv2\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi\")\n\n # 设置 ShiTomasi 角点检测的参数\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n # 设置 lucas kanade 光流场的参数\n # maxLevel 为使用图像金字塔的层数\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # 产生随机的颜色值\n color = np.random.randint(0, 255, (100, 3))\n\n # 获取第一帧,并寻找其中的角点\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n # 创建一个掩膜为了后面绘制角点的光流轨迹\n mask = np.zeros_like(old_frame)\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 计算能够获取的角点的新位置\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # 绘制角点的轨迹\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xff == ord(\"q\"):\n break\n\n # 更新当前帧和当前角点的位置\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n\n pass\n\n cv2.destroyAllWindows()\n cap.release()\n\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n\n # 设置 ShiTomasi 角点检测的参数\n feature_params = dict(maxCorners=100, qualityLevel=0.3,\n minDistance=7, blockSize=7)\n # 设置 lucas kanade 光流场的参数\n # maxLevel 为使用图像金字塔的层数\n lk_params = dict(winSize=(15, 15), maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # 产生随机的颜色值\n color = np.random.randint(0, 255, (100, 3))\n\n # 获取第一帧,并寻找其中的角点\n _, 
old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n # 创建一个掩膜为了后面绘制角点的光流轨迹\n mask = np.zeros_like(old_frame)\n\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 计算能够获取的角点的新位置\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # 绘制角点的轨迹\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xff == ord(\"q\"):\n break\n\n # 更新当前帧和当前角点的位置\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n\n pass\n\n cv2.destroyAllWindows()\n cap.release()\n\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n\n cap.set(3, 640)\n cap.set(4, 480)\n\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n 
pass\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n # cap = cv2.VideoCapture('test.mp4')\n # cap = cv2.VideoCapture('test2.ts')\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi\")\n # cap = cv2.VideoCapture(0)\n\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n cv2.imshow('frame2', rgb)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n # cap = cv2.VideoCapture('eccv.avi')\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n i = 0\n\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), 
interpolation=cv2.INTER_CUBIC)\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n i += 1\n cv2.imwrite(\"{}/{}.jpg\".format(\"test2\", str(i)), result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n except Exception:\n break\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n # start_x = 1400\n # end_x = start_x + 600\n # start_y = 100\n # end_y = start_y + 700\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y: end_y]\n\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n i = 0\n\n while True:\n try:\n ret, frame2 = cap.read()\n\n i += 1\n if i % 2 != 0:\n continue\n\n frame2 = crop(frame2)\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3,\n winsize=7, iterations=3, poly_n=5, poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n cv2.imwrite(\"{}/{}.jpg\".format(\"test2\", str(i // 3)), result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n except Exception:\n break\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n 
pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n # cap = cv2.VideoCapture('test.mp4')\n # cap = cv2.VideoCapture('test2.ts')\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi\")\n # cap = cv2.VideoCapture(0)\n\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3, winsize=8,\n iterations=5, poly_n=5, poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n cv2.imshow('frame2', rgb)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(100) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\nif __name__ == '__main__':\n optical_flow_farneback_and_write_video()\n pass\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
def _check_tags(generator: InputOutputGenerator, name: str):
    """Verify every input example carries the tags its output requires.

    For each output pattern, count how often each tag occurs; each input
    example must contain at least that many occurrences of every tag.
    Raises on the first shortfall with a message naming the generator.
    """
    for output, inputs in generator.items():
        required = dict(collections.Counter(get_tags(output)))
        for position, example in enumerate(inputs, start=1):
            present = dict(collections.Counter(get_tags(example)))
            for tag, needed in required.items():
                # Quotes around a tag in the output are ignored when matching.
                tag = tag.replace("'", '')
                if tag not in present:
                    raise Exception(
                        f'missing {tag} in example {position} of {name} `{output}`'
                        )
                shortfall = needed - present[tag]
                if shortfall > 0:
                    raise Exception(
                        f'missing {shortfall} {tag} in example {position} of {name} `{output}`. '
                        +
                        f'Expected to find {needed} {tag}, found {present[tag]}.'
                        )
def _check_tag_name(tag):
    """Reject heading names that are not valid tags (letters and _ only)."""
    candidate = '#' + tag.strip()
    if re.fullmatch(tag_regex, candidate) is None:
        raise Exception('# %s is invalid, only letters and _ are allowed' % tag
            )
def _check_defined_twice(generators, tag):
if tag in generators:
raise Exception('# %s is being defined twice' % tag)
<|reserved_special_token_0|>
def _check_all_used_tags(generators):
    """Check that every tag referenced by any generator is defined.

    A reference is satisfied by a builtin generator or by any heading in
    the parsed document; anything else raises, listing the known tags.
    """
    known = ['#' + builtin for builtin in builtin_generators]
    known.extend('#' + defined for defined in generators.keys())
    for key, generator in generators.items():
        if type(generator) == list:
            # List generators name other tags directly, without the '#'.
            for tag in generator:
                if '#' + tag not in known:
                    raise Exception(
                        "- %s is used in # %s but it's not defined anywhere. Defined tags are %s"
                        % (tag, key, ', '.join(known)))
        else:
            # Input/output generators embed '#tag' references in each output.
            for output in generator.keys():
                for tag in get_tags(output):
                    if tag not in known:
                        raise Exception(
                            "%s is used in # %s but it's not defined anywhere. Defined tags are %s"
                            % (tag, key, ', '.join(known)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _check_tags(generator: InputOutputGenerator, name: str):
    """Verify every input example carries the tags its output requires.

    For each output pattern, count how often each tag occurs; each input
    example must contain at least that many occurrences of every tag.
    Raises on the first shortfall with a message naming the generator.
    """
    for output, inputs in generator.items():
        required = dict(collections.Counter(get_tags(output)))
        for position, example in enumerate(inputs, start=1):
            present = dict(collections.Counter(get_tags(example)))
            for tag, needed in required.items():
                # Quotes around a tag in the output are ignored when matching.
                tag = tag.replace("'", '')
                if tag not in present:
                    raise Exception(
                        f'missing {tag} in example {position} of {name} `{output}`'
                        )
                shortfall = needed - present[tag]
                if shortfall > 0:
                    raise Exception(
                        f'missing {shortfall} {tag} in example {position} of {name} `{output}`. '
                        +
                        f'Expected to find {needed} {tag}, found {present[tag]}.'
                        )
def _check_tag_name(tag):
    """Reject heading names that are not valid tags (letters and _ only)."""
    candidate = '#' + tag.strip()
    if re.fullmatch(tag_regex, candidate) is None:
        raise Exception('# %s is invalid, only letters and _ are allowed' % tag
            )
def _check_defined_twice(generators, tag):
if tag in generators:
raise Exception('# %s is being defined twice' % tag)
def _check_previous_generator(generators, name):
if not name:
return
if type(generators[name]) == list:
return
if len(generators[name]) == 0:
raise Exception('output missing on # %s' % name)
for index, inputs in enumerate(generators[name].values()):
if len(inputs) == 0:
raise Exception(
f'input examples missing on # {name}, on example #{index}')
def _check_all_used_tags(generators):
    """Check that every tag referenced by any generator is defined.

    A reference is satisfied by a builtin generator or by any heading in
    the parsed document; anything else raises, listing the known tags.
    """
    known = ['#' + builtin for builtin in builtin_generators]
    known.extend('#' + defined for defined in generators.keys())
    for key, generator in generators.items():
        if type(generator) == list:
            # List generators name other tags directly, without the '#'.
            for tag in generator:
                if '#' + tag not in known:
                    raise Exception(
                        "- %s is used in # %s but it's not defined anywhere. Defined tags are %s"
                        % (tag, key, ', '.join(known)))
        else:
            # Input/output generators embed '#tag' references in each output.
            for output in generator.keys():
                for tag in get_tags(output):
                    if tag not in known:
                        raise Exception(
                            "%s is used in # %s but it's not defined anywhere. Defined tags are %s"
                            % (tag, key, ', '.join(known)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse(content: str) ->Generators:
    """Parse a markdown document into tag generators.

    A heading starts a new tag; a paragraph holding a `code span` declares
    an output pattern; a code block lists input examples (one per line);
    a bullet list turns the tag into a simple list generator.  The literal
    marker ``<empty>`` in an output or input stands for the empty string.
    Raises on malformed documents (bad tag names, duplicate tags, mixed
    list/dict forms, missing outputs or examples, undefined tags).
    """
    parser = Parser()
    document = parser.parse(content)
    tag_name = ''
    # Output pattern currently collecting input examples (None until the
    # first `code span` of a section is seen).
    last_output = None
    generators: Generators = {}
    for item in document.children:
        if type(item) == Heading:
            # A new heading closes the previous section; validate it first.
            _check_previous_generator(generators, tag_name)
            last_output = None
            tag_name = item.children[0].children
            _check_tag_name(tag_name)
            _check_defined_twice(generators, tag_name)
            generators[tag_name] = {}
        elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:
            # New output pattern for the current tag.
            current_generator = generators[tag_name]
            if type(current_generator) == dict:
                last_output = item.children[0].children.replace('<empty>', '')
                current_generator[last_output] = []
            else:
                raise Exception(f'Mixing list and inputs/output in {tag_name}')
        elif type(item) == CodeBlock:
            # Input examples for the most recent output pattern.
            current_generator = generators[tag_name]
            if last_output is None:
                raise Exception(
                    f'Input example defined without output in {tag_name}')
            elif type(current_generator) == dict:
                inputs = item.children[0].children.strip().split('\n')
                inputs = [x.replace('<empty>', '') for x in inputs]
                current_generator[last_output] += inputs
                _check_tags(current_generator, tag_name)
            else:
                raise Exception(f'Mixing list and inputs/output in {tag_name}')
        elif type(item) == List:
            # A bullet list replaces the dict form with a plain list of tags.
            generators[tag_name] = [x.children[0].children[0].children for
                x in item.children]
    # Validate the final section, then cross-check all tag references.
    _check_previous_generator(generators, tag_name)
    _check_all_used_tags(generators)
    return generators
def _check_tags(generator: InputOutputGenerator, name: str):
    """Verify every input example carries the tags its output requires.

    For each output pattern, count how often each tag occurs; each input
    example must contain at least that many occurrences of every tag.
    Raises on the first shortfall with a message naming the generator.
    """
    for output, inputs in generator.items():
        required = dict(collections.Counter(get_tags(output)))
        for position, example in enumerate(inputs, start=1):
            present = dict(collections.Counter(get_tags(example)))
            for tag, needed in required.items():
                # Quotes around a tag in the output are ignored when matching.
                tag = tag.replace("'", '')
                if tag not in present:
                    raise Exception(
                        f'missing {tag} in example {position} of {name} `{output}`'
                        )
                shortfall = needed - present[tag]
                if shortfall > 0:
                    raise Exception(
                        f'missing {shortfall} {tag} in example {position} of {name} `{output}`. '
                        +
                        f'Expected to find {needed} {tag}, found {present[tag]}.'
                        )
def _check_tag_name(tag):
    """Reject heading names that are not valid tags (letters and _ only)."""
    candidate = '#' + tag.strip()
    if re.fullmatch(tag_regex, candidate) is None:
        raise Exception('# %s is invalid, only letters and _ are allowed' % tag
            )
def _check_defined_twice(generators, tag):
if tag in generators:
raise Exception('# %s is being defined twice' % tag)
def _check_previous_generator(generators, name):
if not name:
return
if type(generators[name]) == list:
return
if len(generators[name]) == 0:
raise Exception('output missing on # %s' % name)
for index, inputs in enumerate(generators[name].values()):
if len(inputs) == 0:
raise Exception(
f'input examples missing on # {name}, on example #{index}')
def _check_all_used_tags(generators):
    """Check that every tag referenced by any generator is defined.

    A reference is satisfied by a builtin generator or by any heading in
    the parsed document; anything else raises, listing the known tags.
    """
    known = ['#' + builtin for builtin in builtin_generators]
    known.extend('#' + defined for defined in generators.keys())
    for key, generator in generators.items():
        if type(generator) == list:
            # List generators name other tags directly, without the '#'.
            for tag in generator:
                if '#' + tag not in known:
                    raise Exception(
                        "- %s is used in # %s but it's not defined anywhere. Defined tags are %s"
                        % (tag, key, ', '.join(known)))
        else:
            # Input/output generators embed '#tag' references in each output.
            for output in generator.keys():
                for tag in get_tags(output):
                    if tag not in known:
                        raise Exception(
                            "%s is used in # %s but it's not defined anywhere. Defined tags are %s"
                            % (tag, key, ', '.join(known)))
<|reserved_special_token_1|>
from marko.parser import Parser
from marko.block import Heading, Paragraph, CodeBlock, List
from marko.inline import CodeSpan
from langcreator.common import Generators, InputOutputGenerator, tag_regex, get_tags, builtin_generators
import collections
import re
def parse(content: str) ->Generators:
    """Parse a markdown document into tag generators.

    A heading starts a new tag; a paragraph holding a `code span` declares
    an output pattern; a code block lists input examples (one per line);
    a bullet list turns the tag into a simple list generator.  The literal
    marker ``<empty>`` in an output or input stands for the empty string.
    Raises on malformed documents (bad tag names, duplicate tags, mixed
    list/dict forms, missing outputs or examples, undefined tags).
    """
    parser = Parser()
    document = parser.parse(content)
    tag_name = ''
    # Output pattern currently collecting input examples (None until the
    # first `code span` of a section is seen).
    last_output = None
    generators: Generators = {}
    for item in document.children:
        if type(item) == Heading:
            # A new heading closes the previous section; validate it first.
            _check_previous_generator(generators, tag_name)
            last_output = None
            tag_name = item.children[0].children
            _check_tag_name(tag_name)
            _check_defined_twice(generators, tag_name)
            generators[tag_name] = {}
        elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:
            # New output pattern for the current tag.
            current_generator = generators[tag_name]
            if type(current_generator) == dict:
                last_output = item.children[0].children.replace('<empty>', '')
                current_generator[last_output] = []
            else:
                raise Exception(f'Mixing list and inputs/output in {tag_name}')
        elif type(item) == CodeBlock:
            # Input examples for the most recent output pattern.
            current_generator = generators[tag_name]
            if last_output is None:
                raise Exception(
                    f'Input example defined without output in {tag_name}')
            elif type(current_generator) == dict:
                inputs = item.children[0].children.strip().split('\n')
                inputs = [x.replace('<empty>', '') for x in inputs]
                current_generator[last_output] += inputs
                _check_tags(current_generator, tag_name)
            else:
                raise Exception(f'Mixing list and inputs/output in {tag_name}')
        elif type(item) == List:
            # A bullet list replaces the dict form with a plain list of tags.
            generators[tag_name] = [x.children[0].children[0].children for
                x in item.children]
    # Validate the final section, then cross-check all tag references.
    _check_previous_generator(generators, tag_name)
    _check_all_used_tags(generators)
    return generators
def _check_tags(generator: InputOutputGenerator, name: str):
    """Verify every input example carries the tags its output requires.

    For each output pattern, count how often each tag occurs; each input
    example must contain at least that many occurrences of every tag.
    Raises on the first shortfall with a message naming the generator.
    """
    for output, inputs in generator.items():
        required = dict(collections.Counter(get_tags(output)))
        for position, example in enumerate(inputs, start=1):
            present = dict(collections.Counter(get_tags(example)))
            for tag, needed in required.items():
                # Quotes around a tag in the output are ignored when matching.
                tag = tag.replace("'", '')
                if tag not in present:
                    raise Exception(
                        f'missing {tag} in example {position} of {name} `{output}`'
                        )
                shortfall = needed - present[tag]
                if shortfall > 0:
                    raise Exception(
                        f'missing {shortfall} {tag} in example {position} of {name} `{output}`. '
                        +
                        f'Expected to find {needed} {tag}, found {present[tag]}.'
                        )
def _check_tag_name(tag):
    """Reject heading names that are not valid tags (letters and _ only)."""
    candidate = '#' + tag.strip()
    if re.fullmatch(tag_regex, candidate) is None:
        raise Exception('# %s is invalid, only letters and _ are allowed' % tag
            )
def _check_defined_twice(generators, tag):
if tag in generators:
raise Exception('# %s is being defined twice' % tag)
def _check_previous_generator(generators, name):
if not name:
return
if type(generators[name]) == list:
return
if len(generators[name]) == 0:
raise Exception('output missing on # %s' % name)
for index, inputs in enumerate(generators[name].values()):
if len(inputs) == 0:
raise Exception(
f'input examples missing on # {name}, on example #{index}')
def _check_all_used_tags(generators):
    """Check that every tag referenced by any generator is defined.

    A reference is satisfied by a builtin generator or by any heading in
    the parsed document; anything else raises, listing the known tags.
    """
    known = ['#' + builtin for builtin in builtin_generators]
    known.extend('#' + defined for defined in generators.keys())
    for key, generator in generators.items():
        if type(generator) == list:
            # List generators name other tags directly, without the '#'.
            for tag in generator:
                if '#' + tag not in known:
                    raise Exception(
                        "- %s is used in # %s but it's not defined anywhere. Defined tags are %s"
                        % (tag, key, ', '.join(known)))
        else:
            # Input/output generators embed '#tag' references in each output.
            for output in generator.keys():
                for tag in get_tags(output):
                    if tag not in known:
                        raise Exception(
                            "%s is used in # %s but it's not defined anywhere. Defined tags are %s"
                            % (tag, key, ', '.join(known)))
<|reserved_special_token_1|>
from marko.parser import Parser # type: ignore
from marko.block import Heading, Paragraph, CodeBlock, List # type: ignore
from marko.inline import CodeSpan # type: ignore
from langcreator.common import Generators, InputOutputGenerator, tag_regex, get_tags, builtin_generators
import collections
import re
def parse(content: str) -> Generators:
    """Parse a markdown document into a Generators mapping.

    A heading starts a new generator; an inline code span under it adds
    an output template; a fenced code block adds input examples for the
    most recent output; a markdown list turns the generator into a plain
    list of tag names.  Raises Exception on any structural error.
    """
    parser = Parser()
    document = parser.parse(content)
    # Parser state: the tag currently being filled and the output line
    # that subsequent code blocks attach their input examples to.
    tag_name = ""
    last_output = None
    generators: Generators = {}
    for item in document.children:
        if type(item) == Heading:
            # A new heading closes the previous generator; validate it first.
            _check_previous_generator(generators, tag_name)
            # TODO: test
            last_output = None
            tag_name = item.children[0].children
            _check_tag_name(tag_name)
            _check_defined_twice(generators, tag_name)
            generators[tag_name] = {}
        elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:
            # Inline code span: a new output template for the current tag.
            current_generator = generators[tag_name]
            if type(current_generator) == dict:
                last_output = item.children[0].children.replace("<empty>", "")
                current_generator[last_output] = []
            else:
                raise Exception(f"Mixing list and inputs/output in {tag_name}")
        elif type(item) == CodeBlock:
            # Fenced code block: input examples, one per line.
            current_generator = generators[tag_name]
            if last_output is None:
                # TODO: test
                raise Exception(
                    f"Input example defined without output in {tag_name}")
            elif type(current_generator) == dict:
                inputs = item.children[0].children.strip().split("\n")
                inputs = [x.replace("<empty>", "") for x in inputs]
                current_generator[last_output] += inputs
                _check_tags(current_generator, tag_name)
            else:
                raise Exception(f"Mixing list and inputs/output in {tag_name}")
        elif type(item) == List:
            # Markdown list: generator becomes a plain list of tag names.
            generators[tag_name] = [
                x.children[0].children[0].children for x in item.children
            ]
    # Validate the final generator (no following heading closes it).
    _check_previous_generator(generators, tag_name)
    _check_all_used_tags(generators)
    return generators
def _check_tags(generator: InputOutputGenerator, name: str):
    """Check each input example contains every tag its output requires,
    at least as many times as the output uses it."""
    for output, inputs in generator.items():
        required = collections.Counter(get_tags(output))
        for index, example in enumerate(inputs):
            found = collections.Counter(get_tags(example))
            for raw_tag, count in required.items():
                # Tags may carry quotes in the output template; strip them
                # before matching against the example's tags.
                tag = raw_tag.replace("'", "")
                if tag not in found:
                    raise Exception(
                        f"missing {tag} in example {index + 1} of {name} `{output}`"
                    )
                diff = count - found[tag]
                if diff > 0:
                    raise Exception(
                        f"missing {diff} {tag} in example {index + 1} of {name} `{output}`. "
                        +
                        f"Expected to find {count} {tag}, found {found[tag]}."
                    )
def _check_tag_name(tag):
    """Raise unless '#' + stripped *tag* fully matches ``tag_regex``."""
    candidate = "#" + tag.strip()
    if re.fullmatch(tag_regex, candidate):
        return
    raise Exception("# %s is invalid, only letters and _ are allowed" % (tag))
def _check_defined_twice(generators, tag):
if tag in generators:
raise Exception("# %s is being defined twice" % (tag))
def _check_previous_generator(generators, name):
if not name:
return
if type(generators[name]) == list:
return
if len(generators[name]) == 0:
raise Exception("output missing on # %s" % name)
for index, inputs in enumerate(generators[name].values()):
if len(inputs) == 0:
raise Exception(
f"input examples missing on # {name}, on example #{index}")
def _check_all_used_tags(generators):
    """Ensure every tag referenced anywhere is a builtin or user-defined."""
    defined = ["#" + x for x in builtin_generators]
    defined += ["#" + x for x in generators.keys()]
    for key, generator in generators.items():
        if type(generator) == list:
            # List-style generators name tags without the '#' prefix.
            for tag in generator:
                if "#" + tag in defined:
                    continue
                raise Exception(
                    "- %s is used in # %s but it's not defined anywhere. Defined tags are %s"
                    % (tag, key, ", ".join(defined)))
            continue
        # Dict-style generators embed '#tag' references in their outputs.
        for output in generator.keys():
            for tag in get_tags(output):
                if tag in defined:
                    continue
                raise Exception(
                    "%s is used in # %s but it's not defined anywhere. Defined tags are %s"
                    % (tag, key, ", ".join(defined)))
|
flexible
|
{
"blob_id": "0bbc8aa77436193ab47c0fe8cf0d7c6dffcfe097",
"index": 8066,
"step-1": "<mask token>\n\n\ndef _check_tags(generator: InputOutputGenerator, name: str):\n for output, inputs in generator.items():\n necessary_tags = dict(collections.Counter(get_tags(output)))\n for index, input in enumerate(inputs):\n input_tags = dict(collections.Counter(get_tags(input)))\n for tag, count in necessary_tags.items():\n tag = tag.replace(\"'\", '')\n if tag not in input_tags:\n raise Exception(\n f'missing {tag} in example {index + 1} of {name} `{output}`'\n )\n diff = count - input_tags[tag]\n if diff > 0:\n raise Exception(\n f'missing {diff} {tag} in example {index + 1} of {name} `{output}`. '\n +\n f'Expected to find {count} {tag}, found {input_tags[tag]}.'\n )\n\n\ndef _check_tag_name(tag):\n if not re.fullmatch(tag_regex, '#' + tag.strip()):\n raise Exception('# %s is invalid, only letters and _ are allowed' % tag\n )\n\n\ndef _check_defined_twice(generators, tag):\n if tag in generators:\n raise Exception('# %s is being defined twice' % tag)\n\n\n<mask token>\n\n\ndef _check_all_used_tags(generators):\n available_tags = [('#' + x) for x in builtin_generators] + [('#' + x) for\n x in generators.keys()]\n for key, generator in generators.items():\n if type(generator) == list:\n for tag in generator:\n if '#' + tag not in available_tags:\n raise Exception(\n \"- %s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n else:\n for output in generator.keys():\n tags = get_tags(output)\n for tag in tags:\n if tag not in available_tags:\n raise Exception(\n \"%s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n",
"step-2": "<mask token>\n\n\ndef _check_tags(generator: InputOutputGenerator, name: str):\n for output, inputs in generator.items():\n necessary_tags = dict(collections.Counter(get_tags(output)))\n for index, input in enumerate(inputs):\n input_tags = dict(collections.Counter(get_tags(input)))\n for tag, count in necessary_tags.items():\n tag = tag.replace(\"'\", '')\n if tag not in input_tags:\n raise Exception(\n f'missing {tag} in example {index + 1} of {name} `{output}`'\n )\n diff = count - input_tags[tag]\n if diff > 0:\n raise Exception(\n f'missing {diff} {tag} in example {index + 1} of {name} `{output}`. '\n +\n f'Expected to find {count} {tag}, found {input_tags[tag]}.'\n )\n\n\ndef _check_tag_name(tag):\n if not re.fullmatch(tag_regex, '#' + tag.strip()):\n raise Exception('# %s is invalid, only letters and _ are allowed' % tag\n )\n\n\ndef _check_defined_twice(generators, tag):\n if tag in generators:\n raise Exception('# %s is being defined twice' % tag)\n\n\ndef _check_previous_generator(generators, name):\n if not name:\n return\n if type(generators[name]) == list:\n return\n if len(generators[name]) == 0:\n raise Exception('output missing on # %s' % name)\n for index, inputs in enumerate(generators[name].values()):\n if len(inputs) == 0:\n raise Exception(\n f'input examples missing on # {name}, on example #{index}')\n\n\ndef _check_all_used_tags(generators):\n available_tags = [('#' + x) for x in builtin_generators] + [('#' + x) for\n x in generators.keys()]\n for key, generator in generators.items():\n if type(generator) == list:\n for tag in generator:\n if '#' + tag not in available_tags:\n raise Exception(\n \"- %s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n else:\n for output in generator.keys():\n tags = get_tags(output)\n for tag in tags:\n if tag not in available_tags:\n raise Exception(\n \"%s is used in # %s but it's not defined anywhere. 
Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n",
"step-3": "<mask token>\n\n\ndef parse(content: str) ->Generators:\n parser = Parser()\n document = parser.parse(content)\n tag_name = ''\n last_output = None\n generators: Generators = {}\n for item in document.children:\n if type(item) == Heading:\n _check_previous_generator(generators, tag_name)\n last_output = None\n tag_name = item.children[0].children\n _check_tag_name(tag_name)\n _check_defined_twice(generators, tag_name)\n generators[tag_name] = {}\n elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:\n current_generator = generators[tag_name]\n if type(current_generator) == dict:\n last_output = item.children[0].children.replace('<empty>', '')\n current_generator[last_output] = []\n else:\n raise Exception(f'Mixing list and inputs/output in {tag_name}')\n elif type(item) == CodeBlock:\n current_generator = generators[tag_name]\n if last_output is None:\n raise Exception(\n f'Input example defined without output in {tag_name}')\n elif type(current_generator) == dict:\n inputs = item.children[0].children.strip().split('\\n')\n inputs = [x.replace('<empty>', '') for x in inputs]\n current_generator[last_output] += inputs\n _check_tags(current_generator, tag_name)\n else:\n raise Exception(f'Mixing list and inputs/output in {tag_name}')\n elif type(item) == List:\n generators[tag_name] = [x.children[0].children[0].children for\n x in item.children]\n _check_previous_generator(generators, tag_name)\n _check_all_used_tags(generators)\n return generators\n\n\ndef _check_tags(generator: InputOutputGenerator, name: str):\n for output, inputs in generator.items():\n necessary_tags = dict(collections.Counter(get_tags(output)))\n for index, input in enumerate(inputs):\n input_tags = dict(collections.Counter(get_tags(input)))\n for tag, count in necessary_tags.items():\n tag = tag.replace(\"'\", '')\n if tag not in input_tags:\n raise Exception(\n f'missing {tag} in example {index + 1} of {name} `{output}`'\n )\n diff = count - input_tags[tag]\n if 
diff > 0:\n raise Exception(\n f'missing {diff} {tag} in example {index + 1} of {name} `{output}`. '\n +\n f'Expected to find {count} {tag}, found {input_tags[tag]}.'\n )\n\n\ndef _check_tag_name(tag):\n if not re.fullmatch(tag_regex, '#' + tag.strip()):\n raise Exception('# %s is invalid, only letters and _ are allowed' % tag\n )\n\n\ndef _check_defined_twice(generators, tag):\n if tag in generators:\n raise Exception('# %s is being defined twice' % tag)\n\n\ndef _check_previous_generator(generators, name):\n if not name:\n return\n if type(generators[name]) == list:\n return\n if len(generators[name]) == 0:\n raise Exception('output missing on # %s' % name)\n for index, inputs in enumerate(generators[name].values()):\n if len(inputs) == 0:\n raise Exception(\n f'input examples missing on # {name}, on example #{index}')\n\n\ndef _check_all_used_tags(generators):\n available_tags = [('#' + x) for x in builtin_generators] + [('#' + x) for\n x in generators.keys()]\n for key, generator in generators.items():\n if type(generator) == list:\n for tag in generator:\n if '#' + tag not in available_tags:\n raise Exception(\n \"- %s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n else:\n for output in generator.keys():\n tags = get_tags(output)\n for tag in tags:\n if tag not in available_tags:\n raise Exception(\n \"%s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n",
"step-4": "from marko.parser import Parser\nfrom marko.block import Heading, Paragraph, CodeBlock, List\nfrom marko.inline import CodeSpan\nfrom langcreator.common import Generators, InputOutputGenerator, tag_regex, get_tags, builtin_generators\nimport collections\nimport re\n\n\ndef parse(content: str) ->Generators:\n parser = Parser()\n document = parser.parse(content)\n tag_name = ''\n last_output = None\n generators: Generators = {}\n for item in document.children:\n if type(item) == Heading:\n _check_previous_generator(generators, tag_name)\n last_output = None\n tag_name = item.children[0].children\n _check_tag_name(tag_name)\n _check_defined_twice(generators, tag_name)\n generators[tag_name] = {}\n elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:\n current_generator = generators[tag_name]\n if type(current_generator) == dict:\n last_output = item.children[0].children.replace('<empty>', '')\n current_generator[last_output] = []\n else:\n raise Exception(f'Mixing list and inputs/output in {tag_name}')\n elif type(item) == CodeBlock:\n current_generator = generators[tag_name]\n if last_output is None:\n raise Exception(\n f'Input example defined without output in {tag_name}')\n elif type(current_generator) == dict:\n inputs = item.children[0].children.strip().split('\\n')\n inputs = [x.replace('<empty>', '') for x in inputs]\n current_generator[last_output] += inputs\n _check_tags(current_generator, tag_name)\n else:\n raise Exception(f'Mixing list and inputs/output in {tag_name}')\n elif type(item) == List:\n generators[tag_name] = [x.children[0].children[0].children for\n x in item.children]\n _check_previous_generator(generators, tag_name)\n _check_all_used_tags(generators)\n return generators\n\n\ndef _check_tags(generator: InputOutputGenerator, name: str):\n for output, inputs in generator.items():\n necessary_tags = dict(collections.Counter(get_tags(output)))\n for index, input in enumerate(inputs):\n input_tags = 
dict(collections.Counter(get_tags(input)))\n for tag, count in necessary_tags.items():\n tag = tag.replace(\"'\", '')\n if tag not in input_tags:\n raise Exception(\n f'missing {tag} in example {index + 1} of {name} `{output}`'\n )\n diff = count - input_tags[tag]\n if diff > 0:\n raise Exception(\n f'missing {diff} {tag} in example {index + 1} of {name} `{output}`. '\n +\n f'Expected to find {count} {tag}, found {input_tags[tag]}.'\n )\n\n\ndef _check_tag_name(tag):\n if not re.fullmatch(tag_regex, '#' + tag.strip()):\n raise Exception('# %s is invalid, only letters and _ are allowed' % tag\n )\n\n\ndef _check_defined_twice(generators, tag):\n if tag in generators:\n raise Exception('# %s is being defined twice' % tag)\n\n\ndef _check_previous_generator(generators, name):\n if not name:\n return\n if type(generators[name]) == list:\n return\n if len(generators[name]) == 0:\n raise Exception('output missing on # %s' % name)\n for index, inputs in enumerate(generators[name].values()):\n if len(inputs) == 0:\n raise Exception(\n f'input examples missing on # {name}, on example #{index}')\n\n\ndef _check_all_used_tags(generators):\n available_tags = [('#' + x) for x in builtin_generators] + [('#' + x) for\n x in generators.keys()]\n for key, generator in generators.items():\n if type(generator) == list:\n for tag in generator:\n if '#' + tag not in available_tags:\n raise Exception(\n \"- %s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n else:\n for output in generator.keys():\n tags = get_tags(output)\n for tag in tags:\n if tag not in available_tags:\n raise Exception(\n \"%s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, ', '.join(available_tags)))\n",
"step-5": "from marko.parser import Parser # type: ignore\nfrom marko.block import Heading, Paragraph, CodeBlock, List # type: ignore\nfrom marko.inline import CodeSpan # type: ignore\nfrom langcreator.common import Generators, InputOutputGenerator, tag_regex, get_tags, builtin_generators\nimport collections\nimport re\n\n\ndef parse(content: str) -> Generators:\n parser = Parser()\n document = parser.parse(content)\n\n tag_name = \"\"\n last_output = None\n generators: Generators = {}\n for item in document.children:\n if type(item) == Heading:\n _check_previous_generator(generators, tag_name)\n # TODO: test\n last_output = None\n tag_name = item.children[0].children\n _check_tag_name(tag_name)\n _check_defined_twice(generators, tag_name)\n generators[tag_name] = {}\n elif type(item) == Paragraph and type(item.children[0]) == CodeSpan:\n current_generator = generators[tag_name]\n if type(current_generator) == dict:\n last_output = item.children[0].children.replace(\"<empty>\", \"\")\n current_generator[last_output] = []\n else:\n raise Exception(f\"Mixing list and inputs/output in {tag_name}\")\n elif type(item) == CodeBlock:\n current_generator = generators[tag_name]\n if last_output is None:\n # TODO: test\n raise Exception(\n f\"Input example defined without output in {tag_name}\")\n elif type(current_generator) == dict:\n inputs = item.children[0].children.strip().split(\"\\n\")\n inputs = [x.replace(\"<empty>\", \"\") for x in inputs]\n current_generator[last_output] += inputs\n _check_tags(current_generator, tag_name)\n else:\n raise Exception(f\"Mixing list and inputs/output in {tag_name}\")\n elif type(item) == List:\n generators[tag_name] = [\n x.children[0].children[0].children for x in item.children\n ]\n _check_previous_generator(generators, tag_name)\n\n _check_all_used_tags(generators)\n return generators\n\n\ndef _check_tags(generator: InputOutputGenerator, name: str):\n for output, inputs in generator.items():\n necessary_tags = 
dict(collections.Counter(get_tags(output)))\n\n for index, input in enumerate(inputs):\n input_tags = dict(collections.Counter(get_tags(input)))\n\n for tag, count in necessary_tags.items():\n tag = tag.replace(\"'\", \"\")\n if tag not in input_tags:\n raise Exception(\n f\"missing {tag} in example {index + 1} of {name} `{output}`\"\n )\n\n diff = count - input_tags[tag]\n if diff > 0:\n raise Exception(\n f\"missing {diff} {tag} in example {index + 1} of {name} `{output}`. \"\n +\n f\"Expected to find {count} {tag}, found {input_tags[tag]}.\"\n )\n\n\ndef _check_tag_name(tag):\n if not re.fullmatch(tag_regex, \"#\" + tag.strip()):\n raise Exception(\"# %s is invalid, only letters and _ are allowed\" %\n (tag))\n\n\ndef _check_defined_twice(generators, tag):\n if tag in generators:\n raise Exception(\"# %s is being defined twice\" % (tag))\n\n\ndef _check_previous_generator(generators, name):\n if not name:\n return\n if type(generators[name]) == list:\n return\n if len(generators[name]) == 0:\n raise Exception(\"output missing on # %s\" % name)\n for index, inputs in enumerate(generators[name].values()):\n if len(inputs) == 0:\n raise Exception(\n f\"input examples missing on # {name}, on example #{index}\")\n\n\ndef _check_all_used_tags(generators):\n available_tags = [\"#\" + x for x in builtin_generators\n ] + [\"#\" + x for x in generators.keys()]\n for key, generator in generators.items():\n if type(generator) == list:\n for tag in generator:\n if \"#\" + tag not in available_tags:\n raise Exception(\n \"- %s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, \", \".join(available_tags)))\n else:\n for output in generator.keys():\n tags = get_tags(output)\n for tag in tags:\n if tag not in available_tags:\n raise Exception(\n \"%s is used in # %s but it's not defined anywhere. Defined tags are %s\"\n % (tag, key, \", \".join(available_tags)))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import subprocess
from flask import Flask, render_template, request
from subprocess import Popen, PIPE, check_output
def toggle_relay(value):
    """Run the ./relay helper with *value* (e.g. "small" or "big").

    Returns the helper's stdout (bytes); raises Exception if the helper
    wrote anything to stderr.
    """
    # Pass an argument list instead of a shell string: with shell=True a
    # crafted *value* could inject arbitrary shell commands.
    session = subprocess.Popen(["./relay", value], stdout=PIPE, stderr=PIPE)
    stdout, stderr = session.communicate()
    if stderr:
        raise Exception("Error " + str(stderr))
    return stdout
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def home():
    """Render the control page; on POST, brew now or schedule a brew.

    Immediate buttons fire the relay directly; the radio + time fields
    schedule a brew via the `at` daemon.
    """
    if request.method == 'POST':
        if request.form.get("espresso_button") == 'Espresso':
            toggle_relay("small")
        if request.form.get("lungo_button") == 'Lungo':
            toggle_relay("big")
        time = request.form.get("time")
        # `time` comes straight from the form and is interpolated into a
        # shell command below, so only accept a plain integer delay --
        # anything else would be a command-injection vector.  This also
        # avoids a TypeError when the field is absent (time is None).
        if time is not None and time.isdigit():
            if request.form.get("radios") == 'Espresso':
                Popen("echo /home/pi/coffee_maker/relay small | at now + " + time + " minutes", shell=True)
            if request.form.get("radios") == 'Lungo':
                Popen("echo /home/pi/coffee_maker/relay big | at now + " + time + " minutes", shell=True)
    return render_template("index.html")
# NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger (remote
# code execution) to the whole network -- confirm this never runs outside a
# trusted LAN.
app.run(host='0.0.0.0', debug=True)
|
normal
|
{
"blob_id": "18d1722529a63f9a1696b09c40dabb1c68ed55f4",
"index": 3423,
"step-1": "<mask token>\n\n\ndef toggle_relay(value):\n session = subprocess.Popen('./relay ' + value, stdout=PIPE, stderr=PIPE,\n shell=True)\n stdout, stderr = session.communicate()\n if stderr:\n raise Exception('Error ' + str(stderr))\n return stdout\n\n\n<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n if request.form.get('espresso_button') == 'Espresso':\n toggle_relay('small')\n if request.form.get('lungo_button') == 'Lungo':\n toggle_relay('big')\n time = request.form.get('time')\n if request.form.get('radios') == 'Espresso':\n Popen('echo /home/pi/coffee_maker/relay small | at now + ' +\n time + ' minutes', shell=True)\n if request.form.get('radios') == 'Lungo':\n Popen('echo /home/pi/coffee_maker/relay big | at now + ' + time +\n ' minutes', shell=True)\n return render_template('index.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef toggle_relay(value):\n session = subprocess.Popen('./relay ' + value, stdout=PIPE, stderr=PIPE,\n shell=True)\n stdout, stderr = session.communicate()\n if stderr:\n raise Exception('Error ' + str(stderr))\n return stdout\n\n\n<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n if request.form.get('espresso_button') == 'Espresso':\n toggle_relay('small')\n if request.form.get('lungo_button') == 'Lungo':\n toggle_relay('big')\n time = request.form.get('time')\n if request.form.get('radios') == 'Espresso':\n Popen('echo /home/pi/coffee_maker/relay small | at now + ' +\n time + ' minutes', shell=True)\n if request.form.get('radios') == 'Lungo':\n Popen('echo /home/pi/coffee_maker/relay big | at now + ' + time +\n ' minutes', shell=True)\n return render_template('index.html')\n\n\napp.run(host='0.0.0.0', debug=True)\n",
"step-3": "<mask token>\n\n\ndef toggle_relay(value):\n session = subprocess.Popen('./relay ' + value, stdout=PIPE, stderr=PIPE,\n shell=True)\n stdout, stderr = session.communicate()\n if stderr:\n raise Exception('Error ' + str(stderr))\n return stdout\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n if request.form.get('espresso_button') == 'Espresso':\n toggle_relay('small')\n if request.form.get('lungo_button') == 'Lungo':\n toggle_relay('big')\n time = request.form.get('time')\n if request.form.get('radios') == 'Espresso':\n Popen('echo /home/pi/coffee_maker/relay small | at now + ' +\n time + ' minutes', shell=True)\n if request.form.get('radios') == 'Lungo':\n Popen('echo /home/pi/coffee_maker/relay big | at now + ' + time +\n ' minutes', shell=True)\n return render_template('index.html')\n\n\napp.run(host='0.0.0.0', debug=True)\n",
"step-4": "import subprocess\nfrom flask import Flask, render_template, request\nfrom subprocess import Popen, PIPE, check_output\n\n\ndef toggle_relay(value):\n session = subprocess.Popen('./relay ' + value, stdout=PIPE, stderr=PIPE,\n shell=True)\n stdout, stderr = session.communicate()\n if stderr:\n raise Exception('Error ' + str(stderr))\n return stdout\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n if request.form.get('espresso_button') == 'Espresso':\n toggle_relay('small')\n if request.form.get('lungo_button') == 'Lungo':\n toggle_relay('big')\n time = request.form.get('time')\n if request.form.get('radios') == 'Espresso':\n Popen('echo /home/pi/coffee_maker/relay small | at now + ' +\n time + ' minutes', shell=True)\n if request.form.get('radios') == 'Lungo':\n Popen('echo /home/pi/coffee_maker/relay big | at now + ' + time +\n ' minutes', shell=True)\n return render_template('index.html')\n\n\napp.run(host='0.0.0.0', debug=True)\n",
"step-5": "import subprocess\nfrom flask import Flask, render_template, request\nfrom subprocess import Popen, PIPE, check_output\n\ndef toggle_relay(value):\n session = subprocess.Popen(\"./relay \" + value, stdout=PIPE, stderr=PIPE, shell=True)\n stdout, stderr = session.communicate()\n if stderr:\n raise Exception(\"Error \"+str(stderr))\n return stdout\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET','POST'])\ndef home():\n if request.method == 'POST':\n if request.form.get(\"espresso_button\") == 'Espresso':\n toggle_relay(\"small\")\n if request.form.get(\"lungo_button\") == 'Lungo':\n toggle_relay(\"big\")\n\n time = request.form.get(\"time\")\n \n if request.form.get(\"radios\") == 'Espresso':\n Popen(\"echo /home/pi/coffee_maker/relay small | at now + \" + time + \" minutes\", shell=True)\n if request.form.get(\"radios\") == 'Lungo':\n Popen(\"echo /home/pi/coffee_maker/relay big | at now + \" + time + \" minutes\", shell=True)\n return render_template(\"index.html\")\n\n\napp.run(host='0.0.0.0', debug=True)\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ItemCrawlSpider(CrawlSpider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, keyword=None, *args, **kwargs):
super(ItemCrawlSpider, self).__init__(*args, **kwargs)
keyword.replace(' ', '+')
self.start_urls = [
'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]
print(self.start_urls)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ItemCrawlSpider(CrawlSpider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, keyword=None, *args, **kwargs):
super(ItemCrawlSpider, self).__init__(*args, **kwargs)
keyword.replace(' ', '+')
self.start_urls = [
'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]
print(self.start_urls)
<|reserved_special_token_0|>
def parse_item(self, response):
i = {}
i['item_img'] = list(response.xpath(
'//*[@id="content"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(
'@src').extract())[0]
print(i['item_img'])
i['item_title'] = response.xpath('//*[@id="frmMain"]/h1/span/text()'
).extract()
i['item_price'] = response.xpath(
'//*[@id="frmMain"]/div[2]/div[1]/div/span/strong/text()').extract(
)
return i
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ItemCrawlSpider(CrawlSpider):
    """Crawl Auction search results for *keyword* and scrape item pages."""
    name = 'auction_crwal'
    allowed_domains = ['itempage3.auction.co.kr']

    def __init__(self, keyword=None, *args, **kwargs):
        super(ItemCrawlSpider, self).__init__(*args, **kwargs)
        # Bug fix: str.replace returns a new string -- the original call
        # discarded the result, so spaces were never encoded as '+'.
        keyword = keyword.replace(' ', '+')
        self.start_urls = [
            'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]
        print(self.start_urls)
    # Follow item-detail links and parse each one.
    rules = Rule(LinkExtractor(allow='DetailView\\.aspx\\?itemno=*'),
        callback='parse_item', follow=True), Rule(LinkExtractor(allow=
        'DetailView\\.aspx\\?itemno=*'))

    def parse_item(self, response):
        """Extract image URL, title and price from an item detail page."""
        i = {}
        i['item_img'] = list(response.xpath(
            '//*[@id="content"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(
            '@src').extract())[0]
        print(i['item_img'])
        i['item_title'] = response.xpath('//*[@id="frmMain"]/h1/span/text()'
            ).extract()
        i['item_price'] = response.xpath(
            '//*[@id="frmMain"]/div[2]/div[1]/div/span/strong/text()').extract(
            )
        return i
<|reserved_special_token_1|>
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class ItemCrawlSpider(CrawlSpider):
    """Crawl Auction search results for *keyword* and scrape item pages."""
    name = 'auction_crwal'
    allowed_domains = ['itempage3.auction.co.kr']

    def __init__(self, keyword=None, *args, **kwargs):
        super(ItemCrawlSpider, self).__init__(*args, **kwargs)
        # Bug fix: str.replace returns a new string -- the original call
        # discarded the result, so spaces were never encoded as '+'.
        keyword = keyword.replace(' ', '+')
        self.start_urls = [
            'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]
        print(self.start_urls)
    # Follow item-detail links and parse each one.
    rules = Rule(LinkExtractor(allow='DetailView\\.aspx\\?itemno=*'),
        callback='parse_item', follow=True), Rule(LinkExtractor(allow=
        'DetailView\\.aspx\\?itemno=*'))

    def parse_item(self, response):
        """Extract image URL, title and price from an item detail page."""
        i = {}
        i['item_img'] = list(response.xpath(
            '//*[@id="content"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(
            '@src').extract())[0]
        print(i['item_img'])
        i['item_title'] = response.xpath('//*[@id="frmMain"]/h1/span/text()'
            ).extract()
        i['item_price'] = response.xpath(
            '//*[@id="frmMain"]/div[2]/div[1]/div/span/strong/text()').extract(
            )
        return i
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class ItemCrawlSpider(CrawlSpider):
    """Crawl Auction search results for *keyword* and scrape item pages."""
    name = 'auction_crwal'
    allowed_domains = ['itempage3.auction.co.kr']

    def __init__(self, keyword=None, *args, **kwargs):
        super(ItemCrawlSpider, self).__init__(*args, **kwargs)
        # Bug fix: str.replace returns a new string -- the original call
        # discarded the result, so spaces were never encoded as '+'.
        keyword = keyword.replace(' ', '+')
        self.start_urls = ['http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]
        print(self.start_urls)

    # Follow item-detail links and parse each one.
    rules = (
        Rule(
            LinkExtractor(allow=r'DetailView\.aspx\?itemno=*'),
            callback='parse_item',
            follow=True),
        Rule(LinkExtractor(allow=r'DetailView\.aspx\?itemno=*'))
    )

    def parse_item(self, response):
        """Extract image URL, title and price from an item detail page."""
        i = {}
        i['item_img'] = list(response.xpath(
            '//*[@id="content"]/div[2]/div[1]/div/div/ul/li/a/img').xpath("@src").extract())[0]
        print(i['item_img'])
        i['item_title'] = response.xpath(
            '//*[@id="frmMain"]/h1/span/text()').extract()
        i['item_price'] = response.xpath(
            '//*[@id="frmMain"]/div[2]/div[1]/div/span/strong/text()').extract()
        return i
|
flexible
|
{
"blob_id": "cba12d076ed8cba84501983fda9bdce8312f2618",
"index": 6337,
"step-1": "<mask token>\n\n\nclass ItemCrawlSpider(CrawlSpider):\n <mask token>\n <mask token>\n\n def __init__(self, keyword=None, *args, **kwargs):\n super(ItemCrawlSpider, self).__init__(*args, **kwargs)\n keyword.replace(' ', '+')\n self.start_urls = [\n 'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]\n print(self.start_urls)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ItemCrawlSpider(CrawlSpider):\n <mask token>\n <mask token>\n\n def __init__(self, keyword=None, *args, **kwargs):\n super(ItemCrawlSpider, self).__init__(*args, **kwargs)\n keyword.replace(' ', '+')\n self.start_urls = [\n 'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]\n print(self.start_urls)\n <mask token>\n\n def parse_item(self, response):\n i = {}\n i['item_img'] = list(response.xpath(\n '//*[@id=\"content\"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(\n '@src').extract())[0]\n print(i['item_img'])\n i['item_title'] = response.xpath('//*[@id=\"frmMain\"]/h1/span/text()'\n ).extract()\n i['item_price'] = response.xpath(\n '//*[@id=\"frmMain\"]/div[2]/div[1]/div/span/strong/text()').extract(\n )\n return i\n",
"step-3": "<mask token>\n\n\nclass ItemCrawlSpider(CrawlSpider):\n name = 'auction_crwal'\n allowed_domains = ['itempage3.auction.co.kr']\n\n def __init__(self, keyword=None, *args, **kwargs):\n super(ItemCrawlSpider, self).__init__(*args, **kwargs)\n keyword.replace(' ', '+')\n self.start_urls = [\n 'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]\n print(self.start_urls)\n rules = Rule(LinkExtractor(allow='DetailView\\\\.aspx\\\\?itemno=*'),\n callback='parse_item', follow=True), Rule(LinkExtractor(allow=\n 'DetailView\\\\.aspx\\\\?itemno=*'))\n\n def parse_item(self, response):\n i = {}\n i['item_img'] = list(response.xpath(\n '//*[@id=\"content\"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(\n '@src').extract())[0]\n print(i['item_img'])\n i['item_title'] = response.xpath('//*[@id=\"frmMain\"]/h1/span/text()'\n ).extract()\n i['item_price'] = response.xpath(\n '//*[@id=\"frmMain\"]/div[2]/div[1]/div/span/strong/text()').extract(\n )\n return i\n",
"step-4": "import scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass ItemCrawlSpider(CrawlSpider):\n name = 'auction_crwal'\n allowed_domains = ['itempage3.auction.co.kr']\n\n def __init__(self, keyword=None, *args, **kwargs):\n super(ItemCrawlSpider, self).__init__(*args, **kwargs)\n keyword.replace(' ', '+')\n self.start_urls = [\n 'http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]\n print(self.start_urls)\n rules = Rule(LinkExtractor(allow='DetailView\\\\.aspx\\\\?itemno=*'),\n callback='parse_item', follow=True), Rule(LinkExtractor(allow=\n 'DetailView\\\\.aspx\\\\?itemno=*'))\n\n def parse_item(self, response):\n i = {}\n i['item_img'] = list(response.xpath(\n '//*[@id=\"content\"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(\n '@src').extract())[0]\n print(i['item_img'])\n i['item_title'] = response.xpath('//*[@id=\"frmMain\"]/h1/span/text()'\n ).extract()\n i['item_price'] = response.xpath(\n '//*[@id=\"frmMain\"]/div[2]/div[1]/div/span/strong/text()').extract(\n )\n return i\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass ItemCrawlSpider(CrawlSpider):\n name = 'auction_crwal'\n allowed_domains = ['itempage3.auction.co.kr']\n\n def __init__(self, keyword=None, *args, **kwargs):\n super(ItemCrawlSpider, self).__init__(*args, **kwargs)\n keyword.replace(' ','+')\n self.start_urls = ['http://browse.auction.co.kr/search?keyword=%s&p=1' %keyword]\n print(self.start_urls)\n\n rules = (\n Rule(\n LinkExtractor(allow=r'DetailView\\.aspx\\?itemno=*'),\n callback='parse_item',\n follow=True),\n Rule(LinkExtractor(allow=r'DetailView\\.aspx\\?itemno=*'))\n )\n\n def parse_item(self, response):\n i = {}\n i['item_img'] = list(response.xpath(\n '//*[@id=\"content\"]/div[2]/div[1]/div/div/ul/li/a/img').xpath(\"@src\").extract())[0]\n print(i['item_img'])\n i['item_title'] = response.xpath(\n '//*[@id=\"frmMain\"]/h1/span/text()').extract()\n i['item_price'] = response.xpath(\n '//*[@id=\"frmMain\"]/div[2]/div[1]/div/span/strong/text()').extract()\n return i\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import copy
import random
def parse_arrow(string):
return tuple(string.split(' -> '))
def parse_sig(string, vals=None):
parts = string.split()
if len(parts) == 1:
return resolve(parts[0], vals)
elif parts[1] == 'AND':
return resolve(parts[0], vals) & resolve(parts[2], vals)
elif parts[1] == 'OR':
return resolve(parts[0], vals) | resolve(parts[2], vals)
elif parts[1] == 'LSHIFT':
return resolve(parts[0], vals) << int(parts[2])
elif parts[1] == 'RSHIFT':
return resolve(parts[0], vals) >> int(parts[2])
elif parts[0] == 'NOT':
return 2 ** 16 + ~ resolve(parts[1], vals)
else:
raise NotImplementedError
def resolve(string, vals):
try:
return int(string)
except ValueError:
pass
try:
return vals[string]
except KeyError:
raise NotReady
class NotReady(Exception):
pass
def parse_line(line, vals):
left, dest = parse_arrow(line)
sig = parse_sig(left, vals)
vals[dest] = sig
def clean(set_of_lines):
# all assignments with ints at the left should be excluded
return set(line for line in set_of_lines
if not isinstance(parse_arrow(line)[0], int)
and not line.endswith('-> b'))
def run_it(stored_lines, vals):
while stored_lines:
line = random.sample(stored_lines, 1)[0]
try:
parse_line(line, vals)
stored_lines.remove(line)
except NotReady:
pass
if __name__ == "__main__":
# this is apparently non-deterministic.
# I get different answers at different times.
# luckily, it worked for me the first time I ran it...
lines = set([x.strip() for x in open('input/input7.txt').readlines()])
vals = {}
stored_lines = copy.deepcopy(lines)
run_it(stored_lines, vals)
answer = vals['a']
print answer
vals = {'b': answer}
stored_lines = clean(lines)
run_it(stored_lines, vals)
print vals['a']
|
normal
|
{
"blob_id": "a4492af775899ec2dcc0cac44b2740edd8422273",
"index": 6012,
"step-1": "import copy\nimport random\n\n\ndef parse_arrow(string):\n return tuple(string.split(' -> '))\n\n\ndef parse_sig(string, vals=None):\n parts = string.split()\n if len(parts) == 1:\n return resolve(parts[0], vals)\n elif parts[1] == 'AND':\n return resolve(parts[0], vals) & resolve(parts[2], vals)\n elif parts[1] == 'OR':\n return resolve(parts[0], vals) | resolve(parts[2], vals)\n elif parts[1] == 'LSHIFT':\n return resolve(parts[0], vals) << int(parts[2])\n elif parts[1] == 'RSHIFT':\n return resolve(parts[0], vals) >> int(parts[2])\n elif parts[0] == 'NOT':\n return 2 ** 16 + ~ resolve(parts[1], vals)\n else:\n raise NotImplementedError\n\n\ndef resolve(string, vals):\n try:\n return int(string)\n except ValueError:\n pass\n try:\n return vals[string]\n except KeyError:\n raise NotReady\n\n\nclass NotReady(Exception):\n pass\n\n\ndef parse_line(line, vals):\n left, dest = parse_arrow(line)\n sig = parse_sig(left, vals)\n vals[dest] = sig\n\n\ndef clean(set_of_lines):\n # all assignments with ints at the left should be excluded\n return set(line for line in set_of_lines\n if not isinstance(parse_arrow(line)[0], int)\n and not line.endswith('-> b'))\n\n\ndef run_it(stored_lines, vals):\n while stored_lines:\n line = random.sample(stored_lines, 1)[0]\n try:\n parse_line(line, vals)\n stored_lines.remove(line)\n except NotReady:\n pass\n\n\nif __name__ == \"__main__\":\n # this is apparently non-deterministic.\n # I get different answers at different times.\n # luckily, it worked for me the first time I ran it...\n lines = set([x.strip() for x in open('input/input7.txt').readlines()])\n vals = {}\n stored_lines = copy.deepcopy(lines)\n run_it(stored_lines, vals)\n answer = vals['a']\n print answer\n vals = {'b': answer}\n stored_lines = clean(lines)\n run_it(stored_lines, vals)\n print vals['a']\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(np.linalg.norm(mat))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = np.log(2) / 25
apdataX = np.random.random((5, 35))
quarter_way_arr = [False, False, False]
quarter_way_arr[0] = True
quarter_way_arr[1] = True
quarter_way_arr[2] = True
mat = np.eye(3)
print(np.linalg.norm(mat))
<|reserved_special_token_1|>
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from simulation_utils import box, simulation
from kinematics import pose3D
a = np.log(2) / 25
apdataX = np.random.random((5, 35))
quarter_way_arr = [False, False, False]
quarter_way_arr[0] = True
quarter_way_arr[1] = True
quarter_way_arr[2] = True
mat = np.eye(3)
print(np.linalg.norm(mat))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from simulation_utils import box, simulation
from kinematics import pose3D
a = np.log(2)/25
apdataX = np.random.random((5, 35))
quarter_way_arr = [False, False, False]
quarter_way_arr[0] = True
quarter_way_arr[1] = True
quarter_way_arr[2] = True
mat = np.eye(3)
print(np.linalg.norm(mat))
|
flexible
|
{
"blob_id": "7e7e96fb9377e4dc59a46a46951f5057ecae419a",
"index": 201,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(np.linalg.norm(mat))\n",
"step-3": "<mask token>\na = np.log(2) / 25\napdataX = np.random.random((5, 35))\nquarter_way_arr = [False, False, False]\nquarter_way_arr[0] = True\nquarter_way_arr[1] = True\nquarter_way_arr[2] = True\nmat = np.eye(3)\nprint(np.linalg.norm(mat))\n",
"step-4": "import random\nimport gym\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom simulation_utils import box, simulation\nfrom kinematics import pose3D\na = np.log(2) / 25\napdataX = np.random.random((5, 35))\nquarter_way_arr = [False, False, False]\nquarter_way_arr[0] = True\nquarter_way_arr[1] = True\nquarter_way_arr[2] = True\nmat = np.eye(3)\nprint(np.linalg.norm(mat))\n",
"step-5": "# -*- coding: utf-8 -*-\nimport random\nimport gym\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom simulation_utils import box, simulation\nfrom kinematics import pose3D\n\na = np.log(2)/25\n\napdataX = np.random.random((5, 35))\nquarter_way_arr = [False, False, False]\n\nquarter_way_arr[0] = True\nquarter_way_arr[1] = True\nquarter_way_arr[2] = True\n\nmat = np.eye(3)\nprint(np.linalg.norm(mat))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.2.5 on 2019-10-24 05:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_studentbasic_stu_class_num'),
]
operations = [
migrations.AlterModelOptions(
name='onduty',
options={'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'},
),
migrations.AlterModelOptions(
name='studentbasic',
options={'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'},
),
migrations.AlterModelOptions(
name='studentcertification',
options={'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'},
),
migrations.AlterModelOptions(
name='studentclass',
options={'verbose_name': '学员班级', 'verbose_name_plural': '学员班级'},
),
migrations.AlterModelOptions(
name='studentexam',
options={'verbose_name': '学员考试信息', 'verbose_name_plural': '学员考试信息'},
),
migrations.AlterModelOptions(
name='studentexamextra',
options={'verbose_name': '学员补考情况', 'verbose_name_plural': '学员补考情况'},
),
migrations.AlterModelOptions(
name='studenttextbook',
options={'verbose_name': '学员教材', 'verbose_name_plural': '学员教材'},
),
migrations.AlterModelOptions(
name='studentwechat',
options={'verbose_name': '学员365开通情况', 'verbose_name_plural': '学员365开通情况'},
),
migrations.AlterModelOptions(
name='tuition',
options={'verbose_name': '学员交费信息', 'verbose_name_plural': '学员交费信息'},
),
migrations.AlterField(
model_name='studentbasic',
name='stu_signup_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报名日期'),
),
migrations.AlterField(
model_name='studentcertification',
name='cert_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='发证日期'),
),
migrations.AlterField(
model_name='studentexam',
name='exam_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),
),
migrations.AlterField(
model_name='studentexamextra',
name='exam_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),
),
migrations.AlterField(
model_name='studentwechat',
name='wechat_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='开通日期'),
),
migrations.AlterField(
model_name='tuition',
name='fee_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'),
),
]
|
normal
|
{
"blob_id": "289aa48b4433be533c3916dd039136df45e0ac0b",
"index": 1073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('student', '0008_studentbasic_stu_class_num')]\n operations = [migrations.AlterModelOptions(name='onduty', options={\n 'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'}),\n migrations.AlterModelOptions(name='studentbasic', options={\n 'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'}),\n migrations.AlterModelOptions(name='studentcertification', options={\n 'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'}), migrations\n .AlterModelOptions(name='studentclass', options={'verbose_name':\n '学员班级', 'verbose_name_plural': '学员班级'}), migrations.\n AlterModelOptions(name='studentexam', options={'verbose_name':\n '学员考试信息', 'verbose_name_plural': '学员考试信息'}), migrations.\n AlterModelOptions(name='studentexamextra', options={'verbose_name':\n '学员补考情况', 'verbose_name_plural': '学员补考情况'}), migrations.\n AlterModelOptions(name='studenttextbook', options={'verbose_name':\n '学员教材', 'verbose_name_plural': '学员教材'}), migrations.\n AlterModelOptions(name='studentwechat', options={'verbose_name':\n '学员365开通情况', 'verbose_name_plural': '学员365开通情况'}), migrations.\n AlterModelOptions(name='tuition', options={'verbose_name': '学员交费信息',\n 'verbose_name_plural': '学员交费信息'}), migrations.AlterField(model_name\n ='studentbasic', name='stu_signup_date', field=models.CharField(\n blank=True, max_length=128, null=True, verbose_name='报名日期')),\n migrations.AlterField(model_name='studentcertification', name=\n 'cert_date', field=models.CharField(blank=True, max_length=128,\n null=True, verbose_name='发证日期')), migrations.AlterField(model_name=\n 'studentexam', name='exam_date', field=models.CharField(blank=True,\n max_length=128, null=True, verbose_name='报考日期')), migrations.\n AlterField(model_name='studentexamextra', name='exam_date', field=\n models.CharField(blank=True, max_length=128, null=True,\n verbose_name='报考日期')), migrations.AlterField(model_name=\n 'studentwechat', name='wechat_date', 
field=models.CharField(blank=\n True, max_length=128, null=True, verbose_name='开通日期')), migrations.\n AlterField(model_name='tuition', name='fee_date', field=models.\n CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('student', '0008_studentbasic_stu_class_num')]\n operations = [migrations.AlterModelOptions(name='onduty', options={\n 'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'}),\n migrations.AlterModelOptions(name='studentbasic', options={\n 'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'}),\n migrations.AlterModelOptions(name='studentcertification', options={\n 'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'}), migrations\n .AlterModelOptions(name='studentclass', options={'verbose_name':\n '学员班级', 'verbose_name_plural': '学员班级'}), migrations.\n AlterModelOptions(name='studentexam', options={'verbose_name':\n '学员考试信息', 'verbose_name_plural': '学员考试信息'}), migrations.\n AlterModelOptions(name='studentexamextra', options={'verbose_name':\n '学员补考情况', 'verbose_name_plural': '学员补考情况'}), migrations.\n AlterModelOptions(name='studenttextbook', options={'verbose_name':\n '学员教材', 'verbose_name_plural': '学员教材'}), migrations.\n AlterModelOptions(name='studentwechat', options={'verbose_name':\n '学员365开通情况', 'verbose_name_plural': '学员365开通情况'}), migrations.\n AlterModelOptions(name='tuition', options={'verbose_name': '学员交费信息',\n 'verbose_name_plural': '学员交费信息'}), migrations.AlterField(model_name\n ='studentbasic', name='stu_signup_date', field=models.CharField(\n blank=True, max_length=128, null=True, verbose_name='报名日期')),\n migrations.AlterField(model_name='studentcertification', name=\n 'cert_date', field=models.CharField(blank=True, max_length=128,\n null=True, verbose_name='发证日期')), migrations.AlterField(model_name=\n 'studentexam', name='exam_date', field=models.CharField(blank=True,\n max_length=128, null=True, verbose_name='报考日期')), migrations.\n AlterField(model_name='studentexamextra', name='exam_date', field=\n models.CharField(blank=True, max_length=128, null=True,\n verbose_name='报考日期')), migrations.AlterField(model_name=\n 
'studentwechat', name='wechat_date', field=models.CharField(blank=\n True, max_length=128, null=True, verbose_name='开通日期')), migrations.\n AlterField(model_name='tuition', name='fee_date', field=models.\n CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'))]\n",
"step-5": "# Generated by Django 2.2.5 on 2019-10-24 05:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('student', '0008_studentbasic_stu_class_num'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='onduty',\n options={'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'},\n ),\n migrations.AlterModelOptions(\n name='studentbasic',\n options={'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'},\n ),\n migrations.AlterModelOptions(\n name='studentcertification',\n options={'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'},\n ),\n migrations.AlterModelOptions(\n name='studentclass',\n options={'verbose_name': '学员班级', 'verbose_name_plural': '学员班级'},\n ),\n migrations.AlterModelOptions(\n name='studentexam',\n options={'verbose_name': '学员考试信息', 'verbose_name_plural': '学员考试信息'},\n ),\n migrations.AlterModelOptions(\n name='studentexamextra',\n options={'verbose_name': '学员补考情况', 'verbose_name_plural': '学员补考情况'},\n ),\n migrations.AlterModelOptions(\n name='studenttextbook',\n options={'verbose_name': '学员教材', 'verbose_name_plural': '学员教材'},\n ),\n migrations.AlterModelOptions(\n name='studentwechat',\n options={'verbose_name': '学员365开通情况', 'verbose_name_plural': '学员365开通情况'},\n ),\n migrations.AlterModelOptions(\n name='tuition',\n options={'verbose_name': '学员交费信息', 'verbose_name_plural': '学员交费信息'},\n ),\n migrations.AlterField(\n model_name='studentbasic',\n name='stu_signup_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报名日期'),\n ),\n migrations.AlterField(\n model_name='studentcertification',\n name='cert_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='发证日期'),\n ),\n migrations.AlterField(\n model_name='studentexam',\n name='exam_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),\n ),\n migrations.AlterField(\n model_name='studentexamextra',\n 
name='exam_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),\n ),\n migrations.AlterField(\n model_name='studentwechat',\n name='wechat_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='开通日期'),\n ),\n migrations.AlterField(\n model_name='tuition',\n name='fee_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^coffeeshops/(\\d+)$', ShopView.as_view()), url(
'^coffeeshops$', ShopListView.as_view())]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import views
from .views import ShopView, ShopListView
urlpatterns = [url('^coffeeshops/(\\d+)$', ShopView.as_view()), url(
'^coffeeshops$', ShopListView.as_view())]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import views
from .views import ShopView, ShopListView
urlpatterns = [
url(r'^coffeeshops/(\d+)$', ShopView.as_view()),
url(r'^coffeeshops$', ShopListView.as_view()),
]
|
flexible
|
{
"blob_id": "54a705de2597140a72e47f5afe86614b619461b7",
"index": 1109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^coffeeshops/(\\\\d+)$', ShopView.as_view()), url(\n '^coffeeshops$', ShopListView.as_view())]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nfrom .views import ShopView, ShopListView\nurlpatterns = [url('^coffeeshops/(\\\\d+)$', ShopView.as_view()), url(\n '^coffeeshops$', ShopListView.as_view())]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import views\nfrom .views import ShopView, ShopListView\n\nurlpatterns = [\n url(r'^coffeeshops/(\\d+)$', ShopView.as_view()),\n url(r'^coffeeshops$', ShopListView.as_view()),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from board.ttt import TTT
from mctsai.mcts import MCTS
import unittest
# skip = [0, 1]
skip = [0]
class TestTTT(unittest.TestCase):
def test_mcts(self):
if 0 in skip:
print("Skipping ai self-play")
return
ttt = TTT()
for i in range(1000):
mcts = MCTS(ttt)
state = mcts.root.state
while not mcts.board.ending_state(state):
move = mcts.search()
print(move)
state = mcts.board.get_state(state, move)
mcts.board.print(state)
mcts.make_move(move)
self.assertEqual(mcts.board.ending_state(state), -1)
def test_play_mcts(self):
if 1 in skip:
print("Skipping human-ai play")
return
ttt = TTT()
mcts = MCTS(ttt)
state = mcts.root.state
my_player = 2
while not mcts.board.ending_state(state):
mcts.board.print(state)
move = mcts.search()
print(move)
if state[1] == my_player:
move = input("Make move!\n")
move = (int(move[0]), int(move[1]))
mcts.make_move(move)
state = mcts.root.state
mcts.board.print(state)
# state = mcts.board.get_state(state, move)
# mcts = MCTS(ttt)
# mcts.root.state = state
# mcts.root.remaining_moves = mcts.board.get_legal_moves(mcts.root.state)
def test_positions(self):
# simple block
move_sequence = [(1, 1), (2, 0), (0, 1)]
# self.from_position(move_sequence, (2, 1), "Simple block 1")
# simple block 2
move_sequence = [(1, 1), (2, 2), (2, 1)]
# self.from_position(move_sequence, (0, 1), "Simple block 2")
# simple win 1
move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]
# self.from_position(move_sequence, (1, 0), "Simple win")
def from_position(self, move_sequence, expected_move, name):
ttt = TTT()
mcts = MCTS(ttt, searchtime= 30)
mcts.board.print(mcts.root.state)
for move in move_sequence:
mcts.search()
mcts.make_move(move)
mcts.board.print(mcts.root.state)
move = mcts.search()
print("Testing {} block (that was lost before) on the following board".format(name))
self.assertEqual(move, expected_move)
def test_trick_win(self):
pass
# ttt = TTT()
# state = ttt.get_initial_state()
# state = ttt.get_state(state, (1, 1))
# state = ttt.get_state(state, (2, 2))
# state = ttt.get_state(state, (2, 0))
# print("Testing trick win on the following board")
# ttt.print(state)
# for _ in range(100):
# mcts = MCTS(ttt)
# mcts.set_root_state(state)
# move = mcts.search()
# self.assertEqual(move, (0, 2))
def test_defend_trick_win(self):
pass
|
normal
|
{
"blob_id": "d0a3f332e04627eb275168972bd92cd1ea9b9447",
"index": 227,
"step-1": "<mask token>\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n <mask token>\n <mask token>\n\n def test_trick_win(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n\n def test_positions(self):\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime=30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n move = mcts.search()\n print('Testing {} block (that was lost before) on the following board'\n .format(name))\n self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n\n def test_positions(self):\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime=30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n move = mcts.search()\n print('Testing {} block (that was lost before) on the following board'\n .format(name))\n self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n\n def test_defend_trick_win(self):\n pass\n",
"step-4": "<mask token>\nskip = [0]\n\n\nclass TestTTT(unittest.TestCase):\n\n def test_mcts(self):\n if 0 in skip:\n print('Skipping ai self-play')\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print('Skipping human-ai play')\n return\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input('Make move!\\n')\n move = int(move[0]), int(move[1])\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n\n def test_positions(self):\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime=30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n move = mcts.search()\n print('Testing {} block (that was lost before) on the following board'\n .format(name))\n self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n\n def test_defend_trick_win(self):\n pass\n",
"step-5": "from board.ttt import TTT\nfrom mctsai.mcts import MCTS\nimport unittest\n\n# skip = [0, 1]\n\nskip = [0]\n\nclass TestTTT(unittest.TestCase):\n def test_mcts(self):\n if 0 in skip:\n print(\"Skipping ai self-play\")\n return\n ttt = TTT()\n for i in range(1000):\n mcts = MCTS(ttt)\n state = mcts.root.state\n while not mcts.board.ending_state(state):\n move = mcts.search()\n print(move)\n state = mcts.board.get_state(state, move)\n mcts.board.print(state)\n mcts.make_move(move)\n self.assertEqual(mcts.board.ending_state(state), -1)\n\n def test_play_mcts(self):\n if 1 in skip:\n print(\"Skipping human-ai play\")\n return\n\n ttt = TTT()\n mcts = MCTS(ttt)\n state = mcts.root.state\n my_player = 2\n while not mcts.board.ending_state(state):\n mcts.board.print(state)\n move = mcts.search()\n print(move)\n if state[1] == my_player:\n move = input(\"Make move!\\n\")\n move = (int(move[0]), int(move[1]))\n\n mcts.make_move(move)\n state = mcts.root.state\n mcts.board.print(state)\n # state = mcts.board.get_state(state, move)\n # mcts = MCTS(ttt)\n # mcts.root.state = state\n # mcts.root.remaining_moves = mcts.board.get_legal_moves(mcts.root.state)\n\n def test_positions(self):\n # simple block\n move_sequence = [(1, 1), (2, 0), (0, 1)]\n # self.from_position(move_sequence, (2, 1), \"Simple block 1\")\n\n # simple block 2\n move_sequence = [(1, 1), (2, 2), (2, 1)]\n # self.from_position(move_sequence, (0, 1), \"Simple block 2\")\n\n # simple win 1\n move_sequence = [(1, 1), (2, 2), (2, 0), (0, 2), (1, 2), (2, 1)]\n # self.from_position(move_sequence, (1, 0), \"Simple win\")\n\n def from_position(self, move_sequence, expected_move, name):\n ttt = TTT()\n mcts = MCTS(ttt, searchtime= 30)\n mcts.board.print(mcts.root.state)\n for move in move_sequence:\n mcts.search()\n mcts.make_move(move)\n mcts.board.print(mcts.root.state)\n\n move = mcts.search()\n\n print(\"Testing {} block (that was lost before) on the following board\".format(name))\n 
self.assertEqual(move, expected_move)\n\n def test_trick_win(self):\n pass\n # ttt = TTT()\n # state = ttt.get_initial_state()\n # state = ttt.get_state(state, (1, 1))\n # state = ttt.get_state(state, (2, 2))\n # state = ttt.get_state(state, (2, 0))\n # print(\"Testing trick win on the following board\")\n # ttt.print(state)\n # for _ in range(100):\n # mcts = MCTS(ttt)\n # mcts.set_root_state(state)\n # move = mcts.search()\n # self.assertEqual(move, (0, 2))\n\n def test_defend_trick_win(self):\n pass\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
class AsyncHandler(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AsyncHandler(object):
<|reserved_special_token_0|>
def __init__(self, future):
"""! Constructor
"""
self.__future = future
<|reserved_special_token_0|>
def wait(self, timeout=None):
resp = self.__future.result()
return resp
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AsyncHandler(object):
""" Synchronous service controller class implementation. """
def __init__(self, future):
"""! Constructor
"""
self.__future = future
"""! Wait for response.
@param timeout - Optional argument. Set timeout for waiting for a response.
@returns resp - Response object
"""
def wait(self, timeout=None):
resp = self.__future.result()
return resp
<|reserved_special_token_1|>
from ServiceControllerBase import *
from concurrent.futures import as_completed
class AsyncHandler(object):
""" Synchronous service controller class implementation. """
def __init__(self, future):
"""! Constructor
"""
self.__future = future
"""! Wait for response.
@param timeout - Optional argument. Set timeout for waiting for a response.
@returns resp - Response object
"""
def wait(self, timeout=None):
resp = self.__future.result()
return resp
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 RAPP
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
## @file ServiceController/ServiceControllerSync.py
#
# @copyright Rapp Projecty EU 2015
# @author Konstantinos Panayiotou, [klpanagi@gmail.com]
#
from ServiceControllerBase import *
# high-level interface for asynchronously executing callables.
from concurrent.futures import as_completed
class AsyncHandler(object):
    """Handler for asynchronous service calls.

    NOTE(review): despite the original "Synchronous" wording, this class
    wraps a Future produced by an asynchronous invocation.
    """
    def __init__(self, future):
        """! Constructor

        @param future - Future object tracking the pending service call.
        """
        self.__future = future
    """! Wait for response.

    @param timeout - Optional argument. Set timeout for waiting for a response.
    @returns resp - Response object
    """
    def wait(self, timeout=None):
        # NOTE(review): `timeout` is accepted but never forwarded to
        # Future.result(), so this call blocks until a result is available
        # regardless of the value passed -- confirm intended.
        resp = self.__future.result()
        return resp
|
flexible
|
{
"blob_id": "f4e287f5fce05e039c54f1108f6e73020b8d3d8f",
"index": 9346,
"step-1": "<mask token>\n\n\nclass AsyncHandler(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AsyncHandler(object):\n <mask token>\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n <mask token>\n\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-3": "<mask token>\n\n\nclass AsyncHandler(object):\n \"\"\" Synchronous service controller class implementation. \"\"\"\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n \"\"\"! Wait for response.\n\n @param timeout - Optional argument. Set timeout for waiting for a response.\n @returns resp - Response object\n \"\"\"\n\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-4": "from ServiceControllerBase import *\nfrom concurrent.futures import as_completed\n\n\nclass AsyncHandler(object):\n \"\"\" Synchronous service controller class implementation. \"\"\"\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n \"\"\"! Wait for response.\n\n @param timeout - Optional argument. Set timeout for waiting for a response.\n @returns resp - Response object\n \"\"\"\n\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# Copyright 2015 RAPP\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n #http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Authors: Konstantinos Panayiotou, Manos Tsardoulias\n# contact: klpanagi@gmail.com, etsardou@iti.gr\n\n\n## @file ServiceController/ServiceControllerSync.py\n#\n# @copyright Rapp Projecty EU 2015\n# @author Konstantinos Panayiotou, [klpanagi@gmail.com]\n#\n\n\nfrom ServiceControllerBase import *\n\n# high-level interface for asynchronously executing callables.\nfrom concurrent.futures import as_completed\n\n\nclass AsyncHandler(object):\n \"\"\" Synchronous service controller class implementation. \"\"\"\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n\n\n \"\"\"! Wait for response.\n\n @param timeout - Optional argument. Set timeout for waiting for a response.\n @returns resp - Response object\n \"\"\"\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import whoosh.index as index
from whoosh.fields import *
from whoosh.qparser import MultifieldParser
from whoosh import scoring
# BM25F ranking weights; content_B softens length normalisation on the body.
w = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)
# Fields the multifield query parser searches across.
fieldnames = ["bill_text", "bill_title", "year", "sponsor_name", "subject"]
# Relative field weights: titles/subjects outrank body text; year and
# sponsor_name are searchable but carry no ranking weight.
boosts = {"bill_text": 1, "bill_title": 2.5, "year": 0, "sponsor_name": 0, "subject": 2.0}
#load index:
ix = index.open_dir("final_index")
# NOTE(review): this writer is opened but never used or committed; in Whoosh
# opening a writer also takes the index write lock -- confirm it can be removed.
writer = ix.writer()
#search:
def results(q):
	"""Run query string *q* against the bill index and return the top hits.

	Returns a list of dicts with bill_title, year, sponsor_name and a
	govtrack.us URL for the full bill text (first result page, up to 10
	hits). Also prints a result summary to stdout (Python 2 print).
	"""
	hits = []
	with ix.searcher(weighting=w) as searcher:
		query = MultifieldParser(fieldnames, ix.schema, fieldboosts=boosts).parse(q)
		# NOTE: this local deliberately-or-not shadows the function name.
		results = searcher.search_page(query, 1, pagelen=10)
		print "\n" + str(len(results)) + " results found!"
		print "Displaying top ten results:"
		for result in results:
			# Map the chamber code onto the path fragments govtrack.us uses.
			if result["house_or_senate"] == "h":
				hs = "hr"
				billnum = "hr" + str(result["bill_number"])  # NOTE(review): unused
				isih = "ih"
			elif result["house_or_senate"] == "s":
				hs = "s"
				billnum = "s" + str(result["bill_number"])  # NOTE(review): unused
				isih = "is"
			# NOTE(review): if house_or_senate is neither "h" nor "s", hs and
			# isih are unbound here and the next line raises NameError --
			# confirm the field is always one of the two values.
			url = "https://www.govtrack.us/data/congress/" + str(result["congress_number"]) + "/bills/" + hs + "/" + hs + str(result["bill_number"]) + "/text-versions/" + isih + "/document.txt"
			hits.append({"bill_title":result["bill_title"], "year":result["year"], "url":url, "sponsor_name":result["sponsor_name"]})
	return hits
# Interactive entry point: read a query, trim leading whitespace, search.
query = raw_input("\nSearch for a term in bill text: ")
query = query.lstrip()
# Prints the list of hit dicts verbatim (Python 2 print statement).
print results(query)
|
normal
|
{
"blob_id": "6a400419c26c62471dfc6893cc2d1ff6d88e49f4",
"index": 7518,
"step-1": "import whoosh.index as index\nfrom whoosh.fields import *\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh import scoring\n\nw = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)\nfieldnames = [\"bill_text\", \"bill_title\", \"year\", \"sponsor_name\", \"subject\"]\nboosts = {\"bill_text\": 1, \"bill_title\": 2.5, \"year\": 0, \"sponsor_name\": 0, \"subject\": 2.0}\n\n#load index:\nix = index.open_dir(\"final_index\")\nwriter = ix.writer()\n\n#search:\ndef results(q):\n\thits = []\n\twith ix.searcher(weighting=w) as searcher:\n\t\tquery = MultifieldParser(fieldnames, ix.schema, fieldboosts=boosts).parse(q)\n\t\tresults = searcher.search_page(query, 1, pagelen=10)\n\t\tprint \"\\n\" + str(len(results)) + \" results found!\"\n\t\tprint \"Displaying top ten results:\"\n\t\tfor result in results:\n\t\t\tif result[\"house_or_senate\"] == \"h\":\n\t\t\t\ths = \"hr\"\n\t\t\t\tbillnum = \"hr\" + str(result[\"bill_number\"])\n\t\t\t\tisih = \"ih\"\n\t\t\telif result[\"house_or_senate\"] == \"s\":\n\t\t\t\ths = \"s\"\n\t\t\t\tbillnum = \"s\" + str(result[\"bill_number\"])\n\t\t\t\tisih = \"is\"\n\t\t\t\n\t\t\turl = \"https://www.govtrack.us/data/congress/\" + str(result[\"congress_number\"]) + \"/bills/\" + hs + \"/\" + hs + str(result[\"bill_number\"]) + \"/text-versions/\" + isih + \"/document.txt\" \n\t\t\thits.append({\"bill_title\":result[\"bill_title\"], \"year\":result[\"year\"], \"url\":url, \"sponsor_name\":result[\"sponsor_name\"]})\n\treturn hits\n\nquery = raw_input(\"\\nSearch for a term in bill text: \")\nquery = query.lstrip()\n\nprint results(query)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def create_item(user_id):
flag = False
while flag == False:
product_name = input('Enter the name of the product : ')
flag = validate_product_name(product_name)
flag = False
while flag == False:
price = input('Enter the price : ')
flag = validate_product_price(price)
flag = False
while flag == False:
qty = input('Enter the qty : ')
flag = validate_product_quantity(qty)
item_detail = ItemDetails(product_name, price, qty, user_id)
insert_item_details(item_detail)
print('successfully created')
<|reserved_special_token_0|>
def validate_product_quantity(qty):
if qty.isnumeric():
return True
else:
print('qty should contain only numeric')
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mechant_service(user_id):
print('================================')
print('Merchant Page')
print('================================')
heading = """=============================================
enter your choice:
1. Create item
2. View item
3. View order list
4. Accept
5. logout
=============================================
"""
int_value = ''
while (int_value != 1 and int_value != 2 and int_value != 3 and
int_value != 4 and int_value != 5):
result = input(heading)
try:
int_value = int(result)
if int_value == 1:
create_item(user_id)
int_value = ''
elif int_value == 2:
print('view item')
int_value = ''
elif int_value == 3:
print('View order list')
int_value = ''
elif int_value == 4:
print('Accept')
int_value = ''
elif int_value == 5:
print('logout successfully')
return
except Exception as e:
print(e)
print('\n\ninvalid input')
def create_item(user_id):
flag = False
while flag == False:
product_name = input('Enter the name of the product : ')
flag = validate_product_name(product_name)
flag = False
while flag == False:
price = input('Enter the price : ')
flag = validate_product_price(price)
flag = False
while flag == False:
qty = input('Enter the qty : ')
flag = validate_product_quantity(qty)
item_detail = ItemDetails(product_name, price, qty, user_id)
insert_item_details(item_detail)
print('successfully created')
<|reserved_special_token_0|>
def validate_product_price(price):
if price.isnumeric():
return True
else:
print('price should contain only numeric')
return False
def validate_product_quantity(qty):
if qty.isnumeric():
return True
else:
print('qty should contain only numeric')
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mechant_service(user_id):
print('================================')
print('Merchant Page')
print('================================')
heading = """=============================================
enter your choice:
1. Create item
2. View item
3. View order list
4. Accept
5. logout
=============================================
"""
int_value = ''
while (int_value != 1 and int_value != 2 and int_value != 3 and
int_value != 4 and int_value != 5):
result = input(heading)
try:
int_value = int(result)
if int_value == 1:
create_item(user_id)
int_value = ''
elif int_value == 2:
print('view item')
int_value = ''
elif int_value == 3:
print('View order list')
int_value = ''
elif int_value == 4:
print('Accept')
int_value = ''
elif int_value == 5:
print('logout successfully')
return
except Exception as e:
print(e)
print('\n\ninvalid input')
def create_item(user_id):
flag = False
while flag == False:
product_name = input('Enter the name of the product : ')
flag = validate_product_name(product_name)
flag = False
while flag == False:
price = input('Enter the price : ')
flag = validate_product_price(price)
flag = False
while flag == False:
qty = input('Enter the qty : ')
flag = validate_product_quantity(qty)
item_detail = ItemDetails(product_name, price, qty, user_id)
insert_item_details(item_detail)
print('successfully created')
def validate_product_name(name):
if name.isalpha():
if len(name) > 3 and len(name) <= 10:
return True
else:
print('name should be more than 3 and less than 10 letters')
return False
else:
print('name should contain only alphanumeric')
return False
def validate_product_price(price):
if price.isnumeric():
return True
else:
print('price should contain only numeric')
return False
def validate_product_quantity(qty):
if qty.isnumeric():
return True
else:
print('qty should contain only numeric')
return False
<|reserved_special_token_1|>
from db_connector import insert_item_details, insert_user_details
from Item_details import ItemDetails
def mechant_service(user_id):
    """Interactive menu loop for a logged-in merchant.

    Repeatedly prompts for a menu choice until the merchant logs out
    (choice 5). Choice 1 creates an item; choices 2-4 currently only
    print a placeholder message.

    Args:
        user_id: Identifier of the logged-in merchant, forwarded to
            create_item.
    """
    print('================================')
    print('Merchant Page')
    print('================================')
    heading = """=============================================
enter your choice:
1. Create item 
2. View item 
3. View order list 
4. Accept 
5. logout 
============================================= 
"""
    int_value = ''
    # Loop until choice 5 returns; after servicing choices 1-4, int_value
    # is reset to '' so the menu is shown again.
    while (int_value != 1 and int_value != 2 and int_value != 3 and 
        int_value != 4 and int_value != 5):
        result = input(heading)
        try:
            int_value = int(result)
            if int_value == 1:
                create_item(user_id)
                int_value = ''
            elif int_value == 2:
                print('view item')
                int_value = ''
            elif int_value == 3:
                print('View order list')
                int_value = ''
            elif int_value == 4:
                print('Accept')
                int_value = ''
            elif int_value == 5:
                print('logout successfully')
                return
        except Exception as e:
            # Non-integer input lands here (ValueError from int()).
            print(e)
            print('\n\ninvalid input')
def create_item(user_id):
    """Interactively collect product details and persist a new item.

    Prompts (re-prompting until valid) for product name, price and
    quantity, then stores an ItemDetails record for *user_id* via
    insert_item_details.

    Args:
        user_id: Identifier of the merchant creating the item.
    """
    product_name = _prompt_until_valid('Enter the name of the product : ',
                                       validate_product_name)
    price = _prompt_until_valid('Enter the price : ', validate_product_price)
    qty = _prompt_until_valid('Enter the qty : ', validate_product_quantity)
    item_detail = ItemDetails(product_name, price, qty, user_id)
    insert_item_details(item_detail)
    print('successfully created')


def _prompt_until_valid(prompt, validator):
    """Keep asking for *prompt* until *validator* accepts; return the value.

    The validator is expected to print its own rejection message and
    return a boolean (as the validate_product_* helpers do).
    """
    while True:
        value = input(prompt)
        if validator(value):
            return value
def validate_product_name(name):
    """Validate a product name: alphabetic characters only, 4-10 letters.

    Prints the rejection reason and returns False when invalid.

    Args:
        name: Candidate product name string.

    Returns:
        bool: True when the name is acceptable.
    """
    if not name.isalpha():
        # Bug fix: the old message claimed "alphanumeric", but isalpha()
        # accepts letters only (no digits, spaces or symbols).
        print('name should contain only alphabets')
        return False
    if 3 < len(name) <= 10:
        return True
    # Bug fix: the old message said "less than 10" although the check
    # accepts names of exactly 10 letters.
    print('name should be more than 3 and at most 10 letters')
    return False
def validate_product_price(price):
    """Return True when *price* consists solely of numeric characters.

    Prints an explanatory message and returns False otherwise.
    """
    if not price.isnumeric():
        print('price should contain only numeric')
        return False
    return True
def validate_product_quantity(qty):
    """Return True when *qty* is made up of numeric characters only.

    Prints an explanatory message and returns False otherwise.
    """
    ok = qty.isnumeric()
    if not ok:
        print('qty should contain only numeric')
    return ok
<|reserved_special_token_1|>
from db_connector import insert_item_details, insert_user_details
from Item_details import ItemDetails
def mechant_service(user_id):
print('================================')
print('Merchant Page')
print('================================')
heading='=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n'
int_value=''
while int_value!=1 and int_value!=2 and int_value!=3 and int_value!=4 and int_value!=5:
result=input(heading)
try:
int_value=int(result)
if int_value==1:
create_item(user_id)
int_value=''
elif int_value==2:
print('view item')
int_value=''
elif int_value==3:
print('View order list')
int_value=''
elif int_value==4:
print('Accept')
int_value=''
elif int_value==5:
print('logout successfully')
return
except Exception as e:
print(e)
print('\n\ninvalid input')
def create_item(user_id):
flag=False
while flag==False:
product_name=input('Enter the name of the product : ')
flag=validate_product_name(product_name)
flag=False
while flag==False:
price=input('Enter the price : ')
flag=validate_product_price(price)
flag=False
while flag==False:
qty=input('Enter the qty : ')
flag=validate_product_quantity(qty)
item_detail=ItemDetails(product_name,price,qty,user_id)
insert_item_details(item_detail)
print('successfully created')
def validate_product_name(name):
if name.isalpha():
if len(name) > 3 and len(name) <=10:
return True
else:
print('name should be more than 3 and less than 10 letters')
return False
else:
print("name should contain only alphanumeric")
return False
def validate_product_price(price):
if price.isnumeric():
return True
else:
print("price should contain only numeric")
return False
def validate_product_quantity(qty):
if qty.isnumeric():
return True
else:
print("qty should contain only numeric")
return False
|
flexible
|
{
"blob_id": "d5dae7ab6eb34c82ae795730ecae666c4f81f10a",
"index": 4160,
"step-1": "<mask token>\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\n<mask token>\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n",
"step-2": "<mask token>\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading = \"\"\"=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n\"\"\"\n int_value = ''\n while (int_value != 1 and int_value != 2 and int_value != 3 and \n int_value != 4 and int_value != 5):\n result = input(heading)\n try:\n int_value = int(result)\n if int_value == 1:\n create_item(user_id)\n int_value = ''\n elif int_value == 2:\n print('view item')\n int_value = ''\n elif int_value == 3:\n print('View order list')\n int_value = ''\n elif int_value == 4:\n print('Accept')\n int_value = ''\n elif int_value == 5:\n print('logout successfully')\n return\n except Exception as e:\n print(e)\n print('\\n\\ninvalid input')\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\n<mask token>\n\n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print('price should contain only numeric')\n return False\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n",
"step-3": "<mask token>\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading = \"\"\"=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n\"\"\"\n int_value = ''\n while (int_value != 1 and int_value != 2 and int_value != 3 and \n int_value != 4 and int_value != 5):\n result = input(heading)\n try:\n int_value = int(result)\n if int_value == 1:\n create_item(user_id)\n int_value = ''\n elif int_value == 2:\n print('view item')\n int_value = ''\n elif int_value == 3:\n print('View order list')\n int_value = ''\n elif int_value == 4:\n print('Accept')\n int_value = ''\n elif int_value == 5:\n print('logout successfully')\n return\n except Exception as e:\n print(e)\n print('\\n\\ninvalid input')\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\ndef validate_product_name(name):\n if name.isalpha():\n if len(name) > 3 and len(name) <= 10:\n return True\n else:\n print('name should be more than 3 and less than 10 letters')\n return False\n else:\n print('name should contain only alphanumeric')\n return False\n\n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print('price should contain only numeric')\n return False\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should 
contain only numeric')\n return False\n",
"step-4": "from db_connector import insert_item_details, insert_user_details\nfrom Item_details import ItemDetails\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading = \"\"\"=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n\"\"\"\n int_value = ''\n while (int_value != 1 and int_value != 2 and int_value != 3 and \n int_value != 4 and int_value != 5):\n result = input(heading)\n try:\n int_value = int(result)\n if int_value == 1:\n create_item(user_id)\n int_value = ''\n elif int_value == 2:\n print('view item')\n int_value = ''\n elif int_value == 3:\n print('View order list')\n int_value = ''\n elif int_value == 4:\n print('Accept')\n int_value = ''\n elif int_value == 5:\n print('logout successfully')\n return\n except Exception as e:\n print(e)\n print('\\n\\ninvalid input')\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\ndef validate_product_name(name):\n if name.isalpha():\n if len(name) > 3 and len(name) <= 10:\n return True\n else:\n print('name should be more than 3 and less than 10 letters')\n return False\n else:\n print('name should contain only alphanumeric')\n return False\n\n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print('price should contain only numeric')\n return False\n\n\ndef 
validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n",
"step-5": "from db_connector import insert_item_details, insert_user_details\nfrom Item_details import ItemDetails\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading='=============================================\\nenter your choice:\\n1. Create item \\n2. View item \\n3. View order list \\n4. Accept \\n5. logout \\n============================================= \\n'\n int_value=''\n while int_value!=1 and int_value!=2 and int_value!=3 and int_value!=4 and int_value!=5:\n result=input(heading)\n try:\n int_value=int(result)\n if int_value==1:\n create_item(user_id)\n int_value=''\n elif int_value==2:\n print('view item')\n int_value=''\n elif int_value==3:\n print('View order list')\n int_value=''\n elif int_value==4:\n print('Accept')\n int_value=''\n elif int_value==5:\n print('logout successfully')\n return \n except Exception as e:\n print(e)\n print('\\n\\ninvalid input') \n\ndef create_item(user_id):\n flag=False\n while flag==False:\n product_name=input('Enter the name of the product : ')\n flag=validate_product_name(product_name)\n \n flag=False\n while flag==False:\n price=input('Enter the price : ')\n flag=validate_product_price(price)\n\n flag=False\n while flag==False:\n qty=input('Enter the qty : ')\n flag=validate_product_quantity(qty) \n\n\n item_detail=ItemDetails(product_name,price,qty,user_id) \n insert_item_details(item_detail) \n print('successfully created')\n \n\ndef validate_product_name(name):\n if name.isalpha():\n if len(name) > 3 and len(name) <=10:\n return True\n else:\n print('name should be more than 3 and less than 10 letters') \n return False \n else:\n print(\"name should contain only alphanumeric\")\n return False \n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print(\"price should contain only numeric\")\n return False \n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n 
return True\n else:\n print(\"qty should contain only numeric\")\n return False \n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class WaitForException(Exception):
"""WaitForException: if func doesn't return expected result within the specified time"""
def _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,
print_error: bool=True, sleep_for: int=1, **kwargs) ->None:
"""
Run specified function until it returns expected result until timeout.
Args:
func (Callable): Specified function
expected_result (Any): result that is expected. Defaults to None.
timeout (int): Time out in seconds. Defaults to 10.
print_error (boolean): whether any exceptions raised should be printed.
Defaults to False.
sleep_for (int): Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func
Raises:
WaitForException: if func doesn't return expected result within the
specified time
"""
end = time() + timeout
while time() <= end:
try:
retval = func(**kwargs)
except Exception as err:
if print_error:
logger.error(err)
else:
if retval == expected_result:
return None
sleep(sleep_for)
raise WaitForException(
f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
)
<|reserved_special_token_0|>
def run_viz(port: int=None, line=None, local_ns=None) ->None:
"""
Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
the Jupyter notebook environment.
Args:
port: TCP port that viz will listen to. Defaults to 4141.
line: line required by line magic interface.
local_ns: Local namespace with local variables of the scope where the line magic is invoked.
For more details, please visit:
https://ipython.readthedocs.io/en/stable/config/custommagics.html
"""
port = port or 4141
port = _allocate_port(start_at=port)
if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
_VIZ_PROCESSES[port].terminate()
if local_ns is not None and 'project_path' in local_ns:
target = partial(run_server, project_path=local_ns['project_path'])
else:
target = run_server
viz_process = multiprocessing.Process(target=target, daemon=True,
kwargs={'port': port})
viz_process.start()
_VIZ_PROCESSES[port] = viz_process
_wait_for(func=_check_viz_up, port=port)
wrapper = (
"""
<html lang="en"><head></head><body style="width:100; height:100;">
<iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
</body></html>"""
.format(port))
display(HTML(wrapper))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WaitForException(Exception):
"""WaitForException: if func doesn't return expected result within the specified time"""
def _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,
print_error: bool=True, sleep_for: int=1, **kwargs) ->None:
"""
Run specified function until it returns expected result until timeout.
Args:
func (Callable): Specified function
expected_result (Any): result that is expected. Defaults to None.
timeout (int): Time out in seconds. Defaults to 10.
print_error (boolean): whether any exceptions raised should be printed.
Defaults to False.
sleep_for (int): Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func
Raises:
WaitForException: if func doesn't return expected result within the
specified time
"""
end = time() + timeout
while time() <= end:
try:
retval = func(**kwargs)
except Exception as err:
if print_error:
logger.error(err)
else:
if retval == expected_result:
return None
sleep(sleep_for)
raise WaitForException(
f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
)
<|reserved_special_token_0|>
def _allocate_port(start_at: int, end_at: int=65535) ->int:
acceptable_ports = range(start_at, end_at + 1)
viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)
if viz_ports:
return sorted(viz_ports)[0]
socket.setdefaulttimeout(2.0)
for port in acceptable_ports:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)
) as sock:
if sock.connect_ex(('127.0.0.1', port)) != 0:
return port
raise ValueError(
'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'
.format(start_at, end_at))
def run_viz(port: int=None, line=None, local_ns=None) ->None:
"""
Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
the Jupyter notebook environment.
Args:
port: TCP port that viz will listen to. Defaults to 4141.
line: line required by line magic interface.
local_ns: Local namespace with local variables of the scope where the line magic is invoked.
For more details, please visit:
https://ipython.readthedocs.io/en/stable/config/custommagics.html
"""
port = port or 4141
port = _allocate_port(start_at=port)
if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
_VIZ_PROCESSES[port].terminate()
if local_ns is not None and 'project_path' in local_ns:
target = partial(run_server, project_path=local_ns['project_path'])
else:
target = run_server
viz_process = multiprocessing.Process(target=target, daemon=True,
kwargs={'port': port})
viz_process.start()
_VIZ_PROCESSES[port] = viz_process
_wait_for(func=_check_viz_up, port=port)
wrapper = (
"""
<html lang="en"><head></head><body style="width:100; height:100;">
<iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
</body></html>"""
.format(port))
display(HTML(wrapper))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WaitForException(Exception):
    """Raised when a polled function never produces the expected result
    before its timeout elapses."""
def _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,
    print_error: bool=True, sleep_for: int=1, **kwargs) ->None:
    """
    Run specified function until it returns expected result until timeout.

    Args:
        func (Callable): Specified function
        expected_result (Any): result that is expected. Defaults to True.
        timeout (int): Time out in seconds. Defaults to 10.
        print_error (boolean): whether any exceptions raised should be logged.
            Defaults to True.
        sleep_for (int): Execute func every specified number of seconds.
            Defaults to 1.
        **kwargs: Arguments to be passed to func

    Raises:
        WaitForException: if func doesn't return expected result within the
            specified time
    """
    end = time() + timeout
    while time() <= end:
        try:
            retval = func(**kwargs)
        except Exception as err:
            # Polled targets may fail transiently; optionally log and retry.
            if print_error:
                logger.error(err)
        else:
            if retval == expected_result:
                return None
        sleep(sleep_for)
    # Deadline passed without a matching return value.
    raise WaitForException(
        f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
    )
def _check_viz_up(port):
    """Return True when a Kedro-Viz server answers HTTP 200 on *port*."""
    try:
        response = requests.get('http://127.0.0.1:{}/'.format(port))
    except requests.ConnectionError:
        # Nothing is accepting connections on that port (yet).
        return False
    return response.status_code == 200
def _allocate_port(start_at: int, end_at: int=65535) ->int:
    """Find a TCP port for Kedro-Viz in the range [start_at, end_at].

    Preference order: reuse the lowest port already tracked in
    _VIZ_PROCESSES, otherwise probe ports sequentially and return the
    first one nothing is listening on.

    Raises:
        ValueError: When no free port exists in the range.
    """
    acceptable_ports = range(start_at, end_at + 1)
    # Reuse a port from a previously launched viz process if one falls
    # inside the acceptable range.
    viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)
    if viz_ports:
        return sorted(viz_ports)[0]
    # Short connect timeout so probing does not stall the caller.
    socket.setdefaulttimeout(2.0)
    for port in acceptable_ports:
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            ) as sock:
            # connect_ex returns non-zero when nothing accepts the
            # connection, i.e. the port is free for our server to bind.
            if sock.connect_ex(('127.0.0.1', port)) != 0:
                return port
    raise ValueError(
        'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'
        .format(start_at, end_at))
def run_viz(port: int=None, line=None, local_ns=None) ->None:
    """
    Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
    the Jupyter notebook environment.

    Args:
        port: TCP port that viz will listen to. Defaults to 4141.
        line: line required by line magic interface.
        local_ns: Local namespace with local variables of the scope where the line magic is invoked.
            For more details, please visit:
            https://ipython.readthedocs.io/en/stable/config/custommagics.html

    """
    port = port or 4141
    port = _allocate_port(start_at=port)
    # Restart the server if one is already bound to this port.
    if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
        _VIZ_PROCESSES[port].terminate()
    if local_ns is not None and 'project_path' in local_ns:
        # Serve the Kedro project the notebook is running in.
        target = partial(run_server, project_path=local_ns['project_path'])
    else:
        target = run_server
    # daemon=True so the server process dies with the notebook kernel.
    viz_process = multiprocessing.Process(target=target, daemon=True,
        kwargs={'port': port})
    viz_process.start()
    _VIZ_PROCESSES[port] = viz_process
    # Block until the server answers HTTP 200 before rendering the iframe.
    _wait_for(func=_check_viz_up, port=port)
    wrapper = (
        """
            <html lang="en"><head></head><body style="width:100; height:100;">
            <iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
            </body></html>"""
        .format(port))
    display(HTML(wrapper))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_VIZ_PROCESSES: Dict[str, int] = {}
<|reserved_special_token_0|>
class WaitForException(Exception):
"""WaitForException: if func doesn't return expected result within the specified time"""
def _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,
print_error: bool=True, sleep_for: int=1, **kwargs) ->None:
"""
Run specified function until it returns expected result until timeout.
Args:
func (Callable): Specified function
expected_result (Any): result that is expected. Defaults to None.
timeout (int): Time out in seconds. Defaults to 10.
print_error (boolean): whether any exceptions raised should be printed.
Defaults to False.
sleep_for (int): Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func
Raises:
WaitForException: if func doesn't return expected result within the
specified time
"""
end = time() + timeout
while time() <= end:
try:
retval = func(**kwargs)
except Exception as err:
if print_error:
logger.error(err)
else:
if retval == expected_result:
return None
sleep(sleep_for)
raise WaitForException(
f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
)
def _check_viz_up(port):
    """Return True when the viz server on ``port`` answers with HTTP 200."""
    endpoint = 'http://127.0.0.1:{}/'.format(port)
    try:
        reply = requests.get(endpoint)
    except requests.ConnectionError:
        # Nothing is listening on the port yet.
        return False
    else:
        return reply.status_code == 200
def _allocate_port(start_at: int, end_at: int=65535) ->int:
    """Pick a TCP port for Kedro-Viz inside ``[start_at, end_at]``.

    Reuses the lowest port already claimed by a viz process; otherwise
    probes localhost for the first port with nothing listening.

    Raises:
        ValueError: if no port in the range is available.
    """
    candidates = range(start_at, end_at + 1)
    already_claimed = set(candidates) & _VIZ_PROCESSES.keys()
    if already_claimed:
        return min(already_claimed)
    socket.setdefaulttimeout(2.0)
    for candidate in candidates:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with closing(probe):
            # connect_ex != 0 means nothing accepted the connection,
            # i.e. the port is free for us to bind.
            if probe.connect_ex(('127.0.0.1', candidate)) != 0:
                return candidate
    raise ValueError(
        'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'
        .format(start_at, end_at))
def run_viz(port: int=None, line=None, local_ns=None) ->None:
    """
    Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
    the Jupyter notebook environment.
    Args:
        port: TCP port that viz will listen to. Defaults to 4141.
        line: line required by line magic interface.
        local_ns: Local namespace with local variables of the scope where the line magic is invoked.
            For more details, please visit:
            https://ipython.readthedocs.io/en/stable/config/custommagics.html
    """
    # Jupyter line magics ignore Python default arguments, so apply it here.
    port = port or 4141
    port = _allocate_port(start_at=port)
    # Replace any live server already bound to the chosen port.
    if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
        _VIZ_PROCESSES[port].terminate()
    # Forward the project path when available in the notebook namespace.
    if local_ns is not None and 'project_path' in local_ns:
        target = partial(run_server, project_path=local_ns['project_path'])
    else:
        target = run_server
    # Daemon process: the server is killed together with the notebook kernel.
    viz_process = multiprocessing.Process(target=target, daemon=True,
        kwargs={'port': port})
    viz_process.start()
    _VIZ_PROCESSES[port] = viz_process
    # Wait until the server answers HTTP before rendering the iframe.
    _wait_for(func=_check_viz_up, port=port)
    wrapper = (
        """
        <html lang="en"><head></head><body style="width:100; height:100;">
        <iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
        </body></html>"""
        .format(port))
    display(HTML(wrapper))
<|reserved_special_token_1|>
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""`kedro_viz.launchers.jupyter` provides line_magic to launch the viz server
from a jupyter notebook.
"""
# pragma: no cover
import logging
import multiprocessing
import socket
from contextlib import closing
from functools import partial
from time import sleep, time
from typing import Any, Callable, Dict
import requests
from IPython.core.display import HTML, display
from kedro_viz.server import run_server
# Registry of running viz server processes, keyed by TCP port.
_VIZ_PROCESSES: Dict[int, multiprocessing.Process] = {}
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class WaitForException(Exception):
    """Raised by ``_wait_for`` when ``func`` doesn't return the expected result within the specified timeout."""
def _wait_for(
func: Callable,
expected_result: Any = True,
timeout: int = 10,
print_error: bool = True,
sleep_for: int = 1,
**kwargs,
) -> None:
"""
Run specified function until it returns expected result until timeout.
Args:
func (Callable): Specified function
expected_result (Any): result that is expected. Defaults to None.
timeout (int): Time out in seconds. Defaults to 10.
print_error (boolean): whether any exceptions raised should be printed.
Defaults to False.
sleep_for (int): Execute func every specified number of seconds.
Defaults to 1.
**kwargs: Arguments to be passed to func
Raises:
WaitForException: if func doesn't return expected result within the
specified time
"""
end = time() + timeout
while time() <= end:
try:
retval = func(**kwargs)
except Exception as err: # pylint: disable=broad-except
if print_error:
logger.error(err)
else:
if retval == expected_result:
return None
sleep(sleep_for)
raise WaitForException(
f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
)
def _check_viz_up(port):  # pragma: no cover
    """Report whether the viz server on ``port`` already serves HTTP 200."""
    endpoint = "http://127.0.0.1:{}/".format(port)
    try:
        reply = requests.get(endpoint)
    except requests.ConnectionError:
        # Nothing is listening on the port yet.
        return False
    else:
        return reply.status_code == 200
def _allocate_port(start_at: int, end_at: int = 65535) -> int:
    """Return a TCP port for Kedro-Viz in ``[start_at, end_at]``.

    Prefers the lowest port already allocated to a viz process (so repeated
    calls reuse the same server); otherwise probes localhost for the first
    port with nothing listening.

    Raises:
        ValueError: if no port in the range is available.
    """
    acceptable_ports = range(start_at, end_at + 1)
    viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)
    if viz_ports:  # reuse one of already allocated ports
        return sorted(viz_ports)[0]
    socket.setdefaulttimeout(2.0)  # seconds
    for port in acceptable_ports:  # iterate through all acceptable ports
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            if sock.connect_ex(("127.0.0.1", port)) != 0:  # port is available
                return port
    raise ValueError(
        "Cannot allocate an open TCP port for Kedro-Viz in a range "
        "from {} to {}".format(start_at, end_at)
    )
# pylint: disable=unused-argument,missing-type-doc
def run_viz(port: int = None, line=None, local_ns=None) -> None:
    """
    Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
    the Jupyter notebook environment.
    Args:
        port: TCP port that viz will listen to. Defaults to 4141.
        line: line required by line magic interface.
        local_ns: Local namespace with local variables of the scope where the line magic is invoked.
            For more details, please visit:
            https://ipython.readthedocs.io/en/stable/config/custommagics.html
    """
    port = port or 4141  # Default argument doesn't work in Jupyter line magic.
    port = _allocate_port(start_at=port)
    # Replace any live server already bound to the chosen port.
    if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
        _VIZ_PROCESSES[port].terminate()
    # Forward the project path when available in the notebook namespace.
    if local_ns is not None and "project_path" in local_ns:  # pragma: no cover
        target = partial(run_server, project_path=local_ns["project_path"])
    else:
        target = run_server
    # Daemon process: the server is killed together with the notebook kernel.
    viz_process = multiprocessing.Process(
        target=target, daemon=True, kwargs={"port": port}
    )
    viz_process.start()
    _VIZ_PROCESSES[port] = viz_process
    # Wait until the server answers HTTP before rendering the iframe.
    _wait_for(func=_check_viz_up, port=port)
    wrapper = """
    <html lang="en"><head></head><body style="width:100; height:100;">
    <iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
    </body></html>""".format(
        port
    )
    display(HTML(wrapper))
|
flexible
|
{
"blob_id": "0069a61127c5968d7014bdf7f8c4441f02e67df0",
"index": 6541,
"step-1": "<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\n<mask token>\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. 
Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-2": "<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\n<mask token>\n\n\ndef _allocate_port(start_at: int, end_at: int=65535) ->int:\n acceptable_ports = range(start_at, end_at + 1)\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports:\n return sorted(viz_ports)[0]\n socket.setdefaulttimeout(2.0)\n for port in acceptable_ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ) as sock:\n if sock.connect_ex(('127.0.0.1', port)) != 0:\n return port\n raise ValueError(\n 'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'\n .format(start_at, end_at))\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. 
It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-3": "<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\ndef _check_viz_up(port):\n url = 'http://127.0.0.1:{}/'.format(port)\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n return False\n return response.status_code == 200\n\n\ndef _allocate_port(start_at: int, end_at: int=65535) ->int:\n acceptable_ports = range(start_at, end_at + 1)\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports:\n return sorted(viz_ports)[0]\n socket.setdefaulttimeout(2.0)\n for port in acceptable_ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ) as sock:\n if sock.connect_ex(('127.0.0.1', port)) != 0:\n return port\n raise ValueError(\n 'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'\n .format(start_at, end_at))\n\n\ndef run_viz(port: 
int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-4": "<mask token>\n_VIZ_PROCESSES: Dict[str, int] = {}\n<mask token>\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(func: Callable, expected_result: Any=True, timeout: int=10,\n print_error: bool=True, sleep_for: int=1, **kwargs) ->None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err:\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\ndef _check_viz_up(port):\n url = 'http://127.0.0.1:{}/'.format(port)\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n return False\n return response.status_code == 200\n\n\ndef _allocate_port(start_at: int, end_at: int=65535) ->int:\n acceptable_ports = range(start_at, end_at + 1)\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports:\n return sorted(viz_ports)[0]\n socket.setdefaulttimeout(2.0)\n for port in acceptable_ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ) as sock:\n if sock.connect_ex(('127.0.0.1', port)) != 0:\n return port\n raise ValueError(\n 'Cannot allocate an open TCP port for Kedro-Viz in a range from {} to {}'\n 
.format(start_at, end_at))\n\n\ndef run_viz(port: int=None, line=None, local_ns=None) ->None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141\n port = _allocate_port(start_at=port)\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n if local_ns is not None and 'project_path' in local_ns:\n target = partial(run_server, project_path=local_ns['project_path'])\n else:\n target = run_server\n viz_process = multiprocessing.Process(target=target, daemon=True,\n kwargs={'port': port})\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n _wait_for(func=_check_viz_up, port=port)\n wrapper = (\n \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\"\n .format(port))\n display(HTML(wrapper))\n",
"step-5": "# Copyright 2021 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. 
You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"`kedro_viz.launchers.jupyter` provides line_magic to launch the viz server\nfrom a jupyter notebook.\n\"\"\"\n# pragma: no cover\nimport logging\nimport multiprocessing\nimport socket\nfrom contextlib import closing\nfrom functools import partial\nfrom time import sleep, time\nfrom typing import Any, Callable, Dict\n\nimport requests\nfrom IPython.core.display import HTML, display\n\nfrom kedro_viz.server import run_server\n\n_VIZ_PROCESSES: Dict[str, int] = {}\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WaitForException(Exception):\n \"\"\"WaitForException: if func doesn't return expected result within the specified time\"\"\"\n\n\ndef _wait_for(\n func: Callable,\n expected_result: Any = True,\n timeout: int = 10,\n print_error: bool = True,\n sleep_for: int = 1,\n **kwargs,\n) -> None:\n \"\"\"\n Run specified function until it returns expected result until timeout.\n\n Args:\n func (Callable): Specified function\n expected_result (Any): result that is expected. Defaults to None.\n timeout (int): Time out in seconds. 
Defaults to 10.\n print_error (boolean): whether any exceptions raised should be printed.\n Defaults to False.\n sleep_for (int): Execute func every specified number of seconds.\n Defaults to 1.\n **kwargs: Arguments to be passed to func\n\n Raises:\n WaitForException: if func doesn't return expected result within the\n specified time\n\n \"\"\"\n end = time() + timeout\n\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err: # pylint: disable=broad-except\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )\n\n\ndef _check_viz_up(port): # pragma: no cover\n url = \"http://127.0.0.1:{}/\".format(port)\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n return False\n\n return response.status_code == 200\n\n\ndef _allocate_port(start_at: int, end_at: int = 65535) -> int:\n acceptable_ports = range(start_at, end_at + 1)\n\n viz_ports = _VIZ_PROCESSES.keys() & set(acceptable_ports)\n if viz_ports: # reuse one of already allocated ports\n return sorted(viz_ports)[0]\n\n socket.setdefaulttimeout(2.0) # seconds\n for port in acceptable_ports: # iterate through all acceptable ports\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n if sock.connect_ex((\"127.0.0.1\", port)) != 0: # port is available\n return port\n\n raise ValueError(\n \"Cannot allocate an open TCP port for Kedro-Viz in a range \"\n \"from {} to {}\".format(start_at, end_at)\n )\n\n\n# pylint: disable=unused-argument,missing-type-doc\ndef run_viz(port: int = None, line=None, local_ns=None) -> None:\n \"\"\"\n Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in\n the Jupyter notebook environment.\n\n Args:\n port: TCP port that viz will listen to. 
Defaults to 4141.\n line: line required by line magic interface.\n local_ns: Local namespace with local variables of the scope where the line magic is invoked.\n For more details, please visit:\n https://ipython.readthedocs.io/en/stable/config/custommagics.html\n\n \"\"\"\n port = port or 4141 # Default argument doesn't work in Jupyter line magic.\n port = _allocate_port(start_at=port)\n\n if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():\n _VIZ_PROCESSES[port].terminate()\n\n if local_ns is not None and \"project_path\" in local_ns: # pragma: no cover\n target = partial(run_server, project_path=local_ns[\"project_path\"])\n else:\n target = run_server\n\n viz_process = multiprocessing.Process(\n target=target, daemon=True, kwargs={\"port\": port}\n )\n\n viz_process.start()\n _VIZ_PROCESSES[port] = viz_process\n\n _wait_for(func=_check_viz_up, port=port)\n\n wrapper = \"\"\"\n <html lang=\"en\"><head></head><body style=\"width:100; height:100;\">\n <iframe src=\"http://127.0.0.1:{}/\" height=500 width=\"100%\"></iframe>\n </body></html>\"\"\".format(\n port\n )\n display(HTML(wrapper))\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def role_required(role):
    """Parameterised decorator restricting a view to sufficiently-privileged users.

    :param role: minimum role value required to access the wrapped view.
    :return: a decorator that aborts with 404 for unauthorised users.
    """

    def decorator(view):

        @wraps(view)
        def guarded(*args, **kwargs):
            # Equivalent (De Morgan) to: not authenticated or role too low.
            allowed = (current_user.is_authenticated and
                current_user.role >= role)
            if not allowed:
                abort(404)
            return view(*args, **kwargs)
        return guarded
    return decorator
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def role_required(role):
    """
    Decorator factory: protects a route handler so it can only be
    accessed by users whose role is at least ``role``.
    :param role: minimum required user role.
    :return: the configured decorator.
    """

    def decorator(func):

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Anonymous users and users below the required role get a 404.
            if not current_user.is_authenticated or current_user.role < role:
                abort(404)
            return func(*args, **kwargs)
        return wrapper
    return decorator


# Convenience decorators bound to the two privileged roles.
staff_required = role_required(User.ROLE_STAFF)
admin_required = role_required(User.ROLE_ADMIN)
<|reserved_special_token_1|>
from flask import abort
from flask_login import current_user
from functools import wraps
from simpledu.modes import User
def role_required(role):
    """
    Decorator factory: protects a route handler so it can only be
    accessed by users whose role is at least ``role``.
    :param role: minimum required user role.
    :return: the configured decorator.
    """

    def decorator(func):

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Anonymous users and users below the required role get a 404.
            if not current_user.is_authenticated or current_user.role < role:
                abort(404)
            return func(*args, **kwargs)
        return wrapper
    return decorator


# Convenience decorators bound to the two privileged roles.
staff_required = role_required(User.ROLE_STAFF)
admin_required = role_required(User.ROLE_ADMIN)
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
# Author: 李泽军
# Date: 2020/1/27 3:31 PM
# Project: flask-demo
from flask import abort
from flask_login import current_user
from functools import wraps
from simpledu.modes import User
def role_required(role):
    '''
    Decorator factory: protects a route handler so it can only be
    accessed by users whose role is at least ``role``.
    :param role: minimum required user role.
    :return: the configured decorator.
    '''

    def decorator(func):
        @wraps(func)
        def wrapper(*args,**kwargs):
            # Anonymous users and users below the required role get a 404.
            if not current_user.is_authenticated or current_user.role < role:
                abort(404)
            return func(*args,**kwargs)

        return wrapper
    return decorator

# Convenience decorators bound to the two privileged roles.
staff_required = role_required(User.ROLE_STAFF)
admin_required = role_required(User.ROLE_ADMIN)
|
flexible
|
{
"blob_id": "b3f6d255830bdb2b0afc99aab6e3715616ac4dec",
"index": 4298,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef role_required(role):\n \"\"\"\n 带参数的装饰器,可以用它来保护一个路由处理函数智能被特定的用户访问\n :param role:\n :return:\n \"\"\"\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not current_user.is_authenticated or current_user.role < role:\n abort(404)\n return func(*args, **kwargs)\n return wrapper\n return decorator\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef role_required(role):\n \"\"\"\n 带参数的装饰器,可以用它来保护一个路由处理函数智能被特定的用户访问\n :param role:\n :return:\n \"\"\"\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not current_user.is_authenticated or current_user.role < role:\n abort(404)\n return func(*args, **kwargs)\n return wrapper\n return decorator\n\n\nstaff_required = role_required(User.ROLE_STAFF)\nadmin_required = role_required(User.ROLE_ADMIN)\n",
"step-4": "from flask import abort\nfrom flask_login import current_user\nfrom functools import wraps\nfrom simpledu.modes import User\n\n\ndef role_required(role):\n \"\"\"\n 带参数的装饰器,可以用它来保护一个路由处理函数智能被特定的用户访问\n :param role:\n :return:\n \"\"\"\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not current_user.is_authenticated or current_user.role < role:\n abort(404)\n return func(*args, **kwargs)\n return wrapper\n return decorator\n\n\nstaff_required = role_required(User.ROLE_STAFF)\nadmin_required = role_required(User.ROLE_ADMIN)\n",
"step-5": "# -*- coding:utf-8 -*-\n# Author: 李泽军\n# Date: 2020/1/27 3:31 PM\n# Project: flask-demo\n\nfrom flask import abort\nfrom flask_login import current_user\nfrom functools import wraps\nfrom simpledu.modes import User\n\n\ndef role_required(role):\n '''\n 带参数的装饰器,可以用它来保护一个路由处理函数智能被特定的用户访问\n :param role:\n :return:\n '''\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args,**kwargs):\n if not current_user.is_authenticated or current_user.role < role:\n abort(404)\n return func(*args,**kwargs)\n\n return wrapper\n return decorator\n\n# 特定角色的装饰器\nstaff_required = role_required(User.ROLE_STAFF)\nadmin_required = role_required(User.ROLE_ADMIN)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from celery import shared_task
import tweepy
from datetime import datetime, timedelta
from .models import Tweet
from django.db import IntegrityError
# SECURITY: Twitter API credentials are hard-coded and committed to source
# control. They should be revoked and loaded from environment variables or a
# secrets manager instead (e.g. os.environ['CONSUMER_KEY']).
CONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'
CONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'
ACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'
ACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'
# Authenticate once at import time; the module-level client is shared by all
# tasks. wait_on_rate_limit makes tweepy sleep through API rate limits.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
@shared_task(name='clean_tweetdb')
def clean_tweetdb():
    """Periodic task: delete stored tweets whose ``tweet_date`` is 8+ days old."""
    # NOTE(review): comparison uses naive datetime.now(), matching how
    # get_tweets stores tweet_date -- confirm the project runs with USE_TZ off.
    cutoff = datetime.now() - timedelta(days=8)
    # Delete per object (rather than a bulk queryset delete) to preserve any
    # per-instance delete() behaviour on the model.
    for tweet in Tweet.objects.all():
        if tweet.tweet_date <= cutoff:
            tweet.delete()
@shared_task(name='get_tweets')
def get_tweets():
    """Fetch up to 100 '#python' tweets from the Twitter API and store them.

    Schedules the clean-up task, then pages backwards from the oldest tweet
    already stored (via ``max_id``). Tweets whose id is already in the
    database are skipped when the insert raises ``IntegrityError``.
    """
    clean_tweetdb.delay()
    stored_ids = [tweet.tweet_id for tweet in Tweet.objects.all()]
    # On an empty table there is no oldest tweet: pass max_id=None so tweepy
    # omits the parameter and the search starts from the most recent tweets.
    max_id = min(stored_ids) if stored_ids else None
    tweets = api.search(
        q='#python',
        max_id=max_id,
        count=100
    )
    for tweet in tweets:
        try:
            Tweet.objects.create(
                tweet_id=tweet.id,
                tweet_date=tweet.created_at,
                tweet_source=tweet.source,
                tweet_favorite_cnt=tweet.favorite_count,
                tweet_retweet_cnt=tweet.retweet_count,
                tweet_text=tweet.text,
            )
        except IntegrityError:
            # Duplicate tweet_id -- already stored; skip and continue.
            pass
|
normal
|
{
"blob_id": "d4d19411f0c48ffb99bd17e8387f1741144e43b4",
"index": 870,
"step-1": "<mask token>\n\n\n@shared_task(name='clean_tweetdb')\ndef clean_tweetdb():\n tweets = Tweet.objects.all()\n for tweets.tweet_date in tweets:\n if tweets.tweet_date <= datetime.now() - timedelta(days=8):\n tweets.delet()\n\n\n@shared_task(name='get_tweets')\ndef get_tweets():\n \"\"\"Get some tweets from the twitter api and store them to the db.\"\"\"\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(q='#python', max_id=max_id, count=100)\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,\n tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):\n try:\n Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,\n tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)\n except IntegrityError:\n pass\n",
"step-2": "<mask token>\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n<mask token>\n\n\n@shared_task(name='clean_tweetdb')\ndef clean_tweetdb():\n tweets = Tweet.objects.all()\n for tweets.tweet_date in tweets:\n if tweets.tweet_date <= datetime.now() - timedelta(days=8):\n tweets.delet()\n\n\n@shared_task(name='get_tweets')\ndef get_tweets():\n \"\"\"Get some tweets from the twitter api and store them to the db.\"\"\"\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(q='#python', max_id=max_id, count=100)\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,\n tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):\n try:\n Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,\n tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)\n except IntegrityError:\n pass\n",
"step-3": "<mask token>\nCONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'\nCONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'\nACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'\nACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n\n@shared_task(name='clean_tweetdb')\ndef clean_tweetdb():\n tweets = Tweet.objects.all()\n for tweets.tweet_date in tweets:\n if tweets.tweet_date <= datetime.now() - timedelta(days=8):\n tweets.delet()\n\n\n@shared_task(name='get_tweets')\ndef get_tweets():\n \"\"\"Get some tweets from the twitter api and store them to the db.\"\"\"\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(q='#python', max_id=max_id, count=100)\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,\n tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):\n try:\n Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,\n tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)\n except IntegrityError:\n pass\n",
"step-4": "from celery import shared_task\nimport tweepy\nfrom datetime import datetime, timedelta\nfrom .models import Tweet\nfrom django.db import IntegrityError\nCONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'\nCONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'\nACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'\nACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n\n@shared_task(name='clean_tweetdb')\ndef clean_tweetdb():\n tweets = Tweet.objects.all()\n for tweets.tweet_date in tweets:\n if tweets.tweet_date <= datetime.now() - timedelta(days=8):\n tweets.delet()\n\n\n@shared_task(name='get_tweets')\ndef get_tweets():\n \"\"\"Get some tweets from the twitter api and store them to the db.\"\"\"\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(q='#python', max_id=max_id, count=100)\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,\n tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):\n try:\n Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,\n tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)\n except IntegrityError:\n pass\n",
"step-5": "from celery import shared_task\nimport tweepy\nfrom datetime import datetime, timedelta\n\nfrom .models import Tweet\nfrom django.db import IntegrityError\n\nCONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'\nCONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'\nACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'\nACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n\n@shared_task(name='clean_tweetdb')\ndef clean_tweetdb():\n tweets = Tweet.objects.all()\n for tweets.tweet_date in tweets:\n if tweets.tweet_date <= datetime.now() - timedelta(days=8):\n tweets.delet()\n\n\n@shared_task(name='get_tweets')\ndef get_tweets():\n \"\"\"Get some tweets from the twitter api and store them to the db.\"\"\"\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from setuptools import setup, find_packages
setup(
name='spt_compute',
version='2.0.1',
description='Computational framework for the Streamflow Prediciton Tool',
long_description='Computational framework to ingest ECMWF ensemble runoff forcasts '
' or otherLand Surface Model forecasts;'
' generate input for and run the RAPID (rapid-hub.org) program'
' using HTCondor or Python\'s Multiprocessing; and upload to '
' CKAN in order to be used by the Streamflow Prediction Tool (SPT).'
' There is also an experimental option to use the AutoRoute program'
' for flood inundation mapping.',
keywords='ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',
author='Alan Dee Snow',
author_email='alan.d.snow@usace.army.mil',
url='https://github.com/erdc/spt_compute',
license='BSD 3-Clause',
packages=find_packages(),
install_requires=[
'numpy',
'netCDF4',
'pandas',
'RAPIDpy',
'tethys_dataset_services',
'xarray',
],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
extras_require={
'tests': [
'coveralls',
'pytest',
'pytest-cov',
],
},
)
|
normal
|
{
"blob_id": "53b6d30bf52c43daaebe8158002db1072e34f127",
"index": 7956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='spt_compute', version='2.0.1', description=\n 'Computational framework for the Streamflow Prediciton Tool',\n long_description=\n \"Computational framework to ingest ECMWF ensemble runoff forcasts or otherLand Surface Model forecasts; generate input for and run the RAPID (rapid-hub.org) program using HTCondor or Python's Multiprocessing; and upload to CKAN in order to be used by the Streamflow Prediction Tool (SPT). There is also an experimental option to use the AutoRoute program for flood inundation mapping.\"\n , keywords=\n 'ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',\n author='Alan Dee Snow', author_email='alan.d.snow@usace.army.mil', url=\n 'https://github.com/erdc/spt_compute', license='BSD 3-Clause', packages\n =find_packages(), install_requires=['numpy', 'netCDF4', 'pandas',\n 'RAPIDpy', 'tethys_dataset_services', 'xarray'], classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3'], extras_require={'tests': [\n 'coveralls', 'pytest', 'pytest-cov']})\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='spt_compute', version='2.0.1', description=\n 'Computational framework for the Streamflow Prediciton Tool',\n long_description=\n \"Computational framework to ingest ECMWF ensemble runoff forcasts or otherLand Surface Model forecasts; generate input for and run the RAPID (rapid-hub.org) program using HTCondor or Python's Multiprocessing; and upload to CKAN in order to be used by the Streamflow Prediction Tool (SPT). There is also an experimental option to use the AutoRoute program for flood inundation mapping.\"\n , keywords=\n 'ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',\n author='Alan Dee Snow', author_email='alan.d.snow@usace.army.mil', url=\n 'https://github.com/erdc/spt_compute', license='BSD 3-Clause', packages\n =find_packages(), install_requires=['numpy', 'netCDF4', 'pandas',\n 'RAPIDpy', 'tethys_dataset_services', 'xarray'], classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3'], extras_require={'tests': [\n 'coveralls', 'pytest', 'pytest-cov']})\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(\n name='spt_compute',\n version='2.0.1',\n description='Computational framework for the Streamflow Prediciton Tool',\n long_description='Computational framework to ingest ECMWF ensemble runoff forcasts '\n ' or otherLand Surface Model forecasts;'\n ' generate input for and run the RAPID (rapid-hub.org) program'\n ' using HTCondor or Python\\'s Multiprocessing; and upload to '\n ' CKAN in order to be used by the Streamflow Prediction Tool (SPT).'\n ' There is also an experimental option to use the AutoRoute program'\n ' for flood inundation mapping.',\n keywords='ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',\n author='Alan Dee Snow',\n author_email='alan.d.snow@usace.army.mil',\n url='https://github.com/erdc/spt_compute',\n license='BSD 3-Clause',\n packages=find_packages(),\n install_requires=[\n 'numpy',\n 'netCDF4',\n 'pandas',\n 'RAPIDpy',\n 'tethys_dataset_services',\n 'xarray',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n ],\n extras_require={\n 'tests': [\n 'coveralls',\n 'pytest',\n 'pytest-cov',\n ],\n },\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import turtle
hexagon = turtle.Turtle()
for i in range(6):
hexagon.forward(100)
hexagon.left(60)
|
normal
|
{
"blob_id": "f6401eca2dc0ea86a934e859c35fa2d6c85a61b3",
"index": 8695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(6):\n hexagon.forward(100)\n hexagon.left(60)\n",
"step-3": "<mask token>\nhexagon = turtle.Turtle()\nfor i in range(6):\n hexagon.forward(100)\n hexagon.left(60)\n",
"step-4": "import turtle\nhexagon = turtle.Turtle()\nfor i in range(6):\n hexagon.forward(100)\n hexagon.left(60)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
from pets.pet import Pet
from store_requests.store import Store
from user_requests.user import User
SUCCESS = 200
NotFound = 404
url_site = 'https://petstore.swagger.io/v2'
new_username = "Khrystyna"
new_id = 12345
invalid_new_id = 1234
error_message = "oops we have a problem!"
store_inventory = {
"1": 1,
"4444": 2,
"teste": 1,
"string": 6738,
"Operated": 4,
"pending": 56,
"Not-Operated": 10,
"available": 4800,
"waiting list": 1,
"Unavailable": 1,
"Shortlisted": 1,
"Sold": 1,
"availasdfsadfasdfble": 1,
"not available": 1,
"Available": 1,
"YAI3424forYAI3373": 1,
"ok": 1,
"KBMAvailable": 3,
"onwork": 1,
"sold": 87,
"ddd": 1,
"Nonavailable": 1,
"Offline": 1,
"straight": 2,
"pendin": 1,
"sts": 1,
"onhold": 3,
"status": 5,
"xavailable": 1
}
Category_Dict = dict(id=36,
name='Rexy')
tag_dict = dict(id=4,
name='Dog')
PetObject = Pet(id=456,
category=Category_Dict,
name="Xixi",
photo_urls=["https://www.what-dog.net/Images/faces2/scroll001.jpg"],
tags=[tag_dict],
status='sold')
DataJsonForPets = json.dumps(PetObject.__dict__)
store = Store(id=12,
petId=12,
quantity=2,
ship_date="2018-09-12T13:52:49.901Z",
status="placed",
complete=True)
data_json_for_store = json.dumps(store.__dict__)
user = User(id=3,
username="Nini",
first_name="Vira",
last_name="Budda",
email="email@gmail.com",
password="1234567",
phone="55455545",
user_status=1)
data_json_for_user = json.dumps(user.__dict__)
|
normal
|
{
"blob_id": "54ed0683d0f8d907c27e2f3809f9533556593392",
"index": 5546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSUCCESS = 200\nNotFound = 404\nurl_site = 'https://petstore.swagger.io/v2'\nnew_username = 'Khrystyna'\nnew_id = 12345\ninvalid_new_id = 1234\nerror_message = 'oops we have a problem!'\nstore_inventory = {'1': 1, '4444': 2, 'teste': 1, 'string': 6738,\n 'Operated': 4, 'pending': 56, 'Not-Operated': 10, 'available': 4800,\n 'waiting list': 1, 'Unavailable': 1, 'Shortlisted': 1, 'Sold': 1,\n 'availasdfsadfasdfble': 1, 'not available': 1, 'Available': 1,\n 'YAI3424forYAI3373': 1, 'ok': 1, 'KBMAvailable': 3, 'onwork': 1, 'sold':\n 87, 'ddd': 1, 'Nonavailable': 1, 'Offline': 1, 'straight': 2, 'pendin':\n 1, 'sts': 1, 'onhold': 3, 'status': 5, 'xavailable': 1}\nCategory_Dict = dict(id=36, name='Rexy')\ntag_dict = dict(id=4, name='Dog')\nPetObject = Pet(id=456, category=Category_Dict, name='Xixi', photo_urls=[\n 'https://www.what-dog.net/Images/faces2/scroll001.jpg'], tags=[tag_dict\n ], status='sold')\nDataJsonForPets = json.dumps(PetObject.__dict__)\nstore = Store(id=12, petId=12, quantity=2, ship_date=\n '2018-09-12T13:52:49.901Z', status='placed', complete=True)\ndata_json_for_store = json.dumps(store.__dict__)\nuser = User(id=3, username='Nini', first_name='Vira', last_name='Budda',\n email='email@gmail.com', password='1234567', phone='55455545',\n user_status=1)\ndata_json_for_user = json.dumps(user.__dict__)\n",
"step-3": "import json\nfrom pets.pet import Pet\nfrom store_requests.store import Store\nfrom user_requests.user import User\nSUCCESS = 200\nNotFound = 404\nurl_site = 'https://petstore.swagger.io/v2'\nnew_username = 'Khrystyna'\nnew_id = 12345\ninvalid_new_id = 1234\nerror_message = 'oops we have a problem!'\nstore_inventory = {'1': 1, '4444': 2, 'teste': 1, 'string': 6738,\n 'Operated': 4, 'pending': 56, 'Not-Operated': 10, 'available': 4800,\n 'waiting list': 1, 'Unavailable': 1, 'Shortlisted': 1, 'Sold': 1,\n 'availasdfsadfasdfble': 1, 'not available': 1, 'Available': 1,\n 'YAI3424forYAI3373': 1, 'ok': 1, 'KBMAvailable': 3, 'onwork': 1, 'sold':\n 87, 'ddd': 1, 'Nonavailable': 1, 'Offline': 1, 'straight': 2, 'pendin':\n 1, 'sts': 1, 'onhold': 3, 'status': 5, 'xavailable': 1}\nCategory_Dict = dict(id=36, name='Rexy')\ntag_dict = dict(id=4, name='Dog')\nPetObject = Pet(id=456, category=Category_Dict, name='Xixi', photo_urls=[\n 'https://www.what-dog.net/Images/faces2/scroll001.jpg'], tags=[tag_dict\n ], status='sold')\nDataJsonForPets = json.dumps(PetObject.__dict__)\nstore = Store(id=12, petId=12, quantity=2, ship_date=\n '2018-09-12T13:52:49.901Z', status='placed', complete=True)\ndata_json_for_store = json.dumps(store.__dict__)\nuser = User(id=3, username='Nini', first_name='Vira', last_name='Budda',\n email='email@gmail.com', password='1234567', phone='55455545',\n user_status=1)\ndata_json_for_user = json.dumps(user.__dict__)\n",
"step-4": "import json\n\nfrom pets.pet import Pet\nfrom store_requests.store import Store\nfrom user_requests.user import User\n\nSUCCESS = 200\nNotFound = 404\nurl_site = 'https://petstore.swagger.io/v2'\nnew_username = \"Khrystyna\"\nnew_id = 12345\ninvalid_new_id = 1234\nerror_message = \"oops we have a problem!\"\nstore_inventory = {\n \"1\": 1,\n \"4444\": 2,\n \"teste\": 1,\n \"string\": 6738,\n \"Operated\": 4,\n \"pending\": 56,\n \"Not-Operated\": 10,\n \"available\": 4800,\n \"waiting list\": 1,\n \"Unavailable\": 1,\n \"Shortlisted\": 1,\n \"Sold\": 1,\n \"availasdfsadfasdfble\": 1,\n \"not available\": 1,\n \"Available\": 1,\n \"YAI3424forYAI3373\": 1,\n \"ok\": 1,\n \"KBMAvailable\": 3,\n \"onwork\": 1,\n \"sold\": 87,\n \"ddd\": 1,\n \"Nonavailable\": 1,\n \"Offline\": 1,\n \"straight\": 2,\n \"pendin\": 1,\n \"sts\": 1,\n \"onhold\": 3,\n \"status\": 5,\n \"xavailable\": 1\n}\n\nCategory_Dict = dict(id=36,\n name='Rexy')\ntag_dict = dict(id=4,\n name='Dog')\nPetObject = Pet(id=456,\n category=Category_Dict,\n name=\"Xixi\",\n photo_urls=[\"https://www.what-dog.net/Images/faces2/scroll001.jpg\"],\n tags=[tag_dict],\n status='sold')\nDataJsonForPets = json.dumps(PetObject.__dict__)\n\nstore = Store(id=12,\n petId=12,\n quantity=2,\n ship_date=\"2018-09-12T13:52:49.901Z\",\n status=\"placed\",\n complete=True)\ndata_json_for_store = json.dumps(store.__dict__)\n\nuser = User(id=3,\n username=\"Nini\",\n first_name=\"Vira\",\n last_name=\"Budda\",\n email=\"email@gmail.com\",\n password=\"1234567\",\n phone=\"55455545\",\n user_status=1)\ndata_json_for_user = json.dumps(user.__dict__)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding: utf-8
def init_list():
print("=== init_list ===")
l = list()
print(l)
l2 = []
print(l2)
l3 = list((1, 2))
print(l3)
l4 = [1, 2]
print(l4)
def insert_append_and_extend_list():
print("=== insert_append_and_extend_list ===")
l = ['e', 'h']
l.insert(-1, 'g')
print(l)
l.insert(1, 'f')
print(l)
l.insert(0, 'd')
print(l)
l.insert(10, 'i')
print(l)
l.append('l')
print(l)
l.extend(['m', 'n'])
print(l)
l[0:0] = ['b', 'c']
print(l)
l = ['a'] + l
print(l)
def remove_pop_list():
print("=== remove_pop_list ===")
l = ['a', 'b', 'c', 'd', 'e', 'e']
print(l)
print('l.remove: {}'.format(l.remove('e'))) # 只删除第一次出现的,没有返回
print(l)
# l.remove('h') # 删除不存在的会导致 ValueError
if 'h' in l:
l.remove('h')
l.pop()
print(l)
l.pop(1)
print('l.pop: {}'.format(l.pop(1)))
print(l)
# l.pop(10) # IndexError: pop index out of range
def get_len_count_index_list():
print("=== get_len_count_index_list ===")
l = ['a', 'b', 'c', 'd', 'e', 'e']
print(l[0])
# l[10] = 'z' # IndexError: list index out of range
# print(l[10]) # IndexError: list index out of range
# print(l.get(10)) # 'list' object has no attribute 'get'
print('len: {}'.format(len(l)))
print('count d: {}'.format(l.count('d')))
print('count e: {}'.format(l.count('e')))
print('count f: {}'.format(l.count('f')))
print('index d: {}'.format(l.index('d')))
print('index e: {}'.format(l.index('e')))
# print('index f: {}'.format(l.index('f'))) # ValueError: 'f' is not in list
def sort_reverse_list():
print("=== sort_reverse_list ===")
l = ['e', 'b', 'c', 'a', 'f', 'd']
print(l)
print(l.sort()) # return None
print(l)
l.sort(cmp=lambda x, y: -(cmp(x, y)))
print(l)
print(l.reverse()) # return None
print(l)
print(list(reversed(l))) # return a reversed iterator
print(l)
def _sort_func(x):
return x[0] + x[1]
def sort_list():
print("=== sort_list ===")
l = [(1, 2), (3, 1), (2, 3)]
print(sorted(l, key=lambda x: x[1]))
print(sorted(l, key=lambda x: x[0]))
print(sorted(l, key=_sort_func))
def list_index():
print("=== list_index ===")
l = ['a', 'b', 'c']
print(l.index('a'))
def list_slice():
print("=== list_slice ===")
l = [1, 2, 3]
print(l[:1])
print(l[:5])
print(l[-1:])
print(l[-5:])
print(l[:-1])
print(l[10:20])
ls = l[:1]
ls[0] = 2
print(ls)
print(l) # 改变 ls 不会改变 l
def list_slice_with_step():
print("=== list_slice_with_step ===")
l = list(range(20))
print(l[::3])
print(l[1::3])
print(l[2::3])
print(l[::-1])
l[::3] = [0, 0, 0, 0, 0, 0, 0]
print(l)
del l[::3]
print(l)
def list_comprehension():
print("=== list_comprehension ===")
# 列表推导式
l = [i * i for i in range(3)]
print(l)
def test_insert_when_traversing():
print("=== test_insert_when_traversing ===")
l = [3, 4]
for i in l:
l.insert(0, -i)
print(i)
# 会停不下来
def main():
# init_list()
# insert_append_and_extend_list()
# remove_pop_list()
# get_len_count_index_list()
# sort_reverse_list()
# sort_list()
# list_index()
list_slice()
# list_slice_with_step()
# list_comprehension()
# test_insert_when_traversing()
if __name__ == '__main__':
main()
# https://www.tutorialspoint.com/python/python_lists.htm
# https://stackoverflow.com/a/9028088/3936457
# https://stackoverflow.com/questions/8785554/how-do-i-insert-a-list-at-the-front-of-another-list
|
normal
|
{
"blob_id": "1a710916461644a0676a3bd84926aeabb2aa3f71",
"index": 7127,
"step-1": "<mask token>\n\n\ndef get_len_count_index_list():\n print('=== get_len_count_index_list ===')\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l[0])\n print('len: {}'.format(len(l)))\n print('count d: {}'.format(l.count('d')))\n print('count e: {}'.format(l.count('e')))\n print('count f: {}'.format(l.count('f')))\n print('index d: {}'.format(l.index('d')))\n print('index e: {}'.format(l.index('e')))\n\n\ndef sort_reverse_list():\n print('=== sort_reverse_list ===')\n l = ['e', 'b', 'c', 'a', 'f', 'd']\n print(l)\n print(l.sort())\n print(l)\n l.sort(cmp=lambda x, y: -cmp(x, y))\n print(l)\n print(l.reverse())\n print(l)\n print(list(reversed(l)))\n print(l)\n\n\ndef _sort_func(x):\n return x[0] + x[1]\n\n\ndef sort_list():\n print('=== sort_list ===')\n l = [(1, 2), (3, 1), (2, 3)]\n print(sorted(l, key=lambda x: x[1]))\n print(sorted(l, key=lambda x: x[0]))\n print(sorted(l, key=_sort_func))\n\n\ndef list_index():\n print('=== list_index ===')\n l = ['a', 'b', 'c']\n print(l.index('a'))\n\n\ndef list_slice():\n print('=== list_slice ===')\n l = [1, 2, 3]\n print(l[:1])\n print(l[:5])\n print(l[-1:])\n print(l[-5:])\n print(l[:-1])\n print(l[10:20])\n ls = l[:1]\n ls[0] = 2\n print(ls)\n print(l)\n\n\ndef list_slice_with_step():\n print('=== list_slice_with_step ===')\n l = list(range(20))\n print(l[::3])\n print(l[1::3])\n print(l[2::3])\n print(l[::-1])\n l[::3] = [0, 0, 0, 0, 0, 0, 0]\n print(l)\n del l[::3]\n print(l)\n\n\ndef list_comprehension():\n print('=== list_comprehension ===')\n l = [(i * i) for i in range(3)]\n print(l)\n\n\ndef test_insert_when_traversing():\n print('=== test_insert_when_traversing ===')\n l = [3, 4]\n for i in l:\n l.insert(0, -i)\n print(i)\n\n\ndef main():\n list_slice()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef insert_append_and_extend_list():\n print('=== insert_append_and_extend_list ===')\n l = ['e', 'h']\n l.insert(-1, 'g')\n print(l)\n l.insert(1, 'f')\n print(l)\n l.insert(0, 'd')\n print(l)\n l.insert(10, 'i')\n print(l)\n l.append('l')\n print(l)\n l.extend(['m', 'n'])\n print(l)\n l[0:0] = ['b', 'c']\n print(l)\n l = ['a'] + l\n print(l)\n\n\n<mask token>\n\n\ndef get_len_count_index_list():\n print('=== get_len_count_index_list ===')\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l[0])\n print('len: {}'.format(len(l)))\n print('count d: {}'.format(l.count('d')))\n print('count e: {}'.format(l.count('e')))\n print('count f: {}'.format(l.count('f')))\n print('index d: {}'.format(l.index('d')))\n print('index e: {}'.format(l.index('e')))\n\n\ndef sort_reverse_list():\n print('=== sort_reverse_list ===')\n l = ['e', 'b', 'c', 'a', 'f', 'd']\n print(l)\n print(l.sort())\n print(l)\n l.sort(cmp=lambda x, y: -cmp(x, y))\n print(l)\n print(l.reverse())\n print(l)\n print(list(reversed(l)))\n print(l)\n\n\ndef _sort_func(x):\n return x[0] + x[1]\n\n\ndef sort_list():\n print('=== sort_list ===')\n l = [(1, 2), (3, 1), (2, 3)]\n print(sorted(l, key=lambda x: x[1]))\n print(sorted(l, key=lambda x: x[0]))\n print(sorted(l, key=_sort_func))\n\n\ndef list_index():\n print('=== list_index ===')\n l = ['a', 'b', 'c']\n print(l.index('a'))\n\n\ndef list_slice():\n print('=== list_slice ===')\n l = [1, 2, 3]\n print(l[:1])\n print(l[:5])\n print(l[-1:])\n print(l[-5:])\n print(l[:-1])\n print(l[10:20])\n ls = l[:1]\n ls[0] = 2\n print(ls)\n print(l)\n\n\ndef list_slice_with_step():\n print('=== list_slice_with_step ===')\n l = list(range(20))\n print(l[::3])\n print(l[1::3])\n print(l[2::3])\n print(l[::-1])\n l[::3] = [0, 0, 0, 0, 0, 0, 0]\n print(l)\n del l[::3]\n print(l)\n\n\ndef list_comprehension():\n print('=== list_comprehension ===')\n l = [(i * i) for i in range(3)]\n print(l)\n\n\ndef test_insert_when_traversing():\n print('=== 
test_insert_when_traversing ===')\n l = [3, 4]\n for i in l:\n l.insert(0, -i)\n print(i)\n\n\ndef main():\n list_slice()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef insert_append_and_extend_list():\n print('=== insert_append_and_extend_list ===')\n l = ['e', 'h']\n l.insert(-1, 'g')\n print(l)\n l.insert(1, 'f')\n print(l)\n l.insert(0, 'd')\n print(l)\n l.insert(10, 'i')\n print(l)\n l.append('l')\n print(l)\n l.extend(['m', 'n'])\n print(l)\n l[0:0] = ['b', 'c']\n print(l)\n l = ['a'] + l\n print(l)\n\n\ndef remove_pop_list():\n print('=== remove_pop_list ===')\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l)\n print('l.remove: {}'.format(l.remove('e')))\n print(l)\n if 'h' in l:\n l.remove('h')\n l.pop()\n print(l)\n l.pop(1)\n print('l.pop: {}'.format(l.pop(1)))\n print(l)\n\n\ndef get_len_count_index_list():\n print('=== get_len_count_index_list ===')\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l[0])\n print('len: {}'.format(len(l)))\n print('count d: {}'.format(l.count('d')))\n print('count e: {}'.format(l.count('e')))\n print('count f: {}'.format(l.count('f')))\n print('index d: {}'.format(l.index('d')))\n print('index e: {}'.format(l.index('e')))\n\n\ndef sort_reverse_list():\n print('=== sort_reverse_list ===')\n l = ['e', 'b', 'c', 'a', 'f', 'd']\n print(l)\n print(l.sort())\n print(l)\n l.sort(cmp=lambda x, y: -cmp(x, y))\n print(l)\n print(l.reverse())\n print(l)\n print(list(reversed(l)))\n print(l)\n\n\ndef _sort_func(x):\n return x[0] + x[1]\n\n\ndef sort_list():\n print('=== sort_list ===')\n l = [(1, 2), (3, 1), (2, 3)]\n print(sorted(l, key=lambda x: x[1]))\n print(sorted(l, key=lambda x: x[0]))\n print(sorted(l, key=_sort_func))\n\n\ndef list_index():\n print('=== list_index ===')\n l = ['a', 'b', 'c']\n print(l.index('a'))\n\n\ndef list_slice():\n print('=== list_slice ===')\n l = [1, 2, 3]\n print(l[:1])\n print(l[:5])\n print(l[-1:])\n print(l[-5:])\n print(l[:-1])\n print(l[10:20])\n ls = l[:1]\n ls[0] = 2\n print(ls)\n print(l)\n\n\ndef list_slice_with_step():\n print('=== list_slice_with_step ===')\n l = list(range(20))\n print(l[::3])\n print(l[1::3])\n 
print(l[2::3])\n print(l[::-1])\n l[::3] = [0, 0, 0, 0, 0, 0, 0]\n print(l)\n del l[::3]\n print(l)\n\n\ndef list_comprehension():\n print('=== list_comprehension ===')\n l = [(i * i) for i in range(3)]\n print(l)\n\n\ndef test_insert_when_traversing():\n print('=== test_insert_when_traversing ===')\n l = [3, 4]\n for i in l:\n l.insert(0, -i)\n print(i)\n\n\ndef main():\n list_slice()\n\n\n<mask token>\n",
"step-4": "def init_list():\n print('=== init_list ===')\n l = list()\n print(l)\n l2 = []\n print(l2)\n l3 = list((1, 2))\n print(l3)\n l4 = [1, 2]\n print(l4)\n\n\ndef insert_append_and_extend_list():\n print('=== insert_append_and_extend_list ===')\n l = ['e', 'h']\n l.insert(-1, 'g')\n print(l)\n l.insert(1, 'f')\n print(l)\n l.insert(0, 'd')\n print(l)\n l.insert(10, 'i')\n print(l)\n l.append('l')\n print(l)\n l.extend(['m', 'n'])\n print(l)\n l[0:0] = ['b', 'c']\n print(l)\n l = ['a'] + l\n print(l)\n\n\ndef remove_pop_list():\n print('=== remove_pop_list ===')\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l)\n print('l.remove: {}'.format(l.remove('e')))\n print(l)\n if 'h' in l:\n l.remove('h')\n l.pop()\n print(l)\n l.pop(1)\n print('l.pop: {}'.format(l.pop(1)))\n print(l)\n\n\ndef get_len_count_index_list():\n print('=== get_len_count_index_list ===')\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l[0])\n print('len: {}'.format(len(l)))\n print('count d: {}'.format(l.count('d')))\n print('count e: {}'.format(l.count('e')))\n print('count f: {}'.format(l.count('f')))\n print('index d: {}'.format(l.index('d')))\n print('index e: {}'.format(l.index('e')))\n\n\ndef sort_reverse_list():\n print('=== sort_reverse_list ===')\n l = ['e', 'b', 'c', 'a', 'f', 'd']\n print(l)\n print(l.sort())\n print(l)\n l.sort(cmp=lambda x, y: -cmp(x, y))\n print(l)\n print(l.reverse())\n print(l)\n print(list(reversed(l)))\n print(l)\n\n\ndef _sort_func(x):\n return x[0] + x[1]\n\n\ndef sort_list():\n print('=== sort_list ===')\n l = [(1, 2), (3, 1), (2, 3)]\n print(sorted(l, key=lambda x: x[1]))\n print(sorted(l, key=lambda x: x[0]))\n print(sorted(l, key=_sort_func))\n\n\ndef list_index():\n print('=== list_index ===')\n l = ['a', 'b', 'c']\n print(l.index('a'))\n\n\ndef list_slice():\n print('=== list_slice ===')\n l = [1, 2, 3]\n print(l[:1])\n print(l[:5])\n print(l[-1:])\n print(l[-5:])\n print(l[:-1])\n print(l[10:20])\n ls = l[:1]\n ls[0] = 2\n print(ls)\n print(l)\n\n\ndef 
list_slice_with_step():\n print('=== list_slice_with_step ===')\n l = list(range(20))\n print(l[::3])\n print(l[1::3])\n print(l[2::3])\n print(l[::-1])\n l[::3] = [0, 0, 0, 0, 0, 0, 0]\n print(l)\n del l[::3]\n print(l)\n\n\ndef list_comprehension():\n print('=== list_comprehension ===')\n l = [(i * i) for i in range(3)]\n print(l)\n\n\ndef test_insert_when_traversing():\n print('=== test_insert_when_traversing ===')\n l = [3, 4]\n for i in l:\n l.insert(0, -i)\n print(i)\n\n\ndef main():\n list_slice()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# coding: utf-8\n\n\ndef init_list():\n print(\"=== init_list ===\")\n l = list()\n print(l)\n l2 = []\n print(l2)\n l3 = list((1, 2))\n print(l3)\n l4 = [1, 2]\n print(l4)\n\n\ndef insert_append_and_extend_list():\n print(\"=== insert_append_and_extend_list ===\")\n l = ['e', 'h']\n l.insert(-1, 'g')\n print(l)\n l.insert(1, 'f')\n print(l)\n l.insert(0, 'd')\n print(l)\n l.insert(10, 'i')\n print(l)\n l.append('l')\n print(l)\n l.extend(['m', 'n'])\n print(l)\n l[0:0] = ['b', 'c']\n print(l)\n l = ['a'] + l\n print(l)\n\n\ndef remove_pop_list():\n print(\"=== remove_pop_list ===\")\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l)\n print('l.remove: {}'.format(l.remove('e'))) # 只删除第一次出现的,没有返回\n print(l)\n # l.remove('h') # 删除不存在的会导致 ValueError\n if 'h' in l:\n l.remove('h')\n l.pop()\n print(l)\n l.pop(1)\n print('l.pop: {}'.format(l.pop(1)))\n print(l)\n # l.pop(10) # IndexError: pop index out of range\n\n\ndef get_len_count_index_list():\n print(\"=== get_len_count_index_list ===\")\n l = ['a', 'b', 'c', 'd', 'e', 'e']\n print(l[0])\n # l[10] = 'z' # IndexError: list index out of range\n # print(l[10]) # IndexError: list index out of range\n # print(l.get(10)) # 'list' object has no attribute 'get'\n print('len: {}'.format(len(l)))\n print('count d: {}'.format(l.count('d')))\n print('count e: {}'.format(l.count('e')))\n print('count f: {}'.format(l.count('f')))\n print('index d: {}'.format(l.index('d')))\n print('index e: {}'.format(l.index('e')))\n # print('index f: {}'.format(l.index('f'))) # ValueError: 'f' is not in list\n\n\ndef sort_reverse_list():\n print(\"=== sort_reverse_list ===\")\n l = ['e', 'b', 'c', 'a', 'f', 'd']\n print(l)\n print(l.sort()) # return None\n print(l)\n l.sort(cmp=lambda x, y: -(cmp(x, y)))\n print(l)\n print(l.reverse()) # return None\n print(l)\n print(list(reversed(l))) # return a reversed iterator\n print(l)\n\n\ndef _sort_func(x):\n return x[0] + x[1]\n\n\ndef sort_list():\n print(\"=== sort_list ===\")\n l = [(1, 
2), (3, 1), (2, 3)]\n print(sorted(l, key=lambda x: x[1]))\n print(sorted(l, key=lambda x: x[0]))\n print(sorted(l, key=_sort_func))\n\n\ndef list_index():\n print(\"=== list_index ===\")\n l = ['a', 'b', 'c']\n print(l.index('a'))\n\n\ndef list_slice():\n print(\"=== list_slice ===\")\n l = [1, 2, 3]\n print(l[:1])\n print(l[:5])\n print(l[-1:])\n print(l[-5:])\n print(l[:-1])\n print(l[10:20])\n\n ls = l[:1]\n ls[0] = 2\n print(ls)\n print(l) # 改变 ls 不会改变 l\n\n\ndef list_slice_with_step():\n print(\"=== list_slice_with_step ===\")\n l = list(range(20))\n print(l[::3])\n print(l[1::3])\n print(l[2::3])\n print(l[::-1])\n l[::3] = [0, 0, 0, 0, 0, 0, 0]\n print(l)\n del l[::3]\n print(l)\n\n\ndef list_comprehension():\n print(\"=== list_comprehension ===\")\n # 列表推导式\n l = [i * i for i in range(3)]\n print(l)\n\n\ndef test_insert_when_traversing():\n print(\"=== test_insert_when_traversing ===\")\n l = [3, 4]\n for i in l:\n l.insert(0, -i)\n print(i)\n # 会停不下来\n\n\ndef main():\n # init_list()\n # insert_append_and_extend_list()\n # remove_pop_list()\n # get_len_count_index_list()\n # sort_reverse_list()\n # sort_list()\n # list_index()\n list_slice()\n # list_slice_with_step()\n # list_comprehension()\n # test_insert_when_traversing()\n\n\nif __name__ == '__main__':\n main()\n\n# https://www.tutorialspoint.com/python/python_lists.htm\n# https://stackoverflow.com/a/9028088/3936457\n# https://stackoverflow.com/questions/8785554/how-do-i-insert-a-list-at-the-front-of-another-list\n",
"step-ids": [
10,
11,
12,
14,
15
]
}
|
[
10,
11,
12,
14,
15
] |
# -*- coding: utf-8 -*-
import requests
import csv
from lxml import html
import json
class ycombinatorParser():
    # NOTE(review): every statement below is in the class body, so the whole
    # scrape runs when this class definition is executed (i.e. when the
    # module is run/imported), not when the class is instantiated.

    # Base URL of the Hacker News front page; also used to resolve the
    # relative "More" link into an absolute page URL.
    siteurl = 'https://news.ycombinator.com/'
    def getNextPage(pageurl):
        # Fetches ``pageurl`` and returns the href of the "More" pagination
        # link (anchor with class "morelink"), or '' when no such link exists.
        # NOTE(review): defined without ``self`` — only callable as a plain
        # function from the class body, as done below.
        response = requests.get(pageurl)
        parsed_body = html.fromstring(response.text)
        nextpage=parsed_body.xpath('//a[@class="morelink"]')
        try:
            nexthref=nextpage[0].get('href')
        except IndexError:
            # Last page: no "morelink" anchor present.
            nexthref = ''
        return nexthref
    def parsePage(parsed_body,rownumber):
        # Walks the table rows of one parsed page, pairing each news row
        # (title/url/site) with the author row that follows it, appends each
        # completed record to nix.json and returns the updated row counter.
        def jsonWriteLine(rownumber,title,autor,url,site):
            # Formats one record as a JSON-like string. Currently unused —
            # kept only for the commented-out writer calls below.
            line = '{"Rownumber": %d,\n "title": "%s",\n "autor": "%s",\n "url": "%s",\n "site": "%s",\n }\n' %(rownumber,title,autor,url,site)
            #print line
            return line
        def getNews(rownews):
            # Builds a dict with title, url and site extracted from a
            # news title cell (td.title).
            newsdict = {}
            for news in rownews:
                newsdict["title"] = ''.join(news.xpath('./a/text()'))
                for i in news.xpath('./a'):
                    newsdict["url"] = i.get('href')
                newsdict["site"] = ''.join(news.xpath('./span/a/span/text()'))
            return newsdict
        def getAuthor(rowautor):
            # Builds a dict with the author name extracted from a
            # subtext cell (td.subtext).
            authordict = {}
            for author in rowautor:
                authordict["autor"] = ''.join(author.xpath('./a[1]/text()'))
            return authordict
        for row in parsed_body.xpath('//tr'):
            rownews = row.xpath('./td[@class="title"][2]')
            rowautor = row.xpath('./td[@class="subtext"][1]')
            datadict = {}
            rowdata = {}
            if rownews:
                datadict = getNews(rownews)
            if rowautor:
                for author in rowautor:
                    datadict = getAuthor(rowautor)
            if datadict:
                autor = ''
                try:
                    # News rows carry title/url/site; these names stay bound
                    # across loop iterations until the matching author row.
                    title=datadict["title"]
                    url=datadict["url"]
                    site=datadict["site"]
                except KeyError:
                    # Author rows carry only "autor": the record is complete
                    # once both row kinds have been seen.
                    autor = datadict["autor"]
                if autor:
                    rowdata['rownumber'] = str(rownumber)
                    rowdata['title'] = str(title)
                    rowdata['autor'] = str(autor)
                    rowdata['url'] = str(url)
                    rowdata['site'] = str(site)
                    with open('nix.json',mode='a') as f:
                        json.dump(rowdata,f)
                    #outputfile.write(jsonWriteLine(rownumber,title,autor,url,site))
                    #print jsonWriteLine(rownumber,title,autor,url,site)
            rownumber += 1
            if rownumber>2:
                # NOTE(review): hard stop after the first rows — exit()
                # aborts the whole interpreter, not just this function.
                exit()
        return rownumber
    def __unicode__(self):
        # Python 2 style; ``unicode`` (and ``self.rowdata``) are undefined
        # here on Python 3 — this method would raise NameError if called.
        return unicode(self.rowdata)
    # --- crawl driver (runs at class-definition time) ---
    pageflag = True
    rownumber = 1
    pageparse = siteurl
    # Truncate nix.json before the crawl appends records to it.
    with open('nix.json',mode='w') as f:
        json.dump('',f)
    while pageflag:
        response = requests.get(pageparse)
        parsed_body = html.fromstring(response.text)
        rownumber = parsePage(parsed_body,rownumber)-1
        pageparse = siteurl+getNextPage(pageparse)
        if pageparse == siteurl:
            # Empty next-page href means there is no further page.
            pageflag = False
if __name__ == '__main__':
    # Instantiation itself does no work: the scrape already ran when the
    # class body above was executed.
    ycombinatorParser()
|
normal
|
{
"blob_id": "87c27711c0089ca2c7e5c7d0e9edb51b9d4008d9",
"index": 6717,
"step-1": "<mask token>\n\n\nclass ycombinatorParser:\n <mask token>\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n <mask token>\n <mask token>\n <mask token>\n with open('nix.json', mode='w') as f:\n json.dump('', f)\n while pageflag:\n 
response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ycombinatorParser:\n siteurl = 'https://news.ycombinator.com/'\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json', mode='w') as f:\n 
json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ycombinatorParser:\n siteurl = 'https://news.ycombinator.com/'\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json', mode='w') as f:\n 
json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\nif __name__ == '__main__':\n ycombinatorParser()\n",
"step-4": "import requests\nimport csv\nfrom lxml import html\nimport json\n\n\nclass ycombinatorParser:\n siteurl = 'https://news.ycombinator.com/'\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n pageflag = True\n rownumber = 1\n pageparse = 
siteurl\n with open('nix.json', mode='w') as f:\n json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\nif __name__ == '__main__':\n ycombinatorParser()\n",
"step-5": "# -*- coding: utf-8 -*-\nimport requests\nimport csv\nfrom lxml import html\nimport json\n\nclass ycombinatorParser():\n siteurl = 'https://news.ycombinator.com/' \n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage=parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref=nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref \n\n\n def parsePage(parsed_body,rownumber):\n def jsonWriteLine(rownumber,title,autor,url,site):\n line = '{\"Rownumber\": %d,\\n \"title\": \"%s\",\\n \"autor\": \"%s\",\\n \"url\": \"%s\",\\n \"site\": \"%s\",\\n }\\n' %(rownumber,title,autor,url,site)\n #print line\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict[\"title\"] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict[\"url\"] = i.get('href')\n newsdict[\"site\"] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict[\"autor\"] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n\n if datadict:\n autor = ''\n try:\n title=datadict[\"title\"]\n url=datadict[\"url\"]\n site=datadict[\"site\"]\n except KeyError:\n autor = datadict[\"autor\"]\n\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n \n with open('nix.json',mode='a') as f:\n json.dump(rowdata,f)\n \n #outputfile.write(jsonWriteLine(rownumber,title,autor,url,site)) \n \n #print jsonWriteLine(rownumber,title,autor,url,site)\n rownumber 
+= 1\n if rownumber>2:\n exit()\n return rownumber\n \n def __unicode__(self):\n return unicode(self.rowdata)\n \n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json',mode='w') as f:\n json.dump('',f)\n while pageflag: \n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text) \n\n rownumber = parsePage(parsed_body,rownumber)-1\n\n pageparse = siteurl+getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\nif __name__ == '__main__':\n ycombinatorParser()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using pyfshfs."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import hfs_attribute
from dfvfs.vfs import hfs_file_entry
from dfvfs.vfs import hfs_file_system
from tests import test_lib as shared_test_lib
class HFSFileEntryTest(shared_test_lib.BaseTestCase):
  """Tests the HFS file entry."""

  # pylint: disable=protected-access

  _IDENTIFIER_A_DIRECTORY = 18
  _IDENTIFIER_A_FILE = 19
  _IDENTIFIER_A_LINK = 22
  _IDENTIFIER_ANOTHER_FILE = 21

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['hfsplus.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    self._hfs_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS, location='/',
        parent=self._raw_path_spec)

    self._file_system = hfs_file_system.HFSFileSystem(
        self._resolver_context, self._hfs_path_spec)
    self._file_system.Open()

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def _GetTestFileEntry(self, **kwargs):
    """Retrieves a file entry of the test file system.

    Args:
      kwargs: keyword arguments of the HFS path specification, such as
          identifier and location. The parent (raw) path specification is
          added automatically.

    Returns:
      HFSFileEntry: file entry, which is asserted to exist.
    """
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS, parent=self._raw_path_spec, **kwargs)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)
    return file_entry

  def _GetAnotherTestFileEntry(self):
    """Retrieves the file entry of /a_directory/another_file.

    Returns:
      HFSFileEntry: file entry, which is asserted to exist.
    """
    return self._GetTestFileEntry(
        identifier=self._IDENTIFIER_ANOTHER_FILE,
        location='/a_directory/another_file')

  def testInitialize(self):
    """Tests the __init__ function."""
    file_entry = hfs_file_entry.HFSFileEntry(
        self._resolver_context, self._file_system, self._hfs_path_spec)

    self.assertIsNotNone(file_entry)

  def testGetAttributes(self):
    """Tests the _GetAttributes function."""
    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_FILE, location='/a_directory/a_file')

    # Attributes are retrieved lazily on first access.
    self.assertIsNone(file_entry._attributes)

    file_entry._GetAttributes()
    self.assertIsNotNone(file_entry._attributes)
    self.assertEqual(len(file_entry._attributes), 1)

    test_attribute = file_entry._attributes[0]
    self.assertIsInstance(test_attribute, hfs_attribute.HFSExtendedAttribute)
    self.assertEqual(test_attribute.name, 'myxattr')

    test_attribute_value_data = test_attribute.read()
    self.assertEqual(test_attribute_value_data, b'My extended attribute')

  def testGetDataStreams(self):
    """Tests the _GetDataStreams function."""
    file_entry = self._GetAnotherTestFileEntry()

    data_streams = file_entry._GetDataStreams()
    self.assertEqual(len(data_streams), 1)

    # A file with a resource fork has an additional data stream.
    file_entry = self._GetTestFileEntry(
        identifier=25, location='/a_directory/a_resourcefork')

    data_streams = file_entry._GetDataStreams()
    self.assertEqual(len(data_streams), 2)

  def testGetStatAttribute(self):
    """Tests the _GetStatAttribute function."""
    file_entry = self._GetAnotherTestFileEntry()
    stat_attribute = file_entry._GetStatAttribute()

    self.assertIsNotNone(stat_attribute)
    self.assertIsNone(stat_attribute.device_number)
    self.assertEqual(stat_attribute.group_identifier, 20)
    self.assertEqual(stat_attribute.inode_number, 21)
    self.assertEqual(stat_attribute.mode, 0o100644)
    self.assertEqual(stat_attribute.number_of_links, 1)
    self.assertEqual(stat_attribute.owner_identifier, 501)
    self.assertEqual(stat_attribute.size, 22)
    self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)

  def testAccessTime(self):
    """Test the access_time property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertIsNotNone(file_entry.access_time)

  def testAddedTime(self):
    """Test the added_time property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertIsNotNone(file_entry.added_time)

  def testChangeTime(self):
    """Test the change_time property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertIsNotNone(file_entry.change_time)

  def testCreationTime(self):
    """Test the creation_time property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertIsNotNone(file_entry.creation_time)

  def testDataStreams(self):
    """Tests the data_streams property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertEqual(file_entry.number_of_data_streams, 1)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, [''])

    # A directory has no data streams.
    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_DIRECTORY, location='/a_directory')

    self.assertEqual(file_entry.number_of_data_streams, 0)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, [])

    # A file with a resource fork exposes an additional "rsrc" data stream.
    file_entry = self._GetTestFileEntry(
        identifier=25, location='/a_directory/a_resourcefork')

    self.assertEqual(file_entry.number_of_data_streams, 2)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, ['', 'rsrc'])

  def testModificationTime(self):
    """Test the modification_time property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertIsNotNone(file_entry.modification_time)

  def testSize(self):
    """Test the size property."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertEqual(file_entry.size, 22)

  def testSubFileEntries(self):
    """Tests the number_of_sub_file_entries and sub_file_entries properties."""
    file_entry = self._GetTestFileEntry(location='/')

    self.assertEqual(file_entry.number_of_sub_file_entries, 6)

    expected_sub_file_entry_names = [
        '.fseventsd',
        '.HFS+ Private Directory Data\r',
        'a_directory',
        'a_link',
        'passwords.txt',
        '\u2400\u2400\u2400\u2400HFS+ Private Data']

    sub_file_entry_names = [
        sub_file_entry.name for sub_file_entry in file_entry.sub_file_entries]

    self.assertEqual(
        len(sub_file_entry_names), len(expected_sub_file_entry_names))
    self.assertEqual(
        sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))

    # Test a path specification without a location.
    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_DIRECTORY)

    self.assertEqual(file_entry.number_of_sub_file_entries, 3)

  def testGetDataStream(self):
    """Tests the GetDataStream function."""
    file_entry = self._GetAnotherTestFileEntry()

    data_stream = file_entry.GetDataStream('')
    self.assertIsNotNone(data_stream)

    file_entry = self._GetTestFileEntry(
        identifier=25, location='/a_directory/a_resourcefork')

    data_stream = file_entry.GetDataStream('rsrc')
    self.assertIsNotNone(data_stream)

  def testGetExtents(self):
    """Tests the GetExtents function."""
    file_entry = self._GetAnotherTestFileEntry()

    extents = file_entry.GetExtents()
    self.assertEqual(len(extents), 1)

    self.assertEqual(extents[0].extent_type, definitions.EXTENT_TYPE_DATA)
    self.assertEqual(extents[0].offset, 1130496)
    self.assertEqual(extents[0].size, 4096)

    # A directory has no extents.
    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_DIRECTORY, location='/a_directory')

    extents = file_entry.GetExtents()
    self.assertEqual(len(extents), 0)

  def testGetFileEntryByPathSpec(self):
    """Tests the GetFileEntryByPathSpec function."""
    # The helper invokes GetFileEntryByPathSpec and asserts the file entry
    # was found; here the lookup is by identifier only (no location).
    self._GetTestFileEntry(identifier=self._IDENTIFIER_A_FILE)

  def testGetFileObject(self):
    """Tests the GetFileObject function."""
    file_entry = self._GetAnotherTestFileEntry()

    file_object = file_entry.GetFileObject()
    self.assertIsNotNone(file_object)

    self.assertEqual(file_object.get_size(), 22)

    # An unknown data stream name yields no file object.
    file_object = file_entry.GetFileObject(data_stream_name='bogus')
    self.assertIsNone(file_object)

    # A directory has no default data stream hence no file object.
    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_DIRECTORY, location='/a_directory')

    file_object = file_entry.GetFileObject()
    self.assertIsNone(file_object)

    # The resource fork is exposed as the "rsrc" data stream.
    file_entry = self._GetTestFileEntry(
        identifier=25, location='/a_directory/a_resourcefork')

    file_object = file_entry.GetFileObject(data_stream_name='rsrc')
    self.assertIsNotNone(file_object)

    self.assertEqual(file_object.get_size(), 17)

  def testGetLinkedFileEntry(self):
    """Tests the GetLinkedFileEntry function."""
    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_LINK, location='/a_link')

    linked_file_entry = file_entry.GetLinkedFileEntry()
    self.assertIsNotNone(linked_file_entry)

    self.assertEqual(linked_file_entry.name, 'another_file')

  def testGetParentFileEntry(self):
    """Tests the GetParentFileEntry function."""
    file_entry = self._GetAnotherTestFileEntry()

    parent_file_entry = file_entry.GetParentFileEntry()
    self.assertIsNotNone(parent_file_entry)

    self.assertEqual(parent_file_entry.name, 'a_directory')

  def testIsFunctions(self):
    """Tests the Is? functions."""
    file_entry = self._GetAnotherTestFileEntry()

    self.assertFalse(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertFalse(file_entry.IsDirectory())
    self.assertTrue(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

    file_entry = self._GetTestFileEntry(
        identifier=self._IDENTIFIER_A_DIRECTORY, location='/a_directory')

    self.assertFalse(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertTrue(file_entry.IsDirectory())
    self.assertFalse(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

    file_entry = self._GetTestFileEntry(location='/')

    self.assertTrue(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertTrue(file_entry.IsDirectory())
    self.assertFalse(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())
if __name__ == '__main__':
  # Run the test suite when executed as a script.
  unittest.main()
|
normal
|
{
"blob_id": "520672f8607751b65fe9e4b975a9978ed0ab71b6",
"index": 8242,
"step-1": "<mask token>\n\n\nclass HFSFileEntryTest(shared_test_lib.BaseTestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def tearDown(self):\n \"\"\"Cleans up the needed objects used throughout the test.\"\"\"\n self._resolver_context.Empty()\n\n def testInitialize(self):\n \"\"\"Tests the __init__ function.\"\"\"\n file_entry = hfs_file_entry.HFSFileEntry(self._resolver_context,\n self._file_system, self._hfs_path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetAttributes(self):\n \"\"\"Tests the _GetAttributes function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNone(file_entry._attributes)\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.\n HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n test_attribute_value_data = test_attribute.read()\n self.assertEqual(test_attribute_value_data, b'My extended attribute')\n\n def testGetDataStreams(self):\n \"\"\"Tests the _GetDataStreams function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 1)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', 
parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 2)\n\n def testGetStatAttribute(self):\n \"\"\"Tests the _GetStatAttribute function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n stat_attribute = file_entry._GetStatAttribute()\n self.assertIsNotNone(stat_attribute)\n self.assertIsNone(stat_attribute.device_number)\n self.assertEqual(stat_attribute.group_identifier, 20)\n self.assertEqual(stat_attribute.inode_number, 21)\n self.assertEqual(stat_attribute.mode, 33188)\n self.assertEqual(stat_attribute.number_of_links, 1)\n self.assertEqual(stat_attribute.owner_identifier, 501)\n self.assertEqual(stat_attribute.size, 22)\n self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)\n\n def testAccessTime(self):\n \"\"\"Test the access_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.access_time)\n\n def testAddedTime(self):\n \"\"\"Test the added_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.added_time)\n\n def testChangeTime(self):\n 
\"\"\"Test the change_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.change_time)\n\n def testCreationTime(self):\n \"\"\"Test the creation_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.creation_time)\n\n def testDataStreams(self):\n \"\"\"Tests the data_streams property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 1)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [''])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 0)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, 
identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 2)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, ['', 'rsrc'])\n <mask token>\n\n def testSize(self):\n \"\"\"Test the size property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.size, 22)\n <mask token>\n\n def testGetDataStream(self):\n \"\"\"Tests the GetDataStream function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('')\n self.assertIsNotNone(data_stream)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('rsrc')\n self.assertIsNotNone(data_stream)\n <mask token>\n\n def testGetFileEntryByPathSpec(self):\n \"\"\"Tests the GetFileEntryByPathSpec function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE, parent=\n self._raw_path_spec)\n file_entry = 
self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetFileObject(self):\n \"\"\"Tests the GetFileObject function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 22)\n file_object = file_entry.GetFileObject(data_stream_name='bogus')\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject(data_stream_name='rsrc')\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 17)\n\n def testGetLinkedFileEntry(self):\n \"\"\"Tests the GetLinkedFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_LINK,\n location='/a_link', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n linked_file_entry = file_entry.GetLinkedFileEntry()\n self.assertIsNotNone(linked_file_entry)\n self.assertEqual(linked_file_entry.name, 
'another_file')\n\n def testGetParentFileEntry(self):\n \"\"\"Tests the GetParentFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n parent_file_entry = file_entry.GetParentFileEntry()\n self.assertIsNotNone(parent_file_entry)\n self.assertEqual(parent_file_entry.name, 'a_directory')\n\n def testIsFunctions(self):\n \"\"\"Tests the Is? functions.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertFalse(file_entry.IsDirectory())\n self.assertTrue(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = 
path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertTrue(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HFSFileEntryTest(shared_test_lib.BaseTestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n \"\"\"Sets up the needed objects used throughout the test.\"\"\"\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['hfsplus.raw'])\n self._SkipIfPathNotExists(test_path)\n test_os_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_OS, location=test_path)\n self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_RAW, parent=test_os_path_spec)\n self._hfs_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n self._file_system = hfs_file_system.HFSFileSystem(self.\n _resolver_context, self._hfs_path_spec)\n self._file_system.Open()\n\n def tearDown(self):\n \"\"\"Cleans up the needed objects used throughout the test.\"\"\"\n self._resolver_context.Empty()\n\n def testInitialize(self):\n \"\"\"Tests the __init__ function.\"\"\"\n file_entry = hfs_file_entry.HFSFileEntry(self._resolver_context,\n self._file_system, self._hfs_path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetAttributes(self):\n \"\"\"Tests the _GetAttributes function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNone(file_entry._attributes)\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.\n HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n test_attribute_value_data = test_attribute.read()\n 
self.assertEqual(test_attribute_value_data, b'My extended attribute')\n\n def testGetDataStreams(self):\n \"\"\"Tests the _GetDataStreams function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 1)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 2)\n\n def testGetStatAttribute(self):\n \"\"\"Tests the _GetStatAttribute function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n stat_attribute = file_entry._GetStatAttribute()\n self.assertIsNotNone(stat_attribute)\n self.assertIsNone(stat_attribute.device_number)\n self.assertEqual(stat_attribute.group_identifier, 20)\n self.assertEqual(stat_attribute.inode_number, 21)\n self.assertEqual(stat_attribute.mode, 33188)\n self.assertEqual(stat_attribute.number_of_links, 1)\n self.assertEqual(stat_attribute.owner_identifier, 501)\n self.assertEqual(stat_attribute.size, 22)\n self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)\n\n def testAccessTime(self):\n \"\"\"Test the access_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, 
identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.access_time)\n\n def testAddedTime(self):\n \"\"\"Test the added_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.added_time)\n\n def testChangeTime(self):\n \"\"\"Test the change_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.change_time)\n\n def testCreationTime(self):\n \"\"\"Test the creation_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.creation_time)\n\n def testDataStreams(self):\n \"\"\"Tests the data_streams property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 1)\n data_stream_names = []\n for 
data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [''])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 0)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 2)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, ['', 'rsrc'])\n <mask token>\n\n def testSize(self):\n \"\"\"Test the size property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.size, 22)\n <mask token>\n\n def testGetDataStream(self):\n \"\"\"Tests the GetDataStream function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('')\n 
self.assertIsNotNone(data_stream)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('rsrc')\n self.assertIsNotNone(data_stream)\n <mask token>\n\n def testGetFileEntryByPathSpec(self):\n \"\"\"Tests the GetFileEntryByPathSpec function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE, parent=\n self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetFileObject(self):\n \"\"\"Tests the GetFileObject function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 22)\n file_object = file_entry.GetFileObject(data_stream_name='bogus')\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n 
self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject(data_stream_name='rsrc')\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 17)\n\n def testGetLinkedFileEntry(self):\n \"\"\"Tests the GetLinkedFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_LINK,\n location='/a_link', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n linked_file_entry = file_entry.GetLinkedFileEntry()\n self.assertIsNotNone(linked_file_entry)\n self.assertEqual(linked_file_entry.name, 'another_file')\n\n def testGetParentFileEntry(self):\n \"\"\"Tests the GetParentFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n parent_file_entry = file_entry.GetParentFileEntry()\n self.assertIsNotNone(parent_file_entry)\n self.assertEqual(parent_file_entry.name, 'a_directory')\n\n def testIsFunctions(self):\n \"\"\"Tests the Is? 
functions.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertFalse(file_entry.IsDirectory())\n self.assertTrue(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertTrue(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass HFSFileEntryTest(shared_test_lib.BaseTestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n \"\"\"Sets up the needed objects used throughout the test.\"\"\"\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['hfsplus.raw'])\n self._SkipIfPathNotExists(test_path)\n test_os_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_OS, location=test_path)\n self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_RAW, parent=test_os_path_spec)\n self._hfs_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n self._file_system = hfs_file_system.HFSFileSystem(self.\n _resolver_context, self._hfs_path_spec)\n self._file_system.Open()\n\n def tearDown(self):\n \"\"\"Cleans up the needed objects used throughout the test.\"\"\"\n self._resolver_context.Empty()\n\n def testInitialize(self):\n \"\"\"Tests the __init__ function.\"\"\"\n file_entry = hfs_file_entry.HFSFileEntry(self._resolver_context,\n self._file_system, self._hfs_path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetAttributes(self):\n \"\"\"Tests the _GetAttributes function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNone(file_entry._attributes)\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.\n HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n test_attribute_value_data = test_attribute.read()\n 
self.assertEqual(test_attribute_value_data, b'My extended attribute')\n\n def testGetDataStreams(self):\n \"\"\"Tests the _GetDataStreams function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 1)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 2)\n\n def testGetStatAttribute(self):\n \"\"\"Tests the _GetStatAttribute function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n stat_attribute = file_entry._GetStatAttribute()\n self.assertIsNotNone(stat_attribute)\n self.assertIsNone(stat_attribute.device_number)\n self.assertEqual(stat_attribute.group_identifier, 20)\n self.assertEqual(stat_attribute.inode_number, 21)\n self.assertEqual(stat_attribute.mode, 33188)\n self.assertEqual(stat_attribute.number_of_links, 1)\n self.assertEqual(stat_attribute.owner_identifier, 501)\n self.assertEqual(stat_attribute.size, 22)\n self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)\n\n def testAccessTime(self):\n \"\"\"Test the access_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, 
identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.access_time)\n\n def testAddedTime(self):\n \"\"\"Test the added_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.added_time)\n\n def testChangeTime(self):\n \"\"\"Test the change_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.change_time)\n\n def testCreationTime(self):\n \"\"\"Test the creation_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.creation_time)\n\n def testDataStreams(self):\n \"\"\"Tests the data_streams property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 1)\n data_stream_names = []\n for 
data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [''])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 0)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 2)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, ['', 'rsrc'])\n\n def testModificationTime(self):\n \"\"\"Test the modification_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.modification_time)\n\n def testSize(self):\n \"\"\"Test the size property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.size, 22)\n <mask token>\n\n def 
testGetDataStream(self):\n \"\"\"Tests the GetDataStream function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('')\n self.assertIsNotNone(data_stream)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('rsrc')\n self.assertIsNotNone(data_stream)\n <mask token>\n\n def testGetFileEntryByPathSpec(self):\n \"\"\"Tests the GetFileEntryByPathSpec function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE, parent=\n self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetFileObject(self):\n \"\"\"Tests the GetFileObject function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 22)\n file_object = file_entry.GetFileObject(data_stream_name='bogus')\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = 
self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject(data_stream_name='rsrc')\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 17)\n\n def testGetLinkedFileEntry(self):\n \"\"\"Tests the GetLinkedFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_LINK,\n location='/a_link', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n linked_file_entry = file_entry.GetLinkedFileEntry()\n self.assertIsNotNone(linked_file_entry)\n self.assertEqual(linked_file_entry.name, 'another_file')\n\n def testGetParentFileEntry(self):\n \"\"\"Tests the GetParentFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n parent_file_entry = file_entry.GetParentFileEntry()\n self.assertIsNotNone(parent_file_entry)\n self.assertEqual(parent_file_entry.name, 'a_directory')\n\n def testIsFunctions(self):\n \"\"\"Tests the Is? 
functions.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertFalse(file_entry.IsDirectory())\n self.assertTrue(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertTrue(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass HFSFileEntryTest(shared_test_lib.BaseTestCase):\n \"\"\"Tests the HFS file entry.\"\"\"\n _IDENTIFIER_A_DIRECTORY = 18\n _IDENTIFIER_A_FILE = 19\n _IDENTIFIER_A_LINK = 22\n _IDENTIFIER_ANOTHER_FILE = 21\n\n def setUp(self):\n \"\"\"Sets up the needed objects used throughout the test.\"\"\"\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['hfsplus.raw'])\n self._SkipIfPathNotExists(test_path)\n test_os_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_OS, location=test_path)\n self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_RAW, parent=test_os_path_spec)\n self._hfs_path_spec = path_spec_factory.Factory.NewPathSpec(definitions\n .TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n self._file_system = hfs_file_system.HFSFileSystem(self.\n _resolver_context, self._hfs_path_spec)\n self._file_system.Open()\n\n def tearDown(self):\n \"\"\"Cleans up the needed objects used throughout the test.\"\"\"\n self._resolver_context.Empty()\n\n def testInitialize(self):\n \"\"\"Tests the __init__ function.\"\"\"\n file_entry = hfs_file_entry.HFSFileEntry(self._resolver_context,\n self._file_system, self._hfs_path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetAttributes(self):\n \"\"\"Tests the _GetAttributes function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNone(file_entry._attributes)\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.\n HFSExtendedAttribute)\n 
self.assertEqual(test_attribute.name, 'myxattr')\n test_attribute_value_data = test_attribute.read()\n self.assertEqual(test_attribute_value_data, b'My extended attribute')\n\n def testGetDataStreams(self):\n \"\"\"Tests the _GetDataStreams function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 1)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 2)\n\n def testGetStatAttribute(self):\n \"\"\"Tests the _GetStatAttribute function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n stat_attribute = file_entry._GetStatAttribute()\n self.assertIsNotNone(stat_attribute)\n self.assertIsNone(stat_attribute.device_number)\n self.assertEqual(stat_attribute.group_identifier, 20)\n self.assertEqual(stat_attribute.inode_number, 21)\n self.assertEqual(stat_attribute.mode, 33188)\n self.assertEqual(stat_attribute.number_of_links, 1)\n self.assertEqual(stat_attribute.owner_identifier, 501)\n self.assertEqual(stat_attribute.size, 22)\n self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)\n\n def testAccessTime(self):\n \"\"\"Test the access_time property.\"\"\"\n path_spec = 
path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.access_time)\n\n def testAddedTime(self):\n \"\"\"Test the added_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.added_time)\n\n def testChangeTime(self):\n \"\"\"Test the change_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.change_time)\n\n def testCreationTime(self):\n \"\"\"Test the creation_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.creation_time)\n\n def testDataStreams(self):\n \"\"\"Tests the data_streams property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n 
self.assertEqual(file_entry.number_of_data_streams, 1)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [''])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 0)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, [])\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_data_streams, 2)\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n self.assertEqual(data_stream_names, ['', 'rsrc'])\n\n def testModificationTime(self):\n \"\"\"Test the modification_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.modification_time)\n\n def testSize(self):\n \"\"\"Test the size property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n 
self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.size, 22)\n\n def testSubFileEntries(self):\n \"\"\"Tests the number_of_sub_file_entries and sub_file_entries properties.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_sub_file_entries, 6)\n expected_sub_file_entry_names = ['.fseventsd',\n '.HFS+ Private Directory Data\\r', 'a_directory', 'a_link',\n 'passwords.txt', '␀␀␀␀HFS+ Private Data']\n sub_file_entry_names = []\n for sub_file_entry in file_entry.sub_file_entries:\n sub_file_entry_names.append(sub_file_entry.name)\n self.assertEqual(len(sub_file_entry_names), len(\n expected_sub_file_entry_names))\n self.assertEqual(sorted(sub_file_entry_names), sorted(\n expected_sub_file_entry_names))\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.number_of_sub_file_entries, 3)\n\n def testGetDataStream(self):\n \"\"\"Tests the GetDataStream function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('')\n self.assertIsNotNone(data_stream)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n 
self.assertIsNotNone(file_entry)\n data_stream = file_entry.GetDataStream('rsrc')\n self.assertIsNotNone(data_stream)\n\n def testGetExtents(self):\n \"\"\"Tests the GetExtents function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n extents = file_entry.GetExtents()\n self.assertEqual(len(extents), 1)\n self.assertEqual(extents[0].extent_type, definitions.EXTENT_TYPE_DATA)\n self.assertEqual(extents[0].offset, 1130496)\n self.assertEqual(extents[0].size, 4096)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n extents = file_entry.GetExtents()\n self.assertEqual(len(extents), 0)\n\n def testGetFileEntryByPathSpec(self):\n \"\"\"Tests the GetFileEntryByPathSpec function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE, parent=\n self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n def testGetFileObject(self):\n \"\"\"Tests the GetFileObject function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 22)\n file_object = 
file_entry.GetFileObject(data_stream_name='bogus')\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject()\n self.assertIsNone(file_object)\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=25, location=\n '/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n file_object = file_entry.GetFileObject(data_stream_name='rsrc')\n self.assertIsNotNone(file_object)\n self.assertEqual(file_object.get_size(), 17)\n\n def testGetLinkedFileEntry(self):\n \"\"\"Tests the GetLinkedFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_LINK,\n location='/a_link', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n linked_file_entry = file_entry.GetLinkedFileEntry()\n self.assertIsNotNone(linked_file_entry)\n self.assertEqual(linked_file_entry.name, 'another_file')\n\n def testGetParentFileEntry(self):\n \"\"\"Tests the GetParentFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n parent_file_entry = file_entry.GetParentFileEntry()\n self.assertIsNotNone(parent_file_entry)\n self.assertEqual(parent_file_entry.name, 'a_directory')\n\n def testIsFunctions(self):\n \"\"\"Tests the Is? 
functions.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertFalse(file_entry.IsDirectory())\n self.assertTrue(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n path_spec = path_spec_factory.Factory.NewPathSpec(definitions.\n TYPE_INDICATOR_HFS, location='/', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n self.assertTrue(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n\nif __name__ == '__main__':\n 
unittest.main()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the file entry implementation using pyfshfs.\"\"\"\n\nimport unittest\n\nfrom dfvfs.lib import definitions\nfrom dfvfs.path import factory as path_spec_factory\nfrom dfvfs.resolver import context\nfrom dfvfs.vfs import hfs_attribute\nfrom dfvfs.vfs import hfs_file_entry\nfrom dfvfs.vfs import hfs_file_system\n\nfrom tests import test_lib as shared_test_lib\n\n\nclass HFSFileEntryTest(shared_test_lib.BaseTestCase):\n \"\"\"Tests the HFS file entry.\"\"\"\n\n # pylint: disable=protected-access\n\n _IDENTIFIER_A_DIRECTORY = 18\n _IDENTIFIER_A_FILE = 19\n _IDENTIFIER_A_LINK = 22\n _IDENTIFIER_ANOTHER_FILE = 21\n\n def setUp(self):\n \"\"\"Sets up the needed objects used throughout the test.\"\"\"\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['hfsplus.raw'])\n self._SkipIfPathNotExists(test_path)\n\n test_os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=test_path)\n self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)\n self._hfs_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, location='/',\n parent=self._raw_path_spec)\n\n self._file_system = hfs_file_system.HFSFileSystem(\n self._resolver_context, self._hfs_path_spec)\n self._file_system.Open()\n\n def tearDown(self):\n \"\"\"Cleans up the needed objects used throughout the test.\"\"\"\n self._resolver_context.Empty()\n\n def testInitialize(self):\n \"\"\"Tests the __init__ function.\"\"\"\n file_entry = hfs_file_entry.HFSFileEntry(\n self._resolver_context, self._file_system, self._hfs_path_spec)\n\n self.assertIsNotNone(file_entry)\n\n def testGetAttributes(self):\n \"\"\"Tests the _GetAttributes function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n 
location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertIsNone(file_entry._attributes)\n\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n\n test_attribute_value_data = test_attribute.read()\n self.assertEqual(test_attribute_value_data, b'My extended attribute')\n\n def testGetDataStreams(self):\n \"\"\"Tests the _GetDataStreams function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 1)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 2)\n\n def testGetStatAttribute(self):\n \"\"\"Tests the _GetStatAttribute function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n stat_attribute = file_entry._GetStatAttribute()\n\n self.assertIsNotNone(stat_attribute)\n 
self.assertIsNone(stat_attribute.device_number)\n self.assertEqual(stat_attribute.group_identifier, 20)\n self.assertEqual(stat_attribute.inode_number, 21)\n self.assertEqual(stat_attribute.mode, 0o100644)\n self.assertEqual(stat_attribute.number_of_links, 1)\n self.assertEqual(stat_attribute.owner_identifier, 501)\n self.assertEqual(stat_attribute.size, 22)\n self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)\n\n def testAccessTime(self):\n \"\"\"Test the access_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.access_time)\n\n def testAddedTime(self):\n \"\"\"Test the added_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.added_time)\n\n def testChangeTime(self):\n \"\"\"Test the change_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.change_time)\n\n def testCreationTime(self):\n \"\"\"Test the creation_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = 
self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.creation_time)\n\n def testDataStreams(self):\n \"\"\"Tests the data_streams property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 1)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, [''])\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 0)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, [])\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 2)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, ['', 'rsrc'])\n\n def testModificationTime(self):\n \"\"\"Test the modification_time property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n 
location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertIsNotNone(file_entry.modification_time)\n\n def testSize(self):\n \"\"\"Test the size property.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertEqual(file_entry.size, 22)\n\n def testSubFileEntries(self):\n \"\"\"Tests the number_of_sub_file_entries and sub_file_entries properties.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, location='/',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_sub_file_entries, 6)\n\n expected_sub_file_entry_names = [\n '.fseventsd',\n '.HFS+ Private Directory Data\\r',\n 'a_directory',\n 'a_link',\n 'passwords.txt',\n '\\u2400\\u2400\\u2400\\u2400HFS+ Private Data']\n\n sub_file_entry_names = []\n for sub_file_entry in file_entry.sub_file_entries:\n sub_file_entry_names.append(sub_file_entry.name)\n\n self.assertEqual(\n len(sub_file_entry_names), len(expected_sub_file_entry_names))\n self.assertEqual(\n sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))\n\n # Test a path specification without a location.\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_sub_file_entries, 3)\n\n def testGetDataStream(self):\n \"\"\"Tests the GetDataStream 
function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_stream = file_entry.GetDataStream('')\n self.assertIsNotNone(data_stream)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_stream = file_entry.GetDataStream('rsrc')\n self.assertIsNotNone(data_stream)\n\n def testGetExtents(self):\n \"\"\"Tests the GetExtents function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n extents = file_entry.GetExtents()\n self.assertEqual(len(extents), 1)\n\n self.assertEqual(extents[0].extent_type, definitions.EXTENT_TYPE_DATA)\n self.assertEqual(extents[0].offset, 1130496)\n self.assertEqual(extents[0].size, 4096)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n extents = file_entry.GetExtents()\n self.assertEqual(len(extents), 0)\n\n def testGetFileEntryByPathSpec(self):\n \"\"\"Tests the GetFileEntryByPathSpec function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n parent=self._raw_path_spec)\n 
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n\n def testGetFileObject(self):\n \"\"\"Tests the GetFileObject function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n file_object = file_entry.GetFileObject()\n self.assertIsNotNone(file_object)\n\n self.assertEqual(file_object.get_size(), 22)\n\n file_object = file_entry.GetFileObject(data_stream_name='bogus')\n self.assertIsNone(file_object)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n file_object = file_entry.GetFileObject()\n self.assertIsNone(file_object)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n file_object = file_entry.GetFileObject(data_stream_name='rsrc')\n self.assertIsNotNone(file_object)\n\n self.assertEqual(file_object.get_size(), 17)\n\n def testGetLinkedFileEntry(self):\n \"\"\"Tests the GetLinkedFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_LINK,\n location='/a_link', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n linked_file_entry = file_entry.GetLinkedFileEntry()\n\n self.assertIsNotNone(linked_file_entry)\n\n 
self.assertEqual(linked_file_entry.name, 'another_file')\n\n def testGetParentFileEntry(self):\n \"\"\"Tests the GetParentFileEntry function.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n parent_file_entry = file_entry.GetParentFileEntry()\n\n self.assertIsNotNone(parent_file_entry)\n\n self.assertEqual(parent_file_entry.name, 'a_directory')\n\n def testIsFunctions(self):\n \"\"\"Tests the Is? functions.\"\"\"\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n\n self.assertFalse(file_entry.IsDevice())\n self.assertFalse(file_entry.IsDirectory())\n self.assertTrue(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n 
self.assertFalse(file_entry.IsSocket())\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, location='/',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertTrue(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
18,
19,
20,
25,
27
]
}
|
[
18,
19,
20,
25,
27
] |
import requests
import pandas as pd
import time
def job_spider(jid="1913e38066dd3c8e1Hd40t--FVE~", ka="search_list_1", i=0):
    """Fetch one job-detail page from zhipin.com and save the raw HTML.

    Args:
        jid: job identifier; used both in the detail-page URL and as the
            output filename under ./raw_data/page/.
        ka: tracking value sent as the "ka" query-string parameter.
        i: sequence number, used only for progress logging.
    """
    job_url = "https://www.zhipin.com/job_detail/" + jid + ".html"

    # Minimal browser-like headers so the request is not rejected outright.
    headers = {
        'cache-control': "no-cache",
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/71.0.3578.80 Safari/537.36'
    }
    querystring = {"ka": ka}

    try:
        r = requests.request("GET", job_url, headers=headers, params=querystring)
        content = r.content.decode('utf-8')

        # Persist the raw page so later parsing never needs to re-crawl.
        file = "./raw_data/page/" + jid + ".html"
        with open(file, 'w', encoding='utf-8') as f:
            f.write(content)
        result = "succeed"  # bug fix: log message was misspelled "suceed"
    except (requests.RequestException, IOError):
        # Bug fix: only IOError was caught before, so any network failure
        # (timeout, connection reset, DNS error) crashed the whole batch.
        # Treat it like a disk failure and just record this job as failed.
        result = "failed"

    log = "job " + str(i) + " : " + jid + " crawl " + result
    print(log)
if __name__ == "__main__":
    # Crawl every job listed in the previously collected id/ka CSV.
    list_path = "./raw_data/list/job_list.csv"
    df = pd.read_csv(list_path, encoding='utf-8', header=None)

    jids = df[0].values.tolist()
    kas = df[1].values.tolist()

    for idx, (jid, ka) in enumerate(zip(jids, kas)):
        job_spider(jid, ka, idx)
        time.sleep(1)  # throttle requests so the site is not hammered
|
normal
|
{
"blob_id": "5b894eac93bff44931df4ef8d845c23071a03227",
"index": 3461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\nif __name__ == '__main__':\n file = './raw_data/list/job_list.csv'\n df = pd.read_csv(file, encoding='utf-8', header=None)\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-4": "import requests\nimport pandas as pd\nimport time\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\nif __name__ == '__main__':\n file = './raw_data/list/job_list.csv'\n df = pd.read_csv(file, encoding='utf-8', header=None)\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-5": "import requests\nimport pandas as pd\nimport time\n\n\ndef job_spider(jid=\"1913e38066dd3c8e1Hd40t--FVE~\", ka=\"search_list_1\", i=0):\n # request info.\n job_url = \"https://www.zhipin.com/job_detail/\" + jid + \".html\"\n\n headers = {\n 'cache-control': \"no-cache\",\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.80 Safari/537.36'\n }\n\n querystring = {\"ka\": ka}\n\n try:\n # request\n r = requests.request(\"GET\", job_url, headers=headers, params=querystring)\n content = r.content.decode('utf-8')\n\n # raw data.\n file = \"./raw_data/page/\" + jid + \".html\"\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = \"suceed\"\n except IOError:\n result = \"failed\"\n\n log = \"job \" + str(i) + \" : \" + jid + \" crawl \" + result\n print(log)\n\n\nif __name__ == \"__main__\":\n\n file = \"./raw_data/list/job_list.csv\"\n df = pd.read_csv(file, encoding='utf-8', header=None)\n\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n # print(jid_list)\n\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db.models import Q
from django.contrib import messages
from django.views.generic import ListView, DetailView
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from .models import Pills, Like, Comment
from .forms import CommentForm
import json
class PillListView(ListView):
    """Paginated pill list with an optional free-text search filter (?q=)."""

    model = Pills
    template_name = "pills/pill_list.html"
    form_class = CommentForm

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an explicit extension point.
        return super(PillListView, self).get_context_data(**kwargs)

    def get_queryset(self, *args, **kwargs):
        # Prefetch the relations rendered in the template to avoid N+1 queries.
        queryset = Pills.objects.prefetch_related(
            'category_body', 'category_gender', 'like_user_set'
        ).all()
        print(self.request.GET)
        search_term = self.request.GET.get("q", None)
        if search_term is not None:
            # Match against the pill name or its body-category name.
            name_match = Q(name__icontains=search_term)
            category_match = Q(category_body__name__icontains=search_term)
            queryset = queryset.filter(name_match | category_match)
        return queryset
# def PillCategory_SearchList(request):
# qs = Pills.objects.prefetch_related('category_body').all()
# query = self.request.GET.get("q", None)
# if query is not None:
# qs = qs.filter(
# Q(name__icontains=query)
# )
# return qs
# context = {
# 'qs' : qs,
# }
# return render(request, "categorysearch.html", context)
@login_required
def comment_new(request):
    """Create a comment on a pill via an AJAX POST.

    Expects a 'pk' identifying the pill plus CommentForm fields in the POST
    body. On success renders the new-comment fragment; GET requests and
    invalid forms fall back to a redirect to the pill list.
    """
    pk = request.POST.get('pk')
    pill = get_object_or_404(Pills, pk=pk)
    form = CommentForm()  # bug fix: was `CommentForm` (the class, not an instance)
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.author = request.user  # stamp ownership server-side
            comment.pills = pill
            comment.save()
            return render(request, 'pills/comment_new_ajax.html',
                          {'comment': comment, 'form': form})
    return redirect("pills:pill_list")
@login_required
def comment_delete(request, pill_pk, pk):
    """Delete a comment; only its author may do so, and only via POST."""
    comment = get_object_or_404(Comment, pk=pk)
    if request.method == 'POST' and request.user == comment.author:
        comment.delete()
        messages.success(request, '삭제했습니다.')
        return redirect('pills:pill_list')
    # Bug fix: messages.warning requires the request as its first argument;
    # the original call raised TypeError whenever authorization failed.
    messages.warning(request, '권한이 없습니다.')
    return redirect('pills:pill_list')
class PillDetailView(DetailView):
    """Detail page for a single pill; DetailView looks the object up by pk."""
    model = Pills
    template_name = 'pills/pill_detail.html'
    # context_object_name = 'pills'
@login_required
@require_POST  # likes may only be toggled, never fetched, via POST
def pill_like(request):
    """Toggle the current user's like on a pill and return a JSON summary."""
    pk = request.POST.get('pk', None)
    pill = get_object_or_404(Pills, pk=pk)

    like, created = pill.like_set.get_or_create(user=request.user)
    if created:
        message = "좋아요"
    else:
        # Pressing the button a second time removes the existing like.
        like.delete()
        message = "좋아요 취소"

    payload = {
        'like_count': pill.like_count,
        'message': message,
        'username': request.user.username,
    }
    return HttpResponse(json.dumps(payload))
|
normal
|
{
"blob_id": "3c193decc4a1f284de953003fbba434d6e798b24",
"index": 2827,
"step-1": "<mask token>\n\n\nclass PillListView(ListView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PillListView(ListView):\n model = Pills\n template_name = 'pills/pill_list.html'\n form_class = CommentForm\n\n def get_context_data(self, **kwargs):\n context = super(PillListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = Pills.objects.prefetch_related('category_body',\n 'category_gender', 'like_user_set').all()\n print(self.request.GET)\n query = self.request.GET.get('q', None)\n if query is not None:\n qs = qs.filter(Q(name__icontains=query) | Q(\n category_body__name__icontains=query))\n return qs\n\n\n<mask token>\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PillListView(ListView):\n model = Pills\n template_name = 'pills/pill_list.html'\n form_class = CommentForm\n\n def get_context_data(self, **kwargs):\n context = super(PillListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = Pills.objects.prefetch_related('category_body',\n 'category_gender', 'like_user_set').all()\n print(self.request.GET)\n query = self.request.GET.get('q', None)\n if query is not None:\n qs = qs.filter(Q(name__icontains=query) | Q(\n category_body__name__icontains=query))\n return qs\n\n\n@login_required\ndef comment_new(request):\n pk = request.POST.get('pk')\n pill = get_object_or_404(Pills, pk=pk)\n form = CommentForm\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.pills = pill\n comment.save()\n return render(request, 'pills/comment_new_ajax.html', {\n 'comment': comment, 'form': form})\n return redirect('pills:pill_list')\n\n\n@login_required\ndef comment_delete(request, pill_pk, pk):\n comment = get_object_or_404(Comment, pk=pk)\n if request.method == 'POST' and request.user == comment.author:\n comment.delete()\n messages.success(request, '삭제했습니다.')\n return redirect('pills:pill_list')\n messages.warning('권한이 없습니다.')\n return redirect('pills:pill_list')\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass PillListView(ListView):\n model = Pills\n template_name = 'pills/pill_list.html'\n form_class = CommentForm\n\n def get_context_data(self, **kwargs):\n context = super(PillListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = Pills.objects.prefetch_related('category_body',\n 'category_gender', 'like_user_set').all()\n print(self.request.GET)\n query = self.request.GET.get('q', None)\n if query is not None:\n qs = qs.filter(Q(name__icontains=query) | Q(\n category_body__name__icontains=query))\n return qs\n\n\n@login_required\ndef comment_new(request):\n pk = request.POST.get('pk')\n pill = get_object_or_404(Pills, pk=pk)\n form = CommentForm\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.pills = pill\n comment.save()\n return render(request, 'pills/comment_new_ajax.html', {\n 'comment': comment, 'form': form})\n return redirect('pills:pill_list')\n\n\n@login_required\ndef comment_delete(request, pill_pk, pk):\n comment = get_object_or_404(Comment, pk=pk)\n if request.method == 'POST' and request.user == comment.author:\n comment.delete()\n messages.success(request, '삭제했습니다.')\n return redirect('pills:pill_list')\n messages.warning('권한이 없습니다.')\n return redirect('pills:pill_list')\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n@login_required\n@require_POST\ndef pill_like(request):\n pk = request.POST.get('pk', None)\n pill = get_object_or_404(Pills, pk=pk)\n pill_like, pill_like_created = pill.like_set.get_or_create(user=request\n .user)\n if not pill_like_created:\n pill_like.delete()\n message = '좋아요 취소'\n else:\n message = '좋아요'\n context = {'like_count': pill.like_count, 'message': message,\n 'username': request.user.username}\n return HttpResponse(json.dumps(context))\n",
"step-5": "from django.db.models import Q\nfrom django.contrib import messages\n\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\n\nfrom django.views.decorators.http import require_POST\n\nfrom .models import Pills, Like, Comment\nfrom .forms import CommentForm\nimport json\n\n\nclass PillListView(ListView):\n\tmodel = Pills\n\ttemplate_name = \"pills/pill_list.html\"\n\tform_class = CommentForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(PillListView, self).get_context_data(**kwargs)\n\t\treturn context\n\n\tdef get_queryset(self, *args, **kwargs):\n\t\tqs = Pills.objects.prefetch_related('category_body','category_gender','like_user_set').all()\n\t\tprint(self.request.GET)\n\t\tquery = self.request.GET.get(\"q\", None)\n\t\tif query is not None:\n\t\t\tqs = qs.filter(\n\t\t\t\t\tQ(name__icontains=query) | Q(category_body__name__icontains=query)\n\t\t\t\t)\n\t\treturn qs\n\n\n# def PillCategory_SearchList(request):\n# \tqs = Pills.objects.prefetch_related('category_body').all()\n# \tquery = self.request.GET.get(\"q\", None)\n# \tif query is not None:\n# \t\tqs = qs.filter(\n# \t\t\t\tQ(name__icontains=query)\n# \t\t\t\t)\n# \t\treturn qs\n\n# \tcontext = {\n# \t\t\t\t'qs' : qs,\n\n# \t}\n\n# \treturn render(request, \"categorysearch.html\", context)\n\n\n\n\n\n@login_required\ndef comment_new(request):\n\tpk = request.POST.get('pk')\n\tpill = get_object_or_404(Pills, pk=pk)\n\tform = CommentForm\n\tif request.method == 'POST':\n\t\tform = CommentForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcomment = form.save(commit=False)\n\t\t\tcomment.author = request.user\n\t\t\tcomment.pills = pill\n\t\t\tcomment.save()\n\t\t\treturn render(request, 'pills/comment_new_ajax.html', {'comment':comment, 'form':form,})\n\treturn redirect(\"pills:pill_list\")\n\n\n@login_required\ndef 
comment_delete(request, pill_pk, pk):\n\tcomment = get_object_or_404(Comment, pk=pk)\n\tif request.method == 'POST' and request.user == comment.author:\n\t\tcomment.delete()\n\t\tmessages.success(request, '삭제했습니다.')\n\t\treturn redirect('pills:pill_list')\n\n\tmessages.warning('권한이 없습니다.')\n\treturn redirect('pills:pill_list')\n\n\n\nclass PillDetailView(DetailView):\n\tmodel = Pills\n\ttemplate_name = 'pills/pill_detail.html'\n\t# context_object_name = 'pills'\n\n\n@login_required\n@require_POST\t# POST method만 받음\ndef pill_like(request):\n\tpk = request.POST.get('pk', None)\n\tpill = get_object_or_404(Pills, pk=pk)\n\n\tpill_like, pill_like_created = pill.like_set.get_or_create(user=request.user)\n\n\tif not pill_like_created:\n\t\tpill_like.delete()\n\t\tmessage = \"좋아요 취소\"\n\telse:\n\t\tmessage = \"좋아요\"\n\n\tcontext = {\n\t\t\t\t'like_count': pill.like_count,\n\t\t\t\t'message': message,\n\t\t\t\t'username': request.user.username\n\t}\n\n\treturn HttpResponse(json.dumps(context))\n\n\n\n\n\n\n",
"step-ids": [
3,
6,
8,
9,
11
]
}
|
[
3,
6,
8,
9,
11
] |
<|reserved_special_token_0|>
class UserProfile(ndb.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def query_profile(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).get()
class UserProfileHandler(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('templates/profile.html')
the_user = self.request.get('user')
logging.info('The user = ' + the_user)
if the_user == '':
the_user = users.get_current_user().email()
owner = True
else:
owner = False
user_profile_data = UserProfile.get_by_id(the_user)
template_values = {'owner': owner, 'user': the_user}
if user_profile_data:
template_values['profile_data'] = user_profile_data.profile
logging.info(user_profile_data)
self.response.out.write(template.render(template_values))
def post(self):
user = users.get_current_user()
profile_data = self.request.get('profile_data')
user_profile = UserProfile(id=user.email(), profile=profile_data)
user_profile.put()
self.redirect('/profile')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserProfile(ndb.Model):
"""Models the profile (JSON) of an individual user."""
profile = ndb.TextProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_profile(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).get()
class UserProfileHandler(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('templates/profile.html')
the_user = self.request.get('user')
logging.info('The user = ' + the_user)
if the_user == '':
the_user = users.get_current_user().email()
owner = True
else:
owner = False
user_profile_data = UserProfile.get_by_id(the_user)
template_values = {'owner': owner, 'user': the_user}
if user_profile_data:
template_values['profile_data'] = user_profile_data.profile
logging.info(user_profile_data)
self.response.out.write(template.render(template_values))
def post(self):
user = users.get_current_user()
profile_data = self.request.get('profile_data')
user_profile = UserProfile(id=user.email(), profile=profile_data)
user_profile.put()
self.redirect('/profile')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.
path.dirname(__file__)), extensions=['jinja2.ext.autoescape'],
autoescape=True)
class UserProfile(ndb.Model):
"""Models the profile (JSON) of an individual user."""
profile = ndb.TextProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_profile(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).get()
class UserProfileHandler(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('templates/profile.html')
the_user = self.request.get('user')
logging.info('The user = ' + the_user)
if the_user == '':
the_user = users.get_current_user().email()
owner = True
else:
owner = False
user_profile_data = UserProfile.get_by_id(the_user)
template_values = {'owner': owner, 'user': the_user}
if user_profile_data:
template_values['profile_data'] = user_profile_data.profile
logging.info(user_profile_data)
self.response.out.write(template.render(template_values))
def post(self):
user = users.get_current_user()
profile_data = self.request.get('profile_data')
user_profile = UserProfile(id=user.email(), profile=profile_data)
user_profile.put()
self.redirect('/profile')
app = webapp2.WSGIApplication([('/profile', UserProfileHandler)], debug=True)
<|reserved_special_token_1|>
from google.appengine.api import users
from google.appengine.ext import ndb
from datetime import datetime
from datetime import timedelta
import os
import logging
import webapp2
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.
path.dirname(__file__)), extensions=['jinja2.ext.autoescape'],
autoescape=True)
class UserProfile(ndb.Model):
    """Models the profile (JSON) of an individual user.

    Entities are keyed by the user's email address (see UserProfileHandler),
    so ``get_by_id(email)`` retrieves a profile directly.
    """
    # Raw profile payload stored as text; the app treats it as JSON.
    profile = ndb.TextProperty()
    # Set automatically when the entity is first written.
    date = ndb.DateTimeProperty(auto_now_add=True)

    @classmethod
    def query_profile(cls, ancestor_key):
        # First profile entity under the given ancestor, or None if absent.
        return cls.query(ancestor=ancestor_key).get()
class UserProfileHandler(webapp2.RequestHandler):
    """Serves /profile: view a user's profile (GET) or save your own (POST)."""

    def get(self):
        # Render the profile page for ?user=<email>, or for the signed-in
        # user when the parameter is absent.
        template = JINJA_ENVIRONMENT.get_template('templates/profile.html')
        the_user = self.request.get('user')
        logging.info('The user = ' + the_user)
        if the_user == '':
            # No explicit user requested: show the viewer's own profile and
            # mark them as owner so the template can enable editing.
            # NOTE(review): assumes a signed-in user; get_current_user() may
            # return None for anonymous visitors — confirm login is enforced.
            the_user = users.get_current_user().email()
            owner = True
        else:
            owner = False
        # Profiles are keyed by email, so a direct id lookup suffices.
        user_profile_data = UserProfile.get_by_id(the_user)
        template_values = {'owner': owner, 'user': the_user}
        if user_profile_data:
            template_values['profile_data'] = user_profile_data.profile
        logging.info(user_profile_data)
        self.response.out.write(template.render(template_values))

    def post(self):
        # Save (overwrite) the signed-in user's profile, then return to the
        # profile page via redirect (POST-redirect-GET pattern).
        user = users.get_current_user()
        profile_data = self.request.get('profile_data')
        user_profile = UserProfile(id=user.email(), profile=profile_data)
        user_profile.put()
        self.redirect('/profile')
app = webapp2.WSGIApplication([('/profile', UserProfileHandler)], debug=True)
<|reserved_special_token_1|>
from google.appengine.api import users
from google.appengine.ext import ndb
from datetime import datetime
from datetime import timedelta
import os
import logging
import webapp2
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class UserProfile(ndb.Model):
"""Models the profile (JSON) of an individual user."""
profile = ndb.TextProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_profile(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).get()
class UserProfileHandler(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('templates/profile.html')
the_user = self.request.get('user')
logging.info("The user = " + the_user)
if the_user == "":
the_user = users.get_current_user().email()
owner = True
else:
owner = False
user_profile_data = UserProfile.get_by_id(the_user)
template_values = { 'owner': owner, 'user': the_user}
if user_profile_data:
template_values['profile_data'] = user_profile_data.profile
logging.info(user_profile_data)
self.response.out.write(template.render(template_values))
def post(self):
user = users.get_current_user()
profile_data = self.request.get('profile_data')
user_profile = UserProfile(id=user.email(), profile=profile_data)
user_profile.put()
self.redirect('/profile')
#self.response.out.write("Here is the JSON for your profile.")
#self.response.out.write(profile_data)
app = webapp2.WSGIApplication([
('/profile', UserProfileHandler),
], debug=True)
|
flexible
|
{
"blob_id": "309090167c2218c89494ce17f7a25bd89320a202",
"index": 3855,
"step-1": "<mask token>\n\n\nclass UserProfile(ndb.Model):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def query_profile(cls, ancestor_key):\n return cls.query(ancestor=ancestor_key).get()\n\n\nclass UserProfileHandler(webapp2.RequestHandler):\n\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/profile.html')\n the_user = self.request.get('user')\n logging.info('The user = ' + the_user)\n if the_user == '':\n the_user = users.get_current_user().email()\n owner = True\n else:\n owner = False\n user_profile_data = UserProfile.get_by_id(the_user)\n template_values = {'owner': owner, 'user': the_user}\n if user_profile_data:\n template_values['profile_data'] = user_profile_data.profile\n logging.info(user_profile_data)\n self.response.out.write(template.render(template_values))\n\n def post(self):\n user = users.get_current_user()\n profile_data = self.request.get('profile_data')\n user_profile = UserProfile(id=user.email(), profile=profile_data)\n user_profile.put()\n self.redirect('/profile')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserProfile(ndb.Model):\n \"\"\"Models the profile (JSON) of an individual user.\"\"\"\n profile = ndb.TextProperty()\n date = ndb.DateTimeProperty(auto_now_add=True)\n\n @classmethod\n def query_profile(cls, ancestor_key):\n return cls.query(ancestor=ancestor_key).get()\n\n\nclass UserProfileHandler(webapp2.RequestHandler):\n\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/profile.html')\n the_user = self.request.get('user')\n logging.info('The user = ' + the_user)\n if the_user == '':\n the_user = users.get_current_user().email()\n owner = True\n else:\n owner = False\n user_profile_data = UserProfile.get_by_id(the_user)\n template_values = {'owner': owner, 'user': the_user}\n if user_profile_data:\n template_values['profile_data'] = user_profile_data.profile\n logging.info(user_profile_data)\n self.response.out.write(template.render(template_values))\n\n def post(self):\n user = users.get_current_user()\n profile_data = self.request.get('profile_data')\n user_profile = UserProfile(id=user.email(), profile=profile_data)\n user_profile.put()\n self.redirect('/profile')\n\n\n<mask token>\n",
"step-3": "<mask token>\nJINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)), extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass UserProfile(ndb.Model):\n \"\"\"Models the profile (JSON) of an individual user.\"\"\"\n profile = ndb.TextProperty()\n date = ndb.DateTimeProperty(auto_now_add=True)\n\n @classmethod\n def query_profile(cls, ancestor_key):\n return cls.query(ancestor=ancestor_key).get()\n\n\nclass UserProfileHandler(webapp2.RequestHandler):\n\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/profile.html')\n the_user = self.request.get('user')\n logging.info('The user = ' + the_user)\n if the_user == '':\n the_user = users.get_current_user().email()\n owner = True\n else:\n owner = False\n user_profile_data = UserProfile.get_by_id(the_user)\n template_values = {'owner': owner, 'user': the_user}\n if user_profile_data:\n template_values['profile_data'] = user_profile_data.profile\n logging.info(user_profile_data)\n self.response.out.write(template.render(template_values))\n\n def post(self):\n user = users.get_current_user()\n profile_data = self.request.get('profile_data')\n user_profile = UserProfile(id=user.email(), profile=profile_data)\n user_profile.put()\n self.redirect('/profile')\n\n\napp = webapp2.WSGIApplication([('/profile', UserProfileHandler)], debug=True)\n",
"step-4": "from google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom datetime import datetime\nfrom datetime import timedelta\nimport os\nimport logging\nimport webapp2\nimport jinja2\nJINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)), extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass UserProfile(ndb.Model):\n \"\"\"Models the profile (JSON) of an individual user.\"\"\"\n profile = ndb.TextProperty()\n date = ndb.DateTimeProperty(auto_now_add=True)\n\n @classmethod\n def query_profile(cls, ancestor_key):\n return cls.query(ancestor=ancestor_key).get()\n\n\nclass UserProfileHandler(webapp2.RequestHandler):\n\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/profile.html')\n the_user = self.request.get('user')\n logging.info('The user = ' + the_user)\n if the_user == '':\n the_user = users.get_current_user().email()\n owner = True\n else:\n owner = False\n user_profile_data = UserProfile.get_by_id(the_user)\n template_values = {'owner': owner, 'user': the_user}\n if user_profile_data:\n template_values['profile_data'] = user_profile_data.profile\n logging.info(user_profile_data)\n self.response.out.write(template.render(template_values))\n\n def post(self):\n user = users.get_current_user()\n profile_data = self.request.get('profile_data')\n user_profile = UserProfile(id=user.email(), profile=profile_data)\n user_profile.put()\n self.redirect('/profile')\n\n\napp = webapp2.WSGIApplication([('/profile', UserProfileHandler)], debug=True)\n",
"step-5": "from google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom datetime import datetime\nfrom datetime import timedelta\nimport os\nimport logging\n\nimport webapp2\nimport jinja2\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\nclass UserProfile(ndb.Model):\n \"\"\"Models the profile (JSON) of an individual user.\"\"\"\n profile = ndb.TextProperty()\n date = ndb.DateTimeProperty(auto_now_add=True)\n\n @classmethod\n def query_profile(cls, ancestor_key):\n return cls.query(ancestor=ancestor_key).get()\n\nclass UserProfileHandler(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/profile.html')\n the_user = self.request.get('user')\n logging.info(\"The user = \" + the_user)\n if the_user == \"\":\n the_user = users.get_current_user().email()\n owner = True\n else:\n owner = False\n user_profile_data = UserProfile.get_by_id(the_user)\n template_values = { 'owner': owner, 'user': the_user}\n if user_profile_data:\n template_values['profile_data'] = user_profile_data.profile\n logging.info(user_profile_data)\n self.response.out.write(template.render(template_values))\n\n def post(self):\n user = users.get_current_user()\n profile_data = self.request.get('profile_data')\n user_profile = UserProfile(id=user.email(), profile=profile_data)\n user_profile.put()\n self.redirect('/profile')\n #self.response.out.write(\"Here is the JSON for your profile.\")\n #self.response.out.write(profile_data)\n\napp = webapp2.WSGIApplication([\n ('/profile', UserProfileHandler),\n], debug=True)\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class Demo:
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
def start(self, filename: str):
self.log.debug('analyse file: ' + filename)
astmodif = AstModifier(filename)
originTree = astmodif.origin()
self.log.info('origin: ' + astmodif.dump(originTree))
astmodif.simplify()
self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))
analyzer = Analyzer()
analyzer.analyze(astmodif.simpast)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Demo:
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
def start(self, filename: str):
self.log.debug('analyse file: ' + filename)
astmodif = AstModifier(filename)
originTree = astmodif.origin()
self.log.info('origin: ' + astmodif.dump(originTree))
astmodif.simplify()
self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))
analyzer = Analyzer()
analyzer.analyze(astmodif.simpast)
def main(args):
demo = Demo()
defaultfile = './test/apple.py'
if len(args) > 1:
defaultfile = args[1]
demo.start(os.path.abspath(defaultfile))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Demo:
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
def start(self, filename: str):
self.log.debug('analyse file: ' + filename)
astmodif = AstModifier(filename)
originTree = astmodif.origin()
self.log.info('origin: ' + astmodif.dump(originTree))
astmodif.simplify()
self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))
analyzer = Analyzer()
analyzer.analyze(astmodif.simpast)
def main(args):
demo = Demo()
defaultfile = './test/apple.py'
if len(args) > 1:
defaultfile = args[1]
demo.start(os.path.abspath(defaultfile))
if __name__ == '__main__':
import sys
main(sys.argv)
<|reserved_special_token_1|>
import log
import logging
import os
from ast_modifier import AstModifier
from analyzer import Analyzer
class Demo:
    """Drives the demo pipeline: parse a file, simplify its AST, analyze it."""

    def __init__(self):
        # One logger per class name keeps demo output easy to filter.
        self.log = logging.getLogger(self.__class__.__name__)

    def start(self, filename: str):
        """Run the parse -> simplify -> analyze pipeline on *filename*."""
        self.log.debug('analyse file: ' + filename)
        modifier = AstModifier(filename)
        self.log.info('origin: ' + modifier.dump(modifier.origin()))
        modifier.simplify()
        self.log.info('simplified: ' + modifier.dump(modifier.simpast))
        Analyzer().analyze(modifier.simpast)
def main(args):
    """Entry point: analyze the file named in argv[1], or the bundled sample."""
    target = args[1] if len(args) > 1 else './test/apple.py'
    Demo().start(os.path.abspath(target))
if __name__ == '__main__':
import sys
main(sys.argv)
<|reserved_special_token_1|>
# the main program of this project
import log
import logging
import os
from ast_modifier import AstModifier
from analyzer import Analyzer
class Demo:
    """Runs the full analysis pipeline (parse, simplify, analyse) on one file."""

    def __init__(self):
        # One logger per class, named after the class for readable output.
        self.log = logging.getLogger(self.__class__.__name__)

    def start(self, filename: str):
        """Parse *filename*, simplify its AST, then hand it to the Analyzer."""
        self.log.debug('analyse file: ' + filename)
        astmodif = AstModifier(filename)
        # get origin AST
        originTree = astmodif.origin()
        self.log.info('origin: ' + astmodif.dump(originTree))
        # simplify the AST
        astmodif.simplify()
        self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))

        # analyse
        analyzer = Analyzer()
        analyzer.analyze(astmodif.simpast)
def main(args):
    """CLI entry: analyse args[1] if given, otherwise the bundled sample file."""
    target = args[1] if len(args) > 1 else './test/apple.py'
    Demo().start(os.path.abspath(target))
# Script entry point: pass the raw argv list straight to main().
if __name__ == "__main__":
    import sys
    main(sys.argv)
|
flexible
|
{
"blob_id": "e989f73011559080f96802dba4db30361d5626f9",
"index": 4002,
"step-1": "<mask token>\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv)\n",
"step-4": "import log\nimport logging\nimport os\nfrom ast_modifier import AstModifier\nfrom analyzer import Analyzer\n\n\nclass Demo:\n\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv)\n",
"step-5": "# the main program of this project\nimport log\nimport logging\nimport os\nfrom ast_modifier import AstModifier\nfrom analyzer import Analyzer\n\nclass Demo():\n def __init__(self):\n self.log = logging.getLogger(self.__class__.__name__)\n def start(self, filename: str):\n self.log.debug('analyse file: ' + filename)\n astmodif = AstModifier(filename)\n # get origin AST\n originTree = astmodif.origin()\n self.log.info('origin: ' + astmodif.dump(originTree))\n # simplify the AST\n astmodif.simplify()\n self.log.info('simplified: ' + astmodif.dump(astmodif.simpast))\n\n # analyse\n analyzer = Analyzer()\n analyzer.analyze(astmodif.simpast)\n\ndef main(args):\n demo = Demo()\n defaultfile = './test/apple.py'\n if len(args) > 1:\n defaultfile = args[1]\n demo.start(os.path.abspath(defaultfile))\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
class Person:
    """Base class of the inheritance demo; every person can breathe."""

    country = "INDIA"  # class attribute shared by all instances

    def __init__(self):
        print("its base constructor")

    def takeBreath(self):
        print("Yes Iam breathing.")
class Emp(Person):
    """Employee specialisation of Person; overrides the breathing message."""

    def takeBreath(self):
        print("Yes Iam EMP and Iam also breathing.")
class Prog(Emp):
    """A programmer: chains to the parent constructor and breath message."""

    def __init__(self):
        super().__init__()  # run Emp/Person construction first
        print("its child constructor")

    def takeBreath(self):
        super().takeBreath()  # emit the inherited message before our own
        print("Iam a programmer and breathing++.")
        # Removed dead assignment ``a = 0`` — the local was never read.
# Exercise each class in the hierarchy; overridden methods chain upwards.
p = Person()
p.takeBreath()
e = Emp()
e.takeBreath()
pr = Prog()
pr.takeBreath()
|
normal
|
{
"blob_id": "cb2e2ef70935a22854c70fedf4f4a6715b089291",
"index": 1990,
"step-1": "<mask token>\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Emp(Person):\n\n def takeBreath(self):\n print('Yes Iam EMP and Iam also breathing.')\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\n",
"step-3": "class Person:\n <mask token>\n <mask token>\n\n def takeBreath(self):\n print('Yes Iam breathing.')\n\n\nclass Emp(Person):\n\n def takeBreath(self):\n print('Yes Iam EMP and Iam also breathing.')\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\n",
"step-4": "class Person:\n country = 'INDIA'\n\n def __init__(self):\n print('its base constructor')\n\n def takeBreath(self):\n print('Yes Iam breathing.')\n\n\nclass Emp(Person):\n\n def takeBreath(self):\n print('Yes Iam EMP and Iam also breathing.')\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\np.takeBreath()\n<mask token>\ne.takeBreath()\n<mask token>\npr.takeBreath()\n",
"step-5": "class Person:\r\n country = \"INDIA\"\r\n def __init__(self):\r\n print(\"its base constructor\")\r\n\r\n def takeBreath(self):\r\n print(\"Yes Iam breathing.\")\r\n\r\nclass Emp(Person): # inherits person\r\n def takeBreath(self):\r\n print(\"Yes Iam EMP and Iam also breathing.\")\r\n\r\nclass Prog(Emp): \r\n\r\n def __init__(self):\r\n super().__init__() # CALLS BASE CLASS CONTRUCTOR\r\n print(\"its child constructor\")\r\n\r\n def takeBreath(self):\r\n super().takeBreath() # calls previous class's method \r\n print(\"Iam a programmer and breathing++.\")\r\n a=0\r\n\r\np = Person()\r\np.takeBreath()\r\n\r\ne = Emp()\r\ne.takeBreath()\r\n\r\npr = Prog()\r\npr.takeBreath()",
"step-ids": [
4,
6,
8,
11,
13
]
}
|
[
4,
6,
8,
11,
13
] |
"""
Card rarity parameters
"""
from typing import List, Optional
from django.db.models.query import Q
from cards.models.rarity import Rarity
from cardsearch.parameters.base_parameters import (
OPERATOR_MAPPING,
OPERATOR_TO_WORDY_MAPPING,
CardTextParameter,
CardSearchContext,
ParameterArgs,
QueryContext,
QueryValidationError,
)
class CardRarityParam(CardTextParameter):
    """
    The parameter for searching by a card's rarity
    """

    @classmethod
    def get_parameter_name(cls) -> str:
        return "rarity"

    @classmethod
    def get_search_operators(cls) -> List[str]:
        return [":", "=", "<=", "<", ">", ">="]

    @classmethod
    def get_search_keywords(cls) -> List[str]:
        return ["rarity", "r"]

    def get_default_search_context(self) -> CardSearchContext:
        return CardSearchContext.PRINTING

    def __init__(self, negated: bool, param_args: ParameterArgs):
        super().__init__(negated, param_args)
        self.rarity: Optional[Rarity] = None
        # ":" is shorthand for an exact match, so normalise it to "=" up front.
        if self.operator == ":":
            self.operator = "="

    def validate(self, query_context: QueryContext) -> None:
        """Resolve the raw value to a Rarity by symbol or name (case-insensitive)."""
        try:
            self.rarity = Rarity.objects.get(
                Q(symbol__iexact=self.value) | Q(name__iexact=self.value)
            )
        except Rarity.DoesNotExist as ex:
            # Chain the lookup failure so the original cause stays in tracebacks.
            raise QueryValidationError(
                f'Couldn\'t find rarity "{self.value}"'
            ) from ex

    def query(self, query_context: QueryContext) -> Q:
        if self.operator == "=":
            query = Q(rarity=self.rarity)
        else:
            # Inequality operators compare on display_order (rarity ranking).
            filter_ = f"rarity__display_order{OPERATOR_MAPPING[self.operator]}"
            query = Q(**{filter_: self.rarity.display_order})
        return ~query if self.negated else query

    def get_pretty_str(self, query_context: QueryContext) -> str:
        return (
            "the rarity "
            + ("isn't" if self.negated else "is")
            + (
                " " + OPERATOR_TO_WORDY_MAPPING[self.operator]
                if self.operator not in (":", "=")
                else ""
            )
            + f" {self.rarity.name.lower()}"
        )
|
normal
|
{
"blob_id": "c7d9bbdff9148c5d928de66f4406ee8b4e1bcdac",
"index": 2672,
"step-1": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-2": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-3": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n\n def get_default_search_context(self) ->CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-4": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n\n @classmethod\n def get_search_operators(cls) ->List[str]:\n return [':', '=', '<=', '<', '>', '>=']\n <mask token>\n\n def get_default_search_context(self) ->CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n\n def query(self, query_context: QueryContext) ->Q:\n if self.operator == '=':\n query = Q(rarity=self.rarity)\n else:\n filter_ = f'rarity__display_order{OPERATOR_MAPPING[self.operator]}'\n query = Q(**{filter_: self.rarity.display_order})\n return ~query if self.negated else query\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-5": "\"\"\"\nCard rarity parameters\n\"\"\"\nfrom typing import List, Optional\n\nfrom django.db.models.query import Q\n\nfrom cards.models.rarity import Rarity\nfrom cardsearch.parameters.base_parameters import (\n OPERATOR_MAPPING,\n OPERATOR_TO_WORDY_MAPPING,\n CardTextParameter,\n CardSearchContext,\n ParameterArgs,\n QueryContext,\n QueryValidationError,\n)\n\n\nclass CardRarityParam(CardTextParameter):\n \"\"\"\n The parameter for searching by a card's rarity\n \"\"\"\n\n @classmethod\n def get_parameter_name(cls) -> str:\n return \"rarity\"\n\n @classmethod\n def get_search_operators(cls) -> List[str]:\n return [\":\", \"=\", \"<=\", \"<\", \">\", \">=\"]\n\n @classmethod\n def get_search_keywords(cls) -> List[str]:\n return [\"rarity\", \"r\"]\n\n def get_default_search_context(self) -> CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == \":\":\n self.operator = \"=\"\n\n def validate(self, query_context: QueryContext) -> None:\n try:\n self.rarity = Rarity.objects.get(\n Q(symbol__iexact=self.value) | Q(name__iexact=self.value)\n )\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n\n def query(self, query_context: QueryContext) -> Q:\n if self.operator == \"=\":\n query = Q(rarity=self.rarity)\n else:\n filter_ = f\"rarity__display_order{OPERATOR_MAPPING[self.operator]}\"\n query = Q(**{filter_: self.rarity.display_order})\n return ~query if self.negated else query\n\n def get_pretty_str(self, query_context: QueryContext) -> str:\n return (\n \"the rarity \"\n + (\"isn't\" if self.negated else \"is\")\n + (\n \" \" + OPERATOR_TO_WORDY_MAPPING[self.operator]\n if self.operator not in (\":\", \"=\")\n else \"\"\n )\n + f\" {self.rarity.name.lower()}\"\n )\n",
"step-ids": [
4,
5,
6,
8,
12
]
}
|
[
4,
5,
6,
8,
12
] |
<|reserved_special_token_0|>
def DSA_2048(filename, key):
    """Sign the contents of *filename* with the DSA private *key*, then verify
    the signature against the public key stored in public_key.pem and print
    whether the message is authentic."""
    with open(filename, 'rb') as f:
        message = f.read()
    hash_obj = SHA256.new(message)
    signer = DSS.new(key, 'fips-186-3')
    signature = signer.sign(hash_obj)
    # Use a with-block so the handle is closed (the original leaked it).
    with open('public_key.pem', 'r') as pub_file:
        pub_key = DSA.import_key(pub_file.read())
    hash_obj = SHA256.new(message)
    verifier = DSS.new(pub_key, 'fips-186-3')
    try:
        verifier.verify(hash_obj, signature)
        print('The message is authentic.')
    except ValueError:
        print('The message is not authentic.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('small_file.txt', 'wb') as f:
f.write(os.urandom(kB))
<|reserved_special_token_0|>
with open('large_file.txt', 'wb') as f:
f.write(os.urandom(mB))
<|reserved_special_token_0|>
with open('public_key.pem', 'wb') as f:
f.write(key.publickey().export_key())
f.close()
<|reserved_special_token_0|>
print('Key Generation Time: ', End - Begin)
def DSA_2048(filename, key):
    """Sign the contents of *filename* with DSA *key* and verify the result
    against the public key previously written to public_key.pem."""
    with open(filename, 'rb') as f:
        message = f.read()
    hash_obj = SHA256.new(message)
    signer = DSS.new(key, 'fips-186-3')
    signature = signer.sign(hash_obj)
    # NOTE(review): this handle is never closed — consider a with-block.
    f = open('public_key.pem', 'r')
    hash_obj = SHA256.new(message)
    pub_key = DSA.import_key(f.read())
    verifier = DSS.new(pub_key, 'fips-186-3')
    try:
        verifier.verify(hash_obj, signature)
        print('The message is authentic.')
    except ValueError:
        print('The message is not authentic.')
<|reserved_special_token_0|>
DSA_2048('small_file.txt', key)
<|reserved_special_token_0|>
print('Time taken for DSA_2048 with 1 kb file: ', End - Begin)
if End - Begin != 0:
print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')
<|reserved_special_token_0|>
DSA_2048('large_file.txt', key)
<|reserved_special_token_0|>
print('Time taken for DSA_2048 with 10 mb file: ', End - Begin)
if End - Begin != 0:
print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),
'bytes/sec')
exit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Create random test fixtures: a 1 KB file and a 10 MB file.
kB = 1024
with open('small_file.txt', 'wb') as f:
    f.write(os.urandom(kB))
mB = 10485760
with open('large_file.txt', 'wb') as f:
    f.write(os.urandom(mB))
# Time DSA-2048 key generation and publish the public half to a PEM file.
Begin = time.time()
key = DSA.generate(2048)
with open('public_key.pem', 'wb') as f:
    f.write(key.publickey().export_key())
    f.close()
End = time.time()
print('Key Generation Time: ', End - Begin)
def DSA_2048(filename, key):
    """Sign the contents of *filename* with the DSA private *key*, then verify
    the signature against the public key stored in public_key.pem and print
    whether the message is authentic."""
    with open(filename, 'rb') as f:
        message = f.read()
    hash_obj = SHA256.new(message)
    signer = DSS.new(key, 'fips-186-3')
    signature = signer.sign(hash_obj)
    # Use a with-block so the handle is closed (the original leaked it).
    with open('public_key.pem', 'r') as pub_file:
        pub_key = DSA.import_key(pub_file.read())
    hash_obj = SHA256.new(message)
    verifier = DSS.new(pub_key, 'fips-186-3')
    try:
        verifier.verify(hash_obj, signature)
        print('The message is authentic.')
    except ValueError:
        print('The message is not authentic.')
# Benchmark sign-and-verify over the 1 KB fixture.
Begin = time.time()
DSA_2048('small_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 1 kb file: ', End - Begin)
if End - Begin != 0:
    print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')
# Benchmark sign-and-verify over the 10 MB fixture.
Begin = time.time()
DSA_2048('large_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 10 mb file: ', End - Begin)
if End - Begin != 0:
    # Fixed label: this measurement is for the 10 mb file, not 1 kb.
    print('DSA_2048 speed for 10 mb file: ', 10485760 / (End - Begin),
        'bytes/sec')
exit()
<|reserved_special_token_1|>
from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import os
import time
# Create random test fixtures: a 1 KB file and a 10 MB file.
kB = 1024
with open('small_file.txt', 'wb') as f:
    f.write(os.urandom(kB))
mB = 10485760
with open('large_file.txt', 'wb') as f:
    f.write(os.urandom(mB))
# Time DSA-2048 key generation and publish the public half to a PEM file.
Begin = time.time()
key = DSA.generate(2048)
with open('public_key.pem', 'wb') as f:
    f.write(key.publickey().export_key())
    f.close()
End = time.time()
print('Key Generation Time: ', End - Begin)
def DSA_2048(filename, key):
    """Sign the contents of *filename* with DSA *key* and verify the result
    against the public key previously written to public_key.pem."""
    with open(filename, 'rb') as f:
        message = f.read()
    hash_obj = SHA256.new(message)
    signer = DSS.new(key, 'fips-186-3')
    signature = signer.sign(hash_obj)
    # NOTE(review): this handle is never closed — consider a with-block.
    f = open('public_key.pem', 'r')
    hash_obj = SHA256.new(message)
    pub_key = DSA.import_key(f.read())
    verifier = DSS.new(pub_key, 'fips-186-3')
    try:
        verifier.verify(hash_obj, signature)
        print('The message is authentic.')
    except ValueError:
        print('The message is not authentic.')
# Benchmark sign-and-verify over the 1 KB fixture.
Begin = time.time()
DSA_2048('small_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 1 kb file: ', End - Begin)
if End - Begin != 0:
    print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')
# Benchmark sign-and-verify over the 10 MB fixture.
Begin = time.time()
DSA_2048('large_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 10 mb file: ', End - Begin)
if End - Begin != 0:
    # Fixed label: this measurement is for the 10 mb file, not 1 kb.
    print('DSA_2048 speed for 10 mb file: ', 10485760 / (End - Begin),
        'bytes/sec')
exit()
<|reserved_special_token_1|>
from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import os
import time
kB = 1024 # 1 kB test fixture
with open('small_file.txt', 'wb') as f:
    f.write(os.urandom(kB))

mB = 10485760 # 10 MB (the original comment said 1GB, which was wrong)
with open('large_file.txt', 'wb') as f:
    f.write(os.urandom(mB))

# Time DSA-2048 key generation and publish the public half to a PEM file.
Begin = time.time()
key = DSA.generate(2048)
with open("public_key.pem", "wb") as f:
    f.write(key.publickey().export_key())
    f.close()
End = time.time()
print("Key Generation Time: ", End-Begin)
def DSA_2048(filename,key):
    """Sign *filename* with the DSA private *key* and verify the signature
    against the public key stored in public_key.pem."""
    with open(filename, 'rb') as f:
        message = f.read()
    hash_obj = SHA256.new(message)
    signer = DSS.new(key, 'fips-186-3')
    signature = signer.sign(hash_obj)
    # Load the public key; the with-block guarantees the handle is closed
    # (the original left it open — a file-descriptor leak on every call).
    with open("public_key.pem", "r") as pub_file:
        pub_key = DSA.import_key(pub_file.read())
    hash_obj = SHA256.new(message)
    verifier = DSS.new(pub_key, 'fips-186-3')
    # Verify the authenticity of the message
    try:
        verifier.verify(hash_obj, signature)
        print ("The message is authentic.")
    except ValueError:
        print ("The message is not authentic.")
# Benchmark sign-and-verify on the 1 KB file.
Begin=time.time()
DSA_2048('small_file.txt',key)
End=time.time()
print("Time taken for DSA_2048 with 1 kb file: ",End-Begin)
if End-Begin != 0:
    print("DSA_2048 speed for 1 kb file: ",1024/(End-Begin),"bytes/sec")

# Benchmark sign-and-verify on the 10 MB file.
Begin=time.time()
DSA_2048('large_file.txt',key)
End=time.time()
print("Time taken for DSA_2048 with 10 mb file: ",End-Begin)
if End-Begin != 0:
    # Fixed label: this is the 10 mb measurement, not 1 kb.
    print("DSA_2048 speed for 10 mb file: ",10485760/(End-Begin),"bytes/sec")
exit()
|
flexible
|
{
"blob_id": "d24bbfc3587a2a79891a11e00ec865498c01c286",
"index": 2101,
"step-1": "<mask token>\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\n<mask token>\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\n<mask token>\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\n<mask token>\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\n<mask token>\nDSA_2048('small_file.txt', key)\n<mask token>\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\n<mask token>\nDSA_2048('large_file.txt', key)\n<mask token>\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-3": "<mask token>\nkB = 1024\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\nmB = 10485760\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\nBegin = time.time()\nDSA_2048('small_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\nBegin = time.time()\nDSA_2048('large_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-4": "from Crypto.PublicKey import DSA\nfrom Crypto.Signature import DSS\nfrom Crypto.Hash import SHA256\nimport os\nimport time\nkB = 1024\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\nmB = 10485760\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\nBegin = time.time()\nDSA_2048('small_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\nBegin = time.time()\nDSA_2048('large_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-5": "from Crypto.PublicKey import DSA\nfrom Crypto.Signature import DSS\nfrom Crypto.Hash import SHA256\nimport os\nimport time\n\nkB = 1024 # 1kB\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\n\nmB = 10485760 # 1GB\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\n\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open(\"public_key.pem\", \"wb\") as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint(\"Key Generation Time: \", End-Begin)\n\ndef DSA_2048(filename,key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n # Load the public key\n f = open(\"public_key.pem\", \"r\")\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n # Verify the authenticity of the message\n try:\n verifier.verify(hash_obj, signature)\n print (\"The message is authentic.\")\n except ValueError:\n print (\"The message is not authentic.\")\n\nBegin=time.time()\nDSA_2048('small_file.txt',key)\nEnd=time.time()\nprint(\"Time taken for DSA_2048 with 1 kb file: \",End-Begin)\nif End-Begin != 0:\n print(\"DSA_2048 speed for 1 kb file: \",1024/(End-Begin),\"bytes/sec\")\n\nBegin=time.time()\nDSA_2048('large_file.txt',key)\nEnd=time.time()\nprint(\"Time taken for DSA_2048 with 10 mb file: \",End-Begin)\nif End-Begin != 0:\n print(\"DSA_2048 speed for 1 kb file: \",10485760/(End-Begin),\"bytes/sec\")\nexit()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import xadmin
from .models import EmailVerifyRecord,Banner
from xadmin import views
class EmailVerifyRecordAdmin(object):
    # No customisation needed; register with xadmin's defaults.
    pass
class BannerAdmin(object):
    """Admin options for Banner: show title, URL and ordering index in lists."""

    list_display = ('title', 'url', 'index')
class BaseSetting(object):
    """Theme settings applied to every xadmin view."""

    enable_themes = True
    # xadmin's theme plugin reads ``use_bootswatch``; the original
    # ``user_bootswatch`` spelling looks like a typo and is kept only
    # for backward compatibility.
    use_bootswatch = True
    user_bootswatch = True
# Set the xadmin page title and footer.
class GlobalSetting(object):
    site_title='西游记'
    site_footer='咨询在线'
# Hook the models and the view-level settings classes into the xadmin site.
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
xadmin.site.register(views.BaseAdminView,BaseSetting)
xadmin.site.register(views.CommAdminView,GlobalSetting)
|
normal
|
{
"blob_id": "263a853f33eb9724101ca87f12b914282dea9981",
"index": 1441,
"step-1": "<mask token>\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseSetting)\nxadmin.site.register(views.CommAdminView, GlobalSetting)\n",
"step-4": "import xadmin\nfrom .models import EmailVerifyRecord, Banner\nfrom xadmin import views\n\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseSetting)\nxadmin.site.register(views.CommAdminView, GlobalSetting)\n",
"step-5": "import xadmin\nfrom .models import EmailVerifyRecord,Banner\nfrom xadmin import views\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display=('title','url','index')\n\nclass BaseSetting(object):\n enable_themes=True\n user_bootswatch=True\n#设置xadmin页面标题和页脚\nclass GlobalSetting(object):\n site_title='西游记'\n site_footer='咨询在线'\nxadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)\nxadmin.site.register(Banner,BannerAdmin)\nxadmin.site.register(views.BaseAdminView,BaseSetting)\nxadmin.site.register(views.CommAdminView,GlobalSetting)\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class TreeAdjustorTest:
<|reserved_special_token_0|>
    def __init__(self):
        # No state to initialise; test methods build their trees locally.
        return
def getAdjustedTreesTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 8)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='papers', posTag='--')
nodes[4].info = NodeInfo(type='NN', value='Author')
nodes[5] = Node(index=5, word='citations', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Journal')
nodes[6] = Node(index=6, word='most', posTag='--')
nodes[6].info = NodeInfo(type='FN', value='>')
nodes[7] = Node(index=7, word='total', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[4])
nodes[4].parent = nodes[2]
nodes[4].children.append(nodes[5])
nodes[5].parent = nodes[4]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[5].children.append(nodes[7])
nodes[7].parent = nodes[5]
print(
'===========test for Running getAdjustedTrees() in TreeAdjustor==========='
)
print('The original tree:')
print(T.toString())
print('Number of possible trees for choice:')
obj = TreeAdjustor()
result = TreeAdjustor.getAdjustedTrees(T)
print(len(result))
for i in range(0, len(result)):
for j in range(i + 1, len(result)):
if result[i].getScore() <= result[j].getScore():
temp = result[i]
result[i] = result[j]
result[j] = temp
print('The three trees with highest scores look like:')
for i in range(0, 5):
print(result[i])
for tree in result:
print(' treeList Result %s:%d' % (tree.getSentence(), tree.
getScore()))
tree.insertImplicitNodes()
query = tree.translateToSQL(self.schema)
print('qUERY: ' + query.toString())
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreeAdjustorTest:
<|reserved_special_token_0|>
def __init__(self):
return
def getAdjustedTreesTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 8)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='papers', posTag='--')
nodes[4].info = NodeInfo(type='NN', value='Author')
nodes[5] = Node(index=5, word='citations', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Journal')
nodes[6] = Node(index=6, word='most', posTag='--')
nodes[6].info = NodeInfo(type='FN', value='>')
nodes[7] = Node(index=7, word='total', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[4])
nodes[4].parent = nodes[2]
nodes[4].children.append(nodes[5])
nodes[5].parent = nodes[4]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[5].children.append(nodes[7])
nodes[7].parent = nodes[5]
print(
'===========test for Running getAdjustedTrees() in TreeAdjustor==========='
)
print('The original tree:')
print(T.toString())
print('Number of possible trees for choice:')
obj = TreeAdjustor()
result = TreeAdjustor.getAdjustedTrees(T)
print(len(result))
for i in range(0, len(result)):
for j in range(i + 1, len(result)):
if result[i].getScore() <= result[j].getScore():
temp = result[i]
result[i] = result[j]
result[j] = temp
print('The three trees with highest scores look like:')
for i in range(0, 5):
print(result[i])
for tree in result:
print(' treeList Result %s:%d' % (tree.getSentence(), tree.
getScore()))
tree.insertImplicitNodes()
query = tree.translateToSQL(self.schema)
print('qUERY: ' + query.toString())
def adjustTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 9)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='each', posTag='--')
nodes[4].info = NodeInfo(type='QN', value='>')
nodes[5] = Node(index=5, word='papers', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Author')
nodes[6] = Node(index=6, word='citations', posTag='--')
nodes[6].info = NodeInfo(type='NN', value='Journal')
nodes[7] = Node(index=7, word='most', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='>')
nodes[8] = Node(index=8, word='total', posTag='--')
nodes[8].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[5])
nodes[5].parent = nodes[2]
nodes[3].children.append(nodes[4])
nodes[4].parent = nodes[3]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[6].children.append(nodes[7])
nodes[7].parent = nodes[6]
nodes[6].children.append(nodes[8])
nodes[8].parent = nodes[6]
print('===========test for Running adjust() in TreeAdjustor==========='
)
treeList = TreeAdjustor.adjust(T)
print('Output size: %d' % len(treeList))
print('Output trees:')
ctr = 0
for tr in treeList:
print('Tree %d %s' % (ctr, tr.getSentence()))
ctr += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreeAdjustorTest:
schema = None
def __init__(self):
return
def getAdjustedTreesTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 8)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='papers', posTag='--')
nodes[4].info = NodeInfo(type='NN', value='Author')
nodes[5] = Node(index=5, word='citations', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Journal')
nodes[6] = Node(index=6, word='most', posTag='--')
nodes[6].info = NodeInfo(type='FN', value='>')
nodes[7] = Node(index=7, word='total', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[4])
nodes[4].parent = nodes[2]
nodes[4].children.append(nodes[5])
nodes[5].parent = nodes[4]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[5].children.append(nodes[7])
nodes[7].parent = nodes[5]
print(
'===========test for Running getAdjustedTrees() in TreeAdjustor==========='
)
print('The original tree:')
print(T.toString())
print('Number of possible trees for choice:')
obj = TreeAdjustor()
result = TreeAdjustor.getAdjustedTrees(T)
print(len(result))
for i in range(0, len(result)):
for j in range(i + 1, len(result)):
if result[i].getScore() <= result[j].getScore():
temp = result[i]
result[i] = result[j]
result[j] = temp
print('The three trees with highest scores look like:')
for i in range(0, 5):
print(result[i])
for tree in result:
print(' treeList Result %s:%d' % (tree.getSentence(), tree.
getScore()))
tree.insertImplicitNodes()
query = tree.translateToSQL(self.schema)
print('qUERY: ' + query.toString())
def adjustTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 9)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='each', posTag='--')
nodes[4].info = NodeInfo(type='QN', value='>')
nodes[5] = Node(index=5, word='papers', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Author')
nodes[6] = Node(index=6, word='citations', posTag='--')
nodes[6].info = NodeInfo(type='NN', value='Journal')
nodes[7] = Node(index=7, word='most', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='>')
nodes[8] = Node(index=8, word='total', posTag='--')
nodes[8].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[5])
nodes[5].parent = nodes[2]
nodes[3].children.append(nodes[4])
nodes[4].parent = nodes[3]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[6].children.append(nodes[7])
nodes[7].parent = nodes[6]
nodes[6].children.append(nodes[8])
nodes[8].parent = nodes[6]
print('===========test for Running adjust() in TreeAdjustor==========='
)
treeList = TreeAdjustor.adjust(T)
print('Output size: %d' % len(treeList))
print('Output trees:')
ctr = 0
for tr in treeList:
print('Tree %d %s' % (ctr, tr.getSentence()))
ctr += 1
@staticmethod
def cmpp(a, b):
return a.getScore() > b.getScore()
obj = TreeAdjustorTest()
obj.getAdjustedTreesTest()
<|reserved_special_token_1|>
from ParseTree import ParseTree
from Node import Node
from NodeInfo import NodeInfo
from TreeAdjustor import TreeAdjustor
from model.SchemaGraph import SchemaGraph
class TreeAdjustorTest:
schema = None
def __init__(self):
return
def getAdjustedTreesTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 8)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='papers', posTag='--')
nodes[4].info = NodeInfo(type='NN', value='Author')
nodes[5] = Node(index=5, word='citations', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Journal')
nodes[6] = Node(index=6, word='most', posTag='--')
nodes[6].info = NodeInfo(type='FN', value='>')
nodes[7] = Node(index=7, word='total', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[4])
nodes[4].parent = nodes[2]
nodes[4].children.append(nodes[5])
nodes[5].parent = nodes[4]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[5].children.append(nodes[7])
nodes[7].parent = nodes[5]
print(
'===========test for Running getAdjustedTrees() in TreeAdjustor==========='
)
print('The original tree:')
print(T.toString())
print('Number of possible trees for choice:')
obj = TreeAdjustor()
result = TreeAdjustor.getAdjustedTrees(T)
print(len(result))
for i in range(0, len(result)):
for j in range(i + 1, len(result)):
if result[i].getScore() <= result[j].getScore():
temp = result[i]
result[i] = result[j]
result[j] = temp
print('The three trees with highest scores look like:')
for i in range(0, 5):
print(result[i])
for tree in result:
print(' treeList Result %s:%d' % (tree.getSentence(), tree.
getScore()))
tree.insertImplicitNodes()
query = tree.translateToSQL(self.schema)
print('qUERY: ' + query.toString())
def adjustTest(self):
T = ParseTree()
nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in
range(0, 9)]
nodes[0] = Node(index=0, word='ROOT', posTag='--')
nodes[0].info = NodeInfo(type='ROOT', value='ROOT')
nodes[1] = Node(index=1, word='return', posTag='--')
nodes[1].info = NodeInfo(type='SN', value='SELECT')
nodes[2] = Node(index=2, word='conference', posTag='--')
nodes[2].info = NodeInfo(type='NN', value='Author')
nodes[3] = Node(index=3, word='area', posTag='--')
nodes[3].info = NodeInfo(type='NN', value='Title')
nodes[4] = Node(index=4, word='each', posTag='--')
nodes[4].info = NodeInfo(type='QN', value='>')
nodes[5] = Node(index=5, word='papers', posTag='--')
nodes[5].info = NodeInfo(type='NN', value='Author')
nodes[6] = Node(index=6, word='citations', posTag='--')
nodes[6].info = NodeInfo(type='NN', value='Journal')
nodes[7] = Node(index=7, word='most', posTag='--')
nodes[7].info = NodeInfo(type='FN', value='>')
nodes[8] = Node(index=8, word='total', posTag='--')
nodes[8].info = NodeInfo(type='FN', value='Year')
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[5])
nodes[5].parent = nodes[2]
nodes[3].children.append(nodes[4])
nodes[4].parent = nodes[3]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[6].children.append(nodes[7])
nodes[7].parent = nodes[6]
nodes[6].children.append(nodes[8])
nodes[8].parent = nodes[6]
print('===========test for Running adjust() in TreeAdjustor==========='
)
treeList = TreeAdjustor.adjust(T)
print('Output size: %d' % len(treeList))
print('Output trees:')
ctr = 0
for tr in treeList:
print('Tree %d %s' % (ctr, tr.getSentence()))
ctr += 1
@staticmethod
def cmpp(a, b):
return a.getScore() > b.getScore()
obj = TreeAdjustorTest()
obj.getAdjustedTreesTest()
<|reserved_special_token_1|>
from ParseTree import ParseTree
from Node import Node
from NodeInfo import NodeInfo
from TreeAdjustor import TreeAdjustor
from model.SchemaGraph import SchemaGraph
class TreeAdjustorTest:
schema = None
def __init__(self):
return
def getAdjustedTreesTest(self):
T = ParseTree()
nodes = [Node(index=-1, word="DEFAULT", posTag="DEFAULT") for i in range(0, 8)]
nodes[0] = Node(index=0, word="ROOT", posTag="--")
nodes[0].info = NodeInfo(type="ROOT", value="ROOT")
nodes[1] = Node(index=1, word="return", posTag="--")
nodes[1].info = NodeInfo(type="SN", value="SELECT")
nodes[2] = Node(index=2, word="conference", posTag="--")
nodes[2].info = NodeInfo(type="NN", value="Author")
nodes[3] = Node(index=3, word="area", posTag="--")
nodes[3].info = NodeInfo(type="NN", value="Title")
nodes[4] = Node(index=4, word="papers", posTag="--")
nodes[4].info = NodeInfo(type="NN", value="Author")
nodes[5] = Node(index=5, word="citations", posTag="--")
nodes[5].info = NodeInfo(type="NN", value="Journal")
nodes[6] = Node(index=6, word="most", posTag="--")
nodes[6].info = NodeInfo(type="FN", value=">")
nodes[7] = Node(index=7, word="total", posTag="--")
nodes[7].info = NodeInfo(type="FN", value="Year")
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[4])
nodes[4].parent = nodes[2]
nodes[4].children.append(nodes[5])
nodes[5].parent = nodes[4]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[5].children.append(nodes[7])
nodes[7].parent = nodes[5]
print ("===========test for Running getAdjustedTrees() in TreeAdjustor===========")
print ("The original tree:")
print (T.toString())
print ("Number of possible trees for choice:")
obj = TreeAdjustor()
result = TreeAdjustor.getAdjustedTrees(T)
# result = TreeAdjustor.adjust(T)
print (len(result))
# result = sorted(result,cmp=TreeAdjustorTest.cmpp)
# l =sorted(m, cmp =TreeAdjustor.timeStampCompare)
for i in range(0, len(result)):
for j in range(i+1, len(result)):
if(result[i].getScore() <= result[j].getScore()):
temp = result[i]
result[i] =result[j]
result[j] = temp
print ("The three trees with highest scores look like:")
for i in range(0,5):
print (result[i])
for tree in result:
print (" treeList Result %s:%d" % (tree.getSentence(), tree.getScore()))
tree.insertImplicitNodes()
query = tree.translateToSQL(self.schema)
print ("qUERY: " + query.toString())
def adjustTest(self):
T = ParseTree()
nodes = [Node(index=-1, word="DEFAULT", posTag="DEFAULT") for i in range(0, 9)]
nodes[0] = Node(index=0, word="ROOT",posTag= "--")
nodes[0].info = NodeInfo(type="ROOT", value="ROOT")
nodes[1] = Node(index=1, word="return", posTag="--")
nodes[1].info = NodeInfo(type="SN", value="SELECT")
nodes[2] = Node(index=2, word="conference", posTag="--")
nodes[2].info = NodeInfo(type="NN", value="Author")
nodes[3] = Node(index=3, word="area", posTag="--")
nodes[3].info =NodeInfo(type="NN", value="Title")
nodes[4] =Node(index=4, word="each", posTag="--")
nodes[4].info = NodeInfo(type="QN", value=">")
nodes[5] = Node(index=5, word="papers", posTag="--")
nodes[5].info = NodeInfo(type="NN", value="Author")
nodes[6] = Node(index=6, word="citations", posTag="--")
nodes[6].info = NodeInfo(type="NN", value="Journal")
nodes[7] = Node(index=7, word="most", posTag="--")
nodes[7].info = NodeInfo(type="FN", value=">")
nodes[8] = Node(index=8, word="total", posTag="--")
nodes[8].info = NodeInfo(type="FN", value="Year")
T.root = nodes[0]
nodes[0].children.append(nodes[1])
nodes[1].parent = nodes[0]
nodes[1].children.append(nodes[2])
nodes[2].parent = nodes[1]
nodes[2].children.append(nodes[3])
nodes[3].parent = nodes[2]
nodes[2].children.append(nodes[5])
nodes[5].parent = nodes[2]
nodes[3].children.append(nodes[4])
nodes[4].parent = nodes[3]
nodes[5].children.append(nodes[6])
nodes[6].parent = nodes[5]
nodes[6].children.append(nodes[7])
nodes[7].parent = nodes[6]
nodes[6].children.append(nodes[8])
nodes[8].parent = nodes[6]
print ("===========test for Running adjust() in TreeAdjustor===========")
treeList = TreeAdjustor.adjust(T)
print ("Output size: %d"%len(treeList))
print ("Output trees:")
ctr=0
for tr in treeList:
print ("Tree %d %s"%(ctr, tr.getSentence()))
ctr+=1
@staticmethod
def cmpp(a,b):
return a.getScore() > b.getScore()
# Guard the smoke-test driver so importing this module does not run it.
if __name__ == "__main__":
    obj = TreeAdjustorTest()
    obj.getAdjustedTreesTest()
    # obj.adjustTest()
|
flexible
|
{
"blob_id": "1db397df2d030b2f622e701c46c15d653cb79e55",
"index": 5079,
"step-1": "<mask token>\n\n\nclass TreeAdjustorTest:\n <mask token>\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = 
result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TreeAdjustorTest:\n <mask token>\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = 
result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 9)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='each', posTag='--')\n nodes[4].info = NodeInfo(type='QN', value='>')\n nodes[5] = Node(index=5, word='papers', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Author')\n nodes[6] = Node(index=6, word='citations', posTag='--')\n nodes[6].info = NodeInfo(type='NN', value='Journal')\n nodes[7] = Node(index=7, word='most', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='>')\n nodes[8] = Node(index=8, word='total', posTag='--')\n nodes[8].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = 
nodes[6]\n print('===========test for Running adjust() in TreeAdjustor==========='\n )\n treeList = TreeAdjustor.adjust(T)\n print('Output size: %d' % len(treeList))\n print('Output trees:')\n ctr = 0\n for tr in treeList:\n print('Tree %d %s' % (ctr, tr.getSentence()))\n ctr += 1\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TreeAdjustorTest:\n schema = None\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = 
result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 9)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='each', posTag='--')\n nodes[4].info = NodeInfo(type='QN', value='>')\n nodes[5] = Node(index=5, word='papers', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Author')\n nodes[6] = Node(index=6, word='citations', posTag='--')\n nodes[6].info = NodeInfo(type='NN', value='Journal')\n nodes[7] = Node(index=7, word='most', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='>')\n nodes[8] = Node(index=8, word='total', posTag='--')\n nodes[8].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = 
nodes[6]\n print('===========test for Running adjust() in TreeAdjustor==========='\n )\n treeList = TreeAdjustor.adjust(T)\n print('Output size: %d' % len(treeList))\n print('Output trees:')\n ctr = 0\n for tr in treeList:\n print('Tree %d %s' % (ctr, tr.getSentence()))\n ctr += 1\n\n @staticmethod\n def cmpp(a, b):\n return a.getScore() > b.getScore()\n\n\nobj = TreeAdjustorTest()\nobj.getAdjustedTreesTest()\n",
"step-4": "from ParseTree import ParseTree\nfrom Node import Node\nfrom NodeInfo import NodeInfo\nfrom TreeAdjustor import TreeAdjustor\nfrom model.SchemaGraph import SchemaGraph\n\n\nclass TreeAdjustorTest:\n schema = None\n\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 8)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='papers', posTag='--')\n nodes[4].info = NodeInfo(type='NN', value='Author')\n nodes[5] = Node(index=5, word='citations', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Journal')\n nodes[6] = Node(index=6, word='most', posTag='--')\n nodes[6].info = NodeInfo(type='FN', value='>')\n nodes[7] = Node(index=7, word='total', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n print(\n '===========test for Running getAdjustedTrees() in TreeAdjustor==========='\n )\n print('The original tree:')\n print(T.toString())\n print('Number of possible trees for choice:')\n obj = TreeAdjustor()\n result = 
TreeAdjustor.getAdjustedTrees(T)\n print(len(result))\n for i in range(0, len(result)):\n for j in range(i + 1, len(result)):\n if result[i].getScore() <= result[j].getScore():\n temp = result[i]\n result[i] = result[j]\n result[j] = temp\n print('The three trees with highest scores look like:')\n for i in range(0, 5):\n print(result[i])\n for tree in result:\n print(' treeList Result %s:%d' % (tree.getSentence(), tree.\n getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print('qUERY: ' + query.toString())\n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word='DEFAULT', posTag='DEFAULT') for i in\n range(0, 9)]\n nodes[0] = Node(index=0, word='ROOT', posTag='--')\n nodes[0].info = NodeInfo(type='ROOT', value='ROOT')\n nodes[1] = Node(index=1, word='return', posTag='--')\n nodes[1].info = NodeInfo(type='SN', value='SELECT')\n nodes[2] = Node(index=2, word='conference', posTag='--')\n nodes[2].info = NodeInfo(type='NN', value='Author')\n nodes[3] = Node(index=3, word='area', posTag='--')\n nodes[3].info = NodeInfo(type='NN', value='Title')\n nodes[4] = Node(index=4, word='each', posTag='--')\n nodes[4].info = NodeInfo(type='QN', value='>')\n nodes[5] = Node(index=5, word='papers', posTag='--')\n nodes[5].info = NodeInfo(type='NN', value='Author')\n nodes[6] = Node(index=6, word='citations', posTag='--')\n nodes[6].info = NodeInfo(type='NN', value='Journal')\n nodes[7] = Node(index=7, word='most', posTag='--')\n nodes[7].info = NodeInfo(type='FN', value='>')\n nodes[8] = Node(index=8, word='total', posTag='--')\n nodes[8].info = NodeInfo(type='FN', value='Year')\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n 
nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = nodes[6]\n print('===========test for Running adjust() in TreeAdjustor==========='\n )\n treeList = TreeAdjustor.adjust(T)\n print('Output size: %d' % len(treeList))\n print('Output trees:')\n ctr = 0\n for tr in treeList:\n print('Tree %d %s' % (ctr, tr.getSentence()))\n ctr += 1\n\n @staticmethod\n def cmpp(a, b):\n return a.getScore() > b.getScore()\n\n\nobj = TreeAdjustorTest()\nobj.getAdjustedTreesTest()\n",
"step-5": "\nfrom ParseTree import ParseTree\nfrom Node import Node\nfrom NodeInfo import NodeInfo\nfrom TreeAdjustor import TreeAdjustor\nfrom model.SchemaGraph import SchemaGraph\n\n\nclass TreeAdjustorTest:\n\n schema = None\n def __init__(self):\n return\n\n def getAdjustedTreesTest(self):\n\n\n\n T = ParseTree()\n nodes = [Node(index=-1, word=\"DEFAULT\", posTag=\"DEFAULT\") for i in range(0, 8)]\n\n nodes[0] = Node(index=0, word=\"ROOT\", posTag=\"--\")\n nodes[0].info = NodeInfo(type=\"ROOT\", value=\"ROOT\")\n nodes[1] = Node(index=1, word=\"return\", posTag=\"--\")\n nodes[1].info = NodeInfo(type=\"SN\", value=\"SELECT\")\n nodes[2] = Node(index=2, word=\"conference\", posTag=\"--\")\n nodes[2].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[3] = Node(index=3, word=\"area\", posTag=\"--\")\n nodes[3].info = NodeInfo(type=\"NN\", value=\"Title\")\n nodes[4] = Node(index=4, word=\"papers\", posTag=\"--\")\n nodes[4].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[5] = Node(index=5, word=\"citations\", posTag=\"--\")\n nodes[5].info = NodeInfo(type=\"NN\", value=\"Journal\")\n nodes[6] = Node(index=6, word=\"most\", posTag=\"--\")\n nodes[6].info = NodeInfo(type=\"FN\", value=\">\")\n nodes[7] = Node(index=7, word=\"total\", posTag=\"--\")\n nodes[7].info = NodeInfo(type=\"FN\", value=\"Year\")\n\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[4])\n nodes[4].parent = nodes[2]\n nodes[4].children.append(nodes[5])\n nodes[5].parent = nodes[4]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[5].children.append(nodes[7])\n nodes[7].parent = nodes[5]\n\n print (\"===========test for Running getAdjustedTrees() in TreeAdjustor===========\")\n print (\"The original tree:\")\n print (T.toString())\n print (\"Number of possible 
trees for choice:\")\n\n obj = TreeAdjustor()\n result = TreeAdjustor.getAdjustedTrees(T)\n # result = TreeAdjustor.adjust(T)\n\n print (len(result))\n # result = sorted(result,cmp=TreeAdjustorTest.cmpp)\n # l =sorted(m, cmp =TreeAdjustor.timeStampCompare)\n for i in range(0, len(result)):\n for j in range(i+1, len(result)):\n if(result[i].getScore() <= result[j].getScore()):\n temp = result[i]\n result[i] =result[j]\n result[j] = temp\n print (\"The three trees with highest scores look like:\")\n for i in range(0,5):\n print (result[i])\n\n for tree in result:\n print (\" treeList Result %s:%d\" % (tree.getSentence(), tree.getScore()))\n tree.insertImplicitNodes()\n query = tree.translateToSQL(self.schema)\n print (\"qUERY: \" + query.toString())\n \n\n def adjustTest(self):\n T = ParseTree()\n nodes = [Node(index=-1, word=\"DEFAULT\", posTag=\"DEFAULT\") for i in range(0, 9)]\n nodes[0] = Node(index=0, word=\"ROOT\",posTag= \"--\")\n nodes[0].info = NodeInfo(type=\"ROOT\", value=\"ROOT\")\n nodes[1] = Node(index=1, word=\"return\", posTag=\"--\")\n nodes[1].info = NodeInfo(type=\"SN\", value=\"SELECT\")\n nodes[2] = Node(index=2, word=\"conference\", posTag=\"--\")\n nodes[2].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[3] = Node(index=3, word=\"area\", posTag=\"--\")\n nodes[3].info =NodeInfo(type=\"NN\", value=\"Title\")\n nodes[4] =Node(index=4, word=\"each\", posTag=\"--\")\n nodes[4].info = NodeInfo(type=\"QN\", value=\">\")\n nodes[5] = Node(index=5, word=\"papers\", posTag=\"--\")\n nodes[5].info = NodeInfo(type=\"NN\", value=\"Author\")\n nodes[6] = Node(index=6, word=\"citations\", posTag=\"--\")\n nodes[6].info = NodeInfo(type=\"NN\", value=\"Journal\")\n nodes[7] = Node(index=7, word=\"most\", posTag=\"--\")\n nodes[7].info = NodeInfo(type=\"FN\", value=\">\")\n nodes[8] = Node(index=8, word=\"total\", posTag=\"--\")\n nodes[8].info = NodeInfo(type=\"FN\", value=\"Year\")\n\n T.root = nodes[0]\n nodes[0].children.append(nodes[1])\n 
nodes[1].parent = nodes[0]\n nodes[1].children.append(nodes[2])\n nodes[2].parent = nodes[1]\n nodes[2].children.append(nodes[3])\n nodes[3].parent = nodes[2]\n nodes[2].children.append(nodes[5])\n nodes[5].parent = nodes[2]\n nodes[3].children.append(nodes[4])\n nodes[4].parent = nodes[3]\n nodes[5].children.append(nodes[6])\n nodes[6].parent = nodes[5]\n nodes[6].children.append(nodes[7])\n nodes[7].parent = nodes[6]\n nodes[6].children.append(nodes[8])\n nodes[8].parent = nodes[6]\n\n print (\"===========test for Running adjust() in TreeAdjustor===========\")\n\n treeList = TreeAdjustor.adjust(T)\n print (\"Output size: %d\"%len(treeList))\n\n print (\"Output trees:\")\n ctr=0\n for tr in treeList:\n print (\"Tree %d %s\"%(ctr, tr.getSentence()))\n ctr+=1\n @staticmethod\n def cmpp(a,b):\n\n return a.getScore() > b.getScore()\n\nobj = TreeAdjustorTest()\nobj.getAdjustedTreesTest()\n# obj.adjustTest()\n\n\n\n",
"step-ids": [
3,
4,
8,
9,
10
]
}
|
[
3,
4,
8,
9,
10
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 14:34:56 2019
ref :
https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib
@author: jiedeng
"""
import numpy as np
from itertools import product, combinations
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
    """Swap y and z coordinates of the triangulated interface and build two
    cutoff-shifted copies of it.

    *inter* is a pytim-style interface object exposing ``triangulated_surface``
    (vertices, faces, ...) and ``universe.dimensions``; *liq_cutoff* and
    *vap_cutoff* are the shift distances. ``liq_in``, ``vap_in`` and ``int_in``
    are accepted for signature compatibility but not used here.

    Returns ``(xlim, zlim, ylim, interface1, interface2)`` — note the
    deliberate swap: ``zlim`` is ``dimensions[1]`` and ``ylim`` is
    ``dimensions[2]``, matching the swapped triangle coordinates.
    """
    verts = inter.triangulated_surface[0]
    faces = inter.triangulated_surface[1]
    # Fancy indexing verts[faces] yields one (3, 3) coordinate block per triangle.
    triangles = verts[faces]
    # Box limits, with the y/z roles exchanged to match the swapped axes.
    xlim = inter.universe.dimensions[0]
    zlim = inter.universe.dimensions[1]
    ylim = inter.universe.dimensions[2]
    interface1 = np.zeros_like(triangles)
    interface2 = np.zeros_like(triangles)
    liq_shift = np.array([0, liq_cutoff, 0])
    vap_shift = np.array([0, vap_cutoff, 0])
    for i, tri in enumerate(triangles):
        # Reorder columns x, y, z -> x, z, y for this triangle's vertices.
        swapped = tri[:, [0, 2, 1]]
        # Triangles are classified by the first vertex's original z value:
        # presumably the lower surface is shifted up along the new y axis and
        # the upper (periodic image) surface shifted down — TODO confirm.
        if tri[:, -1][0] < zlim:
            interface1[i] = swapped + liq_shift
            interface2[i] = swapped + vap_shift
        else:
            interface1[i] = swapped - liq_shift
            interface2[i] = swapped - vap_shift
    return xlim, zlim, ylim, interface1, interface2
def plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in,box_color='k'):
    """Render a 3D scene of the simulation box, the two cutoff-shifted
    interface meshes, and the atoms colored by phase.

    inter        : pytim-style interface object (see switch_y_z).
    liq_cutoff   : liquid-side shift distance passed to switch_y_z.
    vap_cutoff   : vapor-side shift distance passed to switch_y_z.
    liq_in       : index array selecting liquid atoms (plotted red).
    vap_in       : index array selecting vapor atoms (plotted cyan).
    int_in       : index array selecting interfacial atoms (plotted black).
    box_color    : edge color of the box wireframe (default 'k').

    Shows the figure interactively; returns None.
    """
    # Box limits and y/z-swapped, shifted interface meshes.
    xlim,zlim,ylim,interface1,interface2 = switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)
    fig = plt.figure(figsize=(12, 12))
    ax1 = fig.add_subplot(111, projection='3d')
    # Transparent panes and no grid for a cleaner rendering.
    ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.grid(False)
    # Fancy indexing: `verts[faces]` to generate a collection of triangles
    # mesh1 = Poly3DCollection(triangles)
    # mesh1.set_edgecolor('none')
    # mesh1.set_alpha(0.3)
    # ax1.add_collection3d(mesh1)
    # Semi-transparent triangle mesh for the liquid-cutoff interface.
    mesh2 = Poly3DCollection(interface1)
    mesh2.set_edgecolor('none')
    mesh2.set_alpha(0.3)
    ax1.add_collection3d(mesh2)
    # Semi-transparent triangle mesh for the vapor-cutoff interface.
    mesh3 = Poly3DCollection(interface2)
    mesh3.set_edgecolor('none')
    mesh3.set_alpha(0.3);
    # mesh3.set_facecolor('b')
    ax1.add_collection3d(mesh3)
    # Atom positions with y and z swapped to match the interface meshes.
    pos = inter.universe.atoms.positions
    pos = np.array([pos[:,0],pos[:,2],pos[:,1]]).T
    pos_liq = pos[liq_in]
    xyz_liq = np.vstack([pos_liq[::, 0], pos_liq[::, 1], pos_liq[::, 2]])
    ax1.scatter(xyz_liq[0],xyz_liq[1],xyz_liq[2],color='r')
    pos_vap = pos[vap_in]
    xyz_vap = np.vstack([pos_vap[::, 0], pos_vap[::, 1], pos_vap[::, 2]])
    ax1.scatter(xyz_vap[0],xyz_vap[1],xyz_vap[2],color='c')
    pos_int = pos[int_in]
    xyz_int = np.vstack([pos_int[::, 0], pos_int[::, 1], pos_int[::, 2]])
    ax1.scatter(xyz_int[0],xyz_int[1],xyz_int[2],color='k')
    # Wireframe box: connect corner pairs that differ along exactly one axis.
    pts = np.array(list(product([0,xlim], [0,ylim], [0,zlim])))
    for s, e in combinations(pts, 2):
        if np.sum(np.abs(s-e)) in (xlim,ylim,zlim):
            ax1.plot3D(*zip(s, e), 'k-',color=box_color,linewidth=1)
    # Axis labels reflect the y/z swap applied above.
    ax1.set_xlabel("x")
    ax1.set_ylabel("z")
    ax1.set_zlabel("y")
    plt.xlim([0,xlim])
    plt.ylim([0,ylim])
    # plt.ylim([0,ylim])
    ax1.set_xlim([0,xlim])
    ax1.set_ylim([0,ylim])
    ax1.set_zlim([0,zlim])
    # ax1.set_aspect('equal')
    # Workaround: ax.set_aspect('equal') is unreliable for 3D axes.
    set_axes_equal(ax1)
    ax1.view_init(0, 10)
    plt.tight_layout()
    plt.show()
def set_axes_radius(ax, origin, radius):
    """Center each 3D axis of *ax* on *origin* with half-width *radius*."""
    setters = (ax.set_xlim3d, ax.set_ylim3d, ax.set_zlim3d)
    for center, set_lim in zip(origin, setters):
        set_lim([center - radius, center + radius])
def set_axes_equal(ax):
    """Force equal scale on all three axes of a 3D plot.

    Makes spheres look like spheres and cubes like cubes — one workaround
    for Matplotlib's ax.set_aspect('equal') / ax.axis('equal') not working
    on 3D axes.

    ax: a matplotlib 3D axis, e.g. as returned by plt.gca().
    """
    spans = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
    center = spans.mean(axis=1)
    # Largest half-extent over the three axes becomes the common radius.
    half_width = 0.5 * np.abs(spans[:, 1] - spans[:, 0]).max()
    set_axes_radius(ax, center, half_width)
#plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)
|
normal
|
{
"blob_id": "086c74669b6762a6b35e8a46f816db2f4f172caa",
"index": 1437,
"step-1": "<mask token>\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\n<mask token>\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-2": "<mask token>\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\n<mask token>\n\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-3": "<mask token>\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\ndef plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,\n box_color='k'):\n xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,\n vap_cutoff, liq_in, vap_in, int_in)\n fig = plt.figure(figsize=(12, 12))\n ax1 = fig.add_subplot(111, projection='3d')\n ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.grid(False)\n mesh2 = Poly3DCollection(interface1)\n mesh2.set_edgecolor('none')\n mesh2.set_alpha(0.3)\n ax1.add_collection3d(mesh2)\n mesh3 = Poly3DCollection(interface2)\n mesh3.set_edgecolor('none')\n mesh3.set_alpha(0.3)\n ax1.add_collection3d(mesh3)\n pos = inter.universe.atoms.positions\n pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T\n pos_liq = pos[liq_in]\n xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])\n ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')\n pos_vap = pos[vap_in]\n xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])\n ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')\n pos_int = pos[int_in]\n xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])\n 
ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')\n pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))\n for s, e in combinations(pts, 2):\n if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):\n ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)\n ax1.set_xlabel('x')\n ax1.set_ylabel('z')\n ax1.set_zlabel('y')\n plt.xlim([0, xlim])\n plt.ylim([0, ylim])\n ax1.set_xlim([0, xlim])\n ax1.set_ylim([0, ylim])\n ax1.set_zlim([0, zlim])\n set_axes_equal(ax1)\n ax1.view_init(0, 10)\n plt.tight_layout()\n plt.show()\n\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom itertools import product, combinations\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport matplotlib.pyplot as plt\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\ndef plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,\n box_color='k'):\n xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,\n vap_cutoff, liq_in, vap_in, int_in)\n fig = plt.figure(figsize=(12, 12))\n ax1 = fig.add_subplot(111, projection='3d')\n ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.grid(False)\n mesh2 = Poly3DCollection(interface1)\n mesh2.set_edgecolor('none')\n mesh2.set_alpha(0.3)\n ax1.add_collection3d(mesh2)\n mesh3 = Poly3DCollection(interface2)\n mesh3.set_edgecolor('none')\n mesh3.set_alpha(0.3)\n ax1.add_collection3d(mesh3)\n pos = inter.universe.atoms.positions\n pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T\n pos_liq = pos[liq_in]\n xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])\n ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')\n pos_vap = pos[vap_in]\n xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])\n 
ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')\n pos_int = pos[int_in]\n xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])\n ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')\n pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))\n for s, e in combinations(pts, 2):\n if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):\n ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)\n ax1.set_xlabel('x')\n ax1.set_ylabel('z')\n ax1.set_zlabel('y')\n plt.xlim([0, xlim])\n plt.ylim([0, ylim])\n ax1.set_xlim([0, xlim])\n ax1.set_ylim([0, ylim])\n ax1.set_zlim([0, zlim])\n set_axes_equal(ax1)\n ax1.view_init(0, 10)\n plt.tight_layout()\n plt.show()\n\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 30 14:34:56 2019\nref : \n https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib\n\n@author: jiedeng\n\"\"\"\nimport numpy as np\nfrom itertools import product, combinations\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport matplotlib.pyplot as plt\n\ndef switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n \n\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n \n xlim, zlim, ylim = inter.universe.dimensions[0],inter.universe.dimensions[1],inter.universe.dimensions[2]\n \n for i in range(len(triangles)):\n ## swap y and z\n tmp = np.array([triangles[i][:,0],triangles[i][:,2],triangles[i][:,1]]).T\n if triangles[i][:,-1][0] < zlim:\n interface1[i] = tmp + np.array([0,liq_cutoff,0])\n interface2[i] = tmp + np.array([0,vap_cutoff,0])\n else:\n interface1[i] = tmp - np.array([0,liq_cutoff,0])\n interface2[i] = tmp - np.array([0,vap_cutoff,0]) \n return xlim,zlim,ylim,interface1,interface2\n \ndef plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in,box_color='k'):\n \n xlim,zlim,ylim,interface1,interface2 = switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)\n fig = plt.figure(figsize=(12, 12))\n \n ax1 = fig.add_subplot(111, projection='3d')\n ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.grid(False)\n\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\n# mesh1 = Poly3DCollection(triangles)\n# mesh1.set_edgecolor('none')\n# mesh1.set_alpha(0.3)\n# ax1.add_collection3d(mesh1)\n \n mesh2 = Poly3DCollection(interface1)\n mesh2.set_edgecolor('none')\n mesh2.set_alpha(0.3)\n ax1.add_collection3d(mesh2)\n \n mesh3 = Poly3DCollection(interface2)\n 
mesh3.set_edgecolor('none')\n mesh3.set_alpha(0.3);\n# mesh3.set_facecolor('b')\n ax1.add_collection3d(mesh3)\n \n pos = inter.universe.atoms.positions\n pos = np.array([pos[:,0],pos[:,2],pos[:,1]]).T\n pos_liq = pos[liq_in]\n xyz_liq = np.vstack([pos_liq[::, 0], pos_liq[::, 1], pos_liq[::, 2]])\n \n ax1.scatter(xyz_liq[0],xyz_liq[1],xyz_liq[2],color='r')\n \n pos_vap = pos[vap_in]\n xyz_vap = np.vstack([pos_vap[::, 0], pos_vap[::, 1], pos_vap[::, 2]])\n ax1.scatter(xyz_vap[0],xyz_vap[1],xyz_vap[2],color='c')\n \n pos_int = pos[int_in]\n xyz_int = np.vstack([pos_int[::, 0], pos_int[::, 1], pos_int[::, 2]])\n ax1.scatter(xyz_int[0],xyz_int[1],xyz_int[2],color='k')\n\n \n \n pts = np.array(list(product([0,xlim], [0,ylim], [0,zlim]))) \n for s, e in combinations(pts, 2):\n if np.sum(np.abs(s-e)) in (xlim,ylim,zlim):\n ax1.plot3D(*zip(s, e), 'k-',color=box_color,linewidth=1) \n \n \n ax1.set_xlabel(\"x\")\n ax1.set_ylabel(\"z\")\n ax1.set_zlabel(\"y\")\n\n plt.xlim([0,xlim])\n plt.ylim([0,ylim])\n# plt.ylim([0,ylim])\n\n ax1.set_xlim([0,xlim])\n ax1.set_ylim([0,ylim])\n ax1.set_zlim([0,zlim])\n \n # ax1.set_aspect('equal') \n set_axes_equal(ax1)\n ax1.view_init(0, 10)\n plt.tight_layout()\n plt.show()\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\ndef set_axes_equal(ax):\n '''Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. 
This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n '''\n\n limits = np.array([\n ax.get_xlim3d(),\n ax.get_ylim3d(),\n ax.get_zlim3d(),])\n# print(limits)\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n\n\n#plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def tobin(n):
    """Return the binary representation of non-negative integer *n* as a string.

    Fixes the original implementation, which never updated ``n`` inside its
    ``while (n/2 != 0)`` loop (an infinite loop for any n > 0) and appended
    the low bit twice whenever it was 1.
    """
    if n == 0:
        return "0"
    bits = ""
    while n > 0:
        # Prepend so the most-significant bit ends up first.
        bits = str(n % 2) + bits
        n //= 2
    return bits
# Read an integer from stdin and print its binary representation.
n = int(input())
# Bug fix: the original called tobin(5), ignoring the value just read, and
# bound the result to a variable named `bin`, shadowing the builtin.
result = tobin(n)
print(result)
|
normal
|
{
"blob_id": "1c5ca920fe1f116a5bc52c9e5c53c13b1e1c925f",
"index": 2412,
"step-1": "<mask token>\n",
"step-2": "def tobin(n):\n bin = ''\n while n / 2 != 0:\n if n % 2 == 0:\n bin = bin + '0'\n else:\n bin = bin + '1'\n if n % 2 == 1:\n bin = bin + '1'\n return bin\n\n\n<mask token>\n",
"step-3": "def tobin(n):\n bin = ''\n while n / 2 != 0:\n if n % 2 == 0:\n bin = bin + '0'\n else:\n bin = bin + '1'\n if n % 2 == 1:\n bin = bin + '1'\n return bin\n\n\n<mask token>\nprint(bin)\n",
"step-4": "def tobin(n):\n bin = ''\n while n / 2 != 0:\n if n % 2 == 0:\n bin = bin + '0'\n else:\n bin = bin + '1'\n if n % 2 == 1:\n bin = bin + '1'\n return bin\n\n\nn = int(input())\nbin = tobin(5)\nprint(bin)\n",
"step-5": "def tobin(n):\r\n bin = \"\";\r\n while(n/2!=0):\r\n if n%2==0:\r\n bin = bin + \"0\"\r\n else:\r\n bin = bin + \"1\"\r\n if n%2==1:\r\n bin = bin + \"1\"\r\n return bin\r\n\r\nn = int(input())\r\nbin = tobin(5)\r\nprint(bin)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask_marshmallow import Marshmallow
from models import Uservet
ma = Marshmallow()
class UserVetSchema(ma.Schema):
    """Marshmallow schema for serializing `Uservet` model instances."""

    class Meta:
        # Model the schema's fields are derived from.
        model = Uservet
# Field names exposed when serializing a vet user (one entry per column).
user_vet_1 = [
    'dni',
    'email',
    'nombre',
    'apellidos',
    'telefono',
    'tipo_uservet',
]
|
normal
|
{
"blob_id": "677154aa99a5a4876532f3e1edfec45b1790384c",
"index": 9511,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserVetSchema(ma.Schema):\n\n\n class Meta:\n model = Uservet\n\n\n<mask token>\n",
"step-3": "<mask token>\nma = Marshmallow()\n\n\nclass UserVetSchema(ma.Schema):\n\n\n class Meta:\n model = Uservet\n\n\nuser_vet_1 = ['dni', 'email', 'nombre', 'apellidos', 'telefono', 'tipo_uservet'\n ]\n",
"step-4": "from flask_marshmallow import Marshmallow\nfrom models import Uservet\nma = Marshmallow()\n\n\nclass UserVetSchema(ma.Schema):\n\n\n class Meta:\n model = Uservet\n\n\nuser_vet_1 = ['dni', 'email', 'nombre', 'apellidos', 'telefono', 'tipo_uservet'\n ]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
print('tensorflow version: {}'.format(tf.__version__))
def __prepare_train_data(df, feature):
    """Group *df* by (event, start) and build one feature sequence per group.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'event' and 'start' columns plus the *feature* column.
    feature : str
        Name of the column whose values form each sample's time series.
        (Bug fix: the original accepted this parameter but ignored it,
        hard-coding 'CylinderBorePressure'.)

    Returns
    -------
    tuple of numpy.ndarray
        ``data[i]`` has shape (history_size, 1); labels are the numeric
        class ids produced by ``convert_labels`` from each group's event.
    """
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    # groupby iteration yields ((event, start), group) pairs.
    for group_key, group in groups:
        values = group[feature].values
        # Reshape data from (history_size,) to (history_size, 1)
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(group_key[0])  # the event name is the label
    return np.array(data), np.array(convert_labels(labels))
def convert_labels(labels):
    """Translate event-name labels into float class ids.

    'cut' -> 0.0, 'sort' -> 1.0, 'idle' -> 2.0; any other label is
    silently dropped, matching the branch-based original.
    """
    class_ids = {'cut': 0.0, 'sort': 1.0, 'idle': 2.0}
    return [class_ids[label] for label in labels if label in class_ids]
# Pipeline hyperparameters.  NOTE(review): none of these three are used by
# the live code below — presumably intended for the commented-out training
# loop at the bottom; confirm before removing.
TRAIN_SPLIT = 300000
BATCH_SIZE = 256
BUFFER_SIZE = 10000
# Fix the TF RNG seed for reproducibility.
tf.random.set_seed(13)
# Load the raw sensor log, order it chronologically, and keep only the
# three event classes the model distinguishes.
train_df = pd.read_csv('data/st-cloud.csv')
train_df = train_df.sort_values(by=['timestamp'])
train_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]
x_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')
# Sanity-check the first sample / label pair.
print(x_train_uni[0])
print(y_train_uni[0])
# Wrap the numpy arrays as a tf.data pipeline (batching/shuffling and the
# LSTM training below are currently disabled).
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
# train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
#
# val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
# val_univariate = val_univariate.batch(BATCH_SIZE).repeat()
#
# simple_lstm_model = tf.keras.models.Sequential([
#     tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),
#     tf.keras.layers.Dense(1)
# ])
#
# simple_lstm_model.compile(optimizer='adam', loss='mae')
#
# for x, y in val_univariate.take(1):
#     print(simple_lstm_model.predict(x).shape)
#
# EVALUATION_INTERVAL = 200
# EPOCHS = 10
#
# simple_lstm_model.fit(train_univariate, epochs=EPOCHS,
#                       steps_per_epoch=EVALUATION_INTERVAL,
#                       validation_data=val_univariate, validation_steps=50)

# for x, y in val_univariate.take(3):
#     plot = show_plot([x[0].numpy(), y[0].numpy(),
#                       simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')
#     plot.show()
|
normal
|
{
"blob_id": "55030648a6b76636e456990c1d2b02baa35a695d",
"index": 9221,
"step-1": "<mask token>\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\n<mask token>\ntf.random.set_seed(13)\n<mask token>\nprint(x_train_uni[0])\nprint(y_train_uni[0])\n<mask token>\n",
"step-3": "<mask token>\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\nTRAIN_SPLIT = 300000\nBATCH_SIZE = 256\nBUFFER_SIZE = 10000\ntf.random.set_seed(13)\ntrain_df = pd.read_csv('data/st-cloud.csv')\ntrain_df = train_df.sort_values(by=['timestamp'])\ntrain_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] ==\n 'sort') | (train_df['event'] == 'idle')]\nx_train_uni, y_train_uni = __prepare_train_data(train_df, feature=\n 'CylinderBorePressure')\nprint(x_train_uni[0])\nprint(y_train_uni[0])\ntrain_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni,\n y_train_uni))\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\nTRAIN_SPLIT = 300000\nBATCH_SIZE = 256\nBUFFER_SIZE = 10000\ntf.random.set_seed(13)\ntrain_df = pd.read_csv('data/st-cloud.csv')\ntrain_df = train_df.sort_values(by=['timestamp'])\ntrain_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] ==\n 'sort') | (train_df['event'] == 'idle')]\nx_train_uni, y_train_uni = __prepare_train_data(train_df, feature=\n 'CylinderBorePressure')\nprint(x_train_uni[0])\nprint(y_train_uni[0])\ntrain_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni,\n y_train_uni))\n",
"step-5": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n # Reshape data from (history_size,) to (history_size, 1)\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n\n return digit_labels\n\n\nTRAIN_SPLIT = 300000\nBATCH_SIZE = 256\nBUFFER_SIZE = 10000\n\ntf.random.set_seed(13)\n\ntrain_df = pd.read_csv('data/st-cloud.csv')\ntrain_df = train_df.sort_values(by=['timestamp'])\ntrain_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]\nx_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')\n\nprint(x_train_uni[0])\nprint(y_train_uni[0])\n\ntrain_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))\n# train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()\n#\n# val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))\n# val_univariate = val_univariate.batch(BATCH_SIZE).repeat()\n#\n# simple_lstm_model = tf.keras.models.Sequential([\n# tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),\n# tf.keras.layers.Dense(1)\n# ])\n#\n# simple_lstm_model.compile(optimizer='adam', loss='mae')\n#\n# for x, y in val_univariate.take(1):\n# print(simple_lstm_model.predict(x).shape)\n#\n# 
EVALUATION_INTERVAL = 200\n# EPOCHS = 10\n#\n# simple_lstm_model.fit(train_univariate, epochs=EPOCHS,\n# steps_per_epoch=EVALUATION_INTERVAL,\n# validation_data=val_univariate, validation_steps=50)\n\n# for x, y in val_univariate.take(3):\n# plot = show_plot([x[0].numpy(), y[0].numpy(),\n# simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')\n# plot.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def digital_sum(n):
    """Return the sum of the decimal digits of *n* (iterative form)."""
    total = 0
    while n >= 10:
        total += n % 10
        n //= 10
    # n is now a single digit; fold it in.
    return total + n
def digital_root(n):
    """Return the digital root of *n*: sum digits repeatedly until one remains."""
    while n >= 10:
        # Inlined digit sum of the current value of n.
        total = 0
        while n:
            total += n % 10
            n //= 10
        n = total
    return n
|
normal
|
{
"blob_id": "e3e6f1b6580a223558791cebfcb1a92d45553162",
"index": 1823,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef digital_root(n):\n if n < 10:\n return n\n return digital_root(digital_sum(n))\n",
"step-3": "def digital_sum(n):\n if n < 10:\n return n\n return n % 10 + digital_sum(n // 10)\n\n\ndef digital_root(n):\n if n < 10:\n return n\n return digital_root(digital_sum(n))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding:utf-8 -*-
from spider.driver.base.driver import Driver
from spider.driver.base.mysql import Mysql
import time
from pyquery import PyQuery
from spider.driver.base.field import Field,FieldName,Fieldlist,FieldType
from spider.driver.base.page import Page
from spider.driver.base.listcssselector import ListCssSelector
from spider.driver.base.mongodb import Mongodb
from spider.driver.base.tabsetup import TabSetup
# Fields scraped from each entry of the sogou public-account search results.
# NOTE(review): the regex appears meant to strip non-CJK characters from the
# account name — confirm against Field's regex semantics.
fl_weixin1 = Fieldlist(
    Field(fieldname='public_name', css_selector='div > div.txt-box > p.tit > a', regex=r'[^\u4e00-\u9fa5]*'),
)
# Fields scraped from each article entry on an account's history page.
fl_weixin2 = Fieldlist(
    Field(fieldname='article_name', css_selector='div > div > h4'),
    Field(fieldname='article_time', css_selector='div > div > p.weui_media_extra_info'),
)
# Page descriptor: the public-account search-result list.
page_weixin_1 = Page(name='微信公众号列表页面', fieldlist=fl_weixin1, listcssselector=ListCssSelector(list_css_selector='#main > div.news-box > ul > li'))
# Page descriptor: an account's article list, opened by clicking its name.
page_weixin_2 = Page(name='微信公众号文章列表页面', fieldlist=fl_weixin2, tabsetup=TabSetup(click_css_selector='div > div.txt-box > p.tit > a'), listcssselector=ListCssSelector(list_css_selector='#history > div'))
class WeixinSpider(Driver):
    """Spider that searches sogou.com for WeChat public accounts and walks
    through each account's article list."""

    def __init__(self, isheadless=False, ismobile=False, isvirtualdisplay=False, spider_id='', name=''):
        """Configure the underlying browser driver and remember the spider name."""
        Driver.__init__(self, log_file_name=spider_id, ismobile=ismobile, isvirtualdisplay=isvirtualdisplay,
                        isheadless=isheadless)
        self.name = name
        self.debug_log(name=name)

    def get_article(self, data_list=None):
        """Click through every article entry on the current account page.

        *data_list* is unused by the body; its default was the shared
        mutable ``[]`` — changed to ``None`` (backward compatible) to
        avoid the mutable-default-argument pitfall.
        """
        article_list = self.until_presence_of_all_elements_located_by_css_selector(css_selector=page_weixin_2.listcssselector.list_css_selector)
        # Re-locate each entry by index: clicking and driver.back() stale
        # the previously found elements.
        for i in range(1, len(article_list)+1):
            self.until_scroll_to_center_click_by_css_selector(css_selector='%s:nth-child(%s)'%(page_weixin_2.listcssselector.list_css_selector,i))
            time.sleep(3)
            self.driver.back()

    def run_spider(self):
        """For each configured public account, search sogou and collect data.

        Currently limited to the first account row ([:1]) — presumably a
        debugging restriction; confirm before widening.
        """
        for public in Mysql().query_data(table='weixin_public', field='public_name')[:1]:
            self.fast_get_page(url='http://weixin.sogou.com/', min_time_to_wait=15,max_time_to_wait=30)
            # Type the account name into the search box and submit.
            self.until_send_text_by_css_selector(css_selector='#query', text=public[0])
            time.sleep(3)
            self.fast_enter_page_by_css_selector(css_selector='#query')
            time.sleep(2)
            self.fast_click_same_page_by_css_selector(click_css_selector='#scroll-header > form > div > input.swz2')
            # Scrape the result list, then follow each account into its
            # article page (get_article handles per-article navigation).
            public_name_list = self.from_page_get_data_list(page=page_weixin_1)
            article_name_list = self.from_page_add_data_list_to_data_list(page=page_weixin_2, pre_page=page_weixin_1,data_list=public_name_list, extra_page_func=self.get_article)
            # self.fast_click_page_by_css_selector(ele=item, click_css_selector='div > div.txt-box > p.tit > a')
            # self.driver.switch_to.window(self.driver.window_handles[-1])
            # shop_data_list = self.from_page_get_data_list(page=page_weixin_1)
            # self.driver.close()
            # self.driver.switch_to.window(self.driver.window_handles[-1])
|
normal
|
{
"blob_id": "1a7a28a2264ed0204184ab1dd273b0b114657fa7",
"index": 3004,
"step-1": "<mask token>\n\n\nclass WeixinSpider(Driver):\n <mask token>\n\n def get_article(self, data_list=[]):\n article_list = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=page_weixin_2.listcssselector.list_css_selector))\n for i in range(1, len(article_list) + 1):\n self.until_scroll_to_center_click_by_css_selector(css_selector=\n '%s:nth-child(%s)' % (page_weixin_2.listcssselector.\n list_css_selector, i))\n time.sleep(3)\n self.driver.back()\n\n def run_spider(self):\n for public in Mysql().query_data(table='weixin_public', field=\n 'public_name')[:1]:\n self.fast_get_page(url='http://weixin.sogou.com/',\n min_time_to_wait=15, max_time_to_wait=30)\n self.until_send_text_by_css_selector(css_selector='#query',\n text=public[0])\n time.sleep(3)\n self.fast_enter_page_by_css_selector(css_selector='#query')\n time.sleep(2)\n self.fast_click_same_page_by_css_selector(click_css_selector=\n '#scroll-header > form > div > input.swz2')\n public_name_list = self.from_page_get_data_list(page=page_weixin_1)\n article_name_list = self.from_page_add_data_list_to_data_list(page\n =page_weixin_2, pre_page=page_weixin_1, data_list=\n public_name_list, extra_page_func=self.get_article)\n",
"step-2": "<mask token>\n\n\nclass WeixinSpider(Driver):\n\n def __init__(self, isheadless=False, ismobile=False, isvirtualdisplay=\n False, spider_id='', name=''):\n Driver.__init__(self, log_file_name=spider_id, ismobile=ismobile,\n isvirtualdisplay=isvirtualdisplay, isheadless=isheadless)\n self.name = name\n self.debug_log(name=name)\n\n def get_article(self, data_list=[]):\n article_list = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=page_weixin_2.listcssselector.list_css_selector))\n for i in range(1, len(article_list) + 1):\n self.until_scroll_to_center_click_by_css_selector(css_selector=\n '%s:nth-child(%s)' % (page_weixin_2.listcssselector.\n list_css_selector, i))\n time.sleep(3)\n self.driver.back()\n\n def run_spider(self):\n for public in Mysql().query_data(table='weixin_public', field=\n 'public_name')[:1]:\n self.fast_get_page(url='http://weixin.sogou.com/',\n min_time_to_wait=15, max_time_to_wait=30)\n self.until_send_text_by_css_selector(css_selector='#query',\n text=public[0])\n time.sleep(3)\n self.fast_enter_page_by_css_selector(css_selector='#query')\n time.sleep(2)\n self.fast_click_same_page_by_css_selector(click_css_selector=\n '#scroll-header > form > div > input.swz2')\n public_name_list = self.from_page_get_data_list(page=page_weixin_1)\n article_name_list = self.from_page_add_data_list_to_data_list(page\n =page_weixin_2, pre_page=page_weixin_1, data_list=\n public_name_list, extra_page_func=self.get_article)\n",
"step-3": "<mask token>\nfl_weixin1 = Fieldlist(Field(fieldname='public_name', css_selector=\n 'div > div.txt-box > p.tit > a', regex='[^\\\\u4e00-\\\\u9fa5]*'))\nfl_weixin2 = Fieldlist(Field(fieldname='article_name', css_selector=\n 'div > div > h4'), Field(fieldname='article_time', css_selector=\n 'div > div > p.weui_media_extra_info'))\npage_weixin_1 = Page(name='微信公众号列表页面', fieldlist=fl_weixin1,\n listcssselector=ListCssSelector(list_css_selector=\n '#main > div.news-box > ul > li'))\npage_weixin_2 = Page(name='微信公众号文章列表页面', fieldlist=fl_weixin2, tabsetup=\n TabSetup(click_css_selector='div > div.txt-box > p.tit > a'),\n listcssselector=ListCssSelector(list_css_selector='#history > div'))\n\n\nclass WeixinSpider(Driver):\n\n def __init__(self, isheadless=False, ismobile=False, isvirtualdisplay=\n False, spider_id='', name=''):\n Driver.__init__(self, log_file_name=spider_id, ismobile=ismobile,\n isvirtualdisplay=isvirtualdisplay, isheadless=isheadless)\n self.name = name\n self.debug_log(name=name)\n\n def get_article(self, data_list=[]):\n article_list = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=page_weixin_2.listcssselector.list_css_selector))\n for i in range(1, len(article_list) + 1):\n self.until_scroll_to_center_click_by_css_selector(css_selector=\n '%s:nth-child(%s)' % (page_weixin_2.listcssselector.\n list_css_selector, i))\n time.sleep(3)\n self.driver.back()\n\n def run_spider(self):\n for public in Mysql().query_data(table='weixin_public', field=\n 'public_name')[:1]:\n self.fast_get_page(url='http://weixin.sogou.com/',\n min_time_to_wait=15, max_time_to_wait=30)\n self.until_send_text_by_css_selector(css_selector='#query',\n text=public[0])\n time.sleep(3)\n self.fast_enter_page_by_css_selector(css_selector='#query')\n time.sleep(2)\n self.fast_click_same_page_by_css_selector(click_css_selector=\n '#scroll-header > form > div > input.swz2')\n public_name_list = self.from_page_get_data_list(page=page_weixin_1)\n 
article_name_list = self.from_page_add_data_list_to_data_list(page\n =page_weixin_2, pre_page=page_weixin_1, data_list=\n public_name_list, extra_page_func=self.get_article)\n",
"step-4": "from spider.driver.base.driver import Driver\nfrom spider.driver.base.mysql import Mysql\nimport time\nfrom pyquery import PyQuery\nfrom spider.driver.base.field import Field, FieldName, Fieldlist, FieldType\nfrom spider.driver.base.page import Page\nfrom spider.driver.base.listcssselector import ListCssSelector\nfrom spider.driver.base.mongodb import Mongodb\nfrom spider.driver.base.tabsetup import TabSetup\nfl_weixin1 = Fieldlist(Field(fieldname='public_name', css_selector=\n 'div > div.txt-box > p.tit > a', regex='[^\\\\u4e00-\\\\u9fa5]*'))\nfl_weixin2 = Fieldlist(Field(fieldname='article_name', css_selector=\n 'div > div > h4'), Field(fieldname='article_time', css_selector=\n 'div > div > p.weui_media_extra_info'))\npage_weixin_1 = Page(name='微信公众号列表页面', fieldlist=fl_weixin1,\n listcssselector=ListCssSelector(list_css_selector=\n '#main > div.news-box > ul > li'))\npage_weixin_2 = Page(name='微信公众号文章列表页面', fieldlist=fl_weixin2, tabsetup=\n TabSetup(click_css_selector='div > div.txt-box > p.tit > a'),\n listcssselector=ListCssSelector(list_css_selector='#history > div'))\n\n\nclass WeixinSpider(Driver):\n\n def __init__(self, isheadless=False, ismobile=False, isvirtualdisplay=\n False, spider_id='', name=''):\n Driver.__init__(self, log_file_name=spider_id, ismobile=ismobile,\n isvirtualdisplay=isvirtualdisplay, isheadless=isheadless)\n self.name = name\n self.debug_log(name=name)\n\n def get_article(self, data_list=[]):\n article_list = (self.\n until_presence_of_all_elements_located_by_css_selector(\n css_selector=page_weixin_2.listcssselector.list_css_selector))\n for i in range(1, len(article_list) + 1):\n self.until_scroll_to_center_click_by_css_selector(css_selector=\n '%s:nth-child(%s)' % (page_weixin_2.listcssselector.\n list_css_selector, i))\n time.sleep(3)\n self.driver.back()\n\n def run_spider(self):\n for public in Mysql().query_data(table='weixin_public', field=\n 'public_name')[:1]:\n self.fast_get_page(url='http://weixin.sogou.com/',\n 
min_time_to_wait=15, max_time_to_wait=30)\n self.until_send_text_by_css_selector(css_selector='#query',\n text=public[0])\n time.sleep(3)\n self.fast_enter_page_by_css_selector(css_selector='#query')\n time.sleep(2)\n self.fast_click_same_page_by_css_selector(click_css_selector=\n '#scroll-header > form > div > input.swz2')\n public_name_list = self.from_page_get_data_list(page=page_weixin_1)\n article_name_list = self.from_page_add_data_list_to_data_list(page\n =page_weixin_2, pre_page=page_weixin_1, data_list=\n public_name_list, extra_page_func=self.get_article)\n",
"step-5": "# -*- coding:utf-8 -*-\nfrom spider.driver.base.driver import Driver\nfrom spider.driver.base.mysql import Mysql\nimport time\nfrom pyquery import PyQuery\nfrom spider.driver.base.field import Field,FieldName,Fieldlist,FieldType\nfrom spider.driver.base.page import Page\nfrom spider.driver.base.listcssselector import ListCssSelector\nfrom spider.driver.base.mongodb import Mongodb\nfrom spider.driver.base.tabsetup import TabSetup\n\nfl_weixin1 = Fieldlist(\n Field(fieldname='public_name', css_selector='div > div.txt-box > p.tit > a', regex=r'[^\\u4e00-\\u9fa5]*'),\n)\n\nfl_weixin2 = Fieldlist(\n Field(fieldname='article_name', css_selector='div > div > h4'),\n Field(fieldname='article_time', css_selector='div > div > p.weui_media_extra_info'),\n)\n\npage_weixin_1 = Page(name='微信公众号列表页面', fieldlist=fl_weixin1, listcssselector=ListCssSelector(list_css_selector='#main > div.news-box > ul > li'))\n\npage_weixin_2 = Page(name='微信公众号文章列表页面', fieldlist=fl_weixin2, tabsetup=TabSetup(click_css_selector='div > div.txt-box > p.tit > a'), listcssselector=ListCssSelector(list_css_selector='#history > div'))\n\nclass WeixinSpider(Driver):\n\n def __init__(self,isheadless=False,ismobile=False,isvirtualdisplay=False,spider_id='',name=''):\n Driver.__init__(self, log_file_name=spider_id, ismobile=ismobile, isvirtualdisplay=isvirtualdisplay,\n isheadless=isheadless)\n self.name = name\n self.debug_log(name=name)\n\n def get_article(self, data_list=[]):\n article_list = self.until_presence_of_all_elements_located_by_css_selector(css_selector=page_weixin_2.listcssselector.list_css_selector)\n for i in range(1, len(article_list)+1):\n self.until_scroll_to_center_click_by_css_selector(css_selector='%s:nth-child(%s)'%(page_weixin_2.listcssselector.list_css_selector,i))\n time.sleep(3)\n self.driver.back()\n\n def run_spider(self):\n for public in Mysql().query_data(table='weixin_public', field='public_name')[:1]:\n self.fast_get_page(url='http://weixin.sogou.com/', 
min_time_to_wait=15,max_time_to_wait=30)\n self.until_send_text_by_css_selector(css_selector='#query', text=public[0])\n time.sleep(3)\n self.fast_enter_page_by_css_selector(css_selector='#query')\n time.sleep(2)\n self.fast_click_same_page_by_css_selector(click_css_selector='#scroll-header > form > div > input.swz2')\n public_name_list = self.from_page_get_data_list(page=page_weixin_1)\n article_name_list = self.from_page_add_data_list_to_data_list(page=page_weixin_2, pre_page=page_weixin_1,data_list=public_name_list, extra_page_func=self.get_article)\n # self.fast_click_page_by_css_selector(ele=item, click_css_selector='div > div.txt-box > p.tit > a')\n # self.driver.switch_to.window(self.driver.window_handles[-1])\n # shop_data_list = self.from_page_get_data_list(page=page_weixin_1)\n # self.driver.close()\n # self.driver.switch_to.window(self.driver.window_handles[-1])",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars (native pixels per side)
# zero padded RectBivariateSpline (padding optional; off by default)
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1, pad=False):
    """Build a RectBivariateSpline over (y1, x1, map1).

    By default this returns a plain RectBivariateSpline (the historical
    behavior: the zero-padding branch below was disabled by an unconditional
    early return, leaving it as dead code).  With pad=True the grid is
    extended by one linearly-extrapolated coordinate sample on each side and
    the map is zero there, so the spline falls to zero just outside the
    tabulated domain instead of extrapolating the edge values.

    Parameters:
      y1, x1 -- 1-D strictly increasing sample coordinates
      map1   -- 2-D array of shape (len(y1), len(x1))
      kx, ky -- spline degrees (default bilinear)
      pad    -- enable the zero-padded variant (default False, old behavior)
    Returns: scipy.interpolate.RectBivariateSpline instance
    """
    if not pad:
        return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # extend each axis by one step on both ends (linear extrapolation of the
    # coordinates); the new border samples of the map stay at zero
    y2 = numpy.zeros(numpy.size(y1) + 2)
    y2[1:-1] = y1
    y2[0] = 2 * y2[1] - y2[2]
    y2[-1] = 2 * y2[-2] - y2[-3]
    x2 = numpy.zeros(numpy.size(x1) + 2)
    x2[1:-1] = x1
    x2[0] = 2 * x2[1] - x2[2]
    x2[-1] = 2 * x2[-2] - x2[-3]
    map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))
    map2[1:-1, 1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
    """Bare attribute container, used as a lightweight namespace."""
# spectral energy distribution class
class SpectralEnergyDistribution():
    """Source spectral energy distribution.

    Supported types:
      'BB' -- blackbody; info = [T (K), solid angle]

    Raises ValueError from Nlambda for an unknown type (previously this
    printed an error and called exit()).
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    # get Nlambda (photons/m^2/s/um) at lambda_ (um)
    def Nlambda(self, lambda_):
        # blackbody, info = [T (K), solidangle]
        if self.type == 'BB':
            T = self.info[0]
            x = 14387.769 / lambda_ / T  # hc/(k T lambda), lambda in um
            # the 1e12 is the conversion from um^2 -> m^2
            return (2 / lambda_**4 * 2.99792458e14 * 1e12
                    * numpy.exp(-x) / (1. - numpy.exp(-x)) * self.info[1])
        raise ValueError('Invalid SED type: {}'.format(self.type))
# filter class
class Filter():
    """Filter transmission curve.

    Supported types:
      'STH'    -- smoothed tophat; info = [lambda_min, lambda_max] (um)
      'interp' -- tabulated; info shape (N,2): info[:,0] = wavelength,
                  info[:,1] = throughput

    Raises ValueError from Tlambda for an unknown type (previously this
    printed an error and called exit()).
    """

    def __init__(self, type, info):
        self.type = type
        self.info = copy.deepcopy(info)

    # get transmission at lambda_ (um)
    def Tlambda(self, lambda_):
        # smoothed tophat: tanh edges, 2% of the cut wavelength wide
        if self.type == 'STH':
            lmin = self.info[0]; dlmin = lmin * .02
            lmax = self.info[1]; dlmax = lmax * .02
            return (numpy.tanh((lambda_ - lmin) / dlmin)
                    - numpy.tanh((lambda_ - lmax) / dlmax)) / 2.
        # linear interpolation of a tabulated curve
        elif self.type == 'interp':
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        raise ValueError('Invalid filter type: {}'.format(self.type))
# load mask files
# maskfiles holds, per SCA, the pupil transmission maps ("rim" = pupil
# outline only, "full" = including obscurations) read from FITS files,
# plus bilinear interpolators at the native 2048^2 resolution and at a
# pyramid of 2x block-averaged lower resolutions.
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # pupil diameter, um
maskfiles.rim = []     # per-SCA rim mask arrays (normalized to peak 1)
maskfiles.full = []    # per-SCA full mask arrays (normalized to peak 1)
maskfiles.i_rim = []   # interpolators for the rim masks
maskfiles.i_full = []  # interpolators for the full masks
maskfiles.nSCA = 18    # number of sensor chip assemblies
for k in range(18):
    # FITS images are flipped in y on read
    inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
    maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
    inFile.close()
    inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
    maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
    inFile.close()
    # normalize
    maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
    maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
    N_in = maskfiles.N_in = 2048  # native mask resolution (samples per axis)
    x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
    y_in = numpy.copy(x_in)
    interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
    maskfiles.i_rim += [interp_spline]
    interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
    maskfiles.i_full += [interp_spline]
    # lower resolution masks: each level block-averages the native mask by 2^ku
    maskfiles.n_lores = 7
    for ku in range(1,maskfiles.n_lores):
        N2 = N_in//2**ku
        x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
        y_in = numpy.copy(x_in)
        interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
        maskfiles.i_rim += [interp_spline]
        interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
        maskfiles.i_full += [interp_spline]
# NOTE(review): with this append order, i_rim/i_full are grouped per SCA
# (n_lores entries per SCA), but oversamp_psf indexes them as
# scanum-1 + nSCA*imk, which assumes level-major grouping -- TODO confirm
# the intended ordering of these interpolator lists.
# SCA locations
# Center positions of the 18 sensor chip assemblies in the focal plane,
# in mm; array index = SCA number - 1.
sca = EmptyClass()
sca.size = 40.88 # mm (side length of one SCA)
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
    22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
    12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
# normalization applied to focal-plane coordinates in the focus-gradient
# terms of oversamp_psf -- units not shown here, TODO confirm
sca.scale = 133.08
# reference Zernikes
# Per-SCA reference Zernike coefficients: 4 rows per SCA (one per corner,
# see the bilinear weights in oversamp_psf), last 22 columns of the table.
# The 1.38 factor presumably rescales the tabulated units -- TODO confirm.
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data
# Column 0 = wavelength (um); columns 1+ = per-band curves, normalized here
# by the full circular aperture area pi/4 * D^2 (D converted um -> m),
# presumably converting effective area to dimensionless throughput.
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
    """Map of a sum of Zernike polynomials (Noll indexing).

    amp[0:Namp] are the amplitudes of Z1..ZNamp.  The map is sampled on an
    Ngrid x Ngrid grid with x, y = (-(1-1/Ngrid) .. +(1-1/Ngrid)) * scale.
    Polynomials carry the Noll normalization factors sqrt(n+1) (x sqrt(2)
    for m != 0).  Returns an (Ngrid, Ngrid) numpy array.
    """
    from math import factorial  # numpy.math was removed in NumPy >= 2.0

    xx = numpy.tile(numpy.linspace(-1+1/Ngrid, 1-1/Ngrid, Ngrid), (Ngrid, 1))
    yy = numpy.copy(xx.T)
    rho = numpy.sqrt(xx**2 + yy**2) * scale
    phi = numpy.arctan2(yy, xx)
    output = numpy.zeros((Ngrid, Ngrid))
    # smallest radial order nmax with (nmax+1)(nmax+2)/2 >= number of amplitudes
    nmax = 0
    namp = numpy.size(amp)
    while namp > (nmax+1)*(nmax+2)//2: nmax += 1
    # precompute rho^i and the azimuthal factors; trigphi[i] = cos(i phi),
    # trigphi[-i] = sin(i phi), so trigphi[m] is the right factor for any m
    rpows = numpy.ones((nmax+1, Ngrid, Ngrid))
    trigphi = numpy.ones((2*nmax+1, Ngrid, Ngrid))
    for i in range(1, nmax+1): rpows[i, :, :] = rho**i
    for i in range(0, nmax+1): trigphi[i, :, :] = numpy.cos(i*phi)
    for i in range(1, nmax+1): trigphi[-i, :, :] = numpy.sin(i*phi)
    # loop over radial order n and azimuthal order m
    for n in range(nmax+1):
        for m in range(-n, n+1, 2):
            # radial polynomial R_n^{|m|}(rho)
            Z = numpy.zeros((Ngrid, Ngrid))
            for k in range((n-abs(m))//2+1):
                coef = (-1)**k * factorial(n-k)/factorial(k) \
                    /factorial((n-m)//2-k)/factorial((n+m)//2-k)
                Z += coef * rpows[n-2*k, :, :]
            Z *= trigphi[m, :, :]
            # Noll index j for (n, m), including the even/odd-j sign rule
            j = n*(n+1)//2 + abs(m)
            if (-1)**j*(m+.5) < 0 or m == 0: j += 1
            factor = numpy.sqrt(n+1)
            if m != 0: factor *= numpy.sqrt(2)
            if j <= namp: output += factor * amp[j-1] * Z
    return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
    """Binary annular pupil mask: 1.0 where obs <= rho < 1, else 0.0.

    The grid is Nstep x Nstep with x, y = (-(1-1/Nstep) .. +(1-1/Nstep)) * scale;
    obs is the central obscuration radius as a fraction of the pupil radius.
    """
    axis = numpy.linspace(-1 + 1/Nstep, 1 - 1/Nstep, Nstep)
    gx, gy = numpy.meshgrid(axis, axis)
    r = scale * numpy.hypot(gx, gy)
    return numpy.where((r >= obs) & (r < 1), 1.0, 0.0)
def test_zernike():
    """Debug helper: print each of Z1..Z36 sampled on a small grid."""
    grid_n = 5
    for idx in range(36):
        coeffs = numpy.zeros(36)
        coeffs[idx] = 1
        zmap = zernike_map_noll(coeffs, grid_n, grid_n/(grid_n-1))
        print(' *** Zernike {:2d} ***'.format(idx+1))
        for row in range(grid_n):
            line = ''.join(' {:10.5f}'.format(zmap[row, col]) for col in range(grid_n))
            print(line)
        print('')
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
    """Monochromatic PSF from a wavefront and pupil mask.

    If mask has attribute N, its transmission map (mask.spline, or a
    bilinear interpolation of mask.array) is sampled onto the pupil grid
    and clipped to the unit circle; otherwise a 32%-obscured annulus is
    used.  The PSF is |FFT|^2 of the complex pupil, recentered and
    normalized to unit sum.
    """
    if hasattr(mask, 'N'):
        if hasattr(mask, 'spline'):
            pupil_interp = mask.spline
        else:
            # build a bilinear interpolator for the tabulated mask
            n_tab = 2048
            tab_axis = numpy.linspace(-1 + 1/n_tab, 1 - 1/n_tab, n_tab)
            pupil_interp = RectBivariateSplineZero(numpy.copy(tab_axis), tab_axis, mask.array, kx=1, ky=1)
        out_axis = numpy.linspace(-1 + 1/Nstep, 1 - 1/Nstep, Nstep) * scale
        pupil = pupil_interp(numpy.copy(out_axis), out_axis).astype(numpy.complex128)
        pupil *= make_mask_annulus(0, Nstep, scale)
    else:
        pupil = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
    # apply the wavefront phase and transform to the image plane
    pupil *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
    field = numpy.fft.ifft2(pupil)
    power = numpy.abs(field)**2
    # move the PSF peak from the array corner to the center (Nstep is even,
    # so fftshift performs exactly the quadrant swap)
    centered = numpy.fft.fftshift(power)
    return centered / numpy.sum(centered)
# helper function
def onescut(n):
    """Trapezoid-rule weights on n+1 nodes, scaled by 1/n (ends half-weighted)."""
    weights = numpy.ones(n + 1)
    weights[0] = weights[-1] = .5
    return weights / n
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
    """Gaussian-quadrature nodes and weights for the measure S(x) T(x).

    The measure is tabulated on npts points across [lmin, lmax]; nodes and
    weights are exact (w.r.t. that tabulation) for polynomials up to degree
    2*nOrder-1.  Returns (wavelengths, weights).
    """
    lmin, lmax, npts = wlrange[0], wlrange[1], wlrange[2]
    # tabulate the measure and its moments I_k about the window center
    nodes = numpy.linspace(lmin, lmax, npts)
    measure = numpy.array([sed.Nlambda(w) * filter.Tlambda(w) for w in nodes])
    center = numpy.mean(nodes)
    moments = numpy.array([numpy.sum((nodes - center)**k * measure)
                           for k in range(2 * nOrder)])
    # orthogonal polynomial p_n:
    # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
    coef = numpy.zeros(nOrder + 1)
    coef[0] = 1.
    A = numpy.empty((nOrder, nOrder))
    for k in range(nOrder):
        A[k, :] = moments[k:k + nOrder]
    coef[1:] = numpy.linalg.solve(A, -moments[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    xroot = numpy.sort(numpy.real(p.r))
    pprime = numpy.polyder(p)
    wroot = numpy.zeros_like(xroot)
    for i in range(nOrder):
        # Lagrange-style weight: integral of prod_{j != i}(x - x_j) over the
        # measure, divided by p'(x_i)
        reduced = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True)
        wroot[i] = numpy.sum(reduced.c[::-1] * moments[:nOrder]) / pprime(xroot[i])
    return xroot + center, wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
    """Polychromatic PSF: photon-weighted sum of monochromatic PSFs.

    The bandpass is located by scanning a hard-coded 0.4--2.5 um window for
    where the filter transmission exceeds 1e-4; the wavelength grid is dense
    inside the bandpass and coarse outside.  With addInfo.FastMode the grid
    is replaced by a 10-node Gaussian quadrature whose weights already fold
    in the SED and filter.  (Assumes the filter transmits somewhere in the
    window; otherwise wlmin/wlmax are unbound.)
    """
    # integration steps
    hard_lmin = 0.4
    hard_lmax = 2.5
    hard_Nl = 420
    # find the first/last fine-grid sample where the filter transmits (> 1e-4)
    ilmin = hard_Nl-1; ilmax = 0
    for il in range(1,hard_Nl):
        wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
        if filter.Tlambda(wl)>1e-4:
            if il<ilmin:
                ilmin=il
                wlmin=wl
            if il>ilmax:
                ilmax=il
                wlmax=wl
    # number of coarse steps below/above the band (about one per 6 fine steps)
    na = ilmin//6 + 1
    nb = (hard_Nl-ilmax)//6 + 1
    # wavelength nodes and trapezoid weights: coarse / fine (in band) / coarse
    wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
    dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
    # reduced coverage
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            # Gaussian quadrature over the bandpass only; these dwl are full
            # quadrature weights with SED x filter already included
            wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
    # make output PSF
    sumc = 0.
    output = numpy.zeros((Nstep,Nstep))
    for i in range(numpy.size(wl)):
        # photon-weighted contribution of this wavelength
        c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
        if hasattr(addInfo,'FastMode'):
            if addInfo.FastMode: c = dwl[i]
        this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
        sumc += c
        output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
    output /= sumc
    return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
#   .par -> offset parameters
# addInfo = additional information class:
#   .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Oversampled PSF at a focal-plane position: interpolated reference
    Zernikes plus fitted offsets, polychromatic PSF through the chosen
    filter, then a Gaussian smoothing applied in Fourier space."""
    # get information
    parOn = False
    if hasattr(offsets, 'par'): parOn = True
    # get Zernikes in microns: bilinear interpolation of the four corner
    # reference rows for this SCA (row 0 carries the top-left weight, row 1
    # bottom-left, row 2 bottom-right, row 3 top-right)
    ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
    wt_L = .5 - pos[0]/sca.size
    wt_R = .5 + pos[0]/sca.size
    wt_B = .5 - pos[1]/sca.size
    wt_T = .5 + pos[1]/sca.size
    psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
    # absolute focal-plane position (mm)
    xf = sca.x[scanum-1] + pos[0]
    yf = sca.y[scanum-1] + pos[1]
    # Zernike offsets: additive focus/astigmatism/coma adjustments, plus a
    # linear focus gradient across the focal plane
    if parOn:
        psi[3] += offsets.par[offset_index.foc ]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2]
        psi[7] += offsets.par[offset_index.coma1]
        psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
    # pupil-grid sampling (points per lambda/D at 1 um); 0.11 arcsec is the
    # native pixel scale, 648000/pi converts arcsec -> radians
    scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
    # filter curves
    if filt=='K':
        filter = Filter('STH', [1.95,2.30])
    elif filt=='F':
        filter = Filter('interp', FilterData[:,(0,7)])
    elif filt=='H':
        filter = Filter('interp', FilterData[:,(0,6)])
    elif filt=='W':
        filter = Filter('interp', FilterData[:,(0,5)])
    elif filt=='J':
        filter = Filter('interp', FilterData[:,(0,4)])
    elif filt=='Y':
        filter = Filter('interp', FilterData[:,(0,3)])
    elif filt=='Z':
        filter = Filter('interp', FilterData[:,(0,2)])
    elif filt=='R':
        filter = Filter('interp', FilterData[:,(0,1)])
    else:
        print('Error: unknown filter')
        exit()
    # transmission-weighted mean wavelength of the band -> effective sampling
    la = numpy.linspace(.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101): fla[i] = filter.Tlambda(la[i])
    scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
    # get the mask: choose the coarsest resolution level still adequate for
    # the sampled pupil region
    mask = EmptyClass(); mask.N=1
    imk = 0
    while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
    # F and K use the full mask (with obscurations); other bands the rim mask
    if filt=='F' or filt=='K':
        mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
    else:
        mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
    # x & y offsets: convert the requested centroid shift into tip/tilt
    # Zernike amplitudes (the /4. presumably normalizes pixel offset to
    # Z2/Z3 amplitude -- TODO confirm)
    if hasattr(addInfo, 'ctr'):
        d = .5*(1-1/ovsamp)
        psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # smooth: Gaussian with covariance (Cxx, Cxy, Cyy), default 0.09
    # (presumably native pixels^2 -- the k grid below is scaled to cycles
    # per native pixel); applied multiplicatively in Fourier space
    Cxx = Cyy = .09; Cxy = 0.
    if parOn:
        Cxx = .09 + offsets.par[offset_index.jxx ]
        Cxy = offsets.par[offset_index.jxy ]
        Cyy = .09 + offsets.par[offset_index.jyy ]
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep,Nstep))
    ky = numpy.zeros((Nstep,Nstep))
    for i in range(-Nstep//2, Nstep//2):
        kx[:,i] = abs(i)
        ky[i,:] = abs(i)
    kx *= 2.*numpy.pi*ovsamp/Nstep
    ky *= 2.*numpy.pi*ovsamp/Nstep
    output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return(output)
# parameters for next couple of functions
N_STD = 1024 # oversampled-grid size per axis; must be a multiple of 4
OV_STD = 8   # default oversampling factor (sub-pixels per native pixel)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
#   .F -> total counts (in e)
#   .ctr -> centroid (dx,dy)
#   .many -> @ 5x5 grid of offsets
#
#   .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
#   .bfe_overwrite => special mode to compute BFE with time dependent PSF
#     .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """Native-resolution psSize x psSize postage stamp of a star.

    Bins the oversampled PSF (flux addInfo.F) into native pixels.
    Options: .vtpe adds a vertical-trailing-pixel term, .many returns a
    (25, psSize, psSize) stack over a 5x5 grid of sub-pixel shifts, and
    .bfe applies a first-order brighter-fatter charge redistribution
    (.many and .bfe are mutually exclusive).
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # oversampled PSF scaled to total flux addInfo.F
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
    # bin ov x ov sub-pixel blocks into native pixels, centered on the array
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N//2+(i-psSize//2)*ov
        for j in range(psSize):
            y = N//2+(j-psSize//2)*ov
            out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
            if hasattr(addInfo, 'vtpe'):
                # vertical trailing pixel effect: add a fraction of the
                # flux from the pixel above
                out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
    if hasattr(addInfo,'many'):
        # 25 stamps on a 5x5 grid of integer sub-pixel offsets (-2..+2);
        # index k -> (dx, dy) = (k//5-2, k%5-2)
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                for k in range(25):
                    dy = k%5 - 2; dx = k//5 - 2
                    out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
    # BFE?
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo,'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal BFE: the boundary between columns i and i+1 shifts in
        # proportion to the charge difference across it
        ah = 0
        if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
        for i in range(psSize-1):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
                # mean flux density at the boundary (2 sub-pixel columns)
                mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
                dout[j,i] += shift*mflux
                dout[j,i+1] -= shift*mflux
        # vertical BFE (bfe_aplus enters with opposite sign)
        av = 0
        if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize-1):
                y = N//2+(j-psSize//2)*ov
                shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
                mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
                dout[j,i] += shift*mflux
                dout[j+1,i] -= shift*mflux
        out+=dout
        # bfe_overwrite mode returns only the BFE perturbation
        if hasattr(addInfo, 'bfe_overwrite'): out=dout
    return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive (Gaussian-weighted) moments of the PSF.

    Returns [A, xc, yc, T, e1, e2]: amplitude, centroid (native pixels),
    trace size T = Cxx+Cyy (native pixels^2), and the two ellipticity
    components from the second moments.
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # unit-flux, zero-offset copy of the PSF, convolved with a native pixel
    # (ov x ov boxcar) so moments refer to the pixel-sampled PSF
    addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
    Np = N+ov-1
    # moment format: A,x,y,Cxx,Cxy,Cyy (positions/covariances in sub-pixels)
    mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
    newmom = numpy.zeros_like(mom)
    con = .5 # convergence factor
    xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
    yy1 = numpy.copy(xx1.T)
    # fixed number of damped adaptive-moment iterations
    for iter in range(256):
        det = mom[3]*mom[5]-mom[4]**2
        xx = xx1-mom[1]
        yy = yy1-mom[2]
        # image weighted by the current best-fit elliptical Gaussian
        G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G*xx)
        newmom[2] = numpy.sum(G*yy)
        newmom[3] = numpy.sum(G*xx**2)
        newmom[4] = numpy.sum(G*xx*yy)
        newmom[5] = numpy.sum(G*yy**2)
        mom[0] = 2*newmom[0]
        # damped update of (x, y, Cxx, Cxy, Cyy) toward the measured moments
        err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
        mom[1:] += err*con
    # convert back to native pixels; T = Cxx+Cyy, e1/e2 from second moments
    return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
    """Return the Poisson-deviance chi^2 between two postage stamps.

    obs and theory are pixel arrays; var is the read noise variance added
    to both before comparison. The observed counts are clipped at a tiny
    positive floor so the logarithm stays finite.
    """
    clipped = numpy.maximum(obs + var, 1e-24)
    model = theory + var
    deviance = model - clipped - clipped * numpy.log(model / clipped)
    return(numpy.sum(deviance) * 2)
|
normal
|
{
"blob_id": "2ab6488276c74da8c3d9097d298fc53d1caf74b1",
"index": 6243,
"step-1": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n 
coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if 
hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = 
numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-2": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n 
coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n 
else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, 
x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n 
addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-3": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N = 5\n M = zernike_map_noll(psi, N, N / (N - 1))\n print(' *** Zernike {:2d} ***'.format(k + 1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j, i])\n print(out)\n print('')\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = 
wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter 
= Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * 
addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += 
dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-4": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef make_mask_annulus(obs, Nstep, scale):\n xx = numpy.tile(numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep), (\n Nstep, 1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx ** 2 + yy ** 2) * scale\n return numpy.where(numpy.logical_and(rho >= obs, rho < 1), numpy.ones((\n Nstep, Nstep)), numpy.zeros((Nstep, Nstep)))\n\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N = 5\n M = zernike_map_noll(psi, N, N / (N - 1))\n print(' *** Zernike {:2d} ***'.format(k + 1))\n for j in 
range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j, i])\n print(out)\n print('')\n\n\ndef mono_psf(psi, mask, scale, Nstep):\n if hasattr(mask, 'N'):\n if hasattr(mask, 'spline'):\n interp_spline = mask.spline\n else:\n N_in = 2048\n x_in = numpy.linspace(-1 + 1 / N_in, 1 - 1 / N_in, N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array,\n kx=1, ky=1)\n x2 = numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep) * scale\n y2 = numpy.copy(x2)\n amplitude = interp_spline(y2, x2).astype(numpy.complex128\n ) * make_mask_annulus(0, Nstep, scale)\n else:\n amplitude = make_mask_annulus(0.32, Nstep, scale).astype(numpy.\n complex128)\n amplitude *= numpy.exp(2.0j * numpy.pi * zernike_map_noll(psi, Nstep,\n scale))\n amplitude = numpy.fft.ifft2(amplitude)\n power = numpy.abs(amplitude) ** 2\n newpower = numpy.zeros_like(power)\n newpower[Nstep // 2:Nstep, Nstep // 2:Nstep] = power[0:Nstep // 2, 0:\n Nstep // 2]\n newpower[Nstep // 2:Nstep, 0:Nstep // 2] = power[0:Nstep // 2, Nstep //\n 2:Nstep]\n newpower[0:Nstep // 2, Nstep // 2:Nstep] = power[Nstep // 2:Nstep, 0:\n Nstep // 2]\n newpower[0:Nstep // 2, 0:Nstep // 2] = power[Nstep // 2:Nstep, Nstep //\n 2:Nstep]\n return newpower / numpy.sum(newpower)\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = 
numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\ndef poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):\n hard_lmin = 0.4\n hard_lmax = 2.5\n hard_Nl = 420\n ilmin = hard_Nl - 1\n ilmax = 0\n for il in range(1, hard_Nl):\n wl = hard_lmin + il / hard_Nl * (hard_lmax - hard_lmin)\n if filter.Tlambda(wl) > 0.0001:\n if il < ilmin:\n ilmin = il\n wlmin = wl\n if il > ilmax:\n ilmax = il\n wlmax = wl\n na = ilmin // 6 + 1\n nb = (hard_Nl - ilmax) // 6 + 1\n wl = numpy.concatenate((numpy.linspace(hard_lmin, wlmin, na + 1), numpy\n .linspace(wlmin, wlmax, ilmax - ilmin + 1), numpy.linspace(wlmax,\n hard_lmax, nb + 1)))\n dwl = numpy.concatenate(((wlmin - hard_lmin) * onescut(na), (wlmax -\n wlmin) * onescut(ilmax - ilmin), (hard_lmax - wlmax) * onescut(nb)))\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n wl, dwl = gq_weights(sed, filter, 10, [wlmin, wlmax, ilmax -\n ilmin + 1])\n sumc = 0.0\n output = numpy.zeros((Nstep, Nstep))\n for i in range(numpy.size(wl)):\n c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n c = dwl[i]\n this_psi = numpy.copy(psi) / wl[i]\n sumc += c\n output += c * mono_psf(this_psi, mask, scale_1um * wl[i], Nstep)\n output /= sumc\n return output\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n 
xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, 
Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = 
numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= 
mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-5": "import numpy\nimport numpy.fft\nimport numpy.linalg\nimport copy\nfrom astropy.io import fits\nfrom scipy.interpolate import RectBivariateSpline\nfrom scipy.signal import convolve\nimport offset_index\n\n# some basic definitions\npsSize = 9 # psSize x psSize postage stamps of stars\n\n# zero padded RectBivariateSpline, if on\ndef RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1)+2)\n y2[1:-1] = y1\n y2[0] = 2*y2[1]-y2[2]\n y2[-1] = 2*y2[-2]-y2[-3]\n x2 = numpy.zeros(numpy.size(x1)+2)\n x2[1:-1] = x1\n x2[0] = 2*x2[1]-x2[2]\n x2[-1] = 2*x2[-2]-x2[-3]\n map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))\n map2[1:-1,1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\nclass EmptyClass():\n pass\n\n# spectral energy distribution class\nclass SpectralEnergyDistribution():\n\n # make an SED -- several options for type\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n # get Nlambda (photons/m^2/s/um) at lambda_ (um)\n def Nlambda(self, lambda_):\n\n # blackbody, info = [T (K), solidangle]\n if self.type=='BB':\n T = self.info[0]\n x = 14387.769/lambda_/T # hc/(kTlambda)\n return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])\n # the 1e12 is the conversion from um^2 -> m^2\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n# filter class\nclass Filter():\n\n # make a filter -- several options for type\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n # get transmission\n def Tlambda(self, lambda_):\n\n # smoothed tophat\n if self.type=='STH':\n lmin = self.info[0]; dlmin = lmin*.02\n lmax = self.info[1]; dlmax = lmax*.02\n return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)\n # interpolated file\n # info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput\n elif self.type=='interp':\n 
return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n# load mask files\nmaskfiles = EmptyClass()\nmaskfiles.D = 2292981.05344 # um\nmaskfiles.rim = []\nmaskfiles.full = []\nmaskfiles.i_rim = []\nmaskfiles.i_full = []\nmaskfiles.nSCA = 18\nfor k in range(18):\n inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))\n maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]\n inFile.close()\n inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))\n maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]\n inFile.close()\n\n # normalize\n maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])\n maskfiles.full[k] /= numpy.amax(maskfiles.full[k])\n\n N_in = maskfiles.N_in = 2048\n x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)\n maskfiles.i_rim += [interp_spline]\n interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)\n maskfiles.i_full += [interp_spline]\n\n # lower resolution masks\n maskfiles.n_lores = 7\n for ku in range(1,maskfiles.n_lores):\n N2 = N_in//2**ku\n x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)\n maskfiles.i_rim += [interp_spline]\n interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)\n maskfiles.i_full += [interp_spline]\n\n# SCA locations\nsca = EmptyClass()\nsca.size = 40.88 # mm\nsca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,\n 22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])\nsca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,\n 12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, 
-51.06])\nsca.scale = 133.08\n\n# reference Zernikes\nZernRef = EmptyClass()\nZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38\n\n# filter data\nFilterData = numpy.loadtxt('pupils/filter.dat')\nFilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2\n\n# makes map of Zernikes of a given amplitude\n# amp[0:Namp] = Z1 ... ZNamp\n# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)\n#\ndef zernike_map_noll(amp, Ngrid, scale):\n xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx**2+yy**2)*scale\n phi = numpy.arctan2(yy,xx)\n output = numpy.zeros((Ngrid,Ngrid))\n nmax = 0\n namp = numpy.size(amp)\n while namp>(nmax+1)*(nmax+2)//2: nmax+=1\n rpows = numpy.ones((nmax+1,Ngrid,Ngrid))\n trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))\n for i in range(1,nmax+1): rpows[i,:,:] = rho**i\n for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)\n for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)\n # loop over Zernikes\n for n in range(nmax+1):\n for m in range(-n,n+1,2):\n Z = numpy.zeros((Ngrid,Ngrid))\n for k in range((n-abs(m))//2+1):\n coef = (-1)**k * numpy.math.factorial(n-k)/numpy.math.factorial(k) \\\n /numpy.math.factorial((n-m)//2-k)/numpy.math.factorial((n+m)//2-k) \n Z += coef * rpows[n-2*k,:,:]\n #if m>=0:\n # Z *= numpy.cos(m*phi)\n #else:\n # Z *= numpy.sin(-m*phi)\n Z *= trigphi[m,:,:]\n j = n*(n+1)//2 + abs(m)\n if (-1)**j*(m+.5)<0 or m==0: j += 1\n #print(n,m,j)\n factor = numpy.sqrt(n+1)\n if m!=0: factor *= numpy.sqrt(2)\n if j<=namp: output += factor * amp[j-1] * Z\n return(output)\n\n# make annular mask of given obstruction (fraction) and scale\ndef make_mask_annulus(obs, Nstep, scale):\n xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx**2+yy**2)*scale\n return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))\n\ndef 
test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N=5\n M = zernike_map_noll(psi, N, N/(N-1))\n print(' *** Zernike {:2d} ***'.format(k+1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j,i])\n print(out)\n print('')\n\n# psi is a vector of Zernikes, in wavelengths\n# mask information: (currently none)\n# scale = sampling (points per lambda/D)\n# Nstep = # grid points\n# output normalized to sum to 1\ndef mono_psf(psi, mask, scale, Nstep):\n if hasattr(mask, 'N'):\n if hasattr(mask, 'spline'):\n interp_spline = mask.spline\n else:\n N_in = 2048\n x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)\n x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale\n y2 = numpy.copy(x2)\n amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)\n else:\n amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)\n amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))\n amplitude = numpy.fft.ifft2(amplitude)\n power = numpy.abs(amplitude)**2\n # shift to center\n newpower = numpy.zeros_like(power)\n newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]\n newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]\n newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]\n newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]\n return(newpower/numpy.sum(newpower))\n\n# helper function\ndef onescut(n):\n array = numpy.ones((n+1))\n array[0] = array[-1] = .5\n return(array/n)\n\n# Gaussian quadrature weights across a filter\n# sed = spectral energy distribution\n# filter = filter information (incl. 
bandpass)\n# nOrder = order of polynomial (number of nodes)\n# wlrange = [lmin,lmax,npts] in um\n#\n# returns wavelengths, weights\ndef gq_weights(sed, filter, nOrder, wlrange):\n # unpack info\n lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]\n\n # build integrals I_k = int x^k S(x) F(x) dx\n x = numpy.linspace(lmin,lmax,npts)\n c = numpy.zeros((npts))\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones((npts))\n I = numpy.zeros((2*nOrder))\n lctr = numpy.mean(x)\n for k in range(2*nOrder):\n I[k] = numpy.sum(o*(x-lctr)**k*c)\n # orthogonal polynomial p_n\n # require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or\n # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1\n coef = numpy.zeros((nOrder+1))\n coef[0] = 1.\n A = numpy.zeros((nOrder,nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k,j] = I[j+k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)\n wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot,wroot\n\n# psi is a vector of Zernikes, in microns\n# mask information: (currently none)\n# sed = spectral energy distribution\n# scale = sampling (points per lambda/D @ 1 um)\n# Nstep = # grid points\n# filter = filter information (incl. 
bandpass)\n# addInfo = class for general additional information\n# output normalized to sum to 1\ndef poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):\n\n # integration steps\n hard_lmin = 0.4\n hard_lmax = 2.5\n hard_Nl = 420\n\n ilmin = hard_Nl-1; ilmax = 0\n for il in range(1,hard_Nl):\n wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)\n if filter.Tlambda(wl)>1e-4:\n if il<ilmin:\n ilmin=il\n wlmin=wl\n if il>ilmax:\n ilmax=il\n wlmax=wl\n na = ilmin//6 + 1\n nb = (hard_Nl-ilmax)//6 + 1\n wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))\n dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))\n #print(wl,dwl,numpy.size(wl),numpy.size(dwl))\n\n # reduced coverage\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])\n\n # make output PSF\n sumc = 0.\n output = numpy.zeros((Nstep,Nstep))\n for i in range(numpy.size(wl)):\n c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode: c = dwl[i]\n this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront\n sumc += c\n output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)\n #print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))\n output /= sumc\n\n return(output)\n\n# make oversampled PSF at given SCA, position\n#\n# sed = source SED\n# filt = filter (letter: RZYJHFK)\n# ovsamp = oversampling factor\n# Nstep = number of samples in each axis\n# scanum = SCA number (1..18)\n# pos = (x,y) position on SCA in mm (0,0)=center\n# offsets = adjustment parameters\n# .par -> offset parameters\n# addInfo = additional information class:\n# .ctr -> centroid (dx,dy) \ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n\n # get information\n parOn = False\n if 
hasattr(offsets, 'par'): parOn = True\n\n # get Zernikes in microns\n ZR = ZernRef.data[4*(scanum-1):4*scanum,:]\n wt_L = .5 - pos[0]/sca.size\n wt_R = .5 + pos[0]/sca.size\n wt_B = .5 - pos[1]/sca.size\n wt_T = .5 + pos[1]/sca.size\n psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]\n\n xf = sca.x[scanum-1] + pos[0]\n yf = sca.y[scanum-1] + pos[1]\n\n # Zernike offsets\n if parOn:\n psi[3] += offsets.par[offset_index.foc ]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n\n psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale\n\n scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D\n #print(scale_1um)\n\n # filter curves\n if filt=='K':\n filter = Filter('STH', [1.95,2.30])\n elif filt=='F':\n filter = Filter('interp', FilterData[:,(0,7)])\n elif filt=='H':\n filter = Filter('interp', FilterData[:,(0,6)])\n elif filt=='W':\n filter = Filter('interp', FilterData[:,(0,5)])\n elif filt=='J':\n filter = Filter('interp', FilterData[:,(0,4)])\n elif filt=='Y':\n filter = Filter('interp', FilterData[:,(0,3)])\n elif filt=='Z':\n filter = Filter('interp', FilterData[:,(0,2)])\n elif filt=='R':\n filter = Filter('interp', FilterData[:,(0,1)])\n else:\n print('Error: unknown filter')\n exit()\n\n la = numpy.linspace(.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101): fla[i] = filter.Tlambda(la[i])\n scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)\n\n # get the mask\n mask = EmptyClass(); mask.N=1\n imk = 0\n while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1\n #print(' *** ', Nstep, scale, scale/scale_1um, imk)\n if filt=='F' or filt=='K':\n mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]\n else:\n mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]\n\n # x & y offsets\n if hasattr(addInfo, 'ctr'):\n 
d = .5*(1-1/ovsamp)\n psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.\n\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n\n # smooth\n Cxx = Cyy = .09; Cxy = 0.\n if parOn:\n Cxx = .09 + offsets.par[offset_index.jxx ]\n Cxy = offsets.par[offset_index.jxy ]\n Cyy = .09 + offsets.par[offset_index.jyy ]\n\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep,Nstep))\n ky = numpy.zeros((Nstep,Nstep))\n for i in range(-Nstep//2, Nstep//2):\n kx[:,i] = abs(i)\n ky[i,:] = abs(i)\n kx *= 2.*numpy.pi*ovsamp/Nstep\n ky *= 2.*numpy.pi*ovsamp/Nstep\n output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n\n return(output)\n\n# parameters for next couple of functions\nN_STD = 1024 # must be a multiple of 4\nOV_STD = 8\n\n# make oversampled PSF at given SCA, position\n#\n# sed = source SED\n# filt = filter (letter: RZYJHFK)\n# scanum = SCA number (1..18)\n# pos = (x,y) position on SCA in mm (0,0)=center\n# offsets = adjustment parameters (placeholder)\n# addInfo = additional information class:\n# .F -> total counts (in e)\n# .ctr -> centroid (dx,dy)\n# .many -> @ 5x5 grid of offsets\n#\n# .bfe = add bfe (can include .bfe_a, .bfe_aplus)\n#\n# .bfe_overwrite => special mode to compute BFE with time dependent PSF\n# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD # must be even\n ov = OV_STD\n if hasattr(addInfo,'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n N = N//2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])\n if hasattr(addInfo, 'vtpe'):\n out[j,i] += addInfo.vtpe * 
numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])\n if hasattr(addInfo,'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n for k in range(25):\n dy = k%5 - 2; dx = k//5 - 2\n out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])\n\n # BFE?\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo,'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n # horizontal BFE\n ah = 0\n if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus\n for i in range(psSize-1):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure\n if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.\n mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.\n dout[j,i] += shift*mflux\n dout[j,i+1] -= shift*mflux\n # vertical BFE\n av = 0\n if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize-1):\n y = N//2+(j-psSize//2)*ov\n shift = ov * av * (out[j+1,i]-out[j,i]) / 2. 
# in sub-pixels, average over exposure\n if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.\n mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.\n dout[j,i] += shift*mflux\n dout[j+1,i] -= shift*mflux\n out+=dout\n\n if hasattr(addInfo, 'bfe_overwrite'): out=dout\n\n return(out)\n\n#\n# same input format but returns moments of the PSF\n# A, xc, yc, T, e1, e2\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD # must be even\n ov = OV_STD\n if hasattr(addInfo,'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n N = N//2\n addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2\n Np = N+ov-1\n # moment format: A,x,y,Cxx,Cxy,Cyy\n mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)\n newmom = numpy.zeros_like(mom)\n con = .5 # convergence factor\n xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3]*mom[5]-mom[4]**2\n xx = xx1-mom[1]\n yy = yy1-mom[2]\n G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G*xx)\n newmom[2] = numpy.sum(G*yy)\n newmom[3] = numpy.sum(G*xx**2)\n newmom[4] = numpy.sum(G*xx*yy)\n newmom[5] = numpy.sum(G*yy**2)\n mom[0] = 2*newmom[0]\n err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.\n mom[1:] += err*con\n return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))\n\n# returns chi^2\n# var = read noise variance\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs+var, 1e-24)\n return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)\n",
"step-ids": [
13,
14,
15,
18,
23
]
}
|
[
13,
14,
15,
18,
23
] |
def readfasta (fasta):
input = open(fasta, 'r')
seqs = {}
for line in input:
if line[0] == '>':
name = line[1:].rstrip()
seqs[name] = []
else:
seqs[name].append(line.rstrip())
for name in seqs:
seqs[name] = ''.join(seqs[name])
return seqs
seqs = readfasta('cons.fasta')
length = len(seqs.values()[0])
nts = dict(A = [0] * length, C = [0] * length, G = [0] * length, T = [0] * length)
consensus = [None] * length
for name in seqs:
n = 0
for char in seqs[name]:
if char in nts: nts[char][n] += 1
else: print 'error'
n += 1
for n in range(0, length):
max_n = 0
max_nt = ''
for nt in nts:
if nts[nt][n] > max_n:
max_n = nts[nt][n]
max_nt = nt
consensus[n] = max_nt
consensus = ''.join(consensus)
print consensus
for k, v in nts.iteritems():
print str(k) + ": " + ' '.join(map(lambda k: str(k), v))
|
normal
|
{
"blob_id": "6072fc22872ee75c9501ac607a86ee9137af6a5d",
"index": 4918,
"step-1": "def readfasta (fasta):\r\n input = open(fasta, 'r')\r\n seqs = {}\r\n for line in input:\r\n if line[0] == '>':\r\n name = line[1:].rstrip()\r\n seqs[name] = [] \r\n else:\r\n seqs[name].append(line.rstrip())\r\n for name in seqs:\r\n seqs[name] = ''.join(seqs[name])\r\n return seqs\r\n\r\nseqs = readfasta('cons.fasta')\r\nlength = len(seqs.values()[0])\r\n\r\nnts = dict(A = [0] * length, C = [0] * length, G = [0] * length, T = [0] * length)\r\nconsensus = [None] * length\r\n\r\nfor name in seqs:\r\n n = 0\r\n for char in seqs[name]:\r\n if char in nts: nts[char][n] += 1\r\n else: print 'error'\r\n n += 1\r\n\r\nfor n in range(0, length):\r\n max_n = 0\r\n max_nt = ''\r\n for nt in nts:\r\n if nts[nt][n] > max_n:\r\n max_n = nts[nt][n]\r\n max_nt = nt\r\n consensus[n] = max_nt\r\nconsensus = ''.join(consensus)\r\n\r\nprint consensus\r\nfor k, v in nts.iteritems():\r\n print str(k) + \": \" + ' '.join(map(lambda k: str(k), v))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 7/02/2014
@author: marco
Generador de ambientes FACIL 2014
'''
import wx
from formgenerador import FrameGeneral
from Dial_Pagina import ObjPagina
class IncioInterface(FrameGeneral):
def __init__(self):
#self.log = ObLog('Inicio programa')
#self.log.setNivel(0) #debug
FrameGeneral.__init__(self,None)
FrameGeneral.SetTitle(self,u"Administrador de Aplicacion FACIL")
#iconFile = u"imagenes/2s.ico"
#FrameGeneral.SetIcon(self,wx.Icon(iconFile, wx.BITMAP_TYPE_ICO))
#self.Bind(wx.EVT_MENU, self.onConfig,self.f2s_mConfig)
self.__inicio()
self.dibujarPizarra()
#Eventos Menu
self.Bind(wx.EVT_MENU,self.onDefPagina,self.f2s_menuTamPapel)
self.f2s_Pizarra.Bind(wx.EVT_PAINT, self.onPaint)
def __inicio(self):
#Asignacion Variables Globales
self.Guadar=False
self.borde=20
self.AnchoPagina=8.5 * 72
self.AltoPagina = 11 * 72
self.objfacil=[]
self.objFormatos=[]
self._initBuffer()
def onDefPagina(self,event):
pagina= ObjPagina(self.Parent)
if pagina.orientar==None :
return
print pagina.orientar
print pagina.papel
if pagina.orientar ==0 or pagina.orientar==2: #Vertical
self.AnchoPagina=pagina.papel[0] * 72
self.AltoPagina=pagina.papel[1] * 72
else: #Horizontal
self.AnchoPagina=pagina.papel[1] * 72
self.AltoPagina=pagina.papel[0] * 72
print self.AnchoPagina
print self.AltoPagina
self.dibujarPizarra()
self.wrapDC = lambda dc: dc
def dibujarPizarra(self):
print "dibujar Pizarra"
self.f2s_Pizarra.SetBackgroundColour('white')
self.f2s_Pizarra.EnableScrolling(True,True)
self.f2s_Pizarra.SetScrollbars(20, 20, (self.AnchoPagina + self.borde *2) / 20, (self.AltoPagina + self.borde *2) / 20)
def onPaint(self, event):
print "onPaint"
"""
Called when the window is exposed.
"""
# Create a buffered paint DC. It will create the real
# wx.PaintDC and then blit the bitmap to it when dc is
# deleted.
dc = wx.BufferedPaintDC(self.f2s_Pizarra, self.buffer)
# On Windows, if that's all we do things look a little rough
# So in order to make scrolling more polished-looking
# we iterate over the exposed regions and fill in unknown
# areas with a fall-back pattern.
dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
dc.DrawRectangle(self.borde, self.borde, self.AnchoPagina, self.AltoPagina)
print self.borde, self.borde, self.AnchoPagina, self.AltoPagina
if wx.Platform != '__WXMSW__':
return
print "Windows?"
# First get the update rects and subtract off the part that
# self.buffer has correct already
region = self.f2s_Pizarra.GetUpdateRegion()
panelRect = self.f2s_Pizarra.GetClientRect()
offset = list(self.f2s_Pizarra.CalcUnscrolledPosition(0,0))
offset[0] -= self.saved_offset[0]
offset[1] -= self.saved_offset[1]
region.Subtract(-offset[0],- offset[1],panelRect.Width, panelRect.Height)
# Now iterate over the remaining region rects and fill in with a pattern
rgn_iter = wx.RegionIterator(region)
if rgn_iter.HaveRects():
self.setBackgroundMissingFillStyle(dc)
offset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)
while rgn_iter:
r = rgn_iter.GetRect()
if r.Size != self.f2s_Pizarra.ClientSize:
dc.DrawRectangleRect(r)
rgn_iter.Next()
#def onConfig(self,env):
#self.log.logger.info('onCofig')
#image=ObjConfig(self.Parent,self.log.getNivel())
def _initBuffer(self):
print "_initBuffer"
"""Initialize the bitmap used for buffering the display."""
size = self.f2s_Pizarra.GetSize()
self.buffer = wx.EmptyBitmap(max(1,size.width),max(1,size.height))
dc = wx.BufferedDC(None, self.buffer)
dc.SetBackground(wx.Brush(self.f2s_Pizarra.GetBackgroundColour()))
dc.Clear()
#self.drawContents(dc)
del dc # commits all drawing to the buffer
self.saved_offset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)
self._reInitBuffer = False
class ObjInicio():
def __init__(self,ActDebug=False):
# Lanzamos aplicación.
#ActDebug=True
#
#print "inicio"
#if ActDebug:
# pass
# aplicacion = ObjDebug(redirect=True)
#else:
# aplicacion=wx.PySimpleApp()
# frame_usuario = IncioInterface()
# frame_usuario.Maximize()
# frame_usuario.Show()
aplicacion=wx.PySimpleApp()
frame_usuario = IncioInterface()
#frame_usuario.Maximize()
frame_usuario.Show()
aplicacion.MainLoop()
aplicacion.Destroy()
if __name__ == '__main__':
# Lanzamos aplicación.
j=ObjInicio(False)
|
normal
|
{
"blob_id": "9bd1fd2df7da068ac8aa4e6e24fe14d163a7e6b3",
"index": 2362,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n'''\nCreated on 7/02/2014\n\n@author: marco\nGenerador de ambientes FACIL 2014\n'''\n\n\nimport wx\n\nfrom formgenerador import FrameGeneral\nfrom Dial_Pagina import ObjPagina\n\n\nclass IncioInterface(FrameGeneral):\n\tdef __init__(self):\n\t\t#self.log = ObLog('Inicio programa')\n\t\t#self.log.setNivel(0) #debug\n\t\t\n\t\tFrameGeneral.__init__(self,None)\n\t\tFrameGeneral.SetTitle(self,u\"Administrador de Aplicacion FACIL\")\n\t\t#iconFile = u\"imagenes/2s.ico\"\n\t\t#FrameGeneral.SetIcon(self,wx.Icon(iconFile, wx.BITMAP_TYPE_ICO))\n\t\t#self.Bind(wx.EVT_MENU, self.onConfig,self.f2s_mConfig)\n\t\tself.__inicio()\n\t\tself.dibujarPizarra()\n\t\t\n\t\t#Eventos Menu\n\t\tself.Bind(wx.EVT_MENU,self.onDefPagina,self.f2s_menuTamPapel)\n\t\tself.f2s_Pizarra.Bind(wx.EVT_PAINT, self.onPaint)\n\t\t\n\tdef __inicio(self):\n\t\t#Asignacion Variables Globales\n\t\tself.Guadar=False\n\t\tself.borde=20\n\t\tself.AnchoPagina=8.5 * 72\n\t\tself.AltoPagina = 11 * 72\n\t\tself.objfacil=[]\n\t\tself.objFormatos=[]\n\t\tself._initBuffer()\n\n\t\t\n\tdef onDefPagina(self,event):\n\t\tpagina= ObjPagina(self.Parent)\n\t\tif pagina.orientar==None : \n\t\t\treturn\n\t\t\n\t\tprint pagina.orientar\n\t\tprint pagina.papel\n\t\t\n\t\tif pagina.orientar ==0 or pagina.orientar==2: \t#Vertical\n\t\t\tself.AnchoPagina=pagina.papel[0] * 72\n\t\t\tself.AltoPagina=pagina.papel[1] * 72\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t#Horizontal\n\t\t\tself.AnchoPagina=pagina.papel[1] * 72\n\t\t\tself.AltoPagina=pagina.papel[0] * 72\n\t\t\t\n\t\tprint self.AnchoPagina\n\t\tprint self.AltoPagina\n\t\t\n\t\tself.dibujarPizarra()\t\n\t\tself.wrapDC = lambda dc: dc\n\t\t\n\t\t\n\tdef dibujarPizarra(self):\n\t\tprint \"dibujar Pizarra\"\n\t\tself.f2s_Pizarra.SetBackgroundColour('white')\n\t\tself.f2s_Pizarra.EnableScrolling(True,True)\n\t\tself.f2s_Pizarra.SetScrollbars(20, 20, (self.AnchoPagina + self.borde *2) / 20, (self.AltoPagina + self.borde *2) / 
20)\n\t\t\n\t\t\n\t\t\n\t\n\tdef onPaint(self, event):\n\t\tprint \"onPaint\"\n\t\t\"\"\"\n\t\tCalled when the window is exposed.\n\t\t\"\"\"\n\t\t# Create a buffered paint DC. It will create the real\n\t\t# wx.PaintDC and then blit the bitmap to it when dc is\n\t\t# deleted.\n\t\tdc = wx.BufferedPaintDC(self.f2s_Pizarra, self.buffer)\n\n\t\t# On Windows, if that's all we do things look a little rough\n\t\t# So in order to make scrolling more polished-looking\n\t\t# we iterate over the exposed regions and fill in unknown\n\t\t# areas with a fall-back pattern.\n\n\t\tdc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))\n\t\tdc.DrawRectangle(self.borde, self.borde, self.AnchoPagina, self.AltoPagina)\n\t\tprint self.borde, self.borde, self.AnchoPagina, self.AltoPagina\n\n\n\t\tif wx.Platform != '__WXMSW__':\n\t\t\treturn\n\t\t\n\t\tprint \"Windows?\"\n\n\n\t\t# First get the update rects and subtract off the part that\n\t\t# self.buffer has correct already\n\t\tregion = self.f2s_Pizarra.GetUpdateRegion()\n\t\tpanelRect = self.f2s_Pizarra.GetClientRect()\n\t\toffset = list(self.f2s_Pizarra.CalcUnscrolledPosition(0,0))\n\t\toffset[0] -= self.saved_offset[0]\n\t\toffset[1] -= self.saved_offset[1]\n\t\tregion.Subtract(-offset[0],- offset[1],panelRect.Width, panelRect.Height)\n\n\t\t# Now iterate over the remaining region rects and fill in with a pattern\n\t\trgn_iter = wx.RegionIterator(region)\n\t\tif rgn_iter.HaveRects():\n\t\t\tself.setBackgroundMissingFillStyle(dc)\n\t\t\toffset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)\n\t\twhile rgn_iter:\n\t\t\tr = rgn_iter.GetRect()\n\t\t\tif r.Size != self.f2s_Pizarra.ClientSize:\n\t\t\t\tdc.DrawRectangleRect(r)\n\t\t\trgn_iter.Next()\n\n\t\n\t\n\t#def onConfig(self,env):\n\t\t#self.log.logger.info('onCofig')\n\t\t#image=ObjConfig(self.Parent,self.log.getNivel())\n\n\n\n\n\tdef _initBuffer(self):\n\t\tprint \"_initBuffer\"\n\t\t\"\"\"Initialize the bitmap used for buffering the display.\"\"\"\n\t\tsize = 
self.f2s_Pizarra.GetSize()\n\t\tself.buffer = wx.EmptyBitmap(max(1,size.width),max(1,size.height))\n\t\tdc = wx.BufferedDC(None, self.buffer)\n\t\tdc.SetBackground(wx.Brush(self.f2s_Pizarra.GetBackgroundColour()))\n\t\tdc.Clear()\n\t\t#self.drawContents(dc)\n\t\tdel dc # commits all drawing to the buffer\n\t\tself.saved_offset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)\n\t\tself._reInitBuffer = False\n\n\n\nclass ObjInicio():\n\tdef __init__(self,ActDebug=False):\n\t\t# Lanzamos aplicación.\n\t\t#ActDebug=True\n\t\t# \n\t\t#print \"inicio\"\n\t\t#if ActDebug:\n\t\t#\tpass\n\t\t#\taplicacion = ObjDebug(redirect=True)\n\t\t#else:\n\t\t#\taplicacion=wx.PySimpleApp()\n\t\t#\tframe_usuario = IncioInterface()\t\t\t\n\t\t#\tframe_usuario.Maximize()\n\t\t#\tframe_usuario.Show()\n\t\t\n\t\taplicacion=wx.PySimpleApp()\n\t\tframe_usuario = IncioInterface()\t\t\t\n\t\t#frame_usuario.Maximize()\n\t\tframe_usuario.Show()\n\t\taplicacion.MainLoop()\t\t\n\t\taplicacion.Destroy()\n\n\n\n\n\n\nif __name__ == '__main__':\n\t# Lanzamos aplicación.\n\t\n\tj=ObjInicio(False)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np

# Pretrained ResNet-50 (ImageNet weights) with its classification head kept.
model = ResNet50(weights='imagenet', include_top=True)

# Read the picture at the network's native input resolution and turn it into
# a preprocessed batch of one image: shape (1, 224, 224, 3).
pil_img = image.load_img('my_picture.jpg', target_size=(224, 224))
batch = preprocess_input(image.img_to_array(pil_img)[np.newaxis, ...])

# Classify and print the ten most likely ImageNet labels for the picture.
preds = model.predict(batch)
decoded_predictions = decode_predictions(preds, top=10)[0]
print(decoded_predictions)
|
normal
|
{
"blob_id": "1af6e66c19078a9ee971f608daa93247911d8406",
"index": 5881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(decoded_predictions)\n",
"step-3": "<mask token>\nmodel = ResNet50(weights='imagenet', include_top=True)\nimg_input = image.load_img('my_picture.jpg', target_size=(224, 224))\nimg_input = image.img_to_array(img_input)\nimg_input = preprocess_input(img_input[np.newaxis, ...])\npreds = model.predict(img_input)\ndecoded_predictions = decode_predictions(preds, top=10)[0]\nprint(decoded_predictions)\n",
"step-4": "from tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nimport numpy as np\nmodel = ResNet50(weights='imagenet', include_top=True)\nimg_input = image.load_img('my_picture.jpg', target_size=(224, 224))\nimg_input = image.img_to_array(img_input)\nimg_input = preprocess_input(img_input[np.newaxis, ...])\npreds = model.predict(img_input)\ndecoded_predictions = decode_predictions(preds, top=10)[0]\nprint(decoded_predictions)\n",
"step-5": "from tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nimport numpy as np\n\nmodel = ResNet50(weights='imagenet', # Learned weights on imagenet\n include_top=True)\n\nimg_input = image.load_img('my_picture.jpg', target_size=(224, 224))\nimg_input = image.img_to_array(img_input)\nimg_input = preprocess_input(img_input[np.newaxis, ...])\n\npreds = model.predict(img_input)\ndecoded_predictions = decode_predictions(preds, top=10)[0]\n\nprint(decoded_predictions)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
"""Advent-of-Code-style MD5 search: find the smallest integer suffix such
that md5(puzzle_input + suffix) starts with six zero hex digits."""
import sys
import hashlib

# Usage
if len(sys.argv) != 2:
    print("usage: part2.py puzzle_input")
    exit(1)

# Get secret key from the command line.
puzzle_input = sys.argv[1]
input_num = 0

# Brute force: hash the key with increasing numeric suffixes until the hex
# digest starts with six zeros.  The key is encoded once, outside the loop,
# instead of being re-encoded on every iteration.
prefix = puzzle_input.encode('utf-8')
for i in range(sys.maxsize):
    digest = hashlib.md5(prefix + str(i).encode('utf-8')).hexdigest()
    if digest.startswith('000000'):  # must start with 6 zeros
        input_num = i
        break

# Print results
print(f'puzzle_input: {puzzle_input} solved with {input_num}')
print("\ndone.")
|
normal
|
{
"blob_id": "1219f7b7ac335f3a69e289d1ab2b6318a2aef23f",
"index": 1900,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) != 2:\n print('usage: part2.py puzzle_input')\n exit(1)\n<mask token>\nfor i in range(sys.maxsize):\n digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')\n ).hexdigest()\n if digest.startswith('000000'):\n input_num = i\n break\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\nprint('\\ndone.')\n",
"step-3": "<mask token>\nif len(sys.argv) != 2:\n print('usage: part2.py puzzle_input')\n exit(1)\npuzzle_input = sys.argv[1]\ninput_num = 0\nfor i in range(sys.maxsize):\n digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')\n ).hexdigest()\n if digest.startswith('000000'):\n input_num = i\n break\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\nprint('\\ndone.')\n",
"step-4": "import sys\nimport hashlib\nif len(sys.argv) != 2:\n print('usage: part2.py puzzle_input')\n exit(1)\npuzzle_input = sys.argv[1]\ninput_num = 0\nfor i in range(sys.maxsize):\n digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')\n ).hexdigest()\n if digest.startswith('000000'):\n input_num = i\n break\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\nprint('\\ndone.')\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\nimport hashlib\n\n# Usage\nif len(sys.argv) != 2:\n\tprint(\"usage: part2.py puzzle_input\")\n\texit(1)\n\n# Get Secret\npuzzle_input = sys.argv[1]\ninput_num = 0\n\n# Calcuate \nfor i in range(sys.maxsize):\n\tdigest = hashlib.md5(puzzle_input.encode('utf-8')+str(i).encode('utf-8')).hexdigest()\n\tif (digest.startswith('000000')): # must start with 6 zeros\n\t\tinput_num = i\n\t\tbreak;\n\n# Print Results\t\t\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\n\nprint(\"\\ndone.\");\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# x = 10
#
# def increment():
#     x += 1
#
# ^^ Non-working code: the assignment makes x local to the function,
#    so the global is never changed (UnboundLocalError at runtime).

x = 10


def increment(number):
    """Return *number* plus one; the argument itself is left untouched."""
    return number + 1


# If we want to change a global variable,
# we have to do it like this: rebind the global to the returned value.
x = increment(x)
|
normal
|
{
"blob_id": "a0460b100a750b685f3e831a19379b0e26da4b35",
"index": 7368,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef increment(number):\n number += 1\n return number\n\n\n<mask token>\n",
"step-3": "x = 10\n\n\ndef increment(number):\n number += 1\n return number\n\n\nx = increment(x)\n",
"step-4": "# x = 10\n#\n# def increment():\n# x += 1\n# \n# ^^ Non-working code\n\nx = 10\n\ndef increment(number): \n number += 1\n return number\n\n# If we want to change a global variable,\n# we have to do it like this\nx = increment(x)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import weakref
from soma.controller import Controller
from soma.functiontools import SomaPartial
from traits.api import File, Undefined, Instance
class MatlabConfig(Controller):
    """Configuration controller for Matlab support.

    Exposes a single option: the full path of the matlab executable.
    ``Undefined`` means the path has not been configured yet.
    """
    executable = File(Undefined, output=False,
        desc='Full path of the matlab executable')
def load_module(capsul_engine, module_name):
    """Install a ``matlab`` config controller on *capsul_engine* and keep
    the execution context synchronized with it."""
    capsul_engine.add_trait('matlab', Instance(MatlabConfig))
    capsul_engine.matlab = MatlabConfig()
    # Propagate every config change into the execution context.  The weak
    # proxy avoids creating a reference cycle between the engine and the
    # change-notification handler it holds.
    handler = SomaPartial(update_execution_context,
                          weakref.proxy(capsul_engine))
    capsul_engine.matlab.on_trait_change(handler)
def init_module(capul_engine, module_name, loaded_module):
    """Post-load hook for the matlab module; nothing to initialize."""
    # NOTE(review): the first parameter is spelled "capul_engine" (missing
    # an "s"); kept as-is since renaming could break keyword callers.
    pass
def update_execution_context(capsul_engine):
    """Mirror the configured matlab path into the execution environment.

    Sets MATLAB_EXECUTABLE in ``capsul_engine.execution_context.environ``
    whenever a path has actually been configured (i.e. is not Undefined).
    """
    exe = capsul_engine.matlab.executable
    if exe is not Undefined:
        capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'] = exe
|
normal
|
{
"blob_id": "4a8e8994ec8734664a5965b81da9d146d8504f8d",
"index": 6096,
"step-1": "<mask token>\n\n\nclass MatlabConfig(Controller):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False, desc=\n 'Full path of the matlab executable')\n\n\ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(\n update_execution_context, weakref.proxy(capsul_engine)))\n\n\n<mask token>\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'\n ] = capsul_engine.matlab.executable\n",
"step-3": "<mask token>\n\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False, desc=\n 'Full path of the matlab executable')\n\n\ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(\n update_execution_context, weakref.proxy(capsul_engine)))\n\n\ndef init_module(capul_engine, module_name, loaded_module):\n pass\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'\n ] = capsul_engine.matlab.executable\n",
"step-4": "import weakref\nfrom soma.controller import Controller\nfrom soma.functiontools import SomaPartial\nfrom traits.api import File, Undefined, Instance\n\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False, desc=\n 'Full path of the matlab executable')\n\n\ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(\n update_execution_context, weakref.proxy(capsul_engine)))\n\n\ndef init_module(capul_engine, module_name, loaded_module):\n pass\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'\n ] = capsul_engine.matlab.executable\n",
"step-5": "import weakref\n\nfrom soma.controller import Controller\nfrom soma.functiontools import SomaPartial\nfrom traits.api import File, Undefined, Instance\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False,\n desc='Full path of the matlab executable')\n \ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(update_execution_context, \n weakref.proxy(capsul_engine)))\n\ndef init_module(capul_engine, module_name, loaded_module):\n pass\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'] \\\n = capsul_engine.matlab.executable\n\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
group = {'A': 20, 'B': 15, 'C': 10}


def split_the_bill(x):
    """Return how far each person's payment is from an even split.

    *x* maps a person's name to the amount they paid.  The result maps each
    name to ``paid - average`` rounded to 2 decimals: positive means the
    person overpaid (is owed money), negative means they still owe.

    An empty dict yields an empty dict (the original implementation raised
    ZeroDivisionError in that case).
    """
    if not x:
        return {}
    # Even share per person; sum()/len() replace the original manual
    # accumulation loop, which also shadowed the builtin `sum`.
    price_pp = sum(x.values()) / len(x)
    return {name: round(paid - price_pp, 2) for name, paid in x.items()}


split_the_bill(group)
|
normal
|
{
"blob_id": "69d7e7eb644a67ee921086005f0a55f39507f361",
"index": 2864,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_the_bill(x):\n owed_dict = {}\n sum = 0\n people = 0\n for key in x:\n sum = sum + x[key]\n people = people + 1\n price_pp = sum / people\n for key in x:\n owed_value = x[key] - price_pp\n owed_dict[key] = round(owed_value, 2)\n return owed_dict\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef split_the_bill(x):\n owed_dict = {}\n sum = 0\n people = 0\n for key in x:\n sum = sum + x[key]\n people = people + 1\n price_pp = sum / people\n for key in x:\n owed_value = x[key] - price_pp\n owed_dict[key] = round(owed_value, 2)\n return owed_dict\n\n\nsplit_the_bill(group)\n",
"step-4": "group = {'A': 20, 'B': 15, 'C': 10}\n\n\ndef split_the_bill(x):\n owed_dict = {}\n sum = 0\n people = 0\n for key in x:\n sum = sum + x[key]\n people = people + 1\n price_pp = sum / people\n for key in x:\n owed_value = x[key] - price_pp\n owed_dict[key] = round(owed_value, 2)\n return owed_dict\n\n\nsplit_the_bill(group)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def exeObj(count, idObj, id, obj, guild, nom):
    """Accumulate *count* for the (member *id*, object *idObj*) pair in the
    monthly, yearly and global per-object leaderboards of statistic *nom*.

    An "object" is e.g. an emote, a reaction or a mention target; *obj* is
    passed through to compteurSQL.  Rarely-used emote/reaction tables are
    pruned once their totals fall under fixed thresholds (10 monthly,
    25 yearly, 50 global).

    NOTE(review): table names and WHERE clauses are built with str.format;
    values appear to be numeric IDs and strftime output, but confirm no
    user-controlled string can reach idObj.
    """
    # Numeric yymmdd key for today.
    dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))
    # Global (all-time) DB handle plus the current-month DB handle.
    connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),
        strftime('%y'))
    # Monthly per-object counter (table: <french month><yy><idObj>).
    compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str
        (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),
        count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(
        '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)
    # Prune barely-used emote/reaction monthly tables (< 10 uses).
    if nom in ('Emotes', 'Reactions') and curseur.execute(
        'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime
        ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:
        curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(
            '%m')], strftime('%y'), idObj))
    connexion.commit()
    # Yearly per-object counter (table: to<yy><idObj>).
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(
        '%y'))
    compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,
        idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),
        strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',
        obj, False, 2, curseurGL)
    # Prune barely-used emote/reaction yearly tables (< 25 uses).
    if nom in ('Emotes', 'Reactions') and curseur.execute(
        'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)
        ).fetchone()['Count'] < 25:
        curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))
    connexion.commit()
    # Global per-object counter; returns the per-member rows used below.
    liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,
        'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),
        strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)
    if nom in ('Emotes', 'Reactions'):
        # Prune the global table under 50 uses, and the per-member history
        # tables of objects whose global rank is beyond the top 400.
        if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(
            idObj)).fetchone()['Count'] < 50:
            curseurGL.execute('DROP TABLE glob{0}'.format(idObj))
        if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)
            ).fetchone()['Rank'] > 400:
            for i in liste:
                curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.
                    format(i['ID'], idObj))
                curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.
                    format(i['ID'], idObj))
    connexionGL.commit()
    # Mention statistics are excluded from the reports tables.
    if nom not in ('Mentions', 'Mentionne'):
        rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,
            strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,
            nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)
def exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):
    """Record one game result for player *id* in game *option* of *guild*.

    *state* is 'W' (win) or 'L' (loss): a win adds +2 points and one win,
    a loss adds -1 point and one loss.  *idObj*, when not None, identifies
    the opponent and drives the head-to-head tables; *tours* is the number
    of turns, stored in the game history.

    NOTE(review): ``idObj != None`` should idiomatically be
    ``idObj is not None``; behavior is unchanged for plain IDs/None.
    """
    # Point delta and win/loss increments derived from the outcome.
    dictCount = {'W': 2, 'L': -1}
    dictW = {'W': 1, 'L': 0}
    dictL = {'W': 0, 'L': 1}
    # Global (all-time) DB handle plus the current-month DB handle.
    connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')
    connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),
        strftime('%y'))
    # Monthly leaderboard, all opponents combined.
    compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),
        id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[
        state], dictCount[state], 0), dictCount[state], (strftime('%d'),
        strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),
        'persoM', False, state, 4, curseurGL)
    # Monthly head-to-head leaderboard against this opponent.
    if idObj != None:
        compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(
            '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime
            ('%y'), dictW[state], dictL[state], dictCount[state], 0),
            dictCount[state], (strftime('%d'), strftime('%m'), strftime(
            '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,
            5, curseurGL)
    connexion.commit()
    # Yearly leaderboard (table: to<yy>).
    connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')
        )
    compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',
        strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),
        dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),
        ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)
    # Yearly head-to-head leaderboard.
    if idObj != None:
        compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,
            id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],
            dictCount[state], 0), dictCount[state], (strftime('%d'),
            strftime('%m'), strftime('%y')), ('TO', strftime('%y')),
            'persoA', True, state, 5, curseurGL)
    connexion.commit()
    # Global leaderboard.
    compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],
        dictL[state], dictCount[state], 0), dictCount[state], (strftime(
        '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA',
        False, state, 4, curseurGL)
    # Global head-to-head leaderboard; note the match history is only
    # written when an opponent exists.
    if idObj != None:
        compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,
            'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),
            dictCount[state], (strftime('%d'), strftime('%m'), strftime(
            '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)
        histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(
            '%m') + '/' + strftime('%y'), idObj, state)
    connexionGL.commit()
    # Daily aggregate for this game.
    dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (
        strftime('%d'), strftime('%m'), strftime('%y')), option,
        curseurGuild, guild, 'Jeux')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def exeClassic(count, id, nom, curseurGuild, guild):
dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))
connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),
strftime('%y'))
compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,
(0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime
('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(
'%y')), 'persoM', False, True, 1, curseurGL)
connexion.commit()
connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(
'%y'))
compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(
'%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(
'%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)
connexion.commit()
liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0
), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',
'GL'), 'persoA', False, True, 1, curseurGL)
if nom in ('Messages', 'Voice'):
compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(
'%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'
) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(
'%y'), count), count, None, None, None, None, False, 3, curseurGL)
if nom in ('Emotes', 'Reactions'):
countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.
format(id)).fetchone()['Count']
for i in liste:
if i['Rank'] > 400:
curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i
['ID']))
curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i
['ID']))
connexionGL.commit()
dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,
curseurGuild, guild.id, 'Stats')
if nom not in ('Mentions', 'Mentionne'):
rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),
strftime('%m'), strftime('%y'), dateID, count, nom), strftime(
'%d'), strftime('%m'), strftime('%y'), nom)
def exeObj(count, idObj, id, obj, guild, nom):
dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))
connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),
strftime('%y'))
compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str
(idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),
count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(
'%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)
if nom in ('Emotes', 'Reactions') and curseur.execute(
'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime
('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:
curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(
'%m')], strftime('%y'), idObj))
connexion.commit()
connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(
'%y'))
compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,
idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),
strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',
obj, False, 2, curseurGL)
if nom in ('Emotes', 'Reactions') and curseur.execute(
'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)
).fetchone()['Count'] < 25:
curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))
connexion.commit()
liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,
'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),
strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)
if nom in ('Emotes', 'Reactions'):
if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(
idObj)).fetchone()['Count'] < 50:
curseurGL.execute('DROP TABLE glob{0}'.format(idObj))
if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)
).fetchone()['Rank'] > 400:
for i in liste:
curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.
format(i['ID'], idObj))
curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.
format(i['ID'], idObj))
connexionGL.commit()
if nom not in ('Mentions', 'Mentionne'):
rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,
strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,
nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)
def exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):
dictCount = {'W': 2, 'L': -1}
dictW = {'W': 1, 'L': 0}
dictL = {'W': 0, 'L': 1}
connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')
connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),
strftime('%y'))
compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),
id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[
state], dictCount[state], 0), dictCount[state], (strftime('%d'),
strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),
'persoM', False, state, 4, curseurGL)
if idObj != None:
compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(
'%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime
('%y'), dictW[state], dictL[state], dictCount[state], 0),
dictCount[state], (strftime('%d'), strftime('%m'), strftime(
'%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,
5, curseurGL)
connexion.commit()
connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')
)
compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',
strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),
dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),
('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)
if idObj != None:
compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,
id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],
dictCount[state], 0), dictCount[state], (strftime('%d'),
strftime('%m'), strftime('%y')), ('TO', strftime('%y')),
'persoA', True, state, 5, curseurGL)
connexion.commit()
compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],
dictL[state], dictCount[state], 0), dictCount[state], (strftime(
'%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA',
False, state, 4, curseurGL)
if idObj != None:
compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,
'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),
dictCount[state], (strftime('%d'), strftime('%m'), strftime(
'%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)
histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(
'%m') + '/' + strftime('%y'), idObj, state)
connexionGL.commit()
dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (
strftime('%d'), strftime('%m'), strftime('%y')), option,
curseurGuild, guild, 'Jeux')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tableauMois = {'01': 'janvier', '02': 'février', '03': 'mars', '04':
'avril', '05': 'mai', '06': 'juin', '07': 'juillet', '08': 'aout', '09':
'septembre', '10': 'octobre', '11': 'novembre', '12': 'décembre', 'TO':
'TOTAL'}
def exeClassic(count, id, nom, curseurGuild, guild):
dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))
connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),
strftime('%y'))
compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,
(0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime
('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(
'%y')), 'persoM', False, True, 1, curseurGL)
connexion.commit()
connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(
'%y'))
compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(
'%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(
'%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)
connexion.commit()
liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0
), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',
'GL'), 'persoA', False, True, 1, curseurGL)
if nom in ('Messages', 'Voice'):
compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(
'%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'
) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(
'%y'), count), count, None, None, None, None, False, 3, curseurGL)
if nom in ('Emotes', 'Reactions'):
countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.
format(id)).fetchone()['Count']
for i in liste:
if i['Rank'] > 400:
curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i
['ID']))
curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i
['ID']))
connexionGL.commit()
dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,
curseurGuild, guild.id, 'Stats')
if nom not in ('Mentions', 'Mentionne'):
rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),
strftime('%m'), strftime('%y'), dateID, count, nom), strftime(
'%d'), strftime('%m'), strftime('%y'), nom)
def exeObj(count, idObj, id, obj, guild, nom):
dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))
connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),
strftime('%y'))
compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str
(idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),
count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(
'%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)
if nom in ('Emotes', 'Reactions') and curseur.execute(
'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime
('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:
curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(
'%m')], strftime('%y'), idObj))
connexion.commit()
connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(
'%y'))
compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,
idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),
strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',
obj, False, 2, curseurGL)
if nom in ('Emotes', 'Reactions') and curseur.execute(
'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)
).fetchone()['Count'] < 25:
curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))
connexion.commit()
liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,
'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),
strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)
if nom in ('Emotes', 'Reactions'):
if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(
idObj)).fetchone()['Count'] < 50:
curseurGL.execute('DROP TABLE glob{0}'.format(idObj))
if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)
).fetchone()['Rank'] > 400:
for i in liste:
curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.
format(i['ID'], idObj))
curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.
format(i['ID'], idObj))
connexionGL.commit()
if nom not in ('Mentions', 'Mentionne'):
rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,
strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,
nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)
def exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):
dictCount = {'W': 2, 'L': -1}
dictW = {'W': 1, 'L': 0}
dictL = {'W': 0, 'L': 1}
connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')
connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),
strftime('%y'))
compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),
id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[
state], dictCount[state], 0), dictCount[state], (strftime('%d'),
strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),
'persoM', False, state, 4, curseurGL)
if idObj != None:
compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(
'%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime
('%y'), dictW[state], dictL[state], dictCount[state], 0),
dictCount[state], (strftime('%d'), strftime('%m'), strftime(
'%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,
5, curseurGL)
connexion.commit()
connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')
)
compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',
strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),
dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),
('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)
if idObj != None:
compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,
id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],
dictCount[state], 0), dictCount[state], (strftime('%d'),
strftime('%m'), strftime('%y')), ('TO', strftime('%y')),
'persoA', True, state, 5, curseurGL)
connexion.commit()
compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],
dictL[state], dictCount[state], 0), dictCount[state], (strftime(
'%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA',
False, state, 4, curseurGL)
if idObj != None:
compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,
'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),
dictCount[state], (strftime('%d'), strftime('%m'), strftime(
'%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)
histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(
'%m') + '/' + strftime('%y'), idObj, state)
connexionGL.commit()
dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (
strftime('%d'), strftime('%m'), strftime('%y')), option,
curseurGuild, guild, 'Jeux')
<|reserved_special_token_1|>
from time import strftime
from Stats.SQL.Compteur import compteurSQL
from Stats.SQL.Rapports import rapportsSQL
from Stats.SQL.Daily import dailySQL
from Stats.SQL.CompteurP4 import compteurJeuxSQL
from Stats.SQL.Historique import histoSQL, histoSQLJeux
from Stats.SQL.ConnectSQL import connectSQL
tableauMois = {'01': 'janvier', '02': 'février', '03': 'mars', '04':
'avril', '05': 'mai', '06': 'juin', '07': 'juillet', '08': 'aout', '09':
'septembre', '10': 'octobre', '11': 'novembre', '12': 'décembre', 'TO':
'TOTAL'}
def exeClassic(count, id, nom, curseurGuild, guild):
    """Accumulate *count* units of statistic *nom* (Messages, Voice,
    Emotes, Reactions, Mentions...) for member *id* of *guild* in the
    monthly, yearly and global leaderboards, then refresh the daily
    aggregate and (except for mentions) the reports tables.
    """
    # Numeric yymmdd key for today.
    dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))
    # Global (all-time) DB handle plus the current-month DB handle.
    connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),
        strftime('%y'))
    # Monthly leaderboard (table: <french month><yy>).
    compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,
        (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime
        ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(
        '%y')), 'persoM', False, True, 1, curseurGL)
    connexion.commit()
    # Yearly leaderboard (table: to<yy>).
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(
        '%y'))
    compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(
        '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(
        '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)
    connexion.commit()
    # Global leaderboard; returns the ranking rows used for pruning below.
    liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0
        ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',
        'GL'), 'persoA', False, True, 1, curseurGL)
    # Messages/Voice additionally feed a per-day ranking keyed by yymmdd.
    if nom in ('Messages', 'Voice'):
        compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(
            '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'
            ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(
            '%y'), count), count, None, None, None, None, False, 3, curseurGL)
    if nom in ('Emotes', 'Reactions'):
        # NOTE(review): countGL is fetched but never used — looks like a
        # leftover from an earlier pruning condition; confirm before removal.
        countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.
            format(id)).fetchone()['Count']
        # Drop per-member history tables of members ranked beyond the top 400.
        for i in liste:
            if i['Rank'] > 400:
                curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i
                    ['ID']))
                curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i
                    ['ID']))
    connexionGL.commit()
    # Daily aggregate for this statistic.
    dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,
        curseurGuild, guild.id, 'Stats')
    # Mention statistics are excluded from the reports tables.
    if nom not in ('Mentions', 'Mentionne'):
        rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),
            strftime('%m'), strftime('%y'), dateID, count, nom), strftime(
            '%d'), strftime('%m'), strftime('%y'), nom)
def exeObj(count, idObj, id, obj, guild, nom):
    """Update the per-object counters (one emote/reaction/...) for stat *nom*.

    Maintains the monthly, yearly and global tables keyed by *idObj*,
    prunes low-traffic object tables for Emotes/Reactions (< 10 monthly,
    < 25 yearly, < 50 global uses, or object ranked past 400), and
    records report entries.  *obj* is forwarded to compteurSQL as-is.
    """
    # Capture the date components once so a midnight rollover cannot
    # split this update across two different days.
    jour, mois, annee = strftime('%d'), strftime('%m'), strftime('%y')
    dateID = int(annee + mois + jour)
    connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
    # --- monthly table ---
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', mois, annee)
    compteurSQL(curseur, tableauMois[mois] + annee + str(idObj), id,
                (0, id, idObj, mois, annee, count), count,
                (jour, mois, annee), (mois, annee),
                'persoM', obj, False, 2, curseurGL)
    # Drop a monthly emote/reaction table that never reached 10 uses.
    # Table names cannot be bound as SQL parameters; the ID value is bound.
    if nom in ('Emotes', 'Reactions') and curseur.execute(
            'SELECT Count FROM {0}{1} WHERE ID=?'.format(tableauMois[mois], annee),
            (idObj,)).fetchone()['Count'] < 10:
        curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[mois], annee, idObj))
    connexion.commit()
    # --- yearly table ---
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', annee)
    compteurSQL(curseur, 'to' + annee + str(idObj), id,
                (0, id, idObj, 'TO', annee, count), count,
                (jour, mois, annee), ('TO', annee),
                'persoA', obj, False, 2, curseurGL)
    if nom in ('Emotes', 'Reactions') and curseur.execute(
            'SELECT Count FROM to{0} WHERE ID=?'.format(annee),
            (idObj,)).fetchone()['Count'] < 25:
        curseur.execute('DROP TABLE to{0}{1}'.format(annee, idObj))
    connexion.commit()
    # --- global table; returns the ranking rows ---
    liste = compteurSQL(curseurGL, 'glob' + str(idObj), id,
                        (0, id, idObj, 'TO', 'GL', count), count,
                        (jour, mois, annee), ('TO', 'GL'),
                        'persoA', obj, False, 2, curseurGL)
    if nom in ('Emotes', 'Reactions'):
        if curseurGL.execute('SELECT Count FROM glob WHERE ID=?',
                             (idObj,)).fetchone()['Count'] < 50:
            curseurGL.execute('DROP TABLE glob{0}'.format(idObj))
        if curseurGL.execute('SELECT Rank FROM glob WHERE ID=?',
                             (idObj,)).fetchone()['Rank'] > 400:
            for i in liste:
                curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.format(i['ID'], idObj))
                curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.format(i['ID'], idObj))
    connexionGL.commit()
    if nom not in ('Mentions', 'Mentionne'):
        rapportsSQL(guild, 'objs', idObj, id, count,
                    (0, id, idObj, jour, mois, annee, dateID, count, nom),
                    jour, mois, annee, nom)
def exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):
    """Record the outcome of one game for player *id* (*state* is 'W' or 'L').

    Updates the monthly, yearly and global win/loss counters, the
    per-opponent counters and the history table when *idObj* (the
    opponent) is given, then refreshes the daily aggregate.
    """
    # Score delta and win/loss flags derived from the outcome.
    dictCount = {'W': 2, 'L': -1}
    dictW = {'W': 1, 'L': 0}
    dictL = {'W': 0, 'L': 1}
    wins, losses, delta = dictW[state], dictL[state], dictCount[state]
    # Capture the date components once so a midnight rollover cannot
    # split this update across two different days.
    jour, mois, annee = strftime('%d'), strftime('%m'), strftime('%y')
    connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')
    # --- monthly counters ---
    connexion, curseur = connectSQL(guild, option, 'Jeux', mois, annee)
    compteurJeuxSQL(curseur, tableauMois[mois] + annee, id,
                    (0, id, mois, annee, wins, losses, delta, 0), delta,
                    (jour, mois, annee), (mois, annee),
                    'persoM', False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseur, tableauMois[mois] + annee + str(idObj), id,
                        (0, id, idObj, mois, annee, wins, losses, delta, 0),
                        delta, (jour, mois, annee), (mois, annee),
                        'persoM', True, state, 5, curseurGL)
    connexion.commit()
    # --- yearly counters ---
    connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', annee)
    compteurJeuxSQL(curseur, 'to' + annee, id,
                    (0, id, 'TO', annee, wins, losses, delta, 0), delta,
                    (jour, mois, annee), ('TO', annee),
                    'persoA', False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseur, 'to' + annee + str(idObj), id,
                        (0, id, idObj, 'TO', annee, wins, losses, delta, 0),
                        delta, (jour, mois, annee), ('TO', annee),
                        'persoA', True, state, 5, curseurGL)
    connexion.commit()
    # --- global counters ---
    compteurJeuxSQL(curseurGL, 'glob', id,
                    (0, id, 'TO', 'GL', wins, losses, delta, 0), delta,
                    (jour, mois, annee), ('TO', 'GL'),
                    'persoA', False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id,
                        (0, id, idObj, 'TO', 'GL', wins, losses, delta, 0),
                        delta, (jour, mois, annee), ('TO', 'GL'),
                        'persoA', True, state, 5, curseurGL)
        # History rows are only recorded when an opponent is known.
        histoSQLJeux(curseurGL, id, tours, jour + '/' + mois + '/' + annee,
                     idObj, state)
    connexionGL.commit()
    dailySQL(int(annee + mois + jour), (jour, mois, annee), option,
             curseurGuild, guild, 'Jeux')
<|reserved_special_token_1|>
from time import strftime
from Stats.SQL.Compteur import compteurSQL
from Stats.SQL.Rapports import rapportsSQL
from Stats.SQL.Daily import dailySQL
from Stats.SQL.CompteurP4 import compteurJeuxSQL
from Stats.SQL.Historique import histoSQL, histoSQLJeux
from Stats.SQL.ConnectSQL import connectSQL
# Maps zero-padded month numbers (and the sentinel "TO" for totals) to the
# French month names used to build the per-month SQL table names below.
tableauMois={"01":"janvier","02":"février","03":"mars","04":"avril","05":"mai","06":"juin","07":"juillet","08":"aout","09":"septembre","10":"octobre","11":"novembre","12":"décembre","TO":"TOTAL"}
def exeClassic(count, id, nom, curseurGuild, guild):
    """Update the per-user counters for stat *nom* by *count* for user *id*.

    Writes the monthly, yearly and global counter tables, maintains the
    day-rank table for Messages/Voice, prunes per-user tables of users
    ranked past 400 for Emotes/Reactions, and refreshes the daily
    aggregate and rank reports.
    """
    # Capture the date components once so a midnight rollover cannot
    # split this update across two different days (the original called
    # strftime() repeatedly throughout the function).
    jour, mois, annee = strftime('%d'), strftime('%m'), strftime('%y')
    dateID = int(annee + mois + jour)
    connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
    # --- monthly counter table ---
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', mois, annee)
    compteurSQL(curseur, tableauMois[mois] + annee, id,
                (0, id, mois, annee, count, 0), count,
                (jour, mois, annee), (mois, annee),
                'persoM', False, True, 1, curseurGL)
    connexion.commit()
    # --- yearly counter table ---
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', annee)
    compteurSQL(curseur, 'to' + annee, id,
                (0, id, 'TO', annee, count, 0), count,
                (jour, mois, annee), ('TO', annee),
                'persoA', False, True, 1, curseurGL)
    connexion.commit()
    # --- global counter table; returns the ranking rows ---
    liste = compteurSQL(curseurGL, 'glob', id,
                        (0, id, 'TO', 'GL', count, 0), count,
                        (jour, mois, annee), ('TO', 'GL'),
                        'persoA', False, True, 1, curseurGL)
    if nom in ('Messages', 'Voice'):
        compteurSQL(curseurGL, 'dayRank', dateID,
                    (0, dateID, jour, mois, annee, count), count,
                    None, None, None, None, False, 3, curseurGL)
    if nom in ('Emotes', 'Reactions'):
        # NOTE(review): the original also fetched this user's global
        # Count here but never used it; that query has been removed.
        for i in liste:
            if i['Rank'] > 400:
                # Table names cannot be bound as SQL parameters; i['ID']
                # comes from our own database rows, not from user input.
                curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i['ID']))
                curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i['ID']))
        connexionGL.commit()
    dailySQL(dateID, (jour, mois, annee), nom, curseurGuild, guild.id, 'Stats')
    if nom not in ('Mentions', 'Mentionne'):
        rapportsSQL(guild, 'ranks', id, None, count,
                    (0, id, jour, mois, annee, dateID, count, nom),
                    jour, mois, annee, nom)
def exeObj(count, idObj, id, obj, guild, nom):
    """Update the per-object counters (one emote/reaction/...) for stat *nom*.

    Maintains the monthly, yearly and global tables keyed by *idObj*,
    prunes low-traffic object tables for Emotes/Reactions (< 10 monthly,
    < 25 yearly, < 50 global uses, or object ranked past 400), and
    records report entries.  *obj* is forwarded to compteurSQL as-is.
    """
    # Capture the date components once so a midnight rollover cannot
    # split this update across two different days.
    jour, mois, annee = strftime('%d'), strftime('%m'), strftime('%y')
    dateID = int(annee + mois + jour)
    connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')
    # --- monthly table ---
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', mois, annee)
    compteurSQL(curseur, tableauMois[mois] + annee + str(idObj), id,
                (0, id, idObj, mois, annee, count), count,
                (jour, mois, annee), (mois, annee),
                'persoM', obj, False, 2, curseurGL)
    # Drop a monthly emote/reaction table that never reached 10 uses.
    # Table names cannot be bound as SQL parameters; the ID value is bound.
    if nom in ('Emotes', 'Reactions') and curseur.execute(
            'SELECT Count FROM {0}{1} WHERE ID=?'.format(tableauMois[mois], annee),
            (idObj,)).fetchone()['Count'] < 10:
        curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[mois], annee, idObj))
    connexion.commit()
    # --- yearly table ---
    connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', annee)
    compteurSQL(curseur, 'to' + annee + str(idObj), id,
                (0, id, idObj, 'TO', annee, count), count,
                (jour, mois, annee), ('TO', annee),
                'persoA', obj, False, 2, curseurGL)
    if nom in ('Emotes', 'Reactions') and curseur.execute(
            'SELECT Count FROM to{0} WHERE ID=?'.format(annee),
            (idObj,)).fetchone()['Count'] < 25:
        curseur.execute('DROP TABLE to{0}{1}'.format(annee, idObj))
    connexion.commit()
    # --- global table; returns the ranking rows ---
    liste = compteurSQL(curseurGL, 'glob' + str(idObj), id,
                        (0, id, idObj, 'TO', 'GL', count), count,
                        (jour, mois, annee), ('TO', 'GL'),
                        'persoA', obj, False, 2, curseurGL)
    if nom in ('Emotes', 'Reactions'):
        if curseurGL.execute('SELECT Count FROM glob WHERE ID=?',
                             (idObj,)).fetchone()['Count'] < 50:
            curseurGL.execute('DROP TABLE glob{0}'.format(idObj))
        if curseurGL.execute('SELECT Rank FROM glob WHERE ID=?',
                             (idObj,)).fetchone()['Rank'] > 400:
            for i in liste:
                curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.format(i['ID'], idObj))
                curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.format(i['ID'], idObj))
    connexionGL.commit()
    if nom not in ('Mentions', 'Mentionne'):
        rapportsSQL(guild, 'objs', idObj, id, count,
                    (0, id, idObj, jour, mois, annee, dateID, count, nom),
                    jour, mois, annee, nom)
def exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):
    """Record the outcome of one game for player *id* (*state* is 'W' or 'L').

    Updates the monthly, yearly and global win/loss counters, the
    per-opponent counters and the history table when *idObj* (the
    opponent) is given, then refreshes the daily aggregate.
    """
    # Score delta and win/loss flags derived from the outcome.
    dictCount = {'W': 2, 'L': -1}
    dictW = {'W': 1, 'L': 0}
    dictL = {'W': 0, 'L': 1}
    wins, losses, delta = dictW[state], dictL[state], dictCount[state]
    # Capture the date components once so a midnight rollover cannot
    # split this update across two different days.
    jour, mois, annee = strftime('%d'), strftime('%m'), strftime('%y')
    connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')
    # --- monthly counters ---
    connexion, curseur = connectSQL(guild, option, 'Jeux', mois, annee)
    compteurJeuxSQL(curseur, tableauMois[mois] + annee, id,
                    (0, id, mois, annee, wins, losses, delta, 0), delta,
                    (jour, mois, annee), (mois, annee),
                    'persoM', False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseur, tableauMois[mois] + annee + str(idObj), id,
                        (0, id, idObj, mois, annee, wins, losses, delta, 0),
                        delta, (jour, mois, annee), (mois, annee),
                        'persoM', True, state, 5, curseurGL)
    connexion.commit()
    # --- yearly counters ---
    connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', annee)
    compteurJeuxSQL(curseur, 'to' + annee, id,
                    (0, id, 'TO', annee, wins, losses, delta, 0), delta,
                    (jour, mois, annee), ('TO', annee),
                    'persoA', False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseur, 'to' + annee + str(idObj), id,
                        (0, id, idObj, 'TO', annee, wins, losses, delta, 0),
                        delta, (jour, mois, annee), ('TO', annee),
                        'persoA', True, state, 5, curseurGL)
    connexion.commit()
    # --- global counters ---
    compteurJeuxSQL(curseurGL, 'glob', id,
                    (0, id, 'TO', 'GL', wins, losses, delta, 0), delta,
                    (jour, mois, annee), ('TO', 'GL'),
                    'persoA', False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id,
                        (0, id, idObj, 'TO', 'GL', wins, losses, delta, 0),
                        delta, (jour, mois, annee), ('TO', 'GL'),
                        'persoA', True, state, 5, curseurGL)
        # History rows are only recorded when an opponent is known.
        histoSQLJeux(curseurGL, id, tours, jour + '/' + mois + '/' + annee,
                     idObj, state)
    connexionGL.commit()
    dailySQL(int(annee + mois + jour), (jour, mois, annee), option,
             curseurGuild, guild, 'Jeux')
|
flexible
|
{
"blob_id": "19ff064f8c27b9796eb435c7d2b9ebf87ee90ad6",
"index": 7982,
"step-1": "<mask token>\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n 
curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], 
(strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-2": "<mask token>\n\n\ndef exeClassic(count, id, nom, curseurGuild, guild):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,\n (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime\n ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(\n '%y')), 'persoM', False, True, 1, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(\n '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0\n ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',\n 'GL'), 'persoA', False, True, 1, curseurGL)\n if nom in ('Messages', 'Voice'):\n compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(\n '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'\n ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(\n '%y'), count), count, None, None, None, None, False, 3, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.\n format(id)).fetchone()['Count']\n for i in liste:\n if i['Rank'] > 400:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i\n ['ID']))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i\n ['ID']))\n connexionGL.commit()\n dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,\n curseurGuild, guild.id, 'Stats')\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),\n strftime('%m'), 
strftime('%y'), dateID, count, nom), strftime(\n '%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE 
ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), 
dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-3": "<mask token>\ntableauMois = {'01': 'janvier', '02': 'février', '03': 'mars', '04':\n 'avril', '05': 'mai', '06': 'juin', '07': 'juillet', '08': 'aout', '09':\n 'septembre', '10': 'octobre', '11': 'novembre', '12': 'décembre', 'TO':\n 'TOTAL'}\n\n\ndef exeClassic(count, id, nom, curseurGuild, guild):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,\n (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime\n ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(\n '%y')), 'persoM', False, True, 1, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(\n '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0\n ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',\n 'GL'), 'persoA', False, True, 1, curseurGL)\n if nom in ('Messages', 'Voice'):\n compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(\n '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'\n ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(\n '%y'), count), count, None, None, None, None, False, 3, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n countGL = curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.\n format(id)).fetchone()['Count']\n for i in liste:\n if i['Rank'] > 400:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i\n ['ID']))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i\n ['ID']))\n connexionGL.commit()\n dailySQL(dateID, 
(strftime('%d'), strftime('%m'), strftime('%y')), nom,\n curseurGuild, guild.id, 'Stats')\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),\n strftime('%m'), strftime('%y'), dateID, count, nom), strftime(\n '%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if 
curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), 
strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-4": "from time import strftime\nfrom Stats.SQL.Compteur import compteurSQL\nfrom Stats.SQL.Rapports import rapportsSQL\nfrom Stats.SQL.Daily import dailySQL\nfrom Stats.SQL.CompteurP4 import compteurJeuxSQL\nfrom Stats.SQL.Historique import histoSQL, histoSQLJeux\nfrom Stats.SQL.ConnectSQL import connectSQL\ntableauMois = {'01': 'janvier', '02': 'février', '03': 'mars', '04':\n 'avril', '05': 'mai', '06': 'juin', '07': 'juillet', '08': 'aout', '09':\n 'septembre', '10': 'octobre', '11': 'novembre', '12': 'décembre', 'TO':\n 'TOTAL'}\n\n\ndef exeClassic(count, id, nom, curseurGuild, guild):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'), id,\n (0, id, strftime('%m'), strftime('%y'), count, 0), count, (strftime\n ('%d'), strftime('%m'), strftime('%y')), (strftime('%m'), strftime(\n '%y')), 'persoM', False, True, 1, curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO', strftime(\n '%y'), count, 0), count, (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', strftime('%y')), 'persoA', False, True, 1, curseurGL)\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', count, 0\n ), count, (strftime('%d'), strftime('%m'), strftime('%y')), ('TO',\n 'GL'), 'persoA', False, True, 1, curseurGL)\n if nom in ('Messages', 'Voice'):\n compteurSQL(curseurGL, 'dayRank', int(strftime('%y') + strftime(\n '%m') + strftime('%d')), (0, int(strftime('%y') + strftime('%m'\n ) + strftime('%d')), strftime('%d'), strftime('%m'), strftime(\n '%y'), count), count, None, None, None, None, False, 3, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n countGL = curseurGL.execute('SELECT 
Count FROM glob WHERE ID={0}'.\n format(id)).fetchone()['Count']\n for i in liste:\n if i['Rank'] > 400:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}'.format(i\n ['ID']))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}'.format(i\n ['ID']))\n connexionGL.commit()\n dailySQL(dateID, (strftime('%d'), strftime('%m'), strftime('%y')), nom,\n curseurGuild, guild.id, 'Stats')\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'ranks', id, None, count, (0, id, strftime('%d'),\n strftime('%m'), strftime('%y'), dateID, count, nom), strftime(\n '%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeObj(count, idObj, id, obj, guild, nom):\n dateID = int(strftime('%y') + strftime('%m') + strftime('%d'))\n connexionGL, curseurGL = connectSQL(guild.id, nom, 'Stats', 'GL', '')\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', strftime('%m'),\n strftime('%y'))\n compteurSQL(curseur, tableauMois[strftime('%m')] + strftime('%y') + str\n (idObj), id, (0, id, idObj, strftime('%m'), strftime('%y'), count),\n count, (strftime('%d'), strftime('%m'), strftime('%y')), (strftime(\n '%m'), strftime('%y')), 'persoM', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM {0}{1} WHERE ID={2}'.format(tableauMois[strftime\n ('%m')], strftime('%y'), idObj)).fetchone()['Count'] < 10:\n curseur.execute('DROP TABLE {0}{1}{2}'.format(tableauMois[strftime(\n '%m')], strftime('%y'), idObj))\n connexion.commit()\n connexion, curseur = connectSQL(guild.id, nom, 'Stats', 'TO', strftime(\n '%y'))\n compteurSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0, id,\n idObj, 'TO', strftime('%y'), count), count, (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')), 'persoA',\n obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions') and curseur.execute(\n 'SELECT Count FROM to{0} WHERE ID={1}'.format(strftime('%y'), idObj)\n ).fetchone()['Count'] < 25:\n curseur.execute('DROP TABLE 
to{0}{1}'.format(strftime('%y'), idObj))\n connexion.commit()\n liste = compteurSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', count), count, (strftime('%d'), strftime('%m'),\n strftime('%y')), ('TO', 'GL'), 'persoA', obj, False, 2, curseurGL)\n if nom in ('Emotes', 'Reactions'):\n if curseurGL.execute('SELECT Count FROM glob WHERE ID={0}'.format(\n idObj)).fetchone()['Count'] < 50:\n curseurGL.execute('DROP TABLE glob{0}'.format(idObj))\n if curseurGL.execute('SELECT Rank FROM glob WHERE ID={0}'.format(idObj)\n ).fetchone()['Rank'] > 400:\n for i in liste:\n curseurGL.execute('DROP TABLE IF EXISTS persoM{0}{1}'.\n format(i['ID'], idObj))\n curseurGL.execute('DROP TABLE IF EXISTS persoA{0}{1}'.\n format(i['ID'], idObj))\n connexionGL.commit()\n if nom not in ('Mentions', 'Mentionne'):\n rapportsSQL(guild, 'objs', idObj, id, count, (0, id, idObj,\n strftime('%d'), strftime('%m'), strftime('%y'), dateID, count,\n nom), strftime('%d'), strftime('%m'), strftime('%y'), nom)\n\n\ndef exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):\n dictCount = {'W': 2, 'L': -1}\n dictW = {'W': 1, 'L': 0}\n dictL = {'W': 0, 'L': 1}\n connexionGL, curseurGL = connectSQL(guild, option, 'Jeux', 'GL', '')\n connexion, curseur = connectSQL(guild, option, 'Jeux', strftime('%m'),\n strftime('%y'))\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime('%y'),\n id, (0, id, strftime('%m'), strftime('%y'), dictW[state], dictL[\n state], dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), (strftime('%m'), strftime('%y')),\n 'persoM', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, tableauMois[strftime('%m')] + strftime(\n '%y') + str(idObj), id, (0, id, idObj, strftime('%m'), strftime\n ('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), (strftime('%m'), strftime('%y')), 'persoM', True, state,\n 5, 
curseurGL)\n connexion.commit()\n connexion, curseur = connectSQL(guild, option, 'Jeux', 'TO', strftime('%y')\n )\n compteurJeuxSQL(curseur, 'to' + strftime('%y'), id, (0, id, 'TO',\n strftime('%y'), dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime('%y')),\n ('TO', strftime('%y')), 'persoA', False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseur, 'to' + strftime('%y') + str(idObj), id, (0,\n id, idObj, 'TO', strftime('%y'), dictW[state], dictL[state],\n dictCount[state], 0), dictCount[state], (strftime('%d'),\n strftime('%m'), strftime('%y')), ('TO', strftime('%y')),\n 'persoA', True, state, 5, curseurGL)\n connexion.commit()\n compteurJeuxSQL(curseurGL, 'glob', id, (0, id, 'TO', 'GL', dictW[state],\n dictL[state], dictCount[state], 0), dictCount[state], (strftime(\n '%d'), strftime('%m'), strftime('%y')), ('TO', 'GL'), 'persoA', \n False, state, 4, curseurGL)\n if idObj != None:\n compteurJeuxSQL(curseurGL, 'glob' + str(idObj), id, (0, id, idObj,\n 'TO', 'GL', dictW[state], dictL[state], dictCount[state], 0),\n dictCount[state], (strftime('%d'), strftime('%m'), strftime(\n '%y')), ('TO', 'GL'), 'persoA', True, state, 5, curseurGL)\n histoSQLJeux(curseurGL, id, tours, strftime('%d') + '/' + strftime(\n '%m') + '/' + strftime('%y'), idObj, state)\n connexionGL.commit()\n dailySQL(int(strftime('%y') + strftime('%m') + strftime('%d')), (\n strftime('%d'), strftime('%m'), strftime('%y')), option,\n curseurGuild, guild, 'Jeux')\n",
"step-5": "from time import strftime\nfrom Stats.SQL.Compteur import compteurSQL\nfrom Stats.SQL.Rapports import rapportsSQL\nfrom Stats.SQL.Daily import dailySQL\nfrom Stats.SQL.CompteurP4 import compteurJeuxSQL\nfrom Stats.SQL.Historique import histoSQL, histoSQLJeux\nfrom Stats.SQL.ConnectSQL import connectSQL\n\ntableauMois={\"01\":\"janvier\",\"02\":\"février\",\"03\":\"mars\",\"04\":\"avril\",\"05\":\"mai\",\"06\":\"juin\",\"07\":\"juillet\",\"08\":\"aout\",\"09\":\"septembre\",\"10\":\"octobre\",\"11\":\"novembre\",\"12\":\"décembre\",\"TO\":\"TOTAL\"}\n\ndef exeClassic(count,id,nom,curseurGuild,guild):\n dateID=int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\"))\n connexionGL,curseurGL=connectSQL(guild.id,nom,\"Stats\",\"GL\",\"\")\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",strftime(\"%m\"),strftime(\"%y\"))\n compteurSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\"),id,(0,id,strftime(\"%m\"),strftime(\"%y\"),count,0),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",False,True,1,curseurGL)\n connexion.commit()\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",\"TO\",strftime(\"%y\"))\n compteurSQL(curseur,\"to\"+strftime(\"%y\"),id,(0,id,\"TO\",strftime(\"%y\"),count,0),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",False,True,1,curseurGL)\n connexion.commit()\n\n liste=compteurSQL(curseurGL,\"glob\",id,(0,id,\"TO\",\"GL\",count,0),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",False,True,1,curseurGL)\n if nom in (\"Messages\",\"Voice\"):\n compteurSQL(curseurGL,\"dayRank\",int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\")),(0,int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\")),strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),count),count,None,None,None,None,False,3,curseurGL)\n \n if nom in (\"Emotes\",\"Reactions\"):\n countGL=curseurGL.execute(\"SELECT Count FROM glob 
WHERE ID={0}\".format(id)).fetchone()[\"Count\"]\n for i in liste:\n if i[\"Rank\"]>400:\n curseurGL.execute(\"DROP TABLE IF EXISTS persoM{0}\".format(i[\"ID\"]))\n curseurGL.execute(\"DROP TABLE IF EXISTS persoA{0}\".format(i[\"ID\"]))\n connexionGL.commit()\n\n dailySQL(dateID,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),nom,curseurGuild,guild.id,\"Stats\")\n if nom not in (\"Mentions\",\"Mentionne\"):\n rapportsSQL(guild,\"ranks\",id,None,count,(0,id,strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),dateID,count,nom),strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),nom)\n\ndef exeObj(count,idObj,id,obj,guild,nom):\n dateID=int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\"))\n connexionGL,curseurGL=connectSQL(guild.id,nom,\"Stats\",\"GL\",\"\")\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",strftime(\"%m\"),strftime(\"%y\"))\n compteurSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\")+str(idObj),id,(0,id,idObj,strftime(\"%m\"),strftime(\"%y\"),count),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",obj,False,2,curseurGL)\n if nom in (\"Emotes\",\"Reactions\") and curseur.execute(\"SELECT Count FROM {0}{1} WHERE ID={2}\".format(tableauMois[strftime(\"%m\")],strftime(\"%y\"),idObj)).fetchone()[\"Count\"]<10:\n curseur.execute(\"DROP TABLE {0}{1}{2}\".format(tableauMois[strftime(\"%m\")],strftime(\"%y\"),idObj))\n connexion.commit()\n\n connexion,curseur=connectSQL(guild.id,nom,\"Stats\",\"TO\",strftime(\"%y\"))\n compteurSQL(curseur,\"to\"+strftime(\"%y\")+str(idObj),id,(0,id,idObj,\"TO\",strftime(\"%y\"),count),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",obj,False,2,curseurGL)\n if nom in (\"Emotes\",\"Reactions\") and curseur.execute(\"SELECT Count FROM to{0} WHERE ID={1}\".format(strftime(\"%y\"),idObj)).fetchone()[\"Count\"]<25:\n curseur.execute(\"DROP TABLE to{0}{1}\".format(strftime(\"%y\"),idObj))\n 
connexion.commit()\n\n liste=compteurSQL(curseurGL,\"glob\"+str(idObj),id,(0,id,idObj,\"TO\",\"GL\",count),count,(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",obj,False,2,curseurGL)\n if nom in (\"Emotes\",\"Reactions\"):\n if curseurGL.execute(\"SELECT Count FROM glob WHERE ID={0}\".format(idObj)).fetchone()[\"Count\"]<50:\n curseurGL.execute(\"DROP TABLE glob{0}\".format(idObj))\n if curseurGL.execute(\"SELECT Rank FROM glob WHERE ID={0}\".format(idObj)).fetchone()[\"Rank\"]>400:\n for i in liste:\n curseurGL.execute(\"DROP TABLE IF EXISTS persoM{0}{1}\".format(i[\"ID\"],idObj))\n curseurGL.execute(\"DROP TABLE IF EXISTS persoA{0}{1}\".format(i[\"ID\"],idObj))\n connexionGL.commit()\n\n if nom not in (\"Mentions\",\"Mentionne\"):\n rapportsSQL(guild,\"objs\",idObj,id,count,(0,id,idObj,strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),dateID,count,nom),strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\"),nom)\n\ndef exeJeuxSQL(id,idObj,state,guild,curseurGuild,count,option,tours):\n dictCount={\"W\":2,\"L\":-1}\n dictW={\"W\":1,\"L\":0}\n dictL={\"W\":0,\"L\":1}\n connexionGL,curseurGL=connectSQL(guild,option,\"Jeux\",\"GL\",\"\")\n\n connexion,curseur=connectSQL(guild,option,\"Jeux\",strftime(\"%m\"),strftime(\"%y\"))\n compteurJeuxSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\"),id,(0,id,strftime(\"%m\"),strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",False,state,4,curseurGL)\n if idObj!=None:\n compteurJeuxSQL(curseur,tableauMois[strftime(\"%m\")]+strftime(\"%y\")+str(idObj),id,(0,id,idObj,strftime(\"%m\"),strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(strftime(\"%m\"),strftime(\"%y\")),\"persoM\",True,state,5,curseurGL)\n connexion.commit()\n\n 
connexion,curseur=connectSQL(guild,option,\"Jeux\",\"TO\",strftime(\"%y\"))\n compteurJeuxSQL(curseur,\"to\"+strftime(\"%y\"),id,(0,id,\"TO\",strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",False,state,4,curseurGL)\n if idObj!=None:\n compteurJeuxSQL(curseur,\"to\"+strftime(\"%y\")+str(idObj),id,(0,id,idObj,\"TO\",strftime(\"%y\"),dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",strftime(\"%y\")),\"persoA\",True,state,5,curseurGL)\n connexion.commit()\n\n compteurJeuxSQL(curseurGL,\"glob\",id,(0,id,\"TO\",\"GL\",dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",False,state,4,curseurGL)\n if idObj!=None:\n compteurJeuxSQL(curseurGL,\"glob\"+str(idObj),id,(0,id,idObj,\"TO\",\"GL\",dictW[state],dictL[state],dictCount[state],0),dictCount[state],(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),(\"TO\",\"GL\"),\"persoA\",True,state,5,curseurGL)\n histoSQLJeux(curseurGL,id,tours,strftime(\"%d\")+\"/\"+strftime(\"%m\")+\"/\"+strftime(\"%y\"),idObj,state)\n connexionGL.commit()\n\n dailySQL(int(strftime(\"%y\")+strftime(\"%m\")+strftime(\"%d\")),(strftime(\"%d\"),strftime(\"%m\"),strftime(\"%y\")),option,curseurGuild,guild,\"Jeux\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('Cover Letter Developer')
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit('')
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit('')
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit('')
self.label4 = QLabel(
'What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit('')
self.button = QPushButton('Develop')
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.coverlet)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Form(QDialog):
    """Dialog that collects cover-letter details and writes them out as a .docx.

    Four labelled text fields (company, position, introduction, skills) feed
    a letter template; the "Develop" button renders and saves the document.
    """

    def __init__(self, parent=None):
        # Build the widget tree: one QLabel/QLineEdit pair per question,
        # kept as numbered attributes because coverlet() reads
        # self.edit1..self.edit4 by name.
        super(Form, self).__init__(parent)
        self.setWindowTitle('Cover Letter Developer')
        self.label1 = QLabel('Input Company Name')
        self.edit1 = QLineEdit('')
        self.label2 = QLabel('Input Position Title')
        self.edit2 = QLineEdit('')
        self.label3 = QLabel('How did you get introduced to the company?')
        self.edit3 = QLineEdit('')
        self.label4 = QLabel(
            'What skills do you have that would help the COOP/Internship')
        self.edit4 = QLineEdit('')
        self.button = QPushButton('Develop')
        layout = QVBoxLayout()
        layout.addWidget(self.label1)
        layout.addWidget(self.edit1)
        layout.addWidget(self.label2)
        layout.addWidget(self.edit2)
        layout.addWidget(self.label3)
        layout.addWidget(self.edit3)
        layout.addWidget(self.label4)
        layout.addWidget(self.edit4)
        layout.addWidget(self.button)
        self.setLayout(layout)
        # Clicking "Develop" generates the cover-letter document.
        self.button.clicked.connect(self.coverlet)

    def coverlet(self):
        """Build the letter from the form fields and save it as a .docx.

        Side effects: writes '<company>_<position>_baranauskas_.docx' into
        the current working directory (expects 'qr_code.png' to exist
        there), prints a confirmation banner, and clears all four fields.
        """
        name = self.edit1.text()
        pos = self.edit2.text()
        intro = self.edit3.text()
        skills = self.edit4.text()
        # Letter body: template fragments interleaved with the user's answers.
        mytext = '\n Dear ' + name + """’s Hiring Team,
 """ + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + """
 """ + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
 """ + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + """ initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
 """ + ' ' + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
 """
        anothertext = """
 Respectfully yours,
 Martynas Baranauskas
 baranauskasm@wit.edu
 781-572-9775
 Personal Website: https://baranauskasm.wixsite.com/mysite
 or scan QR code with smartphone camera
 """
        document = Document()
        p = document.add_paragraph(mytext)
        g = document.add_paragraph(anothertext)
        # QR code image linking to the personal website, fixed at 0.7 inch.
        k = document.add_picture('qr_code.png', width=Inches(0.7))
        filename = name + '_' + pos + '_baranauskas_.docx'
        document.save(filename)
        print('-----------------------------------------------------')
        # NOTE(review): ".doxc" below is a typo for ".docx" in the message
        # text only; the saved file uses the correct extension.
        print(name + '_' + pos + '_baranauskas.doxc document was developed')
        print('------------------------------------------------------')
        # Reset the form so another letter can be generated immediately.
        self.edit1.clear()
        self.edit2.clear()
        self.edit3.clear()
        self.edit4.clear()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Form(QDialog):
    """Dialog that collects cover-letter details and writes them out as a .docx.

    Four labelled text fields (company, position, introduction, skills) feed
    a letter template; the "Develop" button renders and saves the document.
    """

    def __init__(self, parent=None):
        # Build the widget tree: one QLabel/QLineEdit pair per question,
        # kept as numbered attributes because coverlet() reads
        # self.edit1..self.edit4 by name.
        super(Form, self).__init__(parent)
        self.setWindowTitle('Cover Letter Developer')
        self.label1 = QLabel('Input Company Name')
        self.edit1 = QLineEdit('')
        self.label2 = QLabel('Input Position Title')
        self.edit2 = QLineEdit('')
        self.label3 = QLabel('How did you get introduced to the company?')
        self.edit3 = QLineEdit('')
        self.label4 = QLabel(
            'What skills do you have that would help the COOP/Internship')
        self.edit4 = QLineEdit('')
        self.button = QPushButton('Develop')
        layout = QVBoxLayout()
        layout.addWidget(self.label1)
        layout.addWidget(self.edit1)
        layout.addWidget(self.label2)
        layout.addWidget(self.edit2)
        layout.addWidget(self.label3)
        layout.addWidget(self.edit3)
        layout.addWidget(self.label4)
        layout.addWidget(self.edit4)
        layout.addWidget(self.button)
        self.setLayout(layout)
        # Clicking "Develop" generates the cover-letter document.
        self.button.clicked.connect(self.coverlet)

    def coverlet(self):
        """Build the letter from the form fields and save it as a .docx.

        Side effects: writes '<company>_<position>_baranauskas_.docx' into
        the current working directory (expects 'qr_code.png' to exist
        there), prints a confirmation banner, and clears all four fields.
        """
        name = self.edit1.text()
        pos = self.edit2.text()
        intro = self.edit3.text()
        skills = self.edit4.text()
        # Letter body: template fragments interleaved with the user's answers.
        mytext = '\n Dear ' + name + """’s Hiring Team,
 """ + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + """
 """ + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
 """ + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + """ initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
 """ + ' ' + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
 """
        anothertext = """
 Respectfully yours,
 Martynas Baranauskas
 baranauskasm@wit.edu
 781-572-9775
 Personal Website: https://baranauskasm.wixsite.com/mysite
 or scan QR code with smartphone camera
 """
        document = Document()
        p = document.add_paragraph(mytext)
        g = document.add_paragraph(anothertext)
        # QR code image linking to the personal website, fixed at 0.7 inch.
        k = document.add_picture('qr_code.png', width=Inches(0.7))
        filename = name + '_' + pos + '_baranauskas_.docx'
        document.save(filename)
        print('-----------------------------------------------------')
        # NOTE(review): ".doxc" below is a typo for ".docx" in the message
        # text only; the saved file uses the correct extension.
        print(name + '_' + pos + '_baranauskas.doxc document was developed')
        print('------------------------------------------------------')
        # Reset the form so another letter can be generated immediately.
        self.edit1.clear()
        self.edit2.clear()
        self.edit3.clear()
        self.edit4.clear()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the form, and run
    # the event loop until the window closes; its status becomes the exit code.
    app = QApplication(sys.argv)
    form = Form()
    # Fixed initial size, wide enough for the long skills prompt.
    form.resize(1300, 250)
    form.show()
    sys.exit(app.exec_())
<|reserved_special_token_1|>
import sys
from PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton, QVBoxLayout, QLabel, QWidget
from docx import Document
from docx.shared import Inches
class Form(QDialog):
    """Dialog that collects cover-letter details and writes them out as a .docx.

    Four labelled text fields (company, position, introduction, skills) feed
    a letter template; the "Develop" button renders and saves the document.
    """

    def __init__(self, parent=None):
        # Build the widget tree: one QLabel/QLineEdit pair per question,
        # kept as numbered attributes because coverlet() reads
        # self.edit1..self.edit4 by name.
        super(Form, self).__init__(parent)
        self.setWindowTitle('Cover Letter Developer')
        self.label1 = QLabel('Input Company Name')
        self.edit1 = QLineEdit('')
        self.label2 = QLabel('Input Position Title')
        self.edit2 = QLineEdit('')
        self.label3 = QLabel('How did you get introduced to the company?')
        self.edit3 = QLineEdit('')
        self.label4 = QLabel(
            'What skills do you have that would help the COOP/Internship')
        self.edit4 = QLineEdit('')
        self.button = QPushButton('Develop')
        layout = QVBoxLayout()
        layout.addWidget(self.label1)
        layout.addWidget(self.edit1)
        layout.addWidget(self.label2)
        layout.addWidget(self.edit2)
        layout.addWidget(self.label3)
        layout.addWidget(self.edit3)
        layout.addWidget(self.label4)
        layout.addWidget(self.edit4)
        layout.addWidget(self.button)
        self.setLayout(layout)
        # Clicking "Develop" generates the cover-letter document.
        self.button.clicked.connect(self.coverlet)

    def coverlet(self):
        """Build the letter from the form fields and save it as a .docx.

        Side effects: writes '<company>_<position>_baranauskas_.docx' into
        the current working directory (expects 'qr_code.png' to exist
        there), prints a confirmation banner, and clears all four fields.
        """
        name = self.edit1.text()
        pos = self.edit2.text()
        intro = self.edit3.text()
        skills = self.edit4.text()
        # Letter body: template fragments interleaved with the user's answers.
        mytext = '\n Dear ' + name + """’s Hiring Team,
 """ + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + """
 """ + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
 """ + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + """ initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
 """ + ' ' + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
 """
        anothertext = """
 Respectfully yours,
 Martynas Baranauskas
 baranauskasm@wit.edu
 781-572-9775
 Personal Website: https://baranauskasm.wixsite.com/mysite
 or scan QR code with smartphone camera
 """
        document = Document()
        p = document.add_paragraph(mytext)
        g = document.add_paragraph(anothertext)
        # QR code image linking to the personal website, fixed at 0.7 inch.
        k = document.add_picture('qr_code.png', width=Inches(0.7))
        filename = name + '_' + pos + '_baranauskas_.docx'
        document.save(filename)
        print('-----------------------------------------------------')
        # NOTE(review): ".doxc" below is a typo for ".docx" in the message
        # text only; the saved file uses the correct extension.
        print(name + '_' + pos + '_baranauskas.doxc document was developed')
        print('------------------------------------------------------')
        # Reset the form so another letter can be generated immediately.
        self.edit1.clear()
        self.edit2.clear()
        self.edit3.clear()
        self.edit4.clear()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the form, and run
    # the event loop until the window closes; its status becomes the exit code.
    app = QApplication(sys.argv)
    form = Form()
    # Fixed initial size, wide enough for the long skills prompt.
    form.resize(1300, 250)
    form.show()
    sys.exit(app.exec_())
<|reserved_special_token_1|>
import sys
from PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton,QVBoxLayout, QLabel, QWidget
from docx import Document
from docx.shared import Inches
class Form(QDialog):
    """Dialog that collects cover-letter details and saves them as a .docx file."""
    def __init__(self, parent=None):
        """Create the input widgets and stack them vertically."""
        super(Form, self).__init__(parent)
        # Create the widgets: one QLabel/QLineEdit pair per question.
        # coverlet() reads self.edit1..self.edit4 by name.
        self.setWindowTitle("Cover Letter Developer")
        self.label1 = QLabel('Input Company Name')
        self.edit1 = QLineEdit("")
        self.label2 = QLabel('Input Position Title')
        self.edit2 = QLineEdit("")
        self.label3 = QLabel('How did you get introduced to the company?')
        self.edit3 = QLineEdit("")
        self.label4 = QLabel('What skills do you have that would help the COOP/Internship')
        self.edit4 = QLineEdit("")
        self.button = QPushButton("Develop")
        # Create the layout and add the widgets in display order.
        layout = QVBoxLayout()
        layout.addWidget(self.label1)
        layout.addWidget(self.edit1)
        layout.addWidget(self.label2)
        layout.addWidget(self.edit2)
        layout.addWidget(self.label3)
        layout.addWidget(self.edit3)
        layout.addWidget(self.label4)
        layout.addWidget(self.edit4)
        layout.addWidget(self.button)
        # Install the layout on the dialog.
        self.setLayout(layout)
        # Clicking "Develop" generates the cover-letter document.
        self.button.clicked.connect(self.coverlet)
    def coverlet(self):
        """Build the letter from the form fields and save it as a .docx.

        Side effects: writes '<company>_<position>_baranauskas_.docx' into
        the current working directory (expects 'qr_code.png' to exist
        there), prints a confirmation banner, and clears all four fields.
        """
        name = self.edit1.text()
        pos = self.edit2.text()
        intro = self.edit3.text()
        skills = self.edit4.text()
        # Letter body: template fragments interleaved with the user's answers.
        mytext = """
 Dear """ + name + """’s Hiring Team,
 \n
 """ + """ """ + """ I am writing to apply to the """ + pos + """ Intern/COOP position at """ + name + """. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. """ + intro + """
 """+ """As an intern at """ + name + """ , I will bring my toolset of """ + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
 """ + """ """ + """ As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to """ + name + """. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for """ + name + """ and gain experience in engineering and further """+ name +""" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
 """ + """ """ + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
 """
        anothertext = """
 Respectfully yours,
 Martynas Baranauskas
 baranauskasm@wit.edu
 781-572-9775
 Personal Website: https://baranauskasm.wixsite.com/mysite
 or scan QR code with smartphone camera
 """
        document = Document()
        p = document.add_paragraph(mytext)
        g = document.add_paragraph(anothertext)
        # QR code image linking to the personal website, fixed at 0.7 inch.
        k = document.add_picture('qr_code.png', width=Inches(0.7))
        # document.add_page_break()
        # Save '<company>_<position>_baranauskas_.docx' to the working directory.
        filename = name + '_' + pos + '_baranauskas_.docx'
        # filepath = r'C:\Users\baranauskasm\Desktop\COOP Stuff\Summer 2020 COOP (future)\cover letters\automated cover letters'
        document.save(filename)
        print("-----------------------------------------------------")
        # NOTE(review): ".doxc" below is a typo for ".docx" in the message
        # text only; the saved file uses the correct extension.
        print(name + "_" + pos + "_baranauskas.doxc document was developed")
        print("------------------------------------------------------")
        # Clear the form for another submission.
        self.edit1.clear()
        self.edit2.clear()
        self.edit3.clear()
        self.edit4.clear()
if __name__ == '__main__':
    # Standard Qt bootstrap.
    # Create the Qt application (command-line args forwarded to Qt).
    app = QApplication(sys.argv)
    # Create and show the form.
    form = Form()
    # Fixed initial size, wide enough for the long skills prompt.
    form.resize(1300,250)
    form.show()
    # Run the main Qt event loop; its status becomes the process exit code.
    sys.exit(app.exec_())
|
flexible
|
{
"blob_id": "bad13218a7a9e687fbd29099ca80771296789d36",
"index": 1321,
"step-1": "<mask token>\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = '\\n Dear ' + name + \"\"\"’s Hiring Team,\n \n\n \"\"\" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + \"\"\" \n \n \"\"\" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). 
My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + \"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + ' ' + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.\n\n \"\"\"\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\nbaranauskasm@wit.edu\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n filename = name + '_' + pos + '_baranauskas_.docx'\n document.save(filename)\n print('-----------------------------------------------------')\n print(name + '_' + pos + '_baranauskas.doxc document was developed')\n print('------------------------------------------------------')\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = '\\n Dear ' + name + \"\"\"’s Hiring Team,\n \n\n \"\"\" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + \"\"\" \n \n \"\"\" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). 
My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + \"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + ' ' + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.\n\n \"\"\"\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\nbaranauskasm@wit.edu\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n filename = name + '_' + pos + '_baranauskas_.docx'\n document.save(filename)\n print('-----------------------------------------------------')\n print(name + '_' + pos + '_baranauskas.doxc document was developed')\n print('------------------------------------------------------')\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n form = Form()\n form.resize(1300, 250)\n form.show()\n sys.exit(app.exec_())\n",
"step-4": "import sys\nfrom PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton, QVBoxLayout, QLabel, QWidget\nfrom docx import Document\nfrom docx.shared import Inches\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = '\\n Dear ' + name + \"\"\"’s Hiring Team,\n \n\n \"\"\" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + \"\"\" \n \n \"\"\" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. 
Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + \"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + ' ' + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. 
Thank you for your time and consideration.\n\n \"\"\"\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\nbaranauskasm@wit.edu\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n filename = name + '_' + pos + '_baranauskas_.docx'\n document.save(filename)\n print('-----------------------------------------------------')\n print(name + '_' + pos + '_baranauskas.doxc document was developed')\n print('------------------------------------------------------')\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n form = Form()\n form.resize(1300, 250)\n form.show()\n sys.exit(app.exec_())\n",
"step-5": "import sys\nfrom PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton,QVBoxLayout, QLabel, QWidget\nfrom docx import Document\nfrom docx.shared import Inches\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n #set the size\n #Creat widgets\n self.setWindowTitle(\"Cover Letter Developer\")\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit(\"\")\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit(\"\")\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit(\"\")\n self.label4 = QLabel('What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit(\"\")\n self.button = QPushButton(\"Develop\")\n # Creat layout and add widgets\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n #set dialog layout\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = \"\"\"\n Dear \"\"\" + name + \"\"\"’s Hiring Team,\n \\n\n \"\"\" + \"\"\" \"\"\" + \"\"\" I am writing to apply to the \"\"\" + pos + \"\"\" Intern/COOP position at \"\"\" + name + \"\"\". I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. \"\"\" + intro + \"\"\" \n \n \"\"\"+ \"\"\"As an intern at \"\"\" + name + \"\"\" , I will bring my toolset of \"\"\" + skills + \"\"\". 
Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + \"\"\" \"\"\" + \"\"\" As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to \"\"\" + name + \"\"\". Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for \"\"\" + name + \"\"\" and gain experience in engineering and further \"\"\"+ name +\"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + \"\"\" \"\"\" + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. 
Thank you for your time and consideration.\n\n \"\"\"\n\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\nbaranauskasm@wit.edu\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n # document.add_page_break()\n\n # the saving of the document and the path to the\n filename = name + '_' + pos + '_baranauskas_.docx'\n # filepath = r'C:\\Users\\baranauskasm\\Desktop\\COOP Stuff\\Summer 2020 COOP (future)\\cover letters\\automated cover letters'\n document.save(filename)\n print(\"-----------------------------------------------------\")\n print(name + \"_\" + pos + \"_baranauskas.doxc document was developed\")\n print(\"------------------------------------------------------\")\n\n #clear the form for another submition\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\nif __name__ == '__main__':\n #or you can do a automatic one with something like\n # Create the Qt Application\n app = QApplication(sys.argv)\n # Create and show the form\n form = Form()\n #the size of the gui\n form.resize(1300,250)\n form.show()\n # Run the main Qt loop\n sys.exit(app.exec_())\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
@app.route('/')
def addRegion():
return render_template('Website WordCount.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def addRegion():
return render_template('Website WordCount.html')
@app.route('/output_data', methods=['POST', 'GET'])
def output_data():
unique_links = []
link_len = {}
out_arr = []
if request.method == 'POST':
url = request.form['url']
main = re.sub('([\\w:///.]+com|info|in|org)([\\w///?/=/&/_-]*)',
'\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)
req = Request(main, headers={'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'
})
sample = urlopen(req)
soap = bs(sample, 'lxml')
for data in soap.find_all('a', href=True):
links = data['href']
links = links if links.startswith(main) else str(main) + str(links
) if links.startswith('/') else str(main) + '/' + str(links)
if links in unique_links:
continue
unique_links.append(links)
req = Request(links, headers={'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'
})
sample1 = urlopen(req)
soap1 = bs(sample1, 'lxml')
[x.extract() for x in soap1.findAll(['script', 'style'])]
data = soap1.text
stri = re.sub('[.,/!"@:+*&^%~#=-_]', '', data)
stri = stri.split()
num_word = len(stri)
if num_word < 5:
continue
link_len['link'] = links
link_len['wordCount'] = num_word
out_arr.append(link_len)
print(out_arr)
return out_arr
if __name__ == '__main__':
app.run(debug=True, host='192.168.43.164')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def addRegion():
return render_template('Website WordCount.html')
@app.route('/output_data', methods=['POST', 'GET'])
def output_data():
unique_links = []
link_len = {}
out_arr = []
if request.method == 'POST':
url = request.form['url']
main = re.sub('([\\w:///.]+com|info|in|org)([\\w///?/=/&/_-]*)',
'\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)
req = Request(main, headers={'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'
})
sample = urlopen(req)
soap = bs(sample, 'lxml')
for data in soap.find_all('a', href=True):
links = data['href']
links = links if links.startswith(main) else str(main) + str(links
) if links.startswith('/') else str(main) + '/' + str(links)
if links in unique_links:
continue
unique_links.append(links)
req = Request(links, headers={'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'
})
sample1 = urlopen(req)
soap1 = bs(sample1, 'lxml')
[x.extract() for x in soap1.findAll(['script', 'style'])]
data = soap1.text
stri = re.sub('[.,/!"@:+*&^%~#=-_]', '', data)
stri = stri.split()
num_word = len(stri)
if num_word < 5:
continue
link_len['link'] = links
link_len['wordCount'] = num_word
out_arr.append(link_len)
print(out_arr)
return out_arr
if __name__ == '__main__':
app.run(debug=True, host='192.168.43.164')
<|reserved_special_token_1|>
from flask import request, Flask, render_template
from bs4 import BeautifulSoup as bs
from urllib.request import Request, urlopen
import re
app = Flask(__name__)
@app.route('/')
def addRegion():
return render_template('Website WordCount.html')
@app.route('/output_data', methods=['POST', 'GET'])
def output_data():
unique_links = []
link_len = {}
out_arr = []
if request.method == 'POST':
url = request.form['url']
main = re.sub('([\\w:///.]+com|info|in|org)([\\w///?/=/&/_-]*)',
'\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)
req = Request(main, headers={'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'
})
sample = urlopen(req)
soap = bs(sample, 'lxml')
for data in soap.find_all('a', href=True):
links = data['href']
links = links if links.startswith(main) else str(main) + str(links
) if links.startswith('/') else str(main) + '/' + str(links)
if links in unique_links:
continue
unique_links.append(links)
req = Request(links, headers={'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'
})
sample1 = urlopen(req)
soap1 = bs(sample1, 'lxml')
[x.extract() for x in soap1.findAll(['script', 'style'])]
data = soap1.text
stri = re.sub('[.,/!"@:+*&^%~#=-_]', '', data)
stri = stri.split()
num_word = len(stri)
if num_word < 5:
continue
link_len['link'] = links
link_len['wordCount'] = num_word
out_arr.append(link_len)
print(out_arr)
return out_arr
if __name__ == '__main__':
app.run(debug=True, host='192.168.43.164')
<|reserved_special_token_1|>
from flask import request,Flask, render_template
from bs4 import BeautifulSoup as bs
from urllib.request import Request,urlopen
import re
app = Flask(__name__)
@app.route('/')
def addRegion():
    """Serve the landing page containing the URL-input form."""
    template_name = 'Website WordCount.html'
    return render_template(template_name)
@app.route('/output_data', methods=['POST','GET'])
def output_data():
    """Crawl same-domain links of the submitted site and count words per page.

    Reads the target URL from the POSTed ``url`` form field, normalises it to
    its root domain, follows every anchor on the landing page, and returns a
    list of ``{'link': ..., 'wordCount': ...}`` dicts. Pages with fewer than
    5 words are skipped.
    """
    unique_links = []
    out_arr = []
    # Browser-like User-Agent so sites that block default urllib clients respond.
    headers = {'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30"}
    if request.method == 'POST':
        url = request.form['url']
        # Strip any path/query so only scheme+domain remains. The TLD
        # alternatives are grouped with (?:...) -- the original ungrouped
        # form let 'info'/'in'/'org' match bare substrings anywhere.
        main = re.sub(r"([\w:/.]+(?:com|info|in|org))([\w/?=&_-]*)", r"\1",
                      url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)
        req = Request(main, headers=headers)
        sample = urlopen(req)
        soap = bs(sample, "lxml")
        for data in soap.find_all('a', href=True):
            links = data['href']
            # Resolve relative hrefs against the root domain.
            links = links if links.startswith(main) else (str(main) + str(links) if links.startswith('/') else str(main) + "/" + str(links))
            if links in unique_links:
                continue
            unique_links.append(links)
            req = Request(links, headers=headers)
            sample1 = urlopen(req)
            soap1 = bs(sample1, "lxml")
            # Remove script/style nodes so their contents don't inflate the count.
            [x.extract() for x in soap1.findAll(['script', 'style'])]
            data = soap1.text
            # Hyphen is placed LAST in the class so it is a literal; the
            # original '#=-_' accidentally formed the range '=' .. '_',
            # which also stripped every uppercase letter.
            stri = re.sub('[.,/!"@:+*&^%~#=_-]', '', data)
            stri = stri.split()
            num_word = len(stri)
            if num_word < 5:
                continue
            # Append a FRESH dict per link: the original reused one shared
            # dict, so every list entry aliased the same (last) result.
            out_arr.append({'link': links, 'wordCount': num_word})
    print(out_arr)
    return out_arr
# Run the Flask development server bound to a LAN address so other devices
# on the network can reach it. Debug mode is for development only -- it
# enables the interactive debugger and must not be used in production.
if __name__ == '__main__':
    app.run(debug = True,host='192.168.43.164')
|
flexible
|
{
"blob_id": "11dfb09286b8a5742550b5300c776ed82e69ead5",
"index": 2577,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\n@app.route('/output_data', methods=['POST', 'GET'])\ndef output_data():\n unique_links = []\n link_len = {}\n out_arr = []\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub('([\\\\w:///.]+com|info|in|org)([\\\\w///?/=/&/_-]*)',\n '\\\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req = Request(main, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample = urlopen(req)\n soap = bs(sample, 'lxml')\n for data in soap.find_all('a', href=True):\n links = data['href']\n links = links if links.startswith(main) else str(main) + str(links\n ) if links.startswith('/') else str(main) + '/' + str(links)\n if links in unique_links:\n continue\n unique_links.append(links)\n req = Request(links, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample1 = urlopen(req)\n soap1 = bs(sample1, 'lxml')\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data = soap1.text\n stri = re.sub('[.,/!\"@:+*&^%~#=-_]', '', data)\n stri = stri.split()\n num_word = len(stri)\n if num_word < 5:\n continue\n link_len['link'] = links\n link_len['wordCount'] = num_word\n out_arr.append(link_len)\n print(out_arr)\n return out_arr\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='192.168.43.164')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\n@app.route('/output_data', methods=['POST', 'GET'])\ndef output_data():\n unique_links = []\n link_len = {}\n out_arr = []\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub('([\\\\w:///.]+com|info|in|org)([\\\\w///?/=/&/_-]*)',\n '\\\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req = Request(main, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample = urlopen(req)\n soap = bs(sample, 'lxml')\n for data in soap.find_all('a', href=True):\n links = data['href']\n links = links if links.startswith(main) else str(main) + str(links\n ) if links.startswith('/') else str(main) + '/' + str(links)\n if links in unique_links:\n continue\n unique_links.append(links)\n req = Request(links, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample1 = urlopen(req)\n soap1 = bs(sample1, 'lxml')\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data = soap1.text\n stri = re.sub('[.,/!\"@:+*&^%~#=-_]', '', data)\n stri = stri.split()\n num_word = len(stri)\n if num_word < 5:\n continue\n link_len['link'] = links\n link_len['wordCount'] = num_word\n out_arr.append(link_len)\n print(out_arr)\n return out_arr\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='192.168.43.164')\n",
"step-4": "from flask import request, Flask, render_template\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import Request, urlopen\nimport re\napp = Flask(__name__)\n\n\n@app.route('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\n@app.route('/output_data', methods=['POST', 'GET'])\ndef output_data():\n unique_links = []\n link_len = {}\n out_arr = []\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub('([\\\\w:///.]+com|info|in|org)([\\\\w///?/=/&/_-]*)',\n '\\\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req = Request(main, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample = urlopen(req)\n soap = bs(sample, 'lxml')\n for data in soap.find_all('a', href=True):\n links = data['href']\n links = links if links.startswith(main) else str(main) + str(links\n ) if links.startswith('/') else str(main) + '/' + str(links)\n if links in unique_links:\n continue\n unique_links.append(links)\n req = Request(links, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample1 = urlopen(req)\n soap1 = bs(sample1, 'lxml')\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data = soap1.text\n stri = re.sub('[.,/!\"@:+*&^%~#=-_]', '', data)\n stri = stri.split()\n num_word = len(stri)\n if num_word < 5:\n continue\n link_len['link'] = links\n link_len['wordCount'] = num_word\n out_arr.append(link_len)\n print(out_arr)\n return out_arr\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='192.168.43.164')\n",
"step-5": "from flask import request,Flask, render_template\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import Request,urlopen\nimport re\n\napp = Flask(__name__)\n@app.route('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\n@app.route('/output_data', methods=['POST','GET'])\n\ndef output_data():\n unique_links=[]\n link_len={}\n out_arr=[]\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub(r\"([\\w:///.]+com|info|in|org)([\\w///?/=/&/_-]*)\",r\"\\1\",url,0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req =Request(main, headers={'User-Agent' : \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30\"})\n sample=urlopen(req)\n soap=bs(sample,\"lxml\")\n for data in soap.find_all('a', href=True):\n links=data['href']\n links=links if links.startswith(main) else (str(main)+str(links) if links.startswith( '/' ) else str(main)+\"/\"+str(links))\n if(links in unique_links):\n continue\n unique_links.append(links)\n req =Request(links, headers={'User-Agent' : \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30\"})\n sample1=urlopen(req)\n soap1=bs(sample1,\"lxml\")\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data=soap1.text\n stri=re.sub('[.,/!\"@:+*&^%~#=-_]','',data)\n stri=stri.split()\n num_word=len(stri)\n if(num_word<5):\n continue\n link_len['link']=links\n link_len['wordCount']=num_word\n out_arr.append(link_len)\n print(out_arr)\n return(out_arr)\n\nif __name__ == '__main__':\n app.run(debug = True,host='192.168.43.164')\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for [ix, iy], [jx, jy] in itertools.combinations(a, 2):
ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2
print(ans / n)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n = int(input())
a = [list(map(int, input().split(' '))) for i in range(n)]
ans = 0
for [ix, iy], [jx, jy] in itertools.combinations(a, 2):
ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2
print(ans / n)
<|reserved_special_token_1|>
import itertools
n = int(input())
a = [list(map(int, input().split(' '))) for i in range(n)]
ans = 0
for [ix, iy], [jx, jy] in itertools.combinations(a, 2):
ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2
print(ans / n)
<|reserved_special_token_1|>
import itertools
n = int(input())
a = [list(map(int, input().split(" "))) for i in range(n)]
ans = 0
for [ix,iy], [jx, jy] in itertools.combinations(a, 2):
ans += ((jx-ix)**2+(jy-iy)**2)**0.5*2
print(ans/n)
|
flexible
|
{
"blob_id": "a210a015284130f23bfec99898f2f21163a33a67",
"index": 9897,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-3": "<mask token>\nn = int(input())\na = [list(map(int, input().split(' '))) for i in range(n)]\nans = 0\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-4": "import itertools\nn = int(input())\na = [list(map(int, input().split(' '))) for i in range(n)]\nans = 0\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-5": "import itertools\nn = int(input())\na = [list(map(int, input().split(\" \"))) for i in range(n)]\nans = 0\nfor [ix,iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx-ix)**2+(jy-iy)**2)**0.5*2\nprint(ans/n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import fnmatch
import hashlib
from .mplog import MachopLog
from .utils import MachopProcess, wait_for_interrupt
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = (globs, cmds, path, queue)
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
def set_queue(self, queue):
self.queue = queue
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = "watching " + log.yellow(self.watchpath)
for match in self.globs:
msg += " for [" + log.yellow(match) + "]"
msg += "..."
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
def has_changed(self, key):
hasher = hashlib.md5()
with open(key, 'rb') as modfile:
hasher.update(modfile.read())
xhash = hasher.hexdigest()
if self.hashmap.get(key, "") != xhash:
self.hashmap[key] = xhash
return True
return False
|
normal
|
{
"blob_id": "4e30f0a9b420123c28858aad2a71040dcc952829",
"index": 1391,
"step-1": "<mask token>\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n <mask token>\n <mask token>\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = globs, cmds, path, queue\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n <mask token>\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = globs, cmds, path, queue\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n\n def set_queue(self, queue):\n self.queue = queue\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n\n def has_changed(self, key):\n hasher = hashlib.md5()\n with open(key, 'rb') as modfile:\n hasher.update(modfile.read())\n xhash = hasher.hexdigest()\n if self.hashmap.get(key, '') != xhash:\n self.hashmap[key] = xhash\n return True\n return False\n",
"step-4": "import fnmatch\nimport hashlib\nfrom .mplog import MachopLog\nfrom .utils import MachopProcess, wait_for_interrupt\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\n\nclass MachopWatchCommand(MachopProcess):\n\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = globs, cmds, path, queue\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n\n def set_queue(self, queue):\n self.queue = queue\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = 'watching ' + log.yellow(self.watchpath)\n for match in self.globs:\n msg += ' for [' + log.yellow(match) + ']'\n msg += '...'\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n\n def has_changed(self, key):\n hasher = hashlib.md5()\n with open(key, 'rb') 
as modfile:\n hasher.update(modfile.read())\n xhash = hasher.hexdigest()\n if self.hashmap.get(key, '') != xhash:\n self.hashmap[key] = xhash\n return True\n return False\n",
"step-5": "\nimport fnmatch\nimport hashlib\n\nfrom .mplog import MachopLog\nfrom .utils import MachopProcess, wait_for_interrupt\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\n\nclass MachopWatchCommand(MachopProcess):\n\n class MachopHandler(PatternMatchingEventHandler):\n \"\"\" watcher for a file system event \"\"\"\n def on_modified(self, event):\n if event.is_directory:\n return\n source = event.src_path\n self._watcher.modified(source)\n\n def __init__(self, globs=None, cmds=None, path=None, queue=None):\n super(MachopWatchCommand, self).__init__()\n recreate = (globs, cmds, path, queue)\n self._safe_process(queue=queue, cfgpath=path, init=recreate)\n self.globs = globs if globs else []\n self.actions = cmds if cmds else []\n self.watchpath = path\n self.queue = queue\n self.hashmap = {}\n self.log = None\n\n def set_queue(self, queue):\n self.queue = queue\n\n def modified(self, eventsrc):\n \"\"\"\n @@@ needs proper event handling for actions!!!\n \"\"\"\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()\n\n def announce(self, nl=False):\n log = self.log\n msg = \"watching \" + log.yellow(self.watchpath)\n for match in self.globs:\n msg += \" for [\" + log.yellow(match) + \"]\"\n msg += \"...\"\n if nl:\n msg += '\\n'\n log.out(msg)\n\n def run(self):\n self.log = MachopLog(self.queue, 'watch')\n self.handler = self.MachopHandler(patterns=self.globs)\n self.handler._watcher = self\n self.observer = Observer()\n self.observer.schedule(self.handler, self.watchpath, recursive=True)\n self.observer.start()\n self.announce(True)\n wait_for_interrupt(self.observer)\n self.observer.stop()\n self.observer.join(3)\n\n def has_changed(self, key):\n hasher = hashlib.md5()\n with 
open(key, 'rb') as modfile:\n hasher.update(modfile.read())\n xhash = hasher.hexdigest()\n if self.hashmap.get(key, \"\") != xhash:\n self.hashmap[key] = xhash\n return True\n return False\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
import unittest
from pattern.multiplier import Multiplier, FixedWidth, Range
from pattern.multiplier import WHATEVER, ONE_OR_MORE
class TestMultipler(unittest.TestCase):
def test__create__fixed_width(self):
self.assertIsInstance(Multiplier.create(23), FixedWidth)
def test__create__range(self):
self.assertIsInstance(Multiplier.create((23, 27)), Range)
def test__create__multiplier(self):
self.assertEqual(Multiplier.create(WHATEVER), WHATEVER)
self.assertEqual(Multiplier.create(ONE_OR_MORE), ONE_OR_MORE)
def test__create__bad_argument(self):
self.assertRaises(ValueError, Multiplier.create, '1234')
class TestWhatever(unittest.TestCase):
def test_compile(self):
self.assertEqual(WHATEVER.compile(), '*')
class TestOneOrMore(unittest.TestCase):
def test_compile(self):
self.assertEqual(ONE_OR_MORE.compile(), '+')
class TestFixedWidth(unittest.TestCase):
def test_compile(self):
self.assertEqual(FixedWidth(23).compile(), '{23}')
class TestRange(unittest.TestCase):
def test_compile(self):
self.assertEqual(Range((23, 27)).compile(), '{23,27}')
|
normal
|
{
"blob_id": "5a7e535f2ae585f862cc792dab77f2fe0584fddc",
"index": 9986,
"step-1": "<mask token>\n\n\nclass TestWhatever(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(WHATEVER.compile(), '*')\n\n\nclass TestOneOrMore(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(ONE_OR_MORE.compile(), '+')\n\n\nclass TestFixedWidth(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(FixedWidth(23).compile(), '{23}')\n\n\nclass TestRange(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(Range((23, 27)).compile(), '{23,27}')\n",
"step-2": "<mask token>\n\n\nclass TestMultipler(unittest.TestCase):\n <mask token>\n\n def test__create__range(self):\n self.assertIsInstance(Multiplier.create((23, 27)), Range)\n <mask token>\n <mask token>\n\n\nclass TestWhatever(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(WHATEVER.compile(), '*')\n\n\nclass TestOneOrMore(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(ONE_OR_MORE.compile(), '+')\n\n\nclass TestFixedWidth(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(FixedWidth(23).compile(), '{23}')\n\n\nclass TestRange(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(Range((23, 27)).compile(), '{23,27}')\n",
"step-3": "<mask token>\n\n\nclass TestMultipler(unittest.TestCase):\n\n def test__create__fixed_width(self):\n self.assertIsInstance(Multiplier.create(23), FixedWidth)\n\n def test__create__range(self):\n self.assertIsInstance(Multiplier.create((23, 27)), Range)\n\n def test__create__multiplier(self):\n self.assertEqual(Multiplier.create(WHATEVER), WHATEVER)\n self.assertEqual(Multiplier.create(ONE_OR_MORE), ONE_OR_MORE)\n <mask token>\n\n\nclass TestWhatever(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(WHATEVER.compile(), '*')\n\n\nclass TestOneOrMore(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(ONE_OR_MORE.compile(), '+')\n\n\nclass TestFixedWidth(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(FixedWidth(23).compile(), '{23}')\n\n\nclass TestRange(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(Range((23, 27)).compile(), '{23,27}')\n",
"step-4": "<mask token>\n\n\nclass TestMultipler(unittest.TestCase):\n\n def test__create__fixed_width(self):\n self.assertIsInstance(Multiplier.create(23), FixedWidth)\n\n def test__create__range(self):\n self.assertIsInstance(Multiplier.create((23, 27)), Range)\n\n def test__create__multiplier(self):\n self.assertEqual(Multiplier.create(WHATEVER), WHATEVER)\n self.assertEqual(Multiplier.create(ONE_OR_MORE), ONE_OR_MORE)\n\n def test__create__bad_argument(self):\n self.assertRaises(ValueError, Multiplier.create, '1234')\n\n\nclass TestWhatever(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(WHATEVER.compile(), '*')\n\n\nclass TestOneOrMore(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(ONE_OR_MORE.compile(), '+')\n\n\nclass TestFixedWidth(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(FixedWidth(23).compile(), '{23}')\n\n\nclass TestRange(unittest.TestCase):\n\n def test_compile(self):\n self.assertEqual(Range((23, 27)).compile(), '{23,27}')\n",
"step-5": "import unittest\nfrom pattern.multiplier import Multiplier, FixedWidth, Range\nfrom pattern.multiplier import WHATEVER, ONE_OR_MORE\n\n\nclass TestMultipler(unittest.TestCase):\n def test__create__fixed_width(self):\n self.assertIsInstance(Multiplier.create(23), FixedWidth)\n\n def test__create__range(self):\n self.assertIsInstance(Multiplier.create((23, 27)), Range)\n\n def test__create__multiplier(self):\n self.assertEqual(Multiplier.create(WHATEVER), WHATEVER)\n self.assertEqual(Multiplier.create(ONE_OR_MORE), ONE_OR_MORE)\n\n def test__create__bad_argument(self):\n self.assertRaises(ValueError, Multiplier.create, '1234')\n\n\nclass TestWhatever(unittest.TestCase):\n def test_compile(self):\n self.assertEqual(WHATEVER.compile(), '*')\n\n\nclass TestOneOrMore(unittest.TestCase):\n def test_compile(self):\n self.assertEqual(ONE_OR_MORE.compile(), '+')\n\n\nclass TestFixedWidth(unittest.TestCase):\n def test_compile(self):\n self.assertEqual(FixedWidth(23).compile(), '{23}')\n\n\nclass TestRange(unittest.TestCase):\n def test_compile(self):\n self.assertEqual(Range((23, 27)).compile(), '{23,27}')\n\n",
"step-ids": [
8,
10,
12,
13,
15
]
}
|
[
8,
10,
12,
13,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ':' + string
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ':' + string
return res
def decode(string):
strs = []
i = 0
while i < len(string):
index = string.find(':', i)
size = int(string[i:index])
strs.append(string[index + 1:index + 1 + size])
i = index + 1 + size
return strs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ':' + string
return res
def decode(string):
strs = []
i = 0
while i < len(string):
index = string.find(':', i)
size = int(string[i:index])
strs.append(string[index + 1:index + 1 + size])
i = index + 1 + size
return strs
if __name__ == '__main__':
strs = 'keno is awesome'
r = encode(strs)
print(r)
r = decode(r)
print(r)
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding: utf-8
"""
@author: swensun
@github:https://github.com/yunshuipiao
@software: python
@file: encode_decode.py
@desc: 字符串编解码
@hint:
"""
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ":" + string
return res
def decode(string):
strs = []
i = 0
while i < len(string):
index = string.find(":", i)
# print(index)
size = int(string[i:index])
strs.append(string[index + 1: index + 1 + size])
i = index + 1 + size
return strs
if __name__ == '__main__':
strs = "keno is awesome"
r = encode(strs)
print(r)
r = decode(r)
print(r)
|
flexible
|
{
"blob_id": "2561db1264fe399db85460e9f32213b70ddf03ff",
"index": 1864,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + ':' + string\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + ':' + string\n return res\n\n\ndef decode(string):\n strs = []\n i = 0\n while i < len(string):\n index = string.find(':', i)\n size = int(string[i:index])\n strs.append(string[index + 1:index + 1 + size])\n i = index + 1 + size\n return strs\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + ':' + string\n return res\n\n\ndef decode(string):\n strs = []\n i = 0\n while i < len(string):\n index = string.find(':', i)\n size = int(string[i:index])\n strs.append(string[index + 1:index + 1 + size])\n i = index + 1 + size\n return strs\n\n\nif __name__ == '__main__':\n strs = 'keno is awesome'\n r = encode(strs)\n print(r)\n r = decode(r)\n print(r)\n",
"step-5": "#!/usr/bin/env python\n\n# encoding: utf-8\n\n\"\"\"\n@author: swensun\n\n@github:https://github.com/yunshuipiao\n\n@software: python\n\n@file: encode_decode.py\n\n@desc: 字符串编解码\n\n@hint:\n\"\"\"\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + \":\" + string\n return res\n\ndef decode(string):\n strs = []\n i = 0\n while i < len(string):\n index = string.find(\":\", i)\n # print(index)\n size = int(string[i:index])\n strs.append(string[index + 1: index + 1 + size])\n i = index + 1 + size\n return strs\n\n\nif __name__ == '__main__':\n strs = \"keno is awesome\"\n r = encode(strs)\n print(r)\n r = decode(r)\n print(r)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def vector_to_message(vector):
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
message_words = []
for vocab_record, vector_enterance in zip(vocab, vector):
is_trigger_word = bool(vector_enterance)
word = vocab_record.split()[1]
if is_trigger_word:
message_words.append(word)
return ' '.join(message_words)
<|reserved_special_token_0|>
def one_hot_convert(message):
message_words = message.split()
message_words.sort()
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
one_hot = []
for record in vocab:
word = record.split()[1]
one_hot.append(int(word in message_words))
pass
return np.array([one_hot])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def vector_to_message(vector):
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
message_words = []
for vocab_record, vector_enterance in zip(vocab, vector):
is_trigger_word = bool(vector_enterance)
word = vocab_record.split()[1]
if is_trigger_word:
message_words.append(word)
return ' '.join(message_words)
<|reserved_special_token_0|>
def one_hot_convert(message):
message_words = message.split()
message_words.sort()
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
one_hot = []
for record in vocab:
word = record.split()[1]
one_hot.append(int(word in message_words))
pass
return np.array([one_hot])
one_hot_convert()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
train_data = loadmat('data/spamTrain.mat')
x = train_data['X']
y = train_data['y']
test_data = loadmat('data/spamTest.mat')
x_test = test_data['Xtest']
y_test = test_data['ytest']
def vector_to_message(vector):
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
message_words = []
for vocab_record, vector_enterance in zip(vocab, vector):
is_trigger_word = bool(vector_enterance)
word = vocab_record.split()[1]
if is_trigger_word:
message_words.append(word)
return ' '.join(message_words)
message = vector_to_message(x_test[0])
def one_hot_convert(message):
message_words = message.split()
message_words.sort()
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
one_hot = []
for record in vocab:
word = record.split()[1]
one_hot.append(int(word in message_words))
pass
return np.array([one_hot])
one_hot_convert()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.io import loadmat
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import copy
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import scipy.optimize
import networkx as nx
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
train_data = loadmat('data/spamTrain.mat')
x = train_data['X']
y = train_data['y']
test_data = loadmat('data/spamTest.mat')
x_test = test_data['Xtest']
y_test = test_data['ytest']
def vector_to_message(vector):
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
message_words = []
for vocab_record, vector_enterance in zip(vocab, vector):
is_trigger_word = bool(vector_enterance)
word = vocab_record.split()[1]
if is_trigger_word:
message_words.append(word)
return ' '.join(message_words)
message = vector_to_message(x_test[0])
def one_hot_convert(message):
message_words = message.split()
message_words.sort()
vocab_file = open('data/vocab.txt', 'r')
vocab = vocab_file.readlines()
one_hot = []
for record in vocab:
word = record.split()[1]
one_hot.append(int(word in message_words))
pass
return np.array([one_hot])
one_hot_convert()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.io import loadmat
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import copy
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import scipy.optimize
import networkx as nx
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# task 13
# Загрузите данные spamTrain.mat из файла.
train_data = loadmat('data/spamTrain.mat')
x = train_data["X"]
y = train_data["y"]
test_data = loadmat('data/spamTest.mat')
x_test = test_data["Xtest"]
y_test = test_data["ytest"]
def vector_to_message(vector):
vocab_file = open("data/vocab.txt", "r")
vocab = vocab_file.readlines()
# one_hot = [int(record.split()[1] in message) for record in vocab]
message_words = []
for vocab_record, vector_enterance in zip(vocab, vector):
is_trigger_word = bool(vector_enterance)
word = vocab_record.split()[1]
if is_trigger_word:
message_words.append(word)
return " ".join(message_words)
message = vector_to_message(x_test[0])
def one_hot_convert(message):
message_words = message.split()
message_words.sort()
vocab_file = open("data/vocab.txt", "r")
vocab = vocab_file.readlines()
# one_hot = [int(record.split()[1] in message) for record in vocab]
one_hot = []
for record in vocab:
word = record.split()[1]
one_hot.append(int(word in message_words))
pass
return np.array([one_hot])
one_hot_convert()
|
flexible
|
{
"blob_id": "f5820824b5b7e473b79b5dfee2f203684c3755be",
"index": 5154,
"step-1": "<mask token>\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\n<mask token>\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\n<mask token>\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\none_hot_convert()\n",
"step-3": "<mask token>\ntrain_data = loadmat('data/spamTrain.mat')\nx = train_data['X']\ny = train_data['y']\ntest_data = loadmat('data/spamTest.mat')\nx_test = test_data['Xtest']\ny_test = test_data['ytest']\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\nmessage = vector_to_message(x_test[0])\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\none_hot_convert()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.io import loadmat\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nfrom matplotlib import cm\nfrom matplotlib.animation import FuncAnimation\nimport scipy.optimize\nimport networkx as nx\nfrom sklearn import svm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\ntrain_data = loadmat('data/spamTrain.mat')\nx = train_data['X']\ny = train_data['y']\ntest_data = loadmat('data/spamTest.mat')\nx_test = test_data['Xtest']\ny_test = test_data['ytest']\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\nmessage = vector_to_message(x_test[0])\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\none_hot_convert()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.io import loadmat\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nfrom matplotlib import cm\nfrom matplotlib.animation import FuncAnimation\nimport scipy.optimize\nimport networkx as nx\nfrom sklearn import svm\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\n\n# task 13\n# Загрузите данные spamTrain.mat из файла.\n\ntrain_data = loadmat('data/spamTrain.mat')\n\nx = train_data[\"X\"]\ny = train_data[\"y\"]\n\ntest_data = loadmat('data/spamTest.mat')\n\nx_test = test_data[\"Xtest\"]\ny_test = test_data[\"ytest\"]\n\n\ndef vector_to_message(vector):\n vocab_file = open(\"data/vocab.txt\", \"r\")\n vocab = vocab_file.readlines()\n # one_hot = [int(record.split()[1] in message) for record in vocab]\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return \" \".join(message_words)\n\n\nmessage = vector_to_message(x_test[0])\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open(\"data/vocab.txt\", \"r\")\n vocab = vocab_file.readlines()\n # one_hot = [int(record.split()[1] in message) for record in vocab]\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\none_hot_convert()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
A web-page.
"""
import re
import pkg_resources
from .components import Link, Javascript, inject
class Page:
"""A web-page presenting container.
Args:
favicon (str): The file name for the favorite icon displayed in a
browser tab(default=None)
title (str): The page title, displayed in a browser tab (default=None).
resources (list): The list of `Link` and `Javascript` components which
representing the page resources (default=None).
menu (Menu): The page top level menu (default=None).
container (WebComponent): The page container (default=None).
"""
def __init__(
self,
favicon=None,
resources=None,
title=None,
menu=None,
container=None
):
super().__init__()
self.__favicon = favicon
self.__resources = resources
self.__title = title
self.__menu = menu
self.__container = container
self.__vars = {}
def __html__(self):
"""Renders an HTML page."""
return str(self)
def background(self, color=None, image=None, position=None, size=None,
repeat=None, origin=None, clip=None, attachment=None):
"""Configures the page background.
See <a href="https://www.w3schools.com/cssref/css3_pr_background.asp">
form more information.
Args:
color (str): The background color to used.
image(str): The background images to used.
position(str): The position of the background images.
size(str): The size of the background images.
repeat(str): The parameter to define of how to repeat the
background images.
origin(str): The positioning area of the background images.
clip(str): The painting area of the background images.
attachment (str): The parameter to define whether the background
images are fixed or scrolls with the rest of the page.
Returns:
obj (self): The instance of this class.
"""
if color:
self.__vars['--body-background-color'] = color
if image:
self.__vars['--body-background-image'] = image
if position:
self.__vars['--body-background-position'] = position
if size:
self.__vars['background-size'] = size
if repeat:
self.__vars['--body-background-repeat'] = repeat
if origin:
self.__vars['--body-background-origin'] = origin
if clip:
self.__vars['--body-background-clip'] = clip
if attachment:
self.__vars['--body-background-attachment'] = attachment
return self
def __str__(self):
"""Renders an HTML page."""
# Collects CSS supporting Bootstrap stypes.
links = [
Link('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'), # NOQA
Link('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'), # NOQA
Link('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css') # NOQA
]
# Collects FABICON showing in tab.
if self.__favicon:
links.append(Link(self.__favicon, 'icon', 'image/x-icon'))
# Collects JS scriptis supporting JQuery and code highlights.
scripts = [
Javascript('https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'), # NOQA
Javascript('https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'), # NOQA
Javascript('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'), # NOQA
Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'), # NOQA
Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js') # NOQA
]
# Adds customer defined resources which could be CSS or JS files.
if self.__resources:
for resource in self.__resources:
if isinstance(resource, Link):
links.append(resource)
elif isinstance(resource, Javascript):
scripts.append(resource)
else:
raise TypeError(
'Page resource must be either <class "Link"> or '
f'<class "Javascript">, but got: {type(resource)};',
)
# Sets the page title.
title = None
if self.__title:
if isinstance(self.__title, str):
title = f'''<title>{self.__title}</title>'''
else:
raise TypeError(
f'Page title must be <str>, but got: {type(title)};',
)
# Creates inner style which will be embedded in the page.
root_vars = ''
if len(self.__vars) > 0:
for name, value in self.__vars.items():
root_vars += '%s: %s;' % (name, value)
root_vars += '--container-margin-top: %s' % (
'90px' if self.__menu else '10px'
)
root_vars = ':root{' + root_vars + '}'
inner_style = pkg_resources.resource_string(__name__, 'generic.css').\
decode('utf-8')
inner_style = root_vars + inner_style
inner_style = re.sub('\\n|\\s\\s+', ' ', inner_style)
# Creates inner script which will be embedded in the page.
inner_script = pkg_resources.resource_string(__name__, 'generic.js').\
decode('utf-8')
inner_script = re.sub('\\n|\\s\\s+', ' ', inner_script)
return f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport"
content="width=device-width, initial-scale=1,
shrink-to-fit=no"/>
{inject(*links)}
{inject(*scripts)}
{inject(title)}
</head>
<body>
{inject(self.__menu)}
<div class="container-fluid">
{inject(self.__container)}
</div>
</body>
<script>{inner_script}</script>
<style>{inner_style}</style>
</html>
'''
|
normal
|
{
"blob_id": "2c2ad4b6e8c5055afa3dfb3b540a44bda65fa004",
"index": 5991,
"step-1": "<mask token>\n\n\nclass Page:\n <mask token>\n <mask token>\n <mask token>\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), 
Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-2": "<mask token>\n\n\nclass Page:\n <mask token>\n\n def __init__(self, favicon=None, resources=None, title=None, menu=None,\n container=None):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n 
content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-3": "<mask token>\n\n\nclass Page:\n \"\"\"A web-page presenting container.\n\n Args:\n favicon (str): The file name for the favorite icon displayed in a\n browser tab(default=None)\n title (str): The page title, displayed in a browser tab (default=None).\n resources (list): The list of `Link` and `Javascript` components which\n representing the page resources (default=None).\n menu (Menu): The page top level menu (default=None).\n container (WebComponent): The page container (default=None).\n \"\"\"\n\n def __init__(self, favicon=None, resources=None, title=None, menu=None,\n container=None):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n 
self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 
'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-4": "<mask token>\nimport re\nimport pkg_resources\nfrom .components import Link, Javascript, inject\n\n\nclass Page:\n \"\"\"A web-page presenting container.\n\n Args:\n favicon (str): The file name for the favorite icon displayed in a\n browser tab(default=None)\n title (str): The page title, displayed in a browser tab (default=None).\n resources (list): The list of `Link` and `Javascript` components which\n representing the page resources (default=None).\n menu (Menu): The page top level menu (default=None).\n container (WebComponent): The page container (default=None).\n \"\"\"\n\n def __init__(self, favicon=None, resources=None, title=None, menu=None,\n container=None):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = 
position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + 
root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-5": "\"\"\"\nA web-page.\n\"\"\"\n\nimport re\nimport pkg_resources\n\nfrom .components import Link, Javascript, inject\n\n\nclass Page:\n \"\"\"A web-page presenting container.\n\n Args:\n favicon (str): The file name for the favorite icon displayed in a\n browser tab(default=None)\n title (str): The page title, displayed in a browser tab (default=None).\n resources (list): The list of `Link` and `Javascript` components which\n representing the page resources (default=None).\n menu (Menu): The page top level menu (default=None).\n container (WebComponent): The page container (default=None).\n \"\"\"\n\n def __init__(\n self,\n favicon=None,\n resources=None,\n title=None,\n menu=None,\n container=None\n ):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n 
self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n # Collects CSS supporting Bootstrap stypes.\n links = [\n Link('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'), # NOQA\n Link('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'), # NOQA\n Link('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css') # NOQA\n ]\n\n # Collects FABICON showing in tab.\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n\n # Collects JS scriptis supporting JQuery and code highlights.\n scripts = [\n Javascript('https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'), # NOQA\n Javascript('https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'), # NOQA\n Javascript('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'), # NOQA\n Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'), # NOQA\n Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js') # NOQA\n ]\n\n # Adds customer defined resources which could be CSS or JS files.\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n 'Page resource must be either <class \"Link\"> or '\n f'<class \"Javascript\">, but got: {type(resource)};',\n )\n\n # Sets the page title.\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = 
f'''<title>{self.__title}</title>'''\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};',\n )\n\n # Creates inner style which will be embedded in the page.\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % (\n '90px' if self.__menu else '10px'\n )\n root_vars = ':root{' + root_vars + '}'\n\n inner_style = pkg_resources.resource_string(__name__, 'generic.css').\\\n decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n\n # Creates inner script which will be embedded in the page.\n inner_script = pkg_resources.resource_string(__name__, 'generic.js').\\\n decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n\n return f'''\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n '''\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Trie:
def __init__(self):
self.dicts = dict()
def add(self, word):
node = self.dicts
for w in word:
if w not in node:
node[w] = dict()
node = node[w]
<|reserved_special_token_0|>
class Solution:
def minimumLengthEncoding(self, words: List[str]) ->int:
trie = Trie()
ret = 0
words.sort(key=lambda w: len(w), reverse=True)
for word in words:
if trie.search(word[::-1]):
continue
trie.add(word[::-1])
ret += len(word) + 1
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KMP:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Trie:
def __init__(self):
self.dicts = dict()
def add(self, word):
node = self.dicts
for w in word:
if w not in node:
node[w] = dict()
node = node[w]
def search(self, word):
node = self.dicts
for w in word:
if w not in node:
return False
node = node[w]
return True
class Solution:
def minimumLengthEncoding(self, words: List[str]) ->int:
trie = Trie()
ret = 0
words.sort(key=lambda w: len(w), reverse=True)
for word in words:
if trie.search(word[::-1]):
continue
trie.add(word[::-1])
ret += len(word) + 1
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KMP:
def partial(self, pattern):
""" Calculate partial match table: String -> [Int]"""
ret = [0]
for i in range(1, len(pattern)):
j = ret[i - 1]
while j > 0 and pattern[j] != pattern[i]:
j = ret[j - 1]
ret.append(j + 1 if pattern[j] == pattern[i] else j)
return ret
<|reserved_special_token_0|>
class Trie:
def __init__(self):
self.dicts = dict()
def add(self, word):
node = self.dicts
for w in word:
if w not in node:
node[w] = dict()
node = node[w]
def search(self, word):
node = self.dicts
for w in word:
if w not in node:
return False
node = node[w]
return True
class Solution:
def minimumLengthEncoding(self, words: List[str]) ->int:
trie = Trie()
ret = 0
words.sort(key=lambda w: len(w), reverse=True)
for word in words:
if trie.search(word[::-1]):
continue
trie.add(word[::-1])
ret += len(word) + 1
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KMP:
def partial(self, pattern):
""" Calculate partial match table: String -> [Int]"""
ret = [0]
for i in range(1, len(pattern)):
j = ret[i - 1]
while j > 0 and pattern[j] != pattern[i]:
j = ret[j - 1]
ret.append(j + 1 if pattern[j] == pattern[i] else j)
return ret
def search(self, T, P):
"""
KMP search main algorithm: String -> String -> [Int]
Return all the matching position of pattern string P in T
"""
partial, j = self.partial(P), 0
for i in range(len(T)):
while j > 0 and T[i] != P[j]:
j = partial[j - 1]
if T[i] == P[j]:
j += 1
if j == len(P):
return i - (j - 1)
return -1
class Trie:
def __init__(self):
self.dicts = dict()
def add(self, word):
node = self.dicts
for w in word:
if w not in node:
node[w] = dict()
node = node[w]
def search(self, word):
node = self.dicts
for w in word:
if w not in node:
return False
node = node[w]
return True
class Solution:
    def minimumLengthEncoding(self, words: List[str]) ->int:
        """Return the length of the shortest reference-string encoding.

        Each word kept in the encoding contributes len(word) + 1 (the
        extra character is the '#' terminator); a word contributes
        nothing when it is a suffix of a longer word that was already
        kept.  Reversed words are inserted into a trie, longest first,
        so a suffix shows up as an existing trie path.
        """
        trie = Trie()
        ret = 0
        # sorted() leaves the caller's list untouched; the original
        # words.sort() mutated the argument in place.
        for word in sorted(words, key=len, reverse=True):
            backwards = word[::-1]
            if trie.search(backwards):
                # Suffix of a previously kept word: costs nothing.
                continue
            trie.add(backwards)
            ret += len(word) + 1
        return ret
if __name__ == '__main__':
s = Solution()
assert s.minimumLengthEncoding(['time', 'me', 'bell']) == 10
assert s.minimumLengthEncoding(['ojtnj', 'uuydcho', 'dgsyp', 'dwxycpx',
'dpmvc', 'dvfhmb', 'flrxjjx', 'fwhdhvn', 'rgsakp', 'aiconf',
'nzacpk', 'sbxnaj', 'shway', 'rgrmz', 'rysudo', 'bzkioce',
'mqxkzvu', 'wyebk', 'tymoaz', 'mlmbg', 'djbmek', 'qfnme', 'khkiyae',
'tjdaxry', 'sqtcwz', 'ehnsai', 'jhncvrm', 'cxkzgrx', 'pummt',
'hzrpfcn', 'lkyqit', 'phpqdxw', 'vangm', 'wcjdgw', 'pxesvtn',
'mnqory', 'bdrzvh', 'brtzmo', 'chqgf', 'bipyxm', 'meoikg', 'ysyckk',
'ojayeiq', 'zrfbsb', 'yhuotea', 'crfbhq', 'tllycn', 'qxnzihf',
'avyawpz', 'bwsjym', 'myjozc', 'lbdksm', 'mctlt', 'dszowuw',
'syshm', 'xrvhhkn', 'kgrcwfv', 'dwlajlf', 'yviuk', 'xegjj',
'spiczl', 'vfvomi', 'mgcujy', 'dqmzb', 'isrisgt', 'vdrtuah',
'vsyth', 'eoclef', 'poccek', 'cgafrlu', 'crbhpgk', 'sromv',
'xmvbca', 'gobra', 'ygvlq', 'pjvhe', 'tfweiso', 'cskuohg',
'eyalone', 'pobkak', 'nzpxn', 'lbcrws', 'uhtfe', 'eorth', 'showvu',
'hxsmb', 'jrggose', 'izifkb', 'oqwyf', 'mozmzj', 'ijwle', 'ggtqqqv',
'geevzj', 'meota', 'ifsse', 'kdtofm', 'swydhvf', 'tzjhqap', 'wqwwd',
'jlinnov', 'lmxkgeg', 'stbot', 'xrsfn', 'etoyctk', 'rygagm',
'vcnrf', 'zkdge', 'emqtscp', 'newqcyy', 'nnuus', 'exwsxbd', 'zstvl',
'lbkko', 'kygkyqq', 'oggji', 'xytbjo', 'mfbahk', 'ggoks', 'lmqewkl',
'qexhyqe', 'ogaogio', 'nzvbav', 'mdole', 'qvyks', 'gkupfu', 'dgmpn',
'ngrdrj', 'iitqvk', 'ipuiqb', 'ugxfea', 'ialkmv', 'hmgnx', 'aoyoj',
'fvzhjil', 'butrbp', 'dwhxnes', 'etkdwg', 'cjkghz', 'tovkq',
'mmxhv', 'jgcsn', 'hmictal', 'zxmnek', 'pcoeg', 'ntyqmlq',
'hfubhtg', 'ydjbv', 'xnwlqto', 'hatgi', 'bsaczd', 'pokwk',
'arxlula', 'zjtqlk', 'ocfxup', 'nsnqjc', 'xdcsopi', 'iqxyxp',
'xfmtpvm', 'bqtgcf', 'wboycn', 'aoeda', 'uowqdgj', 'rzzzx', 'liucs',
'ejzxz', 'qmlehsh', 'igrbmon', 'dpmkbon', 'pmayh', 'nujdwdw',
'awdgo', 'ijgkzk', 'inhee', 'jzdtv', 'adhauh', 'grtmbp', 'qndbvw',
'zprrw', 'mpqieq', 'jzmzeuu', 'fcvftqs', 'qxzxqy', 'lidguzz',
'eazwd', 'zjhfsz', 'zsnzefh', 'mnckfg', 'zjgtq', 'ckyxlif', 'fznfo',
'jegnof', 'lzwyzb', 'ozivfio', 'igkclsa', 'bebzn', 'bitsggm',
'lrnwin', 'hjnnzr', 'idvoirn', 'dgile', 'vfngh', 'xbmur', 'rqaftt',
'wjwwwxs', 'btreou', 'gjsycg', 'pvsiylz', 'ccxzgdf', 'excrrrr',
'fiesr', 'jdioj', 'uzwsc', 'odrlcoy', 'hcsit', 'ptwfprh', 'sbqry',
'kffvy', 'ejeawbp', 'omvcc', 'iqgxqlt', 'edsuu', 'xnbue', 'qfbcx',
'fzlmbkl', 'wrrcueb', 'mmqispp', 'nknilwd', 'dewuhju', 'hmdqlxy',
'vjxgg', 'lkuexo', 'dzvfscm', 'voulbs', 'uevoqgq', 'kmhwu',
'oglzllg', 'torhihn', 'fhuqzc', 'mmcfhb', 'woyayma', 'uznsvre',
'mmxed', 'aoskwg', 'xrosbm', 'hpyrgh', 'tghwbwh', 'hcwzn',
'iepeftj', 'judij', 'kudbk', 'jonpv', 'lywck', 'rxelz', 'bgifz',
'mehbxq', 'fmqnz', 'sqrmzj', 'iqqjzex', 'qioliz', 'kjizbf',
'lgdcffc', 'pfgmcr', 'trdabul', 'vlqjdnc', 'jjvbxe', 'fqlayw',
'ilbhtyq', 'saawulw', 'gxysrb', 'kighql', 'eceapr', 'kztbcww',
'jedkoy', 'dxpcaga', 'ndacphe', 'rcoit', 'ywgcnxg', 'klipfup',
'bddws', 'jwyof', 'lrfwgo', 'bediwuf', 'ujakh', 'ppima', 'xzhwvm',
'guzmsqt', 'ffbliq', 'adjmynm', 'akabzn', 'inmykju', 'vlcjyv',
'orquepg', 'tufrk', 'vqpjymm', 'lvuab', 'qzxav', 'ekcmu', 'uqtuhie',
'kfvtgf', 'nklwjo', 'ujxlfpl', 'zobfpq', 'eignijd', 'ythctg',
'artllm', 'wodhh', 'tzpwszq', 'njdqegg', 'hzrqib', 'zvoxtfd',
'htboem', 'axjuix', 'bvmvm', 'jbnum', 'bxdth', 'atejt', 'gqsqtnk',
'fykrjbp', 'ldyhonr', 'wcuoj', 'upphc', 'agydg', 'cjmwk', 'rhxbqh',
'tpgozdd', 'qyqoy', 'zjqutw', 'qoohqny', 'nsiacwz', 'xupin',
'criuvs', 'eswjeft', 'pdmevn', 'zvogq', 'lrrvo', 'qhfqqpw',
'ktudfg', 'ijvmi', 'neyjjdx', 'rllpi', 'vllvaa', 'esebtu', 'jyhcrh',
'otgmr', 'oudvyxj', 'pmszy', 'opeed', 'gicni', 'mnuzn', 'mjbfpod',
'sqwgxu', 'dwniwz', 'wmbmmv', 'lyafuy', 'zmvlz', 'kopxzuh',
'urcbbiy', 'guhco', 'nerjm', 'lpdxc', 'hxmjzz', 'hynagc', 'iyxeczi',
'bdfxmoz', 'yybnpqd', 'jvgnb', 'oquqem', 'fmclmz', 'dmkhf',
'zxbjpp', 'qpxgcir', 'iecvjm', 'gtkne', 'lgtqrbc', 'gilbn', 'mcxsg',
'ncwbhn', 'wkriiq', 'zhsir', 'ptkkmw', 'jcbpkrm', 'vbefo', 'vmbcd',
'vqffj', 'fhqzjt', 'nryuh', 'vmclav', 'cjyggm', 'sanev', 'rrdocz',
'zqdexbs', 'jrxstt', 'pyhcesj', 'aagghyr', 'cyemjrb', 'aliohf',
'qaslg', 'pnyjzxz', 'pehnvi', 'suhuw', 'twopabr', 'sapqoc', 'mckrh',
'nzlgrxt', 'aqpobnu', 'pirbjgb', 'plzlj', 'raylxpu', 'gyasfrh',
'urjfxux', 'xjbwau', 'iupknn', 'vhxnc', 'dnbjop', 'vrxhwmd',
'vjsmkh', 'rfmqids', 'smaiwt', 'vkyfo', 'bjqyxc', 'rbbbp', 'dlkzg',
'dwvdwu', 'prulzh', 'bavge', 'ehhrz', 'xxjqk', 'pxopmp', 'okmkmb',
'slcznpp', 'nvqlb', 'jalrk', 'parwlcd', 'anbxo', 'oqcxyzo',
'fjhrdjh', 'pgvnwfe', 'yfjyvh', 'quvszjm', 'xyiig', 'xtncqv',
'svsix', 'jvpdnh', 'owuiv', 'bsrugtt', 'rmvggws', 'lmdql', 'kvmvd',
'xrpmaw', 'ssnxyb', 'oworq', 'rmmpuya', 'rijpih', 'aelazka',
'kncksqx', 'yvtdiy', 'epato', 'pbbamj', 'fejsw', 'zgsru', 'ekwrre',
'zqben', 'vugxi', 'fvcsdp', 'rujcews', 'asqxya', 'worjlsd',
'xggakg', 'kzfpot', 'haqon', 'ypqxzz', 'mmkzwt', 'bdhif', 'exzhv',
'srnklzh', 'hlrunb', 'dwfyke', 'fvgbtdm', 'aeutp', 'czhefx',
'tegfw', 'jkxpsb', 'gxkfkw', 'exvntd', 'gvuti', 'jdmly', 'owaqhw',
'fopuxzv', 'edrvil', 'biszwgv', 'vgckzd', 'fqdxn', 'qktdf',
'hpgwrk', 'gpxiips', 'vxnlab', 'yylxz', 'hsuscch', 'bhivaf',
'wzrwtc', 'ebplv', 'yzxykou', 'mxlssom', 'evghv', 'hksleg',
'shybau', 'zeyqa', 'tljqka', 'axfkec', 'fatdj', 'janlkcc',
'sjorbra', 'jplge', 'oazzot', 'qbgtncn', 'ozlil', 'stohadq',
'rvpuwn', 'oqwpl', 'byftgi', 'ubuusl', 'fkogr', 'bybdyhj',
'vinyuzs', 'ivsqvz', 'vmnae', 'gckxw', 'rozbe', 'glvxwj', 'rcgicu',
'xmvbd', 'itycsry', 'llmwrs', 'fuqth', 'styrrwl', 'wsseuln',
'xwflcli', 'muxgz', 'ypmbboh', 'rpmvnep', 'wjvvnv', 'arjnw',
'toauwc', 'ltjxqrl', 'basffd', 'clxozwd', 'glmrv', 'iejgfj',
'cvkoj', 'wotjf', 'mqucec', 'xalgemc', 'hgimkh', 'golvfq',
'fuqpmak', 'mhpcp', 'pxoibt', 'ledqa', 'guzbyr', 'ztvbeka', 'racdp',
'krsngra', 'aaiknz', 'bhoobyc', 'xibbe', 'yohepxk', 'eclevs',
'ldliwcm', 'qatvlk', 'eiypbw', 'vxvtwa', 'nkdwsej', 'ftmyvp',
'gpthye', 'gazwoi', 'zzgipon', 'cithg', 'wpabujl', 'jhezlnb',
'vqqaxfg', 'kvpbk', 'vggjemp', 'owylv', 'lgwtfpg', 'jjqvfm',
'xbhga', 'tulvfv', 'sefuo', 'hbysv', 'ozopepd', 'awyrifd', 'pnudwx',
'vreje', 'zhpgw', 'qygbf', 'tvbrvy', 'zzmcw', 'cznee', 'deuzxt',
'qfppjvi', 'ilkps', 'ydwhg', 'krwkxzu', 'mnsidg', 'rkxyyr', 'ajkqz',
'xtmom', 'vqocor', 'fympcl', 'yyleyzy', 'jjvzhrn', 'kpmxvuz',
'txoeqlx', 'lhhmn', 'chzgpf', 'ncnjxle', 'ihxrg', 'feqixq',
'lkfhcar', 'hfnsh', 'bifczy', 'umknat', 'yrhgkh', 'mgpcu',
'qotukst', 'yqlmfq', 'ttcdp', 'xnjjzm', 'cukbr', 'hjhjb', 'iikfcsr',
'nsqbnnz', 'dauygf', 'cmydq', 'lfnhqnl', 'ppqgs', 'hscbfug',
'ohzisud', 'opspdkv', 'aauxbop', 'wpkhzo', 'sxbsgu', 'tajrv',
'ololy', 'mxmus', 'vizvxv', 'osaqz', 'rxygkn', 'mrzqlf', 'zrriyxb',
'ufroe', 'bajozg', 'atpsu', 'uhgauzu', 'tffdw', 'mdjulde', 'rbrmy',
'jhkqvwl', 'gzsultq', 'nkbfi', 'xtvwh', 'dryzcv', 'emaxuk',
'zucvutb', 'jdduyk', 'bjdin', 'loicuq', 'qhjjb', 'rgfjbq', 'mphnk',
'lxvceyx', 'zeoxb', 'fxhnxu', 'qpbipe', 'ophwp', 'wiioer', 'quchwj',
'pouxunw', 'bloxgg', 'xbsma', 'dtwew', 'xstorn', 'qfrfkz',
'gxusbsn', 'dhnxd', 'mhstbs', 'hekbtu', 'wvrrjw', 'yeiwd',
'patplsx', 'qmyiyi', 'mowboj', 'iskyd', 'bqhjj', 'povppk', 'vthpwx',
'uuydaw', 'rduxvez', 'vmcww', 'ylruvph', 'ymqosp', 'wzcvohg',
'lhepwta', 'bckhc', 'oiyyt', 'wqzfv', 'uduec', 'lkkbtzl', 'prvpbo',
'jrwstii', 'ijztoo', 'qwwth', 'vqzqiun', 'krnjp', 'zyanpiw',
'ojhjhvg', 'lohmb', 'thqtf', 'reptzv', 'zgkyq', 'lhkvy', 'cmjwl',
'fmilgpw', 'jrfawz', 'vrtzd', 'ezgfl', 'plzng', 'zidzso', 'civavlg',
'vtwopu', 'ljhckxo', 'nuydt', 'qembl', 'fiwrre', 'gfrgi', 'gzegiq',
'mltlqo', 'pcett', 'snbsc', 'msibcqn', 'beacrhz', 'vsycjt', 'gjqji',
'smcegol', 'zregkp', 'smcazoj', 'dziqad', 'jpuwp', 'hnlztac',
'vduitco', 'wyencad', 'bkdnnqo', 'cabzyg', 'mgpcwr', 'fxgvkxt',
'wlkcrdd', 'bhmhsy', 'gqcctjc', 'atafpt', 'vdzhmcg', 'ighxj',
'gfqpale', 'fohbrtj', 'mfpsgt', 'tarjocf', 'gyycb', 'qvqfryl',
'jpwowwc', 'jcgcg', 'gmrjze', 'nfptxq', 'hmjhxge', 'ieelj',
'suvkgr', 'nwjxe', 'tkepqm', 'extnpmq', 'rxzdvf', 'relzaa',
'hfhgaq', 'lmihlz', 'pacocq', 'dclxr', 'oknoem', 'pbpnnd',
'nleerfl', 'tvytymc', 'aamfnl', 'ufdnq', 'bxyzvyh', 'vksvout',
'lohxhf', 'sskgn', 'aawbv', 'hrvhx', 'wvoqf', 'vxkvh', 'oqany',
'bcmyd', 'epdddqn', 'zrlej', 'bchaf', 'hmftii', 'mefcrz', 'wbxvc',
'ewwnldf', 'cqecxgh', 'cnwvdmk', 'vetrw', 'zmogwov', 'lshlzpe',
'lijay', 'tcdqg', 'xavqixd', 'yjkhtsl', 'myjvow', 'cgthhd', 'taaii',
'iuuegk', 'lcypmle', 'wesrit', 'tybco', 'nhxysw', 'awkrj', 'jcmqa',
'porvo', 'nrypriu', 'vznnevp', 'hzklwi', 'vapuxh', 'wyfkn',
'albemu', 'ttfdbl', 'dbqrjv', 'cxals', 'qzitwf', 'ysunur', 'llsefy',
'cghfzji', 'jboaa', 'emhlkw', 'khhmgha', 'twlxgjz', 'pyujor',
'ozcax', 'fetvovo', 'mdhrrd', 'qdhdne', 'fiuvw', 'ebyxh', 'ldaothh',
'vwyjf', 'yjyljlu', 'ivroqg', 'qvpeyec', 'eemsdra', 'wavgeqk',
'bjejrqg', 'mdjimoz', 'fgopy', 'lgwodr', 'cunvszh', 'wiver',
'ghmog', 'jzgfyk', 'vxlbx', 'kvgbtn', 'cunorte', 'mtesdc', 'zdzmqu',
'pigik', 'smruadg', 'czjxlt', 'kukgaok', 'tsldpqq', 'luomo',
'ezbcvdc', 'tfetwes', 'uopzf', 'wsvezkw', 'wrnlvbx', 'bpqungd',
'jqnnof', 'rqhiomi', 'voulqb', 'ouspxn', 'chngpz', 'fbogfcv',
'nqhunxo', 'rydbke', 'ewduo', 'suqqwup', 'oxzfxj', 'kuwfwm',
'euiics', 'mvftoau', 'vstfbm', 'vnmtoo', 'muicf', 'bjbskxb',
'knbomlf', 'enrbtfk', 'hnaqe', 'vxzsr', 'gkqma', 'qygmn', 'ztkybmb',
'injggpk', 'enqrgdk', 'rkgoct', 'tgaiu', 'dnknoxk', 'iwuou',
'oxanccl', 'xestej', 'ekrqq', 'xbwhz', 'jkdvxfh', 'oybaay',
'afyhci', 'papffjq', 'bdppssw', 'qwyvjx', 'xmnnosl', 'kvqzjl',
'wcwii', 'ygfvt', 'tpabbht', 'kjmaq', 'duschjz', 'gguiof', 'wgfhve',
'joqmfjq', 'smqfd', 'ynlovlz', 'sgrzum', 'bobmux', 'dcppi',
'isdjrwl', 'lbevb', 'efqsirq', 'hlgfql', 'enmemlb', 'dbmfk',
'ibfpzm', 'rtdnooq', 'yicdq', 'xadul', 'dxibxzi', 'yyxnj',
'jhsdzxw', 'thltbi', 'kwhreyi', 'hrocoa', 'fnaalbd', 'vnwona',
'nnonm', 'naqaf', 'xgzzies', 'uhruynk', 'kgadfx', 'hyohzbd',
'hnajx', 'yipzh', 'ezdxaet', 'xbzppoz', 'rwnewxz', 'hlcbkmb',
'znyhu', 'zsqtpkr', 'gmyxr', 'rphyvo', 'bgjuz', 'nulpv', 'eejfoso',
'xmwcnes', 'xxxxnpe', 'jezkk', 'idfsxrw', 'qgzjtf', 'arpzpo',
'hxsanlt', 'emvotcb', 'sknzhvg', 'icitca', 'ivhdln', 'sqilerz',
'ndigw', 'bcsre', 'mibbep', 'zsczom', 'cgghjbb', 'fkylfgt',
'bvzofs', 'mefsng', 'bispbza', 'tsosgy', 'xopalrw', 'wserf',
'jbmlz', 'xidxny', 'ffmpjos', 'vddwxmd', 'netnsg', 'kgevsp',
'pguuv', 'cwisp', 'slxiyb', 'dmwaguc', 'jobwusu', 'uytcqrv',
'hzhsy', 'zrlsdd', 'xhxah', 'rxzij', 'zwdgy', 'ygmvkz', 'drkzbo',
'qpsal', 'tpxvl', 'lfmfl', 'sayjvlh', 'rdamym', 'ycuzd', 'zkycu',
'hdesec', 'unequk', 'lpkdid', 'vorxls', 'admsdop', 'rqnvkyg',
'krnqqtb', 'rxfms', 'xfthd', 'pxjbk', 'gpslrg', 'rwziwef',
'usxgqvz', 'baxxye', 'ocrkkrw', 'lrlgsp', 'ceyctg', 'rniml',
'vavug', 'jgircl', 'jrpnmsa', 'rywvlfg', 'prxnys', 'fkzmknn',
'ooelc', 'btvfs', 'yqepuvw', 'tmmmb', 'qmpzexb', 'zjckjvd',
'aieytbb', 'oafqq', 'szrcyh', 'czrxgae', 'ifkte', 'hfgajox',
'pwpnkqq', 'yqphogn', 'xuwthrd', 'mpcmy', 'qitdoa', 'avlzfrh',
'ywpip', 'dgeki', 'fgbnx', 'tyofu', 'xziqzj', 'qxzvqz', 'vtsqk',
'ipkld', 'yfhim', 'ebaegdc', 'ubhrh', 'ldejv', 'mtflwy', 'ocpyj',
'yopgqs', 'fkjxxd', 'njnnwr', 'nylkeb', 'taymdqv', 'ekpznq',
'cbzobmg', 'bucdds', 'qjozu', 'uvpghor', 'obhnu', 'ljkxbg',
'uqrxjtf', 'xwbxiw', 'oxsmcg', 'spchdd', 'pcuitj', 'faidq', 'tybmy',
'uygiyp', 'qloizj', 'cafgmy', 'smetd', 'kwcwb', 'tdabxf', 'fpmrc',
'lfjujn', 'vvmvex', 'mnsgdc', 'enjlgsw', 'ohwcg', 'kxjdaup',
'rotjarp', 'aovdoq', 'oviwq', 'qwaxs', 'bmazco', 'plcljsv',
'yytjhl', 'vgwjm', 'drnue', 'vqjgf', 'uqlsfy', 'bmqmfp', 'lkauwna',
'ozmqce', 'heunaxr', 'zaffbj', 'arbek', 'qjnllw', 'fdkhlz',
'wgmbwh', 'yceqag', 'ltjjq', 'yurggfw', 'puaafsl', 'tjiqkyt',
'yuzub', 'ytmrfq', 'ommmu', 'ipknn', 'iubnuab', 'dzthvc', 'zjbzpew',
'dcooev', 'pjydqcf', 'zuojlzy', 'zwjyfc', 'spmac', 'dfkbnz',
'fzriie', 'asusog', 'hdodx', 'drjpo', 'ddyif', 'chabv', 'ebvkwrr',
'burdjl', 'jjddi', 'dljzkye', 'samyg', 'zwgxcq', 'xtratwo', 'qfopz',
'xvlaw', 'laage', 'btdium', 'vzlnzt', 'kmvbzkq', 'kctobsx',
'kazbelu', 'yxdwrk', 'eslvjc', 'nhsdmvs', 'zuxqcc', 'hqtxovn',
'zrbdai', 'fgjxs', 'txecvio', 'kjxlq', 'dkuxss', 'mkbevn', 'pzmdqc',
'ihyia', 'atsub', 'twytus', 'nzooxj', 'qwuoly', 'fdoigo', 'zukhlh',
'mugeaxt', 'qqsfyls', 'qqtql', 'wrvphcx', 'nzjfhx', 'uequtk',
'fxuto', 'qnast', 'nveys', 'ltbrcth', 'toctdib', 'fbpnh', 'umxfgn',
'zvjuta', 'yeron', 'qzvswqk', 'gbctr', 'ryryz', 'zieknd', 'zcsna',
'jrhak', 'zfxqsj', 'urlba', 'lbozqf', 'yfcjaa', 'hazgy', 'gmmfzyz',
'zjvkyc', 'rvfdcf', 'daitab', 'hcxqgum', 'qwakp', 'ltbsjwo',
'pqqtygx', 'upxcxao', 'qylot', 'lmxqc', 'dwzcd', 'tjccm', 'mqcpap',
'wgxqtr', 'ivycvxy', 'wdykg', 'snvqka', 'jxtvtsb', 'jnyowsq',
'iwfuoig', 'cuoixhu', 'fzwalg', 'djhrar', 'sjmahk', 'dyusf',
'wrxqvdi', 'ftytlor', 'jsjbv', 'vjbebg', 'agvsn', 'vvmpgm',
'gsgjopk', 'vbqvhy', 'afopf', 'zybfuz', 'aqsgc', 'ytrjsvn',
'wlhdfr', 'vdhvl', 'jrlvr', 'cscxwf', 'yhgbew', 'wupbl', 'ssuhyvv',
'bhcirzk', 'oykwk', 'ijbto', 'qsnpgw', 'otwzage', 'ytqzh', 'rgwow',
'bvhgkwh', 'fvawxie', 'fllxw', 'gfcqf', 'scoqb', 'qubrq', 'gdxjtp',
'ahrpck', 'awnlgi', 'cmehsyp', 'dwmytpy', 'firyeq', 'oohwhr',
'caelk', 'mqemvs', 'qflkzi', 'tfpibll', 'ybhzd', 'ctsxri', 'yurocj',
'dnlnl', 'ydmdva', 'xkaotl', 'xovax', 'ypynrqp', 'kwfzw', 'fbgsmrc',
'tutime', 'rcugul', 'cvewno', 'typhbpa', 'wazew', 'flzfs', 'wxxbza',
'ogjfkl', 'vjlebet', 'imbubm', 'xinyncy', 'dqmxfy', 'buhagzh',
'jjadpos', 'gejyz', 'gxshqk', 'wkwrs', 'dqeriqo', 'dmixr', 'bysjih',
'aoloq', 'ddwhsxs', 'nteqv', 'cqagf', 'ditsrn', 'wfxgl', 'jwjqb',
'rvkxj', 'rxapr', 'yrlkip', 'npquasb', 'nvezlr', 'gmhchcx',
'lodfihi', 'dheypxa', 'plzjykh', 'qopsthg', 'zsnes', 'raongg',
'zrpnac', 'tzmtltj', 'jsecdn', 'rzudh', 'hkcyic', 'xsxmw',
'reeuwpn', 'grkwrag', 'gvzzbsq', 'lrfta', 'aqyvbkj', 'ytgfu',
'wcmvd', 'olnvfi', 'hhgmhb', 'kojmepr', 'wpohl', 'szhgg', 'hymiblu',
'lkwjr', 'zulqpz', 'sdcqjo', 'olgsgez', 'lxkpqci', 'yxcgn', 'gmvex',
'fskpppe', 'utzto', 'axncvp', 'lcyahba', 'ydeae', 'zvzar',
'ghfkkqv', 'ryrpg', 'gucpbq', 'reofjz', 'cdnoo', 'dchhh', 'byiwd',
'cqbhok', 'ksfnoa', 'xsmmlr', 'qyvdfqh', 'dzshj', 'bpifnzh',
'uxmoml', 'jdxvojf', 'ihfll', 'vwesfof', 'zynnpb', 'fwzra',
'rxlgww', 'vkmjd', 'hcjgzt', 'mkapfl', 'ffjqlf', 'wulaebc',
'gurramv', 'tufkzai', 'bxprqek', 'nkohv', 'abgfwyl', 'slslg',
'wirsnh', 'pykvuh', 'fdrwk', 'gtmgsxe', 'dxsaab', 'lqiryty',
'aoezg', 'tzhugcg', 'uoarf', 'dwhsv', 'rjiuoi', 'ycgcdnf', 'rtfmwz',
'amkjc', 'woogtdi', 'deprx', 'ucknu', 'womfm', 'xdeev', 'qapxpuu',
'ngulnk', 'fgtxyf', 'hnyabid', 'cilmy', 'wrsewtf', 'luvtmo',
'wftuh', 'ifoeeqp', 'dtfdhhl', 'rwnburg', 'fohkkul', 'frqqi',
'gsrcyc', 'teuync', 'dvpvak', 'daqjki', 'kksscp', 'somsde',
'tyfvck', 'ftfekl', 'ahncv', 'yvosm', 'qgllvg', 'ylfwv', 'jenqns',
'lqovrnm', 'iyger', 'nfvtsv', 'bknxmqj', 'pfzybdr', 'hqjol',
'chlpk', 'etgrtqa', 'msuxdx', 'vnoatf', 'ypdzomn', 'vsshmg',
'rfkipq', 'jvpbiz', 'vbskd', 'edsoixj', 'uowim', 'hqtsj', 'inbsxal',
'ookrv', 'ipotdnk', 'kmazqd', 'jpfghb', 'gvmnnpv', 'juvwa',
'xtkvzw', 'ejqcl', 'ebgcnt', 'ztuyu', 'dlzthw', 'zzipe', 'iaxwdxy',
'htynwkc', 'lefbq', 'pizfr', 'vttrsv', 'oagak', 'eqlrom', 'vttefg',
'dsrmk', 'oekbe', 'cvugzk', 'diwvz', 'gxmfob', 'vjowzm', 'mjpop',
'uznhz', 'kqvjwug', 'wjqvxfg', 'jbpwezu', 'wsckdx', 'slqfomn',
'omuxk', 'zlgblso', 'kvitoq', 'dmafq', 'djxmzk', 'pjqfegq',
'yjrttas', 'siakcx', 'iutiqk', 'nwfdj', 'gbgtazk', 'cpqtf',
'panmlr', 'aqubhsg', 'iwdim', 'nqetym', 'mwazh', 'thyhy', 'ydtxan',
'xfoin', 'lsosc', 'esznfa', 'xgdisi', 'flvbzh', 'mpltx', 'iwjpsqp',
'udfycf', 'rntmc', 'ltflwu', 'wkgbaw', 'bcuzt', 'hejxuhb', 'lguohe',
'klnhb', 'mjump', 'avcwrol', 'yrcqlc', 'ihxul', 'avajh', 'gtpauet',
'iemzk', 'rfdub', 'gqnbk', 'cfcmg', 'iobyh', 'iruuapf', 'tyifwt',
'sbdtp', 'mngcpmb', 'oaqpolm', 'mmimmh', 'gxknadi', 'bmxhuu',
'ulyoa', 'keidy', 'vsnfk', 'cnnnfty', 'pkajm', 'ddgeecb', 'prxidqd',
'wmenvhd', 'akjcqo', 'tnekfef', 'ipvsi', 'pzjwq', 'wmmct',
'erdjnuf', 'vgeaqs', 'nlbdx', 'dpvbe', 'dgeqz', 'aiguzh', 'akawppx',
'tykrjcs', 'gvavo', 'hkyle', 'yhedx', 'xzqcg', 'gzdxt', 'csssbk',
'tmekrmv', 'lfsgo', 'iizahz', 'aszfd', 'aybqnsl', 'vadwxsl',
'ulmiii', 'xaxdugp', 'sfnnsbg', 'dkyruh', 'qhpqu', 'amesjd',
'evjuki', 'vtqjw', 'aoabp', 'qnsuhe', 'bplbx', 'fdqok', 'ozkhgib',
'cggwzys', 'nbknjay', 'ooambw', 'evmvegf', 'htdlxik', 'kahcume',
'bojpn', 'bhipie', 'hdyjslw', 'pbkkq', 'qwszl', 'fgkbzsd', 'hejdx',
'vmcfhgx', 'puzlmmm', 'meffil', 'boakbiz', 'eczot', 'fvkkit',
'jebfx', 'umvkjg', 'uikgs', 'rycgpf', 'rfmfgmy', 'nveho', 'bgywqen',
'gepfma', 'vquyq', 'wcercbw', 'wbpjkxc', 'rqloeda', 'omclokx',
'hvotwp', 'tvqfxxu', 'qrtghk', 'hggme', 'arnmfnt', 'cxprj', 'rspdt',
'hlgfq', 'dmqel', 'pcerxk', 'ptqjc', 'wzreko', 'kahks', 'xjnzo',
'xzzye', 'xbdeu', 'koiwkv', 'jlwkkjr', 'xzdixoc', 'xeedvrm',
'mrtnhqi', 'jaeann', 'mvubp', 'olklqf', 'retbgcj', 'qxxlhh',
'cqyyoy', 'ngwikg', 'qijte', 'sjzck', 'zkmkx', 'ongtzf', 'tanow',
'smgntvq', 'urfgt', 'xwcroa', 'kadcpd', 'cxhgo', 'walku', 'kvvcsyt',
'elwmuxk', 'bfphtm', 'vzeumuq', 'sknvev', 'vbsnfd', 'grmbg',
'vjahwt', 'dmcbmn', 'smubz', 'jobbfcv', 'ujlkm', 'lcthh', 'bauuqdu',
'kjgzgtq', 'gicjz', 'nugbax', 'kbnjfiu', 'sqfpein', 'obbgfww',
'ykggxjx', 'irnmog', 'xniuv', 'rqiwycq', 'hzlgyu', 'yjtrttv',
'satym', 'dgqhlkk', 'rghal', 'tbekx', 'kkwmo', 'eahwhks', 'bpvmbur',
'sqtgkj', 'khboz', 'enefr', 'vkzqvt', 'wfruavu', 'ninomu',
'ypktaoa', 'mlpmoit', 'fxyhjfp', 'fgnpp', 'txieja', 'dprnj',
'bgyrp', 'zsqwqrw', 'stqzki', 'kwiayb', 'ulbsn', 'aetje', 'vwzbb',
'tedwyqs', 'cymiruy', 'jigpoqx', 'ypuqsc', 'weletu', 'gvibea',
'chhuldm', 'baylv', 'wdhovo', 'imfqu', 'meodnsk', 'jhlckqw',
'jolyfh', 'jsfkrhr', 'tnbfzvs', 'egcfht', 'qnzmyr', 'owtrqu',
'oqaqu', 'xftys', 'goxfftm', 'sgbnp', 'bhfvaz', 'gospa', 'jwzlvwk',
'lqncoqd', 'xxizglc', 'bwffm', 'mhpggzr', 'kdaoewx', 'anviou',
'mqiij', 'wkskpn', 'enougdh', 'vldnn', 'gbfgz', 'ejmbh', 'qsdrvsx',
'mrvbz', 'cqlufpf', 'kbgjlu', 'njgna', 'admrmk', 'pwwsc', 'gxkot',
'pdjwh', 'ejwxt', 'bpaxufv', 'iwjzs', 'xxfsg', 'vuhgh', 'srytgb',
'yesvlux', 'tggnch', 'cgnbb', 'fbzbx', 'aomoqf', 'zkrvrjg', 'ueaoz',
'dppacnl', 'ewovhxz', 'kbvee', 'ixeeb', 'gwgoqm', 'hlwlxe',
'fpmkrk', 'wzjsr', 'ispwe', 'garofu', 'jcmpec', 'tggeo', 'yzdeo',
'axpmln', 'zhnlhck', 'duyqcn', 'tpqwqi', 'jvmaj', 'bisgoy',
'mpwmurb', 'olqla', 'ecapwan', 'kcpxn', 'xcapin', 'ooctk', 'sgqql',
'vcyyjxf', 'ejyom', 'jsgtha', 'logxnjg', 'nypadhj', 'dprmk',
'cqkuzb', 'gratv', 'tgkjgu', 'fttcafm', 'tpryi', 'ubbhw', 'uwcuyn',
'zkgohs', 'snfesz', 'ifrex', 'tkbfz', 'fvvkp', 'otjiq', 'lgomjjv',
'ertracf', 'bregu', 'kkbizb', 'hyhvn', 'zjcnxfl', 'mceskuj',
'lmupdq', 'zdzqzgo', 'yorppew', 'fpwtjd', 'dxvyzt', 'bbnnu',
'pkycae', 'ucvapn', 'dijmkb', 'nvwwpr', 'bufkw', 'zhono', 'vayxf',
'hlfwkev', 'klkvkj', 'yzgpwg', 'lcbqr', 'tkkfi', 'pcgljx', 'bhduxu',
'rgfipts', 'hkjbrr', 'fobvy', 'wqmqhxo', 'yjgvypg', 'ehgoizl',
'ipiibzh', 'aqxbxtx', 'lrtin', 'fyyuypr', 'pyrocgm', 'kwqbg',
'ukccw', 'wgsbpvx', 'pcoivrv', 'okhxaba', 'bbuaibf', 'ccvfm',
'phpst', 'yxtqiz', 'cdfbo', 'sijfljn', 'gdlhn', 'bqmbced', 'tiejf',
'aurqer', 'olmyd', 'prctay', 'lwflhi', 'bbehvta', 'oxoda', 'lklyc',
'rzedhp', 'kairil', 'envan', 'wdcwfk', 'xoroddb', 'womrlr',
'ruxebe', 'jnpywrd', 'wrifvz', 'zkewcd', 'vllfrn', 'uvdvjh',
'bglpya', 'vzokkbw', 'apaoqt', 'xpjizn', 'xoajmd', 'xapjwc',
'jcknwg', 'bjpreep', 'ffkua', 'ukcbah', 'bugvkrf', 'cbmmfs',
'cwaczhl', 'nsqaj', 'sjeikg', 'fayqif', 'slowoh', 'xjpvkpa',
'ynunjle', 'bqavt', 'nkpqudr', 'neikvd', 'yuqlzg', 'pdxbtrb',
'cashlog', 'iqiqy', 'smjmxv', 'zbtpbr', 'zzamzcv', 'jmakg',
'txfswc', 'pkaym', 'swlde', 'utann', 'mqgpjne', 'pslfvek', 'nbiqhb',
'bzsianu', 'wnxgbi', 'ahkeeiz', 'dqdfjg', 'bptdg', 'pwita',
'uqyflq', 'txabjn', 'yznjmve', 'mukcqqf', 'cxonbf', 'ixuewjm',
'pzlcat', 'eikeeo', 'scwsoa', 'uaeyw', 'oeorff', 'gbqgd', 'qboqiv',
'hiulpb', 'dbbdm', 'qvdxx', 'aypxbcn', 'ykjwdbg', 'pvfxn', 'shrqyz',
'zaxtu', 'pfefgww', 'jwifrw', 'zxuud', 'kpkwhlj', 'lwptgd',
'zpdmvsw', 'takeb', 'ynehl', 'kixtod', 'fyrgm', 'qirzmr', 'shyvec',
'xjgzt', 'bwfvht', 'wyehh', 'renzc', 'nnibax', 'slhfng', 'yjtecc',
'lghvbzf', 'qroxvun', 'mlsed', 'rrudho', 'cyffhh', 'tjlxahp',
'xmaepzk', 'jvdzh', 'bbvegrw', 'cebcz', 'odjpeam', 'guerph',
'tgmphgo', 'ohtkqq', 'jcxojz', 'haeheae', 'erydxni', 'hatjxx',
'kwmgkjw', 'wmezvy', 'hsuuvfi', 'ineek', 'grkxmhb', 'alxkt', 'rmspxdg']
) == 13956
assert s.minimumLengthEncoding(['me', 'time']) == 5
assert s.minimumLengthEncoding(['yiyqbv', 'njqvawn', 'wnlovvp', 'vogum',
'jpolc', 'zleec', 'sxdrww', 'rbowr', 'xsjorra', 'kwjsx', 'vornum',
'echku', 'kuizegn', 'rhuvv', 'eemkh', 'yshht', 'pbixoa', 'cmbxvtr',
'iupia', 'nmcbq', 'mgrjsx', 'ejvniwt', 'svhsel', 'kazenhf', 'fevpm',
'xcwqfgw', 'ozikzc', 'mywnmqt', 'taorwjm', 'gcshacq', 'fgtasq',
'qexygw', 'ljmbari', 'zfjudos', 'rgxuzy', 'kmzryaf', 'exjfd',
'mcqnebz', 'ptoim', 'zglfi', 'fhneaz', 'rexgc', 'lhplwyr', 'dthdp',
'jizetec', 'obyzg', 'rqupa', 'yphttge', 'wdcdn', 'wdomtr', 'hchbd',
'ytyra', 'upytftl', 'swbbi', 'qpcybv', 'dcoxspd', 'dftkf', 'nwjfmj',
'ojbwy', 'zofuy', 'adqkt', 'kpcply', 'aeukw', 'fqblb', 'xurrbpo',
'veioa', 'puzvl', 'bnzvlax', 'tjzsdcw', 'jarqr', 'orxjbg',
'ilrqdri', 'syjuoyi', 'htoqdco', 'gwslw', 'dpqyf', 'jnkhv',
'fpqhpr', 'baewnvc', 'caunsf', 'qhbpe', 'wlckl', 'lmoroqe', 'ddlak',
'qipwbfp', 'cefqs', 'surczp', 'jtmfuro', 'ezhqau', 'dlsco',
'hywoqh', 'lnifq', 'hvfmu', 'cqjdkok', 'tggdact', 'rwuowdk',
'attnl', 'lwhyq', 'mqtsc', 'bmwajiy', 'nyohug', 'vvfpt', 'lbyazu',
'sarwago', 'iccztck', 'ugsxcw', 'rpwza', 'yofmlll', 'ulhdzhg',
'lbaqk', 'bwxxwc', 'dmsbawg', 'tjloy', 'imbrkul', 'xguke', 'shlkuq',
'lizjcdu', 'kmvykl', 'ilqxxjm', 'rtbvvqt', 'qisec', 'zobzr',
'thwntt', 'afpifh', 'uwiiovy', 'hgsyecl', 'pdgnm', 'mqyesch',
'suexztu', 'msguuwu', 'yrykkv', 'xtoommc', 'muteu', 'bamml',
'kkhlb', 'jfrnx', 'wpytor', 'zzogpt', 'yryxxt', 'hzqofjd',
'ehtildc', 'ptclf', 'nyltvd', 'nrret', 'qqqqt', 'uuxunf', 'jajxt',
'lzdvlc', 'gpdtjug', 'hjsso', 'jairua', 'qarxuey', 'rpwwjwv',
'cjqypep', 'tuzgcs', 'oytqxb', 'rgfmud', 'stnwn', 'tzzaop',
'jpuopzg', 'qeywd', 'spnstrg', 'dfwgntg', 'yjyqk', 'ioowc', 'duqfg',
'gmqxe', 'xhlbby', 'liurjk', 'vdujfm', 'xxyyn', 'omapgc', 'koemzbz',
'ziiyako', 'pjmhfrv', 'bshtfgj', 'ihjvt', 'pnipuw', 'fajiuj',
'rdvcqzd', 'mgknns', 'ouwkm', 'ejnklwc', 'osepl', 'gplpyvs',
'paxrddg', 'gsjlpd', 'lgnmgl', 'yifeeer', 'hhnwlol', 'fcmxs',
'ilinwgm', 'udhfdtq', 'ceefc', 'xweqx', 'jfelwod', 'rtywfjo',
'kzwrgqx', 'fcjriov', 'fzytqv', 'zcpcddo', 'scpyzow', 'kbzegu',
'gclwr', 'gmiwlp', 'rtpka', 'yiywuyy', 'qceot', 'dtrgn', 'ntwbu',
'fxobd', 'zmxwza', 'qcksyz', 'wgbtmm', 'pzorve', 'hztydc', 'jqlay',
'ijdkbk', 'uzjrps', 'gfzibk', 'gsxqj', 'kgjrkdd', 'smdeuk',
'iwizewp', 'owjie', 'kcdccu', 'ifltqr', 'zrdfbm', 'pznbcsk',
'mtkpi', 'cpasir', 'flrxrm', 'uxcxnv', 'htlfcp', 'ltukxfr',
'ftbbha', 'jhgjgyz', 'qjreroc', 'vcvtbid', 'nrhlq', 'gtkpot',
'gyplqqg', 'lnorig', 'fixhufv', 'ugcug', 'ndfug', 'wuorhe',
'owocnkw', 'rcnbf', 'ioiiiui', 'kakwtne', 'svxtt', 'wdrxogm',
'ibrxs', 'bddqi', 'jeguac', 'hlftdw', 'nutgfjw', 'krrzvf',
'amxuloc', 'deozdoe', 'ovsvk', 'sfqsl', 'slgiw', 'jbjujag', 'mhiru',
'uqksech', 'davosw', 'nlueljv', 'rhtvdu', 'ivdpdqa', 'qnbenpq',
'dtapqq', 'hwwfpxl', 'oyrfosn', 'goxgmgo', 'tbvutl', 'cbbbcm',
'iiugpk', 'hinkem', 'vvaitk', 'pskyf', 'hdnekg', 'nqhfn', 'dqbozx',
'zcwpko', 'kafyu', 'jfegubk', 'nofqzsk', 'ujmxxg', 'akwzemu',
'yvhxb', 'qqlwofi', 'hmoecj', 'qwgtlc', 'jepvygq', 'uzggm',
'fztiews', 'lvndvf', 'vulax', 'znqudh', 'whgqi', 'noguo', 'vewkx',
'uruvgf', 'ubohmba', 'aulzi', 'flvfdlq', 'yspfie', 'wugif',
'qndyiwa', 'keihmct', 'rggvn', 'ojjmuoh', 'sbbcl', 'cdivmoz',
'vkusmp', 'mfddp', 'kgohwvp', 'rjbbxw', 'vsgptj', 'hbyjoz', 'gufrv',
'orxiv', 'fxcqfw', 'okppik', 'qlouw', 'lkryigo', 'qccvc', 'ixcnodg',
'wlfilts', 'ahqtevp', 'kkbuha', 'oehaez', 'rzczib', 'vxobk',
'wmetvjs', 'xfjgeq', 'eadzl', 'aeqdvch', 'czojfq', 'hxshidl',
'ofswsj', 'iwbqcmg', 'schhwtt', 'ltyth', 'wiccu', 'akill', 'zaaji',
'qepvfa', 'mpvrkeu', 'dcpenm', 'wdhlk', 'llqbby', 'lronwkr',
'rwtguo', 'ofnvs', 'lxdnwzf', 'dctmilf', 'zhckjd', 'hajsuac',
'wpylhy', 'zhipvm', 'ihikr', 'zzwjgvr', 'gdglrn', 'skhow', 'tlqtjl',
'uypli', 'evdva', 'civide', 'iroihm', 'lvuzid', 'vexat', 'ngmvrz',
'szdhbt', 'ggrbz', 'bsmovlt', 'kguomvl', 'onzvx', 'nobgxw',
'tqxemc', 'vbiyx', 'fpzpf', 'ogtvf', 'yuthri', 'xszbn', 'xcuhj',
'nosnpbp', 'mowsxg', 'tfalyy', 'kxombgm', 'cukrz', 'krmseq',
'velzh', 'kmufxj', 'nvxlkq', 'ualvras', 'wytoucy', 'qicqyym',
'pbeujtv', 'haojnbm', 'xnfffpe', 'wvoiald', 'rlyvf', 'sxamoxw',
'ztqnmp', 'biiavx', 'lnjnzs', 'arqdjdy', 'pkrgokc', 'qxswouj',
'dgqah', 'mnhzo', 'ggilb', 'qscrd', 'ggvkimw', 'qlxjys', 'wximi',
'aqlhio', 'iavtvy', 'grkqf', 'dwrtut', 'uozutfc', 'fogxpdb',
'ydtntlq', 'vnmpmwp', 'gtxhwq', 'mlpihx', 'yfpjlz', 'hdvcquq',
'nunny', 'wklasgp', 'wxduo', 'topsqf', 'tngcpzc', 'mcrut', 'pdnsmt',
'kavaok', 'seiqsqa', 'bhgkiyt', 'mawvhtp', 'domcnrm', 'fgusghc',
'wdaufwz', 'tzpuks', 'kisndyz', 'fwyieu', 'wtdum', 'ytxhl',
'yhzkmuv', 'nppnqe', 'ccvhj', 'dautnyq', 'hkaliab', 'kngan',
'ebmhiop', 'vsdkcef', 'nmpcnd', 'vxvnl', 'cwcgu', 'zsuneh',
'qjgcmd', 'awvba', 'rzbisxo', 'oilqrj', 'neiazlm', 'hlyrl', 'tmiht',
'lwqxxv', 'gyblrw', 'gnnjkb', 'lrxiln', 'xlwlseh', 'npfwcvp',
'yjcdhw', 'rzndd', 'orlhmip', 'gatuojh', 'osotgvv', 'owksz',
'kcocizf', 'izlev', 'smigns', 'wtxfwo', 'knwizte', 'mqjojzp',
'lkezye', 'xqldbu', 'cvbpyl', 'aoipbz', 'asrupt', 'bdwkesh',
'jpaykm', 'pksbg', 'gdbsibd', 'lfxpwk', 'rmnfph', 'yzxwke',
'xjwyusv', 'yetar', 'sytdz', 'pnystzi', 'yntcqo', 'egoorl', 'aydxu',
'rfdrfhe', 'flzkos', 'mmjgev', 'fbjwmvi', 'jeouc', 'lcmkri',
'aggsb', 'aaeazai', 'amyxpey', 'onxqpg', 'qrjpxq', 'zanea',
'niwsgtv', 'nsqja', 'utgskd', 'hlcum', 'frygtl', 'xjmqetz',
'upqddd', 'vxzdstm', 'hcmtera', 'ejstou', 'xkcguf', 'bokigdk',
'vurnv', 'zsgrje', 'nbxlf', 'tpilcx', 'lvepux', 'xacdtp', 'amdgx',
'ubbvnx', 'xmvznh', 'tlprri', 'sthkn', 'xhoad', 'deotaxo',
'pqzppmw', 'xlcpx', 'qwzrpyp', 'lujabeb', 'heskwyy', 'mzzaaur',
'vnestcs', 'rryphdl', 'ibdiabi', 'eoiyt', 'znflx', 'clougix',
'zzadxw', 'lrrgtf', 'lsdoakf', 'yxfmqx', 'qhnrry', 'ktcdmv',
'veygqu', 'btjlo', 'fcspsc', 'gozoazm', 'xcsqgz', 'aazae',
'nkuvask', 'mzdgjq', 'sihqdhy', 'zadrwzw', 'gzcyuea', 'lpgccic',
'fqtfuzw', 'bjoqpkc', 'oydpkxc', 'sugnnu', 'hyvygf', 'axkxo',
'rsmzb', 'dlhqmac', 'gbqby', 'npqkj', 'odbtb', 'bdsib', 'zyasxv',
'ifxqcc', 'lmnjwhr', 'ibuyu', 'uzhle', 'ccpwhjr', 'vhrojnz',
'fkzfz', 'fyesm', 'dnvipvm', 'jbbqn', 'qdkgl', 'xkvvgq', 'dphugaf',
'soxbfun', 'rbgokx', 'biveiz', 'vbaqtn', 'qapydgf', 'llldu',
'ottjpzu', 'fwjuc', 'cawio', 'gbkwe', 'rrnnxer', 'luviy', 'zsalse',
'ckwdeox', 'ozhqocm', 'vtozfwz', 'jztole', 'ydqei', 'bfugz',
'psawjp', 'dzlyrwp', 'izuyrne', 'rbwcfr', 'vdvte', 'usjbqs',
'zzovkxr', 'frfkwk', 'mmtmdd', 'sntka', 'wachbzo', 'rmzvj',
'scbngo', 'eqiuiwi', 'qfakk', 'cckcmt', 'owhzow', 'rejdlw',
'iprsqdq', 'twwaldw', 'mfilzyk', 'jygvx', 'iewbo', 'irhko',
'zpazqhn', 'ndqbg', 'ayzxqdz', 'zvpbh', 'maapq', 'pzitrfm',
'qsgsurv', 'viwcfff', 'wpgenms', 'tjmvu', 'czuemc', 'infxoo',
'avhbw', 'nugkqx', 'xubakjp', 'ndask', 'utaqq', 'njhuxq', 'sdvuex',
'tfmxqp', 'bydovjo', 'bizxjsp', 'zoozxyv', 'jegei', 'gkpqobw',
'psumbtg', 'gkgoh', 'sgcbpql', 'xxkhy', 'kdorkr', 'hcomj', 'ulrpyv',
'rhplil', 'tyyochd', 'xhzul', 'srdjmns', 'kgukye', 'yepvs',
'xnobsjb', 'umxmtub', 'wvqasr', 'igftpzw', 'exhecn', 'rreee',
'jpxuvxh', 'jriqf', 'akexunb', 'ekvdsoe', 'ytzvj', 'vfrlyae',
'pmfai', 'biouzle', 'xkbce', 'clzyi', 'xhjoso', 'wmxkxb', 'dqzzig',
'ydtby', 'gskwj', 'wlkwbz', 'zepvllz', 'zsgqp', 'blntawk', 'eynmil',
'bdqyp', 'wgtnqbc', 'rrgaq', 'gtafuzo', 'qdiko', 'kkcsdo', 'zwqhs',
'kugzbmf', 'wtvvs', 'kqsdx', 'mxsuxiz', 'pgbgjfe', 'vodfr', 'qbvwu',
'vfwbhgw', 'ayojye', 'kolzfqg', 'xnbecj', 'akbcnf', 'uutrn',
'upmesa', 'marqej', 'bbucee', 'bazqbau', 'qikgsyf', 'oeayzn',
'uilxnzr', 'vpnxknl', 'btgtxgh', 'vjaav', 'zaxtzah', 'msweps',
'awduwld', 'gzaep', 'ngvgc', 'qpoqdgn', 'kimndg', 'qilmmpw',
'oafhlyp', 'nyelgvw', 'onymk', 'feycbc', 'dhcrx', 'siqpfly',
'tyvycmf', 'huctqp', 'uscjrp', 'bbptd', 'msdmu', 'xlxhye',
'xnyzcox', 'kyskda', 'injdkmp', 'jiwus', 'spjylwd', 'eqcrnt',
'snfiu', 'jvwvge', 'yfeaw', 'mmdnsjj', 'suzdw', 'xiupf', 'rjwjhng',
'tqvasy', 'rmibpa', 'zuqax', 'prpndnp', 'efryqe', 'pwuqfy',
'wpqlfs', 'aeswq', 'cxkeiue', 'jydxzfi', 'tzfvwp', 'zzgtw',
'mupiusx', 'sojavt', 'dxmsgq', 'migjiyj', 'kixjk', 'ywwvcpl',
'khzcuo', 'oykhx', 'fochin', 'foxbfkc', 'sizjg', 'wrjcvr', 'ceadd',
'tvfqgxq', 'whzhche', 'dcoeti', 'mpilfib', 'cphie', 'ucpnjm',
'ajltvx', 'kpizym', 'vevfsrs', 'jznrri', 'yvhxomr', 'cbcnk',
'yuwuhu', 'jywuzed', 'kqakusq', 'jrnzgfo', 'mjimzz', 'mfjybnd',
'ntqyq', 'junxxck', 'myvqajv', 'kvuqs', 'obfxw', 'jwuba', 'vnrvzvy',
'aeric', 'vtgda', 'nkrocpt', 'ahitg', 'dzxtr', 'zswwc', 'yhxap',
'fdhiwr', 'cpxtqv', 'izbmo', 'zyioo', 'vysnoe', 'ouuyvj', 'cumdhzn',
'dbsmph', 'cktjem', 'vbmxy', 'utgfyhc', 'rqdeorp', 'btnlmd',
'chxwlt', 'nsghoqi', 'egycsm', 'wkanat', 'lzjyf', 'donyx', 'cchqsa',
'xozzz', 'yzmnf', 'jfzuh', 'dpcpg', 'hlahz', 'vobopk', 'lssfeli',
'ccttzi', 'glzgqpv', 'oyqzug', 'qqhkrr', 'euwotv', 'hwbmtz',
'hiylhly', 'bppzne', 'yetyyvs', 'cnbwcby', 'hzblk', 'pfjmxt',
'dsxvt', 'vvkju', 'zjrfr', 'gdbhb', 'udoad', 'nbhpzfm', 'iwetbym',
'atmly', 'tnxli', 'myegb', 'hiwqsk', 'btrajk', 'nhrmwn', 'ftmbecv',
'xopht', 'eiikqy', 'qizanwa', 'cwxiatf', 'jshjva', 'llrtkn',
'zhivu', 'lmwiu', 'oaeaqz', 'oxotfub', 'jnkafm', 'juhrmq', 'mqzbtw',
'puiaxty', 'dnahvoj', 'gaxhz', 'xfnay', 'iqmlnlq', 'xudhcg',
'izpkz', 'tqttmt', 'bwnbs', 'fdufd', 'vhzyymh', 'zhqtxr', 'evbcrv',
'xvnma', 'dgcwy', 'cwxzlbz', 'oodiol', 'teyim', 'kqqfjub', 'ftsqzi',
'arfztkr', 'oqlujx', 'rpkkdov', 'ptoff', 'ivxaxr', 'nxeept',
'cacpl', 'tehir', 'spvggl', 'qfzxkn', 'bhwkukx', 'fkdpuq',
'xdrngre', 'fnfplq', 'dzbrl', 'ufgxu', 'sciec', 'fgdydvw',
'nmpaqxi', 'ydsvfv', 'natjz', 'lruyvzf', 'xznznxp', 'mhfrh',
'kddsk', 'uwatn', 'uklzs', 'lnuta', 'ryizc', 'cvwko', 'tnzpk',
'ywpiv', 'vbvcagq', 'pzolw', 'nmyfhg', 'cshkofj', 'ksptw', 'kqejh',
'zgzjqzo', 'mxzrw', 'enabosq', 'vmubgc', 'sfzcj', 'hewvk', 'ewhrq',
'oifnsmi', 'izdnvu', 'cshgtk', 'mqotuhd', 'gnqgj', 'rxailbm',
'iyhxvtu', 'ncjzklq', 'zjmnoc', 'awqwos', 'ugujppc', 'spbvfwl',
'gntsvo', 'euksu', 'qnvneph', 'crhmf', 'brktmf', 'mvgmr', 'yzcskrp',
'tihawec', 'edqmxpn', 'fxyymlr', 'dzfkucm', 'prldz', 'gplrlhz',
'bohwr', 'bhebbk', 'mmecj', 'segydd', 'ptslsb', 'pyhgw', 'cwmrq',
'mjfhflh', 'xhuid', 'npxmb', 'izilq', 'dczhqh', 'tgfnxtb', 'zrylvo',
'lctxrar', 'ylhrbii', 'rfxedv', 'llvhzjq', 'bjocv', 'wbnex',
'cnohnf', 'xahrl', 'rouvwyc', 'hbhovgv', 'dhucp', 'ncmff', 'ncsskg',
'gsjbyin', 'lroxscf', 'whfaenl', 'vsfultg', 'floxkpy', 'captoai',
'qwolyex', 'ggaypn', 'wzunypd', 'pjixeu', 'gxnjkoc', 'pqiqhn',
'xakjmgz', 'vqizkx', 'gdzcxr', 'kyxwdd', 'pgxmazn', 'qeuwf',
'bduknm', 'tcrcn', 'nehgee', 'wktbcgu', 'jwqltdt', 'wczkai',
'drkqs', 'qhdqnn', 'oobxirc', 'lbunv', 'ifscr', 'xnfpbrw',
'yrrdbax', 'fbocs', 'tewne', 'iobixe', 'zgosas', 'yhesn', 'xlqwd',
'pfcen', 'slsjffx', 'ilwatrc', 'mhsmgp', 'iteghl', 'aqhufdl',
'kxgpqcu', 'ryrcgp', 'azidf', 'smlnl', 'rocxvbt', 'iutfc',
'loapgbr', 'musulp', 'dqcnj', 'tpgbkfh', 'wvskii', 'itkfopo',
'kytyb', 'rzahbu', 'aewptd', 'ohergbb', 'cadxh', 'aphwelj',
'huooyzn', 'gtttia', 'izeyhcr', 'cfvxz', 'aitaxyp', 'vypqost',
'ebfnmif', 'kgiucm', 'zryyu', 'oxgnbpt', 'frpwo', 'ouqvodl',
'pdaazh', 'gxwmf', 'dozxsjm', 'yndpsik', 'zcwvu', 'mihug',
'jgodklw', 'ysklw', 'cfxqv', 'yqvtz', 'rctnp', 'xjywa', 'kpqyw',
'hhtegzt', 'rnwbeoi', 'uyxqum', 'jahcwbe', 'jzjns', 'ovwoaz',
'oqmsrua', 'natbejl', 'deffv', 'okgbr', 'paqhy', 'jkafhte',
'lifsknp', 'afmskh', 'oemdro', 'oxuwov', 'qtyxa', 'hkpfsm',
'ulaubn', 'tciurw', 'myohwlo', 'okuiejb', 'ormoqsb', 'gmipz',
'hterzir', 'ekxzre', 'xkevge', 'ihenf', 'nnhzv', 'eocjmx', 'upzal',
'oounfko', 'myhbwub', 'fwipva', 'pkzzvpd', 'nrupm', 'vluzq',
'fxkoyho', 'atzktr', 'aomrp', 'qwpser', 'ejagmb', 'cfigelm',
'bvanb', 'cgcgabo', 'hmjvlqt', 'hxxocf', 'ftqaud', 'htuipy',
'bhwmcn', 'tgyvaqe', 'lvuwh', 'yiabzs', 'rzzavu', 'fiubm', 'uuqsb',
'riyakuf', 'psscffd', 'kvckzr', 'fktmnf', 'ivzqexi', 'nhxzm',
'kffjmb', 'vdzxv', 'esago', 'bfikw', 'gaiuxmz', 'volokcm', 'jypcs',
'psibvs', 'hxaxklf', 'lmqwgy', 'spnbimo', 'mtihak', 'xikoiy',
'rmmtv', 'phaqgxj', 'zcuwkhk', 'emodbyb', 'ztahsya', 'ieiqm',
'lfoquh', 'emznnq', 'pnhlgut', 'pgvads', 'cqsjx', 'lxnjei', 'zpque',
'rdjbiyb', 'sxedpu', 'potnqva', 'iirkn', 'rjmnrxd', 'ksgcd',
'waeymnh', 'tizdz', 'kproa', 'wpttygd', 'lvyze', 'peewvgm',
'fwtyzbw', 'zitkk', 'gfgqr', 'udgvlz', 'swqspo', 'ohhvyq', 'kgyuau',
'hcerp', 'pdomlm', 'twabkk', 'zfsea', 'epiwp', 'xgycjpt', 'jtkdh',
'mxmdm', 'rtkzm', 'qkacy', 'nuvdiq', 'agctak', 'hypgyh', 'ewtjp',
'paysolw', 'bcutebe', 'xelxyb', 'gzdvrth', 'vpzfv', 'cxrkt',
'admiyzi', 'lqlmn', 'zbjpbg', 'tlvdnli', 'zetnox', 'ylcsobo',
'balajod', 'igoume', 'sxcgw', 'sbkkafk', 'fmndnnw', 'incsa',
'jyupkg', 'uhvvc', 'rswnbth', 'nvprfj', 'figqf', 'znyidqi',
'aijper', 'euidr', 'dftxkze', 'vnppi', 'splwifc', 'fprgafl',
'ixzaz', 'mrhqtne', 'dtkjsy', 'dsmqrgy', 'xfscz', 'cymvmpu',
'vptkfdx', 'zrgrjq', 'mqvwsur', 'hdtlw', 'ugdpwun', 'cvxitc',
'vytvqg', 'pmtpfz', 'nfdtdt', 'umvwjuc', 'jouxc', 'qpypri', 'pdhqp',
'lmise', 'wlsvcfg', 'aqdkzcb', 'qlrmrfz', 'pbgoyi', 'xmsskoh',
'jjdye', 'xvsdmq', 'ymjeipy', 'igjyv', 'uiojvmc', 'uckoww',
'grlnyeg', 'hpglp', 'omnnyy', 'iiliir', 'cnucbcx', 'pcxvs', 'hipad',
'xmiltkj', 'oorwi', 'qgoxjj', 'jnmviqs', 'wpleqn', 'tudxw',
'pcogem', 'hgewaf', 'niwfexy', 'vcttgcb', 'anjgovq', 'epgmscd',
'mdtru', 'xvapv', 'rydjik', 'kopppcr', 'mjbsmu', 'unxoakz', 'ldpsw',
'frksjr', 'vyxxg', 'yyydri', 'szidq', 'qvbtd', 'qratl', 'xwfov',
'bzhqyxl', 'fskrtf', 'pcpzmnv', 'xuxwx', 'vzbevnb', 'ebaqz',
'dbpuek', 'ooqwj', 'gaimp', 'coelqh', 'bwuceq', 'oxpfjt', 'zrqyc',
'rwllk', 'pqunv', 'ufbnn', 'tbnjoz', 'kkqmrxu', 'qyyrm', 'hislf',
'wyuck', 'ubpre', 'pdioi', 'aryhv', 'vdcxv', 'rkgmaag', 'czlzokw',
'gtxuduz', 'grpijx', 'qzrar', 'qhues', 'rmznt', 'sxxmved',
'onjzuwl', 'atbjhip', 'nrardl', 'alrocy', 'cfkip', 'ihtbf', 'pqdgm',
'hmokun', 'dpghac', 'otwml', 'mnbzwa', 'ehetlt', 'rchvq', 'lwjgywn',
'lzdmjo', 'nvhohdp', 'tmshcpc', 'gavjv', 'ycnkv', 'uynzh',
'bvpnfjq', 'lfbem', 'qberui', 'vrmmhx', 'wpbqtfq', 'jujpx',
'dujgkof', 'hrpbso', 'zhcdt', 'iybngyb', 'rgeruza', 'nesyxr',
'cihgfe', 'hjgskb', 'zspxeqm', 'inzrgyd', 'crkjq', 'iooshwp',
'muvvj', 'wakis', 'rowibwa', 'qikwypf', 'aportho', 'pubcgx',
'vqoqpfi', 'rnpbri', 'ussjv', 'looor', 'xkzvdv', 'tstegg',
'zgiiokw', 'rwvyaun', 'mqqla', 'asnqp', 'nghuryl', 'hlvhn',
'ecuotnu', 'judvbu', 'xgvuw', 'oeckn', 'hdhttsg', 'hcyhu', 'klbyjc',
'tnrmqnc', 'mjojxhi', 'kvdet', 'vbmevim', 'oglrzs', 'afbscdi',
'zxrffti', 'firzgmz', 'oenim', 'wgpua', 'asiep', 'kyteq', 'wpeneca',
'qixmeoq', 'zaofon', 'csxxtr', 'cpwmnl', 'feylas', 'idjuo',
'mrtpvta', 'jjvmjy', 'mnljocc', 'lnvjleq', 'oognud', 'rbyneq',
'rhvomm', 'fldrkpk', 'znvrp', 'myswmz', 'jiloe', 'juivjmo',
'ylhbyzl', 'ndmabkt', 'sgdvlq', 'pmnddmi', 'utpuj', 'kfisv',
'nxfeell', 'mxhgqd', 'ccvdsdg', 'emtybo', 'zmkylbt', 'mmrpi',
'dkwlgq', 'iwlappb', 'uimsrnu', 'mkxaxmi', 'tcvll', 'njggal',
'kmqud', 'evgzlh', 'oaxizbp', 'jiuej', 'xknlp', 'cyksydh', 'gbixmz',
'vtouyk', 'sxjpkio', 'qhubt', 'kflvnb', 'sjdfggl', 'bxozyj',
'xekbh', 'wtmcb', 'xtapfco', 'rnornl', 'ursdpki', 'waonim',
'eibfyed', 'zniinaz', 'uyfohq', 'qcaxlt', 'koyaapa', 'pjuvbsi',
'ecpdl', 'ifaqwm', 'yyumzc', 'gvfngfp', 'lttul', 'flyza', 'uasdlme',
'oklhb', 'wulkzzv', 'ziwsxo', 'jqcxiu', 'qdzrwgm', 'zjdwy', 'uumns',
'emlnp', 'irnrqp', 'gqkza', 'oynpcz', 'yxyea', 'zpamf', 'gyehxbv',
'nplkhcc', 'rxeekyo', 'kecgp', 'gseju', 'nkisxqf', 'vlyud',
'fxxihhm', 'yjgtml', 'fehwpdi', 'wclnvyy', 'lriwrc', 'ikparv',
'volfh', 'ysphh', 'szrvrv', 'rqlmz', 'jyqut', 'fyftsj', 'uvwfip',
'rngwgm', 'mjwaz', 'roehjki', 'ploxokr', 'yjbalp', 'fspkq', 'yfxrb',
'kzulvk', 'ordxp', 'vdrrt', 'wdiojwd', 'ridzl', 'niykdvu',
'whyycmn', 'riwcma', 'bkhgkrb', 'nsine', 'emgtgf', 'zoymw',
'ljtvhzb', 'kfyfdma', 'piygxdl', 'onfwgdf', 'fwmkm', 'vqbljay',
'icife', 'bxfli', 'yeygr', 'qenhgm', 'mtxuckj', 'kdcyx', 'kwqhfcn',
'ywkfy', 'prbpw', 'pheyc', 'kmnds', 'cacqs', 'kvekiqy', 'bfvfhdy',
'gxulp', 'skmcra', 'exomt', 'lcxue', 'mnvvday', 'rsddl', 'gooegc',
'udght', 'doymnin', 'ccdap', 'wuive', 'dyyln', 'rynust', 'luxabyg',
'kdkkyyw', 'vawqfsy', 'rmeswm', 'rcxzyv', 'clpowz', 'pdntqm',
'tvjkkmz', 'iiclw', 'nhudzen', 'cybhu', 'crwtw', 'enypnh', 'ygekg',
'hrjwqt', 'peissge', 'wangcy', 'rbpoik', 'raqulbf', 'gyisnsj',
'rgbqn', 'lgvuzb', 'djicf', 'epnuu', 'nsapc', 'voatgh', 'yorfehc',
'jxfttat', 'wyuivb', 'bwopl', 'odwdsh', 'anchkv', 'sepvew',
'qoxxmae', 'bpvqnj', 'sngfo', 'buoazou', 'zhijssa', 'janng',
'uvdbd', 'yfvkqo', 'lcjii', 'mvacvrz', 'xztiar', 'lpbtrqa',
'ukbpdx', 'okaqpgr', 'idgqlj', 'ewglgo', 'ruymhi', 'pcidw', 'bvuqj',
'npzch', 'yppyan', 'oiguirj', 'iijvwqj', 'jvbwjys', 'yjtunfc',
'iaikra', 'oduhdgk', 'ivixur', 'ibcgai', 'djzvcbx', 'lmtsul',
'lgnwzol', 'wursq', 'xsxbqwq', 'jqvwnc', 'dcwwvtb', 'vwybnr',
'bughwjl', 'rnelxb', 'hmacv', 'ufgdygl', 'aabuat', 'oynwask',
'gnfjjf', 'zipbq', 'zxstn', 'jdrbprf', 'jmkvny', 'rblpql', 'vykdj',
'qaakyqw', 'osbhddb', 'avgldyy', 'kvpoa', 'fnqcliu', 'zzlninw',
'drsal', 'omswys', 'hwqcpct', 'ecraq', 'fvhsbjq', 'raauy', 'pfmoz',
'vvqvcm', 'tbjqjun', 'jcfbegq', 'otiwup', 'axvvce', 'dhpdnx',
'pennr', 'hvvmvzv', 'binezl', 'ygdmcuo', 'ypwnqn', 'aloxdv',
'ucieh', 'kovbtag', 'rgfpaww', 'fpbftg', 'spjowfr', 'zridoy',
'blwbbf', 'evwlxi', 'itbcz', 'hgixuo', 'qmoqmjb', 'tkeeis', 'pjiaq',
'rbpje', 'ledoui', 'ubecht', 'mphdd', 'uzswsbb', 'ntsybr',
'qmnijyp', 'pqwawe', 'ltytill', 'dpnxy', 'pkxqcol', 'ayrdi',
'mycnd', 'knotsn', 'zvcrjl', 'qwroblg', 'vtrktey', 'dzilezi',
'wzkxg', 'varqc', 'xlpttyc', 'xxqhnl', 'jpxywa', 'kjdsh', 'hdseebw',
'bxqbp', 'flazqce', 'xrtab', 'rupsfq', 'asswer', 'rhqof', 'hjzdv',
'addsgax', 'cuahzjj', 'xwdilr', 'osqgg', 'pfhwv', 'rqorah',
'ggdlnv', 'truvaoj', 'jzuldwf', 'mjddj', 'vixtn', 'eslxoaj',
'cmoypm', 'jvvzs', 'oqgxcc', 'tptls', 'wwgwbj', 'tysuhg', 'xbnqb',
'iogjvg', 'fbxdmr', 'zdvsmx', 'hiuja', 'watrt', 'kjawab', 'entxk',
'jmnkaox', 'zznsox', 'asmzc', 'soblvp', 'quyxjw', 'udrdc',
'hyylvvw', 'gzfwxuv', 'jjqmjw', 'faegxbl', 'lqjcg', 'bzmruq',
'bykuh', 'miwhd', 'ykgtwhk', 'oyobzwi', 'oltwpua', 'ctulabr',
'dwandd', 'vhuhox', 'vtlknw', 'ywvln', 'qemqdeg', 'akezvx',
'kjmjpv', 'vwuftx', 'kreaxnj', 'fvfop', 'cxabs', 'jfacbje', 'eecnz',
'cmblit', 'gfvpoq', 'whywnh', 'pghvx', 'ohgkmf', 'xxtiwd', 'nkojni',
'dlcicnp', 'bwyvyyd', 'gifup', 'vgjfr', 'hhteifi', 'kjhffq',
'pawqaxl', 'yozro', 'slxluvd', 'amqcquy', 'vnnxkr', 'wgdur',
'rvawiu', 'thcwnc', 'cddut', 'vnrtrv', 'fnfio', 'nhvxe', 'rfdqmj',
'ucblh', 'ccbnt', 'lxckaoy', 'fnwcbx', 'gmdbiwt', 'ypvwjy',
'cbjazk', 'qmujnm', 'nsqot', 'lhcqt', 'ijxcts', 'nujrms', 'itxel',
'ghukr', 'qpwitlr', 'gcafqrn', 'lcoho', 'lfzab', 'vwhgceb', 'vgsgy',
'jrtgo', 'ryxlz', 'deoyq', 'ybenly', 'lyysca', 'sodvazo', 'hbnnoz',
'ovgvda', 'elwtjx', 'soydmn', 'trdsi', 'mwwjwo', 'vupwj', 'dszpcv',
'kkhjdj', 'ewmyo', 'nmpeq', 'oepldcq', 'xttrgu', 'wbcbxi', 'jakzk',
'peukyw', 'fvcqv', 'xklwuu', 'hsmva', 'kslmkq', 'azllbig', 'stnzih',
'wfyud', 'ihauy', 'cfxmj', 'pdyogwv', 'dcqdpa', 'xhusy', 'jfpmpmm',
'odeiiw', 'ozyaer', 'uykzvma', 'tuaznxj', 'kdnbdki', 'syrnsem',
'fdysz', 'hhrpo', 'fglzfi', 'vgcqzqm', 'qhsjr', 'bvboe', 'dpfwpvg',
'mvvry', 'itnnr', 'lgykbe', 'pscow', 'mkrgeqv', 'czffv', 'apteht',
'jeqixsx', 'ksmbe', 'zamivv', 'vvmyo', 'cwwoce', 'sppubxc', 'qaich',
'nmbxr', 'tfkwfxi', 'iakhezl', 'fxujis', 'fkwffe', 'antaylq',
'mmfgstq', 'zxaacy', 'zlswx', 'pbqxil', 'eupck', 'qzcxpbe',
'rjalbzr', 'wioagbq', 'kreec', 'zsdcuft', 'rrdzb', 'ocdlvq',
'oxiroo', 'zcxsqh', 'wbrsi', 'fqike', 'oskzupi', 'thvof', 'dicbyst',
'iojwe', 'hyfizq', 'yoknhww', 'nupiyyn', 'ievah', 'slcgmxg',
'cnecpa', 'lcwsoj', 'hnqsc', 'ghipbi', 'exobr', 'nwpnq', 'dmhbj',
'amdbmwl', 'xfbzovs', 'puizvu', 'yvsus', 'ykysqg', 'bgqdv', 'zgqbr',
'zkjpkej', 'crkot', 'zciymk', 'tleogn', 'sayrmz', 'elwma', 'zugjva',
'uifwsmw', 'wstrg', 'xbotd', 'hinsg', 'qpgyoyp', 'xzfocdy',
'mbvuepb', 'dtphufk', 'cyapnt', 'yyehhad', 'ohdrd', 'mlibm',
'qzdfil', 'rdwszqx', 'bzcbmyn', 'uarjlg', 'mtwpqmx', 'nmagl',
'cepniel', 'tylvaa', 'melhd', 'jygeneg', 'fdglfy', 'xcpciu',
'ayrel', 'bxceshv', 'kspyg', 'iclkaz', 'ykbzt', 'nrnkzo', 'kxkto',
'fabzszn', 'edalls', 'nilmh', 'wwawgnn', 'gymbtx', 'mzipa', 'ajevx',
'qppisv', 'otqhsf', 'ippxak', 'bixnqd', 'uqitwo', 'soxcug',
'loiscd', 'wqrjk', 'rqntoa', 'fzpxlp', 'tuaob', 'pyqqms', 'krbzmmj',
'aijqpfg', 'nstqrbu', 'wmtiahz', 'joplby', 'jyszxq', 'jnxtyhe',
'lbvfv']) == 14011
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
@Author: xiezizhe
@Date: 5/7/2020 下午8:52
"""
from typing import List
class KMP:
    def partial(self, pattern):
        """Build the KMP partial-match (failure) table: String -> [Int].

        ret[i] is the length of the longest proper prefix of
        pattern[:i + 1] that is also a suffix of it.
        """
        ret = [0]
        for i in range(1, len(pattern)):
            j = ret[i - 1]
            # Fall back through successively shorter borders until the
            # next character extends one of them (or none is left).
            while j > 0 and pattern[j] != pattern[i]:
                j = ret[j - 1]
            ret.append(j + 1 if pattern[j] == pattern[i] else j)
        return ret

    def search(self, T, P):
        """KMP search: return the index of the first occurrence of P in T.

        Returns -1 when P does not occur.  An empty pattern matches at
        index 0 (the unguarded original raised IndexError on P == "").
        """
        if not P:
            # Guard: without this, P[j] below indexes into an empty string.
            return 0
        table, j = self.partial(P), 0
        for i in range(len(T)):
            while j > 0 and T[i] != P[j]:
                j = table[j - 1]
            if T[i] == P[j]:
                j += 1
            if j == len(P):
                return i - (j - 1)
        return -1
class Trie:
    """A minimal prefix tree backed by nested dictionaries.

    Note: ``search`` reports whether *word* is a path in the tree, i.e. it
    matches prefixes of inserted words as well as whole words.
    """

    def __init__(self):
        # Root node; every node maps one character to its child node dict.
        self.dicts = dict()

    def add(self, word):
        """Insert *word*, creating intermediate nodes as needed."""
        cursor = self.dicts
        for ch in word:
            cursor = cursor.setdefault(ch, dict())

    def search(self, word):
        """Return True iff *word* traces an existing path from the root."""
        cursor = self.dicts
        for ch in word:
            if ch not in cursor:
                return False
            cursor = cursor[ch]
        return True
class Solution:
    def minimumLengthEncoding(self, words: List[str]) -> int:
        """Return the length of the shortest reference string encoding *words*.

        Each kept word contributes ``len(word) + 1`` characters (word plus a
        trailing ``'#'``); a word is free when it is a suffix of an already
        kept word.  Suffix sharing is detected by walking each word's
        reversal through a trie of reversed words: processing longest-first
        guarantees any word that could absorb a shorter one is inserted
        before that shorter word is examined.

        Unlike the previous version this does not mutate the caller's list
        (uses ``sorted`` instead of ``list.sort``) and needs no helper class.
        """
        root: dict = {}
        total = 0
        for word in sorted(words, key=len, reverse=True):
            node = root
            created = False  # did this word open any new trie node?
            for ch in reversed(word):
                if ch not in node:
                    node[ch] = {}
                    created = True
                node = node[ch]
            if created:
                # Not a suffix of any kept word: word + '#' separator.
                total += len(word) + 1
        return total
if __name__ == "__main__":
s = Solution()
assert s.minimumLengthEncoding(["time", "me", "bell"]) == 10
assert s.minimumLengthEncoding(
["ojtnj", "uuydcho", "dgsyp", "dwxycpx", "dpmvc", "dvfhmb", "flrxjjx", "fwhdhvn", "rgsakp", "aiconf", "nzacpk",
"sbxnaj", "shway", "rgrmz", "rysudo", "bzkioce", "mqxkzvu", "wyebk", "tymoaz", "mlmbg", "djbmek", "qfnme",
"khkiyae", "tjdaxry", "sqtcwz", "ehnsai", "jhncvrm", "cxkzgrx", "pummt", "hzrpfcn", "lkyqit", "phpqdxw",
"vangm", "wcjdgw", "pxesvtn", "mnqory", "bdrzvh", "brtzmo", "chqgf", "bipyxm", "meoikg", "ysyckk", "ojayeiq",
"zrfbsb", "yhuotea", "crfbhq", "tllycn", "qxnzihf", "avyawpz", "bwsjym", "myjozc", "lbdksm", "mctlt",
"dszowuw", "syshm", "xrvhhkn", "kgrcwfv", "dwlajlf", "yviuk", "xegjj", "spiczl", "vfvomi", "mgcujy", "dqmzb",
"isrisgt", "vdrtuah", "vsyth", "eoclef", "poccek", "cgafrlu", "crbhpgk", "sromv", "xmvbca", "gobra", "ygvlq",
"pjvhe", "tfweiso", "cskuohg", "eyalone", "pobkak", "nzpxn", "lbcrws", "uhtfe", "eorth", "showvu", "hxsmb",
"jrggose", "izifkb", "oqwyf", "mozmzj", "ijwle", "ggtqqqv", "geevzj", "meota", "ifsse", "kdtofm", "swydhvf",
"tzjhqap", "wqwwd", "jlinnov", "lmxkgeg", "stbot", "xrsfn", "etoyctk", "rygagm", "vcnrf", "zkdge", "emqtscp",
"newqcyy", "nnuus", "exwsxbd", "zstvl", "lbkko", "kygkyqq", "oggji", "xytbjo", "mfbahk", "ggoks", "lmqewkl",
"qexhyqe", "ogaogio", "nzvbav", "mdole", "qvyks", "gkupfu", "dgmpn", "ngrdrj", "iitqvk", "ipuiqb", "ugxfea",
"ialkmv", "hmgnx", "aoyoj", "fvzhjil", "butrbp", "dwhxnes", "etkdwg", "cjkghz", "tovkq", "mmxhv", "jgcsn",
"hmictal", "zxmnek", "pcoeg", "ntyqmlq", "hfubhtg", "ydjbv", "xnwlqto", "hatgi", "bsaczd", "pokwk", "arxlula",
"zjtqlk", "ocfxup", "nsnqjc", "xdcsopi", "iqxyxp", "xfmtpvm", "bqtgcf", "wboycn", "aoeda", "uowqdgj", "rzzzx",
"liucs", "ejzxz", "qmlehsh", "igrbmon", "dpmkbon", "pmayh", "nujdwdw", "awdgo", "ijgkzk", "inhee", "jzdtv",
"adhauh", "grtmbp", "qndbvw", "zprrw", "mpqieq", "jzmzeuu", "fcvftqs", "qxzxqy", "lidguzz", "eazwd", "zjhfsz",
"zsnzefh", "mnckfg", "zjgtq", "ckyxlif", "fznfo", "jegnof", "lzwyzb", "ozivfio", "igkclsa", "bebzn", "bitsggm",
"lrnwin", "hjnnzr", "idvoirn", "dgile", "vfngh", "xbmur", "rqaftt", "wjwwwxs", "btreou", "gjsycg", "pvsiylz",
"ccxzgdf", "excrrrr", "fiesr", "jdioj", "uzwsc", "odrlcoy", "hcsit", "ptwfprh", "sbqry", "kffvy", "ejeawbp",
"omvcc", "iqgxqlt", "edsuu", "xnbue", "qfbcx", "fzlmbkl", "wrrcueb", "mmqispp", "nknilwd", "dewuhju",
"hmdqlxy", "vjxgg", "lkuexo", "dzvfscm", "voulbs", "uevoqgq", "kmhwu", "oglzllg", "torhihn", "fhuqzc",
"mmcfhb", "woyayma", "uznsvre", "mmxed", "aoskwg", "xrosbm", "hpyrgh", "tghwbwh", "hcwzn", "iepeftj", "judij",
"kudbk", "jonpv", "lywck", "rxelz", "bgifz", "mehbxq", "fmqnz", "sqrmzj", "iqqjzex", "qioliz", "kjizbf",
"lgdcffc", "pfgmcr", "trdabul", "vlqjdnc", "jjvbxe", "fqlayw", "ilbhtyq", "saawulw", "gxysrb", "kighql",
"eceapr", "kztbcww", "jedkoy", "dxpcaga", "ndacphe", "rcoit", "ywgcnxg", "klipfup", "bddws", "jwyof", "lrfwgo",
"bediwuf", "ujakh", "ppima", "xzhwvm", "guzmsqt", "ffbliq", "adjmynm", "akabzn", "inmykju", "vlcjyv",
"orquepg", "tufrk", "vqpjymm", "lvuab", "qzxav", "ekcmu", "uqtuhie", "kfvtgf", "nklwjo", "ujxlfpl", "zobfpq",
"eignijd", "ythctg", "artllm", "wodhh", "tzpwszq", "njdqegg", "hzrqib", "zvoxtfd", "htboem", "axjuix", "bvmvm",
"jbnum", "bxdth", "atejt", "gqsqtnk", "fykrjbp", "ldyhonr", "wcuoj", "upphc", "agydg", "cjmwk", "rhxbqh",
"tpgozdd", "qyqoy", "zjqutw", "qoohqny", "nsiacwz", "xupin", "criuvs", "eswjeft", "pdmevn", "zvogq", "lrrvo",
"qhfqqpw", "ktudfg", "ijvmi", "neyjjdx", "rllpi", "vllvaa", "esebtu", "jyhcrh", "otgmr", "oudvyxj", "pmszy",
"opeed", "gicni", "mnuzn", "mjbfpod", "sqwgxu", "dwniwz", "wmbmmv", "lyafuy", "zmvlz", "kopxzuh", "urcbbiy",
"guhco", "nerjm", "lpdxc", "hxmjzz", "hynagc", "iyxeczi", "bdfxmoz", "yybnpqd", "jvgnb", "oquqem", "fmclmz",
"dmkhf", "zxbjpp", "qpxgcir", "iecvjm", "gtkne", "lgtqrbc", "gilbn", "mcxsg", "ncwbhn", "wkriiq", "zhsir",
"ptkkmw", "jcbpkrm", "vbefo", "vmbcd", "vqffj", "fhqzjt", "nryuh", "vmclav", "cjyggm", "sanev", "rrdocz",
"zqdexbs", "jrxstt", "pyhcesj", "aagghyr", "cyemjrb", "aliohf", "qaslg", "pnyjzxz", "pehnvi", "suhuw",
"twopabr", "sapqoc", "mckrh", "nzlgrxt", "aqpobnu", "pirbjgb", "plzlj", "raylxpu", "gyasfrh", "urjfxux",
"xjbwau", "iupknn", "vhxnc", "dnbjop", "vrxhwmd", "vjsmkh", "rfmqids", "smaiwt", "vkyfo", "bjqyxc", "rbbbp",
"dlkzg", "dwvdwu", "prulzh", "bavge", "ehhrz", "xxjqk", "pxopmp", "okmkmb", "slcznpp", "nvqlb", "jalrk",
"parwlcd", "anbxo", "oqcxyzo", "fjhrdjh", "pgvnwfe", "yfjyvh", "quvszjm", "xyiig", "xtncqv", "svsix", "jvpdnh",
"owuiv", "bsrugtt", "rmvggws", "lmdql", "kvmvd", "xrpmaw", "ssnxyb", "oworq", "rmmpuya", "rijpih", "aelazka",
"kncksqx", "yvtdiy", "epato", "pbbamj", "fejsw", "zgsru", "ekwrre", "zqben", "vugxi", "fvcsdp", "rujcews",
"asqxya", "worjlsd", "xggakg", "kzfpot", "haqon", "ypqxzz", "mmkzwt", "bdhif", "exzhv", "srnklzh", "hlrunb",
"dwfyke", "fvgbtdm", "aeutp", "czhefx", "tegfw", "jkxpsb", "gxkfkw", "exvntd", "gvuti", "jdmly", "owaqhw",
"fopuxzv", "edrvil", "biszwgv", "vgckzd", "fqdxn", "qktdf", "hpgwrk", "gpxiips", "vxnlab", "yylxz", "hsuscch",
"bhivaf", "wzrwtc", "ebplv", "yzxykou", "mxlssom", "evghv", "hksleg", "shybau", "zeyqa", "tljqka", "axfkec",
"fatdj", "janlkcc", "sjorbra", "jplge", "oazzot", "qbgtncn", "ozlil", "stohadq", "rvpuwn", "oqwpl", "byftgi",
"ubuusl", "fkogr", "bybdyhj", "vinyuzs", "ivsqvz", "vmnae", "gckxw", "rozbe", "glvxwj", "rcgicu", "xmvbd",
"itycsry", "llmwrs", "fuqth", "styrrwl", "wsseuln", "xwflcli", "muxgz", "ypmbboh", "rpmvnep", "wjvvnv",
"arjnw", "toauwc", "ltjxqrl", "basffd", "clxozwd", "glmrv", "iejgfj", "cvkoj", "wotjf", "mqucec", "xalgemc",
"hgimkh", "golvfq", "fuqpmak", "mhpcp", "pxoibt", "ledqa", "guzbyr", "ztvbeka", "racdp", "krsngra", "aaiknz",
"bhoobyc", "xibbe", "yohepxk", "eclevs", "ldliwcm", "qatvlk", "eiypbw", "vxvtwa", "nkdwsej", "ftmyvp",
"gpthye", "gazwoi", "zzgipon", "cithg", "wpabujl", "jhezlnb", "vqqaxfg", "kvpbk", "vggjemp", "owylv",
"lgwtfpg", "jjqvfm", "xbhga", "tulvfv", "sefuo", "hbysv", "ozopepd", "awyrifd", "pnudwx", "vreje", "zhpgw",
"qygbf", "tvbrvy", "zzmcw", "cznee", "deuzxt", "qfppjvi", "ilkps", "ydwhg", "krwkxzu", "mnsidg", "rkxyyr",
"ajkqz", "xtmom", "vqocor", "fympcl", "yyleyzy", "jjvzhrn", "kpmxvuz", "txoeqlx", "lhhmn", "chzgpf", "ncnjxle",
"ihxrg", "feqixq", "lkfhcar", "hfnsh", "bifczy", "umknat", "yrhgkh", "mgpcu", "qotukst", "yqlmfq", "ttcdp",
"xnjjzm", "cukbr", "hjhjb", "iikfcsr", "nsqbnnz", "dauygf", "cmydq", "lfnhqnl", "ppqgs", "hscbfug", "ohzisud",
"opspdkv", "aauxbop", "wpkhzo", "sxbsgu", "tajrv", "ololy", "mxmus", "vizvxv", "osaqz", "rxygkn", "mrzqlf",
"zrriyxb", "ufroe", "bajozg", "atpsu", "uhgauzu", "tffdw", "mdjulde", "rbrmy", "jhkqvwl", "gzsultq", "nkbfi",
"xtvwh", "dryzcv", "emaxuk", "zucvutb", "jdduyk", "bjdin", "loicuq", "qhjjb", "rgfjbq", "mphnk", "lxvceyx",
"zeoxb", "fxhnxu", "qpbipe", "ophwp", "wiioer", "quchwj", "pouxunw", "bloxgg", "xbsma", "dtwew", "xstorn",
"qfrfkz", "gxusbsn", "dhnxd", "mhstbs", "hekbtu", "wvrrjw", "yeiwd", "patplsx", "qmyiyi", "mowboj", "iskyd",
"bqhjj", "povppk", "vthpwx", "uuydaw", "rduxvez", "vmcww", "ylruvph", "ymqosp", "wzcvohg", "lhepwta", "bckhc",
"oiyyt", "wqzfv", "uduec", "lkkbtzl", "prvpbo", "jrwstii", "ijztoo", "qwwth", "vqzqiun", "krnjp", "zyanpiw",
"ojhjhvg", "lohmb", "thqtf", "reptzv", "zgkyq", "lhkvy", "cmjwl", "fmilgpw", "jrfawz", "vrtzd", "ezgfl",
"plzng", "zidzso", "civavlg", "vtwopu", "ljhckxo", "nuydt", "qembl", "fiwrre", "gfrgi", "gzegiq", "mltlqo",
"pcett", "snbsc", "msibcqn", "beacrhz", "vsycjt", "gjqji", "smcegol", "zregkp", "smcazoj", "dziqad", "jpuwp",
"hnlztac", "vduitco", "wyencad", "bkdnnqo", "cabzyg", "mgpcwr", "fxgvkxt", "wlkcrdd", "bhmhsy", "gqcctjc",
"atafpt", "vdzhmcg", "ighxj", "gfqpale", "fohbrtj", "mfpsgt", "tarjocf", "gyycb", "qvqfryl", "jpwowwc",
"jcgcg", "gmrjze", "nfptxq", "hmjhxge", "ieelj", "suvkgr", "nwjxe", "tkepqm", "extnpmq", "rxzdvf", "relzaa",
"hfhgaq", "lmihlz", "pacocq", "dclxr", "oknoem", "pbpnnd", "nleerfl", "tvytymc", "aamfnl", "ufdnq", "bxyzvyh",
"vksvout", "lohxhf", "sskgn", "aawbv", "hrvhx", "wvoqf", "vxkvh", "oqany", "bcmyd", "epdddqn", "zrlej",
"bchaf", "hmftii", "mefcrz", "wbxvc", "ewwnldf", "cqecxgh", "cnwvdmk", "vetrw", "zmogwov", "lshlzpe", "lijay",
"tcdqg", "xavqixd", "yjkhtsl", "myjvow", "cgthhd", "taaii", "iuuegk", "lcypmle", "wesrit", "tybco", "nhxysw",
"awkrj", "jcmqa", "porvo", "nrypriu", "vznnevp", "hzklwi", "vapuxh", "wyfkn", "albemu", "ttfdbl", "dbqrjv",
"cxals", "qzitwf", "ysunur", "llsefy", "cghfzji", "jboaa", "emhlkw", "khhmgha", "twlxgjz", "pyujor", "ozcax",
"fetvovo", "mdhrrd", "qdhdne", "fiuvw", "ebyxh", "ldaothh", "vwyjf", "yjyljlu", "ivroqg", "qvpeyec", "eemsdra",
"wavgeqk", "bjejrqg", "mdjimoz", "fgopy", "lgwodr", "cunvszh", "wiver", "ghmog", "jzgfyk", "vxlbx", "kvgbtn",
"cunorte", "mtesdc", "zdzmqu", "pigik", "smruadg", "czjxlt", "kukgaok", "tsldpqq", "luomo", "ezbcvdc",
"tfetwes", "uopzf", "wsvezkw", "wrnlvbx", "bpqungd", "jqnnof", "rqhiomi", "voulqb", "ouspxn", "chngpz",
"fbogfcv", "nqhunxo", "rydbke", "ewduo", "suqqwup", "oxzfxj", "kuwfwm", "euiics", "mvftoau", "vstfbm",
"vnmtoo", "muicf", "bjbskxb", "knbomlf", "enrbtfk", "hnaqe", "vxzsr", "gkqma", "qygmn", "ztkybmb", "injggpk",
"enqrgdk", "rkgoct", "tgaiu", "dnknoxk", "iwuou", "oxanccl", "xestej", "ekrqq", "xbwhz", "jkdvxfh", "oybaay",
"afyhci", "papffjq", "bdppssw", "qwyvjx", "xmnnosl", "kvqzjl", "wcwii", "ygfvt", "tpabbht", "kjmaq", "duschjz",
"gguiof", "wgfhve", "joqmfjq", "smqfd", "ynlovlz", "sgrzum", "bobmux", "dcppi", "isdjrwl", "lbevb", "efqsirq",
"hlgfql", "enmemlb", "dbmfk", "ibfpzm", "rtdnooq", "yicdq", "xadul", "dxibxzi", "yyxnj", "jhsdzxw", "thltbi",
"kwhreyi", "hrocoa", "fnaalbd", "vnwona", "nnonm", "naqaf", "xgzzies", "uhruynk", "kgadfx", "hyohzbd", "hnajx",
"yipzh", "ezdxaet", "xbzppoz", "rwnewxz", "hlcbkmb", "znyhu", "zsqtpkr", "gmyxr", "rphyvo", "bgjuz", "nulpv",
"eejfoso", "xmwcnes", "xxxxnpe", "jezkk", "idfsxrw", "qgzjtf", "arpzpo", "hxsanlt", "emvotcb", "sknzhvg",
"icitca", "ivhdln", "sqilerz", "ndigw", "bcsre", "mibbep", "zsczom", "cgghjbb", "fkylfgt", "bvzofs", "mefsng",
"bispbza", "tsosgy", "xopalrw", "wserf", "jbmlz", "xidxny", "ffmpjos", "vddwxmd", "netnsg", "kgevsp", "pguuv",
"cwisp", "slxiyb", "dmwaguc", "jobwusu", "uytcqrv", "hzhsy", "zrlsdd", "xhxah", "rxzij", "zwdgy", "ygmvkz",
"drkzbo", "qpsal", "tpxvl", "lfmfl", "sayjvlh", "rdamym", "ycuzd", "zkycu", "hdesec", "unequk", "lpkdid",
"vorxls", "admsdop", "rqnvkyg", "krnqqtb", "rxfms", "xfthd", "pxjbk", "gpslrg", "rwziwef", "usxgqvz", "baxxye",
"ocrkkrw", "lrlgsp", "ceyctg", "rniml", "vavug", "jgircl", "jrpnmsa", "rywvlfg", "prxnys", "fkzmknn", "ooelc",
"btvfs", "yqepuvw", "tmmmb", "qmpzexb", "zjckjvd", "aieytbb", "oafqq", "szrcyh", "czrxgae", "ifkte", "hfgajox",
"pwpnkqq", "yqphogn", "xuwthrd", "mpcmy", "qitdoa", "avlzfrh", "ywpip", "dgeki", "fgbnx", "tyofu", "xziqzj",
"qxzvqz", "vtsqk", "ipkld", "yfhim", "ebaegdc", "ubhrh", "ldejv", "mtflwy", "ocpyj", "yopgqs", "fkjxxd",
"njnnwr", "nylkeb", "taymdqv", "ekpznq", "cbzobmg", "bucdds", "qjozu", "uvpghor", "obhnu", "ljkxbg", "uqrxjtf",
"xwbxiw", "oxsmcg", "spchdd", "pcuitj", "faidq", "tybmy", "uygiyp", "qloizj", "cafgmy", "smetd", "kwcwb",
"tdabxf", "fpmrc", "lfjujn", "vvmvex", "mnsgdc", "enjlgsw", "ohwcg", "kxjdaup", "rotjarp", "aovdoq", "oviwq",
"qwaxs", "bmazco", "plcljsv", "yytjhl", "vgwjm", "drnue", "vqjgf", "uqlsfy", "bmqmfp", "lkauwna", "ozmqce",
"heunaxr", "zaffbj", "arbek", "qjnllw", "fdkhlz", "wgmbwh", "yceqag", "ltjjq", "yurggfw", "puaafsl", "tjiqkyt",
"yuzub", "ytmrfq", "ommmu", "ipknn", "iubnuab", "dzthvc", "zjbzpew", "dcooev", "pjydqcf", "zuojlzy", "zwjyfc",
"spmac", "dfkbnz", "fzriie", "asusog", "hdodx", "drjpo", "ddyif", "chabv", "ebvkwrr", "burdjl", "jjddi",
"dljzkye", "samyg", "zwgxcq", "xtratwo", "qfopz", "xvlaw", "laage", "btdium", "vzlnzt", "kmvbzkq", "kctobsx",
"kazbelu", "yxdwrk", "eslvjc", "nhsdmvs", "zuxqcc", "hqtxovn", "zrbdai", "fgjxs", "txecvio", "kjxlq", "dkuxss",
"mkbevn", "pzmdqc", "ihyia", "atsub", "twytus", "nzooxj", "qwuoly", "fdoigo", "zukhlh", "mugeaxt", "qqsfyls",
"qqtql", "wrvphcx", "nzjfhx", "uequtk", "fxuto", "qnast", "nveys", "ltbrcth", "toctdib", "fbpnh", "umxfgn",
"zvjuta", "yeron", "qzvswqk", "gbctr", "ryryz", "zieknd", "zcsna", "jrhak", "zfxqsj", "urlba", "lbozqf",
"yfcjaa", "hazgy", "gmmfzyz", "zjvkyc", "rvfdcf", "daitab", "hcxqgum", "qwakp", "ltbsjwo", "pqqtygx",
"upxcxao", "qylot", "lmxqc", "dwzcd", "tjccm", "mqcpap", "wgxqtr", "ivycvxy", "wdykg", "snvqka", "jxtvtsb",
"jnyowsq", "iwfuoig", "cuoixhu", "fzwalg", "djhrar", "sjmahk", "dyusf", "wrxqvdi", "ftytlor", "jsjbv",
"vjbebg", "agvsn", "vvmpgm", "gsgjopk", "vbqvhy", "afopf", "zybfuz", "aqsgc", "ytrjsvn", "wlhdfr", "vdhvl",
"jrlvr", "cscxwf", "yhgbew", "wupbl", "ssuhyvv", "bhcirzk", "oykwk", "ijbto", "qsnpgw", "otwzage", "ytqzh",
"rgwow", "bvhgkwh", "fvawxie", "fllxw", "gfcqf", "scoqb", "qubrq", "gdxjtp", "ahrpck", "awnlgi", "cmehsyp",
"dwmytpy", "firyeq", "oohwhr", "caelk", "mqemvs", "qflkzi", "tfpibll", "ybhzd", "ctsxri", "yurocj", "dnlnl",
"ydmdva", "xkaotl", "xovax", "ypynrqp", "kwfzw", "fbgsmrc", "tutime", "rcugul", "cvewno", "typhbpa", "wazew",
"flzfs", "wxxbza", "ogjfkl", "vjlebet", "imbubm", "xinyncy", "dqmxfy", "buhagzh", "jjadpos", "gejyz", "gxshqk",
"wkwrs", "dqeriqo", "dmixr", "bysjih", "aoloq", "ddwhsxs", "nteqv", "cqagf", "ditsrn", "wfxgl", "jwjqb",
"rvkxj", "rxapr", "yrlkip", "npquasb", "nvezlr", "gmhchcx", "lodfihi", "dheypxa", "plzjykh", "qopsthg",
"zsnes", "raongg", "zrpnac", "tzmtltj", "jsecdn", "rzudh", "hkcyic", "xsxmw", "reeuwpn", "grkwrag", "gvzzbsq",
"lrfta", "aqyvbkj", "ytgfu", "wcmvd", "olnvfi", "hhgmhb", "kojmepr", "wpohl", "szhgg", "hymiblu", "lkwjr",
"zulqpz", "sdcqjo", "olgsgez", "lxkpqci", "yxcgn", "gmvex", "fskpppe", "utzto", "axncvp", "lcyahba", "ydeae",
"zvzar", "ghfkkqv", "ryrpg", "gucpbq", "reofjz", "cdnoo", "dchhh", "byiwd", "cqbhok", "ksfnoa", "xsmmlr",
"qyvdfqh", "dzshj", "bpifnzh", "uxmoml", "jdxvojf", "ihfll", "vwesfof", "zynnpb", "fwzra", "rxlgww", "vkmjd",
"hcjgzt", "mkapfl", "ffjqlf", "wulaebc", "gurramv", "tufkzai", "bxprqek", "nkohv", "abgfwyl", "slslg",
"wirsnh", "pykvuh", "fdrwk", "gtmgsxe", "dxsaab", "lqiryty", "aoezg", "tzhugcg", "uoarf", "dwhsv", "rjiuoi",
"ycgcdnf", "rtfmwz", "amkjc", "woogtdi", "deprx", "ucknu", "womfm", "xdeev", "qapxpuu", "ngulnk", "fgtxyf",
"hnyabid", "cilmy", "wrsewtf", "luvtmo", "wftuh", "ifoeeqp", "dtfdhhl", "rwnburg", "fohkkul", "frqqi",
"gsrcyc", "teuync", "dvpvak", "daqjki", "kksscp", "somsde", "tyfvck", "ftfekl", "ahncv", "yvosm", "qgllvg",
"ylfwv", "jenqns", "lqovrnm", "iyger", "nfvtsv", "bknxmqj", "pfzybdr", "hqjol", "chlpk", "etgrtqa", "msuxdx",
"vnoatf", "ypdzomn", "vsshmg", "rfkipq", "jvpbiz", "vbskd", "edsoixj", "uowim", "hqtsj", "inbsxal", "ookrv",
"ipotdnk", "kmazqd", "jpfghb", "gvmnnpv", "juvwa", "xtkvzw", "ejqcl", "ebgcnt", "ztuyu", "dlzthw", "zzipe",
"iaxwdxy", "htynwkc", "lefbq", "pizfr", "vttrsv", "oagak", "eqlrom", "vttefg", "dsrmk", "oekbe", "cvugzk",
"diwvz", "gxmfob", "vjowzm", "mjpop", "uznhz", "kqvjwug", "wjqvxfg", "jbpwezu", "wsckdx", "slqfomn", "omuxk",
"zlgblso", "kvitoq", "dmafq", "djxmzk", "pjqfegq", "yjrttas", "siakcx", "iutiqk", "nwfdj", "gbgtazk", "cpqtf",
"panmlr", "aqubhsg", "iwdim", "nqetym", "mwazh", "thyhy", "ydtxan", "xfoin", "lsosc", "esznfa", "xgdisi",
"flvbzh", "mpltx", "iwjpsqp", "udfycf", "rntmc", "ltflwu", "wkgbaw", "bcuzt", "hejxuhb", "lguohe", "klnhb",
"mjump", "avcwrol", "yrcqlc", "ihxul", "avajh", "gtpauet", "iemzk", "rfdub", "gqnbk", "cfcmg", "iobyh",
"iruuapf", "tyifwt", "sbdtp", "mngcpmb", "oaqpolm", "mmimmh", "gxknadi", "bmxhuu", "ulyoa", "keidy", "vsnfk",
"cnnnfty", "pkajm", "ddgeecb", "prxidqd", "wmenvhd", "akjcqo", "tnekfef", "ipvsi", "pzjwq", "wmmct", "erdjnuf",
"vgeaqs", "nlbdx", "dpvbe", "dgeqz", "aiguzh", "akawppx", "tykrjcs", "gvavo", "hkyle", "yhedx", "xzqcg",
"gzdxt", "csssbk", "tmekrmv", "lfsgo", "iizahz", "aszfd", "aybqnsl", "vadwxsl", "ulmiii", "xaxdugp", "sfnnsbg",
"dkyruh", "qhpqu", "amesjd", "evjuki", "vtqjw", "aoabp", "qnsuhe", "bplbx", "fdqok", "ozkhgib", "cggwzys",
"nbknjay", "ooambw", "evmvegf", "htdlxik", "kahcume", "bojpn", "bhipie", "hdyjslw", "pbkkq", "qwszl",
"fgkbzsd", "hejdx", "vmcfhgx", "puzlmmm", "meffil", "boakbiz", "eczot", "fvkkit", "jebfx", "umvkjg", "uikgs",
"rycgpf", "rfmfgmy", "nveho", "bgywqen", "gepfma", "vquyq", "wcercbw", "wbpjkxc", "rqloeda", "omclokx",
"hvotwp", "tvqfxxu", "qrtghk", "hggme", "arnmfnt", "cxprj", "rspdt", "hlgfq", "dmqel", "pcerxk", "ptqjc",
"wzreko", "kahks", "xjnzo", "xzzye", "xbdeu", "koiwkv", "jlwkkjr", "xzdixoc", "xeedvrm", "mrtnhqi", "jaeann",
"mvubp", "olklqf", "retbgcj", "qxxlhh", "cqyyoy", "ngwikg", "qijte", "sjzck", "zkmkx", "ongtzf", "tanow",
"smgntvq", "urfgt", "xwcroa", "kadcpd", "cxhgo", "walku", "kvvcsyt", "elwmuxk", "bfphtm", "vzeumuq", "sknvev",
"vbsnfd", "grmbg", "vjahwt", "dmcbmn", "smubz", "jobbfcv", "ujlkm", "lcthh", "bauuqdu", "kjgzgtq", "gicjz",
"nugbax", "kbnjfiu", "sqfpein", "obbgfww", "ykggxjx", "irnmog", "xniuv", "rqiwycq", "hzlgyu", "yjtrttv",
"satym", "dgqhlkk", "rghal", "tbekx", "kkwmo", "eahwhks", "bpvmbur", "sqtgkj", "khboz", "enefr", "vkzqvt",
"wfruavu", "ninomu", "ypktaoa", "mlpmoit", "fxyhjfp", "fgnpp", "txieja", "dprnj", "bgyrp", "zsqwqrw", "stqzki",
"kwiayb", "ulbsn", "aetje", "vwzbb", "tedwyqs", "cymiruy", "jigpoqx", "ypuqsc", "weletu", "gvibea", "chhuldm",
"baylv", "wdhovo", "imfqu", "meodnsk", "jhlckqw", "jolyfh", "jsfkrhr", "tnbfzvs", "egcfht", "qnzmyr", "owtrqu",
"oqaqu", "xftys", "goxfftm", "sgbnp", "bhfvaz", "gospa", "jwzlvwk", "lqncoqd", "xxizglc", "bwffm", "mhpggzr",
"kdaoewx", "anviou", "mqiij", "wkskpn", "enougdh", "vldnn", "gbfgz", "ejmbh", "qsdrvsx", "mrvbz", "cqlufpf",
"kbgjlu", "njgna", "admrmk", "pwwsc", "gxkot", "pdjwh", "ejwxt", "bpaxufv", "iwjzs", "xxfsg", "vuhgh",
"srytgb", "yesvlux", "tggnch", "cgnbb", "fbzbx", "aomoqf", "zkrvrjg", "ueaoz", "dppacnl", "ewovhxz", "kbvee",
"ixeeb", "gwgoqm", "hlwlxe", "fpmkrk", "wzjsr", "ispwe", "garofu", "jcmpec", "tggeo", "yzdeo", "axpmln",
"zhnlhck", "duyqcn", "tpqwqi", "jvmaj", "bisgoy", "mpwmurb", "olqla", "ecapwan", "kcpxn", "xcapin", "ooctk",
"sgqql", "vcyyjxf", "ejyom", "jsgtha", "logxnjg", "nypadhj", "dprmk", "cqkuzb", "gratv", "tgkjgu", "fttcafm",
"tpryi", "ubbhw", "uwcuyn", "zkgohs", "snfesz", "ifrex", "tkbfz", "fvvkp", "otjiq", "lgomjjv", "ertracf",
"bregu", "kkbizb", "hyhvn", "zjcnxfl", "mceskuj", "lmupdq", "zdzqzgo", "yorppew", "fpwtjd", "dxvyzt", "bbnnu",
"pkycae", "ucvapn", "dijmkb", "nvwwpr", "bufkw", "zhono", "vayxf", "hlfwkev", "klkvkj", "yzgpwg", "lcbqr",
"tkkfi", "pcgljx", "bhduxu", "rgfipts", "hkjbrr", "fobvy", "wqmqhxo", "yjgvypg", "ehgoizl", "ipiibzh",
"aqxbxtx", "lrtin", "fyyuypr", "pyrocgm", "kwqbg", "ukccw", "wgsbpvx", "pcoivrv", "okhxaba", "bbuaibf",
"ccvfm", "phpst", "yxtqiz", "cdfbo", "sijfljn", "gdlhn", "bqmbced", "tiejf", "aurqer", "olmyd", "prctay",
"lwflhi", "bbehvta", "oxoda", "lklyc", "rzedhp", "kairil", "envan", "wdcwfk", "xoroddb", "womrlr", "ruxebe",
"jnpywrd", "wrifvz", "zkewcd", "vllfrn", "uvdvjh", "bglpya", "vzokkbw", "apaoqt", "xpjizn", "xoajmd", "xapjwc",
"jcknwg", "bjpreep", "ffkua", "ukcbah", "bugvkrf", "cbmmfs", "cwaczhl", "nsqaj", "sjeikg", "fayqif", "slowoh",
"xjpvkpa", "ynunjle", "bqavt", "nkpqudr", "neikvd", "yuqlzg", "pdxbtrb", "cashlog", "iqiqy", "smjmxv",
"zbtpbr", "zzamzcv", "jmakg", "txfswc", "pkaym", "swlde", "utann", "mqgpjne", "pslfvek", "nbiqhb", "bzsianu",
"wnxgbi", "ahkeeiz", "dqdfjg", "bptdg", "pwita", "uqyflq", "txabjn", "yznjmve", "mukcqqf", "cxonbf", "ixuewjm",
"pzlcat", "eikeeo", "scwsoa", "uaeyw", "oeorff", "gbqgd", "qboqiv", "hiulpb", "dbbdm", "qvdxx", "aypxbcn",
"ykjwdbg", "pvfxn", "shrqyz", "zaxtu", "pfefgww", "jwifrw", "zxuud", "kpkwhlj", "lwptgd", "zpdmvsw", "takeb",
"ynehl", "kixtod", "fyrgm", "qirzmr", "shyvec", "xjgzt", "bwfvht", "wyehh", "renzc", "nnibax", "slhfng",
"yjtecc", "lghvbzf", "qroxvun", "mlsed", "rrudho", "cyffhh", "tjlxahp", "xmaepzk", "jvdzh", "bbvegrw", "cebcz",
"odjpeam", "guerph", "tgmphgo", "ohtkqq", "jcxojz", "haeheae", "erydxni", "hatjxx", "kwmgkjw", "wmezvy",
"hsuuvfi", "ineek", "grkxmhb", "alxkt", "rmspxdg"]) == 13956
assert s.minimumLengthEncoding(["me", "time"]) == 5
assert s.minimumLengthEncoding(
["yiyqbv", "njqvawn", "wnlovvp", "vogum", "jpolc", "zleec", "sxdrww", "rbowr", "xsjorra", "kwjsx", "vornum",
"echku", "kuizegn", "rhuvv", "eemkh", "yshht", "pbixoa", "cmbxvtr", "iupia", "nmcbq", "mgrjsx", "ejvniwt",
"svhsel", "kazenhf", "fevpm", "xcwqfgw", "ozikzc", "mywnmqt", "taorwjm", "gcshacq", "fgtasq", "qexygw",
"ljmbari", "zfjudos", "rgxuzy", "kmzryaf", "exjfd", "mcqnebz", "ptoim", "zglfi", "fhneaz", "rexgc", "lhplwyr",
"dthdp", "jizetec", "obyzg", "rqupa", "yphttge", "wdcdn", "wdomtr", "hchbd", "ytyra", "upytftl", "swbbi",
"qpcybv", "dcoxspd", "dftkf", "nwjfmj", "ojbwy", "zofuy", "adqkt", "kpcply", "aeukw", "fqblb", "xurrbpo",
"veioa", "puzvl", "bnzvlax", "tjzsdcw", "jarqr", "orxjbg", "ilrqdri", "syjuoyi", "htoqdco", "gwslw", "dpqyf",
"jnkhv", "fpqhpr", "baewnvc", "caunsf", "qhbpe", "wlckl", "lmoroqe", "ddlak", "qipwbfp", "cefqs", "surczp",
"jtmfuro", "ezhqau", "dlsco", "hywoqh", "lnifq", "hvfmu", "cqjdkok", "tggdact", "rwuowdk", "attnl", "lwhyq",
"mqtsc", "bmwajiy", "nyohug", "vvfpt", "lbyazu", "sarwago", "iccztck", "ugsxcw", "rpwza", "yofmlll", "ulhdzhg",
"lbaqk", "bwxxwc", "dmsbawg", "tjloy", "imbrkul", "xguke", "shlkuq", "lizjcdu", "kmvykl", "ilqxxjm", "rtbvvqt",
"qisec", "zobzr", "thwntt", "afpifh", "uwiiovy", "hgsyecl", "pdgnm", "mqyesch", "suexztu", "msguuwu", "yrykkv",
"xtoommc", "muteu", "bamml", "kkhlb", "jfrnx", "wpytor", "zzogpt", "yryxxt", "hzqofjd", "ehtildc", "ptclf",
"nyltvd", "nrret", "qqqqt", "uuxunf", "jajxt", "lzdvlc", "gpdtjug", "hjsso", "jairua", "qarxuey", "rpwwjwv",
"cjqypep", "tuzgcs", "oytqxb", "rgfmud", "stnwn", "tzzaop", "jpuopzg", "qeywd", "spnstrg", "dfwgntg", "yjyqk",
"ioowc", "duqfg", "gmqxe", "xhlbby", "liurjk", "vdujfm", "xxyyn", "omapgc", "koemzbz", "ziiyako", "pjmhfrv",
"bshtfgj", "ihjvt", "pnipuw", "fajiuj", "rdvcqzd", "mgknns", "ouwkm", "ejnklwc", "osepl", "gplpyvs", "paxrddg",
"gsjlpd", "lgnmgl", "yifeeer", "hhnwlol", "fcmxs", "ilinwgm", "udhfdtq", "ceefc", "xweqx", "jfelwod",
"rtywfjo", "kzwrgqx", "fcjriov", "fzytqv", "zcpcddo", "scpyzow", "kbzegu", "gclwr", "gmiwlp", "rtpka",
"yiywuyy", "qceot", "dtrgn", "ntwbu", "fxobd", "zmxwza", "qcksyz", "wgbtmm", "pzorve", "hztydc", "jqlay",
"ijdkbk", "uzjrps", "gfzibk", "gsxqj", "kgjrkdd", "smdeuk", "iwizewp", "owjie", "kcdccu", "ifltqr", "zrdfbm",
"pznbcsk", "mtkpi", "cpasir", "flrxrm", "uxcxnv", "htlfcp", "ltukxfr", "ftbbha", "jhgjgyz", "qjreroc",
"vcvtbid", "nrhlq", "gtkpot", "gyplqqg", "lnorig", "fixhufv", "ugcug", "ndfug", "wuorhe", "owocnkw", "rcnbf",
"ioiiiui", "kakwtne", "svxtt", "wdrxogm", "ibrxs", "bddqi", "jeguac", "hlftdw", "nutgfjw", "krrzvf", "amxuloc",
"deozdoe", "ovsvk", "sfqsl", "slgiw", "jbjujag", "mhiru", "uqksech", "davosw", "nlueljv", "rhtvdu", "ivdpdqa",
"qnbenpq", "dtapqq", "hwwfpxl", "oyrfosn", "goxgmgo", "tbvutl", "cbbbcm", "iiugpk", "hinkem", "vvaitk",
"pskyf", "hdnekg", "nqhfn", "dqbozx", "zcwpko", "kafyu", "jfegubk", "nofqzsk", "ujmxxg", "akwzemu", "yvhxb",
"qqlwofi", "hmoecj", "qwgtlc", "jepvygq", "uzggm", "fztiews", "lvndvf", "vulax", "znqudh", "whgqi", "noguo",
"vewkx", "uruvgf", "ubohmba", "aulzi", "flvfdlq", "yspfie", "wugif", "qndyiwa", "keihmct", "rggvn", "ojjmuoh",
"sbbcl", "cdivmoz", "vkusmp", "mfddp", "kgohwvp", "rjbbxw", "vsgptj", "hbyjoz", "gufrv", "orxiv", "fxcqfw",
"okppik", "qlouw", "lkryigo", "qccvc", "ixcnodg", "wlfilts", "ahqtevp", "kkbuha", "oehaez", "rzczib", "vxobk",
"wmetvjs", "xfjgeq", "eadzl", "aeqdvch", "czojfq", "hxshidl", "ofswsj", "iwbqcmg", "schhwtt", "ltyth", "wiccu",
"akill", "zaaji", "qepvfa", "mpvrkeu", "dcpenm", "wdhlk", "llqbby", "lronwkr", "rwtguo", "ofnvs", "lxdnwzf",
"dctmilf", "zhckjd", "hajsuac", "wpylhy", "zhipvm", "ihikr", "zzwjgvr", "gdglrn", "skhow", "tlqtjl", "uypli",
"evdva", "civide", "iroihm", "lvuzid", "vexat", "ngmvrz", "szdhbt", "ggrbz", "bsmovlt", "kguomvl", "onzvx",
"nobgxw", "tqxemc", "vbiyx", "fpzpf", "ogtvf", "yuthri", "xszbn", "xcuhj", "nosnpbp", "mowsxg", "tfalyy",
"kxombgm", "cukrz", "krmseq", "velzh", "kmufxj", "nvxlkq", "ualvras", "wytoucy", "qicqyym", "pbeujtv",
"haojnbm", "xnfffpe", "wvoiald", "rlyvf", "sxamoxw", "ztqnmp", "biiavx", "lnjnzs", "arqdjdy", "pkrgokc",
"qxswouj", "dgqah", "mnhzo", "ggilb", "qscrd", "ggvkimw", "qlxjys", "wximi", "aqlhio", "iavtvy", "grkqf",
"dwrtut", "uozutfc", "fogxpdb", "ydtntlq", "vnmpmwp", "gtxhwq", "mlpihx", "yfpjlz", "hdvcquq", "nunny",
"wklasgp", "wxduo", "topsqf", "tngcpzc", "mcrut", "pdnsmt", "kavaok", "seiqsqa", "bhgkiyt", "mawvhtp",
"domcnrm", "fgusghc", "wdaufwz", "tzpuks", "kisndyz", "fwyieu", "wtdum", "ytxhl", "yhzkmuv", "nppnqe", "ccvhj",
"dautnyq", "hkaliab", "kngan", "ebmhiop", "vsdkcef", "nmpcnd", "vxvnl", "cwcgu", "zsuneh", "qjgcmd", "awvba",
"rzbisxo", "oilqrj", "neiazlm", "hlyrl", "tmiht", "lwqxxv", "gyblrw", "gnnjkb", "lrxiln", "xlwlseh", "npfwcvp",
"yjcdhw", "rzndd", "orlhmip", "gatuojh", "osotgvv", "owksz", "kcocizf", "izlev", "smigns", "wtxfwo", "knwizte",
"mqjojzp", "lkezye", "xqldbu", "cvbpyl", "aoipbz", "asrupt", "bdwkesh", "jpaykm", "pksbg", "gdbsibd", "lfxpwk",
"rmnfph", "yzxwke", "xjwyusv", "yetar", "sytdz", "pnystzi", "yntcqo", "egoorl", "aydxu", "rfdrfhe", "flzkos",
"mmjgev", "fbjwmvi", "jeouc", "lcmkri", "aggsb", "aaeazai", "amyxpey", "onxqpg", "qrjpxq", "zanea", "niwsgtv",
"nsqja", "utgskd", "hlcum", "frygtl", "xjmqetz", "upqddd", "vxzdstm", "hcmtera", "ejstou", "xkcguf", "bokigdk",
"vurnv", "zsgrje", "nbxlf", "tpilcx", "lvepux", "xacdtp", "amdgx", "ubbvnx", "xmvznh", "tlprri", "sthkn",
"xhoad", "deotaxo", "pqzppmw", "xlcpx", "qwzrpyp", "lujabeb", "heskwyy", "mzzaaur", "vnestcs", "rryphdl",
"ibdiabi", "eoiyt", "znflx", "clougix", "zzadxw", "lrrgtf", "lsdoakf", "yxfmqx", "qhnrry", "ktcdmv", "veygqu",
"btjlo", "fcspsc", "gozoazm", "xcsqgz", "aazae", "nkuvask", "mzdgjq", "sihqdhy", "zadrwzw", "gzcyuea",
"lpgccic", "fqtfuzw", "bjoqpkc", "oydpkxc", "sugnnu", "hyvygf", "axkxo", "rsmzb", "dlhqmac", "gbqby", "npqkj",
"odbtb", "bdsib", "zyasxv", "ifxqcc", "lmnjwhr", "ibuyu", "uzhle", "ccpwhjr", "vhrojnz", "fkzfz", "fyesm",
"dnvipvm", "jbbqn", "qdkgl", "xkvvgq", "dphugaf", "soxbfun", "rbgokx", "biveiz", "vbaqtn", "qapydgf", "llldu",
"ottjpzu", "fwjuc", "cawio", "gbkwe", "rrnnxer", "luviy", "zsalse", "ckwdeox", "ozhqocm", "vtozfwz", "jztole",
"ydqei", "bfugz", "psawjp", "dzlyrwp", "izuyrne", "rbwcfr", "vdvte", "usjbqs", "zzovkxr", "frfkwk", "mmtmdd",
"sntka", "wachbzo", "rmzvj", "scbngo", "eqiuiwi", "qfakk", "cckcmt", "owhzow", "rejdlw", "iprsqdq", "twwaldw",
"mfilzyk", "jygvx", "iewbo", "irhko", "zpazqhn", "ndqbg", "ayzxqdz", "zvpbh", "maapq", "pzitrfm", "qsgsurv",
"viwcfff", "wpgenms", "tjmvu", "czuemc", "infxoo", "avhbw", "nugkqx", "xubakjp", "ndask", "utaqq", "njhuxq",
"sdvuex", "tfmxqp", "bydovjo", "bizxjsp", "zoozxyv", "jegei", "gkpqobw", "psumbtg", "gkgoh", "sgcbpql",
"xxkhy", "kdorkr", "hcomj", "ulrpyv", "rhplil", "tyyochd", "xhzul", "srdjmns", "kgukye", "yepvs", "xnobsjb",
"umxmtub", "wvqasr", "igftpzw", "exhecn", "rreee", "jpxuvxh", "jriqf", "akexunb", "ekvdsoe", "ytzvj",
"vfrlyae", "pmfai", "biouzle", "xkbce", "clzyi", "xhjoso", "wmxkxb", "dqzzig", "ydtby", "gskwj", "wlkwbz",
"zepvllz", "zsgqp", "blntawk", "eynmil", "bdqyp", "wgtnqbc", "rrgaq", "gtafuzo", "qdiko", "kkcsdo", "zwqhs",
"kugzbmf", "wtvvs", "kqsdx", "mxsuxiz", "pgbgjfe", "vodfr", "qbvwu", "vfwbhgw", "ayojye", "kolzfqg", "xnbecj",
"akbcnf", "uutrn", "upmesa", "marqej", "bbucee", "bazqbau", "qikgsyf", "oeayzn", "uilxnzr", "vpnxknl",
"btgtxgh", "vjaav", "zaxtzah", "msweps", "awduwld", "gzaep", "ngvgc", "qpoqdgn", "kimndg", "qilmmpw",
"oafhlyp", "nyelgvw", "onymk", "feycbc", "dhcrx", "siqpfly", "tyvycmf", "huctqp", "uscjrp", "bbptd", "msdmu",
"xlxhye", "xnyzcox", "kyskda", "injdkmp", "jiwus", "spjylwd", "eqcrnt", "snfiu", "jvwvge", "yfeaw", "mmdnsjj",
"suzdw", "xiupf", "rjwjhng", "tqvasy", "rmibpa", "zuqax", "prpndnp", "efryqe", "pwuqfy", "wpqlfs", "aeswq",
"cxkeiue", "jydxzfi", "tzfvwp", "zzgtw", "mupiusx", "sojavt", "dxmsgq", "migjiyj", "kixjk", "ywwvcpl",
"khzcuo", "oykhx", "fochin", "foxbfkc", "sizjg", "wrjcvr", "ceadd", "tvfqgxq", "whzhche", "dcoeti", "mpilfib",
"cphie", "ucpnjm", "ajltvx", "kpizym", "vevfsrs", "jznrri", "yvhxomr", "cbcnk", "yuwuhu", "jywuzed", "kqakusq",
"jrnzgfo", "mjimzz", "mfjybnd", "ntqyq", "junxxck", "myvqajv", "kvuqs", "obfxw", "jwuba", "vnrvzvy", "aeric",
"vtgda", "nkrocpt", "ahitg", "dzxtr", "zswwc", "yhxap", "fdhiwr", "cpxtqv", "izbmo", "zyioo", "vysnoe",
"ouuyvj", "cumdhzn", "dbsmph", "cktjem", "vbmxy", "utgfyhc", "rqdeorp", "btnlmd", "chxwlt", "nsghoqi",
"egycsm", "wkanat", "lzjyf", "donyx", "cchqsa", "xozzz", "yzmnf", "jfzuh", "dpcpg", "hlahz", "vobopk",
"lssfeli", "ccttzi", "glzgqpv", "oyqzug", "qqhkrr", "euwotv", "hwbmtz", "hiylhly", "bppzne", "yetyyvs",
"cnbwcby", "hzblk", "pfjmxt", "dsxvt", "vvkju", "zjrfr", "gdbhb", "udoad", "nbhpzfm", "iwetbym", "atmly",
"tnxli", "myegb", "hiwqsk", "btrajk", "nhrmwn", "ftmbecv", "xopht", "eiikqy", "qizanwa", "cwxiatf", "jshjva",
"llrtkn", "zhivu", "lmwiu", "oaeaqz", "oxotfub", "jnkafm", "juhrmq", "mqzbtw", "puiaxty", "dnahvoj", "gaxhz",
"xfnay", "iqmlnlq", "xudhcg", "izpkz", "tqttmt", "bwnbs", "fdufd", "vhzyymh", "zhqtxr", "evbcrv", "xvnma",
"dgcwy", "cwxzlbz", "oodiol", "teyim", "kqqfjub", "ftsqzi", "arfztkr", "oqlujx", "rpkkdov", "ptoff", "ivxaxr",
"nxeept", "cacpl", "tehir", "spvggl", "qfzxkn", "bhwkukx", "fkdpuq", "xdrngre", "fnfplq", "dzbrl", "ufgxu",
"sciec", "fgdydvw", "nmpaqxi", "ydsvfv", "natjz", "lruyvzf", "xznznxp", "mhfrh", "kddsk", "uwatn", "uklzs",
"lnuta", "ryizc", "cvwko", "tnzpk", "ywpiv", "vbvcagq", "pzolw", "nmyfhg", "cshkofj", "ksptw", "kqejh",
"zgzjqzo", "mxzrw", "enabosq", "vmubgc", "sfzcj", "hewvk", "ewhrq", "oifnsmi", "izdnvu", "cshgtk", "mqotuhd",
"gnqgj", "rxailbm", "iyhxvtu", "ncjzklq", "zjmnoc", "awqwos", "ugujppc", "spbvfwl", "gntsvo", "euksu",
"qnvneph", "crhmf", "brktmf", "mvgmr", "yzcskrp", "tihawec", "edqmxpn", "fxyymlr", "dzfkucm", "prldz",
"gplrlhz", "bohwr", "bhebbk", "mmecj", "segydd", "ptslsb", "pyhgw", "cwmrq", "mjfhflh", "xhuid", "npxmb",
"izilq", "dczhqh", "tgfnxtb", "zrylvo", "lctxrar", "ylhrbii", "rfxedv", "llvhzjq", "bjocv", "wbnex", "cnohnf",
"xahrl", "rouvwyc", "hbhovgv", "dhucp", "ncmff", "ncsskg", "gsjbyin", "lroxscf", "whfaenl", "vsfultg",
"floxkpy", "captoai", "qwolyex", "ggaypn", "wzunypd", "pjixeu", "gxnjkoc", "pqiqhn", "xakjmgz", "vqizkx",
"gdzcxr", "kyxwdd", "pgxmazn", "qeuwf", "bduknm", "tcrcn", "nehgee", "wktbcgu", "jwqltdt", "wczkai", "drkqs",
"qhdqnn", "oobxirc", "lbunv", "ifscr", "xnfpbrw", "yrrdbax", "fbocs", "tewne", "iobixe", "zgosas", "yhesn",
"xlqwd", "pfcen", "slsjffx", "ilwatrc", "mhsmgp", "iteghl", "aqhufdl", "kxgpqcu", "ryrcgp", "azidf", "smlnl",
"rocxvbt", "iutfc", "loapgbr", "musulp", "dqcnj", "tpgbkfh", "wvskii", "itkfopo", "kytyb", "rzahbu", "aewptd",
"ohergbb", "cadxh", "aphwelj", "huooyzn", "gtttia", "izeyhcr", "cfvxz", "aitaxyp", "vypqost", "ebfnmif",
"kgiucm", "zryyu", "oxgnbpt", "frpwo", "ouqvodl", "pdaazh", "gxwmf", "dozxsjm", "yndpsik", "zcwvu", "mihug",
"jgodklw", "ysklw", "cfxqv", "yqvtz", "rctnp", "xjywa", "kpqyw", "hhtegzt", "rnwbeoi", "uyxqum", "jahcwbe",
"jzjns", "ovwoaz", "oqmsrua", "natbejl", "deffv", "okgbr", "paqhy", "jkafhte", "lifsknp", "afmskh", "oemdro",
"oxuwov", "qtyxa", "hkpfsm", "ulaubn", "tciurw", "myohwlo", "okuiejb", "ormoqsb", "gmipz", "hterzir", "ekxzre",
"xkevge", "ihenf", "nnhzv", "eocjmx", "upzal", "oounfko", "myhbwub", "fwipva", "pkzzvpd", "nrupm", "vluzq",
"fxkoyho", "atzktr", "aomrp", "qwpser", "ejagmb", "cfigelm", "bvanb", "cgcgabo", "hmjvlqt", "hxxocf", "ftqaud",
"htuipy", "bhwmcn", "tgyvaqe", "lvuwh", "yiabzs", "rzzavu", "fiubm", "uuqsb", "riyakuf", "psscffd", "kvckzr",
"fktmnf", "ivzqexi", "nhxzm", "kffjmb", "vdzxv", "esago", "bfikw", "gaiuxmz", "volokcm", "jypcs", "psibvs",
"hxaxklf", "lmqwgy", "spnbimo", "mtihak", "xikoiy", "rmmtv", "phaqgxj", "zcuwkhk", "emodbyb", "ztahsya",
"ieiqm", "lfoquh", "emznnq", "pnhlgut", "pgvads", "cqsjx", "lxnjei", "zpque", "rdjbiyb", "sxedpu", "potnqva",
"iirkn", "rjmnrxd", "ksgcd", "waeymnh", "tizdz", "kproa", "wpttygd", "lvyze", "peewvgm", "fwtyzbw", "zitkk",
"gfgqr", "udgvlz", "swqspo", "ohhvyq", "kgyuau", "hcerp", "pdomlm", "twabkk", "zfsea", "epiwp", "xgycjpt",
"jtkdh", "mxmdm", "rtkzm", "qkacy", "nuvdiq", "agctak", "hypgyh", "ewtjp", "paysolw", "bcutebe", "xelxyb",
"gzdvrth", "vpzfv", "cxrkt", "admiyzi", "lqlmn", "zbjpbg", "tlvdnli", "zetnox", "ylcsobo", "balajod", "igoume",
"sxcgw", "sbkkafk", "fmndnnw", "incsa", "jyupkg", "uhvvc", "rswnbth", "nvprfj", "figqf", "znyidqi", "aijper",
"euidr", "dftxkze", "vnppi", "splwifc", "fprgafl", "ixzaz", "mrhqtne", "dtkjsy", "dsmqrgy", "xfscz", "cymvmpu",
"vptkfdx", "zrgrjq", "mqvwsur", "hdtlw", "ugdpwun", "cvxitc", "vytvqg", "pmtpfz", "nfdtdt", "umvwjuc", "jouxc",
"qpypri", "pdhqp", "lmise", "wlsvcfg", "aqdkzcb", "qlrmrfz", "pbgoyi", "xmsskoh", "jjdye", "xvsdmq", "ymjeipy",
"igjyv", "uiojvmc", "uckoww", "grlnyeg", "hpglp", "omnnyy", "iiliir", "cnucbcx", "pcxvs", "hipad", "xmiltkj",
"oorwi", "qgoxjj", "jnmviqs", "wpleqn", "tudxw", "pcogem", "hgewaf", "niwfexy", "vcttgcb", "anjgovq",
"epgmscd", "mdtru", "xvapv", "rydjik", "kopppcr", "mjbsmu", "unxoakz", "ldpsw", "frksjr", "vyxxg", "yyydri",
"szidq", "qvbtd", "qratl", "xwfov", "bzhqyxl", "fskrtf", "pcpzmnv", "xuxwx", "vzbevnb", "ebaqz", "dbpuek",
"ooqwj", "gaimp", "coelqh", "bwuceq", "oxpfjt", "zrqyc", "rwllk", "pqunv", "ufbnn", "tbnjoz", "kkqmrxu",
"qyyrm", "hislf", "wyuck", "ubpre", "pdioi", "aryhv", "vdcxv", "rkgmaag", "czlzokw", "gtxuduz", "grpijx",
"qzrar", "qhues", "rmznt", "sxxmved", "onjzuwl", "atbjhip", "nrardl", "alrocy", "cfkip", "ihtbf", "pqdgm",
"hmokun", "dpghac", "otwml", "mnbzwa", "ehetlt", "rchvq", "lwjgywn", "lzdmjo", "nvhohdp", "tmshcpc", "gavjv",
"ycnkv", "uynzh", "bvpnfjq", "lfbem", "qberui", "vrmmhx", "wpbqtfq", "jujpx", "dujgkof", "hrpbso", "zhcdt",
"iybngyb", "rgeruza", "nesyxr", "cihgfe", "hjgskb", "zspxeqm", "inzrgyd", "crkjq", "iooshwp", "muvvj", "wakis",
"rowibwa", "qikwypf", "aportho", "pubcgx", "vqoqpfi", "rnpbri", "ussjv", "looor", "xkzvdv", "tstegg",
"zgiiokw", "rwvyaun", "mqqla", "asnqp", "nghuryl", "hlvhn", "ecuotnu", "judvbu", "xgvuw", "oeckn", "hdhttsg",
"hcyhu", "klbyjc", "tnrmqnc", "mjojxhi", "kvdet", "vbmevim", "oglrzs", "afbscdi", "zxrffti", "firzgmz",
"oenim", "wgpua", "asiep", "kyteq", "wpeneca", "qixmeoq", "zaofon", "csxxtr", "cpwmnl", "feylas", "idjuo",
"mrtpvta", "jjvmjy", "mnljocc", "lnvjleq", "oognud", "rbyneq", "rhvomm", "fldrkpk", "znvrp", "myswmz", "jiloe",
"juivjmo", "ylhbyzl", "ndmabkt", "sgdvlq", "pmnddmi", "utpuj", "kfisv", "nxfeell", "mxhgqd", "ccvdsdg",
"emtybo", "zmkylbt", "mmrpi", "dkwlgq", "iwlappb", "uimsrnu", "mkxaxmi", "tcvll", "njggal", "kmqud", "evgzlh",
"oaxizbp", "jiuej", "xknlp", "cyksydh", "gbixmz", "vtouyk", "sxjpkio", "qhubt", "kflvnb", "sjdfggl", "bxozyj",
"xekbh", "wtmcb", "xtapfco", "rnornl", "ursdpki", "waonim", "eibfyed", "zniinaz", "uyfohq", "qcaxlt",
"koyaapa", "pjuvbsi", "ecpdl", "ifaqwm", "yyumzc", "gvfngfp", "lttul", "flyza", "uasdlme", "oklhb", "wulkzzv",
"ziwsxo", "jqcxiu", "qdzrwgm", "zjdwy", "uumns", "emlnp", "irnrqp", "gqkza", "oynpcz", "yxyea", "zpamf",
"gyehxbv", "nplkhcc", "rxeekyo", "kecgp", "gseju", "nkisxqf", "vlyud", "fxxihhm", "yjgtml", "fehwpdi",
"wclnvyy", "lriwrc", "ikparv", "volfh", "ysphh", "szrvrv", "rqlmz", "jyqut", "fyftsj", "uvwfip", "rngwgm",
"mjwaz", "roehjki", "ploxokr", "yjbalp", "fspkq", "yfxrb", "kzulvk", "ordxp", "vdrrt", "wdiojwd", "ridzl",
"niykdvu", "whyycmn", "riwcma", "bkhgkrb", "nsine", "emgtgf", "zoymw", "ljtvhzb", "kfyfdma", "piygxdl",
"onfwgdf", "fwmkm", "vqbljay", "icife", "bxfli", "yeygr", "qenhgm", "mtxuckj", "kdcyx", "kwqhfcn", "ywkfy",
"prbpw", "pheyc", "kmnds", "cacqs", "kvekiqy", "bfvfhdy", "gxulp", "skmcra", "exomt", "lcxue", "mnvvday",
"rsddl", "gooegc", "udght", "doymnin", "ccdap", "wuive", "dyyln", "rynust", "luxabyg", "kdkkyyw", "vawqfsy",
"rmeswm", "rcxzyv", "clpowz", "pdntqm", "tvjkkmz", "iiclw", "nhudzen", "cybhu", "crwtw", "enypnh", "ygekg",
"hrjwqt", "peissge", "wangcy", "rbpoik", "raqulbf", "gyisnsj", "rgbqn", "lgvuzb", "djicf", "epnuu", "nsapc",
"voatgh", "yorfehc", "jxfttat", "wyuivb", "bwopl", "odwdsh", "anchkv", "sepvew", "qoxxmae", "bpvqnj", "sngfo",
"buoazou", "zhijssa", "janng", "uvdbd", "yfvkqo", "lcjii", "mvacvrz", "xztiar", "lpbtrqa", "ukbpdx", "okaqpgr",
"idgqlj", "ewglgo", "ruymhi", "pcidw", "bvuqj", "npzch", "yppyan", "oiguirj", "iijvwqj", "jvbwjys", "yjtunfc",
"iaikra", "oduhdgk", "ivixur", "ibcgai", "djzvcbx", "lmtsul", "lgnwzol", "wursq", "xsxbqwq", "jqvwnc",
"dcwwvtb", "vwybnr", "bughwjl", "rnelxb", "hmacv", "ufgdygl", "aabuat", "oynwask", "gnfjjf", "zipbq", "zxstn",
"jdrbprf", "jmkvny", "rblpql", "vykdj", "qaakyqw", "osbhddb", "avgldyy", "kvpoa", "fnqcliu", "zzlninw",
"drsal", "omswys", "hwqcpct", "ecraq", "fvhsbjq", "raauy", "pfmoz", "vvqvcm", "tbjqjun", "jcfbegq", "otiwup",
"axvvce", "dhpdnx", "pennr", "hvvmvzv", "binezl", "ygdmcuo", "ypwnqn", "aloxdv", "ucieh", "kovbtag", "rgfpaww",
"fpbftg", "spjowfr", "zridoy", "blwbbf", "evwlxi", "itbcz", "hgixuo", "qmoqmjb", "tkeeis", "pjiaq", "rbpje",
"ledoui", "ubecht", "mphdd", "uzswsbb", "ntsybr", "qmnijyp", "pqwawe", "ltytill", "dpnxy", "pkxqcol", "ayrdi",
"mycnd", "knotsn", "zvcrjl", "qwroblg", "vtrktey", "dzilezi", "wzkxg", "varqc", "xlpttyc", "xxqhnl", "jpxywa",
"kjdsh", "hdseebw", "bxqbp", "flazqce", "xrtab", "rupsfq", "asswer", "rhqof", "hjzdv", "addsgax", "cuahzjj",
"xwdilr", "osqgg", "pfhwv", "rqorah", "ggdlnv", "truvaoj", "jzuldwf", "mjddj", "vixtn", "eslxoaj", "cmoypm",
"jvvzs", "oqgxcc", "tptls", "wwgwbj", "tysuhg", "xbnqb", "iogjvg", "fbxdmr", "zdvsmx", "hiuja", "watrt",
"kjawab", "entxk", "jmnkaox", "zznsox", "asmzc", "soblvp", "quyxjw", "udrdc", "hyylvvw", "gzfwxuv", "jjqmjw",
"faegxbl", "lqjcg", "bzmruq", "bykuh", "miwhd", "ykgtwhk", "oyobzwi", "oltwpua", "ctulabr", "dwandd", "vhuhox",
"vtlknw", "ywvln", "qemqdeg", "akezvx", "kjmjpv", "vwuftx", "kreaxnj", "fvfop", "cxabs", "jfacbje", "eecnz",
"cmblit", "gfvpoq", "whywnh", "pghvx", "ohgkmf", "xxtiwd", "nkojni", "dlcicnp", "bwyvyyd", "gifup", "vgjfr",
"hhteifi", "kjhffq", "pawqaxl", "yozro", "slxluvd", "amqcquy", "vnnxkr", "wgdur", "rvawiu", "thcwnc", "cddut",
"vnrtrv", "fnfio", "nhvxe", "rfdqmj", "ucblh", "ccbnt", "lxckaoy", "fnwcbx", "gmdbiwt", "ypvwjy", "cbjazk",
"qmujnm", "nsqot", "lhcqt", "ijxcts", "nujrms", "itxel", "ghukr", "qpwitlr", "gcafqrn", "lcoho", "lfzab",
"vwhgceb", "vgsgy", "jrtgo", "ryxlz", "deoyq", "ybenly", "lyysca", "sodvazo", "hbnnoz", "ovgvda", "elwtjx",
"soydmn", "trdsi", "mwwjwo", "vupwj", "dszpcv", "kkhjdj", "ewmyo", "nmpeq", "oepldcq", "xttrgu", "wbcbxi",
"jakzk", "peukyw", "fvcqv", "xklwuu", "hsmva", "kslmkq", "azllbig", "stnzih", "wfyud", "ihauy", "cfxmj",
"pdyogwv", "dcqdpa", "xhusy", "jfpmpmm", "odeiiw", "ozyaer", "uykzvma", "tuaznxj", "kdnbdki", "syrnsem",
"fdysz", "hhrpo", "fglzfi", "vgcqzqm", "qhsjr", "bvboe", "dpfwpvg", "mvvry", "itnnr", "lgykbe", "pscow",
"mkrgeqv", "czffv", "apteht", "jeqixsx", "ksmbe", "zamivv", "vvmyo", "cwwoce", "sppubxc", "qaich", "nmbxr",
"tfkwfxi", "iakhezl", "fxujis", "fkwffe", "antaylq", "mmfgstq", "zxaacy", "zlswx", "pbqxil", "eupck",
"qzcxpbe", "rjalbzr", "wioagbq", "kreec", "zsdcuft", "rrdzb", "ocdlvq", "oxiroo", "zcxsqh", "wbrsi", "fqike",
"oskzupi", "thvof", "dicbyst", "iojwe", "hyfizq", "yoknhww", "nupiyyn", "ievah", "slcgmxg", "cnecpa", "lcwsoj",
"hnqsc", "ghipbi", "exobr", "nwpnq", "dmhbj", "amdbmwl", "xfbzovs", "puizvu", "yvsus", "ykysqg", "bgqdv",
"zgqbr", "zkjpkej", "crkot", "zciymk", "tleogn", "sayrmz", "elwma", "zugjva", "uifwsmw", "wstrg", "xbotd",
"hinsg", "qpgyoyp", "xzfocdy", "mbvuepb", "dtphufk", "cyapnt", "yyehhad", "ohdrd", "mlibm", "qzdfil",
"rdwszqx", "bzcbmyn", "uarjlg", "mtwpqmx", "nmagl", "cepniel", "tylvaa", "melhd", "jygeneg", "fdglfy",
"xcpciu", "ayrel", "bxceshv", "kspyg", "iclkaz", "ykbzt", "nrnkzo", "kxkto", "fabzszn", "edalls", "nilmh",
"wwawgnn", "gymbtx", "mzipa", "ajevx", "qppisv", "otqhsf", "ippxak", "bixnqd", "uqitwo", "soxcug", "loiscd",
"wqrjk", "rqntoa", "fzpxlp", "tuaob", "pyqqms", "krbzmmj", "aijqpfg", "nstqrbu", "wmtiahz", "joplby", "jyszxq",
"jnxtyhe", "lbvfv"]) == 14011
|
flexible
|
{
"blob_id": "57de9a46dfbf33b117c2dfbb534a5020e019d520",
"index": 8513,
"step-1": "<mask token>\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n <mask token>\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KMP:\n <mask token>\n <mask token>\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass KMP:\n\n def partial(self, pattern):\n \"\"\" Calculate partial match table: String -> [Int]\"\"\"\n ret = [0]\n for i in range(1, len(pattern)):\n j = ret[i - 1]\n while j > 0 and pattern[j] != pattern[i]:\n j = ret[j - 1]\n ret.append(j + 1 if pattern[j] == pattern[i] else j)\n return ret\n <mask token>\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass KMP:\n\n def partial(self, pattern):\n \"\"\" Calculate partial match table: String -> [Int]\"\"\"\n ret = [0]\n for i in range(1, len(pattern)):\n j = ret[i - 1]\n while j > 0 and pattern[j] != pattern[i]:\n j = ret[j - 1]\n ret.append(j + 1 if pattern[j] == pattern[i] else j)\n return ret\n\n def search(self, T, P):\n \"\"\"\n KMP search main algorithm: String -> String -> [Int]\n Return all the matching position of pattern string P in T\n \"\"\"\n partial, j = self.partial(P), 0\n for i in range(len(T)):\n while j > 0 and T[i] != P[j]:\n j = partial[j - 1]\n if T[i] == P[j]:\n j += 1\n if j == len(P):\n return i - (j - 1)\n return -1\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\nif __name__ == '__main__':\n s = Solution()\n assert s.minimumLengthEncoding(['time', 'me', 'bell']) == 10\n assert s.minimumLengthEncoding(['ojtnj', 'uuydcho', 'dgsyp', 'dwxycpx',\n 'dpmvc', 'dvfhmb', 'flrxjjx', 'fwhdhvn', 'rgsakp', 'aiconf',\n 'nzacpk', 'sbxnaj', 'shway', 'rgrmz', 'rysudo', 'bzkioce',\n 'mqxkzvu', 'wyebk', 'tymoaz', 'mlmbg', 'djbmek', 'qfnme', 'khkiyae',\n 'tjdaxry', 'sqtcwz', 'ehnsai', 'jhncvrm', 'cxkzgrx', 'pummt',\n 'hzrpfcn', 'lkyqit', 'phpqdxw', 'vangm', 'wcjdgw', 'pxesvtn',\n 'mnqory', 'bdrzvh', 'brtzmo', 'chqgf', 'bipyxm', 'meoikg', 'ysyckk',\n 'ojayeiq', 'zrfbsb', 'yhuotea', 'crfbhq', 'tllycn', 'qxnzihf',\n 'avyawpz', 'bwsjym', 'myjozc', 'lbdksm', 'mctlt', 'dszowuw',\n 'syshm', 'xrvhhkn', 
'kgrcwfv', 'dwlajlf', 'yviuk', 'xegjj',\n 'spiczl', 'vfvomi', 'mgcujy', 'dqmzb', 'isrisgt', 'vdrtuah',\n 'vsyth', 'eoclef', 'poccek', 'cgafrlu', 'crbhpgk', 'sromv',\n 'xmvbca', 'gobra', 'ygvlq', 'pjvhe', 'tfweiso', 'cskuohg',\n 'eyalone', 'pobkak', 'nzpxn', 'lbcrws', 'uhtfe', 'eorth', 'showvu',\n 'hxsmb', 'jrggose', 'izifkb', 'oqwyf', 'mozmzj', 'ijwle', 'ggtqqqv',\n 'geevzj', 'meota', 'ifsse', 'kdtofm', 'swydhvf', 'tzjhqap', 'wqwwd',\n 'jlinnov', 'lmxkgeg', 'stbot', 'xrsfn', 'etoyctk', 'rygagm',\n 'vcnrf', 'zkdge', 'emqtscp', 'newqcyy', 'nnuus', 'exwsxbd', 'zstvl',\n 'lbkko', 'kygkyqq', 'oggji', 'xytbjo', 'mfbahk', 'ggoks', 'lmqewkl',\n 'qexhyqe', 'ogaogio', 'nzvbav', 'mdole', 'qvyks', 'gkupfu', 'dgmpn',\n 'ngrdrj', 'iitqvk', 'ipuiqb', 'ugxfea', 'ialkmv', 'hmgnx', 'aoyoj',\n 'fvzhjil', 'butrbp', 'dwhxnes', 'etkdwg', 'cjkghz', 'tovkq',\n 'mmxhv', 'jgcsn', 'hmictal', 'zxmnek', 'pcoeg', 'ntyqmlq',\n 'hfubhtg', 'ydjbv', 'xnwlqto', 'hatgi', 'bsaczd', 'pokwk',\n 'arxlula', 'zjtqlk', 'ocfxup', 'nsnqjc', 'xdcsopi', 'iqxyxp',\n 'xfmtpvm', 'bqtgcf', 'wboycn', 'aoeda', 'uowqdgj', 'rzzzx', 'liucs',\n 'ejzxz', 'qmlehsh', 'igrbmon', 'dpmkbon', 'pmayh', 'nujdwdw',\n 'awdgo', 'ijgkzk', 'inhee', 'jzdtv', 'adhauh', 'grtmbp', 'qndbvw',\n 'zprrw', 'mpqieq', 'jzmzeuu', 'fcvftqs', 'qxzxqy', 'lidguzz',\n 'eazwd', 'zjhfsz', 'zsnzefh', 'mnckfg', 'zjgtq', 'ckyxlif', 'fznfo',\n 'jegnof', 'lzwyzb', 'ozivfio', 'igkclsa', 'bebzn', 'bitsggm',\n 'lrnwin', 'hjnnzr', 'idvoirn', 'dgile', 'vfngh', 'xbmur', 'rqaftt',\n 'wjwwwxs', 'btreou', 'gjsycg', 'pvsiylz', 'ccxzgdf', 'excrrrr',\n 'fiesr', 'jdioj', 'uzwsc', 'odrlcoy', 'hcsit', 'ptwfprh', 'sbqry',\n 'kffvy', 'ejeawbp', 'omvcc', 'iqgxqlt', 'edsuu', 'xnbue', 'qfbcx',\n 'fzlmbkl', 'wrrcueb', 'mmqispp', 'nknilwd', 'dewuhju', 'hmdqlxy',\n 'vjxgg', 'lkuexo', 'dzvfscm', 'voulbs', 'uevoqgq', 'kmhwu',\n 'oglzllg', 'torhihn', 'fhuqzc', 'mmcfhb', 'woyayma', 'uznsvre',\n 'mmxed', 'aoskwg', 'xrosbm', 'hpyrgh', 'tghwbwh', 'hcwzn',\n 'iepeftj', 'judij', 'kudbk', 
'jonpv', 'lywck', 'rxelz', 'bgifz',\n 'mehbxq', 'fmqnz', 'sqrmzj', 'iqqjzex', 'qioliz', 'kjizbf',\n 'lgdcffc', 'pfgmcr', 'trdabul', 'vlqjdnc', 'jjvbxe', 'fqlayw',\n 'ilbhtyq', 'saawulw', 'gxysrb', 'kighql', 'eceapr', 'kztbcww',\n 'jedkoy', 'dxpcaga', 'ndacphe', 'rcoit', 'ywgcnxg', 'klipfup',\n 'bddws', 'jwyof', 'lrfwgo', 'bediwuf', 'ujakh', 'ppima', 'xzhwvm',\n 'guzmsqt', 'ffbliq', 'adjmynm', 'akabzn', 'inmykju', 'vlcjyv',\n 'orquepg', 'tufrk', 'vqpjymm', 'lvuab', 'qzxav', 'ekcmu', 'uqtuhie',\n 'kfvtgf', 'nklwjo', 'ujxlfpl', 'zobfpq', 'eignijd', 'ythctg',\n 'artllm', 'wodhh', 'tzpwszq', 'njdqegg', 'hzrqib', 'zvoxtfd',\n 'htboem', 'axjuix', 'bvmvm', 'jbnum', 'bxdth', 'atejt', 'gqsqtnk',\n 'fykrjbp', 'ldyhonr', 'wcuoj', 'upphc', 'agydg', 'cjmwk', 'rhxbqh',\n 'tpgozdd', 'qyqoy', 'zjqutw', 'qoohqny', 'nsiacwz', 'xupin',\n 'criuvs', 'eswjeft', 'pdmevn', 'zvogq', 'lrrvo', 'qhfqqpw',\n 'ktudfg', 'ijvmi', 'neyjjdx', 'rllpi', 'vllvaa', 'esebtu', 'jyhcrh',\n 'otgmr', 'oudvyxj', 'pmszy', 'opeed', 'gicni', 'mnuzn', 'mjbfpod',\n 'sqwgxu', 'dwniwz', 'wmbmmv', 'lyafuy', 'zmvlz', 'kopxzuh',\n 'urcbbiy', 'guhco', 'nerjm', 'lpdxc', 'hxmjzz', 'hynagc', 'iyxeczi',\n 'bdfxmoz', 'yybnpqd', 'jvgnb', 'oquqem', 'fmclmz', 'dmkhf',\n 'zxbjpp', 'qpxgcir', 'iecvjm', 'gtkne', 'lgtqrbc', 'gilbn', 'mcxsg',\n 'ncwbhn', 'wkriiq', 'zhsir', 'ptkkmw', 'jcbpkrm', 'vbefo', 'vmbcd',\n 'vqffj', 'fhqzjt', 'nryuh', 'vmclav', 'cjyggm', 'sanev', 'rrdocz',\n 'zqdexbs', 'jrxstt', 'pyhcesj', 'aagghyr', 'cyemjrb', 'aliohf',\n 'qaslg', 'pnyjzxz', 'pehnvi', 'suhuw', 'twopabr', 'sapqoc', 'mckrh',\n 'nzlgrxt', 'aqpobnu', 'pirbjgb', 'plzlj', 'raylxpu', 'gyasfrh',\n 'urjfxux', 'xjbwau', 'iupknn', 'vhxnc', 'dnbjop', 'vrxhwmd',\n 'vjsmkh', 'rfmqids', 'smaiwt', 'vkyfo', 'bjqyxc', 'rbbbp', 'dlkzg',\n 'dwvdwu', 'prulzh', 'bavge', 'ehhrz', 'xxjqk', 'pxopmp', 'okmkmb',\n 'slcznpp', 'nvqlb', 'jalrk', 'parwlcd', 'anbxo', 'oqcxyzo',\n 'fjhrdjh', 'pgvnwfe', 'yfjyvh', 'quvszjm', 'xyiig', 'xtncqv',\n 'svsix', 'jvpdnh', 'owuiv', 
'bsrugtt', 'rmvggws', 'lmdql', 'kvmvd',\n 'xrpmaw', 'ssnxyb', 'oworq', 'rmmpuya', 'rijpih', 'aelazka',\n 'kncksqx', 'yvtdiy', 'epato', 'pbbamj', 'fejsw', 'zgsru', 'ekwrre',\n 'zqben', 'vugxi', 'fvcsdp', 'rujcews', 'asqxya', 'worjlsd',\n 'xggakg', 'kzfpot', 'haqon', 'ypqxzz', 'mmkzwt', 'bdhif', 'exzhv',\n 'srnklzh', 'hlrunb', 'dwfyke', 'fvgbtdm', 'aeutp', 'czhefx',\n 'tegfw', 'jkxpsb', 'gxkfkw', 'exvntd', 'gvuti', 'jdmly', 'owaqhw',\n 'fopuxzv', 'edrvil', 'biszwgv', 'vgckzd', 'fqdxn', 'qktdf',\n 'hpgwrk', 'gpxiips', 'vxnlab', 'yylxz', 'hsuscch', 'bhivaf',\n 'wzrwtc', 'ebplv', 'yzxykou', 'mxlssom', 'evghv', 'hksleg',\n 'shybau', 'zeyqa', 'tljqka', 'axfkec', 'fatdj', 'janlkcc',\n 'sjorbra', 'jplge', 'oazzot', 'qbgtncn', 'ozlil', 'stohadq',\n 'rvpuwn', 'oqwpl', 'byftgi', 'ubuusl', 'fkogr', 'bybdyhj',\n 'vinyuzs', 'ivsqvz', 'vmnae', 'gckxw', 'rozbe', 'glvxwj', 'rcgicu',\n 'xmvbd', 'itycsry', 'llmwrs', 'fuqth', 'styrrwl', 'wsseuln',\n 'xwflcli', 'muxgz', 'ypmbboh', 'rpmvnep', 'wjvvnv', 'arjnw',\n 'toauwc', 'ltjxqrl', 'basffd', 'clxozwd', 'glmrv', 'iejgfj',\n 'cvkoj', 'wotjf', 'mqucec', 'xalgemc', 'hgimkh', 'golvfq',\n 'fuqpmak', 'mhpcp', 'pxoibt', 'ledqa', 'guzbyr', 'ztvbeka', 'racdp',\n 'krsngra', 'aaiknz', 'bhoobyc', 'xibbe', 'yohepxk', 'eclevs',\n 'ldliwcm', 'qatvlk', 'eiypbw', 'vxvtwa', 'nkdwsej', 'ftmyvp',\n 'gpthye', 'gazwoi', 'zzgipon', 'cithg', 'wpabujl', 'jhezlnb',\n 'vqqaxfg', 'kvpbk', 'vggjemp', 'owylv', 'lgwtfpg', 'jjqvfm',\n 'xbhga', 'tulvfv', 'sefuo', 'hbysv', 'ozopepd', 'awyrifd', 'pnudwx',\n 'vreje', 'zhpgw', 'qygbf', 'tvbrvy', 'zzmcw', 'cznee', 'deuzxt',\n 'qfppjvi', 'ilkps', 'ydwhg', 'krwkxzu', 'mnsidg', 'rkxyyr', 'ajkqz',\n 'xtmom', 'vqocor', 'fympcl', 'yyleyzy', 'jjvzhrn', 'kpmxvuz',\n 'txoeqlx', 'lhhmn', 'chzgpf', 'ncnjxle', 'ihxrg', 'feqixq',\n 'lkfhcar', 'hfnsh', 'bifczy', 'umknat', 'yrhgkh', 'mgpcu',\n 'qotukst', 'yqlmfq', 'ttcdp', 'xnjjzm', 'cukbr', 'hjhjb', 'iikfcsr',\n 'nsqbnnz', 'dauygf', 'cmydq', 'lfnhqnl', 'ppqgs', 'hscbfug',\n 'ohzisud', 
'opspdkv', 'aauxbop', 'wpkhzo', 'sxbsgu', 'tajrv',\n 'ololy', 'mxmus', 'vizvxv', 'osaqz', 'rxygkn', 'mrzqlf', 'zrriyxb',\n 'ufroe', 'bajozg', 'atpsu', 'uhgauzu', 'tffdw', 'mdjulde', 'rbrmy',\n 'jhkqvwl', 'gzsultq', 'nkbfi', 'xtvwh', 'dryzcv', 'emaxuk',\n 'zucvutb', 'jdduyk', 'bjdin', 'loicuq', 'qhjjb', 'rgfjbq', 'mphnk',\n 'lxvceyx', 'zeoxb', 'fxhnxu', 'qpbipe', 'ophwp', 'wiioer', 'quchwj',\n 'pouxunw', 'bloxgg', 'xbsma', 'dtwew', 'xstorn', 'qfrfkz',\n 'gxusbsn', 'dhnxd', 'mhstbs', 'hekbtu', 'wvrrjw', 'yeiwd',\n 'patplsx', 'qmyiyi', 'mowboj', 'iskyd', 'bqhjj', 'povppk', 'vthpwx',\n 'uuydaw', 'rduxvez', 'vmcww', 'ylruvph', 'ymqosp', 'wzcvohg',\n 'lhepwta', 'bckhc', 'oiyyt', 'wqzfv', 'uduec', 'lkkbtzl', 'prvpbo',\n 'jrwstii', 'ijztoo', 'qwwth', 'vqzqiun', 'krnjp', 'zyanpiw',\n 'ojhjhvg', 'lohmb', 'thqtf', 'reptzv', 'zgkyq', 'lhkvy', 'cmjwl',\n 'fmilgpw', 'jrfawz', 'vrtzd', 'ezgfl', 'plzng', 'zidzso', 'civavlg',\n 'vtwopu', 'ljhckxo', 'nuydt', 'qembl', 'fiwrre', 'gfrgi', 'gzegiq',\n 'mltlqo', 'pcett', 'snbsc', 'msibcqn', 'beacrhz', 'vsycjt', 'gjqji',\n 'smcegol', 'zregkp', 'smcazoj', 'dziqad', 'jpuwp', 'hnlztac',\n 'vduitco', 'wyencad', 'bkdnnqo', 'cabzyg', 'mgpcwr', 'fxgvkxt',\n 'wlkcrdd', 'bhmhsy', 'gqcctjc', 'atafpt', 'vdzhmcg', 'ighxj',\n 'gfqpale', 'fohbrtj', 'mfpsgt', 'tarjocf', 'gyycb', 'qvqfryl',\n 'jpwowwc', 'jcgcg', 'gmrjze', 'nfptxq', 'hmjhxge', 'ieelj',\n 'suvkgr', 'nwjxe', 'tkepqm', 'extnpmq', 'rxzdvf', 'relzaa',\n 'hfhgaq', 'lmihlz', 'pacocq', 'dclxr', 'oknoem', 'pbpnnd',\n 'nleerfl', 'tvytymc', 'aamfnl', 'ufdnq', 'bxyzvyh', 'vksvout',\n 'lohxhf', 'sskgn', 'aawbv', 'hrvhx', 'wvoqf', 'vxkvh', 'oqany',\n 'bcmyd', 'epdddqn', 'zrlej', 'bchaf', 'hmftii', 'mefcrz', 'wbxvc',\n 'ewwnldf', 'cqecxgh', 'cnwvdmk', 'vetrw', 'zmogwov', 'lshlzpe',\n 'lijay', 'tcdqg', 'xavqixd', 'yjkhtsl', 'myjvow', 'cgthhd', 'taaii',\n 'iuuegk', 'lcypmle', 'wesrit', 'tybco', 'nhxysw', 'awkrj', 'jcmqa',\n 'porvo', 'nrypriu', 'vznnevp', 'hzklwi', 'vapuxh', 'wyfkn',\n 'albemu', 'ttfdbl', 
'dbqrjv', 'cxals', 'qzitwf', 'ysunur', 'llsefy',\n 'cghfzji', 'jboaa', 'emhlkw', 'khhmgha', 'twlxgjz', 'pyujor',\n 'ozcax', 'fetvovo', 'mdhrrd', 'qdhdne', 'fiuvw', 'ebyxh', 'ldaothh',\n 'vwyjf', 'yjyljlu', 'ivroqg', 'qvpeyec', 'eemsdra', 'wavgeqk',\n 'bjejrqg', 'mdjimoz', 'fgopy', 'lgwodr', 'cunvszh', 'wiver',\n 'ghmog', 'jzgfyk', 'vxlbx', 'kvgbtn', 'cunorte', 'mtesdc', 'zdzmqu',\n 'pigik', 'smruadg', 'czjxlt', 'kukgaok', 'tsldpqq', 'luomo',\n 'ezbcvdc', 'tfetwes', 'uopzf', 'wsvezkw', 'wrnlvbx', 'bpqungd',\n 'jqnnof', 'rqhiomi', 'voulqb', 'ouspxn', 'chngpz', 'fbogfcv',\n 'nqhunxo', 'rydbke', 'ewduo', 'suqqwup', 'oxzfxj', 'kuwfwm',\n 'euiics', 'mvftoau', 'vstfbm', 'vnmtoo', 'muicf', 'bjbskxb',\n 'knbomlf', 'enrbtfk', 'hnaqe', 'vxzsr', 'gkqma', 'qygmn', 'ztkybmb',\n 'injggpk', 'enqrgdk', 'rkgoct', 'tgaiu', 'dnknoxk', 'iwuou',\n 'oxanccl', 'xestej', 'ekrqq', 'xbwhz', 'jkdvxfh', 'oybaay',\n 'afyhci', 'papffjq', 'bdppssw', 'qwyvjx', 'xmnnosl', 'kvqzjl',\n 'wcwii', 'ygfvt', 'tpabbht', 'kjmaq', 'duschjz', 'gguiof', 'wgfhve',\n 'joqmfjq', 'smqfd', 'ynlovlz', 'sgrzum', 'bobmux', 'dcppi',\n 'isdjrwl', 'lbevb', 'efqsirq', 'hlgfql', 'enmemlb', 'dbmfk',\n 'ibfpzm', 'rtdnooq', 'yicdq', 'xadul', 'dxibxzi', 'yyxnj',\n 'jhsdzxw', 'thltbi', 'kwhreyi', 'hrocoa', 'fnaalbd', 'vnwona',\n 'nnonm', 'naqaf', 'xgzzies', 'uhruynk', 'kgadfx', 'hyohzbd',\n 'hnajx', 'yipzh', 'ezdxaet', 'xbzppoz', 'rwnewxz', 'hlcbkmb',\n 'znyhu', 'zsqtpkr', 'gmyxr', 'rphyvo', 'bgjuz', 'nulpv', 'eejfoso',\n 'xmwcnes', 'xxxxnpe', 'jezkk', 'idfsxrw', 'qgzjtf', 'arpzpo',\n 'hxsanlt', 'emvotcb', 'sknzhvg', 'icitca', 'ivhdln', 'sqilerz',\n 'ndigw', 'bcsre', 'mibbep', 'zsczom', 'cgghjbb', 'fkylfgt',\n 'bvzofs', 'mefsng', 'bispbza', 'tsosgy', 'xopalrw', 'wserf',\n 'jbmlz', 'xidxny', 'ffmpjos', 'vddwxmd', 'netnsg', 'kgevsp',\n 'pguuv', 'cwisp', 'slxiyb', 'dmwaguc', 'jobwusu', 'uytcqrv',\n 'hzhsy', 'zrlsdd', 'xhxah', 'rxzij', 'zwdgy', 'ygmvkz', 'drkzbo',\n 'qpsal', 'tpxvl', 'lfmfl', 'sayjvlh', 'rdamym', 'ycuzd', 
'zkycu',\n 'hdesec', 'unequk', 'lpkdid', 'vorxls', 'admsdop', 'rqnvkyg',\n 'krnqqtb', 'rxfms', 'xfthd', 'pxjbk', 'gpslrg', 'rwziwef',\n 'usxgqvz', 'baxxye', 'ocrkkrw', 'lrlgsp', 'ceyctg', 'rniml',\n 'vavug', 'jgircl', 'jrpnmsa', 'rywvlfg', 'prxnys', 'fkzmknn',\n 'ooelc', 'btvfs', 'yqepuvw', 'tmmmb', 'qmpzexb', 'zjckjvd',\n 'aieytbb', 'oafqq', 'szrcyh', 'czrxgae', 'ifkte', 'hfgajox',\n 'pwpnkqq', 'yqphogn', 'xuwthrd', 'mpcmy', 'qitdoa', 'avlzfrh',\n 'ywpip', 'dgeki', 'fgbnx', 'tyofu', 'xziqzj', 'qxzvqz', 'vtsqk',\n 'ipkld', 'yfhim', 'ebaegdc', 'ubhrh', 'ldejv', 'mtflwy', 'ocpyj',\n 'yopgqs', 'fkjxxd', 'njnnwr', 'nylkeb', 'taymdqv', 'ekpznq',\n 'cbzobmg', 'bucdds', 'qjozu', 'uvpghor', 'obhnu', 'ljkxbg',\n 'uqrxjtf', 'xwbxiw', 'oxsmcg', 'spchdd', 'pcuitj', 'faidq', 'tybmy',\n 'uygiyp', 'qloizj', 'cafgmy', 'smetd', 'kwcwb', 'tdabxf', 'fpmrc',\n 'lfjujn', 'vvmvex', 'mnsgdc', 'enjlgsw', 'ohwcg', 'kxjdaup',\n 'rotjarp', 'aovdoq', 'oviwq', 'qwaxs', 'bmazco', 'plcljsv',\n 'yytjhl', 'vgwjm', 'drnue', 'vqjgf', 'uqlsfy', 'bmqmfp', 'lkauwna',\n 'ozmqce', 'heunaxr', 'zaffbj', 'arbek', 'qjnllw', 'fdkhlz',\n 'wgmbwh', 'yceqag', 'ltjjq', 'yurggfw', 'puaafsl', 'tjiqkyt',\n 'yuzub', 'ytmrfq', 'ommmu', 'ipknn', 'iubnuab', 'dzthvc', 'zjbzpew',\n 'dcooev', 'pjydqcf', 'zuojlzy', 'zwjyfc', 'spmac', 'dfkbnz',\n 'fzriie', 'asusog', 'hdodx', 'drjpo', 'ddyif', 'chabv', 'ebvkwrr',\n 'burdjl', 'jjddi', 'dljzkye', 'samyg', 'zwgxcq', 'xtratwo', 'qfopz',\n 'xvlaw', 'laage', 'btdium', 'vzlnzt', 'kmvbzkq', 'kctobsx',\n 'kazbelu', 'yxdwrk', 'eslvjc', 'nhsdmvs', 'zuxqcc', 'hqtxovn',\n 'zrbdai', 'fgjxs', 'txecvio', 'kjxlq', 'dkuxss', 'mkbevn', 'pzmdqc',\n 'ihyia', 'atsub', 'twytus', 'nzooxj', 'qwuoly', 'fdoigo', 'zukhlh',\n 'mugeaxt', 'qqsfyls', 'qqtql', 'wrvphcx', 'nzjfhx', 'uequtk',\n 'fxuto', 'qnast', 'nveys', 'ltbrcth', 'toctdib', 'fbpnh', 'umxfgn',\n 'zvjuta', 'yeron', 'qzvswqk', 'gbctr', 'ryryz', 'zieknd', 'zcsna',\n 'jrhak', 'zfxqsj', 'urlba', 'lbozqf', 'yfcjaa', 'hazgy', 'gmmfzyz',\n 'zjvkyc', 
'rvfdcf', 'daitab', 'hcxqgum', 'qwakp', 'ltbsjwo',\n 'pqqtygx', 'upxcxao', 'qylot', 'lmxqc', 'dwzcd', 'tjccm', 'mqcpap',\n 'wgxqtr', 'ivycvxy', 'wdykg', 'snvqka', 'jxtvtsb', 'jnyowsq',\n 'iwfuoig', 'cuoixhu', 'fzwalg', 'djhrar', 'sjmahk', 'dyusf',\n 'wrxqvdi', 'ftytlor', 'jsjbv', 'vjbebg', 'agvsn', 'vvmpgm',\n 'gsgjopk', 'vbqvhy', 'afopf', 'zybfuz', 'aqsgc', 'ytrjsvn',\n 'wlhdfr', 'vdhvl', 'jrlvr', 'cscxwf', 'yhgbew', 'wupbl', 'ssuhyvv',\n 'bhcirzk', 'oykwk', 'ijbto', 'qsnpgw', 'otwzage', 'ytqzh', 'rgwow',\n 'bvhgkwh', 'fvawxie', 'fllxw', 'gfcqf', 'scoqb', 'qubrq', 'gdxjtp',\n 'ahrpck', 'awnlgi', 'cmehsyp', 'dwmytpy', 'firyeq', 'oohwhr',\n 'caelk', 'mqemvs', 'qflkzi', 'tfpibll', 'ybhzd', 'ctsxri', 'yurocj',\n 'dnlnl', 'ydmdva', 'xkaotl', 'xovax', 'ypynrqp', 'kwfzw', 'fbgsmrc',\n 'tutime', 'rcugul', 'cvewno', 'typhbpa', 'wazew', 'flzfs', 'wxxbza',\n 'ogjfkl', 'vjlebet', 'imbubm', 'xinyncy', 'dqmxfy', 'buhagzh',\n 'jjadpos', 'gejyz', 'gxshqk', 'wkwrs', 'dqeriqo', 'dmixr', 'bysjih',\n 'aoloq', 'ddwhsxs', 'nteqv', 'cqagf', 'ditsrn', 'wfxgl', 'jwjqb',\n 'rvkxj', 'rxapr', 'yrlkip', 'npquasb', 'nvezlr', 'gmhchcx',\n 'lodfihi', 'dheypxa', 'plzjykh', 'qopsthg', 'zsnes', 'raongg',\n 'zrpnac', 'tzmtltj', 'jsecdn', 'rzudh', 'hkcyic', 'xsxmw',\n 'reeuwpn', 'grkwrag', 'gvzzbsq', 'lrfta', 'aqyvbkj', 'ytgfu',\n 'wcmvd', 'olnvfi', 'hhgmhb', 'kojmepr', 'wpohl', 'szhgg', 'hymiblu',\n 'lkwjr', 'zulqpz', 'sdcqjo', 'olgsgez', 'lxkpqci', 'yxcgn', 'gmvex',\n 'fskpppe', 'utzto', 'axncvp', 'lcyahba', 'ydeae', 'zvzar',\n 'ghfkkqv', 'ryrpg', 'gucpbq', 'reofjz', 'cdnoo', 'dchhh', 'byiwd',\n 'cqbhok', 'ksfnoa', 'xsmmlr', 'qyvdfqh', 'dzshj', 'bpifnzh',\n 'uxmoml', 'jdxvojf', 'ihfll', 'vwesfof', 'zynnpb', 'fwzra',\n 'rxlgww', 'vkmjd', 'hcjgzt', 'mkapfl', 'ffjqlf', 'wulaebc',\n 'gurramv', 'tufkzai', 'bxprqek', 'nkohv', 'abgfwyl', 'slslg',\n 'wirsnh', 'pykvuh', 'fdrwk', 'gtmgsxe', 'dxsaab', 'lqiryty',\n 'aoezg', 'tzhugcg', 'uoarf', 'dwhsv', 'rjiuoi', 'ycgcdnf', 'rtfmwz',\n 'amkjc', 'woogtdi', 
'deprx', 'ucknu', 'womfm', 'xdeev', 'qapxpuu',\n 'ngulnk', 'fgtxyf', 'hnyabid', 'cilmy', 'wrsewtf', 'luvtmo',\n 'wftuh', 'ifoeeqp', 'dtfdhhl', 'rwnburg', 'fohkkul', 'frqqi',\n 'gsrcyc', 'teuync', 'dvpvak', 'daqjki', 'kksscp', 'somsde',\n 'tyfvck', 'ftfekl', 'ahncv', 'yvosm', 'qgllvg', 'ylfwv', 'jenqns',\n 'lqovrnm', 'iyger', 'nfvtsv', 'bknxmqj', 'pfzybdr', 'hqjol',\n 'chlpk', 'etgrtqa', 'msuxdx', 'vnoatf', 'ypdzomn', 'vsshmg',\n 'rfkipq', 'jvpbiz', 'vbskd', 'edsoixj', 'uowim', 'hqtsj', 'inbsxal',\n 'ookrv', 'ipotdnk', 'kmazqd', 'jpfghb', 'gvmnnpv', 'juvwa',\n 'xtkvzw', 'ejqcl', 'ebgcnt', 'ztuyu', 'dlzthw', 'zzipe', 'iaxwdxy',\n 'htynwkc', 'lefbq', 'pizfr', 'vttrsv', 'oagak', 'eqlrom', 'vttefg',\n 'dsrmk', 'oekbe', 'cvugzk', 'diwvz', 'gxmfob', 'vjowzm', 'mjpop',\n 'uznhz', 'kqvjwug', 'wjqvxfg', 'jbpwezu', 'wsckdx', 'slqfomn',\n 'omuxk', 'zlgblso', 'kvitoq', 'dmafq', 'djxmzk', 'pjqfegq',\n 'yjrttas', 'siakcx', 'iutiqk', 'nwfdj', 'gbgtazk', 'cpqtf',\n 'panmlr', 'aqubhsg', 'iwdim', 'nqetym', 'mwazh', 'thyhy', 'ydtxan',\n 'xfoin', 'lsosc', 'esznfa', 'xgdisi', 'flvbzh', 'mpltx', 'iwjpsqp',\n 'udfycf', 'rntmc', 'ltflwu', 'wkgbaw', 'bcuzt', 'hejxuhb', 'lguohe',\n 'klnhb', 'mjump', 'avcwrol', 'yrcqlc', 'ihxul', 'avajh', 'gtpauet',\n 'iemzk', 'rfdub', 'gqnbk', 'cfcmg', 'iobyh', 'iruuapf', 'tyifwt',\n 'sbdtp', 'mngcpmb', 'oaqpolm', 'mmimmh', 'gxknadi', 'bmxhuu',\n 'ulyoa', 'keidy', 'vsnfk', 'cnnnfty', 'pkajm', 'ddgeecb', 'prxidqd',\n 'wmenvhd', 'akjcqo', 'tnekfef', 'ipvsi', 'pzjwq', 'wmmct',\n 'erdjnuf', 'vgeaqs', 'nlbdx', 'dpvbe', 'dgeqz', 'aiguzh', 'akawppx',\n 'tykrjcs', 'gvavo', 'hkyle', 'yhedx', 'xzqcg', 'gzdxt', 'csssbk',\n 'tmekrmv', 'lfsgo', 'iizahz', 'aszfd', 'aybqnsl', 'vadwxsl',\n 'ulmiii', 'xaxdugp', 'sfnnsbg', 'dkyruh', 'qhpqu', 'amesjd',\n 'evjuki', 'vtqjw', 'aoabp', 'qnsuhe', 'bplbx', 'fdqok', 'ozkhgib',\n 'cggwzys', 'nbknjay', 'ooambw', 'evmvegf', 'htdlxik', 'kahcume',\n 'bojpn', 'bhipie', 'hdyjslw', 'pbkkq', 'qwszl', 'fgkbzsd', 'hejdx',\n 'vmcfhgx', 
'puzlmmm', 'meffil', 'boakbiz', 'eczot', 'fvkkit',\n 'jebfx', 'umvkjg', 'uikgs', 'rycgpf', 'rfmfgmy', 'nveho', 'bgywqen',\n 'gepfma', 'vquyq', 'wcercbw', 'wbpjkxc', 'rqloeda', 'omclokx',\n 'hvotwp', 'tvqfxxu', 'qrtghk', 'hggme', 'arnmfnt', 'cxprj', 'rspdt',\n 'hlgfq', 'dmqel', 'pcerxk', 'ptqjc', 'wzreko', 'kahks', 'xjnzo',\n 'xzzye', 'xbdeu', 'koiwkv', 'jlwkkjr', 'xzdixoc', 'xeedvrm',\n 'mrtnhqi', 'jaeann', 'mvubp', 'olklqf', 'retbgcj', 'qxxlhh',\n 'cqyyoy', 'ngwikg', 'qijte', 'sjzck', 'zkmkx', 'ongtzf', 'tanow',\n 'smgntvq', 'urfgt', 'xwcroa', 'kadcpd', 'cxhgo', 'walku', 'kvvcsyt',\n 'elwmuxk', 'bfphtm', 'vzeumuq', 'sknvev', 'vbsnfd', 'grmbg',\n 'vjahwt', 'dmcbmn', 'smubz', 'jobbfcv', 'ujlkm', 'lcthh', 'bauuqdu',\n 'kjgzgtq', 'gicjz', 'nugbax', 'kbnjfiu', 'sqfpein', 'obbgfww',\n 'ykggxjx', 'irnmog', 'xniuv', 'rqiwycq', 'hzlgyu', 'yjtrttv',\n 'satym', 'dgqhlkk', 'rghal', 'tbekx', 'kkwmo', 'eahwhks', 'bpvmbur',\n 'sqtgkj', 'khboz', 'enefr', 'vkzqvt', 'wfruavu', 'ninomu',\n 'ypktaoa', 'mlpmoit', 'fxyhjfp', 'fgnpp', 'txieja', 'dprnj',\n 'bgyrp', 'zsqwqrw', 'stqzki', 'kwiayb', 'ulbsn', 'aetje', 'vwzbb',\n 'tedwyqs', 'cymiruy', 'jigpoqx', 'ypuqsc', 'weletu', 'gvibea',\n 'chhuldm', 'baylv', 'wdhovo', 'imfqu', 'meodnsk', 'jhlckqw',\n 'jolyfh', 'jsfkrhr', 'tnbfzvs', 'egcfht', 'qnzmyr', 'owtrqu',\n 'oqaqu', 'xftys', 'goxfftm', 'sgbnp', 'bhfvaz', 'gospa', 'jwzlvwk',\n 'lqncoqd', 'xxizglc', 'bwffm', 'mhpggzr', 'kdaoewx', 'anviou',\n 'mqiij', 'wkskpn', 'enougdh', 'vldnn', 'gbfgz', 'ejmbh', 'qsdrvsx',\n 'mrvbz', 'cqlufpf', 'kbgjlu', 'njgna', 'admrmk', 'pwwsc', 'gxkot',\n 'pdjwh', 'ejwxt', 'bpaxufv', 'iwjzs', 'xxfsg', 'vuhgh', 'srytgb',\n 'yesvlux', 'tggnch', 'cgnbb', 'fbzbx', 'aomoqf', 'zkrvrjg', 'ueaoz',\n 'dppacnl', 'ewovhxz', 'kbvee', 'ixeeb', 'gwgoqm', 'hlwlxe',\n 'fpmkrk', 'wzjsr', 'ispwe', 'garofu', 'jcmpec', 'tggeo', 'yzdeo',\n 'axpmln', 'zhnlhck', 'duyqcn', 'tpqwqi', 'jvmaj', 'bisgoy',\n 'mpwmurb', 'olqla', 'ecapwan', 'kcpxn', 'xcapin', 'ooctk', 'sgqql',\n 'vcyyjxf', 
'ejyom', 'jsgtha', 'logxnjg', 'nypadhj', 'dprmk',\n 'cqkuzb', 'gratv', 'tgkjgu', 'fttcafm', 'tpryi', 'ubbhw', 'uwcuyn',\n 'zkgohs', 'snfesz', 'ifrex', 'tkbfz', 'fvvkp', 'otjiq', 'lgomjjv',\n 'ertracf', 'bregu', 'kkbizb', 'hyhvn', 'zjcnxfl', 'mceskuj',\n 'lmupdq', 'zdzqzgo', 'yorppew', 'fpwtjd', 'dxvyzt', 'bbnnu',\n 'pkycae', 'ucvapn', 'dijmkb', 'nvwwpr', 'bufkw', 'zhono', 'vayxf',\n 'hlfwkev', 'klkvkj', 'yzgpwg', 'lcbqr', 'tkkfi', 'pcgljx', 'bhduxu',\n 'rgfipts', 'hkjbrr', 'fobvy', 'wqmqhxo', 'yjgvypg', 'ehgoizl',\n 'ipiibzh', 'aqxbxtx', 'lrtin', 'fyyuypr', 'pyrocgm', 'kwqbg',\n 'ukccw', 'wgsbpvx', 'pcoivrv', 'okhxaba', 'bbuaibf', 'ccvfm',\n 'phpst', 'yxtqiz', 'cdfbo', 'sijfljn', 'gdlhn', 'bqmbced', 'tiejf',\n 'aurqer', 'olmyd', 'prctay', 'lwflhi', 'bbehvta', 'oxoda', 'lklyc',\n 'rzedhp', 'kairil', 'envan', 'wdcwfk', 'xoroddb', 'womrlr',\n 'ruxebe', 'jnpywrd', 'wrifvz', 'zkewcd', 'vllfrn', 'uvdvjh',\n 'bglpya', 'vzokkbw', 'apaoqt', 'xpjizn', 'xoajmd', 'xapjwc',\n 'jcknwg', 'bjpreep', 'ffkua', 'ukcbah', 'bugvkrf', 'cbmmfs',\n 'cwaczhl', 'nsqaj', 'sjeikg', 'fayqif', 'slowoh', 'xjpvkpa',\n 'ynunjle', 'bqavt', 'nkpqudr', 'neikvd', 'yuqlzg', 'pdxbtrb',\n 'cashlog', 'iqiqy', 'smjmxv', 'zbtpbr', 'zzamzcv', 'jmakg',\n 'txfswc', 'pkaym', 'swlde', 'utann', 'mqgpjne', 'pslfvek', 'nbiqhb',\n 'bzsianu', 'wnxgbi', 'ahkeeiz', 'dqdfjg', 'bptdg', 'pwita',\n 'uqyflq', 'txabjn', 'yznjmve', 'mukcqqf', 'cxonbf', 'ixuewjm',\n 'pzlcat', 'eikeeo', 'scwsoa', 'uaeyw', 'oeorff', 'gbqgd', 'qboqiv',\n 'hiulpb', 'dbbdm', 'qvdxx', 'aypxbcn', 'ykjwdbg', 'pvfxn', 'shrqyz',\n 'zaxtu', 'pfefgww', 'jwifrw', 'zxuud', 'kpkwhlj', 'lwptgd',\n 'zpdmvsw', 'takeb', 'ynehl', 'kixtod', 'fyrgm', 'qirzmr', 'shyvec',\n 'xjgzt', 'bwfvht', 'wyehh', 'renzc', 'nnibax', 'slhfng', 'yjtecc',\n 'lghvbzf', 'qroxvun', 'mlsed', 'rrudho', 'cyffhh', 'tjlxahp',\n 'xmaepzk', 'jvdzh', 'bbvegrw', 'cebcz', 'odjpeam', 'guerph',\n 'tgmphgo', 'ohtkqq', 'jcxojz', 'haeheae', 'erydxni', 'hatjxx',\n 'kwmgkjw', 'wmezvy', 'hsuuvfi', 
'ineek', 'grkxmhb', 'alxkt', 'rmspxdg']\n ) == 13956\n assert s.minimumLengthEncoding(['me', 'time']) == 5\n assert s.minimumLengthEncoding(['yiyqbv', 'njqvawn', 'wnlovvp', 'vogum',\n 'jpolc', 'zleec', 'sxdrww', 'rbowr', 'xsjorra', 'kwjsx', 'vornum',\n 'echku', 'kuizegn', 'rhuvv', 'eemkh', 'yshht', 'pbixoa', 'cmbxvtr',\n 'iupia', 'nmcbq', 'mgrjsx', 'ejvniwt', 'svhsel', 'kazenhf', 'fevpm',\n 'xcwqfgw', 'ozikzc', 'mywnmqt', 'taorwjm', 'gcshacq', 'fgtasq',\n 'qexygw', 'ljmbari', 'zfjudos', 'rgxuzy', 'kmzryaf', 'exjfd',\n 'mcqnebz', 'ptoim', 'zglfi', 'fhneaz', 'rexgc', 'lhplwyr', 'dthdp',\n 'jizetec', 'obyzg', 'rqupa', 'yphttge', 'wdcdn', 'wdomtr', 'hchbd',\n 'ytyra', 'upytftl', 'swbbi', 'qpcybv', 'dcoxspd', 'dftkf', 'nwjfmj',\n 'ojbwy', 'zofuy', 'adqkt', 'kpcply', 'aeukw', 'fqblb', 'xurrbpo',\n 'veioa', 'puzvl', 'bnzvlax', 'tjzsdcw', 'jarqr', 'orxjbg',\n 'ilrqdri', 'syjuoyi', 'htoqdco', 'gwslw', 'dpqyf', 'jnkhv',\n 'fpqhpr', 'baewnvc', 'caunsf', 'qhbpe', 'wlckl', 'lmoroqe', 'ddlak',\n 'qipwbfp', 'cefqs', 'surczp', 'jtmfuro', 'ezhqau', 'dlsco',\n 'hywoqh', 'lnifq', 'hvfmu', 'cqjdkok', 'tggdact', 'rwuowdk',\n 'attnl', 'lwhyq', 'mqtsc', 'bmwajiy', 'nyohug', 'vvfpt', 'lbyazu',\n 'sarwago', 'iccztck', 'ugsxcw', 'rpwza', 'yofmlll', 'ulhdzhg',\n 'lbaqk', 'bwxxwc', 'dmsbawg', 'tjloy', 'imbrkul', 'xguke', 'shlkuq',\n 'lizjcdu', 'kmvykl', 'ilqxxjm', 'rtbvvqt', 'qisec', 'zobzr',\n 'thwntt', 'afpifh', 'uwiiovy', 'hgsyecl', 'pdgnm', 'mqyesch',\n 'suexztu', 'msguuwu', 'yrykkv', 'xtoommc', 'muteu', 'bamml',\n 'kkhlb', 'jfrnx', 'wpytor', 'zzogpt', 'yryxxt', 'hzqofjd',\n 'ehtildc', 'ptclf', 'nyltvd', 'nrret', 'qqqqt', 'uuxunf', 'jajxt',\n 'lzdvlc', 'gpdtjug', 'hjsso', 'jairua', 'qarxuey', 'rpwwjwv',\n 'cjqypep', 'tuzgcs', 'oytqxb', 'rgfmud', 'stnwn', 'tzzaop',\n 'jpuopzg', 'qeywd', 'spnstrg', 'dfwgntg', 'yjyqk', 'ioowc', 'duqfg',\n 'gmqxe', 'xhlbby', 'liurjk', 'vdujfm', 'xxyyn', 'omapgc', 'koemzbz',\n 'ziiyako', 'pjmhfrv', 'bshtfgj', 'ihjvt', 'pnipuw', 'fajiuj',\n 'rdvcqzd', 'mgknns', 
'ouwkm', 'ejnklwc', 'osepl', 'gplpyvs',\n 'paxrddg', 'gsjlpd', 'lgnmgl', 'yifeeer', 'hhnwlol', 'fcmxs',\n 'ilinwgm', 'udhfdtq', 'ceefc', 'xweqx', 'jfelwod', 'rtywfjo',\n 'kzwrgqx', 'fcjriov', 'fzytqv', 'zcpcddo', 'scpyzow', 'kbzegu',\n 'gclwr', 'gmiwlp', 'rtpka', 'yiywuyy', 'qceot', 'dtrgn', 'ntwbu',\n 'fxobd', 'zmxwza', 'qcksyz', 'wgbtmm', 'pzorve', 'hztydc', 'jqlay',\n 'ijdkbk', 'uzjrps', 'gfzibk', 'gsxqj', 'kgjrkdd', 'smdeuk',\n 'iwizewp', 'owjie', 'kcdccu', 'ifltqr', 'zrdfbm', 'pznbcsk',\n 'mtkpi', 'cpasir', 'flrxrm', 'uxcxnv', 'htlfcp', 'ltukxfr',\n 'ftbbha', 'jhgjgyz', 'qjreroc', 'vcvtbid', 'nrhlq', 'gtkpot',\n 'gyplqqg', 'lnorig', 'fixhufv', 'ugcug', 'ndfug', 'wuorhe',\n 'owocnkw', 'rcnbf', 'ioiiiui', 'kakwtne', 'svxtt', 'wdrxogm',\n 'ibrxs', 'bddqi', 'jeguac', 'hlftdw', 'nutgfjw', 'krrzvf',\n 'amxuloc', 'deozdoe', 'ovsvk', 'sfqsl', 'slgiw', 'jbjujag', 'mhiru',\n 'uqksech', 'davosw', 'nlueljv', 'rhtvdu', 'ivdpdqa', 'qnbenpq',\n 'dtapqq', 'hwwfpxl', 'oyrfosn', 'goxgmgo', 'tbvutl', 'cbbbcm',\n 'iiugpk', 'hinkem', 'vvaitk', 'pskyf', 'hdnekg', 'nqhfn', 'dqbozx',\n 'zcwpko', 'kafyu', 'jfegubk', 'nofqzsk', 'ujmxxg', 'akwzemu',\n 'yvhxb', 'qqlwofi', 'hmoecj', 'qwgtlc', 'jepvygq', 'uzggm',\n 'fztiews', 'lvndvf', 'vulax', 'znqudh', 'whgqi', 'noguo', 'vewkx',\n 'uruvgf', 'ubohmba', 'aulzi', 'flvfdlq', 'yspfie', 'wugif',\n 'qndyiwa', 'keihmct', 'rggvn', 'ojjmuoh', 'sbbcl', 'cdivmoz',\n 'vkusmp', 'mfddp', 'kgohwvp', 'rjbbxw', 'vsgptj', 'hbyjoz', 'gufrv',\n 'orxiv', 'fxcqfw', 'okppik', 'qlouw', 'lkryigo', 'qccvc', 'ixcnodg',\n 'wlfilts', 'ahqtevp', 'kkbuha', 'oehaez', 'rzczib', 'vxobk',\n 'wmetvjs', 'xfjgeq', 'eadzl', 'aeqdvch', 'czojfq', 'hxshidl',\n 'ofswsj', 'iwbqcmg', 'schhwtt', 'ltyth', 'wiccu', 'akill', 'zaaji',\n 'qepvfa', 'mpvrkeu', 'dcpenm', 'wdhlk', 'llqbby', 'lronwkr',\n 'rwtguo', 'ofnvs', 'lxdnwzf', 'dctmilf', 'zhckjd', 'hajsuac',\n 'wpylhy', 'zhipvm', 'ihikr', 'zzwjgvr', 'gdglrn', 'skhow', 'tlqtjl',\n 'uypli', 'evdva', 'civide', 'iroihm', 'lvuzid', 'vexat', 
'ngmvrz',\n 'szdhbt', 'ggrbz', 'bsmovlt', 'kguomvl', 'onzvx', 'nobgxw',\n 'tqxemc', 'vbiyx', 'fpzpf', 'ogtvf', 'yuthri', 'xszbn', 'xcuhj',\n 'nosnpbp', 'mowsxg', 'tfalyy', 'kxombgm', 'cukrz', 'krmseq',\n 'velzh', 'kmufxj', 'nvxlkq', 'ualvras', 'wytoucy', 'qicqyym',\n 'pbeujtv', 'haojnbm', 'xnfffpe', 'wvoiald', 'rlyvf', 'sxamoxw',\n 'ztqnmp', 'biiavx', 'lnjnzs', 'arqdjdy', 'pkrgokc', 'qxswouj',\n 'dgqah', 'mnhzo', 'ggilb', 'qscrd', 'ggvkimw', 'qlxjys', 'wximi',\n 'aqlhio', 'iavtvy', 'grkqf', 'dwrtut', 'uozutfc', 'fogxpdb',\n 'ydtntlq', 'vnmpmwp', 'gtxhwq', 'mlpihx', 'yfpjlz', 'hdvcquq',\n 'nunny', 'wklasgp', 'wxduo', 'topsqf', 'tngcpzc', 'mcrut', 'pdnsmt',\n 'kavaok', 'seiqsqa', 'bhgkiyt', 'mawvhtp', 'domcnrm', 'fgusghc',\n 'wdaufwz', 'tzpuks', 'kisndyz', 'fwyieu', 'wtdum', 'ytxhl',\n 'yhzkmuv', 'nppnqe', 'ccvhj', 'dautnyq', 'hkaliab', 'kngan',\n 'ebmhiop', 'vsdkcef', 'nmpcnd', 'vxvnl', 'cwcgu', 'zsuneh',\n 'qjgcmd', 'awvba', 'rzbisxo', 'oilqrj', 'neiazlm', 'hlyrl', 'tmiht',\n 'lwqxxv', 'gyblrw', 'gnnjkb', 'lrxiln', 'xlwlseh', 'npfwcvp',\n 'yjcdhw', 'rzndd', 'orlhmip', 'gatuojh', 'osotgvv', 'owksz',\n 'kcocizf', 'izlev', 'smigns', 'wtxfwo', 'knwizte', 'mqjojzp',\n 'lkezye', 'xqldbu', 'cvbpyl', 'aoipbz', 'asrupt', 'bdwkesh',\n 'jpaykm', 'pksbg', 'gdbsibd', 'lfxpwk', 'rmnfph', 'yzxwke',\n 'xjwyusv', 'yetar', 'sytdz', 'pnystzi', 'yntcqo', 'egoorl', 'aydxu',\n 'rfdrfhe', 'flzkos', 'mmjgev', 'fbjwmvi', 'jeouc', 'lcmkri',\n 'aggsb', 'aaeazai', 'amyxpey', 'onxqpg', 'qrjpxq', 'zanea',\n 'niwsgtv', 'nsqja', 'utgskd', 'hlcum', 'frygtl', 'xjmqetz',\n 'upqddd', 'vxzdstm', 'hcmtera', 'ejstou', 'xkcguf', 'bokigdk',\n 'vurnv', 'zsgrje', 'nbxlf', 'tpilcx', 'lvepux', 'xacdtp', 'amdgx',\n 'ubbvnx', 'xmvznh', 'tlprri', 'sthkn', 'xhoad', 'deotaxo',\n 'pqzppmw', 'xlcpx', 'qwzrpyp', 'lujabeb', 'heskwyy', 'mzzaaur',\n 'vnestcs', 'rryphdl', 'ibdiabi', 'eoiyt', 'znflx', 'clougix',\n 'zzadxw', 'lrrgtf', 'lsdoakf', 'yxfmqx', 'qhnrry', 'ktcdmv',\n 'veygqu', 'btjlo', 'fcspsc', 'gozoazm', 
'xcsqgz', 'aazae',\n 'nkuvask', 'mzdgjq', 'sihqdhy', 'zadrwzw', 'gzcyuea', 'lpgccic',\n 'fqtfuzw', 'bjoqpkc', 'oydpkxc', 'sugnnu', 'hyvygf', 'axkxo',\n 'rsmzb', 'dlhqmac', 'gbqby', 'npqkj', 'odbtb', 'bdsib', 'zyasxv',\n 'ifxqcc', 'lmnjwhr', 'ibuyu', 'uzhle', 'ccpwhjr', 'vhrojnz',\n 'fkzfz', 'fyesm', 'dnvipvm', 'jbbqn', 'qdkgl', 'xkvvgq', 'dphugaf',\n 'soxbfun', 'rbgokx', 'biveiz', 'vbaqtn', 'qapydgf', 'llldu',\n 'ottjpzu', 'fwjuc', 'cawio', 'gbkwe', 'rrnnxer', 'luviy', 'zsalse',\n 'ckwdeox', 'ozhqocm', 'vtozfwz', 'jztole', 'ydqei', 'bfugz',\n 'psawjp', 'dzlyrwp', 'izuyrne', 'rbwcfr', 'vdvte', 'usjbqs',\n 'zzovkxr', 'frfkwk', 'mmtmdd', 'sntka', 'wachbzo', 'rmzvj',\n 'scbngo', 'eqiuiwi', 'qfakk', 'cckcmt', 'owhzow', 'rejdlw',\n 'iprsqdq', 'twwaldw', 'mfilzyk', 'jygvx', 'iewbo', 'irhko',\n 'zpazqhn', 'ndqbg', 'ayzxqdz', 'zvpbh', 'maapq', 'pzitrfm',\n 'qsgsurv', 'viwcfff', 'wpgenms', 'tjmvu', 'czuemc', 'infxoo',\n 'avhbw', 'nugkqx', 'xubakjp', 'ndask', 'utaqq', 'njhuxq', 'sdvuex',\n 'tfmxqp', 'bydovjo', 'bizxjsp', 'zoozxyv', 'jegei', 'gkpqobw',\n 'psumbtg', 'gkgoh', 'sgcbpql', 'xxkhy', 'kdorkr', 'hcomj', 'ulrpyv',\n 'rhplil', 'tyyochd', 'xhzul', 'srdjmns', 'kgukye', 'yepvs',\n 'xnobsjb', 'umxmtub', 'wvqasr', 'igftpzw', 'exhecn', 'rreee',\n 'jpxuvxh', 'jriqf', 'akexunb', 'ekvdsoe', 'ytzvj', 'vfrlyae',\n 'pmfai', 'biouzle', 'xkbce', 'clzyi', 'xhjoso', 'wmxkxb', 'dqzzig',\n 'ydtby', 'gskwj', 'wlkwbz', 'zepvllz', 'zsgqp', 'blntawk', 'eynmil',\n 'bdqyp', 'wgtnqbc', 'rrgaq', 'gtafuzo', 'qdiko', 'kkcsdo', 'zwqhs',\n 'kugzbmf', 'wtvvs', 'kqsdx', 'mxsuxiz', 'pgbgjfe', 'vodfr', 'qbvwu',\n 'vfwbhgw', 'ayojye', 'kolzfqg', 'xnbecj', 'akbcnf', 'uutrn',\n 'upmesa', 'marqej', 'bbucee', 'bazqbau', 'qikgsyf', 'oeayzn',\n 'uilxnzr', 'vpnxknl', 'btgtxgh', 'vjaav', 'zaxtzah', 'msweps',\n 'awduwld', 'gzaep', 'ngvgc', 'qpoqdgn', 'kimndg', 'qilmmpw',\n 'oafhlyp', 'nyelgvw', 'onymk', 'feycbc', 'dhcrx', 'siqpfly',\n 'tyvycmf', 'huctqp', 'uscjrp', 'bbptd', 'msdmu', 'xlxhye',\n 'xnyzcox', 
'kyskda', 'injdkmp', 'jiwus', 'spjylwd', 'eqcrnt',\n 'snfiu', 'jvwvge', 'yfeaw', 'mmdnsjj', 'suzdw', 'xiupf', 'rjwjhng',\n 'tqvasy', 'rmibpa', 'zuqax', 'prpndnp', 'efryqe', 'pwuqfy',\n 'wpqlfs', 'aeswq', 'cxkeiue', 'jydxzfi', 'tzfvwp', 'zzgtw',\n 'mupiusx', 'sojavt', 'dxmsgq', 'migjiyj', 'kixjk', 'ywwvcpl',\n 'khzcuo', 'oykhx', 'fochin', 'foxbfkc', 'sizjg', 'wrjcvr', 'ceadd',\n 'tvfqgxq', 'whzhche', 'dcoeti', 'mpilfib', 'cphie', 'ucpnjm',\n 'ajltvx', 'kpizym', 'vevfsrs', 'jznrri', 'yvhxomr', 'cbcnk',\n 'yuwuhu', 'jywuzed', 'kqakusq', 'jrnzgfo', 'mjimzz', 'mfjybnd',\n 'ntqyq', 'junxxck', 'myvqajv', 'kvuqs', 'obfxw', 'jwuba', 'vnrvzvy',\n 'aeric', 'vtgda', 'nkrocpt', 'ahitg', 'dzxtr', 'zswwc', 'yhxap',\n 'fdhiwr', 'cpxtqv', 'izbmo', 'zyioo', 'vysnoe', 'ouuyvj', 'cumdhzn',\n 'dbsmph', 'cktjem', 'vbmxy', 'utgfyhc', 'rqdeorp', 'btnlmd',\n 'chxwlt', 'nsghoqi', 'egycsm', 'wkanat', 'lzjyf', 'donyx', 'cchqsa',\n 'xozzz', 'yzmnf', 'jfzuh', 'dpcpg', 'hlahz', 'vobopk', 'lssfeli',\n 'ccttzi', 'glzgqpv', 'oyqzug', 'qqhkrr', 'euwotv', 'hwbmtz',\n 'hiylhly', 'bppzne', 'yetyyvs', 'cnbwcby', 'hzblk', 'pfjmxt',\n 'dsxvt', 'vvkju', 'zjrfr', 'gdbhb', 'udoad', 'nbhpzfm', 'iwetbym',\n 'atmly', 'tnxli', 'myegb', 'hiwqsk', 'btrajk', 'nhrmwn', 'ftmbecv',\n 'xopht', 'eiikqy', 'qizanwa', 'cwxiatf', 'jshjva', 'llrtkn',\n 'zhivu', 'lmwiu', 'oaeaqz', 'oxotfub', 'jnkafm', 'juhrmq', 'mqzbtw',\n 'puiaxty', 'dnahvoj', 'gaxhz', 'xfnay', 'iqmlnlq', 'xudhcg',\n 'izpkz', 'tqttmt', 'bwnbs', 'fdufd', 'vhzyymh', 'zhqtxr', 'evbcrv',\n 'xvnma', 'dgcwy', 'cwxzlbz', 'oodiol', 'teyim', 'kqqfjub', 'ftsqzi',\n 'arfztkr', 'oqlujx', 'rpkkdov', 'ptoff', 'ivxaxr', 'nxeept',\n 'cacpl', 'tehir', 'spvggl', 'qfzxkn', 'bhwkukx', 'fkdpuq',\n 'xdrngre', 'fnfplq', 'dzbrl', 'ufgxu', 'sciec', 'fgdydvw',\n 'nmpaqxi', 'ydsvfv', 'natjz', 'lruyvzf', 'xznznxp', 'mhfrh',\n 'kddsk', 'uwatn', 'uklzs', 'lnuta', 'ryizc', 'cvwko', 'tnzpk',\n 'ywpiv', 'vbvcagq', 'pzolw', 'nmyfhg', 'cshkofj', 'ksptw', 'kqejh',\n 'zgzjqzo', 'mxzrw', 
'enabosq', 'vmubgc', 'sfzcj', 'hewvk', 'ewhrq',\n 'oifnsmi', 'izdnvu', 'cshgtk', 'mqotuhd', 'gnqgj', 'rxailbm',\n 'iyhxvtu', 'ncjzklq', 'zjmnoc', 'awqwos', 'ugujppc', 'spbvfwl',\n 'gntsvo', 'euksu', 'qnvneph', 'crhmf', 'brktmf', 'mvgmr', 'yzcskrp',\n 'tihawec', 'edqmxpn', 'fxyymlr', 'dzfkucm', 'prldz', 'gplrlhz',\n 'bohwr', 'bhebbk', 'mmecj', 'segydd', 'ptslsb', 'pyhgw', 'cwmrq',\n 'mjfhflh', 'xhuid', 'npxmb', 'izilq', 'dczhqh', 'tgfnxtb', 'zrylvo',\n 'lctxrar', 'ylhrbii', 'rfxedv', 'llvhzjq', 'bjocv', 'wbnex',\n 'cnohnf', 'xahrl', 'rouvwyc', 'hbhovgv', 'dhucp', 'ncmff', 'ncsskg',\n 'gsjbyin', 'lroxscf', 'whfaenl', 'vsfultg', 'floxkpy', 'captoai',\n 'qwolyex', 'ggaypn', 'wzunypd', 'pjixeu', 'gxnjkoc', 'pqiqhn',\n 'xakjmgz', 'vqizkx', 'gdzcxr', 'kyxwdd', 'pgxmazn', 'qeuwf',\n 'bduknm', 'tcrcn', 'nehgee', 'wktbcgu', 'jwqltdt', 'wczkai',\n 'drkqs', 'qhdqnn', 'oobxirc', 'lbunv', 'ifscr', 'xnfpbrw',\n 'yrrdbax', 'fbocs', 'tewne', 'iobixe', 'zgosas', 'yhesn', 'xlqwd',\n 'pfcen', 'slsjffx', 'ilwatrc', 'mhsmgp', 'iteghl', 'aqhufdl',\n 'kxgpqcu', 'ryrcgp', 'azidf', 'smlnl', 'rocxvbt', 'iutfc',\n 'loapgbr', 'musulp', 'dqcnj', 'tpgbkfh', 'wvskii', 'itkfopo',\n 'kytyb', 'rzahbu', 'aewptd', 'ohergbb', 'cadxh', 'aphwelj',\n 'huooyzn', 'gtttia', 'izeyhcr', 'cfvxz', 'aitaxyp', 'vypqost',\n 'ebfnmif', 'kgiucm', 'zryyu', 'oxgnbpt', 'frpwo', 'ouqvodl',\n 'pdaazh', 'gxwmf', 'dozxsjm', 'yndpsik', 'zcwvu', 'mihug',\n 'jgodklw', 'ysklw', 'cfxqv', 'yqvtz', 'rctnp', 'xjywa', 'kpqyw',\n 'hhtegzt', 'rnwbeoi', 'uyxqum', 'jahcwbe', 'jzjns', 'ovwoaz',\n 'oqmsrua', 'natbejl', 'deffv', 'okgbr', 'paqhy', 'jkafhte',\n 'lifsknp', 'afmskh', 'oemdro', 'oxuwov', 'qtyxa', 'hkpfsm',\n 'ulaubn', 'tciurw', 'myohwlo', 'okuiejb', 'ormoqsb', 'gmipz',\n 'hterzir', 'ekxzre', 'xkevge', 'ihenf', 'nnhzv', 'eocjmx', 'upzal',\n 'oounfko', 'myhbwub', 'fwipva', 'pkzzvpd', 'nrupm', 'vluzq',\n 'fxkoyho', 'atzktr', 'aomrp', 'qwpser', 'ejagmb', 'cfigelm',\n 'bvanb', 'cgcgabo', 'hmjvlqt', 'hxxocf', 'ftqaud', 'htuipy',\n 
'bhwmcn', 'tgyvaqe', 'lvuwh', 'yiabzs', 'rzzavu', 'fiubm', 'uuqsb',\n 'riyakuf', 'psscffd', 'kvckzr', 'fktmnf', 'ivzqexi', 'nhxzm',\n 'kffjmb', 'vdzxv', 'esago', 'bfikw', 'gaiuxmz', 'volokcm', 'jypcs',\n 'psibvs', 'hxaxklf', 'lmqwgy', 'spnbimo', 'mtihak', 'xikoiy',\n 'rmmtv', 'phaqgxj', 'zcuwkhk', 'emodbyb', 'ztahsya', 'ieiqm',\n 'lfoquh', 'emznnq', 'pnhlgut', 'pgvads', 'cqsjx', 'lxnjei', 'zpque',\n 'rdjbiyb', 'sxedpu', 'potnqva', 'iirkn', 'rjmnrxd', 'ksgcd',\n 'waeymnh', 'tizdz', 'kproa', 'wpttygd', 'lvyze', 'peewvgm',\n 'fwtyzbw', 'zitkk', 'gfgqr', 'udgvlz', 'swqspo', 'ohhvyq', 'kgyuau',\n 'hcerp', 'pdomlm', 'twabkk', 'zfsea', 'epiwp', 'xgycjpt', 'jtkdh',\n 'mxmdm', 'rtkzm', 'qkacy', 'nuvdiq', 'agctak', 'hypgyh', 'ewtjp',\n 'paysolw', 'bcutebe', 'xelxyb', 'gzdvrth', 'vpzfv', 'cxrkt',\n 'admiyzi', 'lqlmn', 'zbjpbg', 'tlvdnli', 'zetnox', 'ylcsobo',\n 'balajod', 'igoume', 'sxcgw', 'sbkkafk', 'fmndnnw', 'incsa',\n 'jyupkg', 'uhvvc', 'rswnbth', 'nvprfj', 'figqf', 'znyidqi',\n 'aijper', 'euidr', 'dftxkze', 'vnppi', 'splwifc', 'fprgafl',\n 'ixzaz', 'mrhqtne', 'dtkjsy', 'dsmqrgy', 'xfscz', 'cymvmpu',\n 'vptkfdx', 'zrgrjq', 'mqvwsur', 'hdtlw', 'ugdpwun', 'cvxitc',\n 'vytvqg', 'pmtpfz', 'nfdtdt', 'umvwjuc', 'jouxc', 'qpypri', 'pdhqp',\n 'lmise', 'wlsvcfg', 'aqdkzcb', 'qlrmrfz', 'pbgoyi', 'xmsskoh',\n 'jjdye', 'xvsdmq', 'ymjeipy', 'igjyv', 'uiojvmc', 'uckoww',\n 'grlnyeg', 'hpglp', 'omnnyy', 'iiliir', 'cnucbcx', 'pcxvs', 'hipad',\n 'xmiltkj', 'oorwi', 'qgoxjj', 'jnmviqs', 'wpleqn', 'tudxw',\n 'pcogem', 'hgewaf', 'niwfexy', 'vcttgcb', 'anjgovq', 'epgmscd',\n 'mdtru', 'xvapv', 'rydjik', 'kopppcr', 'mjbsmu', 'unxoakz', 'ldpsw',\n 'frksjr', 'vyxxg', 'yyydri', 'szidq', 'qvbtd', 'qratl', 'xwfov',\n 'bzhqyxl', 'fskrtf', 'pcpzmnv', 'xuxwx', 'vzbevnb', 'ebaqz',\n 'dbpuek', 'ooqwj', 'gaimp', 'coelqh', 'bwuceq', 'oxpfjt', 'zrqyc',\n 'rwllk', 'pqunv', 'ufbnn', 'tbnjoz', 'kkqmrxu', 'qyyrm', 'hislf',\n 'wyuck', 'ubpre', 'pdioi', 'aryhv', 'vdcxv', 'rkgmaag', 'czlzokw',\n 'gtxuduz', 
'grpijx', 'qzrar', 'qhues', 'rmznt', 'sxxmved',\n 'onjzuwl', 'atbjhip', 'nrardl', 'alrocy', 'cfkip', 'ihtbf', 'pqdgm',\n 'hmokun', 'dpghac', 'otwml', 'mnbzwa', 'ehetlt', 'rchvq', 'lwjgywn',\n 'lzdmjo', 'nvhohdp', 'tmshcpc', 'gavjv', 'ycnkv', 'uynzh',\n 'bvpnfjq', 'lfbem', 'qberui', 'vrmmhx', 'wpbqtfq', 'jujpx',\n 'dujgkof', 'hrpbso', 'zhcdt', 'iybngyb', 'rgeruza', 'nesyxr',\n 'cihgfe', 'hjgskb', 'zspxeqm', 'inzrgyd', 'crkjq', 'iooshwp',\n 'muvvj', 'wakis', 'rowibwa', 'qikwypf', 'aportho', 'pubcgx',\n 'vqoqpfi', 'rnpbri', 'ussjv', 'looor', 'xkzvdv', 'tstegg',\n 'zgiiokw', 'rwvyaun', 'mqqla', 'asnqp', 'nghuryl', 'hlvhn',\n 'ecuotnu', 'judvbu', 'xgvuw', 'oeckn', 'hdhttsg', 'hcyhu', 'klbyjc',\n 'tnrmqnc', 'mjojxhi', 'kvdet', 'vbmevim', 'oglrzs', 'afbscdi',\n 'zxrffti', 'firzgmz', 'oenim', 'wgpua', 'asiep', 'kyteq', 'wpeneca',\n 'qixmeoq', 'zaofon', 'csxxtr', 'cpwmnl', 'feylas', 'idjuo',\n 'mrtpvta', 'jjvmjy', 'mnljocc', 'lnvjleq', 'oognud', 'rbyneq',\n 'rhvomm', 'fldrkpk', 'znvrp', 'myswmz', 'jiloe', 'juivjmo',\n 'ylhbyzl', 'ndmabkt', 'sgdvlq', 'pmnddmi', 'utpuj', 'kfisv',\n 'nxfeell', 'mxhgqd', 'ccvdsdg', 'emtybo', 'zmkylbt', 'mmrpi',\n 'dkwlgq', 'iwlappb', 'uimsrnu', 'mkxaxmi', 'tcvll', 'njggal',\n 'kmqud', 'evgzlh', 'oaxizbp', 'jiuej', 'xknlp', 'cyksydh', 'gbixmz',\n 'vtouyk', 'sxjpkio', 'qhubt', 'kflvnb', 'sjdfggl', 'bxozyj',\n 'xekbh', 'wtmcb', 'xtapfco', 'rnornl', 'ursdpki', 'waonim',\n 'eibfyed', 'zniinaz', 'uyfohq', 'qcaxlt', 'koyaapa', 'pjuvbsi',\n 'ecpdl', 'ifaqwm', 'yyumzc', 'gvfngfp', 'lttul', 'flyza', 'uasdlme',\n 'oklhb', 'wulkzzv', 'ziwsxo', 'jqcxiu', 'qdzrwgm', 'zjdwy', 'uumns',\n 'emlnp', 'irnrqp', 'gqkza', 'oynpcz', 'yxyea', 'zpamf', 'gyehxbv',\n 'nplkhcc', 'rxeekyo', 'kecgp', 'gseju', 'nkisxqf', 'vlyud',\n 'fxxihhm', 'yjgtml', 'fehwpdi', 'wclnvyy', 'lriwrc', 'ikparv',\n 'volfh', 'ysphh', 'szrvrv', 'rqlmz', 'jyqut', 'fyftsj', 'uvwfip',\n 'rngwgm', 'mjwaz', 'roehjki', 'ploxokr', 'yjbalp', 'fspkq', 'yfxrb',\n 'kzulvk', 'ordxp', 'vdrrt', 'wdiojwd', 
'ridzl', 'niykdvu',\n 'whyycmn', 'riwcma', 'bkhgkrb', 'nsine', 'emgtgf', 'zoymw',\n 'ljtvhzb', 'kfyfdma', 'piygxdl', 'onfwgdf', 'fwmkm', 'vqbljay',\n 'icife', 'bxfli', 'yeygr', 'qenhgm', 'mtxuckj', 'kdcyx', 'kwqhfcn',\n 'ywkfy', 'prbpw', 'pheyc', 'kmnds', 'cacqs', 'kvekiqy', 'bfvfhdy',\n 'gxulp', 'skmcra', 'exomt', 'lcxue', 'mnvvday', 'rsddl', 'gooegc',\n 'udght', 'doymnin', 'ccdap', 'wuive', 'dyyln', 'rynust', 'luxabyg',\n 'kdkkyyw', 'vawqfsy', 'rmeswm', 'rcxzyv', 'clpowz', 'pdntqm',\n 'tvjkkmz', 'iiclw', 'nhudzen', 'cybhu', 'crwtw', 'enypnh', 'ygekg',\n 'hrjwqt', 'peissge', 'wangcy', 'rbpoik', 'raqulbf', 'gyisnsj',\n 'rgbqn', 'lgvuzb', 'djicf', 'epnuu', 'nsapc', 'voatgh', 'yorfehc',\n 'jxfttat', 'wyuivb', 'bwopl', 'odwdsh', 'anchkv', 'sepvew',\n 'qoxxmae', 'bpvqnj', 'sngfo', 'buoazou', 'zhijssa', 'janng',\n 'uvdbd', 'yfvkqo', 'lcjii', 'mvacvrz', 'xztiar', 'lpbtrqa',\n 'ukbpdx', 'okaqpgr', 'idgqlj', 'ewglgo', 'ruymhi', 'pcidw', 'bvuqj',\n 'npzch', 'yppyan', 'oiguirj', 'iijvwqj', 'jvbwjys', 'yjtunfc',\n 'iaikra', 'oduhdgk', 'ivixur', 'ibcgai', 'djzvcbx', 'lmtsul',\n 'lgnwzol', 'wursq', 'xsxbqwq', 'jqvwnc', 'dcwwvtb', 'vwybnr',\n 'bughwjl', 'rnelxb', 'hmacv', 'ufgdygl', 'aabuat', 'oynwask',\n 'gnfjjf', 'zipbq', 'zxstn', 'jdrbprf', 'jmkvny', 'rblpql', 'vykdj',\n 'qaakyqw', 'osbhddb', 'avgldyy', 'kvpoa', 'fnqcliu', 'zzlninw',\n 'drsal', 'omswys', 'hwqcpct', 'ecraq', 'fvhsbjq', 'raauy', 'pfmoz',\n 'vvqvcm', 'tbjqjun', 'jcfbegq', 'otiwup', 'axvvce', 'dhpdnx',\n 'pennr', 'hvvmvzv', 'binezl', 'ygdmcuo', 'ypwnqn', 'aloxdv',\n 'ucieh', 'kovbtag', 'rgfpaww', 'fpbftg', 'spjowfr', 'zridoy',\n 'blwbbf', 'evwlxi', 'itbcz', 'hgixuo', 'qmoqmjb', 'tkeeis', 'pjiaq',\n 'rbpje', 'ledoui', 'ubecht', 'mphdd', 'uzswsbb', 'ntsybr',\n 'qmnijyp', 'pqwawe', 'ltytill', 'dpnxy', 'pkxqcol', 'ayrdi',\n 'mycnd', 'knotsn', 'zvcrjl', 'qwroblg', 'vtrktey', 'dzilezi',\n 'wzkxg', 'varqc', 'xlpttyc', 'xxqhnl', 'jpxywa', 'kjdsh', 'hdseebw',\n 'bxqbp', 'flazqce', 'xrtab', 'rupsfq', 'asswer', 'rhqof', 
'hjzdv',\n 'addsgax', 'cuahzjj', 'xwdilr', 'osqgg', 'pfhwv', 'rqorah',\n 'ggdlnv', 'truvaoj', 'jzuldwf', 'mjddj', 'vixtn', 'eslxoaj',\n 'cmoypm', 'jvvzs', 'oqgxcc', 'tptls', 'wwgwbj', 'tysuhg', 'xbnqb',\n 'iogjvg', 'fbxdmr', 'zdvsmx', 'hiuja', 'watrt', 'kjawab', 'entxk',\n 'jmnkaox', 'zznsox', 'asmzc', 'soblvp', 'quyxjw', 'udrdc',\n 'hyylvvw', 'gzfwxuv', 'jjqmjw', 'faegxbl', 'lqjcg', 'bzmruq',\n 'bykuh', 'miwhd', 'ykgtwhk', 'oyobzwi', 'oltwpua', 'ctulabr',\n 'dwandd', 'vhuhox', 'vtlknw', 'ywvln', 'qemqdeg', 'akezvx',\n 'kjmjpv', 'vwuftx', 'kreaxnj', 'fvfop', 'cxabs', 'jfacbje', 'eecnz',\n 'cmblit', 'gfvpoq', 'whywnh', 'pghvx', 'ohgkmf', 'xxtiwd', 'nkojni',\n 'dlcicnp', 'bwyvyyd', 'gifup', 'vgjfr', 'hhteifi', 'kjhffq',\n 'pawqaxl', 'yozro', 'slxluvd', 'amqcquy', 'vnnxkr', 'wgdur',\n 'rvawiu', 'thcwnc', 'cddut', 'vnrtrv', 'fnfio', 'nhvxe', 'rfdqmj',\n 'ucblh', 'ccbnt', 'lxckaoy', 'fnwcbx', 'gmdbiwt', 'ypvwjy',\n 'cbjazk', 'qmujnm', 'nsqot', 'lhcqt', 'ijxcts', 'nujrms', 'itxel',\n 'ghukr', 'qpwitlr', 'gcafqrn', 'lcoho', 'lfzab', 'vwhgceb', 'vgsgy',\n 'jrtgo', 'ryxlz', 'deoyq', 'ybenly', 'lyysca', 'sodvazo', 'hbnnoz',\n 'ovgvda', 'elwtjx', 'soydmn', 'trdsi', 'mwwjwo', 'vupwj', 'dszpcv',\n 'kkhjdj', 'ewmyo', 'nmpeq', 'oepldcq', 'xttrgu', 'wbcbxi', 'jakzk',\n 'peukyw', 'fvcqv', 'xklwuu', 'hsmva', 'kslmkq', 'azllbig', 'stnzih',\n 'wfyud', 'ihauy', 'cfxmj', 'pdyogwv', 'dcqdpa', 'xhusy', 'jfpmpmm',\n 'odeiiw', 'ozyaer', 'uykzvma', 'tuaznxj', 'kdnbdki', 'syrnsem',\n 'fdysz', 'hhrpo', 'fglzfi', 'vgcqzqm', 'qhsjr', 'bvboe', 'dpfwpvg',\n 'mvvry', 'itnnr', 'lgykbe', 'pscow', 'mkrgeqv', 'czffv', 'apteht',\n 'jeqixsx', 'ksmbe', 'zamivv', 'vvmyo', 'cwwoce', 'sppubxc', 'qaich',\n 'nmbxr', 'tfkwfxi', 'iakhezl', 'fxujis', 'fkwffe', 'antaylq',\n 'mmfgstq', 'zxaacy', 'zlswx', 'pbqxil', 'eupck', 'qzcxpbe',\n 'rjalbzr', 'wioagbq', 'kreec', 'zsdcuft', 'rrdzb', 'ocdlvq',\n 'oxiroo', 'zcxsqh', 'wbrsi', 'fqike', 'oskzupi', 'thvof', 'dicbyst',\n 'iojwe', 'hyfizq', 'yoknhww', 'nupiyyn', 
'ievah', 'slcgmxg',\n 'cnecpa', 'lcwsoj', 'hnqsc', 'ghipbi', 'exobr', 'nwpnq', 'dmhbj',\n 'amdbmwl', 'xfbzovs', 'puizvu', 'yvsus', 'ykysqg', 'bgqdv', 'zgqbr',\n 'zkjpkej', 'crkot', 'zciymk', 'tleogn', 'sayrmz', 'elwma', 'zugjva',\n 'uifwsmw', 'wstrg', 'xbotd', 'hinsg', 'qpgyoyp', 'xzfocdy',\n 'mbvuepb', 'dtphufk', 'cyapnt', 'yyehhad', 'ohdrd', 'mlibm',\n 'qzdfil', 'rdwszqx', 'bzcbmyn', 'uarjlg', 'mtwpqmx', 'nmagl',\n 'cepniel', 'tylvaa', 'melhd', 'jygeneg', 'fdglfy', 'xcpciu',\n 'ayrel', 'bxceshv', 'kspyg', 'iclkaz', 'ykbzt', 'nrnkzo', 'kxkto',\n 'fabzszn', 'edalls', 'nilmh', 'wwawgnn', 'gymbtx', 'mzipa', 'ajevx',\n 'qppisv', 'otqhsf', 'ippxak', 'bixnqd', 'uqitwo', 'soxcug',\n 'loiscd', 'wqrjk', 'rqntoa', 'fzpxlp', 'tuaob', 'pyqqms', 'krbzmmj',\n 'aijqpfg', 'nstqrbu', 'wmtiahz', 'joplby', 'jyszxq', 'jnxtyhe',\n 'lbvfv']) == 14011\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: xiezizhe\n@Date: 5/7/2020 下午8:52\n\"\"\"\n\nfrom typing import List\n\n\nclass KMP:\n def partial(self, pattern):\n \"\"\" Calculate partial match table: String -> [Int]\"\"\"\n ret = [0]\n\n for i in range(1, len(pattern)):\n j = ret[i - 1]\n while j > 0 and pattern[j] != pattern[i]:\n j = ret[j - 1]\n ret.append(j + 1 if pattern[j] == pattern[i] else j)\n return ret\n\n def search(self, T, P):\n \"\"\"\n KMP search main algorithm: String -> String -> [Int]\n Return all the matching position of pattern string P in T\n \"\"\"\n partial, j = self.partial(P), 0\n\n for i in range(len(T)):\n while j > 0 and T[i] != P[j]:\n j = partial[j - 1]\n if T[i] == P[j]: j += 1\n if j == len(P):\n return i - (j - 1)\n\n return -1\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n # def minimumLengthEncoding(self, words: List[str]) -> int:\n # kmp = KMP()\n # ret = 0\n # texts = ''\n # words.sort(key=lambda w: len(w), reverse=True)\n # for word in words:\n # idx = kmp.search(texts, word)\n # if idx == -1:\n # ret += len(word)\n # if len(texts) == 0:\n # texts = word + \"#\"\n # else:\n # texts = texts + word + '#'\n # ret += 1\n #\n # # print(texts)\n # for word in words:\n # if word not in texts:\n # print(word)\n # return len(texts)\n\n def minimumLengthEncoding(self, words: List[str]) -> int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n\n return ret\n\n\nif __name__ == \"__main__\":\n s = Solution()\n assert s.minimumLengthEncoding([\"time\", \"me\", \"bell\"]) == 10\n assert s.minimumLengthEncoding(\n 
[\"ojtnj\", \"uuydcho\", \"dgsyp\", \"dwxycpx\", \"dpmvc\", \"dvfhmb\", \"flrxjjx\", \"fwhdhvn\", \"rgsakp\", \"aiconf\", \"nzacpk\",\n \"sbxnaj\", \"shway\", \"rgrmz\", \"rysudo\", \"bzkioce\", \"mqxkzvu\", \"wyebk\", \"tymoaz\", \"mlmbg\", \"djbmek\", \"qfnme\",\n \"khkiyae\", \"tjdaxry\", \"sqtcwz\", \"ehnsai\", \"jhncvrm\", \"cxkzgrx\", \"pummt\", \"hzrpfcn\", \"lkyqit\", \"phpqdxw\",\n \"vangm\", \"wcjdgw\", \"pxesvtn\", \"mnqory\", \"bdrzvh\", \"brtzmo\", \"chqgf\", \"bipyxm\", \"meoikg\", \"ysyckk\", \"ojayeiq\",\n \"zrfbsb\", \"yhuotea\", \"crfbhq\", \"tllycn\", \"qxnzihf\", \"avyawpz\", \"bwsjym\", \"myjozc\", \"lbdksm\", \"mctlt\",\n \"dszowuw\", \"syshm\", \"xrvhhkn\", \"kgrcwfv\", \"dwlajlf\", \"yviuk\", \"xegjj\", \"spiczl\", \"vfvomi\", \"mgcujy\", \"dqmzb\",\n \"isrisgt\", \"vdrtuah\", \"vsyth\", \"eoclef\", \"poccek\", \"cgafrlu\", \"crbhpgk\", \"sromv\", \"xmvbca\", \"gobra\", \"ygvlq\",\n \"pjvhe\", \"tfweiso\", \"cskuohg\", \"eyalone\", \"pobkak\", \"nzpxn\", \"lbcrws\", \"uhtfe\", \"eorth\", \"showvu\", \"hxsmb\",\n \"jrggose\", \"izifkb\", \"oqwyf\", \"mozmzj\", \"ijwle\", \"ggtqqqv\", \"geevzj\", \"meota\", \"ifsse\", \"kdtofm\", \"swydhvf\",\n \"tzjhqap\", \"wqwwd\", \"jlinnov\", \"lmxkgeg\", \"stbot\", \"xrsfn\", \"etoyctk\", \"rygagm\", \"vcnrf\", \"zkdge\", \"emqtscp\",\n \"newqcyy\", \"nnuus\", \"exwsxbd\", \"zstvl\", \"lbkko\", \"kygkyqq\", \"oggji\", \"xytbjo\", \"mfbahk\", \"ggoks\", \"lmqewkl\",\n \"qexhyqe\", \"ogaogio\", \"nzvbav\", \"mdole\", \"qvyks\", \"gkupfu\", \"dgmpn\", \"ngrdrj\", \"iitqvk\", \"ipuiqb\", \"ugxfea\",\n \"ialkmv\", \"hmgnx\", \"aoyoj\", \"fvzhjil\", \"butrbp\", \"dwhxnes\", \"etkdwg\", \"cjkghz\", \"tovkq\", \"mmxhv\", \"jgcsn\",\n \"hmictal\", \"zxmnek\", \"pcoeg\", \"ntyqmlq\", \"hfubhtg\", \"ydjbv\", \"xnwlqto\", \"hatgi\", \"bsaczd\", \"pokwk\", \"arxlula\",\n \"zjtqlk\", \"ocfxup\", \"nsnqjc\", \"xdcsopi\", \"iqxyxp\", \"xfmtpvm\", \"bqtgcf\", \"wboycn\", \"aoeda\", \"uowqdgj\", \"rzzzx\",\n \"liucs\", 
\"ejzxz\", \"qmlehsh\", \"igrbmon\", \"dpmkbon\", \"pmayh\", \"nujdwdw\", \"awdgo\", \"ijgkzk\", \"inhee\", \"jzdtv\",\n \"adhauh\", \"grtmbp\", \"qndbvw\", \"zprrw\", \"mpqieq\", \"jzmzeuu\", \"fcvftqs\", \"qxzxqy\", \"lidguzz\", \"eazwd\", \"zjhfsz\",\n \"zsnzefh\", \"mnckfg\", \"zjgtq\", \"ckyxlif\", \"fznfo\", \"jegnof\", \"lzwyzb\", \"ozivfio\", \"igkclsa\", \"bebzn\", \"bitsggm\",\n \"lrnwin\", \"hjnnzr\", \"idvoirn\", \"dgile\", \"vfngh\", \"xbmur\", \"rqaftt\", \"wjwwwxs\", \"btreou\", \"gjsycg\", \"pvsiylz\",\n \"ccxzgdf\", \"excrrrr\", \"fiesr\", \"jdioj\", \"uzwsc\", \"odrlcoy\", \"hcsit\", \"ptwfprh\", \"sbqry\", \"kffvy\", \"ejeawbp\",\n \"omvcc\", \"iqgxqlt\", \"edsuu\", \"xnbue\", \"qfbcx\", \"fzlmbkl\", \"wrrcueb\", \"mmqispp\", \"nknilwd\", \"dewuhju\",\n \"hmdqlxy\", \"vjxgg\", \"lkuexo\", \"dzvfscm\", \"voulbs\", \"uevoqgq\", \"kmhwu\", \"oglzllg\", \"torhihn\", \"fhuqzc\",\n \"mmcfhb\", \"woyayma\", \"uznsvre\", \"mmxed\", \"aoskwg\", \"xrosbm\", \"hpyrgh\", \"tghwbwh\", \"hcwzn\", \"iepeftj\", \"judij\",\n \"kudbk\", \"jonpv\", \"lywck\", \"rxelz\", \"bgifz\", \"mehbxq\", \"fmqnz\", \"sqrmzj\", \"iqqjzex\", \"qioliz\", \"kjizbf\",\n \"lgdcffc\", \"pfgmcr\", \"trdabul\", \"vlqjdnc\", \"jjvbxe\", \"fqlayw\", \"ilbhtyq\", \"saawulw\", \"gxysrb\", \"kighql\",\n \"eceapr\", \"kztbcww\", \"jedkoy\", \"dxpcaga\", \"ndacphe\", \"rcoit\", \"ywgcnxg\", \"klipfup\", \"bddws\", \"jwyof\", \"lrfwgo\",\n \"bediwuf\", \"ujakh\", \"ppima\", \"xzhwvm\", \"guzmsqt\", \"ffbliq\", \"adjmynm\", \"akabzn\", \"inmykju\", \"vlcjyv\",\n \"orquepg\", \"tufrk\", \"vqpjymm\", \"lvuab\", \"qzxav\", \"ekcmu\", \"uqtuhie\", \"kfvtgf\", \"nklwjo\", \"ujxlfpl\", \"zobfpq\",\n \"eignijd\", \"ythctg\", \"artllm\", \"wodhh\", \"tzpwszq\", \"njdqegg\", \"hzrqib\", \"zvoxtfd\", \"htboem\", \"axjuix\", \"bvmvm\",\n \"jbnum\", \"bxdth\", \"atejt\", \"gqsqtnk\", \"fykrjbp\", \"ldyhonr\", \"wcuoj\", \"upphc\", \"agydg\", \"cjmwk\", \"rhxbqh\",\n \"tpgozdd\", \"qyqoy\", \"zjqutw\", 
\"qoohqny\", \"nsiacwz\", \"xupin\", \"criuvs\", \"eswjeft\", \"pdmevn\", \"zvogq\", \"lrrvo\",\n \"qhfqqpw\", \"ktudfg\", \"ijvmi\", \"neyjjdx\", \"rllpi\", \"vllvaa\", \"esebtu\", \"jyhcrh\", \"otgmr\", \"oudvyxj\", \"pmszy\",\n \"opeed\", \"gicni\", \"mnuzn\", \"mjbfpod\", \"sqwgxu\", \"dwniwz\", \"wmbmmv\", \"lyafuy\", \"zmvlz\", \"kopxzuh\", \"urcbbiy\",\n \"guhco\", \"nerjm\", \"lpdxc\", \"hxmjzz\", \"hynagc\", \"iyxeczi\", \"bdfxmoz\", \"yybnpqd\", \"jvgnb\", \"oquqem\", \"fmclmz\",\n \"dmkhf\", \"zxbjpp\", \"qpxgcir\", \"iecvjm\", \"gtkne\", \"lgtqrbc\", \"gilbn\", \"mcxsg\", \"ncwbhn\", \"wkriiq\", \"zhsir\",\n \"ptkkmw\", \"jcbpkrm\", \"vbefo\", \"vmbcd\", \"vqffj\", \"fhqzjt\", \"nryuh\", \"vmclav\", \"cjyggm\", \"sanev\", \"rrdocz\",\n \"zqdexbs\", \"jrxstt\", \"pyhcesj\", \"aagghyr\", \"cyemjrb\", \"aliohf\", \"qaslg\", \"pnyjzxz\", \"pehnvi\", \"suhuw\",\n \"twopabr\", \"sapqoc\", \"mckrh\", \"nzlgrxt\", \"aqpobnu\", \"pirbjgb\", \"plzlj\", \"raylxpu\", \"gyasfrh\", \"urjfxux\",\n \"xjbwau\", \"iupknn\", \"vhxnc\", \"dnbjop\", \"vrxhwmd\", \"vjsmkh\", \"rfmqids\", \"smaiwt\", \"vkyfo\", \"bjqyxc\", \"rbbbp\",\n \"dlkzg\", \"dwvdwu\", \"prulzh\", \"bavge\", \"ehhrz\", \"xxjqk\", \"pxopmp\", \"okmkmb\", \"slcznpp\", \"nvqlb\", \"jalrk\",\n \"parwlcd\", \"anbxo\", \"oqcxyzo\", \"fjhrdjh\", \"pgvnwfe\", \"yfjyvh\", \"quvszjm\", \"xyiig\", \"xtncqv\", \"svsix\", \"jvpdnh\",\n \"owuiv\", \"bsrugtt\", \"rmvggws\", \"lmdql\", \"kvmvd\", \"xrpmaw\", \"ssnxyb\", \"oworq\", \"rmmpuya\", \"rijpih\", \"aelazka\",\n \"kncksqx\", \"yvtdiy\", \"epato\", \"pbbamj\", \"fejsw\", \"zgsru\", \"ekwrre\", \"zqben\", \"vugxi\", \"fvcsdp\", \"rujcews\",\n \"asqxya\", \"worjlsd\", \"xggakg\", \"kzfpot\", \"haqon\", \"ypqxzz\", \"mmkzwt\", \"bdhif\", \"exzhv\", \"srnklzh\", \"hlrunb\",\n \"dwfyke\", \"fvgbtdm\", \"aeutp\", \"czhefx\", \"tegfw\", \"jkxpsb\", \"gxkfkw\", \"exvntd\", \"gvuti\", \"jdmly\", \"owaqhw\",\n \"fopuxzv\", \"edrvil\", \"biszwgv\", \"vgckzd\", \"fqdxn\", 
\"qktdf\", \"hpgwrk\", \"gpxiips\", \"vxnlab\", \"yylxz\", \"hsuscch\",\n \"bhivaf\", \"wzrwtc\", \"ebplv\", \"yzxykou\", \"mxlssom\", \"evghv\", \"hksleg\", \"shybau\", \"zeyqa\", \"tljqka\", \"axfkec\",\n \"fatdj\", \"janlkcc\", \"sjorbra\", \"jplge\", \"oazzot\", \"qbgtncn\", \"ozlil\", \"stohadq\", \"rvpuwn\", \"oqwpl\", \"byftgi\",\n \"ubuusl\", \"fkogr\", \"bybdyhj\", \"vinyuzs\", \"ivsqvz\", \"vmnae\", \"gckxw\", \"rozbe\", \"glvxwj\", \"rcgicu\", \"xmvbd\",\n \"itycsry\", \"llmwrs\", \"fuqth\", \"styrrwl\", \"wsseuln\", \"xwflcli\", \"muxgz\", \"ypmbboh\", \"rpmvnep\", \"wjvvnv\",\n \"arjnw\", \"toauwc\", \"ltjxqrl\", \"basffd\", \"clxozwd\", \"glmrv\", \"iejgfj\", \"cvkoj\", \"wotjf\", \"mqucec\", \"xalgemc\",\n \"hgimkh\", \"golvfq\", \"fuqpmak\", \"mhpcp\", \"pxoibt\", \"ledqa\", \"guzbyr\", \"ztvbeka\", \"racdp\", \"krsngra\", \"aaiknz\",\n \"bhoobyc\", \"xibbe\", \"yohepxk\", \"eclevs\", \"ldliwcm\", \"qatvlk\", \"eiypbw\", \"vxvtwa\", \"nkdwsej\", \"ftmyvp\",\n \"gpthye\", \"gazwoi\", \"zzgipon\", \"cithg\", \"wpabujl\", \"jhezlnb\", \"vqqaxfg\", \"kvpbk\", \"vggjemp\", \"owylv\",\n \"lgwtfpg\", \"jjqvfm\", \"xbhga\", \"tulvfv\", \"sefuo\", \"hbysv\", \"ozopepd\", \"awyrifd\", \"pnudwx\", \"vreje\", \"zhpgw\",\n \"qygbf\", \"tvbrvy\", \"zzmcw\", \"cznee\", \"deuzxt\", \"qfppjvi\", \"ilkps\", \"ydwhg\", \"krwkxzu\", \"mnsidg\", \"rkxyyr\",\n \"ajkqz\", \"xtmom\", \"vqocor\", \"fympcl\", \"yyleyzy\", \"jjvzhrn\", \"kpmxvuz\", \"txoeqlx\", \"lhhmn\", \"chzgpf\", \"ncnjxle\",\n \"ihxrg\", \"feqixq\", \"lkfhcar\", \"hfnsh\", \"bifczy\", \"umknat\", \"yrhgkh\", \"mgpcu\", \"qotukst\", \"yqlmfq\", \"ttcdp\",\n \"xnjjzm\", \"cukbr\", \"hjhjb\", \"iikfcsr\", \"nsqbnnz\", \"dauygf\", \"cmydq\", \"lfnhqnl\", \"ppqgs\", \"hscbfug\", \"ohzisud\",\n \"opspdkv\", \"aauxbop\", \"wpkhzo\", \"sxbsgu\", \"tajrv\", \"ololy\", \"mxmus\", \"vizvxv\", \"osaqz\", \"rxygkn\", \"mrzqlf\",\n \"zrriyxb\", \"ufroe\", \"bajozg\", \"atpsu\", \"uhgauzu\", \"tffdw\", \"mdjulde\", 
\"rbrmy\", \"jhkqvwl\", \"gzsultq\", \"nkbfi\",\n \"xtvwh\", \"dryzcv\", \"emaxuk\", \"zucvutb\", \"jdduyk\", \"bjdin\", \"loicuq\", \"qhjjb\", \"rgfjbq\", \"mphnk\", \"lxvceyx\",\n \"zeoxb\", \"fxhnxu\", \"qpbipe\", \"ophwp\", \"wiioer\", \"quchwj\", \"pouxunw\", \"bloxgg\", \"xbsma\", \"dtwew\", \"xstorn\",\n \"qfrfkz\", \"gxusbsn\", \"dhnxd\", \"mhstbs\", \"hekbtu\", \"wvrrjw\", \"yeiwd\", \"patplsx\", \"qmyiyi\", \"mowboj\", \"iskyd\",\n \"bqhjj\", \"povppk\", \"vthpwx\", \"uuydaw\", \"rduxvez\", \"vmcww\", \"ylruvph\", \"ymqosp\", \"wzcvohg\", \"lhepwta\", \"bckhc\",\n \"oiyyt\", \"wqzfv\", \"uduec\", \"lkkbtzl\", \"prvpbo\", \"jrwstii\", \"ijztoo\", \"qwwth\", \"vqzqiun\", \"krnjp\", \"zyanpiw\",\n \"ojhjhvg\", \"lohmb\", \"thqtf\", \"reptzv\", \"zgkyq\", \"lhkvy\", \"cmjwl\", \"fmilgpw\", \"jrfawz\", \"vrtzd\", \"ezgfl\",\n \"plzng\", \"zidzso\", \"civavlg\", \"vtwopu\", \"ljhckxo\", \"nuydt\", \"qembl\", \"fiwrre\", \"gfrgi\", \"gzegiq\", \"mltlqo\",\n \"pcett\", \"snbsc\", \"msibcqn\", \"beacrhz\", \"vsycjt\", \"gjqji\", \"smcegol\", \"zregkp\", \"smcazoj\", \"dziqad\", \"jpuwp\",\n \"hnlztac\", \"vduitco\", \"wyencad\", \"bkdnnqo\", \"cabzyg\", \"mgpcwr\", \"fxgvkxt\", \"wlkcrdd\", \"bhmhsy\", \"gqcctjc\",\n \"atafpt\", \"vdzhmcg\", \"ighxj\", \"gfqpale\", \"fohbrtj\", \"mfpsgt\", \"tarjocf\", \"gyycb\", \"qvqfryl\", \"jpwowwc\",\n \"jcgcg\", \"gmrjze\", \"nfptxq\", \"hmjhxge\", \"ieelj\", \"suvkgr\", \"nwjxe\", \"tkepqm\", \"extnpmq\", \"rxzdvf\", \"relzaa\",\n \"hfhgaq\", \"lmihlz\", \"pacocq\", \"dclxr\", \"oknoem\", \"pbpnnd\", \"nleerfl\", \"tvytymc\", \"aamfnl\", \"ufdnq\", \"bxyzvyh\",\n \"vksvout\", \"lohxhf\", \"sskgn\", \"aawbv\", \"hrvhx\", \"wvoqf\", \"vxkvh\", \"oqany\", \"bcmyd\", \"epdddqn\", \"zrlej\",\n \"bchaf\", \"hmftii\", \"mefcrz\", \"wbxvc\", \"ewwnldf\", \"cqecxgh\", \"cnwvdmk\", \"vetrw\", \"zmogwov\", \"lshlzpe\", \"lijay\",\n \"tcdqg\", \"xavqixd\", \"yjkhtsl\", \"myjvow\", \"cgthhd\", \"taaii\", \"iuuegk\", \"lcypmle\", 
\"wesrit\", \"tybco\", \"nhxysw\",\n \"awkrj\", \"jcmqa\", \"porvo\", \"nrypriu\", \"vznnevp\", \"hzklwi\", \"vapuxh\", \"wyfkn\", \"albemu\", \"ttfdbl\", \"dbqrjv\",\n \"cxals\", \"qzitwf\", \"ysunur\", \"llsefy\", \"cghfzji\", \"jboaa\", \"emhlkw\", \"khhmgha\", \"twlxgjz\", \"pyujor\", \"ozcax\",\n \"fetvovo\", \"mdhrrd\", \"qdhdne\", \"fiuvw\", \"ebyxh\", \"ldaothh\", \"vwyjf\", \"yjyljlu\", \"ivroqg\", \"qvpeyec\", \"eemsdra\",\n \"wavgeqk\", \"bjejrqg\", \"mdjimoz\", \"fgopy\", \"lgwodr\", \"cunvszh\", \"wiver\", \"ghmog\", \"jzgfyk\", \"vxlbx\", \"kvgbtn\",\n \"cunorte\", \"mtesdc\", \"zdzmqu\", \"pigik\", \"smruadg\", \"czjxlt\", \"kukgaok\", \"tsldpqq\", \"luomo\", \"ezbcvdc\",\n \"tfetwes\", \"uopzf\", \"wsvezkw\", \"wrnlvbx\", \"bpqungd\", \"jqnnof\", \"rqhiomi\", \"voulqb\", \"ouspxn\", \"chngpz\",\n \"fbogfcv\", \"nqhunxo\", \"rydbke\", \"ewduo\", \"suqqwup\", \"oxzfxj\", \"kuwfwm\", \"euiics\", \"mvftoau\", \"vstfbm\",\n \"vnmtoo\", \"muicf\", \"bjbskxb\", \"knbomlf\", \"enrbtfk\", \"hnaqe\", \"vxzsr\", \"gkqma\", \"qygmn\", \"ztkybmb\", \"injggpk\",\n \"enqrgdk\", \"rkgoct\", \"tgaiu\", \"dnknoxk\", \"iwuou\", \"oxanccl\", \"xestej\", \"ekrqq\", \"xbwhz\", \"jkdvxfh\", \"oybaay\",\n \"afyhci\", \"papffjq\", \"bdppssw\", \"qwyvjx\", \"xmnnosl\", \"kvqzjl\", \"wcwii\", \"ygfvt\", \"tpabbht\", \"kjmaq\", \"duschjz\",\n \"gguiof\", \"wgfhve\", \"joqmfjq\", \"smqfd\", \"ynlovlz\", \"sgrzum\", \"bobmux\", \"dcppi\", \"isdjrwl\", \"lbevb\", \"efqsirq\",\n \"hlgfql\", \"enmemlb\", \"dbmfk\", \"ibfpzm\", \"rtdnooq\", \"yicdq\", \"xadul\", \"dxibxzi\", \"yyxnj\", \"jhsdzxw\", \"thltbi\",\n \"kwhreyi\", \"hrocoa\", \"fnaalbd\", \"vnwona\", \"nnonm\", \"naqaf\", \"xgzzies\", \"uhruynk\", \"kgadfx\", \"hyohzbd\", \"hnajx\",\n \"yipzh\", \"ezdxaet\", \"xbzppoz\", \"rwnewxz\", \"hlcbkmb\", \"znyhu\", \"zsqtpkr\", \"gmyxr\", \"rphyvo\", \"bgjuz\", \"nulpv\",\n \"eejfoso\", \"xmwcnes\", \"xxxxnpe\", \"jezkk\", \"idfsxrw\", \"qgzjtf\", \"arpzpo\", \"hxsanlt\", 
\"emvotcb\", \"sknzhvg\",\n \"icitca\", \"ivhdln\", \"sqilerz\", \"ndigw\", \"bcsre\", \"mibbep\", \"zsczom\", \"cgghjbb\", \"fkylfgt\", \"bvzofs\", \"mefsng\",\n \"bispbza\", \"tsosgy\", \"xopalrw\", \"wserf\", \"jbmlz\", \"xidxny\", \"ffmpjos\", \"vddwxmd\", \"netnsg\", \"kgevsp\", \"pguuv\",\n \"cwisp\", \"slxiyb\", \"dmwaguc\", \"jobwusu\", \"uytcqrv\", \"hzhsy\", \"zrlsdd\", \"xhxah\", \"rxzij\", \"zwdgy\", \"ygmvkz\",\n \"drkzbo\", \"qpsal\", \"tpxvl\", \"lfmfl\", \"sayjvlh\", \"rdamym\", \"ycuzd\", \"zkycu\", \"hdesec\", \"unequk\", \"lpkdid\",\n \"vorxls\", \"admsdop\", \"rqnvkyg\", \"krnqqtb\", \"rxfms\", \"xfthd\", \"pxjbk\", \"gpslrg\", \"rwziwef\", \"usxgqvz\", \"baxxye\",\n \"ocrkkrw\", \"lrlgsp\", \"ceyctg\", \"rniml\", \"vavug\", \"jgircl\", \"jrpnmsa\", \"rywvlfg\", \"prxnys\", \"fkzmknn\", \"ooelc\",\n \"btvfs\", \"yqepuvw\", \"tmmmb\", \"qmpzexb\", \"zjckjvd\", \"aieytbb\", \"oafqq\", \"szrcyh\", \"czrxgae\", \"ifkte\", \"hfgajox\",\n \"pwpnkqq\", \"yqphogn\", \"xuwthrd\", \"mpcmy\", \"qitdoa\", \"avlzfrh\", \"ywpip\", \"dgeki\", \"fgbnx\", \"tyofu\", \"xziqzj\",\n \"qxzvqz\", \"vtsqk\", \"ipkld\", \"yfhim\", \"ebaegdc\", \"ubhrh\", \"ldejv\", \"mtflwy\", \"ocpyj\", \"yopgqs\", \"fkjxxd\",\n \"njnnwr\", \"nylkeb\", \"taymdqv\", \"ekpznq\", \"cbzobmg\", \"bucdds\", \"qjozu\", \"uvpghor\", \"obhnu\", \"ljkxbg\", \"uqrxjtf\",\n \"xwbxiw\", \"oxsmcg\", \"spchdd\", \"pcuitj\", \"faidq\", \"tybmy\", \"uygiyp\", \"qloizj\", \"cafgmy\", \"smetd\", \"kwcwb\",\n \"tdabxf\", \"fpmrc\", \"lfjujn\", \"vvmvex\", \"mnsgdc\", \"enjlgsw\", \"ohwcg\", \"kxjdaup\", \"rotjarp\", \"aovdoq\", \"oviwq\",\n \"qwaxs\", \"bmazco\", \"plcljsv\", \"yytjhl\", \"vgwjm\", \"drnue\", \"vqjgf\", \"uqlsfy\", \"bmqmfp\", \"lkauwna\", \"ozmqce\",\n \"heunaxr\", \"zaffbj\", \"arbek\", \"qjnllw\", \"fdkhlz\", \"wgmbwh\", \"yceqag\", \"ltjjq\", \"yurggfw\", \"puaafsl\", \"tjiqkyt\",\n \"yuzub\", \"ytmrfq\", \"ommmu\", \"ipknn\", \"iubnuab\", \"dzthvc\", \"zjbzpew\", \"dcooev\", 
\"pjydqcf\", \"zuojlzy\", \"zwjyfc\",\n \"spmac\", \"dfkbnz\", \"fzriie\", \"asusog\", \"hdodx\", \"drjpo\", \"ddyif\", \"chabv\", \"ebvkwrr\", \"burdjl\", \"jjddi\",\n \"dljzkye\", \"samyg\", \"zwgxcq\", \"xtratwo\", \"qfopz\", \"xvlaw\", \"laage\", \"btdium\", \"vzlnzt\", \"kmvbzkq\", \"kctobsx\",\n \"kazbelu\", \"yxdwrk\", \"eslvjc\", \"nhsdmvs\", \"zuxqcc\", \"hqtxovn\", \"zrbdai\", \"fgjxs\", \"txecvio\", \"kjxlq\", \"dkuxss\",\n \"mkbevn\", \"pzmdqc\", \"ihyia\", \"atsub\", \"twytus\", \"nzooxj\", \"qwuoly\", \"fdoigo\", \"zukhlh\", \"mugeaxt\", \"qqsfyls\",\n \"qqtql\", \"wrvphcx\", \"nzjfhx\", \"uequtk\", \"fxuto\", \"qnast\", \"nveys\", \"ltbrcth\", \"toctdib\", \"fbpnh\", \"umxfgn\",\n \"zvjuta\", \"yeron\", \"qzvswqk\", \"gbctr\", \"ryryz\", \"zieknd\", \"zcsna\", \"jrhak\", \"zfxqsj\", \"urlba\", \"lbozqf\",\n \"yfcjaa\", \"hazgy\", \"gmmfzyz\", \"zjvkyc\", \"rvfdcf\", \"daitab\", \"hcxqgum\", \"qwakp\", \"ltbsjwo\", \"pqqtygx\",\n \"upxcxao\", \"qylot\", \"lmxqc\", \"dwzcd\", \"tjccm\", \"mqcpap\", \"wgxqtr\", \"ivycvxy\", \"wdykg\", \"snvqka\", \"jxtvtsb\",\n \"jnyowsq\", \"iwfuoig\", \"cuoixhu\", \"fzwalg\", \"djhrar\", \"sjmahk\", \"dyusf\", \"wrxqvdi\", \"ftytlor\", \"jsjbv\",\n \"vjbebg\", \"agvsn\", \"vvmpgm\", \"gsgjopk\", \"vbqvhy\", \"afopf\", \"zybfuz\", \"aqsgc\", \"ytrjsvn\", \"wlhdfr\", \"vdhvl\",\n \"jrlvr\", \"cscxwf\", \"yhgbew\", \"wupbl\", \"ssuhyvv\", \"bhcirzk\", \"oykwk\", \"ijbto\", \"qsnpgw\", \"otwzage\", \"ytqzh\",\n \"rgwow\", \"bvhgkwh\", \"fvawxie\", \"fllxw\", \"gfcqf\", \"scoqb\", \"qubrq\", \"gdxjtp\", \"ahrpck\", \"awnlgi\", \"cmehsyp\",\n \"dwmytpy\", \"firyeq\", \"oohwhr\", \"caelk\", \"mqemvs\", \"qflkzi\", \"tfpibll\", \"ybhzd\", \"ctsxri\", \"yurocj\", \"dnlnl\",\n \"ydmdva\", \"xkaotl\", \"xovax\", \"ypynrqp\", \"kwfzw\", \"fbgsmrc\", \"tutime\", \"rcugul\", \"cvewno\", \"typhbpa\", \"wazew\",\n \"flzfs\", \"wxxbza\", \"ogjfkl\", \"vjlebet\", \"imbubm\", \"xinyncy\", \"dqmxfy\", \"buhagzh\", \"jjadpos\", \"gejyz\", 
\"gxshqk\",\n \"wkwrs\", \"dqeriqo\", \"dmixr\", \"bysjih\", \"aoloq\", \"ddwhsxs\", \"nteqv\", \"cqagf\", \"ditsrn\", \"wfxgl\", \"jwjqb\",\n \"rvkxj\", \"rxapr\", \"yrlkip\", \"npquasb\", \"nvezlr\", \"gmhchcx\", \"lodfihi\", \"dheypxa\", \"plzjykh\", \"qopsthg\",\n \"zsnes\", \"raongg\", \"zrpnac\", \"tzmtltj\", \"jsecdn\", \"rzudh\", \"hkcyic\", \"xsxmw\", \"reeuwpn\", \"grkwrag\", \"gvzzbsq\",\n \"lrfta\", \"aqyvbkj\", \"ytgfu\", \"wcmvd\", \"olnvfi\", \"hhgmhb\", \"kojmepr\", \"wpohl\", \"szhgg\", \"hymiblu\", \"lkwjr\",\n \"zulqpz\", \"sdcqjo\", \"olgsgez\", \"lxkpqci\", \"yxcgn\", \"gmvex\", \"fskpppe\", \"utzto\", \"axncvp\", \"lcyahba\", \"ydeae\",\n \"zvzar\", \"ghfkkqv\", \"ryrpg\", \"gucpbq\", \"reofjz\", \"cdnoo\", \"dchhh\", \"byiwd\", \"cqbhok\", \"ksfnoa\", \"xsmmlr\",\n \"qyvdfqh\", \"dzshj\", \"bpifnzh\", \"uxmoml\", \"jdxvojf\", \"ihfll\", \"vwesfof\", \"zynnpb\", \"fwzra\", \"rxlgww\", \"vkmjd\",\n \"hcjgzt\", \"mkapfl\", \"ffjqlf\", \"wulaebc\", \"gurramv\", \"tufkzai\", \"bxprqek\", \"nkohv\", \"abgfwyl\", \"slslg\",\n \"wirsnh\", \"pykvuh\", \"fdrwk\", \"gtmgsxe\", \"dxsaab\", \"lqiryty\", \"aoezg\", \"tzhugcg\", \"uoarf\", \"dwhsv\", \"rjiuoi\",\n \"ycgcdnf\", \"rtfmwz\", \"amkjc\", \"woogtdi\", \"deprx\", \"ucknu\", \"womfm\", \"xdeev\", \"qapxpuu\", \"ngulnk\", \"fgtxyf\",\n \"hnyabid\", \"cilmy\", \"wrsewtf\", \"luvtmo\", \"wftuh\", \"ifoeeqp\", \"dtfdhhl\", \"rwnburg\", \"fohkkul\", \"frqqi\",\n \"gsrcyc\", \"teuync\", \"dvpvak\", \"daqjki\", \"kksscp\", \"somsde\", \"tyfvck\", \"ftfekl\", \"ahncv\", \"yvosm\", \"qgllvg\",\n \"ylfwv\", \"jenqns\", \"lqovrnm\", \"iyger\", \"nfvtsv\", \"bknxmqj\", \"pfzybdr\", \"hqjol\", \"chlpk\", \"etgrtqa\", \"msuxdx\",\n \"vnoatf\", \"ypdzomn\", \"vsshmg\", \"rfkipq\", \"jvpbiz\", \"vbskd\", \"edsoixj\", \"uowim\", \"hqtsj\", \"inbsxal\", \"ookrv\",\n \"ipotdnk\", \"kmazqd\", \"jpfghb\", \"gvmnnpv\", \"juvwa\", \"xtkvzw\", \"ejqcl\", \"ebgcnt\", \"ztuyu\", \"dlzthw\", \"zzipe\",\n \"iaxwdxy\", 
\"htynwkc\", \"lefbq\", \"pizfr\", \"vttrsv\", \"oagak\", \"eqlrom\", \"vttefg\", \"dsrmk\", \"oekbe\", \"cvugzk\",\n \"diwvz\", \"gxmfob\", \"vjowzm\", \"mjpop\", \"uznhz\", \"kqvjwug\", \"wjqvxfg\", \"jbpwezu\", \"wsckdx\", \"slqfomn\", \"omuxk\",\n \"zlgblso\", \"kvitoq\", \"dmafq\", \"djxmzk\", \"pjqfegq\", \"yjrttas\", \"siakcx\", \"iutiqk\", \"nwfdj\", \"gbgtazk\", \"cpqtf\",\n \"panmlr\", \"aqubhsg\", \"iwdim\", \"nqetym\", \"mwazh\", \"thyhy\", \"ydtxan\", \"xfoin\", \"lsosc\", \"esznfa\", \"xgdisi\",\n \"flvbzh\", \"mpltx\", \"iwjpsqp\", \"udfycf\", \"rntmc\", \"ltflwu\", \"wkgbaw\", \"bcuzt\", \"hejxuhb\", \"lguohe\", \"klnhb\",\n \"mjump\", \"avcwrol\", \"yrcqlc\", \"ihxul\", \"avajh\", \"gtpauet\", \"iemzk\", \"rfdub\", \"gqnbk\", \"cfcmg\", \"iobyh\",\n \"iruuapf\", \"tyifwt\", \"sbdtp\", \"mngcpmb\", \"oaqpolm\", \"mmimmh\", \"gxknadi\", \"bmxhuu\", \"ulyoa\", \"keidy\", \"vsnfk\",\n \"cnnnfty\", \"pkajm\", \"ddgeecb\", \"prxidqd\", \"wmenvhd\", \"akjcqo\", \"tnekfef\", \"ipvsi\", \"pzjwq\", \"wmmct\", \"erdjnuf\",\n \"vgeaqs\", \"nlbdx\", \"dpvbe\", \"dgeqz\", \"aiguzh\", \"akawppx\", \"tykrjcs\", \"gvavo\", \"hkyle\", \"yhedx\", \"xzqcg\",\n \"gzdxt\", \"csssbk\", \"tmekrmv\", \"lfsgo\", \"iizahz\", \"aszfd\", \"aybqnsl\", \"vadwxsl\", \"ulmiii\", \"xaxdugp\", \"sfnnsbg\",\n \"dkyruh\", \"qhpqu\", \"amesjd\", \"evjuki\", \"vtqjw\", \"aoabp\", \"qnsuhe\", \"bplbx\", \"fdqok\", \"ozkhgib\", \"cggwzys\",\n \"nbknjay\", \"ooambw\", \"evmvegf\", \"htdlxik\", \"kahcume\", \"bojpn\", \"bhipie\", \"hdyjslw\", \"pbkkq\", \"qwszl\",\n \"fgkbzsd\", \"hejdx\", \"vmcfhgx\", \"puzlmmm\", \"meffil\", \"boakbiz\", \"eczot\", \"fvkkit\", \"jebfx\", \"umvkjg\", \"uikgs\",\n \"rycgpf\", \"rfmfgmy\", \"nveho\", \"bgywqen\", \"gepfma\", \"vquyq\", \"wcercbw\", \"wbpjkxc\", \"rqloeda\", \"omclokx\",\n \"hvotwp\", \"tvqfxxu\", \"qrtghk\", \"hggme\", \"arnmfnt\", \"cxprj\", \"rspdt\", \"hlgfq\", \"dmqel\", \"pcerxk\", \"ptqjc\",\n \"wzreko\", \"kahks\", \"xjnzo\", 
\"xzzye\", \"xbdeu\", \"koiwkv\", \"jlwkkjr\", \"xzdixoc\", \"xeedvrm\", \"mrtnhqi\", \"jaeann\",\n \"mvubp\", \"olklqf\", \"retbgcj\", \"qxxlhh\", \"cqyyoy\", \"ngwikg\", \"qijte\", \"sjzck\", \"zkmkx\", \"ongtzf\", \"tanow\",\n \"smgntvq\", \"urfgt\", \"xwcroa\", \"kadcpd\", \"cxhgo\", \"walku\", \"kvvcsyt\", \"elwmuxk\", \"bfphtm\", \"vzeumuq\", \"sknvev\",\n \"vbsnfd\", \"grmbg\", \"vjahwt\", \"dmcbmn\", \"smubz\", \"jobbfcv\", \"ujlkm\", \"lcthh\", \"bauuqdu\", \"kjgzgtq\", \"gicjz\",\n \"nugbax\", \"kbnjfiu\", \"sqfpein\", \"obbgfww\", \"ykggxjx\", \"irnmog\", \"xniuv\", \"rqiwycq\", \"hzlgyu\", \"yjtrttv\",\n \"satym\", \"dgqhlkk\", \"rghal\", \"tbekx\", \"kkwmo\", \"eahwhks\", \"bpvmbur\", \"sqtgkj\", \"khboz\", \"enefr\", \"vkzqvt\",\n \"wfruavu\", \"ninomu\", \"ypktaoa\", \"mlpmoit\", \"fxyhjfp\", \"fgnpp\", \"txieja\", \"dprnj\", \"bgyrp\", \"zsqwqrw\", \"stqzki\",\n \"kwiayb\", \"ulbsn\", \"aetje\", \"vwzbb\", \"tedwyqs\", \"cymiruy\", \"jigpoqx\", \"ypuqsc\", \"weletu\", \"gvibea\", \"chhuldm\",\n \"baylv\", \"wdhovo\", \"imfqu\", \"meodnsk\", \"jhlckqw\", \"jolyfh\", \"jsfkrhr\", \"tnbfzvs\", \"egcfht\", \"qnzmyr\", \"owtrqu\",\n \"oqaqu\", \"xftys\", \"goxfftm\", \"sgbnp\", \"bhfvaz\", \"gospa\", \"jwzlvwk\", \"lqncoqd\", \"xxizglc\", \"bwffm\", \"mhpggzr\",\n \"kdaoewx\", \"anviou\", \"mqiij\", \"wkskpn\", \"enougdh\", \"vldnn\", \"gbfgz\", \"ejmbh\", \"qsdrvsx\", \"mrvbz\", \"cqlufpf\",\n \"kbgjlu\", \"njgna\", \"admrmk\", \"pwwsc\", \"gxkot\", \"pdjwh\", \"ejwxt\", \"bpaxufv\", \"iwjzs\", \"xxfsg\", \"vuhgh\",\n \"srytgb\", \"yesvlux\", \"tggnch\", \"cgnbb\", \"fbzbx\", \"aomoqf\", \"zkrvrjg\", \"ueaoz\", \"dppacnl\", \"ewovhxz\", \"kbvee\",\n \"ixeeb\", \"gwgoqm\", \"hlwlxe\", \"fpmkrk\", \"wzjsr\", \"ispwe\", \"garofu\", \"jcmpec\", \"tggeo\", \"yzdeo\", \"axpmln\",\n \"zhnlhck\", \"duyqcn\", \"tpqwqi\", \"jvmaj\", \"bisgoy\", \"mpwmurb\", \"olqla\", \"ecapwan\", \"kcpxn\", \"xcapin\", \"ooctk\",\n \"sgqql\", \"vcyyjxf\", \"ejyom\", \"jsgtha\", 
\"logxnjg\", \"nypadhj\", \"dprmk\", \"cqkuzb\", \"gratv\", \"tgkjgu\", \"fttcafm\",\n \"tpryi\", \"ubbhw\", \"uwcuyn\", \"zkgohs\", \"snfesz\", \"ifrex\", \"tkbfz\", \"fvvkp\", \"otjiq\", \"lgomjjv\", \"ertracf\",\n \"bregu\", \"kkbizb\", \"hyhvn\", \"zjcnxfl\", \"mceskuj\", \"lmupdq\", \"zdzqzgo\", \"yorppew\", \"fpwtjd\", \"dxvyzt\", \"bbnnu\",\n \"pkycae\", \"ucvapn\", \"dijmkb\", \"nvwwpr\", \"bufkw\", \"zhono\", \"vayxf\", \"hlfwkev\", \"klkvkj\", \"yzgpwg\", \"lcbqr\",\n \"tkkfi\", \"pcgljx\", \"bhduxu\", \"rgfipts\", \"hkjbrr\", \"fobvy\", \"wqmqhxo\", \"yjgvypg\", \"ehgoizl\", \"ipiibzh\",\n \"aqxbxtx\", \"lrtin\", \"fyyuypr\", \"pyrocgm\", \"kwqbg\", \"ukccw\", \"wgsbpvx\", \"pcoivrv\", \"okhxaba\", \"bbuaibf\",\n \"ccvfm\", \"phpst\", \"yxtqiz\", \"cdfbo\", \"sijfljn\", \"gdlhn\", \"bqmbced\", \"tiejf\", \"aurqer\", \"olmyd\", \"prctay\",\n \"lwflhi\", \"bbehvta\", \"oxoda\", \"lklyc\", \"rzedhp\", \"kairil\", \"envan\", \"wdcwfk\", \"xoroddb\", \"womrlr\", \"ruxebe\",\n \"jnpywrd\", \"wrifvz\", \"zkewcd\", \"vllfrn\", \"uvdvjh\", \"bglpya\", \"vzokkbw\", \"apaoqt\", \"xpjizn\", \"xoajmd\", \"xapjwc\",\n \"jcknwg\", \"bjpreep\", \"ffkua\", \"ukcbah\", \"bugvkrf\", \"cbmmfs\", \"cwaczhl\", \"nsqaj\", \"sjeikg\", \"fayqif\", \"slowoh\",\n \"xjpvkpa\", \"ynunjle\", \"bqavt\", \"nkpqudr\", \"neikvd\", \"yuqlzg\", \"pdxbtrb\", \"cashlog\", \"iqiqy\", \"smjmxv\",\n \"zbtpbr\", \"zzamzcv\", \"jmakg\", \"txfswc\", \"pkaym\", \"swlde\", \"utann\", \"mqgpjne\", \"pslfvek\", \"nbiqhb\", \"bzsianu\",\n \"wnxgbi\", \"ahkeeiz\", \"dqdfjg\", \"bptdg\", \"pwita\", \"uqyflq\", \"txabjn\", \"yznjmve\", \"mukcqqf\", \"cxonbf\", \"ixuewjm\",\n \"pzlcat\", \"eikeeo\", \"scwsoa\", \"uaeyw\", \"oeorff\", \"gbqgd\", \"qboqiv\", \"hiulpb\", \"dbbdm\", \"qvdxx\", \"aypxbcn\",\n \"ykjwdbg\", \"pvfxn\", \"shrqyz\", \"zaxtu\", \"pfefgww\", \"jwifrw\", \"zxuud\", \"kpkwhlj\", \"lwptgd\", \"zpdmvsw\", \"takeb\",\n \"ynehl\", \"kixtod\", \"fyrgm\", \"qirzmr\", \"shyvec\", \"xjgzt\", 
\"bwfvht\", \"wyehh\", \"renzc\", \"nnibax\", \"slhfng\",\n \"yjtecc\", \"lghvbzf\", \"qroxvun\", \"mlsed\", \"rrudho\", \"cyffhh\", \"tjlxahp\", \"xmaepzk\", \"jvdzh\", \"bbvegrw\", \"cebcz\",\n \"odjpeam\", \"guerph\", \"tgmphgo\", \"ohtkqq\", \"jcxojz\", \"haeheae\", \"erydxni\", \"hatjxx\", \"kwmgkjw\", \"wmezvy\",\n \"hsuuvfi\", \"ineek\", \"grkxmhb\", \"alxkt\", \"rmspxdg\"]) == 13956\n assert s.minimumLengthEncoding([\"me\", \"time\"]) == 5\n assert s.minimumLengthEncoding(\n [\"yiyqbv\", \"njqvawn\", \"wnlovvp\", \"vogum\", \"jpolc\", \"zleec\", \"sxdrww\", \"rbowr\", \"xsjorra\", \"kwjsx\", \"vornum\",\n \"echku\", \"kuizegn\", \"rhuvv\", \"eemkh\", \"yshht\", \"pbixoa\", \"cmbxvtr\", \"iupia\", \"nmcbq\", \"mgrjsx\", \"ejvniwt\",\n \"svhsel\", \"kazenhf\", \"fevpm\", \"xcwqfgw\", \"ozikzc\", \"mywnmqt\", \"taorwjm\", \"gcshacq\", \"fgtasq\", \"qexygw\",\n \"ljmbari\", \"zfjudos\", \"rgxuzy\", \"kmzryaf\", \"exjfd\", \"mcqnebz\", \"ptoim\", \"zglfi\", \"fhneaz\", \"rexgc\", \"lhplwyr\",\n \"dthdp\", \"jizetec\", \"obyzg\", \"rqupa\", \"yphttge\", \"wdcdn\", \"wdomtr\", \"hchbd\", \"ytyra\", \"upytftl\", \"swbbi\",\n \"qpcybv\", \"dcoxspd\", \"dftkf\", \"nwjfmj\", \"ojbwy\", \"zofuy\", \"adqkt\", \"kpcply\", \"aeukw\", \"fqblb\", \"xurrbpo\",\n \"veioa\", \"puzvl\", \"bnzvlax\", \"tjzsdcw\", \"jarqr\", \"orxjbg\", \"ilrqdri\", \"syjuoyi\", \"htoqdco\", \"gwslw\", \"dpqyf\",\n \"jnkhv\", \"fpqhpr\", \"baewnvc\", \"caunsf\", \"qhbpe\", \"wlckl\", \"lmoroqe\", \"ddlak\", \"qipwbfp\", \"cefqs\", \"surczp\",\n \"jtmfuro\", \"ezhqau\", \"dlsco\", \"hywoqh\", \"lnifq\", \"hvfmu\", \"cqjdkok\", \"tggdact\", \"rwuowdk\", \"attnl\", \"lwhyq\",\n \"mqtsc\", \"bmwajiy\", \"nyohug\", \"vvfpt\", \"lbyazu\", \"sarwago\", \"iccztck\", \"ugsxcw\", \"rpwza\", \"yofmlll\", \"ulhdzhg\",\n \"lbaqk\", \"bwxxwc\", \"dmsbawg\", \"tjloy\", \"imbrkul\", \"xguke\", \"shlkuq\", \"lizjcdu\", \"kmvykl\", \"ilqxxjm\", \"rtbvvqt\",\n \"qisec\", \"zobzr\", \"thwntt\", \"afpifh\", 
\"uwiiovy\", \"hgsyecl\", \"pdgnm\", \"mqyesch\", \"suexztu\", \"msguuwu\", \"yrykkv\",\n \"xtoommc\", \"muteu\", \"bamml\", \"kkhlb\", \"jfrnx\", \"wpytor\", \"zzogpt\", \"yryxxt\", \"hzqofjd\", \"ehtildc\", \"ptclf\",\n \"nyltvd\", \"nrret\", \"qqqqt\", \"uuxunf\", \"jajxt\", \"lzdvlc\", \"gpdtjug\", \"hjsso\", \"jairua\", \"qarxuey\", \"rpwwjwv\",\n \"cjqypep\", \"tuzgcs\", \"oytqxb\", \"rgfmud\", \"stnwn\", \"tzzaop\", \"jpuopzg\", \"qeywd\", \"spnstrg\", \"dfwgntg\", \"yjyqk\",\n \"ioowc\", \"duqfg\", \"gmqxe\", \"xhlbby\", \"liurjk\", \"vdujfm\", \"xxyyn\", \"omapgc\", \"koemzbz\", \"ziiyako\", \"pjmhfrv\",\n \"bshtfgj\", \"ihjvt\", \"pnipuw\", \"fajiuj\", \"rdvcqzd\", \"mgknns\", \"ouwkm\", \"ejnklwc\", \"osepl\", \"gplpyvs\", \"paxrddg\",\n \"gsjlpd\", \"lgnmgl\", \"yifeeer\", \"hhnwlol\", \"fcmxs\", \"ilinwgm\", \"udhfdtq\", \"ceefc\", \"xweqx\", \"jfelwod\",\n \"rtywfjo\", \"kzwrgqx\", \"fcjriov\", \"fzytqv\", \"zcpcddo\", \"scpyzow\", \"kbzegu\", \"gclwr\", \"gmiwlp\", \"rtpka\",\n \"yiywuyy\", \"qceot\", \"dtrgn\", \"ntwbu\", \"fxobd\", \"zmxwza\", \"qcksyz\", \"wgbtmm\", \"pzorve\", \"hztydc\", \"jqlay\",\n \"ijdkbk\", \"uzjrps\", \"gfzibk\", \"gsxqj\", \"kgjrkdd\", \"smdeuk\", \"iwizewp\", \"owjie\", \"kcdccu\", \"ifltqr\", \"zrdfbm\",\n \"pznbcsk\", \"mtkpi\", \"cpasir\", \"flrxrm\", \"uxcxnv\", \"htlfcp\", \"ltukxfr\", \"ftbbha\", \"jhgjgyz\", \"qjreroc\",\n \"vcvtbid\", \"nrhlq\", \"gtkpot\", \"gyplqqg\", \"lnorig\", \"fixhufv\", \"ugcug\", \"ndfug\", \"wuorhe\", \"owocnkw\", \"rcnbf\",\n \"ioiiiui\", \"kakwtne\", \"svxtt\", \"wdrxogm\", \"ibrxs\", \"bddqi\", \"jeguac\", \"hlftdw\", \"nutgfjw\", \"krrzvf\", \"amxuloc\",\n \"deozdoe\", \"ovsvk\", \"sfqsl\", \"slgiw\", \"jbjujag\", \"mhiru\", \"uqksech\", \"davosw\", \"nlueljv\", \"rhtvdu\", \"ivdpdqa\",\n \"qnbenpq\", \"dtapqq\", \"hwwfpxl\", \"oyrfosn\", \"goxgmgo\", \"tbvutl\", \"cbbbcm\", \"iiugpk\", \"hinkem\", \"vvaitk\",\n \"pskyf\", \"hdnekg\", \"nqhfn\", \"dqbozx\", \"zcwpko\", \"kafyu\", 
\"jfegubk\", \"nofqzsk\", \"ujmxxg\", \"akwzemu\", \"yvhxb\",\n \"qqlwofi\", \"hmoecj\", \"qwgtlc\", \"jepvygq\", \"uzggm\", \"fztiews\", \"lvndvf\", \"vulax\", \"znqudh\", \"whgqi\", \"noguo\",\n \"vewkx\", \"uruvgf\", \"ubohmba\", \"aulzi\", \"flvfdlq\", \"yspfie\", \"wugif\", \"qndyiwa\", \"keihmct\", \"rggvn\", \"ojjmuoh\",\n \"sbbcl\", \"cdivmoz\", \"vkusmp\", \"mfddp\", \"kgohwvp\", \"rjbbxw\", \"vsgptj\", \"hbyjoz\", \"gufrv\", \"orxiv\", \"fxcqfw\",\n \"okppik\", \"qlouw\", \"lkryigo\", \"qccvc\", \"ixcnodg\", \"wlfilts\", \"ahqtevp\", \"kkbuha\", \"oehaez\", \"rzczib\", \"vxobk\",\n \"wmetvjs\", \"xfjgeq\", \"eadzl\", \"aeqdvch\", \"czojfq\", \"hxshidl\", \"ofswsj\", \"iwbqcmg\", \"schhwtt\", \"ltyth\", \"wiccu\",\n \"akill\", \"zaaji\", \"qepvfa\", \"mpvrkeu\", \"dcpenm\", \"wdhlk\", \"llqbby\", \"lronwkr\", \"rwtguo\", \"ofnvs\", \"lxdnwzf\",\n \"dctmilf\", \"zhckjd\", \"hajsuac\", \"wpylhy\", \"zhipvm\", \"ihikr\", \"zzwjgvr\", \"gdglrn\", \"skhow\", \"tlqtjl\", \"uypli\",\n \"evdva\", \"civide\", \"iroihm\", \"lvuzid\", \"vexat\", \"ngmvrz\", \"szdhbt\", \"ggrbz\", \"bsmovlt\", \"kguomvl\", \"onzvx\",\n \"nobgxw\", \"tqxemc\", \"vbiyx\", \"fpzpf\", \"ogtvf\", \"yuthri\", \"xszbn\", \"xcuhj\", \"nosnpbp\", \"mowsxg\", \"tfalyy\",\n \"kxombgm\", \"cukrz\", \"krmseq\", \"velzh\", \"kmufxj\", \"nvxlkq\", \"ualvras\", \"wytoucy\", \"qicqyym\", \"pbeujtv\",\n \"haojnbm\", \"xnfffpe\", \"wvoiald\", \"rlyvf\", \"sxamoxw\", \"ztqnmp\", \"biiavx\", \"lnjnzs\", \"arqdjdy\", \"pkrgokc\",\n \"qxswouj\", \"dgqah\", \"mnhzo\", \"ggilb\", \"qscrd\", \"ggvkimw\", \"qlxjys\", \"wximi\", \"aqlhio\", \"iavtvy\", \"grkqf\",\n \"dwrtut\", \"uozutfc\", \"fogxpdb\", \"ydtntlq\", \"vnmpmwp\", \"gtxhwq\", \"mlpihx\", \"yfpjlz\", \"hdvcquq\", \"nunny\",\n \"wklasgp\", \"wxduo\", \"topsqf\", \"tngcpzc\", \"mcrut\", \"pdnsmt\", \"kavaok\", \"seiqsqa\", \"bhgkiyt\", \"mawvhtp\",\n \"domcnrm\", \"fgusghc\", \"wdaufwz\", \"tzpuks\", \"kisndyz\", \"fwyieu\", \"wtdum\", \"ytxhl\", 
\"yhzkmuv\", \"nppnqe\", \"ccvhj\",\n \"dautnyq\", \"hkaliab\", \"kngan\", \"ebmhiop\", \"vsdkcef\", \"nmpcnd\", \"vxvnl\", \"cwcgu\", \"zsuneh\", \"qjgcmd\", \"awvba\",\n \"rzbisxo\", \"oilqrj\", \"neiazlm\", \"hlyrl\", \"tmiht\", \"lwqxxv\", \"gyblrw\", \"gnnjkb\", \"lrxiln\", \"xlwlseh\", \"npfwcvp\",\n \"yjcdhw\", \"rzndd\", \"orlhmip\", \"gatuojh\", \"osotgvv\", \"owksz\", \"kcocizf\", \"izlev\", \"smigns\", \"wtxfwo\", \"knwizte\",\n \"mqjojzp\", \"lkezye\", \"xqldbu\", \"cvbpyl\", \"aoipbz\", \"asrupt\", \"bdwkesh\", \"jpaykm\", \"pksbg\", \"gdbsibd\", \"lfxpwk\",\n \"rmnfph\", \"yzxwke\", \"xjwyusv\", \"yetar\", \"sytdz\", \"pnystzi\", \"yntcqo\", \"egoorl\", \"aydxu\", \"rfdrfhe\", \"flzkos\",\n \"mmjgev\", \"fbjwmvi\", \"jeouc\", \"lcmkri\", \"aggsb\", \"aaeazai\", \"amyxpey\", \"onxqpg\", \"qrjpxq\", \"zanea\", \"niwsgtv\",\n \"nsqja\", \"utgskd\", \"hlcum\", \"frygtl\", \"xjmqetz\", \"upqddd\", \"vxzdstm\", \"hcmtera\", \"ejstou\", \"xkcguf\", \"bokigdk\",\n \"vurnv\", \"zsgrje\", \"nbxlf\", \"tpilcx\", \"lvepux\", \"xacdtp\", \"amdgx\", \"ubbvnx\", \"xmvznh\", \"tlprri\", \"sthkn\",\n \"xhoad\", \"deotaxo\", \"pqzppmw\", \"xlcpx\", \"qwzrpyp\", \"lujabeb\", \"heskwyy\", \"mzzaaur\", \"vnestcs\", \"rryphdl\",\n \"ibdiabi\", \"eoiyt\", \"znflx\", \"clougix\", \"zzadxw\", \"lrrgtf\", \"lsdoakf\", \"yxfmqx\", \"qhnrry\", \"ktcdmv\", \"veygqu\",\n \"btjlo\", \"fcspsc\", \"gozoazm\", \"xcsqgz\", \"aazae\", \"nkuvask\", \"mzdgjq\", \"sihqdhy\", \"zadrwzw\", \"gzcyuea\",\n \"lpgccic\", \"fqtfuzw\", \"bjoqpkc\", \"oydpkxc\", \"sugnnu\", \"hyvygf\", \"axkxo\", \"rsmzb\", \"dlhqmac\", \"gbqby\", \"npqkj\",\n \"odbtb\", \"bdsib\", \"zyasxv\", \"ifxqcc\", \"lmnjwhr\", \"ibuyu\", \"uzhle\", \"ccpwhjr\", \"vhrojnz\", \"fkzfz\", \"fyesm\",\n \"dnvipvm\", \"jbbqn\", \"qdkgl\", \"xkvvgq\", \"dphugaf\", \"soxbfun\", \"rbgokx\", \"biveiz\", \"vbaqtn\", \"qapydgf\", \"llldu\",\n \"ottjpzu\", \"fwjuc\", \"cawio\", \"gbkwe\", \"rrnnxer\", \"luviy\", \"zsalse\", \"ckwdeox\", 
\"ozhqocm\", \"vtozfwz\", \"jztole\",\n \"ydqei\", \"bfugz\", \"psawjp\", \"dzlyrwp\", \"izuyrne\", \"rbwcfr\", \"vdvte\", \"usjbqs\", \"zzovkxr\", \"frfkwk\", \"mmtmdd\",\n \"sntka\", \"wachbzo\", \"rmzvj\", \"scbngo\", \"eqiuiwi\", \"qfakk\", \"cckcmt\", \"owhzow\", \"rejdlw\", \"iprsqdq\", \"twwaldw\",\n \"mfilzyk\", \"jygvx\", \"iewbo\", \"irhko\", \"zpazqhn\", \"ndqbg\", \"ayzxqdz\", \"zvpbh\", \"maapq\", \"pzitrfm\", \"qsgsurv\",\n \"viwcfff\", \"wpgenms\", \"tjmvu\", \"czuemc\", \"infxoo\", \"avhbw\", \"nugkqx\", \"xubakjp\", \"ndask\", \"utaqq\", \"njhuxq\",\n \"sdvuex\", \"tfmxqp\", \"bydovjo\", \"bizxjsp\", \"zoozxyv\", \"jegei\", \"gkpqobw\", \"psumbtg\", \"gkgoh\", \"sgcbpql\",\n \"xxkhy\", \"kdorkr\", \"hcomj\", \"ulrpyv\", \"rhplil\", \"tyyochd\", \"xhzul\", \"srdjmns\", \"kgukye\", \"yepvs\", \"xnobsjb\",\n \"umxmtub\", \"wvqasr\", \"igftpzw\", \"exhecn\", \"rreee\", \"jpxuvxh\", \"jriqf\", \"akexunb\", \"ekvdsoe\", \"ytzvj\",\n \"vfrlyae\", \"pmfai\", \"biouzle\", \"xkbce\", \"clzyi\", \"xhjoso\", \"wmxkxb\", \"dqzzig\", \"ydtby\", \"gskwj\", \"wlkwbz\",\n \"zepvllz\", \"zsgqp\", \"blntawk\", \"eynmil\", \"bdqyp\", \"wgtnqbc\", \"rrgaq\", \"gtafuzo\", \"qdiko\", \"kkcsdo\", \"zwqhs\",\n \"kugzbmf\", \"wtvvs\", \"kqsdx\", \"mxsuxiz\", \"pgbgjfe\", \"vodfr\", \"qbvwu\", \"vfwbhgw\", \"ayojye\", \"kolzfqg\", \"xnbecj\",\n \"akbcnf\", \"uutrn\", \"upmesa\", \"marqej\", \"bbucee\", \"bazqbau\", \"qikgsyf\", \"oeayzn\", \"uilxnzr\", \"vpnxknl\",\n \"btgtxgh\", \"vjaav\", \"zaxtzah\", \"msweps\", \"awduwld\", \"gzaep\", \"ngvgc\", \"qpoqdgn\", \"kimndg\", \"qilmmpw\",\n \"oafhlyp\", \"nyelgvw\", \"onymk\", \"feycbc\", \"dhcrx\", \"siqpfly\", \"tyvycmf\", \"huctqp\", \"uscjrp\", \"bbptd\", \"msdmu\",\n \"xlxhye\", \"xnyzcox\", \"kyskda\", \"injdkmp\", \"jiwus\", \"spjylwd\", \"eqcrnt\", \"snfiu\", \"jvwvge\", \"yfeaw\", \"mmdnsjj\",\n \"suzdw\", \"xiupf\", \"rjwjhng\", \"tqvasy\", \"rmibpa\", \"zuqax\", \"prpndnp\", \"efryqe\", \"pwuqfy\", \"wpqlfs\", 
\"aeswq\",\n \"cxkeiue\", \"jydxzfi\", \"tzfvwp\", \"zzgtw\", \"mupiusx\", \"sojavt\", \"dxmsgq\", \"migjiyj\", \"kixjk\", \"ywwvcpl\",\n \"khzcuo\", \"oykhx\", \"fochin\", \"foxbfkc\", \"sizjg\", \"wrjcvr\", \"ceadd\", \"tvfqgxq\", \"whzhche\", \"dcoeti\", \"mpilfib\",\n \"cphie\", \"ucpnjm\", \"ajltvx\", \"kpizym\", \"vevfsrs\", \"jznrri\", \"yvhxomr\", \"cbcnk\", \"yuwuhu\", \"jywuzed\", \"kqakusq\",\n \"jrnzgfo\", \"mjimzz\", \"mfjybnd\", \"ntqyq\", \"junxxck\", \"myvqajv\", \"kvuqs\", \"obfxw\", \"jwuba\", \"vnrvzvy\", \"aeric\",\n \"vtgda\", \"nkrocpt\", \"ahitg\", \"dzxtr\", \"zswwc\", \"yhxap\", \"fdhiwr\", \"cpxtqv\", \"izbmo\", \"zyioo\", \"vysnoe\",\n \"ouuyvj\", \"cumdhzn\", \"dbsmph\", \"cktjem\", \"vbmxy\", \"utgfyhc\", \"rqdeorp\", \"btnlmd\", \"chxwlt\", \"nsghoqi\",\n \"egycsm\", \"wkanat\", \"lzjyf\", \"donyx\", \"cchqsa\", \"xozzz\", \"yzmnf\", \"jfzuh\", \"dpcpg\", \"hlahz\", \"vobopk\",\n \"lssfeli\", \"ccttzi\", \"glzgqpv\", \"oyqzug\", \"qqhkrr\", \"euwotv\", \"hwbmtz\", \"hiylhly\", \"bppzne\", \"yetyyvs\",\n \"cnbwcby\", \"hzblk\", \"pfjmxt\", \"dsxvt\", \"vvkju\", \"zjrfr\", \"gdbhb\", \"udoad\", \"nbhpzfm\", \"iwetbym\", \"atmly\",\n \"tnxli\", \"myegb\", \"hiwqsk\", \"btrajk\", \"nhrmwn\", \"ftmbecv\", \"xopht\", \"eiikqy\", \"qizanwa\", \"cwxiatf\", \"jshjva\",\n \"llrtkn\", \"zhivu\", \"lmwiu\", \"oaeaqz\", \"oxotfub\", \"jnkafm\", \"juhrmq\", \"mqzbtw\", \"puiaxty\", \"dnahvoj\", \"gaxhz\",\n \"xfnay\", \"iqmlnlq\", \"xudhcg\", \"izpkz\", \"tqttmt\", \"bwnbs\", \"fdufd\", \"vhzyymh\", \"zhqtxr\", \"evbcrv\", \"xvnma\",\n \"dgcwy\", \"cwxzlbz\", \"oodiol\", \"teyim\", \"kqqfjub\", \"ftsqzi\", \"arfztkr\", \"oqlujx\", \"rpkkdov\", \"ptoff\", \"ivxaxr\",\n \"nxeept\", \"cacpl\", \"tehir\", \"spvggl\", \"qfzxkn\", \"bhwkukx\", \"fkdpuq\", \"xdrngre\", \"fnfplq\", \"dzbrl\", \"ufgxu\",\n \"sciec\", \"fgdydvw\", \"nmpaqxi\", \"ydsvfv\", \"natjz\", \"lruyvzf\", \"xznznxp\", \"mhfrh\", \"kddsk\", \"uwatn\", \"uklzs\",\n \"lnuta\", \"ryizc\", 
\"cvwko\", \"tnzpk\", \"ywpiv\", \"vbvcagq\", \"pzolw\", \"nmyfhg\", \"cshkofj\", \"ksptw\", \"kqejh\",\n \"zgzjqzo\", \"mxzrw\", \"enabosq\", \"vmubgc\", \"sfzcj\", \"hewvk\", \"ewhrq\", \"oifnsmi\", \"izdnvu\", \"cshgtk\", \"mqotuhd\",\n \"gnqgj\", \"rxailbm\", \"iyhxvtu\", \"ncjzklq\", \"zjmnoc\", \"awqwos\", \"ugujppc\", \"spbvfwl\", \"gntsvo\", \"euksu\",\n \"qnvneph\", \"crhmf\", \"brktmf\", \"mvgmr\", \"yzcskrp\", \"tihawec\", \"edqmxpn\", \"fxyymlr\", \"dzfkucm\", \"prldz\",\n \"gplrlhz\", \"bohwr\", \"bhebbk\", \"mmecj\", \"segydd\", \"ptslsb\", \"pyhgw\", \"cwmrq\", \"mjfhflh\", \"xhuid\", \"npxmb\",\n \"izilq\", \"dczhqh\", \"tgfnxtb\", \"zrylvo\", \"lctxrar\", \"ylhrbii\", \"rfxedv\", \"llvhzjq\", \"bjocv\", \"wbnex\", \"cnohnf\",\n \"xahrl\", \"rouvwyc\", \"hbhovgv\", \"dhucp\", \"ncmff\", \"ncsskg\", \"gsjbyin\", \"lroxscf\", \"whfaenl\", \"vsfultg\",\n \"floxkpy\", \"captoai\", \"qwolyex\", \"ggaypn\", \"wzunypd\", \"pjixeu\", \"gxnjkoc\", \"pqiqhn\", \"xakjmgz\", \"vqizkx\",\n \"gdzcxr\", \"kyxwdd\", \"pgxmazn\", \"qeuwf\", \"bduknm\", \"tcrcn\", \"nehgee\", \"wktbcgu\", \"jwqltdt\", \"wczkai\", \"drkqs\",\n \"qhdqnn\", \"oobxirc\", \"lbunv\", \"ifscr\", \"xnfpbrw\", \"yrrdbax\", \"fbocs\", \"tewne\", \"iobixe\", \"zgosas\", \"yhesn\",\n \"xlqwd\", \"pfcen\", \"slsjffx\", \"ilwatrc\", \"mhsmgp\", \"iteghl\", \"aqhufdl\", \"kxgpqcu\", \"ryrcgp\", \"azidf\", \"smlnl\",\n \"rocxvbt\", \"iutfc\", \"loapgbr\", \"musulp\", \"dqcnj\", \"tpgbkfh\", \"wvskii\", \"itkfopo\", \"kytyb\", \"rzahbu\", \"aewptd\",\n \"ohergbb\", \"cadxh\", \"aphwelj\", \"huooyzn\", \"gtttia\", \"izeyhcr\", \"cfvxz\", \"aitaxyp\", \"vypqost\", \"ebfnmif\",\n \"kgiucm\", \"zryyu\", \"oxgnbpt\", \"frpwo\", \"ouqvodl\", \"pdaazh\", \"gxwmf\", \"dozxsjm\", \"yndpsik\", \"zcwvu\", \"mihug\",\n \"jgodklw\", \"ysklw\", \"cfxqv\", \"yqvtz\", \"rctnp\", \"xjywa\", \"kpqyw\", \"hhtegzt\", \"rnwbeoi\", \"uyxqum\", \"jahcwbe\",\n \"jzjns\", \"ovwoaz\", \"oqmsrua\", \"natbejl\", \"deffv\", 
\"okgbr\", \"paqhy\", \"jkafhte\", \"lifsknp\", \"afmskh\", \"oemdro\",\n \"oxuwov\", \"qtyxa\", \"hkpfsm\", \"ulaubn\", \"tciurw\", \"myohwlo\", \"okuiejb\", \"ormoqsb\", \"gmipz\", \"hterzir\", \"ekxzre\",\n \"xkevge\", \"ihenf\", \"nnhzv\", \"eocjmx\", \"upzal\", \"oounfko\", \"myhbwub\", \"fwipva\", \"pkzzvpd\", \"nrupm\", \"vluzq\",\n \"fxkoyho\", \"atzktr\", \"aomrp\", \"qwpser\", \"ejagmb\", \"cfigelm\", \"bvanb\", \"cgcgabo\", \"hmjvlqt\", \"hxxocf\", \"ftqaud\",\n \"htuipy\", \"bhwmcn\", \"tgyvaqe\", \"lvuwh\", \"yiabzs\", \"rzzavu\", \"fiubm\", \"uuqsb\", \"riyakuf\", \"psscffd\", \"kvckzr\",\n \"fktmnf\", \"ivzqexi\", \"nhxzm\", \"kffjmb\", \"vdzxv\", \"esago\", \"bfikw\", \"gaiuxmz\", \"volokcm\", \"jypcs\", \"psibvs\",\n \"hxaxklf\", \"lmqwgy\", \"spnbimo\", \"mtihak\", \"xikoiy\", \"rmmtv\", \"phaqgxj\", \"zcuwkhk\", \"emodbyb\", \"ztahsya\",\n \"ieiqm\", \"lfoquh\", \"emznnq\", \"pnhlgut\", \"pgvads\", \"cqsjx\", \"lxnjei\", \"zpque\", \"rdjbiyb\", \"sxedpu\", \"potnqva\",\n \"iirkn\", \"rjmnrxd\", \"ksgcd\", \"waeymnh\", \"tizdz\", \"kproa\", \"wpttygd\", \"lvyze\", \"peewvgm\", \"fwtyzbw\", \"zitkk\",\n \"gfgqr\", \"udgvlz\", \"swqspo\", \"ohhvyq\", \"kgyuau\", \"hcerp\", \"pdomlm\", \"twabkk\", \"zfsea\", \"epiwp\", \"xgycjpt\",\n \"jtkdh\", \"mxmdm\", \"rtkzm\", \"qkacy\", \"nuvdiq\", \"agctak\", \"hypgyh\", \"ewtjp\", \"paysolw\", \"bcutebe\", \"xelxyb\",\n \"gzdvrth\", \"vpzfv\", \"cxrkt\", \"admiyzi\", \"lqlmn\", \"zbjpbg\", \"tlvdnli\", \"zetnox\", \"ylcsobo\", \"balajod\", \"igoume\",\n \"sxcgw\", \"sbkkafk\", \"fmndnnw\", \"incsa\", \"jyupkg\", \"uhvvc\", \"rswnbth\", \"nvprfj\", \"figqf\", \"znyidqi\", \"aijper\",\n \"euidr\", \"dftxkze\", \"vnppi\", \"splwifc\", \"fprgafl\", \"ixzaz\", \"mrhqtne\", \"dtkjsy\", \"dsmqrgy\", \"xfscz\", \"cymvmpu\",\n \"vptkfdx\", \"zrgrjq\", \"mqvwsur\", \"hdtlw\", \"ugdpwun\", \"cvxitc\", \"vytvqg\", \"pmtpfz\", \"nfdtdt\", \"umvwjuc\", \"jouxc\",\n \"qpypri\", \"pdhqp\", \"lmise\", \"wlsvcfg\", 
\"aqdkzcb\", \"qlrmrfz\", \"pbgoyi\", \"xmsskoh\", \"jjdye\", \"xvsdmq\", \"ymjeipy\",\n \"igjyv\", \"uiojvmc\", \"uckoww\", \"grlnyeg\", \"hpglp\", \"omnnyy\", \"iiliir\", \"cnucbcx\", \"pcxvs\", \"hipad\", \"xmiltkj\",\n \"oorwi\", \"qgoxjj\", \"jnmviqs\", \"wpleqn\", \"tudxw\", \"pcogem\", \"hgewaf\", \"niwfexy\", \"vcttgcb\", \"anjgovq\",\n \"epgmscd\", \"mdtru\", \"xvapv\", \"rydjik\", \"kopppcr\", \"mjbsmu\", \"unxoakz\", \"ldpsw\", \"frksjr\", \"vyxxg\", \"yyydri\",\n \"szidq\", \"qvbtd\", \"qratl\", \"xwfov\", \"bzhqyxl\", \"fskrtf\", \"pcpzmnv\", \"xuxwx\", \"vzbevnb\", \"ebaqz\", \"dbpuek\",\n \"ooqwj\", \"gaimp\", \"coelqh\", \"bwuceq\", \"oxpfjt\", \"zrqyc\", \"rwllk\", \"pqunv\", \"ufbnn\", \"tbnjoz\", \"kkqmrxu\",\n \"qyyrm\", \"hislf\", \"wyuck\", \"ubpre\", \"pdioi\", \"aryhv\", \"vdcxv\", \"rkgmaag\", \"czlzokw\", \"gtxuduz\", \"grpijx\",\n \"qzrar\", \"qhues\", \"rmznt\", \"sxxmved\", \"onjzuwl\", \"atbjhip\", \"nrardl\", \"alrocy\", \"cfkip\", \"ihtbf\", \"pqdgm\",\n \"hmokun\", \"dpghac\", \"otwml\", \"mnbzwa\", \"ehetlt\", \"rchvq\", \"lwjgywn\", \"lzdmjo\", \"nvhohdp\", \"tmshcpc\", \"gavjv\",\n \"ycnkv\", \"uynzh\", \"bvpnfjq\", \"lfbem\", \"qberui\", \"vrmmhx\", \"wpbqtfq\", \"jujpx\", \"dujgkof\", \"hrpbso\", \"zhcdt\",\n \"iybngyb\", \"rgeruza\", \"nesyxr\", \"cihgfe\", \"hjgskb\", \"zspxeqm\", \"inzrgyd\", \"crkjq\", \"iooshwp\", \"muvvj\", \"wakis\",\n \"rowibwa\", \"qikwypf\", \"aportho\", \"pubcgx\", \"vqoqpfi\", \"rnpbri\", \"ussjv\", \"looor\", \"xkzvdv\", \"tstegg\",\n \"zgiiokw\", \"rwvyaun\", \"mqqla\", \"asnqp\", \"nghuryl\", \"hlvhn\", \"ecuotnu\", \"judvbu\", \"xgvuw\", \"oeckn\", \"hdhttsg\",\n \"hcyhu\", \"klbyjc\", \"tnrmqnc\", \"mjojxhi\", \"kvdet\", \"vbmevim\", \"oglrzs\", \"afbscdi\", \"zxrffti\", \"firzgmz\",\n \"oenim\", \"wgpua\", \"asiep\", \"kyteq\", \"wpeneca\", \"qixmeoq\", \"zaofon\", \"csxxtr\", \"cpwmnl\", \"feylas\", \"idjuo\",\n \"mrtpvta\", \"jjvmjy\", \"mnljocc\", \"lnvjleq\", \"oognud\", \"rbyneq\", 
\"rhvomm\", \"fldrkpk\", \"znvrp\", \"myswmz\", \"jiloe\",\n \"juivjmo\", \"ylhbyzl\", \"ndmabkt\", \"sgdvlq\", \"pmnddmi\", \"utpuj\", \"kfisv\", \"nxfeell\", \"mxhgqd\", \"ccvdsdg\",\n \"emtybo\", \"zmkylbt\", \"mmrpi\", \"dkwlgq\", \"iwlappb\", \"uimsrnu\", \"mkxaxmi\", \"tcvll\", \"njggal\", \"kmqud\", \"evgzlh\",\n \"oaxizbp\", \"jiuej\", \"xknlp\", \"cyksydh\", \"gbixmz\", \"vtouyk\", \"sxjpkio\", \"qhubt\", \"kflvnb\", \"sjdfggl\", \"bxozyj\",\n \"xekbh\", \"wtmcb\", \"xtapfco\", \"rnornl\", \"ursdpki\", \"waonim\", \"eibfyed\", \"zniinaz\", \"uyfohq\", \"qcaxlt\",\n \"koyaapa\", \"pjuvbsi\", \"ecpdl\", \"ifaqwm\", \"yyumzc\", \"gvfngfp\", \"lttul\", \"flyza\", \"uasdlme\", \"oklhb\", \"wulkzzv\",\n \"ziwsxo\", \"jqcxiu\", \"qdzrwgm\", \"zjdwy\", \"uumns\", \"emlnp\", \"irnrqp\", \"gqkza\", \"oynpcz\", \"yxyea\", \"zpamf\",\n \"gyehxbv\", \"nplkhcc\", \"rxeekyo\", \"kecgp\", \"gseju\", \"nkisxqf\", \"vlyud\", \"fxxihhm\", \"yjgtml\", \"fehwpdi\",\n \"wclnvyy\", \"lriwrc\", \"ikparv\", \"volfh\", \"ysphh\", \"szrvrv\", \"rqlmz\", \"jyqut\", \"fyftsj\", \"uvwfip\", \"rngwgm\",\n \"mjwaz\", \"roehjki\", \"ploxokr\", \"yjbalp\", \"fspkq\", \"yfxrb\", \"kzulvk\", \"ordxp\", \"vdrrt\", \"wdiojwd\", \"ridzl\",\n \"niykdvu\", \"whyycmn\", \"riwcma\", \"bkhgkrb\", \"nsine\", \"emgtgf\", \"zoymw\", \"ljtvhzb\", \"kfyfdma\", \"piygxdl\",\n \"onfwgdf\", \"fwmkm\", \"vqbljay\", \"icife\", \"bxfli\", \"yeygr\", \"qenhgm\", \"mtxuckj\", \"kdcyx\", \"kwqhfcn\", \"ywkfy\",\n \"prbpw\", \"pheyc\", \"kmnds\", \"cacqs\", \"kvekiqy\", \"bfvfhdy\", \"gxulp\", \"skmcra\", \"exomt\", \"lcxue\", \"mnvvday\",\n \"rsddl\", \"gooegc\", \"udght\", \"doymnin\", \"ccdap\", \"wuive\", \"dyyln\", \"rynust\", \"luxabyg\", \"kdkkyyw\", \"vawqfsy\",\n \"rmeswm\", \"rcxzyv\", \"clpowz\", \"pdntqm\", \"tvjkkmz\", \"iiclw\", \"nhudzen\", \"cybhu\", \"crwtw\", \"enypnh\", \"ygekg\",\n \"hrjwqt\", \"peissge\", \"wangcy\", \"rbpoik\", \"raqulbf\", \"gyisnsj\", \"rgbqn\", \"lgvuzb\", \"djicf\", 
\"epnuu\", \"nsapc\",\n \"voatgh\", \"yorfehc\", \"jxfttat\", \"wyuivb\", \"bwopl\", \"odwdsh\", \"anchkv\", \"sepvew\", \"qoxxmae\", \"bpvqnj\", \"sngfo\",\n \"buoazou\", \"zhijssa\", \"janng\", \"uvdbd\", \"yfvkqo\", \"lcjii\", \"mvacvrz\", \"xztiar\", \"lpbtrqa\", \"ukbpdx\", \"okaqpgr\",\n \"idgqlj\", \"ewglgo\", \"ruymhi\", \"pcidw\", \"bvuqj\", \"npzch\", \"yppyan\", \"oiguirj\", \"iijvwqj\", \"jvbwjys\", \"yjtunfc\",\n \"iaikra\", \"oduhdgk\", \"ivixur\", \"ibcgai\", \"djzvcbx\", \"lmtsul\", \"lgnwzol\", \"wursq\", \"xsxbqwq\", \"jqvwnc\",\n \"dcwwvtb\", \"vwybnr\", \"bughwjl\", \"rnelxb\", \"hmacv\", \"ufgdygl\", \"aabuat\", \"oynwask\", \"gnfjjf\", \"zipbq\", \"zxstn\",\n \"jdrbprf\", \"jmkvny\", \"rblpql\", \"vykdj\", \"qaakyqw\", \"osbhddb\", \"avgldyy\", \"kvpoa\", \"fnqcliu\", \"zzlninw\",\n \"drsal\", \"omswys\", \"hwqcpct\", \"ecraq\", \"fvhsbjq\", \"raauy\", \"pfmoz\", \"vvqvcm\", \"tbjqjun\", \"jcfbegq\", \"otiwup\",\n \"axvvce\", \"dhpdnx\", \"pennr\", \"hvvmvzv\", \"binezl\", \"ygdmcuo\", \"ypwnqn\", \"aloxdv\", \"ucieh\", \"kovbtag\", \"rgfpaww\",\n \"fpbftg\", \"spjowfr\", \"zridoy\", \"blwbbf\", \"evwlxi\", \"itbcz\", \"hgixuo\", \"qmoqmjb\", \"tkeeis\", \"pjiaq\", \"rbpje\",\n \"ledoui\", \"ubecht\", \"mphdd\", \"uzswsbb\", \"ntsybr\", \"qmnijyp\", \"pqwawe\", \"ltytill\", \"dpnxy\", \"pkxqcol\", \"ayrdi\",\n \"mycnd\", \"knotsn\", \"zvcrjl\", \"qwroblg\", \"vtrktey\", \"dzilezi\", \"wzkxg\", \"varqc\", \"xlpttyc\", \"xxqhnl\", \"jpxywa\",\n \"kjdsh\", \"hdseebw\", \"bxqbp\", \"flazqce\", \"xrtab\", \"rupsfq\", \"asswer\", \"rhqof\", \"hjzdv\", \"addsgax\", \"cuahzjj\",\n \"xwdilr\", \"osqgg\", \"pfhwv\", \"rqorah\", \"ggdlnv\", \"truvaoj\", \"jzuldwf\", \"mjddj\", \"vixtn\", \"eslxoaj\", \"cmoypm\",\n \"jvvzs\", \"oqgxcc\", \"tptls\", \"wwgwbj\", \"tysuhg\", \"xbnqb\", \"iogjvg\", \"fbxdmr\", \"zdvsmx\", \"hiuja\", \"watrt\",\n \"kjawab\", \"entxk\", \"jmnkaox\", \"zznsox\", \"asmzc\", \"soblvp\", \"quyxjw\", \"udrdc\", \"hyylvvw\", 
\"gzfwxuv\", \"jjqmjw\",\n \"faegxbl\", \"lqjcg\", \"bzmruq\", \"bykuh\", \"miwhd\", \"ykgtwhk\", \"oyobzwi\", \"oltwpua\", \"ctulabr\", \"dwandd\", \"vhuhox\",\n \"vtlknw\", \"ywvln\", \"qemqdeg\", \"akezvx\", \"kjmjpv\", \"vwuftx\", \"kreaxnj\", \"fvfop\", \"cxabs\", \"jfacbje\", \"eecnz\",\n \"cmblit\", \"gfvpoq\", \"whywnh\", \"pghvx\", \"ohgkmf\", \"xxtiwd\", \"nkojni\", \"dlcicnp\", \"bwyvyyd\", \"gifup\", \"vgjfr\",\n \"hhteifi\", \"kjhffq\", \"pawqaxl\", \"yozro\", \"slxluvd\", \"amqcquy\", \"vnnxkr\", \"wgdur\", \"rvawiu\", \"thcwnc\", \"cddut\",\n \"vnrtrv\", \"fnfio\", \"nhvxe\", \"rfdqmj\", \"ucblh\", \"ccbnt\", \"lxckaoy\", \"fnwcbx\", \"gmdbiwt\", \"ypvwjy\", \"cbjazk\",\n \"qmujnm\", \"nsqot\", \"lhcqt\", \"ijxcts\", \"nujrms\", \"itxel\", \"ghukr\", \"qpwitlr\", \"gcafqrn\", \"lcoho\", \"lfzab\",\n \"vwhgceb\", \"vgsgy\", \"jrtgo\", \"ryxlz\", \"deoyq\", \"ybenly\", \"lyysca\", \"sodvazo\", \"hbnnoz\", \"ovgvda\", \"elwtjx\",\n \"soydmn\", \"trdsi\", \"mwwjwo\", \"vupwj\", \"dszpcv\", \"kkhjdj\", \"ewmyo\", \"nmpeq\", \"oepldcq\", \"xttrgu\", \"wbcbxi\",\n \"jakzk\", \"peukyw\", \"fvcqv\", \"xklwuu\", \"hsmva\", \"kslmkq\", \"azllbig\", \"stnzih\", \"wfyud\", \"ihauy\", \"cfxmj\",\n \"pdyogwv\", \"dcqdpa\", \"xhusy\", \"jfpmpmm\", \"odeiiw\", \"ozyaer\", \"uykzvma\", \"tuaznxj\", \"kdnbdki\", \"syrnsem\",\n \"fdysz\", \"hhrpo\", \"fglzfi\", \"vgcqzqm\", \"qhsjr\", \"bvboe\", \"dpfwpvg\", \"mvvry\", \"itnnr\", \"lgykbe\", \"pscow\",\n \"mkrgeqv\", \"czffv\", \"apteht\", \"jeqixsx\", \"ksmbe\", \"zamivv\", \"vvmyo\", \"cwwoce\", \"sppubxc\", \"qaich\", \"nmbxr\",\n \"tfkwfxi\", \"iakhezl\", \"fxujis\", \"fkwffe\", \"antaylq\", \"mmfgstq\", \"zxaacy\", \"zlswx\", \"pbqxil\", \"eupck\",\n \"qzcxpbe\", \"rjalbzr\", \"wioagbq\", \"kreec\", \"zsdcuft\", \"rrdzb\", \"ocdlvq\", \"oxiroo\", \"zcxsqh\", \"wbrsi\", \"fqike\",\n \"oskzupi\", \"thvof\", \"dicbyst\", \"iojwe\", \"hyfizq\", \"yoknhww\", \"nupiyyn\", \"ievah\", \"slcgmxg\", \"cnecpa\", \"lcwsoj\",\n 
\"hnqsc\", \"ghipbi\", \"exobr\", \"nwpnq\", \"dmhbj\", \"amdbmwl\", \"xfbzovs\", \"puizvu\", \"yvsus\", \"ykysqg\", \"bgqdv\",\n \"zgqbr\", \"zkjpkej\", \"crkot\", \"zciymk\", \"tleogn\", \"sayrmz\", \"elwma\", \"zugjva\", \"uifwsmw\", \"wstrg\", \"xbotd\",\n \"hinsg\", \"qpgyoyp\", \"xzfocdy\", \"mbvuepb\", \"dtphufk\", \"cyapnt\", \"yyehhad\", \"ohdrd\", \"mlibm\", \"qzdfil\",\n \"rdwszqx\", \"bzcbmyn\", \"uarjlg\", \"mtwpqmx\", \"nmagl\", \"cepniel\", \"tylvaa\", \"melhd\", \"jygeneg\", \"fdglfy\",\n \"xcpciu\", \"ayrel\", \"bxceshv\", \"kspyg\", \"iclkaz\", \"ykbzt\", \"nrnkzo\", \"kxkto\", \"fabzszn\", \"edalls\", \"nilmh\",\n \"wwawgnn\", \"gymbtx\", \"mzipa\", \"ajevx\", \"qppisv\", \"otqhsf\", \"ippxak\", \"bixnqd\", \"uqitwo\", \"soxcug\", \"loiscd\",\n \"wqrjk\", \"rqntoa\", \"fzpxlp\", \"tuaob\", \"pyqqms\", \"krbzmmj\", \"aijqpfg\", \"nstqrbu\", \"wmtiahz\", \"joplby\", \"jyszxq\",\n \"jnxtyhe\", \"lbvfv\"]) == 14011\n",
"step-ids": [
5,
7,
8,
10,
12
]
}
|
[
5,
7,
8,
10,
12
] |
<|reserved_special_token_0|>
class I2CTargetApplet(GlasgowApplet, name='i2c-target'):
logger = logging.getLogger(__name__)
help = 'accept I²C transactions'
description = """
Process transactions on the I²C bus as a software-defined target.
This applet allows emulating any I²C device in Python, provided that the I²C initiator supports
clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes
many I²C initiators.)
The default emulated device is a dummy device that logs all transactions, acknowledges all
writes, and returns 0xFF in response to all reads.
"""
required_revision = 'C0'
__pins = 'scl', 'sda'
interface_cls = _DummyI2CTargetInterface
@classmethod
def add_build_arguments(cls, parser, access):
super().add_build_arguments(parser, access)
for pin in cls.__pins:
access.add_pin_argument(parser, pin, default=True)
def i2c_address(arg):
return int(arg, 0)
parser.add_argument('-A', '--address', type=i2c_address, metavar=
'I2C-ADDR', required=True, help='I²C address of the target')
def build(self, target, args):
self.mux_interface = iface = target.multiplexer.claim_interface(self,
args)
iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,
pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface
.get_in_fifo(), address=args.address))
@classmethod
def add_run_arguments(cls, parser, access):
super().add_run_arguments(parser, access)
parser.add_argument('--pulls', default=False, action='store_true',
help='enable integrated pull-ups')
async def run(self, device, args):
pulls = set()
if args.pulls:
pulls = {args.pin_scl, args.pin_sda}
iface = await device.demultiplexer.claim_interface(self, self.
mux_interface, args, pull_high=pulls)
return self.interface_cls(iface, self.logger)
async def interact(self, device, args, iface):
while True:
await iface.read_event()
class I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):
@synthesis_test
def test_build(self):
self.assertBuilds(args=['-A', '0b1010000'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class I2CTargetSubtarget(Elaboratable):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class I2CTargetInterface(metaclass=ABCMeta):
def __init__(self, interface, logger):
self.lower = interface
self._logger = logger
self._level = (logging.DEBUG if self._logger.name == __name__ else
logging.TRACE)
def _log(self, message, *args):
self._logger.log(self._level, 'I²C: ' + message, *args)
async def read_event(self):
event, = await self.lower.read(1)
if event == Event.START:
self._log('event start')
await self.on_start()
elif event == Event.STOP:
self._log('event stop')
await self.on_stop()
elif event == Event.RESTART:
self._log('event restart')
await self.on_restart()
elif event == Event.WRITE:
data, = await self.lower.read(1)
self._log('event write data=<%02x>', data)
ack = await self.on_write(data)
assert isinstance(ack, bool)
self._log('write %s', 'ack' if ack else 'nak')
await self.lower.write([ack])
elif event == Event.READ:
self._log('event read')
data = await self.on_read()
assert isinstance(data, int) and data in range(256)
self._log('read data=<%02x>', data)
await self.lower.write([data])
else:
assert False
@abstractmethod
async def on_start(self):
pass
@abstractmethod
async def on_stop(self):
pass
@abstractmethod
async def on_restart(self):
pass
@abstractmethod
async def on_write(self, data):
pass
@abstractmethod
async def on_read(self):
pass
class _DummyI2CTargetInterface(I2CTargetInterface):
async def on_start(self):
pass
async def on_stop(self):
pass
async def on_restart(self):
pass
async def on_write(self, data):
return True
async def on_read(self):
return 255
class I2CTargetApplet(GlasgowApplet, name='i2c-target'):
logger = logging.getLogger(__name__)
help = 'accept I²C transactions'
description = """
Process transactions on the I²C bus as a software-defined target.
This applet allows emulating any I²C device in Python, provided that the I²C initiator supports
clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes
many I²C initiators.)
The default emulated device is a dummy device that logs all transactions, acknowledges all
writes, and returns 0xFF in response to all reads.
"""
required_revision = 'C0'
__pins = 'scl', 'sda'
interface_cls = _DummyI2CTargetInterface
@classmethod
def add_build_arguments(cls, parser, access):
super().add_build_arguments(parser, access)
for pin in cls.__pins:
access.add_pin_argument(parser, pin, default=True)
def i2c_address(arg):
return int(arg, 0)
parser.add_argument('-A', '--address', type=i2c_address, metavar=
'I2C-ADDR', required=True, help='I²C address of the target')
def build(self, target, args):
self.mux_interface = iface = target.multiplexer.claim_interface(self,
args)
iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,
pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface
.get_in_fifo(), address=args.address))
@classmethod
def add_run_arguments(cls, parser, access):
super().add_run_arguments(parser, access)
parser.add_argument('--pulls', default=False, action='store_true',
help='enable integrated pull-ups')
async def run(self, device, args):
pulls = set()
if args.pulls:
pulls = {args.pin_scl, args.pin_sda}
iface = await device.demultiplexer.claim_interface(self, self.
mux_interface, args, pull_high=pulls)
return self.interface_cls(iface, self.logger)
async def interact(self, device, args, iface):
while True:
await iface.read_event()
class I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):
@synthesis_test
def test_build(self):
self.assertBuilds(args=['-A', '0b1010000'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class I2CTargetSubtarget(Elaboratable):
<|reserved_special_token_0|>
def elaborate(self, platform):
m = Module()
m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)
m.d.comb += i2c_target.address.eq(self.address)
with m.FSM():
w_data = Signal(8)
m.d.comb += i2c_target.busy.eq(1)
with m.State('IDLE'):
m.d.comb += i2c_target.busy.eq(0)
with m.If(i2c_target.start):
m.next = 'SEND-START-EVENT'
with m.Elif(i2c_target.stop):
m.next = 'SEND-STOP-EVENT'
with m.Elif(i2c_target.restart):
m.next = 'SEND-RESTART-EVENT'
with m.Elif(i2c_target.write):
m.d.sync += w_data.eq(i2c_target.data_i)
m.next = 'SEND-WRITE-EVENT'
with m.Elif(i2c_target.read):
m.next = 'SEND-READ-EVENT'
with m.State('SEND-START-EVENT'):
m.d.comb += [self.in_fifo.w_data.eq(Event.START), self.
in_fifo.w_en.eq(1)]
with m.If(self.in_fifo.w_rdy):
m.next = 'IDLE'
with m.State('SEND-STOP-EVENT'):
m.d.comb += [self.in_fifo.w_data.eq(Event.STOP), self.
in_fifo.w_en.eq(1)]
with m.If(self.in_fifo.w_rdy):
m.next = 'IDLE'
with m.State('SEND-RESTART-EVENT'):
m.d.comb += [self.in_fifo.w_data.eq(Event.RESTART), self.
in_fifo.w_en.eq(1)]
with m.If(self.in_fifo.w_rdy):
m.next = 'IDLE'
with m.State('SEND-WRITE-EVENT'):
m.d.comb += [self.in_fifo.w_data.eq(Event.WRITE), self.
in_fifo.w_en.eq(1)]
with m.If(self.in_fifo.w_rdy):
m.next = 'SEND-WRITE-DATA'
with m.State('SEND-WRITE-DATA'):
m.d.comb += [self.in_fifo.w_data.eq(w_data), self.in_fifo.
w_en.eq(1)]
with m.If(self.in_fifo.w_rdy):
m.next = 'RECV-WRITE-ACK'
with m.State('RECV-WRITE-ACK'):
with m.If(self.out_fifo.r_rdy):
m.d.comb += [i2c_target.ack_o.eq(self.out_fifo.r_data[0
]), self.out_fifo.r_en.eq(1)]
m.next = 'IDLE'
with m.State('SEND-READ-EVENT'):
m.d.comb += [self.in_fifo.w_data.eq(Event.READ), self.
in_fifo.w_en.eq(1)]
with m.If(self.in_fifo.w_rdy):
m.next = 'RECV-READ-DATA'
with m.State('RECV-READ-DATA'):
with m.If(self.out_fifo.r_rdy):
m.d.comb += [i2c_target.data_o.eq(self.out_fifo.r_data),
self.out_fifo.r_en.eq(1)]
m.next = 'IDLE'
return m
class I2CTargetInterface(metaclass=ABCMeta):
def __init__(self, interface, logger):
self.lower = interface
self._logger = logger
self._level = (logging.DEBUG if self._logger.name == __name__ else
logging.TRACE)
def _log(self, message, *args):
self._logger.log(self._level, 'I²C: ' + message, *args)
async def read_event(self):
event, = await self.lower.read(1)
if event == Event.START:
self._log('event start')
await self.on_start()
elif event == Event.STOP:
self._log('event stop')
await self.on_stop()
elif event == Event.RESTART:
self._log('event restart')
await self.on_restart()
elif event == Event.WRITE:
data, = await self.lower.read(1)
self._log('event write data=<%02x>', data)
ack = await self.on_write(data)
assert isinstance(ack, bool)
self._log('write %s', 'ack' if ack else 'nak')
await self.lower.write([ack])
elif event == Event.READ:
self._log('event read')
data = await self.on_read()
assert isinstance(data, int) and data in range(256)
self._log('read data=<%02x>', data)
await self.lower.write([data])
else:
assert False
@abstractmethod
async def on_start(self):
pass
@abstractmethod
async def on_stop(self):
pass
@abstractmethod
async def on_restart(self):
pass
@abstractmethod
async def on_write(self, data):
pass
@abstractmethod
async def on_read(self):
pass
class _DummyI2CTargetInterface(I2CTargetInterface):
async def on_start(self):
pass
async def on_stop(self):
pass
async def on_restart(self):
pass
async def on_write(self, data):
return True
async def on_read(self):
return 255
class I2CTargetApplet(GlasgowApplet, name='i2c-target'):
logger = logging.getLogger(__name__)
help = 'accept I²C transactions'
description = """
Process transactions on the I²C bus as a software-defined target.
This applet allows emulating any I²C device in Python, provided that the I²C initiator supports
clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes
many I²C initiators.)
The default emulated device is a dummy device that logs all transactions, acknowledges all
writes, and returns 0xFF in response to all reads.
"""
required_revision = 'C0'
__pins = 'scl', 'sda'
interface_cls = _DummyI2CTargetInterface
@classmethod
def add_build_arguments(cls, parser, access):
super().add_build_arguments(parser, access)
for pin in cls.__pins:
access.add_pin_argument(parser, pin, default=True)
def i2c_address(arg):
return int(arg, 0)
parser.add_argument('-A', '--address', type=i2c_address, metavar=
'I2C-ADDR', required=True, help='I²C address of the target')
def build(self, target, args):
self.mux_interface = iface = target.multiplexer.claim_interface(self,
args)
iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,
pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface
.get_in_fifo(), address=args.address))
@classmethod
def add_run_arguments(cls, parser, access):
super().add_run_arguments(parser, access)
parser.add_argument('--pulls', default=False, action='store_true',
help='enable integrated pull-ups')
async def run(self, device, args):
pulls = set()
if args.pulls:
pulls = {args.pin_scl, args.pin_sda}
iface = await device.demultiplexer.claim_interface(self, self.
mux_interface, args, pull_high=pulls)
return self.interface_cls(iface, self.logger)
async def interact(self, device, args, iface):
while True:
await iface.read_event()
class I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):
@synthesis_test
def test_build(self):
self.assertBuilds(args=['-A', '0b1010000'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Event(enum.IntEnum):
    """One-byte event codes streamed from the gateware FSM to the host.

    A WRITE event is followed by the written data byte and expects a one-byte
    ACK/NAK reply; a READ event expects a one-byte data reply (see
    ``I2CTargetSubtarget`` and ``I2CTargetInterface.read_event``).
    """
    START   = 0x10
    STOP    = 0x20
    RESTART = 0x30
    WRITE   = 0x40
    READ    = 0x50
class I2CTargetSubtarget(Elaboratable):
    """Gateware bridging an ``I2CTarget`` core to the host via two FIFOs.

    Bus events are forwarded to the host as one-byte ``Event`` codes on
    ``in_fifo``; host replies (write ACK, read data) are taken from
    ``out_fifo``. While any event is being exchanged with the host, the I2C
    core is held busy (which stretches the clock on the initiator side).
    """
    def __init__(self, pads, out_fifo, in_fifo, address):
        # pads:     SCL/SDA pad group for the I2C core.
        # out_fifo: host -> gateware (ACK bits, read data bytes).
        # in_fifo:  gateware -> host (event codes, written data bytes).
        # address:  fixed I2C target address the core responds to.
        self.pads = pads
        self.out_fifo = out_fifo
        self.in_fifo = in_fifo
        self.address = address
    def elaborate(self, platform):
        m = Module()
        m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)
        m.d.comb += i2c_target.address.eq(self.address)
        with m.FSM():
            # Latched copy of the byte the initiator wrote, valid from the
            # cycle after the core's `write` strobe.
            w_data = Signal(8)
            # Default: busy (clock-stretch) in every state except IDLE, which
            # overrides this to 0 below.
            m.d.comb += i2c_target.busy.eq(1)
            with m.State('IDLE'):
                m.d.comb += i2c_target.busy.eq(0)
                # The core's strobes are checked in priority order; each one
                # starts the handshake for the corresponding host event.
                with m.If(i2c_target.start):
                    m.next = 'SEND-START-EVENT'
                with m.Elif(i2c_target.stop):
                    m.next = 'SEND-STOP-EVENT'
                with m.Elif(i2c_target.restart):
                    m.next = 'SEND-RESTART-EVENT'
                with m.Elif(i2c_target.write):
                    m.d.sync += w_data.eq(i2c_target.data_i)
                    m.next = 'SEND-WRITE-EVENT'
                with m.Elif(i2c_target.read):
                    m.next = 'SEND-READ-EVENT'
            # Each SEND-* state keeps the event byte asserted on the in-FIFO
            # until the FIFO accepts it (w_rdy), then advances.
            with m.State('SEND-START-EVENT'):
                m.d.comb += [self.in_fifo.w_data.eq(Event.START), self.
                    in_fifo.w_en.eq(1)]
                with m.If(self.in_fifo.w_rdy):
                    m.next = 'IDLE'
            with m.State('SEND-STOP-EVENT'):
                m.d.comb += [self.in_fifo.w_data.eq(Event.STOP), self.
                    in_fifo.w_en.eq(1)]
                with m.If(self.in_fifo.w_rdy):
                    m.next = 'IDLE'
            with m.State('SEND-RESTART-EVENT'):
                m.d.comb += [self.in_fifo.w_data.eq(Event.RESTART), self.
                    in_fifo.w_en.eq(1)]
                with m.If(self.in_fifo.w_rdy):
                    m.next = 'IDLE'
            with m.State('SEND-WRITE-EVENT'):
                m.d.comb += [self.in_fifo.w_data.eq(Event.WRITE), self.
                    in_fifo.w_en.eq(1)]
                with m.If(self.in_fifo.w_rdy):
                    m.next = 'SEND-WRITE-DATA'
            with m.State('SEND-WRITE-DATA'):
                # Second byte of a WRITE event: the data the initiator wrote.
                m.d.comb += [self.in_fifo.w_data.eq(w_data), self.in_fifo.
                    w_en.eq(1)]
                with m.If(self.in_fifo.w_rdy):
                    m.next = 'RECV-WRITE-ACK'
            with m.State('RECV-WRITE-ACK'):
                # Wait for the host's one-byte reply; bit 0 is the ACK flag.
                with m.If(self.out_fifo.r_rdy):
                    m.d.comb += [i2c_target.ack_o.eq(self.out_fifo.r_data[0
                        ]), self.out_fifo.r_en.eq(1)]
                    m.next = 'IDLE'
            with m.State('SEND-READ-EVENT'):
                m.d.comb += [self.in_fifo.w_data.eq(Event.READ), self.
                    in_fifo.w_en.eq(1)]
                with m.If(self.in_fifo.w_rdy):
                    m.next = 'RECV-READ-DATA'
            with m.State('RECV-READ-DATA'):
                # Wait for the host-provided byte to return to the initiator.
                with m.If(self.out_fifo.r_rdy):
                    m.d.comb += [i2c_target.data_o.eq(self.out_fifo.r_data),
                        self.out_fifo.r_en.eq(1)]
                    m.next = 'IDLE'
        return m
class I2CTargetInterface(metaclass=ABCMeta):
    """Host-side event pump for the I2C target gateware.

    Subclasses implement the ``on_*`` hooks to emulate a device;
    :meth:`read_event` reads one event byte from the gateware and dispatches
    to the matching hook, sending back ACK/data bytes where required.
    """
    def __init__(self, interface, logger):
        self.lower = interface
        self._logger = logger
        # Log at DEBUG when this module's own logger is used, otherwise at
        # the quieter framework TRACE level.
        if self._logger.name == __name__:
            self._level = logging.DEBUG
        else:
            self._level = logging.TRACE
    def _log(self, message, *args):
        self._logger.log(self._level, 'I²C: ' + message, *args)
    async def read_event(self):
        """Receive and dispatch a single bus event (blocks until one arrives)."""
        event, = await self.lower.read(1)
        if event == Event.WRITE:
            # WRITE carries a payload byte and expects a one-byte ACK reply.
            data, = await self.lower.read(1)
            self._log('event write data=<%02x>', data)
            ack = await self.on_write(data)
            assert isinstance(ack, bool)
            self._log('write %s', 'ack' if ack else 'nak')
            await self.lower.write([ack])
        elif event == Event.READ:
            # READ expects a one-byte data reply for the initiator.
            self._log('event read')
            data = await self.on_read()
            assert isinstance(data, int) and data in range(256)
            self._log('read data=<%02x>', data)
            await self.lower.write([data])
        elif event == Event.START:
            self._log('event start')
            await self.on_start()
        elif event == Event.RESTART:
            self._log('event restart')
            await self.on_restart()
        elif event == Event.STOP:
            self._log('event stop')
            await self.on_stop()
        else:
            # Unknown event byte: the gateware never emits anything else.
            assert False
    @abstractmethod
    async def on_start(self):
        """Called on a bus START condition addressed to this target."""
    @abstractmethod
    async def on_stop(self):
        """Called on a bus STOP condition."""
    @abstractmethod
    async def on_restart(self):
        """Called on a repeated START condition."""
    @abstractmethod
    async def on_write(self, data):
        """Handle a written byte; return True to ACK, False to NAK."""
    @abstractmethod
    async def on_read(self):
        """Return the byte (0..255) to send back for a read."""
class _DummyI2CTargetInterface(I2CTargetInterface):
    """Default emulated device: relies on the base class for logging,
    acknowledges every write, and answers every read with 0xFF."""
    async def on_start(self):
        pass
    async def on_restart(self):
        pass
    async def on_stop(self):
        pass
    async def on_write(self, data):
        # Always ACK; the written byte is discarded.
        return True
    async def on_read(self):
        # Idle-bus value, as if no device drove SDA.
        return 0xFF
class I2CTargetApplet(GlasgowApplet, name='i2c-target'):
    """Glasgow applet registered as ``i2c-target``: a software-defined I2C
    target whose behavior is supplied by an ``I2CTargetInterface`` subclass."""
    logger = logging.getLogger(__name__)
    help = 'accept I²C transactions'
    description = """
    Process transactions on the I²C bus as a software-defined target.
    This applet allows emulating any I²C device in Python, provided that the I²C initiator supports
    clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes
    many I²C initiators.)
    The default emulated device is a dummy device that logs all transactions, acknowledges all
    writes, and returns 0xFF in response to all reads.
    """
    required_revision = 'C0'
    # Pad names claimed by the applet; name-mangled (private) on purpose so
    # subclasses don't accidentally override the pin set.
    __pins = 'scl', 'sda'
    # Subclasses may substitute their own I2CTargetInterface implementation.
    interface_cls = _DummyI2CTargetInterface
    @classmethod
    def add_build_arguments(cls, parser, access):
        super().add_build_arguments(parser, access)
        for pin in cls.__pins:
            access.add_pin_argument(parser, pin, default=True)
        # int(arg, 0) accepts decimal, 0x.., 0o.., and 0b.. address notations.
        def i2c_address(arg):
            return int(arg, 0)
        parser.add_argument('-A', '--address', type=i2c_address, metavar=
            'I2C-ADDR', required=True, help='I²C address of the target')
    def build(self, target, args):
        # Claim an I/O interface and instantiate the FSM subtarget with its
        # pads, both FIFOs, and the fixed target address.
        self.mux_interface = iface = target.multiplexer.claim_interface(self,
            args)
        iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,
            pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface
            .get_in_fifo(), address=args.address))
    @classmethod
    def add_run_arguments(cls, parser, access):
        super().add_run_arguments(parser, access)
        parser.add_argument('--pulls', default=False, action='store_true',
            help='enable integrated pull-ups')
    async def run(self, device, args):
        # Optionally pull both bus lines high (I2C is open-drain).
        pulls = set()
        if args.pulls:
            pulls = {args.pin_scl, args.pin_sda}
        iface = await device.demultiplexer.claim_interface(self, self.
            mux_interface, args, pull_high=pulls)
        return self.interface_cls(iface, self.logger)
    async def interact(self, device, args, iface):
        # Pump bus events forever; each call dispatches one event to the
        # interface's on_* hooks.
        while True:
            await iface.read_event()
class I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):
    """Framework test case: checks the applet gateware synthesizes."""
    @synthesis_test
    def test_build(self):
        # 0b1010000 == 0x50, a typical EEPROM-style 7-bit address.
        self.assertBuilds(args=['-A', '0b1010000'])
<|reserved_special_token_1|>
import argparse
import logging
import enum
from abc import ABCMeta, abstractmethod
from nmigen import *
from ....gateware.pads import *
from ....gateware.i2c import I2CTarget
from ... import *
class Event(enum.IntEnum):
    """One-byte event codes streamed to the host on the IN FIFO.

    WRITE is followed by the data byte received from the initiator, and the
    gateware then waits for an ACK/NAK byte on the OUT FIFO; READ requires
    the host to supply the data byte on the OUT FIFO (see the
    I2CTargetSubtarget FSM).
    """
    START = 0x10
    STOP = 0x20
    RESTART = 0x30
    WRITE = 0x40
    READ = 0x50
class I2CTargetSubtarget(Elaboratable):
    """Gateware bridging an I2CTarget core to the host FIFOs.

    Each bus event is reported to the host as a one-byte ``Event`` code on
    the IN FIFO.  A WRITE event is followed by the received data byte, after
    which an ACK/NAK byte is awaited on the OUT FIFO; a READ event awaits the
    data byte to place on the bus.  The I²C core is held busy (stretching the
    clock) for the whole host round-trip.
    """
    def __init__(self, pads, out_fifo, in_fifo, address):
        self.pads = pads            # scl/sda pad group
        self.out_fifo = out_fifo    # host -> gateware (ACK/NAK, read data)
        self.in_fifo = in_fifo      # gateware -> host (events, write data)
        self.address = address      # I²C address the core responds to
    def elaborate(self, platform):
        m = Module()
        m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)
        m.d.comb += i2c_target.address.eq(self.address)
        with m.FSM():
            w_data = Signal(8)
            # Default: hold the core busy; only IDLE releases the bus.
            m.d.comb += i2c_target.busy.eq(1)
            with m.State("IDLE"):
                m.d.comb += i2c_target.busy.eq(0)
                with m.If(i2c_target.start):
                    m.next = "SEND-START-EVENT"
                with m.Elif(i2c_target.stop):
                    m.next = "SEND-STOP-EVENT"
                with m.Elif(i2c_target.restart):
                    m.next = "SEND-RESTART-EVENT"
                with m.Elif(i2c_target.write):
                    # Latch the written byte now; data_i is transient.
                    m.d.sync += w_data.eq(i2c_target.data_i)
                    m.next = "SEND-WRITE-EVENT"
                with m.Elif(i2c_target.read):
                    m.next = "SEND-READ-EVENT"
            # Each SEND-* state strobes w_en and waits for FIFO space
            # (w_rdy) before advancing.
            with m.State("SEND-START-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.START),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "IDLE"
            with m.State("SEND-STOP-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.STOP),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "IDLE"
            with m.State("SEND-RESTART-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.RESTART),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "IDLE"
            with m.State("SEND-WRITE-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.WRITE),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "SEND-WRITE-DATA"
            with m.State("SEND-WRITE-DATA"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(w_data),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "RECV-WRITE-ACK"
            with m.State("RECV-WRITE-ACK"):
                with m.If(self.out_fifo.r_rdy):
                    # Bit 0 of the host byte selects ACK (1) or NAK (0).
                    m.d.comb += [
                        i2c_target.ack_o.eq(self.out_fifo.r_data[0]),
                        self.out_fifo.r_en.eq(1),
                    ]
                    m.next = "IDLE"
            with m.State("SEND-READ-EVENT"):
                m.d.comb += [
                    self.in_fifo.w_data.eq(Event.READ),
                    self.in_fifo.w_en.eq(1),
                ]
                with m.If(self.in_fifo.w_rdy):
                    m.next = "RECV-READ-DATA"
            with m.State("RECV-READ-DATA"):
                with m.If(self.out_fifo.r_rdy):
                    m.d.comb += [
                        i2c_target.data_o.eq(self.out_fifo.r_data),
                        self.out_fifo.r_en.eq(1),
                    ]
                    m.next = "IDLE"
        return m
class I2CTargetInterface(metaclass=ABCMeta):
    """Host-side handler for I²C target bus events.

    Subclass and implement the ``on_*`` callbacks; :meth:`read_event`
    decodes one event byte from the device and dispatches to them.
    """
    def __init__(self, interface, logger):
        self.lower = interface
        self._logger = logger
        # DEBUG when this module's own logger is used; otherwise
        # logging.TRACE — a custom level, presumably added elsewhere in
        # this package (not part of the stdlib logging module).
        self._level = logging.DEBUG if self._logger.name == __name__ else logging.TRACE
    def _log(self, message, *args):
        # Prefix every message so interleaved applet logs are identifiable.
        self._logger.log(self._level, "I²C: " + message, *args)
    async def read_event(self):
        """Read exactly one event from the device and dispatch it.

        For WRITE, reads the data byte, calls on_write(), and sends the
        bool result back as the ACK/NAK byte (bool is an int: True -> 1).
        For READ, calls on_read() and sends its byte back to the device.
        """
        event, = await self.lower.read(1)
        if event == Event.START:
            self._log("event start")
            await self.on_start()
        elif event == Event.STOP:
            self._log("event stop")
            await self.on_stop()
        elif event == Event.RESTART:
            self._log("event restart")
            await self.on_restart()
        elif event == Event.WRITE:
            data, = await self.lower.read(1)
            self._log("event write data=<%02x>", data)
            ack = await self.on_write(data)
            assert isinstance(ack, bool)
            self._log("write %s", "ack" if ack else "nak")
            await self.lower.write([ack])
        elif event == Event.READ:
            self._log("event read")
            data = await self.on_read()
            assert isinstance(data, int) and data in range(256)
            self._log("read data=<%02x>", data)
            await self.lower.write([data])
        else:
            # Unknown event byte indicates a protocol desync with the gateware.
            assert False
    @abstractmethod
    async def on_start(self):
        """Called on an I²C START condition addressed to this target."""
        pass
    @abstractmethod
    async def on_stop(self):
        """Called on an I²C STOP condition."""
        pass
    @abstractmethod
    async def on_restart(self):
        """Called on a repeated START condition."""
        pass
    @abstractmethod
    async def on_write(self, data):
        """Handle a written byte; return True to ACK, False to NAK."""
        pass
    @abstractmethod
    async def on_read(self):
        """Return the byte (0..255) to place on the bus for a read."""
        pass
class _DummyI2CTargetInterface(I2CTargetInterface):
    """Default log-only target: ACKs every write, answers reads with 0xFF.

    Transaction logging itself is performed by the base class.
    """
    async def on_start(self):
        pass
    async def on_stop(self):
        pass
    async def on_restart(self):
        pass
    async def on_write(self, data):
        # Acknowledge everything; the data byte is discarded.
        return True
    async def on_read(self):
        # Mimic an idle/empty device: all bits high.
        return 0xFF
class I2CTargetApplet(GlasgowApplet, name="i2c-target"):
    logger = logging.getLogger(__name__)
    help = "accept I²C transactions"
    description = """
    Process transactions on the I²C bus as a software-defined target.
    This applet allows emulating any I²C device in Python, provided that the I²C initiator supports
    clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes
    many I²C initiators.)
    The default emulated device is a dummy device that logs all transactions, acknowledges all
    writes, and returns 0xFF in response to all reads.
    """
    required_revision = "C0"

    __pins = ("scl", "sda")
    interface_cls = _DummyI2CTargetInterface

    @classmethod
    def add_build_arguments(cls, parser, access):
        """Register the pin arguments and the mandatory I²C address."""
        super().add_build_arguments(parser, access)

        def i2c_address(arg):
            # Base-0 int() accepts 0x.., 0b.., 0o.. and decimal forms.
            return int(arg, 0)

        for pin in cls.__pins:
            access.add_pin_argument(parser, pin, default=True)
        parser.add_argument(
            "-A", "--address", type=i2c_address, metavar="I2C-ADDR", required=True,
            help="I²C address of the target")

    def build(self, target, args):
        """Claim a multiplexer interface and attach the I²C subtarget."""
        iface = target.multiplexer.claim_interface(self, args)
        self.mux_interface = iface
        subtarget = I2CTargetSubtarget(
            pads=iface.get_pads(args, pins=self.__pins),
            out_fifo=iface.get_out_fifo(),
            in_fifo=iface.get_in_fifo(),
            address=args.address,
        )
        iface.add_subtarget(subtarget)

    @classmethod
    def add_run_arguments(cls, parser, access):
        """Register run-time options (integrated pull-up control)."""
        super().add_run_arguments(parser, access)
        parser.add_argument(
            "--pulls", action="store_true", default=False,
            help="enable integrated pull-ups")

    async def run(self, device, args):
        """Claim the device interface and wrap it in ``interface_cls``."""
        pull_pins = {args.pin_scl, args.pin_sda} if args.pulls else set()
        iface = await device.demultiplexer.claim_interface(
            self, self.mux_interface, args, pull_high=pull_pins)
        return self.interface_cls(iface, self.logger)

    async def interact(self, device, args, iface):
        """Dispatch bus events until interrupted."""
        while True:
            await iface.read_event()
# -------------------------------------------------------------------------------------------------
class I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):
    @synthesis_test
    def test_build(self):
        # Smoke test: the applet gateware must synthesize with a valid
        # address argument (0b1010000 == 0x50).
        self.assertBuilds(args=["-A", "0b1010000"])
|
flexible
|
{
"blob_id": "0f2882971f08450e970e188ed2a06ae1683c682c",
"index": 7552,
"step-1": "<mask token>\n\n\nclass I2CTargetApplet(GlasgowApplet, name='i2c-target'):\n logger = logging.getLogger(__name__)\n help = 'accept I²C transactions'\n description = \"\"\"\n Process transactions on the I²C bus as a software-defined target.\n\n This applet allows emulating any I²C device in Python, provided that the I²C initiator supports\n clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes\n many I²C initiators.)\n\n The default emulated device is a dummy device that logs all transactions, acknowledges all\n writes, and returns 0xFF in response to all reads.\n \"\"\"\n required_revision = 'C0'\n __pins = 'scl', 'sda'\n interface_cls = _DummyI2CTargetInterface\n\n @classmethod\n def add_build_arguments(cls, parser, access):\n super().add_build_arguments(parser, access)\n for pin in cls.__pins:\n access.add_pin_argument(parser, pin, default=True)\n\n def i2c_address(arg):\n return int(arg, 0)\n parser.add_argument('-A', '--address', type=i2c_address, metavar=\n 'I2C-ADDR', required=True, help='I²C address of the target')\n\n def build(self, target, args):\n self.mux_interface = iface = target.multiplexer.claim_interface(self,\n args)\n iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,\n pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface\n .get_in_fifo(), address=args.address))\n\n @classmethod\n def add_run_arguments(cls, parser, access):\n super().add_run_arguments(parser, access)\n parser.add_argument('--pulls', default=False, action='store_true',\n help='enable integrated pull-ups')\n\n async def run(self, device, args):\n pulls = set()\n if args.pulls:\n pulls = {args.pin_scl, args.pin_sda}\n iface = await device.demultiplexer.claim_interface(self, self.\n mux_interface, args, pull_high=pulls)\n return self.interface_cls(iface, self.logger)\n\n async def interact(self, device, args, iface):\n while True:\n await iface.read_event()\n\n\nclass 
I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):\n\n @synthesis_test\n def test_build(self):\n self.assertBuilds(args=['-A', '0b1010000'])\n",
"step-2": "<mask token>\n\n\nclass I2CTargetSubtarget(Elaboratable):\n <mask token>\n <mask token>\n\n\nclass I2CTargetInterface(metaclass=ABCMeta):\n\n def __init__(self, interface, logger):\n self.lower = interface\n self._logger = logger\n self._level = (logging.DEBUG if self._logger.name == __name__ else\n logging.TRACE)\n\n def _log(self, message, *args):\n self._logger.log(self._level, 'I²C: ' + message, *args)\n\n async def read_event(self):\n event, = await self.lower.read(1)\n if event == Event.START:\n self._log('event start')\n await self.on_start()\n elif event == Event.STOP:\n self._log('event stop')\n await self.on_stop()\n elif event == Event.RESTART:\n self._log('event restart')\n await self.on_restart()\n elif event == Event.WRITE:\n data, = await self.lower.read(1)\n self._log('event write data=<%02x>', data)\n ack = await self.on_write(data)\n assert isinstance(ack, bool)\n self._log('write %s', 'ack' if ack else 'nak')\n await self.lower.write([ack])\n elif event == Event.READ:\n self._log('event read')\n data = await self.on_read()\n assert isinstance(data, int) and data in range(256)\n self._log('read data=<%02x>', data)\n await self.lower.write([data])\n else:\n assert False\n\n @abstractmethod\n async def on_start(self):\n pass\n\n @abstractmethod\n async def on_stop(self):\n pass\n\n @abstractmethod\n async def on_restart(self):\n pass\n\n @abstractmethod\n async def on_write(self, data):\n pass\n\n @abstractmethod\n async def on_read(self):\n pass\n\n\nclass _DummyI2CTargetInterface(I2CTargetInterface):\n\n async def on_start(self):\n pass\n\n async def on_stop(self):\n pass\n\n async def on_restart(self):\n pass\n\n async def on_write(self, data):\n return True\n\n async def on_read(self):\n return 255\n\n\nclass I2CTargetApplet(GlasgowApplet, name='i2c-target'):\n logger = logging.getLogger(__name__)\n help = 'accept I²C transactions'\n description = \"\"\"\n Process transactions on the I²C bus as a software-defined target.\n\n This 
applet allows emulating any I²C device in Python, provided that the I²C initiator supports\n clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes\n many I²C initiators.)\n\n The default emulated device is a dummy device that logs all transactions, acknowledges all\n writes, and returns 0xFF in response to all reads.\n \"\"\"\n required_revision = 'C0'\n __pins = 'scl', 'sda'\n interface_cls = _DummyI2CTargetInterface\n\n @classmethod\n def add_build_arguments(cls, parser, access):\n super().add_build_arguments(parser, access)\n for pin in cls.__pins:\n access.add_pin_argument(parser, pin, default=True)\n\n def i2c_address(arg):\n return int(arg, 0)\n parser.add_argument('-A', '--address', type=i2c_address, metavar=\n 'I2C-ADDR', required=True, help='I²C address of the target')\n\n def build(self, target, args):\n self.mux_interface = iface = target.multiplexer.claim_interface(self,\n args)\n iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,\n pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface\n .get_in_fifo(), address=args.address))\n\n @classmethod\n def add_run_arguments(cls, parser, access):\n super().add_run_arguments(parser, access)\n parser.add_argument('--pulls', default=False, action='store_true',\n help='enable integrated pull-ups')\n\n async def run(self, device, args):\n pulls = set()\n if args.pulls:\n pulls = {args.pin_scl, args.pin_sda}\n iface = await device.demultiplexer.claim_interface(self, self.\n mux_interface, args, pull_high=pulls)\n return self.interface_cls(iface, self.logger)\n\n async def interact(self, device, args, iface):\n while True:\n await iface.read_event()\n\n\nclass I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):\n\n @synthesis_test\n def test_build(self):\n self.assertBuilds(args=['-A', '0b1010000'])\n",
"step-3": "<mask token>\n\n\nclass I2CTargetSubtarget(Elaboratable):\n <mask token>\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)\n m.d.comb += i2c_target.address.eq(self.address)\n with m.FSM():\n w_data = Signal(8)\n m.d.comb += i2c_target.busy.eq(1)\n with m.State('IDLE'):\n m.d.comb += i2c_target.busy.eq(0)\n with m.If(i2c_target.start):\n m.next = 'SEND-START-EVENT'\n with m.Elif(i2c_target.stop):\n m.next = 'SEND-STOP-EVENT'\n with m.Elif(i2c_target.restart):\n m.next = 'SEND-RESTART-EVENT'\n with m.Elif(i2c_target.write):\n m.d.sync += w_data.eq(i2c_target.data_i)\n m.next = 'SEND-WRITE-EVENT'\n with m.Elif(i2c_target.read):\n m.next = 'SEND-READ-EVENT'\n with m.State('SEND-START-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.START), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'IDLE'\n with m.State('SEND-STOP-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.STOP), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'IDLE'\n with m.State('SEND-RESTART-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.RESTART), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'IDLE'\n with m.State('SEND-WRITE-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.WRITE), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'SEND-WRITE-DATA'\n with m.State('SEND-WRITE-DATA'):\n m.d.comb += [self.in_fifo.w_data.eq(w_data), self.in_fifo.\n w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'RECV-WRITE-ACK'\n with m.State('RECV-WRITE-ACK'):\n with m.If(self.out_fifo.r_rdy):\n m.d.comb += [i2c_target.ack_o.eq(self.out_fifo.r_data[0\n ]), self.out_fifo.r_en.eq(1)]\n m.next = 'IDLE'\n with m.State('SEND-READ-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.READ), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'RECV-READ-DATA'\n with m.State('RECV-READ-DATA'):\n with 
m.If(self.out_fifo.r_rdy):\n m.d.comb += [i2c_target.data_o.eq(self.out_fifo.r_data),\n self.out_fifo.r_en.eq(1)]\n m.next = 'IDLE'\n return m\n\n\nclass I2CTargetInterface(metaclass=ABCMeta):\n\n def __init__(self, interface, logger):\n self.lower = interface\n self._logger = logger\n self._level = (logging.DEBUG if self._logger.name == __name__ else\n logging.TRACE)\n\n def _log(self, message, *args):\n self._logger.log(self._level, 'I²C: ' + message, *args)\n\n async def read_event(self):\n event, = await self.lower.read(1)\n if event == Event.START:\n self._log('event start')\n await self.on_start()\n elif event == Event.STOP:\n self._log('event stop')\n await self.on_stop()\n elif event == Event.RESTART:\n self._log('event restart')\n await self.on_restart()\n elif event == Event.WRITE:\n data, = await self.lower.read(1)\n self._log('event write data=<%02x>', data)\n ack = await self.on_write(data)\n assert isinstance(ack, bool)\n self._log('write %s', 'ack' if ack else 'nak')\n await self.lower.write([ack])\n elif event == Event.READ:\n self._log('event read')\n data = await self.on_read()\n assert isinstance(data, int) and data in range(256)\n self._log('read data=<%02x>', data)\n await self.lower.write([data])\n else:\n assert False\n\n @abstractmethod\n async def on_start(self):\n pass\n\n @abstractmethod\n async def on_stop(self):\n pass\n\n @abstractmethod\n async def on_restart(self):\n pass\n\n @abstractmethod\n async def on_write(self, data):\n pass\n\n @abstractmethod\n async def on_read(self):\n pass\n\n\nclass _DummyI2CTargetInterface(I2CTargetInterface):\n\n async def on_start(self):\n pass\n\n async def on_stop(self):\n pass\n\n async def on_restart(self):\n pass\n\n async def on_write(self, data):\n return True\n\n async def on_read(self):\n return 255\n\n\nclass I2CTargetApplet(GlasgowApplet, name='i2c-target'):\n logger = logging.getLogger(__name__)\n help = 'accept I²C transactions'\n description = \"\"\"\n Process transactions on the I²C bus 
as a software-defined target.\n\n This applet allows emulating any I²C device in Python, provided that the I²C initiator supports\n clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes\n many I²C initiators.)\n\n The default emulated device is a dummy device that logs all transactions, acknowledges all\n writes, and returns 0xFF in response to all reads.\n \"\"\"\n required_revision = 'C0'\n __pins = 'scl', 'sda'\n interface_cls = _DummyI2CTargetInterface\n\n @classmethod\n def add_build_arguments(cls, parser, access):\n super().add_build_arguments(parser, access)\n for pin in cls.__pins:\n access.add_pin_argument(parser, pin, default=True)\n\n def i2c_address(arg):\n return int(arg, 0)\n parser.add_argument('-A', '--address', type=i2c_address, metavar=\n 'I2C-ADDR', required=True, help='I²C address of the target')\n\n def build(self, target, args):\n self.mux_interface = iface = target.multiplexer.claim_interface(self,\n args)\n iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,\n pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface\n .get_in_fifo(), address=args.address))\n\n @classmethod\n def add_run_arguments(cls, parser, access):\n super().add_run_arguments(parser, access)\n parser.add_argument('--pulls', default=False, action='store_true',\n help='enable integrated pull-ups')\n\n async def run(self, device, args):\n pulls = set()\n if args.pulls:\n pulls = {args.pin_scl, args.pin_sda}\n iface = await device.demultiplexer.claim_interface(self, self.\n mux_interface, args, pull_high=pulls)\n return self.interface_cls(iface, self.logger)\n\n async def interact(self, device, args, iface):\n while True:\n await iface.read_event()\n\n\nclass I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):\n\n @synthesis_test\n def test_build(self):\n self.assertBuilds(args=['-A', '0b1010000'])\n",
"step-4": "<mask token>\n\n\nclass Event(enum.IntEnum):\n START = 16\n STOP = 32\n RESTART = 48\n WRITE = 64\n READ = 80\n\n\nclass I2CTargetSubtarget(Elaboratable):\n\n def __init__(self, pads, out_fifo, in_fifo, address):\n self.pads = pads\n self.out_fifo = out_fifo\n self.in_fifo = in_fifo\n self.address = address\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)\n m.d.comb += i2c_target.address.eq(self.address)\n with m.FSM():\n w_data = Signal(8)\n m.d.comb += i2c_target.busy.eq(1)\n with m.State('IDLE'):\n m.d.comb += i2c_target.busy.eq(0)\n with m.If(i2c_target.start):\n m.next = 'SEND-START-EVENT'\n with m.Elif(i2c_target.stop):\n m.next = 'SEND-STOP-EVENT'\n with m.Elif(i2c_target.restart):\n m.next = 'SEND-RESTART-EVENT'\n with m.Elif(i2c_target.write):\n m.d.sync += w_data.eq(i2c_target.data_i)\n m.next = 'SEND-WRITE-EVENT'\n with m.Elif(i2c_target.read):\n m.next = 'SEND-READ-EVENT'\n with m.State('SEND-START-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.START), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'IDLE'\n with m.State('SEND-STOP-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.STOP), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'IDLE'\n with m.State('SEND-RESTART-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.RESTART), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'IDLE'\n with m.State('SEND-WRITE-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.WRITE), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'SEND-WRITE-DATA'\n with m.State('SEND-WRITE-DATA'):\n m.d.comb += [self.in_fifo.w_data.eq(w_data), self.in_fifo.\n w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'RECV-WRITE-ACK'\n with m.State('RECV-WRITE-ACK'):\n with m.If(self.out_fifo.r_rdy):\n m.d.comb += [i2c_target.ack_o.eq(self.out_fifo.r_data[0\n ]), self.out_fifo.r_en.eq(1)]\n m.next = 
'IDLE'\n with m.State('SEND-READ-EVENT'):\n m.d.comb += [self.in_fifo.w_data.eq(Event.READ), self.\n in_fifo.w_en.eq(1)]\n with m.If(self.in_fifo.w_rdy):\n m.next = 'RECV-READ-DATA'\n with m.State('RECV-READ-DATA'):\n with m.If(self.out_fifo.r_rdy):\n m.d.comb += [i2c_target.data_o.eq(self.out_fifo.r_data),\n self.out_fifo.r_en.eq(1)]\n m.next = 'IDLE'\n return m\n\n\nclass I2CTargetInterface(metaclass=ABCMeta):\n\n def __init__(self, interface, logger):\n self.lower = interface\n self._logger = logger\n self._level = (logging.DEBUG if self._logger.name == __name__ else\n logging.TRACE)\n\n def _log(self, message, *args):\n self._logger.log(self._level, 'I²C: ' + message, *args)\n\n async def read_event(self):\n event, = await self.lower.read(1)\n if event == Event.START:\n self._log('event start')\n await self.on_start()\n elif event == Event.STOP:\n self._log('event stop')\n await self.on_stop()\n elif event == Event.RESTART:\n self._log('event restart')\n await self.on_restart()\n elif event == Event.WRITE:\n data, = await self.lower.read(1)\n self._log('event write data=<%02x>', data)\n ack = await self.on_write(data)\n assert isinstance(ack, bool)\n self._log('write %s', 'ack' if ack else 'nak')\n await self.lower.write([ack])\n elif event == Event.READ:\n self._log('event read')\n data = await self.on_read()\n assert isinstance(data, int) and data in range(256)\n self._log('read data=<%02x>', data)\n await self.lower.write([data])\n else:\n assert False\n\n @abstractmethod\n async def on_start(self):\n pass\n\n @abstractmethod\n async def on_stop(self):\n pass\n\n @abstractmethod\n async def on_restart(self):\n pass\n\n @abstractmethod\n async def on_write(self, data):\n pass\n\n @abstractmethod\n async def on_read(self):\n pass\n\n\nclass _DummyI2CTargetInterface(I2CTargetInterface):\n\n async def on_start(self):\n pass\n\n async def on_stop(self):\n pass\n\n async def on_restart(self):\n pass\n\n async def on_write(self, data):\n return True\n\n async def 
on_read(self):\n return 255\n\n\nclass I2CTargetApplet(GlasgowApplet, name='i2c-target'):\n logger = logging.getLogger(__name__)\n help = 'accept I²C transactions'\n description = \"\"\"\n Process transactions on the I²C bus as a software-defined target.\n\n This applet allows emulating any I²C device in Python, provided that the I²C initiator supports\n clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes\n many I²C initiators.)\n\n The default emulated device is a dummy device that logs all transactions, acknowledges all\n writes, and returns 0xFF in response to all reads.\n \"\"\"\n required_revision = 'C0'\n __pins = 'scl', 'sda'\n interface_cls = _DummyI2CTargetInterface\n\n @classmethod\n def add_build_arguments(cls, parser, access):\n super().add_build_arguments(parser, access)\n for pin in cls.__pins:\n access.add_pin_argument(parser, pin, default=True)\n\n def i2c_address(arg):\n return int(arg, 0)\n parser.add_argument('-A', '--address', type=i2c_address, metavar=\n 'I2C-ADDR', required=True, help='I²C address of the target')\n\n def build(self, target, args):\n self.mux_interface = iface = target.multiplexer.claim_interface(self,\n args)\n iface.add_subtarget(I2CTargetSubtarget(pads=iface.get_pads(args,\n pins=self.__pins), out_fifo=iface.get_out_fifo(), in_fifo=iface\n .get_in_fifo(), address=args.address))\n\n @classmethod\n def add_run_arguments(cls, parser, access):\n super().add_run_arguments(parser, access)\n parser.add_argument('--pulls', default=False, action='store_true',\n help='enable integrated pull-ups')\n\n async def run(self, device, args):\n pulls = set()\n if args.pulls:\n pulls = {args.pin_scl, args.pin_sda}\n iface = await device.demultiplexer.claim_interface(self, self.\n mux_interface, args, pull_high=pulls)\n return self.interface_cls(iface, self.logger)\n\n async def interact(self, device, args, iface):\n while True:\n await iface.read_event()\n\n\nclass 
I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):\n\n @synthesis_test\n def test_build(self):\n self.assertBuilds(args=['-A', '0b1010000'])\n",
"step-5": "import argparse\nimport logging\nimport enum\nfrom abc import ABCMeta, abstractmethod\nfrom nmigen import *\n\nfrom ....gateware.pads import *\nfrom ....gateware.i2c import I2CTarget\nfrom ... import *\n\n\nclass Event(enum.IntEnum):\n START = 0x10\n STOP = 0x20\n RESTART = 0x30\n WRITE = 0x40\n READ = 0x50\n\n\nclass I2CTargetSubtarget(Elaboratable):\n def __init__(self, pads, out_fifo, in_fifo, address):\n self.pads = pads\n self.out_fifo = out_fifo\n self.in_fifo = in_fifo\n self.address = address\n\n def elaborate(self, platform):\n m = Module()\n\n m.submodules.i2c_target = i2c_target = I2CTarget(self.pads)\n m.d.comb += i2c_target.address.eq(self.address)\n\n with m.FSM():\n w_data = Signal(8)\n\n m.d.comb += i2c_target.busy.eq(1)\n with m.State(\"IDLE\"):\n m.d.comb += i2c_target.busy.eq(0)\n with m.If(i2c_target.start):\n m.next = \"SEND-START-EVENT\"\n with m.Elif(i2c_target.stop):\n m.next = \"SEND-STOP-EVENT\"\n with m.Elif(i2c_target.restart):\n m.next = \"SEND-RESTART-EVENT\"\n with m.Elif(i2c_target.write):\n m.d.sync += w_data.eq(i2c_target.data_i)\n m.next = \"SEND-WRITE-EVENT\"\n with m.Elif(i2c_target.read):\n m.next = \"SEND-READ-EVENT\"\n\n with m.State(\"SEND-START-EVENT\"):\n m.d.comb += [\n self.in_fifo.w_data.eq(Event.START),\n self.in_fifo.w_en.eq(1),\n ]\n with m.If(self.in_fifo.w_rdy):\n m.next = \"IDLE\"\n\n with m.State(\"SEND-STOP-EVENT\"):\n m.d.comb += [\n self.in_fifo.w_data.eq(Event.STOP),\n self.in_fifo.w_en.eq(1),\n ]\n with m.If(self.in_fifo.w_rdy):\n m.next = \"IDLE\"\n\n with m.State(\"SEND-RESTART-EVENT\"):\n m.d.comb += [\n self.in_fifo.w_data.eq(Event.RESTART),\n self.in_fifo.w_en.eq(1),\n ]\n with m.If(self.in_fifo.w_rdy):\n m.next = \"IDLE\"\n\n with m.State(\"SEND-WRITE-EVENT\"):\n m.d.comb += [\n self.in_fifo.w_data.eq(Event.WRITE),\n self.in_fifo.w_en.eq(1),\n ]\n with m.If(self.in_fifo.w_rdy):\n m.next = \"SEND-WRITE-DATA\"\n\n with m.State(\"SEND-WRITE-DATA\"):\n m.d.comb += [\n 
self.in_fifo.w_data.eq(w_data),\n self.in_fifo.w_en.eq(1),\n ]\n with m.If(self.in_fifo.w_rdy):\n m.next = \"RECV-WRITE-ACK\"\n\n with m.State(\"RECV-WRITE-ACK\"):\n with m.If(self.out_fifo.r_rdy):\n m.d.comb += [\n i2c_target.ack_o.eq(self.out_fifo.r_data[0]),\n self.out_fifo.r_en.eq(1),\n ]\n m.next = \"IDLE\"\n\n with m.State(\"SEND-READ-EVENT\"):\n m.d.comb += [\n self.in_fifo.w_data.eq(Event.READ),\n self.in_fifo.w_en.eq(1),\n ]\n with m.If(self.in_fifo.w_rdy):\n m.next = \"RECV-READ-DATA\"\n\n with m.State(\"RECV-READ-DATA\"):\n with m.If(self.out_fifo.r_rdy):\n m.d.comb += [\n i2c_target.data_o.eq(self.out_fifo.r_data),\n self.out_fifo.r_en.eq(1),\n ]\n m.next = \"IDLE\"\n\n return m\n\n\nclass I2CTargetInterface(metaclass=ABCMeta):\n def __init__(self, interface, logger):\n self.lower = interface\n self._logger = logger\n self._level = logging.DEBUG if self._logger.name == __name__ else logging.TRACE\n\n def _log(self, message, *args):\n self._logger.log(self._level, \"I²C: \" + message, *args)\n\n async def read_event(self):\n event, = await self.lower.read(1)\n if event == Event.START:\n self._log(\"event start\")\n await self.on_start()\n elif event == Event.STOP:\n self._log(\"event stop\")\n await self.on_stop()\n elif event == Event.RESTART:\n self._log(\"event restart\")\n await self.on_restart()\n elif event == Event.WRITE:\n data, = await self.lower.read(1)\n self._log(\"event write data=<%02x>\", data)\n ack = await self.on_write(data)\n assert isinstance(ack, bool)\n self._log(\"write %s\", \"ack\" if ack else \"nak\")\n await self.lower.write([ack])\n elif event == Event.READ:\n self._log(\"event read\")\n data = await self.on_read()\n assert isinstance(data, int) and data in range(256)\n self._log(\"read data=<%02x>\", data)\n await self.lower.write([data])\n else:\n assert False\n\n @abstractmethod\n async def on_start(self):\n pass\n\n @abstractmethod\n async def on_stop(self):\n pass\n\n @abstractmethod\n async def on_restart(self):\n 
pass\n\n @abstractmethod\n async def on_write(self, data):\n pass\n\n @abstractmethod\n async def on_read(self):\n pass\n\n\nclass _DummyI2CTargetInterface(I2CTargetInterface):\n async def on_start(self):\n pass\n\n async def on_stop(self):\n pass\n\n async def on_restart(self):\n pass\n\n async def on_write(self, data):\n return True\n\n async def on_read(self):\n return 0xFF\n\n\nclass I2CTargetApplet(GlasgowApplet, name=\"i2c-target\"):\n logger = logging.getLogger(__name__)\n help = \"accept I²C transactions\"\n description = \"\"\"\n Process transactions on the I²C bus as a software-defined target.\n\n This applet allows emulating any I²C device in Python, provided that the I²C initiator supports\n clock stretching and tolerates delays caused by host roundtrips. (Unfortunately, this excludes\n many I²C initiators.)\n\n The default emulated device is a dummy device that logs all transactions, acknowledges all\n writes, and returns 0xFF in response to all reads.\n \"\"\"\n required_revision = \"C0\"\n\n __pins = (\"scl\", \"sda\")\n interface_cls = _DummyI2CTargetInterface\n\n @classmethod\n def add_build_arguments(cls, parser, access):\n super().add_build_arguments(parser, access)\n\n for pin in cls.__pins:\n access.add_pin_argument(parser, pin, default=True)\n\n def i2c_address(arg):\n return int(arg, 0)\n parser.add_argument(\n \"-A\", \"--address\", type=i2c_address, metavar=\"I2C-ADDR\", required=True,\n help=\"I²C address of the target\")\n\n def build(self, target, args):\n self.mux_interface = iface = target.multiplexer.claim_interface(self, args)\n iface.add_subtarget(I2CTargetSubtarget(\n pads=iface.get_pads(args, pins=self.__pins),\n out_fifo=iface.get_out_fifo(),\n in_fifo=iface.get_in_fifo(),\n address=args.address,\n ))\n\n @classmethod\n def add_run_arguments(cls, parser, access):\n super().add_run_arguments(parser, access)\n\n parser.add_argument(\n \"--pulls\", default=False, action=\"store_true\",\n help=\"enable integrated pull-ups\")\n\n 
async def run(self, device, args):\n pulls = set()\n if args.pulls:\n pulls = {args.pin_scl, args.pin_sda}\n iface = await device.demultiplexer.claim_interface(self, self.mux_interface, args,\n pull_high=pulls)\n return self.interface_cls(iface, self.logger)\n\n async def interact(self, device, args, iface):\n while True:\n await iface.read_event()\n\n# -------------------------------------------------------------------------------------------------\n\nclass I2CTargetAppletTestCase(GlasgowAppletTestCase, applet=I2CTargetApplet):\n @synthesis_test\n def test_build(self):\n self.assertBuilds(args=[\"-A\", \"0b1010000\"])\n",
"step-ids": [
7,
12,
13,
16,
18
]
}
|
[
7,
12,
13,
16,
18
] |
# Read five integers, one per line, and print their sum.
print(sum(int(input()) for _ in range(5)))
|
normal
|
{
"blob_id": "4add80894036e0395a6e6eb13e8a2db0d963de8c",
"index": 9654,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(5):\n sum_value += int(input())\nprint(sum_value)\n",
"step-3": "sum_value = 0\nfor _ in range(5):\n sum_value += int(input())\nprint(sum_value)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
def loadTrainSet(filepath):
    """Load a labelled CSV training set and move the label column to the end.

    The file must have a one-line header and the label in the first column.
    All values are kept as strings, exactly as ``np.loadtxt`` reads them.

    Parameters
    ----------
    filepath : str
        Path to the comma-separated training file.

    Returns
    -------
    numpy.ndarray
        2-D string array of shape (n_samples, n_columns) with the feature
        columns first and the label as the last column.
    """
    # dtype=str replaces the deprecated alias np.str, which was removed in
    # NumPy 1.24 and raises AttributeError on modern NumPy.
    raw = np.loadtxt(filepath, delimiter=',', dtype=str, skiprows=1)
    X, y = raw[:, 1:], raw[:, 0]
    # reshape(-1, 1) turns the 1-D label vector into a column for hstack.
    trainSet = np.hstack((X, y.reshape(-1, 1)))
    return trainSet
def out(trainset):
    """Split a training set into a feature matrix X and a 1-D label vector y.

    The label is taken from the last column.  Works for both a plain
    ndarray and an ``np.matrix``: the original ``np.asarray(y.T)[0]`` only
    produced a 1-D vector for matrix input and collapsed y to a scalar for
    an ndarray (such as the one returned by ``loadTrainSet``).

    Parameters
    ----------
    trainset : array-like of shape (n_samples, n_features + 1)
        Samples with the label in the last column.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        X of shape (n_samples, n_features) and y of shape (n_samples,).
    """
    arr = np.asarray(trainset)
    X = arr[:, :-1]
    # ravel() flattens both the (n,) ndarray case and the (1, n) matrix case.
    y = np.asarray(arr[:, -1]).ravel()
    return X, y
def gridsearchcv(X, y):
    """Grid-search the forest size and plot mean accuracy and its spread.

    Fits a RandomForestClassifier over n_estimators in [1, 100) step 10 and
    draws two side-by-side plots: mean CV accuracy and the std of the fold
    scores, both against n_estimators.
    """
    estimator_counts = np.arange(1, 100, 10)
    forest = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)
    search = GridSearchCV(forest, param_grid={'n_estimators': estimator_counts})
    search.fit(X, y)

    fig = plt.figure(1, figsize=(16, 12))
    plt.clf()
    accuracy_axes = fig.add_subplot(1, 2, 1)
    spread_axes = fig.add_subplot(1, 2, 2)

    # Each grid_scores_ entry is (params, mean_score, fold_scores).
    cv_entries = search.grid_scores_
    accuracy = [entry[1] for entry in cv_entries]
    stdlist = [np.std(entry[2]) for entry in cv_entries]

    accuracy_axes.plot(estimator_counts, accuracy, linewidth=2)
    spread_axes.plot(estimator_counts, stdlist, linewidth=2)
    plt.axis('tight')
    accuracy_axes.set_xlabel('n_estimators')
    accuracy_axes.set_ylabel('accuracy')
    spread_axes.set_xlabel('n_estimators')
    spread_axes.set_ylabel('std_accuracy')
|
normal
|
{
"blob_id": "08f0b261b5a9b0f5133c468b3f92dc00285eda6a",
"index": 4477,
"step-1": "<mask token>\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-2": "<mask token>\n\n\ndef out(trainset):\n trainset = trainset\n X = trainset[:, :trainset.shape[1] - 1]\n y = trainset[:, trainset.shape[1] - 1]\n X = np.asarray(X)\n y = np.asarray(y.T)[0]\n return X, y\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-3": "<mask token>\n\n\ndef loadTrainSet(filepath):\n raw = np.loadtxt(filepath, delimiter=',', dtype=np.str, skiprows=1)\n X, y = raw[:, 1:], raw[:, 0]\n trainSet = np.hstack((X, y.reshape(-1, 1)))\n return trainSet\n\n\ndef out(trainset):\n trainset = trainset\n X = trainset[:, :trainset.shape[1] - 1]\n y = trainset[:, trainset.shape[1] - 1]\n X = np.asarray(X)\n y = np.asarray(y.T)[0]\n return X, y\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-4": "import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.grid_search import GridSearchCV\nimport matplotlib.pyplot as plt\n\n\ndef loadTrainSet(filepath):\n raw = np.loadtxt(filepath, delimiter=',', dtype=np.str, skiprows=1)\n X, y = raw[:, 1:], raw[:, 0]\n trainSet = np.hstack((X, y.reshape(-1, 1)))\n return trainSet\n\n\ndef out(trainset):\n trainset = trainset\n X = trainset[:, :trainset.shape[1] - 1]\n y = trainset[:, trainset.shape[1] - 1]\n X = np.asarray(X)\n y = np.asarray(y.T)[0]\n return X, y\n\n\ndef gridsearchcv(X, y):\n accuracy = []\n stdlist = []\n classifier = RandomForestClassifier(verbose=2, n_jobs=1, oob_score=1)\n param_grid = {'n_estimators': np.arange(1, 100, 10)}\n grid = GridSearchCV(classifier, param_grid=param_grid)\n grid.fit(X, y)\n fig = plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2)\n scores = grid.grid_scores_\n for i in range(len(scores)):\n accu = scores[i][1]\n stdnum = np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum)\n ax1.plot(np.arange(1, 100, 10), accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10), stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n",
"step-5": "import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.grid_search import GridSearchCV\nimport matplotlib.pyplot as plt\n\ndef loadTrainSet(filepath):\n raw = np.loadtxt(filepath, delimiter=',', dtype=np.str, skiprows=1)\n X, y = raw[:,1:], raw[:,0]\n trainSet = np.hstack((X, y.reshape(-1,1)))\n return trainSet\n \n\ndef out(trainset):\n trainset = trainset\n X=trainset[:,:(trainset.shape[1]-1)]\n y=trainset[:,(trainset.shape[1]-1)]\n X=np.asarray(X)\n y=np.asarray(y.T)[0]\n return X,y\n\n\ndef gridsearchcv(X,y):\n accuracy=[]\n stdlist=[]\n classifier = RandomForestClassifier(verbose=2, n_jobs=1,oob_score=1)\n param_grid={'n_estimators':np.arange(1, 100, 10)}\n# param_grid={'n_estimators':np.arange(1, 202, 10)}\n# param_grid={'n_estimators':[200], 'criterion':['gini', 'entropy']}\n# param_grid={'n_estimators':[200], 'max_features':np.append(np.arange(28-20, 28, 1), np.arange(28, 28+20, 1))}\n# param_grid={'n_estimators':[200], 'max_depth':np.arange(40, 40+20, 1)}\n# param_grid={'n_estimators':[200], 'min_samples_split':np.arange(2, 2+10, 1)}\n# param_grid={'n_estimators':[200], 'min_samples_leaf':np.arange(1, 1+10, 1)}\n# param_grid={'n_estimators':[200], 'max_leaf_nodes':np.arange(3000, 3000+1000, 100)}\n\n grid = GridSearchCV(classifier , param_grid=param_grid)\n grid.fit(X,y)\n fig=plt.figure(1, figsize=(16, 12))\n plt.clf()\n ax1=fig.add_subplot(1,2,1)\n ax2=fig.add_subplot(1,2,2)\n scores=grid.grid_scores_\n for i in range(len(scores)):\n accu=scores[i][1]\n stdnum=np.std(scores[i][2])\n accuracy.append(accu)\n stdlist.append(stdnum) \n ax1.plot(np.arange(1, 100, 10),accuracy, linewidth=2)\n ax2.plot(np.arange(1, 100, 10),stdlist, linewidth=2)\n plt.axis('tight')\n ax1.set_xlabel('n_estimators')\n ax1.set_ylabel('accuracy')\n ax2.set_xlabel('n_estimators')\n ax2.set_ylabel('std_accuracy')\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
import os
import json
from collections import OrderedDict
from config import folder, portfolio_value
from datetime import datetime
import logging
# Logger setup
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def valid_date(datestring):
    """Return True if *datestring* parses as an ISO YYYY-MM-DD date."""
    try:
        datetime.strptime(datestring, '%Y-%m-%d')
        return True
    except ValueError as e:
        # Bug fix: 'not a valid date: ' + e concatenated str with a
        # ValueError and raised TypeError out of the handler, so invalid
        # input crashed instead of returning False. Use lazy %s logging.
        logger.info('not a valid date: %s', e)
        return False
def portfolio_value_on_date(date):
    """Retrieve the total portfolio value recorded for *date*.

    Returns the 'daily_value' entry from portfolio_balance.json, or an
    error string when the date is malformed or the file cannot be read.
    """
    # Guard clause: reject malformed dates before touching the file.
    if not valid_date(date):
        return 'error on date format or date not in range'
    try:
        with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
            balances = json.loads(read_file.read(),
                                  object_pairs_hook=OrderedDict)
            return balances[date]['daily_value']
    except Exception:
        logger.critical('couldnt read portfolio.json')
        return 'something went horribly wrong trying to open the portfolio.json'
def net_gain_loss_percentage():
    """Report the portfolio's net gain/loss at the end of the backtest.

    Reads 'final_portfolio' from portfolio_balance.json, compares it to the
    configured starting portfolio_value, and returns a human-readable string.
    """
    try:
        with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
            data = json.loads(read_file.read(),
                              object_pairs_hook=OrderedDict)
            net_gain_loss = data['final_portfolio'] / portfolio_value
            # Bug fix: '... ' + net_gain_loss concatenated str with a float
            # and raised TypeError, which the broad except below swallowed —
            # so this function always returned the error string. Log lazily.
            logger.info('net gain loss is %s', net_gain_loss)
            # NOTE(review): this is the final/initial *ratio*, not a percent
            # change; ratio > 0 holds for any positive balance, so the
            # gain/loss branches look wrong — confirm the intended metric
            # (e.g. (final - initial) / initial * 100).
            if net_gain_loss > 0:
                return 'Your net gain is ' + str(net_gain_loss) + '%'
            elif net_gain_loss == 0:
                return 'You broke even'
            else:
                return 'Your net loss is ' + str(net_gain_loss) + '%'
    except Exception:
        logger.critical('couldnt read portfolio.json')
        return 'something went horribly wrong trying to open the portfolio.json'
def max_drawdown():
    """Report the maximum drawdown observed over the backtest.

    Scans the dated 'daily_value' entries of portfolio_balance.json and
    returns a human-readable string, or an error string on failure.
    """
    try:
        with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
            data = json.loads(read_file.read(),
                              object_pairs_hook=OrderedDict)

            def daily_price():
                """Yield the daily portfolio value for every dated key."""
                for item in data:
                    if valid_date(item):
                        yield data[item]['daily_value']

            # since the daily portfolio is already a running tally
            # we just need to find the max and the min between them
            max_price = max(daily_price())
            min_price = min(daily_price())
            draw = max_price / min_price
            # Bug fix: 'draw percent: ' + draw concatenated str with a float
            # and raised TypeError, which the broad except below swallowed —
            # so this function always returned the error string. Log lazily.
            logger.info('draw percent: %s', draw)
            # NOTE(review): max/min is a ratio; the usual drawdown definition
            # is (max - min) / max — confirm the intended metric.
            return 'Max Drawdown is ' + str(draw) + '%'
    except Exception:
        logger.critical('couldnt read portfolio.json')
        return 'something went horribly wrong trying to open the portfolio.json'
|
normal
|
{
"blob_id": "0bc72a558b9bd3b5f74ce5dfce586dd66c579710",
"index": 5776,
"step-1": "<mask token>\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n 
min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield 
data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if 
valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n",
"step-4": "import sys\nimport os\nimport json\nfrom collections import OrderedDict\nfrom config import folder, portfolio_value\nfrom datetime import datetime\nimport logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = 
json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n",
"step-5": "import sys\nimport os\nimport json\nfrom collections import OrderedDict\nfrom config import folder, portfolio_value\nfrom datetime import datetime\nimport logging\n# Logger setup\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:\n data = json.loads(read_file.read(),\n object_pairs_hook=OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return 'something went horribly wrong trying to open the portfolio.json'\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:\n data = json.loads(read_file.read(),\n object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return 'something went horribly wrong trying to open the portfolio.json'\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:\n data = 
json.loads(read_file.read(),\n object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n\n # since the daily portfolio is already a running tally\n # we just need to find the max and the min between them\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return 'something went horribly wrong trying to open the portfolio.json'\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# author Dominik Capkovic
# contact: domcapkovic@gmail.com; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/
# GitHub: https://github.com/kilimetr
# Characteristic constants for column packings, one row per packing.
# Column order (see _PACKING_COLUMNS below):
# name | material | size | N | a | eps | CS | CFl | Ch | CP0 | CL | CV
packings_str = '''
    Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450
    Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430
    Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440
    Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400
    Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300
    Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337
    Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360
    Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320
    Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336
    Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341
    Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410
    Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446
    Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380
    Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368
    Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333
    Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336
    Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341
    Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345
    Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333
    Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333
    Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303
    NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410
    NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366
    NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425
    NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322
    Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402
    Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345
    Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408
    Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390
    Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342
    Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369
    Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465
    Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464
    Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379
    Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450
    Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398
    Glitsch CMR ring | metal | 0.5" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495
    Glitsch CMR ring | metal | 1.0" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0
    Glitsch CMR ring | metal | 1.5"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0
    Glitsch CMR ring | metal | 1.5" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0
    TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389
    Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412
    Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210
    VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405
    VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420
    Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459
    Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296
    Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257
    Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331
    Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390
    Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377
    Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302
    Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0
    Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0
    Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370
    Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232
    Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387
    DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354
    DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326
    Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385
    Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0
    Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0
    Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270
    Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327
    Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390
    Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422
    Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412
    Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0
    Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167
    '''

# (dict key, converter) per ' | '-separated column, in table order.
_PACKING_COLUMNS = (
    ('name', str),
    ('material', str),
    ('size', str),
    ('N', int),
    ('a', float),
    ('eps', float),
    ('CS', float),
    ('CFl', float),
    ('Ch', float),
    ('CP0', float),
    ('CL', float),
    ('CV', float),
)

# Parse the table above into one dict per packing row.
packings = [
    {key: convert(cell.strip())
     for (key, convert), cell in zip(_PACKING_COLUMNS, row.split(" | "))}
    for row in packings_str.strip().splitlines()
]
# EXPORTING PACKING NAME
# Collect each distinct packing name once, preserving first-seen order.
# (Fixes: iterate the list directly instead of range(len(...)) indexing,
# and drop the dead `else: pass` branch.)
seen_packing_name = set()
export_packing_name = []
for packing in packings:
    if packing['name'] not in seen_packing_name:
        seen_packing_name.add(packing['name'])
        export_packing_name.append(packing['name'])
# # EXPORT PACKING SURFACEAREA
# export_packing_surfacearea = []
# for item in packings:
# if item["name"] == type_packing:
# export_packing_surfacearea.append(item["a"])
# print(export_packing_surfacearea)
|
normal
|
{
"blob_id": "c4f656b96ddc86ab2575bd5ec646833cce95e6a9",
"index": 1717,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in packings_str.strip().splitlines():\n line_items = line.split(' | ')\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({'name': name, 'material': material, 'size': size, 'N':\n int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':\n float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),\n 'CV': float(CV)})\n<mask token>\nfor i in range(len(packings)):\n if packings[i]['name'] not in seen_packing_name:\n seen_packing_name.add(packings[i]['name'])\n export_packing_name.append(packings[i]['name'])\n else:\n pass\n",
"step-3": "packings_str = \"\"\"\n Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450\n Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430\n Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440\n Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400\n Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300\n Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337\n Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360\n Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320\n Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336\n Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341\n Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410\n Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446\n Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380\n Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368\n Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333\n Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336\n Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341\n Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345\n Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333\n Ralu ring | plastic | 38.0 | 
13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333\n Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303\n NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410\n NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366\n NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425\n NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322\n Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402\n Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345\n Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408\n Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390\n Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342\n Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369\n Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465\n Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464\n Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379\n Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450\n Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398\n Glitsch CMR ring | metal | 0.5\" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495\n Glitsch CMR ring | metal | 1.0\" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0\n Glitsch CMR ring | metal | 1.5\"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0\n Glitsch CMR 
ring | metal | 1.5\" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0\n TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389\n Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412\n Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210\n VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405\n VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420\n Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459\n Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296\n Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257\n Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331\n Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390\n Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377\n Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302\n Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0\n Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0\n Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370\n Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232\n Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387\n DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354\n DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326\n Ralu pak | metal | YC-250 | 0 | 
250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385\n Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0\n Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0\n Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270\n Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327\n Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390\n Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422\n Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412\n Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0\n Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167\n\"\"\"\npackings = []\nfor line in packings_str.strip().splitlines():\n line_items = line.split(' | ')\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({'name': name, 'material': material, 'size': size, 'N':\n int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':\n float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),\n 'CV': float(CV)})\nseen_packing_name = set()\nexport_packing_name = []\nfor i in range(len(packings)):\n if packings[i]['name'] not in seen_packing_name:\n seen_packing_name.add(packings[i]['name'])\n export_packing_name.append(packings[i]['name'])\n else:\n pass\n",
"step-4": "# author Dominik Capkovic \n# contact: domcapkovic@gmail.com; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/\n# GitHub: https://github.com/kilimetr\n\n\npackings_str = '''\n Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450\n Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430\n Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440\n Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400\n Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300\n Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337\n Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360\n Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320\n Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336\n Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341\n Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410\n Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446\n Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380\n Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368\n Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333\n Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336\n Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341\n Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 
0.784 | 0.763 | 1.192 | 0.345\n Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333\n Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333\n Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303\n NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410\n NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366\n NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425\n NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322\n Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402\n Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345\n Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408\n Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390\n Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342\n Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369\n Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465\n Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464\n Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379\n Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450\n Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398\n Glitsch CMR ring | metal | 0.5\" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495\n Glitsch CMR ring | metal | 1.0\" | 158467 | 232.5 
| 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0\n Glitsch CMR ring | metal | 1.5\"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0\n Glitsch CMR ring | metal | 1.5\" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0\n TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389\n Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412\n Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210\n VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405\n VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420\n Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459\n Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296\n Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257\n Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331\n Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390\n Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377\n Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302\n Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0\n Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0\n Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370\n Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232\n Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387\n DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 
1.173 | 0.514 | 1.690 | 0.354\n DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326\n Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385\n Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0\n Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0\n Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270\n Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327\n Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390\n Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422\n Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412\n Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0\n Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167\n'''\n\n\n\npackings = []\n\nfor line in packings_str.strip().splitlines():\n line_items = line.split(\" | \")\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({\n 'name': name,\n 'material': material,\n 'size': size,\n 'N': int(N),\n 'a': float(a),\n 'eps': float(eps),\n 'CS': float(CS),\n 'CFl': float(CFl),\n 'Ch': float(Ch),\n 'CP0': float(CP0),\n 'CL': float(CL),\n 'CV': float(CV),\n })\n\n\n\n# EXPORTING PACKING NAME\nseen_packing_name = set()\nexport_packing_name = []\n\nfor i in range(len(packings)):\n if packings[i][\"name\"] not in seen_packing_name:\n seen_packing_name.add(packings[i][\"name\"]) \n export_packing_name.append(packings[i][\"name\"])\n else:\n pass\n\n\n# # EXPORT PACKING SURFACEAREA\n# export_packing_surfacearea = []\n\n# for item in packings:\n# if item[\"name\"] == type_packing:\n# 
export_packing_surfacearea.append(item[\"a\"])\n\n# print(export_packing_surfacearea)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount + 1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page' +
str(i) + '/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/' +
rating + '/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount + 1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page' +
str(i) + '/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/' +
rating + '/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
def getTitlesFromTop(amount, age='daily'):
output = ''
for i in range(1, amount + 1):
try:
html = urllib.request.urlopen('https://habr.com/top/' + age +
'/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
<|reserved_special_token_1|>
import urllib.request
from bs4 import BeautifulSoup
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount + 1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page' +
str(i) + '/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/' +
rating + '/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
def getTitlesFromTop(amount, age='daily'):
output = ''
for i in range(1, amount + 1):
try:
html = urllib.request.urlopen('https://habr.com/top/' + age +
'/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
<|reserved_special_token_1|>
import urllib.request
from bs4 import BeautifulSoup
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount+1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page'+ str(i) +'/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/'+ rating +'/page'+ str(i) +'/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_ = 'post__title_link')
for i in title:
i = i.get_text()
output += ('- "'+i+'",\n')
return output
def getTitlesFromTop(amount, age='daily'):
output = ''
for i in range(1, amount+1):
try:
html = urllib.request.urlopen('https://habr.com/top/'+ age +'/page'+ str(i) +'/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_ = 'post__title_link')
for i in title:
i = i.get_text()
output += ('- "'+i+'",\n')
return output
|
flexible
|
{
"blob_id": "d6cfea95c76021bdbfbb4471878c653564c9accd",
"index": 1816,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTitlesFromAll(amount, rating='all'):\n output = ''\n for i in range(1, amount + 1):\n try:\n if rating == 'all':\n html = urllib.request.urlopen('https://habr.com/all/page' +\n str(i) + '/').read()\n else:\n html = urllib.request.urlopen('https://habr.com/all/' +\n rating + '/page' + str(i) + '/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_='post__title_link')\n for i in title:\n i = i.get_text()\n output += '- \"' + i + '\",\\n'\n return output\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getTitlesFromAll(amount, rating='all'):\n output = ''\n for i in range(1, amount + 1):\n try:\n if rating == 'all':\n html = urllib.request.urlopen('https://habr.com/all/page' +\n str(i) + '/').read()\n else:\n html = urllib.request.urlopen('https://habr.com/all/' +\n rating + '/page' + str(i) + '/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_='post__title_link')\n for i in title:\n i = i.get_text()\n output += '- \"' + i + '\",\\n'\n return output\n\n\ndef getTitlesFromTop(amount, age='daily'):\n output = ''\n for i in range(1, amount + 1):\n try:\n html = urllib.request.urlopen('https://habr.com/top/' + age +\n '/page' + str(i) + '/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_='post__title_link')\n for i in title:\n i = i.get_text()\n output += '- \"' + i + '\",\\n'\n return output\n",
"step-4": "import urllib.request\nfrom bs4 import BeautifulSoup\n\n\ndef getTitlesFromAll(amount, rating='all'):\n output = ''\n for i in range(1, amount + 1):\n try:\n if rating == 'all':\n html = urllib.request.urlopen('https://habr.com/all/page' +\n str(i) + '/').read()\n else:\n html = urllib.request.urlopen('https://habr.com/all/' +\n rating + '/page' + str(i) + '/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_='post__title_link')\n for i in title:\n i = i.get_text()\n output += '- \"' + i + '\",\\n'\n return output\n\n\ndef getTitlesFromTop(amount, age='daily'):\n output = ''\n for i in range(1, amount + 1):\n try:\n html = urllib.request.urlopen('https://habr.com/top/' + age +\n '/page' + str(i) + '/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_='post__title_link')\n for i in title:\n i = i.get_text()\n output += '- \"' + i + '\",\\n'\n return output\n",
"step-5": "import urllib.request\nfrom bs4 import BeautifulSoup\n\ndef getTitlesFromAll(amount, rating='all'):\n output = ''\n for i in range(1, amount+1):\n try:\n if rating == 'all':\n html = urllib.request.urlopen('https://habr.com/all/page'+ str(i) +'/').read()\n else:\n html = urllib.request.urlopen('https://habr.com/all/'+ rating +'/page'+ str(i) +'/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_ = 'post__title_link')\n for i in title:\n i = i.get_text()\n output += ('- \"'+i+'\",\\n')\n return output\n\ndef getTitlesFromTop(amount, age='daily'):\n output = ''\n for i in range(1, amount+1):\n try:\n html = urllib.request.urlopen('https://habr.com/top/'+ age +'/page'+ str(i) +'/').read()\n except urllib.error.HTTPError:\n print('Error 404 Not Found')\n break\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find_all('a', class_ = 'post__title_link')\n for i in title:\n i = i.get_text()\n output += ('- \"'+i+'\",\\n')\n return output\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def slices(series, length):
if length <= 0:
raise ValueError('Length has to be at least 1')
elif length > len(series) or len(series) == 0:
raise ValueError('Length has to be larger than len of series')
elif length == len(series):
return [series]
else:
result = []
for i in range(0, len(series) - length + 1):
result.append(series[i:i + length])
return result
<|reserved_special_token_1|>
def slices(series, length):
if length <= 0:
raise ValueError("Length has to be at least 1")
elif length > len(series) or len(series) == 0:
raise ValueError("Length has to be larger than len of series")
elif length == len(series):
return [series]
else:
result = []
for i in range(0, len(series) - length + 1):
result.append(series[i:i+length])
return result
|
flexible
|
{
"blob_id": "207bb7c79de069ad5d980d18cdfc5c4ab86c5197",
"index": 6544,
"step-1": "<mask token>\n",
"step-2": "def slices(series, length):\n if length <= 0:\n raise ValueError('Length has to be at least 1')\n elif length > len(series) or len(series) == 0:\n raise ValueError('Length has to be larger than len of series')\n elif length == len(series):\n return [series]\n else:\n result = []\n for i in range(0, len(series) - length + 1):\n result.append(series[i:i + length])\n return result\n",
"step-3": "def slices(series, length):\n if length <= 0:\n raise ValueError(\"Length has to be at least 1\")\n elif length > len(series) or len(series) == 0:\n raise ValueError(\"Length has to be larger than len of series\")\n elif length == len(series):\n return [series]\n else:\n result = []\n for i in range(0, len(series) - length + 1):\n result.append(series[i:i+length])\n return result\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# You have an array arr of length n where arr[i] = (2 * i) + 1 for all valid values of i (i.e. 0 <= i < n).
# In one operation, you can select two indices x and y where 0 <= x, y < n and subtract 1 from arr[x] and add 1 to arr[y]
# (i.e. perform arr[x] -=1 and arr[y] += 1). The goal is to make all the elements of the array equal.
# It is guaranteed that all the elements of the array can be made equal using some operations.
# Given an integer n, the length of the array. Return the minimum number of operations needed to make all the elements of arr equal.
def minOps(n):
if n <= 1:
return 0
res = 0
if n%2 == 1:
for i in range(1, n//2 + 1):
res += i*2
return res
else:
for j in range(1, n//2 + 1):
res += j * 2 - 1
return res
def summationMin(n):
return n*n//4
for i in range(10):
print(summationMin(i))
|
normal
|
{
"blob_id": "d67842c05af9241dbe7e038a9b2dc4223ee7ef4d",
"index": 8055,
"step-1": "<mask token>\n",
"step-2": "def minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n % 2 == 1:\n for i in range(1, n // 2 + 1):\n res += i * 2\n return res\n else:\n for j in range(1, n // 2 + 1):\n res += j * 2 - 1\n return res\n\n\n<mask token>\n",
"step-3": "def minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n % 2 == 1:\n for i in range(1, n // 2 + 1):\n res += i * 2\n return res\n else:\n for j in range(1, n // 2 + 1):\n res += j * 2 - 1\n return res\n\n\ndef summationMin(n):\n return n * n // 4\n\n\n<mask token>\n",
"step-4": "def minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n % 2 == 1:\n for i in range(1, n // 2 + 1):\n res += i * 2\n return res\n else:\n for j in range(1, n // 2 + 1):\n res += j * 2 - 1\n return res\n\n\ndef summationMin(n):\n return n * n // 4\n\n\nfor i in range(10):\n print(summationMin(i))\n",
"step-5": "# You have an array arr of length n where arr[i] = (2 * i) + 1 for all valid values of i (i.e. 0 <= i < n).\n\n# In one operation, you can select two indices x and y where 0 <= x, y < n and subtract 1 from arr[x] and add 1 to arr[y] \n# (i.e. perform arr[x] -=1 and arr[y] += 1). The goal is to make all the elements of the array equal. \n# It is guaranteed that all the elements of the array can be made equal using some operations.\n\n# Given an integer n, the length of the array. Return the minimum number of operations needed to make all the elements of arr equal.\n\ndef minOps(n):\n if n <= 1:\n return 0\n res = 0\n if n%2 == 1:\n for i in range(1, n//2 + 1):\n res += i*2\n return res\n else:\n for j in range(1, n//2 + 1):\n res += j * 2 - 1\n return res\n\ndef summationMin(n):\n return n*n//4\n\n\nfor i in range(10):\n print(summationMin(i))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from graphviz import Digraph
from math import log2, ceil
def hue_to_rgb(p, q, t):
if t < 0: t += 1
if t > 1: t -= 1
if t < 1/6: return p + (q - p) * 6 * t
if t < 1/2: return q
if t < 2/3: return p + (q - p) * (2/3 - t) * 6
return p
def hsl_to_rgb(h, s, l):
h /= 360
q = l * (1 + s) if l < 0.5 else l + s - l * s
p = 2 * l - q
r = hue_to_rgb(p, q, h + 1/3)
g = hue_to_rgb(p, q, h)
b = hue_to_rgb(p, q, h - 1/3)
return r, g, b
def rgb_to_hex(r, g, b):
return f'#{int(r*255):02x}{int(g*255):02x}{int(b*255):02x}'
def hue(h):
return rgb_to_hex(*hsl_to_rgb(h, 0.5, 0.5))
def dfs(node, val):
if node.val == val: return node
for child in node.children:
found = dfs(child, val)
if found: return found
return None
def bfs(node, val):
q = [node]
while q:
node = q.pop(0)
if node.val == val: return node
q.extend(node.children)
return None
class Node:
def __init__(self, val, children=None, parent=None):
self.id = str(val)
self.val = val
self.parent = parent
self.depth = -1
self.size = -1
self.index = -1
self.attrs = {}
self._index = []
self.children = children if children else []
for child in self.children: child.under(self)
def by_index(self, index): return self._index[index]
def process(self, root):
index = Counter()
def dfs(node, depth):
node.depth = depth
node.size = 1
node.index = index.inc()
root._index.append(node)
for child in node.children:
dfs(child, depth + 1)
node.size += child.size
dfs(root, 0)
def adopt(self, child): self.children.append(child)
def under(self, parent): self.parent = parent
def __repr__(self): return f'{self.val} (d{self.depth} s{self.size})'
def render(self):
dot = Digraph(format=FORMAT,
node_attr={'shape': 'plaintext'},
edge_attr={'arrowsize': '0.5'},
)
self.render_(dot)
dot.render('binary_lifting', view=True)
def render_(self, dot):
dot.node(self.id, str(self), **self.attrs)
for child in self.children:
dot.edge(self.id, child.id)
child.render_(dot)
def find(self, val):
return dfs(self, val)
def example():
g = Node(1, [
Node(2, [
Node(4), Node(5, [
Node(8), Node(9, [
Node(10), Node(11, [
Node(18), Node(19, [
Node(22), Node(23), Node(24)
]), Node(20), Node(21)
])
])
])
]),
Node(3, [
Node(6, [
Node(12), Node(13, [
Node(14), Node(15, [
Node(16), Node(17)
])
])
]), Node(7)
])
])
g.process(g)
return g
dummy = Node(-1)
def climb(node):
path = [node]
while node.parent:
node = node.parent
path.append(node)
return path
class Counter:
def __init__(self):
self.count = 0
def inc(self):
count, self.count = self.count, self.count + 1
return count
class Lifting:
def __init__(self, root):
self.root = root
self.up = []
self.process(root)
@property
def l(self):
n = self.root.size
return ceil(log2(n))
def process(self, root):
timer = Counter()
tin, tout = {}, {}
n = root.size
up = []
for _ in range(n): up.append([None] * (self.l+1))
def dfs(node, parent):
print('visit', node.index)
tin[node.index] = timer.inc()
up[node.index][0] = parent.index
for i in range(1, self.l+1): up[node.index][i] = up[up[node.index][i-1]][i-1]
for child in node.children:
if child != parent: dfs(child, node)
tout[node.index] = timer.inc()
dfs(root, root)
self.up = up
self.tin = tin
self.tout = tout
print(tin)
print(tout)
def is_ancestor(self, a, b):
ai, bi = a.index, b.index
return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]
def lca(self, a, b):
if self.is_ancestor(a, b): return a
if self.is_ancestor(b, a): return b
for i in range(self.l, -1, -1):
print('i', i, 'index', a.index)
index = self.up[a.index][i]
p = self.root.by_index(index)
if not self.is_ancestor(p, b): a = p
index = self.up[a.index][0]
return self.root.by_index(index)
def lca_slow(self, a, b):
path_a = climb(a)[::-1]
path_b = climb(b)[::-1]
for i in range(len(path_a)):
if path_a[i] != path_b[i]:
return path_a[i - 1]
return path_a[-1]
def render(self):
dot = Digraph(format=FORMAT,
node_attr={'shape': 'plaintext'},
edge_attr={'arrowsize': '0.5'},
engine='dot',
)
self.root.render_(dot)
for i in range(len(self.up)):
angle = i/len(self.up)*360.0 + i%2*180.0
color = hue(angle)
for j in range(self.l+1):
p = self.up[i][j]
if p != 0:
a = self.root.by_index(i)
b = self.root.by_index(p)
dot.edge(a.id, b.id, style='dashed', color=color)
dot.render('binary_lifting', view=True)
FORMAT = 'svg'

if __name__ == '__main__':
    # Demo: build the sample tree, answer one LCA query, color the result.
    tree = example()
    lifting = Lifting(tree)
    # ancestor = lifting.lca_slow(tree.find(10), tree.find(17))
    node_a = tree.find(8)
    node_b = tree.find(20)
    ancestor = lifting.lca(node_a, node_b)
    node_a.attrs['fontcolor'] = 'red'
    node_b.attrs['fontcolor'] = 'red'
    ancestor.attrs['fontcolor'] = 'green'
    lifting.render()
|
normal
|
{
"blob_id": "881afd6877508243fa5056d2a82d88ba69ffb8c0",
"index": 7801,
"step-1": "<mask token>\n\n\nclass Node:\n\n def __init__(self, val, children=None, parent=None):\n self.id = str(val)\n self.val = val\n self.parent = parent\n self.depth = -1\n self.size = -1\n self.index = -1\n self.attrs = {}\n self._index = []\n self.children = children if children else []\n for child in self.children:\n child.under(self)\n\n def by_index(self, index):\n return self._index[index]\n\n def process(self, root):\n index = Counter()\n\n def dfs(node, depth):\n node.depth = depth\n node.size = 1\n node.index = index.inc()\n root._index.append(node)\n for child in node.children:\n dfs(child, depth + 1)\n node.size += child.size\n dfs(root, 0)\n\n def adopt(self, child):\n self.children.append(child)\n\n def under(self, parent):\n self.parent = parent\n\n def __repr__(self):\n return f'{self.val} (d{self.depth} s{self.size})'\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'})\n self.render_(dot)\n dot.render('binary_lifting', view=True)\n\n def render_(self, dot):\n dot.node(self.id, str(self), **self.attrs)\n for child in self.children:\n dot.edge(self.id, child.id)\n child.render_(dot)\n\n def find(self, val):\n return dfs(self, val)\n\n\n<mask token>\n\n\nclass Counter:\n\n def __init__(self):\n self.count = 0\n\n def inc(self):\n count, self.count = self.count, self.count + 1\n return count\n\n\nclass Lifting:\n\n def __init__(self, root):\n self.root = root\n self.up = []\n self.process(root)\n\n @property\n def l(self):\n n = self.root.size\n return ceil(log2(n))\n\n def process(self, root):\n timer = Counter()\n tin, tout = {}, {}\n n = root.size\n up = []\n for _ in range(n):\n up.append([None] * (self.l + 1))\n\n def dfs(node, parent):\n print('visit', node.index)\n tin[node.index] = timer.inc()\n up[node.index][0] = parent.index\n for i in range(1, self.l + 1):\n up[node.index][i] = up[up[node.index][i - 1]][i - 1]\n for child in node.children:\n if child != parent:\n 
dfs(child, node)\n tout[node.index] = timer.inc()\n dfs(root, root)\n self.up = up\n self.tin = tin\n self.tout = tout\n print(tin)\n print(tout)\n\n def is_ancestor(self, a, b):\n ai, bi = a.index, b.index\n return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]\n\n def lca(self, a, b):\n if self.is_ancestor(a, b):\n return a\n if self.is_ancestor(b, a):\n return b\n for i in range(self.l, -1, -1):\n print('i', i, 'index', a.index)\n index = self.up[a.index][i]\n p = self.root.by_index(index)\n if not self.is_ancestor(p, b):\n a = p\n index = self.up[a.index][0]\n return self.root.by_index(index)\n\n def lca_slow(self, a, b):\n path_a = climb(a)[::-1]\n path_b = climb(b)[::-1]\n for i in range(len(path_a)):\n if path_a[i] != path_b[i]:\n return path_a[i - 1]\n return path_a[-1]\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'}, engine='dot')\n self.root.render_(dot)\n for i in range(len(self.up)):\n angle = i / len(self.up) * 360.0 + i % 2 * 180.0\n color = hue(angle)\n for j in range(self.l + 1):\n p = self.up[i][j]\n if p != 0:\n a = self.root.by_index(i)\n b = self.root.by_index(p)\n dot.edge(a.id, b.id, style='dashed', color=color)\n dot.render('binary_lifting', view=True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef hsl_to_rgb(h, s, l):\n h /= 360\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n r = hue_to_rgb(p, q, h + 1 / 3)\n g = hue_to_rgb(p, q, h)\n b = hue_to_rgb(p, q, h - 1 / 3)\n return r, g, b\n\n\n<mask token>\n\n\nclass Node:\n\n def __init__(self, val, children=None, parent=None):\n self.id = str(val)\n self.val = val\n self.parent = parent\n self.depth = -1\n self.size = -1\n self.index = -1\n self.attrs = {}\n self._index = []\n self.children = children if children else []\n for child in self.children:\n child.under(self)\n\n def by_index(self, index):\n return self._index[index]\n\n def process(self, root):\n index = Counter()\n\n def dfs(node, depth):\n node.depth = depth\n node.size = 1\n node.index = index.inc()\n root._index.append(node)\n for child in node.children:\n dfs(child, depth + 1)\n node.size += child.size\n dfs(root, 0)\n\n def adopt(self, child):\n self.children.append(child)\n\n def under(self, parent):\n self.parent = parent\n\n def __repr__(self):\n return f'{self.val} (d{self.depth} s{self.size})'\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'})\n self.render_(dot)\n dot.render('binary_lifting', view=True)\n\n def render_(self, dot):\n dot.node(self.id, str(self), **self.attrs)\n for child in self.children:\n dot.edge(self.id, child.id)\n child.render_(dot)\n\n def find(self, val):\n return dfs(self, val)\n\n\n<mask token>\n\n\nclass Counter:\n\n def __init__(self):\n self.count = 0\n\n def inc(self):\n count, self.count = self.count, self.count + 1\n return count\n\n\nclass Lifting:\n\n def __init__(self, root):\n self.root = root\n self.up = []\n self.process(root)\n\n @property\n def l(self):\n n = self.root.size\n return ceil(log2(n))\n\n def process(self, root):\n timer = Counter()\n tin, tout = {}, {}\n n = root.size\n up = []\n for _ in range(n):\n up.append([None] * (self.l + 1))\n\n def dfs(node, parent):\n 
print('visit', node.index)\n tin[node.index] = timer.inc()\n up[node.index][0] = parent.index\n for i in range(1, self.l + 1):\n up[node.index][i] = up[up[node.index][i - 1]][i - 1]\n for child in node.children:\n if child != parent:\n dfs(child, node)\n tout[node.index] = timer.inc()\n dfs(root, root)\n self.up = up\n self.tin = tin\n self.tout = tout\n print(tin)\n print(tout)\n\n def is_ancestor(self, a, b):\n ai, bi = a.index, b.index\n return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]\n\n def lca(self, a, b):\n if self.is_ancestor(a, b):\n return a\n if self.is_ancestor(b, a):\n return b\n for i in range(self.l, -1, -1):\n print('i', i, 'index', a.index)\n index = self.up[a.index][i]\n p = self.root.by_index(index)\n if not self.is_ancestor(p, b):\n a = p\n index = self.up[a.index][0]\n return self.root.by_index(index)\n\n def lca_slow(self, a, b):\n path_a = climb(a)[::-1]\n path_b = climb(b)[::-1]\n for i in range(len(path_a)):\n if path_a[i] != path_b[i]:\n return path_a[i - 1]\n return path_a[-1]\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'}, engine='dot')\n self.root.render_(dot)\n for i in range(len(self.up)):\n angle = i / len(self.up) * 360.0 + i % 2 * 180.0\n color = hue(angle)\n for j in range(self.l + 1):\n p = self.up[i][j]\n if p != 0:\n a = self.root.by_index(i)\n b = self.root.by_index(p)\n dot.edge(a.id, b.id, style='dashed', color=color)\n dot.render('binary_lifting', view=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hue_to_rgb(p, q, t):\n if t < 0:\n t += 1\n if t > 1:\n t -= 1\n if t < 1 / 6:\n return p + (q - p) * 6 * t\n if t < 1 / 2:\n return q\n if t < 2 / 3:\n return p + (q - p) * (2 / 3 - t) * 6\n return p\n\n\ndef hsl_to_rgb(h, s, l):\n h /= 360\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n r = hue_to_rgb(p, q, h + 1 / 3)\n g = hue_to_rgb(p, q, h)\n b = hue_to_rgb(p, q, h - 1 / 3)\n return r, g, b\n\n\n<mask token>\n\n\ndef dfs(node, val):\n if node.val == val:\n return node\n for child in node.children:\n found = dfs(child, val)\n if found:\n return found\n return None\n\n\n<mask token>\n\n\nclass Node:\n\n def __init__(self, val, children=None, parent=None):\n self.id = str(val)\n self.val = val\n self.parent = parent\n self.depth = -1\n self.size = -1\n self.index = -1\n self.attrs = {}\n self._index = []\n self.children = children if children else []\n for child in self.children:\n child.under(self)\n\n def by_index(self, index):\n return self._index[index]\n\n def process(self, root):\n index = Counter()\n\n def dfs(node, depth):\n node.depth = depth\n node.size = 1\n node.index = index.inc()\n root._index.append(node)\n for child in node.children:\n dfs(child, depth + 1)\n node.size += child.size\n dfs(root, 0)\n\n def adopt(self, child):\n self.children.append(child)\n\n def under(self, parent):\n self.parent = parent\n\n def __repr__(self):\n return f'{self.val} (d{self.depth} s{self.size})'\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'})\n self.render_(dot)\n dot.render('binary_lifting', view=True)\n\n def render_(self, dot):\n dot.node(self.id, str(self), **self.attrs)\n for child in self.children:\n dot.edge(self.id, child.id)\n child.render_(dot)\n\n def find(self, val):\n return dfs(self, val)\n\n\n<mask token>\n\n\ndef climb(node):\n path = [node]\n while node.parent:\n node = node.parent\n path.append(node)\n return 
path\n\n\nclass Counter:\n\n def __init__(self):\n self.count = 0\n\n def inc(self):\n count, self.count = self.count, self.count + 1\n return count\n\n\nclass Lifting:\n\n def __init__(self, root):\n self.root = root\n self.up = []\n self.process(root)\n\n @property\n def l(self):\n n = self.root.size\n return ceil(log2(n))\n\n def process(self, root):\n timer = Counter()\n tin, tout = {}, {}\n n = root.size\n up = []\n for _ in range(n):\n up.append([None] * (self.l + 1))\n\n def dfs(node, parent):\n print('visit', node.index)\n tin[node.index] = timer.inc()\n up[node.index][0] = parent.index\n for i in range(1, self.l + 1):\n up[node.index][i] = up[up[node.index][i - 1]][i - 1]\n for child in node.children:\n if child != parent:\n dfs(child, node)\n tout[node.index] = timer.inc()\n dfs(root, root)\n self.up = up\n self.tin = tin\n self.tout = tout\n print(tin)\n print(tout)\n\n def is_ancestor(self, a, b):\n ai, bi = a.index, b.index\n return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]\n\n def lca(self, a, b):\n if self.is_ancestor(a, b):\n return a\n if self.is_ancestor(b, a):\n return b\n for i in range(self.l, -1, -1):\n print('i', i, 'index', a.index)\n index = self.up[a.index][i]\n p = self.root.by_index(index)\n if not self.is_ancestor(p, b):\n a = p\n index = self.up[a.index][0]\n return self.root.by_index(index)\n\n def lca_slow(self, a, b):\n path_a = climb(a)[::-1]\n path_b = climb(b)[::-1]\n for i in range(len(path_a)):\n if path_a[i] != path_b[i]:\n return path_a[i - 1]\n return path_a[-1]\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'}, engine='dot')\n self.root.render_(dot)\n for i in range(len(self.up)):\n angle = i / len(self.up) * 360.0 + i % 2 * 180.0\n color = hue(angle)\n for j in range(self.l + 1):\n p = self.up[i][j]\n if p != 0:\n a = self.root.by_index(i)\n b = self.root.by_index(p)\n dot.edge(a.id, b.id, style='dashed', color=color)\n 
dot.render('binary_lifting', view=True)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef hue_to_rgb(p, q, t):\n if t < 0:\n t += 1\n if t > 1:\n t -= 1\n if t < 1 / 6:\n return p + (q - p) * 6 * t\n if t < 1 / 2:\n return q\n if t < 2 / 3:\n return p + (q - p) * (2 / 3 - t) * 6\n return p\n\n\ndef hsl_to_rgb(h, s, l):\n h /= 360\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n r = hue_to_rgb(p, q, h + 1 / 3)\n g = hue_to_rgb(p, q, h)\n b = hue_to_rgb(p, q, h - 1 / 3)\n return r, g, b\n\n\ndef rgb_to_hex(r, g, b):\n return f'#{int(r * 255):02x}{int(g * 255):02x}{int(b * 255):02x}'\n\n\ndef hue(h):\n return rgb_to_hex(*hsl_to_rgb(h, 0.5, 0.5))\n\n\ndef dfs(node, val):\n if node.val == val:\n return node\n for child in node.children:\n found = dfs(child, val)\n if found:\n return found\n return None\n\n\ndef bfs(node, val):\n q = [node]\n while q:\n node = q.pop(0)\n if node.val == val:\n return node\n q.extend(node.children)\n return None\n\n\nclass Node:\n\n def __init__(self, val, children=None, parent=None):\n self.id = str(val)\n self.val = val\n self.parent = parent\n self.depth = -1\n self.size = -1\n self.index = -1\n self.attrs = {}\n self._index = []\n self.children = children if children else []\n for child in self.children:\n child.under(self)\n\n def by_index(self, index):\n return self._index[index]\n\n def process(self, root):\n index = Counter()\n\n def dfs(node, depth):\n node.depth = depth\n node.size = 1\n node.index = index.inc()\n root._index.append(node)\n for child in node.children:\n dfs(child, depth + 1)\n node.size += child.size\n dfs(root, 0)\n\n def adopt(self, child):\n self.children.append(child)\n\n def under(self, parent):\n self.parent = parent\n\n def __repr__(self):\n return f'{self.val} (d{self.depth} s{self.size})'\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'})\n self.render_(dot)\n dot.render('binary_lifting', view=True)\n\n def render_(self, dot):\n dot.node(self.id, str(self), **self.attrs)\n 
for child in self.children:\n dot.edge(self.id, child.id)\n child.render_(dot)\n\n def find(self, val):\n return dfs(self, val)\n\n\ndef example():\n g = Node(1, [Node(2, [Node(4), Node(5, [Node(8), Node(9, [Node(10),\n Node(11, [Node(18), Node(19, [Node(22), Node(23), Node(24)]), Node(\n 20), Node(21)])])])]), Node(3, [Node(6, [Node(12), Node(13, [Node(\n 14), Node(15, [Node(16), Node(17)])])]), Node(7)])])\n g.process(g)\n return g\n\n\n<mask token>\n\n\ndef climb(node):\n path = [node]\n while node.parent:\n node = node.parent\n path.append(node)\n return path\n\n\nclass Counter:\n\n def __init__(self):\n self.count = 0\n\n def inc(self):\n count, self.count = self.count, self.count + 1\n return count\n\n\nclass Lifting:\n\n def __init__(self, root):\n self.root = root\n self.up = []\n self.process(root)\n\n @property\n def l(self):\n n = self.root.size\n return ceil(log2(n))\n\n def process(self, root):\n timer = Counter()\n tin, tout = {}, {}\n n = root.size\n up = []\n for _ in range(n):\n up.append([None] * (self.l + 1))\n\n def dfs(node, parent):\n print('visit', node.index)\n tin[node.index] = timer.inc()\n up[node.index][0] = parent.index\n for i in range(1, self.l + 1):\n up[node.index][i] = up[up[node.index][i - 1]][i - 1]\n for child in node.children:\n if child != parent:\n dfs(child, node)\n tout[node.index] = timer.inc()\n dfs(root, root)\n self.up = up\n self.tin = tin\n self.tout = tout\n print(tin)\n print(tout)\n\n def is_ancestor(self, a, b):\n ai, bi = a.index, b.index\n return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]\n\n def lca(self, a, b):\n if self.is_ancestor(a, b):\n return a\n if self.is_ancestor(b, a):\n return b\n for i in range(self.l, -1, -1):\n print('i', i, 'index', a.index)\n index = self.up[a.index][i]\n p = self.root.by_index(index)\n if not self.is_ancestor(p, b):\n a = p\n index = self.up[a.index][0]\n return self.root.by_index(index)\n\n def lca_slow(self, a, b):\n path_a = climb(a)[::-1]\n path_b = 
climb(b)[::-1]\n for i in range(len(path_a)):\n if path_a[i] != path_b[i]:\n return path_a[i - 1]\n return path_a[-1]\n\n def render(self):\n dot = Digraph(format=FORMAT, node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'}, engine='dot')\n self.root.render_(dot)\n for i in range(len(self.up)):\n angle = i / len(self.up) * 360.0 + i % 2 * 180.0\n color = hue(angle)\n for j in range(self.l + 1):\n p = self.up[i][j]\n if p != 0:\n a = self.root.by_index(i)\n b = self.root.by_index(p)\n dot.edge(a.id, b.id, style='dashed', color=color)\n dot.render('binary_lifting', view=True)\n\n\n<mask token>\n",
"step-5": "from graphviz import Digraph\nfrom math import log2, ceil\n\ndef hue_to_rgb(p, q, t):\n if t < 0: t += 1\n if t > 1: t -= 1\n if t < 1/6: return p + (q - p) * 6 * t\n if t < 1/2: return q\n if t < 2/3: return p + (q - p) * (2/3 - t) * 6\n return p\n\ndef hsl_to_rgb(h, s, l):\n h /= 360\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n r = hue_to_rgb(p, q, h + 1/3)\n g = hue_to_rgb(p, q, h)\n b = hue_to_rgb(p, q, h - 1/3)\n return r, g, b\n\ndef rgb_to_hex(r, g, b):\n return f'#{int(r*255):02x}{int(g*255):02x}{int(b*255):02x}'\n\ndef hue(h):\n return rgb_to_hex(*hsl_to_rgb(h, 0.5, 0.5))\n\ndef dfs(node, val):\n if node.val == val: return node\n for child in node.children:\n found = dfs(child, val)\n if found: return found\n return None\n\ndef bfs(node, val):\n q = [node]\n while q:\n node = q.pop(0)\n if node.val == val: return node\n q.extend(node.children)\n return None\n\nclass Node:\n def __init__(self, val, children=None, parent=None):\n self.id = str(val)\n self.val = val\n self.parent = parent\n self.depth = -1\n self.size = -1\n self.index = -1\n self.attrs = {}\n self._index = []\n self.children = children if children else []\n for child in self.children: child.under(self)\n def by_index(self, index): return self._index[index]\n def process(self, root):\n index = Counter()\n def dfs(node, depth):\n node.depth = depth\n node.size = 1\n node.index = index.inc()\n root._index.append(node)\n for child in node.children:\n dfs(child, depth + 1)\n node.size += child.size\n dfs(root, 0)\n def adopt(self, child): self.children.append(child)\n def under(self, parent): self.parent = parent\n def __repr__(self): return f'{self.val} (d{self.depth} s{self.size})'\n def render(self):\n dot = Digraph(format=FORMAT,\n node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'},\n )\n self.render_(dot)\n dot.render('binary_lifting', view=True)\n def render_(self, dot):\n dot.node(self.id, str(self), **self.attrs)\n for child in 
self.children:\n dot.edge(self.id, child.id)\n child.render_(dot)\n def find(self, val):\n return dfs(self, val)\n\ndef example():\n g = Node(1, [\n Node(2, [\n Node(4), Node(5, [\n Node(8), Node(9, [\n Node(10), Node(11, [\n Node(18), Node(19, [\n Node(22), Node(23), Node(24)\n ]), Node(20), Node(21)\n ])\n ])\n ])\n ]),\n Node(3, [\n Node(6, [\n Node(12), Node(13, [\n Node(14), Node(15, [\n Node(16), Node(17)\n ])\n ])\n ]), Node(7)\n ])\n ])\n g.process(g)\n return g\n\ndummy = Node(-1)\n\ndef climb(node):\n path = [node]\n while node.parent:\n node = node.parent\n path.append(node)\n return path\n\nclass Counter:\n def __init__(self):\n self.count = 0\n def inc(self):\n count, self.count = self.count, self.count + 1\n return count\n\nclass Lifting:\n def __init__(self, root):\n self.root = root\n self.up = []\n self.process(root)\n @property\n def l(self):\n n = self.root.size\n return ceil(log2(n))\n def process(self, root):\n timer = Counter()\n tin, tout = {}, {}\n n = root.size\n up = []\n for _ in range(n): up.append([None] * (self.l+1))\n def dfs(node, parent):\n print('visit', node.index)\n tin[node.index] = timer.inc()\n up[node.index][0] = parent.index\n for i in range(1, self.l+1): up[node.index][i] = up[up[node.index][i-1]][i-1]\n for child in node.children:\n if child != parent: dfs(child, node)\n tout[node.index] = timer.inc()\n dfs(root, root)\n self.up = up\n self.tin = tin\n self.tout = tout\n print(tin)\n print(tout)\n def is_ancestor(self, a, b):\n ai, bi = a.index, b.index\n return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]\n def lca(self, a, b):\n if self.is_ancestor(a, b): return a\n if self.is_ancestor(b, a): return b\n for i in range(self.l, -1, -1):\n print('i', i, 'index', a.index)\n index = self.up[a.index][i]\n p = self.root.by_index(index)\n if not self.is_ancestor(p, b): a = p\n index = self.up[a.index][0]\n return self.root.by_index(index)\n def lca_slow(self, a, b):\n path_a = climb(a)[::-1]\n path_b = 
climb(b)[::-1]\n for i in range(len(path_a)):\n if path_a[i] != path_b[i]:\n return path_a[i - 1]\n return path_a[-1]\n def render(self):\n dot = Digraph(format=FORMAT,\n node_attr={'shape': 'plaintext'},\n edge_attr={'arrowsize': '0.5'},\n engine='dot',\n )\n self.root.render_(dot)\n for i in range(len(self.up)):\n angle = i/len(self.up)*360.0 + i%2*180.0\n color = hue(angle)\n for j in range(self.l+1):\n p = self.up[i][j]\n if p != 0:\n a = self.root.by_index(i)\n b = self.root.by_index(p)\n dot.edge(a.id, b.id, style='dashed', color=color)\n dot.render('binary_lifting', view=True)\n\n\nFORMAT = 'svg'\n\nif __name__ == '__main__':\n g = example()\n l = Lifting(g)\n #p = l.lca_slow(g.find(10), g.find(17))\n a = g.find(8)\n b = g.find(20)\n p = l.lca(a, b)\n a.attrs['fontcolor'] = 'red'\n b.attrs['fontcolor'] = 'red'\n p.attrs['fontcolor'] = 'green'\n l.render()\n",
"step-ids": [
21,
22,
25,
29,
33
]
}
|
[
21,
22,
25,
29,
33
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getData():
    """Estimate the optimized 48-hour energy bill.

    Pipeline: read the scraped temperature forecast, map temperatures to
    estimated HOEP prices via a season-specific linear regression, build an
    hourly load schedule from the load spreadsheet, then minimize the total
    energy cost with SLSQP over a 48-hour battery power plan.

    Returns:
        dict: ``{'EA_w_bill': <minimized bill, rounded to 2 decimals>}``
    """
    weather_data = pd.read_csv('data/weather_data.csv')
    currentMonth = datetime.now().month
    currentHour = datetime.now().hour
    # Two-month season group selected by integer division.
    # NOTE(review): month 12 yields group 6, which falls through to the last
    # regression below but indexes column 6 of load_data — confirm the
    # spreadsheet actually has that column.
    currentMonthGroup = currentMonth // 2
    hoep_data = []
    temp = weather_data.iloc[:, 2]
    # Relabel the hour column with hour-of-day values starting from now.
    for i in range(len(temp)):
        weather_data.iloc[i, 1] = (currentHour + i) % 24
    # Temperature -> HOEP price: one linear regression per season group.
    if currentMonthGroup == 0:
        hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)
    elif currentMonthGroup == 1:
        hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)
    elif currentMonthGroup == 2:
        hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)
    elif currentMonthGroup == 3:
        hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)
    elif currentMonthGroup == 4:
        hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)
    else:
        hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)
    # Hourly load table: rows indexed by hour-of-day, columns by season group.
    load_data = pd.read_excel('data/load_data.xlsx', index_col=0,
                              engine='openpyxl')
    load_sched = np.arange(48)
    for i in range(len(temp)):
        load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],
                                       currentMonthGroup]
    WMST = 0.003499

    # Decision vector: x[0:48] is the hourly battery power plan; x[48:96] is
    # the running battery state, recomputed in place inside each constraint.
    # (Removed an unused `start_time = time.time()` left over from profiling.)
    def constraint1(x):
        """SLSQP inequality: returned values must be >= 0 (lower bound)."""
        for i in range(48):
            if i == 0:
                x[48] = 0
            else:
                x[48 + i] = x[48 + i - 1] + x[i]
        return x[0:48] + x[48:96]

    def constraint2(x):
        """SLSQP inequality: keeps power + state at or below the 10000 cap."""
        for i in range(48):
            if i == 0:
                x[48] = 0
            else:
                x[48 + i] = x[48 + i - 1] + x[i]
        return 10000 - (x[0:48] + x[48:96])

    power = ((-5000, 5000),) * 48
    storage = ((0, 10000),) * 48

    def MEC(x):
        """Objective: total cost of (load + battery power) at HOEP + WMST."""
        return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(
            hoep_data) + WMST)))

    x0 = np.array([np.ones(48), np.ones(48)])
    bounds = power + storage
    cons1 = {'type': 'ineq', 'fun': constraint1}
    cons2 = {'type': 'ineq', 'fun': constraint2}
    cons = [cons1, cons2]
    sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,
                   options={'maxiter': 150, 'disp': True})
    input_var = {'EA_w_bill': round(sol.fun, 2)}
    return input_var
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from datetime import datetime
import time
from functions import weather_scraper
def getData():
    """Estimate the optimized 48-hour energy bill.

    Reads the temperature forecast from ``data/weather_data.csv``, converts
    it to estimated HOEP prices with a season-specific linear regression,
    builds an hourly load schedule from ``data/load_data.xlsx``, then runs an
    SLSQP optimization over a 48-hour battery plan.

    Returns:
        dict: ``{'EA_w_bill': <minimized bill, rounded to 2 decimals>}``
    """
    weather_data = pd.read_csv('data/weather_data.csv')
    currentMonth = datetime.now().month
    currentHour = datetime.now().hour
    # Two-month season group; NOTE(review): month 12 gives group 6, which
    # hits the else-branch below but indexes column 6 of load_data — confirm.
    currentMonthGroup = currentMonth // 2
    hoep_data = []
    temp = weather_data.iloc[:, 2]
    # Relabel the hour column with hour-of-day values starting from now.
    for i in range(len(temp)):
        weather_data.iloc[i, 1] = (currentHour + i) % 24
    # Temperature -> HOEP price: one linear regression per season group.
    if currentMonthGroup == 0:
        hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)
    elif currentMonthGroup == 1:
        hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)
    elif currentMonthGroup == 2:
        hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)
    elif currentMonthGroup == 3:
        hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)
    elif currentMonthGroup == 4:
        hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)
    else:
        hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)
    # Hourly load table: rows indexed by hour-of-day, columns by season group.
    load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=
        'openpyxl')
    load_sched = np.arange(48)
    for i in range(len(temp)):
        load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],
            currentMonthGroup]
    WMST = 0.003499
    # NOTE(review): start_time is never read afterwards — leftover profiling.
    start_time = time.time()

    # Decision vector: x[0:48] = battery power plan, x[48:96] = running
    # battery state, recomputed in place inside each constraint.
    def constraint1(x):
        # SLSQP inequality: returned values must be >= 0.
        for i in range(48):
            if i == 0:
                x[48] = 0
            else:
                x[48 + i] = x[48 + i - 1] + x[i]
        return x[0:48] + x[48:96]

    def constraint2(x):
        # SLSQP inequality: keeps power + state at or below the 10000 cap.
        for i in range(48):
            if i == 0:
                x[48] = 0
            else:
                x[48 + i] = x[48 + i - 1] + x[i]
        return 10000 - (x[0:48] + x[48:96])
    power = ((-5000, 5000),) * 48
    storage = ((0, 10000),) * 48

    def MEC(x):
        # Objective: total cost of (load + battery power) at HOEP + WMST.
        return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(
            hoep_data) + WMST)))
    x0 = np.array([np.ones(48), np.ones(48)])
    bounds = power + storage
    cons1 = {'type': 'ineq', 'fun': constraint1}
    cons2 = {'type': 'ineq', 'fun': constraint2}
    cons = [cons1, cons2]
    sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,
        options={'maxiter': 150, 'disp': True})
    input_var = {'EA_w_bill': round(sol.fun, 2)}
    return input_var
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from datetime import datetime
import time
from functions import weather_scraper
def getData():
    """Estimate the optimized 48-hour energy bill from forecast data."""
    # # run weather_scraper.py to fetch new weather data
    # weather_scraper.getData()

    # Temperature forecast previously scraped into CSV form.
    weather_data = pd.read_csv("data/weather_data.csv")

    # Current month and hour drive the season model and the hour labels.
    currentMonth = datetime.now().month
    currentHour = datetime.now().hour

    # Two-month groups indexed by integer division of the month.
    currentMonthGroup = currentMonth // 2

    temp = weather_data.iloc[:, 2]

    # Relabel each forecast row with its hour-of-day, wrapping past midnight.
    for row in range(len(temp)):
        weather_data.iloc[row, 1] = (currentHour + row) % 24

    # Season-specific linear regressions mapping temperature to HOEP:
    # group -> (slope, intercept), applied as (slope*T + intercept)/100.
    # The last pair doubles as the fallback for any out-of-table group.
    season_coeffs = {
        0: (2.02887, 39.633),
        1: (0.453122, 19.8276),
        2: (1.13665, -11.0085),
        3: (1.90245, -23.2826),
        4: (1.39145, -8.97971),
    }
    slope, intercept = season_coeffs.get(currentMonthGroup,
                                         (1.72767, 21.3536))
    hoep_data = temp.apply(lambda x: (slope * x + intercept) / 100)

    # Hourly load table: rows are hours of day, columns are month groups.
    load_data = pd.read_excel('data/load_data.xlsx', index_col=0,
                              engine='openpyxl')

    # Expand the table into a 48-entry schedule aligned with the forecast.
    load_sched = np.arange(48)
    for row in range(len(temp)):
        load_sched[row] = load_data.iloc[weather_data.iloc[row, 1],
                                         currentMonthGroup]

    WMST = 0.003499

    # Decision vector layout: x[0:48] = PCEA, x[48:96] = ESB.
    start_time = time.time()

    # Constraints to ensure that ESB stays within its limits; each one
    # recomputes the running battery state in place before evaluating.
    def constraint1(x):
        for hour in range(48):
            if hour == 0:
                x[48] = 0
            else:
                x[48 + hour] = x[48 + hour - 1] + x[hour]
        return x[0:48] + x[48:96]

    def constraint2(x):
        for hour in range(48):
            if hour == 0:
                x[48] = 0
            else:
                x[48 + hour] = x[48 + hour - 1] + x[hour]
        return 10000 - (x[0:48] + x[48:96])

    power = ((-5000, 5000),) * 48
    storage = ((0, 10000),) * 48

    # Objective: ( PDLL + PCEA ) x (HOEP + WMST)
    def MEC(x):
        net_load = load_sched + np.array([x[0:48]])
        tariff = np.array(hoep_data) + WMST
        return sum(sum(net_load * tariff))

    x0 = np.array([np.ones(48), np.ones(48)])

    bounds = power + storage
    constraints = [
        {'type': 'ineq', 'fun': constraint1},
        {'type': 'ineq', 'fun': constraint2},
    ]

    sol = minimize(MEC, x0, method='SLSQP', bounds=bounds,
                   constraints=constraints,
                   options={'maxiter': 150, 'disp': True})

    return {"EA_w_bill": round(sol.fun, 2)}
|
flexible
|
{
"blob_id": "7a1bd2b4734527a414c6173ea8edb150221f8042",
"index": 363,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getData():\n weather_data = pd.read_csv('data/weather_data.csv')\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n currentMonthGroup = currentMonth // 2\n hoep_data = []\n temp = weather_data.iloc[:, 2]\n for i in range(len(temp)):\n weather_data.iloc[i, 1] = (currentHour + i) % 24\n if currentMonthGroup == 0:\n hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)\n elif currentMonthGroup == 1:\n hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)\n elif currentMonthGroup == 2:\n hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)\n elif currentMonthGroup == 3:\n hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)\n elif currentMonthGroup == 4:\n hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=\n 'openpyxl')\n load_sched = np.arange(48)\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],\n currentMonthGroup]\n WMST = 0.003499\n start_time = time.time()\n\n def constraint1(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return x[0:48] + x[48:96]\n\n def constraint2(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return 10000 - (x[0:48] + x[48:96])\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n def MEC(x):\n return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(\n hoep_data) + WMST)))\n x0 = np.array([np.ones(48), np.ones(48)])\n bounds = power + storage\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n cons = [cons1, cons2]\n sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,\n options={'maxiter': 150, 'disp': True})\n input_var = {'EA_w_bill': round(sol.fun, 2)}\n return 
input_var\n",
"step-3": "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom datetime import datetime\nimport time\nfrom functions import weather_scraper\n\n\ndef getData():\n weather_data = pd.read_csv('data/weather_data.csv')\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n currentMonthGroup = currentMonth // 2\n hoep_data = []\n temp = weather_data.iloc[:, 2]\n for i in range(len(temp)):\n weather_data.iloc[i, 1] = (currentHour + i) % 24\n if currentMonthGroup == 0:\n hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)\n elif currentMonthGroup == 1:\n hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)\n elif currentMonthGroup == 2:\n hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)\n elif currentMonthGroup == 3:\n hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)\n elif currentMonthGroup == 4:\n hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=\n 'openpyxl')\n load_sched = np.arange(48)\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],\n currentMonthGroup]\n WMST = 0.003499\n start_time = time.time()\n\n def constraint1(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return x[0:48] + x[48:96]\n\n def constraint2(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return 10000 - (x[0:48] + x[48:96])\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n def MEC(x):\n return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(\n hoep_data) + WMST)))\n x0 = np.array([np.ones(48), np.ones(48)])\n bounds = power + storage\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n cons = [cons1, cons2]\n sol = minimize(MEC, x0, 
method='SLSQP', bounds=bounds, constraints=cons,\n options={'maxiter': 150, 'disp': True})\n input_var = {'EA_w_bill': round(sol.fun, 2)}\n return input_var\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom datetime import datetime\nimport time\nfrom functions import weather_scraper\n\ndef getData():\n # # run weather_scraper.py to fetch new weather data\n # weather_scraper.getData()\n\n ## Read in csv file \"weather_data.csv\"\n weather_data = pd.read_csv(\"data/weather_data.csv\")\n\n # Grab the current month & hour\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n\n # Determine which month group the current month is [0,5]\n currentMonthGroup = currentMonth // 2\n\n hoep_data = []\n temp = weather_data.iloc[:,2]\n\n # Change hour string to number from 0-23\n for i in range(len(temp)): \n weather_data.iloc[i,1] = (currentHour + i) % 24\n\n # Convert temperature data to HOEP data\n if (currentMonthGroup == 0) :\n hoep_data = temp.apply(lambda x: (2.02887*x + 39.633)/100)\n elif (currentMonthGroup == 1):\n hoep_data = temp.apply(lambda x: (0.453122*x + 19.8276)/100)\n elif (currentMonthGroup == 2):\n hoep_data = temp.apply(lambda x: (1.13665*x - 11.0085)/100)\n elif (currentMonthGroup == 3):\n hoep_data = temp.apply(lambda x: (1.90245*x - 23.2826)/100)\n elif (currentMonthGroup == 4): \n hoep_data = temp.apply(lambda x: (1.39145*x - 8.97971)/100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767*x + 21.3536)/100)\n\n # Load in the load_data\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine = 'openpyxl')\n\n # Create loading schedule based on current time of day and month\n load_sched = np.arange(48)\n\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[ weather_data.iloc[i,1] , currentMonthGroup]\n\n WMST = 0.003499 \n\n ## x[0:48] = PCEA\n ## x[48:96] = ESB\n\n start_time = time.time()\n\n # Constraints to ensure that ESB falls within limits\n def constraint1(x):\n for i in range(48):\n if (i == 0):\n x[48] = 0\n else:\n x[48+i] = x[48+i-1] + x[i]\n return x[0:48] + x[48:96]\n \n def constraint2(x):\n for i in 
range(48):\n if (i == 0):\n x[48] = 0\n else:\n x[48+i] = x[48+i-1] + x[i]\n return 10000 - (x[0:48]+ x[48:96])\n \n\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n #Objective\n def MEC(x): # ( PDLL + PCEA ) x HOEP\n return sum(sum( (load_sched + np.array([x[0:48]])) * (np.array(hoep_data)+WMST) ))\n\n x0 = np.array([np.ones(48), np.ones(48)])\n\n bounds = (power + storage)\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n\n cons = ([cons1, cons2])\n\n sol = minimize(MEC, x0, method='SLSQP',bounds=bounds,constraints=cons,options= {'maxiter':150,'disp':True})\n\n input_var = {\"EA_w_bill\": round(sol.fun,2)}\n\n return input_var\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import logging
from typing import Sequence
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.db.models import F, Q
from django.utils import timezone
from sentry_sdk import capture_exception
from sentry.models import (
Environment,
Project,
Release,
ReleaseEnvironment,
ReleaseProjectEnvironment,
ReleaseStatus,
)
from sentry.release_health import release_monitor
from sentry.release_health.release_monitor.base import Totals
from sentry.tasks.base import instrumented_task
from sentry.utils import metrics
CHUNK_SIZE = 1000
MAX_SECONDS = 60
logger = logging.getLogger("sentry.tasks.releasemonitor")
@instrumented_task(
    name="sentry.release_health.tasks.monitor_release_adoption",
    queue="releasemonitor",
    default_retry_delay=5,
    max_retries=5,
)  # type: ignore
def monitor_release_adoption(**kwargs) -> None:
    """Fan out release-adoption processing, one sub-task per organization.

    Fetches the orgs/projects that recently reported sessions and enqueues
    ``process_projects_with_sessions`` for each organization.
    """
    metrics.incr("sentry.tasks.monitor_release_adoption.start", sample_rate=1.0)
    timer = metrics.timer(
        "sentry.tasks.monitor_release_adoption.process_projects_with_sessions", sample_rate=1.0
    )
    with timer:
        recent = release_monitor.fetch_projects_with_recent_sessions()
        for organization_id, projects in recent.items():
            # Each org is handled asynchronously by its own task invocation.
            process_projects_with_sessions.delay(organization_id, projects)
@instrumented_task(
    name="sentry.tasks.process_projects_with_sessions",
    queue="releasemonitor",
    default_retry_delay=5,
    max_retries=5,
)  # type: ignore
def process_projects_with_sessions(org_id, project_ids) -> None:
    """Process release-health totals for a single org's projects.

    Flags the projects as having sessions, computes per-release session
    totals, marks qualifying releases as adopted, and unadopts the rest.
    """
    with metrics.timer("sentry.tasks.monitor_release_adoption.process_projects_with_sessions.core"):
        # Flip on the `has_sessions` flag for any of these projects that
        # do not have it set yet.
        flagless = Project.objects.filter(
            organization_id=org_id,
            id__in=project_ids,
            flags=F("flags").bitand(~Project.flags.has_sessions),
        )
        flagless.update(flags=F("flags").bitor(Project.flags.has_sessions))

        totals = release_monitor.fetch_project_release_health_totals(org_id, project_ids)

        newly_adopted = adopt_releases(org_id, totals)
        cleanup_adopted_releases(project_ids, newly_adopted)
def adopt_releases(org_id: int, totals: Totals) -> Sequence[int]:
    """Mark releases as adopted when they cross a per-environment session share.

    For each (project, environment), a release qualifies when its share of the
    environment's total sessions is at least ``0.1 / number_of_releases``.
    Missing ReleaseProjectEnvironment rows (and their Release/Environment
    dependencies) are created on demand.

    Returns the ids of every ReleaseProjectEnvironment that is (now) adopted.
    """
    # Using the totals calculated in sum_sessions_and_releases, mark any releases as adopted if they reach a threshold.
    adopted_ids = []
    with metrics.timer(
        "sentry.tasks.monitor_release_adoption.process_projects_with_sessions.updates"
    ):
        for project_id, project_totals in totals.items():
            for environment, environment_totals in project_totals.items():
                total_releases = len(environment_totals["releases"])
                for release_version in environment_totals["releases"]:
                    # Threshold shrinks as more releases share the environment.
                    threshold = 0.1 / total_releases
                    if (
                        environment
                        and environment_totals["total_sessions"] != 0
                        and environment_totals["releases"][release_version]
                        / environment_totals["total_sessions"]
                        >= threshold
                    ):
                        rpe = None
                        try:
                            # Fast path: the row already exists; just refresh
                            # its adopted/unadopted timestamps if needed.
                            rpe = ReleaseProjectEnvironment.objects.get(
                                project_id=project_id,
                                release_id=Release.objects.get(
                                    organization=org_id, version=release_version
                                ).id,
                                environment__name=environment,
                                environment__organization_id=org_id,
                            )

                            updates = {}
                            if rpe.adopted is None:
                                updates["adopted"] = timezone.now()

                            if rpe.unadopted is not None:
                                updates["unadopted"] = None

                            if updates:
                                rpe.update(**updates)

                        except (Release.DoesNotExist, ReleaseProjectEnvironment.DoesNotExist):
                            # Slow path: create the missing row(s).
                            metrics.incr("sentry.tasks.process_projects_with_sessions.creating_rpe")
                            try:
                                env = Environment.objects.get_or_create(
                                    name=environment, organization_id=org_id
                                )[0]
                                try:
                                    release = Release.objects.get_or_create(
                                        organization_id=org_id,
                                        version=release_version,
                                        defaults={
                                            "status": ReleaseStatus.OPEN,
                                        },
                                    )[0]
                                except IntegrityError:
                                    # Lost a create race; fetch the winner.
                                    release = Release.objects.get(
                                        organization_id=org_id, version=release_version
                                    )
                                except ValidationError:
                                    # Version string is invalid; skip creation
                                    # for this release but keep processing.
                                    release = None
                                    logger.exception(
                                        "sentry.tasks.process_projects_with_sessions.creating_rpe.ValidationError",
                                        extra={
                                            "org_id": org_id,
                                            "release_version": release_version,
                                        },
                                    )

                                if release:
                                    release.add_project(Project.objects.get(id=project_id))

                                    ReleaseEnvironment.objects.get_or_create(
                                        environment=env, organization_id=org_id, release=release
                                    )

                                    rpe = ReleaseProjectEnvironment.objects.create(
                                        project_id=project_id,
                                        release_id=release.id,
                                        environment=env,
                                        adopted=timezone.now(),
                                    )
                            except (
                                Project.DoesNotExist,
                                Environment.DoesNotExist,
                                Release.DoesNotExist,
                                ReleaseEnvironment.DoesNotExist,
                            ) as exc:
                                # Dependent row vanished mid-flight; record and
                                # move on rather than failing the whole task.
                                metrics.incr(
                                    "sentry.tasks.process_projects_with_sessions.skipped_update"
                                )
                                capture_exception(exc)
                        if rpe:
                            adopted_ids.append(rpe.id)

    return adopted_ids
def cleanup_adopted_releases(project_ids: Sequence[int], adopted_ids: Sequence[int]) -> None:
    """Unadopt releases in these projects that did not make the adopted set.

    Any row currently adopted (``adopted`` set, ``unadopted`` null) whose id is
    not in ``adopted_ids`` gets its ``unadopted`` timestamp set to now.
    """
    with metrics.timer(
        "sentry.tasks.monitor_release_adoption.process_projects_with_sessions.cleanup"
    ):
        stale = ReleaseProjectEnvironment.objects.filter(
            project_id__in=project_ids, unadopted__isnull=True
        ).exclude(Q(adopted=None) | Q(id__in=adopted_ids))
        stale.update(unadopted=timezone.now())
|
normal
|
{
"blob_id": "eb4271aa5abe3ddc05048858205e6ef807a4f8ac",
"index": 6863,
"step-1": "<mask token>\n\n\n@instrumented_task(name=\n 'sentry.release_health.tasks.monitor_release_adoption', queue=\n 'releasemonitor', default_retry_delay=5, max_retries=5)\ndef monitor_release_adoption(**kwargs) ->None:\n metrics.incr('sentry.tasks.monitor_release_adoption.start', sample_rate=1.0\n )\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions',\n sample_rate=1.0):\n for org_id, project_ids in release_monitor.fetch_projects_with_recent_sessions(\n ).items():\n process_projects_with_sessions.delay(org_id, project_ids)\n\n\n@instrumented_task(name='sentry.tasks.process_projects_with_sessions',\n queue='releasemonitor', default_retry_delay=5, max_retries=5)\ndef process_projects_with_sessions(org_id, project_ids) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.core'\n ):\n Project.objects.filter(organization_id=org_id, id__in=project_ids,\n flags=F('flags').bitand(~Project.flags.has_sessions)).update(flags\n =F('flags').bitor(Project.flags.has_sessions))\n totals = release_monitor.fetch_project_release_health_totals(org_id,\n project_ids)\n adopted_ids = adopt_releases(org_id, totals)\n cleanup_adopted_releases(project_ids, adopted_ids)\n\n\ndef adopt_releases(org_id: int, totals: Totals) ->Sequence[int]:\n adopted_ids = []\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.updates'\n ):\n for project_id, project_totals in totals.items():\n for environment, environment_totals in project_totals.items():\n total_releases = len(environment_totals['releases'])\n for release_version in environment_totals['releases']:\n threshold = 0.1 / total_releases\n if environment and environment_totals['total_sessions'\n ] != 0 and environment_totals['releases'][\n release_version] / environment_totals['total_sessions'\n ] >= threshold:\n rpe = None\n try:\n rpe = ReleaseProjectEnvironment.objects.get(\n project_id=project_id, 
release_id=Release.\n objects.get(organization=org_id, version=\n release_version).id, environment__name=\n environment, environment__organization_id=\n org_id)\n updates = {}\n if rpe.adopted is None:\n updates['adopted'] = timezone.now()\n if rpe.unadopted is not None:\n updates['unadopted'] = None\n if updates:\n rpe.update(**updates)\n except (Release.DoesNotExist,\n ReleaseProjectEnvironment.DoesNotExist):\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe'\n )\n try:\n env = Environment.objects.get_or_create(name\n =environment, organization_id=org_id)[0]\n try:\n release = Release.objects.get_or_create(\n organization_id=org_id, version=\n release_version, defaults={'status':\n ReleaseStatus.OPEN})[0]\n except IntegrityError:\n release = Release.objects.get(\n organization_id=org_id, version=\n release_version)\n except ValidationError:\n release = None\n logger.exception(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe.ValidationError'\n , extra={'org_id': org_id,\n 'release_version': release_version})\n if release:\n release.add_project(Project.objects.get\n (id=project_id))\n ReleaseEnvironment.objects.get_or_create(\n environment=env, organization_id=\n org_id, release=release)\n rpe = (ReleaseProjectEnvironment.\n objects.create(project_id=\n project_id, release_id=release.id,\n environment=env, adopted=timezone.\n now()))\n except (Project.DoesNotExist, Environment.\n DoesNotExist, Release.DoesNotExist,\n ReleaseEnvironment.DoesNotExist) as exc:\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.skipped_update'\n )\n capture_exception(exc)\n if rpe:\n adopted_ids.append(rpe.id)\n return adopted_ids\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@instrumented_task(name=\n 'sentry.release_health.tasks.monitor_release_adoption', queue=\n 'releasemonitor', default_retry_delay=5, max_retries=5)\ndef monitor_release_adoption(**kwargs) ->None:\n metrics.incr('sentry.tasks.monitor_release_adoption.start', sample_rate=1.0\n )\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions',\n sample_rate=1.0):\n for org_id, project_ids in release_monitor.fetch_projects_with_recent_sessions(\n ).items():\n process_projects_with_sessions.delay(org_id, project_ids)\n\n\n@instrumented_task(name='sentry.tasks.process_projects_with_sessions',\n queue='releasemonitor', default_retry_delay=5, max_retries=5)\ndef process_projects_with_sessions(org_id, project_ids) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.core'\n ):\n Project.objects.filter(organization_id=org_id, id__in=project_ids,\n flags=F('flags').bitand(~Project.flags.has_sessions)).update(flags\n =F('flags').bitor(Project.flags.has_sessions))\n totals = release_monitor.fetch_project_release_health_totals(org_id,\n project_ids)\n adopted_ids = adopt_releases(org_id, totals)\n cleanup_adopted_releases(project_ids, adopted_ids)\n\n\ndef adopt_releases(org_id: int, totals: Totals) ->Sequence[int]:\n adopted_ids = []\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.updates'\n ):\n for project_id, project_totals in totals.items():\n for environment, environment_totals in project_totals.items():\n total_releases = len(environment_totals['releases'])\n for release_version in environment_totals['releases']:\n threshold = 0.1 / total_releases\n if environment and environment_totals['total_sessions'\n ] != 0 and environment_totals['releases'][\n release_version] / environment_totals['total_sessions'\n ] >= threshold:\n rpe = None\n try:\n rpe = ReleaseProjectEnvironment.objects.get(\n project_id=project_id, 
release_id=Release.\n objects.get(organization=org_id, version=\n release_version).id, environment__name=\n environment, environment__organization_id=\n org_id)\n updates = {}\n if rpe.adopted is None:\n updates['adopted'] = timezone.now()\n if rpe.unadopted is not None:\n updates['unadopted'] = None\n if updates:\n rpe.update(**updates)\n except (Release.DoesNotExist,\n ReleaseProjectEnvironment.DoesNotExist):\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe'\n )\n try:\n env = Environment.objects.get_or_create(name\n =environment, organization_id=org_id)[0]\n try:\n release = Release.objects.get_or_create(\n organization_id=org_id, version=\n release_version, defaults={'status':\n ReleaseStatus.OPEN})[0]\n except IntegrityError:\n release = Release.objects.get(\n organization_id=org_id, version=\n release_version)\n except ValidationError:\n release = None\n logger.exception(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe.ValidationError'\n , extra={'org_id': org_id,\n 'release_version': release_version})\n if release:\n release.add_project(Project.objects.get\n (id=project_id))\n ReleaseEnvironment.objects.get_or_create(\n environment=env, organization_id=\n org_id, release=release)\n rpe = (ReleaseProjectEnvironment.\n objects.create(project_id=\n project_id, release_id=release.id,\n environment=env, adopted=timezone.\n now()))\n except (Project.DoesNotExist, Environment.\n DoesNotExist, Release.DoesNotExist,\n ReleaseEnvironment.DoesNotExist) as exc:\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.skipped_update'\n )\n capture_exception(exc)\n if rpe:\n adopted_ids.append(rpe.id)\n return adopted_ids\n\n\ndef cleanup_adopted_releases(project_ids: Sequence[int], adopted_ids:\n Sequence[int]) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.cleanup'\n ):\n ReleaseProjectEnvironment.objects.filter(project_id__in=project_ids,\n 
unadopted__isnull=True).exclude(Q(adopted=None) | Q(id__in=\n adopted_ids)).update(unadopted=timezone.now())\n",
"step-3": "<mask token>\nCHUNK_SIZE = 1000\nMAX_SECONDS = 60\nlogger = logging.getLogger('sentry.tasks.releasemonitor')\n\n\n@instrumented_task(name=\n 'sentry.release_health.tasks.monitor_release_adoption', queue=\n 'releasemonitor', default_retry_delay=5, max_retries=5)\ndef monitor_release_adoption(**kwargs) ->None:\n metrics.incr('sentry.tasks.monitor_release_adoption.start', sample_rate=1.0\n )\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions',\n sample_rate=1.0):\n for org_id, project_ids in release_monitor.fetch_projects_with_recent_sessions(\n ).items():\n process_projects_with_sessions.delay(org_id, project_ids)\n\n\n@instrumented_task(name='sentry.tasks.process_projects_with_sessions',\n queue='releasemonitor', default_retry_delay=5, max_retries=5)\ndef process_projects_with_sessions(org_id, project_ids) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.core'\n ):\n Project.objects.filter(organization_id=org_id, id__in=project_ids,\n flags=F('flags').bitand(~Project.flags.has_sessions)).update(flags\n =F('flags').bitor(Project.flags.has_sessions))\n totals = release_monitor.fetch_project_release_health_totals(org_id,\n project_ids)\n adopted_ids = adopt_releases(org_id, totals)\n cleanup_adopted_releases(project_ids, adopted_ids)\n\n\ndef adopt_releases(org_id: int, totals: Totals) ->Sequence[int]:\n adopted_ids = []\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.updates'\n ):\n for project_id, project_totals in totals.items():\n for environment, environment_totals in project_totals.items():\n total_releases = len(environment_totals['releases'])\n for release_version in environment_totals['releases']:\n threshold = 0.1 / total_releases\n if environment and environment_totals['total_sessions'\n ] != 0 and environment_totals['releases'][\n release_version] / environment_totals['total_sessions'\n ] >= threshold:\n 
rpe = None\n try:\n rpe = ReleaseProjectEnvironment.objects.get(\n project_id=project_id, release_id=Release.\n objects.get(organization=org_id, version=\n release_version).id, environment__name=\n environment, environment__organization_id=\n org_id)\n updates = {}\n if rpe.adopted is None:\n updates['adopted'] = timezone.now()\n if rpe.unadopted is not None:\n updates['unadopted'] = None\n if updates:\n rpe.update(**updates)\n except (Release.DoesNotExist,\n ReleaseProjectEnvironment.DoesNotExist):\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe'\n )\n try:\n env = Environment.objects.get_or_create(name\n =environment, organization_id=org_id)[0]\n try:\n release = Release.objects.get_or_create(\n organization_id=org_id, version=\n release_version, defaults={'status':\n ReleaseStatus.OPEN})[0]\n except IntegrityError:\n release = Release.objects.get(\n organization_id=org_id, version=\n release_version)\n except ValidationError:\n release = None\n logger.exception(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe.ValidationError'\n , extra={'org_id': org_id,\n 'release_version': release_version})\n if release:\n release.add_project(Project.objects.get\n (id=project_id))\n ReleaseEnvironment.objects.get_or_create(\n environment=env, organization_id=\n org_id, release=release)\n rpe = (ReleaseProjectEnvironment.\n objects.create(project_id=\n project_id, release_id=release.id,\n environment=env, adopted=timezone.\n now()))\n except (Project.DoesNotExist, Environment.\n DoesNotExist, Release.DoesNotExist,\n ReleaseEnvironment.DoesNotExist) as exc:\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.skipped_update'\n )\n capture_exception(exc)\n if rpe:\n adopted_ids.append(rpe.id)\n return adopted_ids\n\n\ndef cleanup_adopted_releases(project_ids: Sequence[int], adopted_ids:\n Sequence[int]) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.cleanup'\n ):\n 
ReleaseProjectEnvironment.objects.filter(project_id__in=project_ids,\n unadopted__isnull=True).exclude(Q(adopted=None) | Q(id__in=\n adopted_ids)).update(unadopted=timezone.now())\n",
"step-4": "import logging\nfrom typing import Sequence\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom django.db.models import F, Q\nfrom django.utils import timezone\nfrom sentry_sdk import capture_exception\nfrom sentry.models import Environment, Project, Release, ReleaseEnvironment, ReleaseProjectEnvironment, ReleaseStatus\nfrom sentry.release_health import release_monitor\nfrom sentry.release_health.release_monitor.base import Totals\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils import metrics\nCHUNK_SIZE = 1000\nMAX_SECONDS = 60\nlogger = logging.getLogger('sentry.tasks.releasemonitor')\n\n\n@instrumented_task(name=\n 'sentry.release_health.tasks.monitor_release_adoption', queue=\n 'releasemonitor', default_retry_delay=5, max_retries=5)\ndef monitor_release_adoption(**kwargs) ->None:\n metrics.incr('sentry.tasks.monitor_release_adoption.start', sample_rate=1.0\n )\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions',\n sample_rate=1.0):\n for org_id, project_ids in release_monitor.fetch_projects_with_recent_sessions(\n ).items():\n process_projects_with_sessions.delay(org_id, project_ids)\n\n\n@instrumented_task(name='sentry.tasks.process_projects_with_sessions',\n queue='releasemonitor', default_retry_delay=5, max_retries=5)\ndef process_projects_with_sessions(org_id, project_ids) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.core'\n ):\n Project.objects.filter(organization_id=org_id, id__in=project_ids,\n flags=F('flags').bitand(~Project.flags.has_sessions)).update(flags\n =F('flags').bitor(Project.flags.has_sessions))\n totals = release_monitor.fetch_project_release_health_totals(org_id,\n project_ids)\n adopted_ids = adopt_releases(org_id, totals)\n cleanup_adopted_releases(project_ids, adopted_ids)\n\n\ndef adopt_releases(org_id: int, totals: Totals) ->Sequence[int]:\n adopted_ids = []\n 
with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.updates'\n ):\n for project_id, project_totals in totals.items():\n for environment, environment_totals in project_totals.items():\n total_releases = len(environment_totals['releases'])\n for release_version in environment_totals['releases']:\n threshold = 0.1 / total_releases\n if environment and environment_totals['total_sessions'\n ] != 0 and environment_totals['releases'][\n release_version] / environment_totals['total_sessions'\n ] >= threshold:\n rpe = None\n try:\n rpe = ReleaseProjectEnvironment.objects.get(\n project_id=project_id, release_id=Release.\n objects.get(organization=org_id, version=\n release_version).id, environment__name=\n environment, environment__organization_id=\n org_id)\n updates = {}\n if rpe.adopted is None:\n updates['adopted'] = timezone.now()\n if rpe.unadopted is not None:\n updates['unadopted'] = None\n if updates:\n rpe.update(**updates)\n except (Release.DoesNotExist,\n ReleaseProjectEnvironment.DoesNotExist):\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe'\n )\n try:\n env = Environment.objects.get_or_create(name\n =environment, organization_id=org_id)[0]\n try:\n release = Release.objects.get_or_create(\n organization_id=org_id, version=\n release_version, defaults={'status':\n ReleaseStatus.OPEN})[0]\n except IntegrityError:\n release = Release.objects.get(\n organization_id=org_id, version=\n release_version)\n except ValidationError:\n release = None\n logger.exception(\n 'sentry.tasks.process_projects_with_sessions.creating_rpe.ValidationError'\n , extra={'org_id': org_id,\n 'release_version': release_version})\n if release:\n release.add_project(Project.objects.get\n (id=project_id))\n ReleaseEnvironment.objects.get_or_create(\n environment=env, organization_id=\n org_id, release=release)\n rpe = (ReleaseProjectEnvironment.\n objects.create(project_id=\n project_id, release_id=release.id,\n 
environment=env, adopted=timezone.\n now()))\n except (Project.DoesNotExist, Environment.\n DoesNotExist, Release.DoesNotExist,\n ReleaseEnvironment.DoesNotExist) as exc:\n metrics.incr(\n 'sentry.tasks.process_projects_with_sessions.skipped_update'\n )\n capture_exception(exc)\n if rpe:\n adopted_ids.append(rpe.id)\n return adopted_ids\n\n\ndef cleanup_adopted_releases(project_ids: Sequence[int], adopted_ids:\n Sequence[int]) ->None:\n with metrics.timer(\n 'sentry.tasks.monitor_release_adoption.process_projects_with_sessions.cleanup'\n ):\n ReleaseProjectEnvironment.objects.filter(project_id__in=project_ids,\n unadopted__isnull=True).exclude(Q(adopted=None) | Q(id__in=\n adopted_ids)).update(unadopted=timezone.now())\n",
"step-5": "import logging\nfrom typing import Sequence\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom django.db.models import F, Q\nfrom django.utils import timezone\nfrom sentry_sdk import capture_exception\n\nfrom sentry.models import (\n Environment,\n Project,\n Release,\n ReleaseEnvironment,\n ReleaseProjectEnvironment,\n ReleaseStatus,\n)\nfrom sentry.release_health import release_monitor\nfrom sentry.release_health.release_monitor.base import Totals\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils import metrics\n\nCHUNK_SIZE = 1000\nMAX_SECONDS = 60\n\nlogger = logging.getLogger(\"sentry.tasks.releasemonitor\")\n\n\n@instrumented_task(\n name=\"sentry.release_health.tasks.monitor_release_adoption\",\n queue=\"releasemonitor\",\n default_retry_delay=5,\n max_retries=5,\n) # type: ignore\ndef monitor_release_adoption(**kwargs) -> None:\n metrics.incr(\"sentry.tasks.monitor_release_adoption.start\", sample_rate=1.0)\n with metrics.timer(\n \"sentry.tasks.monitor_release_adoption.process_projects_with_sessions\", sample_rate=1.0\n ):\n for org_id, project_ids in release_monitor.fetch_projects_with_recent_sessions().items():\n process_projects_with_sessions.delay(org_id, project_ids)\n\n\n@instrumented_task(\n name=\"sentry.tasks.process_projects_with_sessions\",\n queue=\"releasemonitor\",\n default_retry_delay=5,\n max_retries=5,\n) # type: ignore\ndef process_projects_with_sessions(org_id, project_ids) -> None:\n # Takes a single org id and a list of project ids\n\n with metrics.timer(\"sentry.tasks.monitor_release_adoption.process_projects_with_sessions.core\"):\n # Set the `has_sessions` flag for these projects\n Project.objects.filter(\n organization_id=org_id,\n id__in=project_ids,\n flags=F(\"flags\").bitand(~Project.flags.has_sessions),\n ).update(flags=F(\"flags\").bitor(Project.flags.has_sessions))\n\n totals = release_monitor.fetch_project_release_health_totals(org_id, 
project_ids)\n\n adopted_ids = adopt_releases(org_id, totals)\n\n cleanup_adopted_releases(project_ids, adopted_ids)\n\n\ndef adopt_releases(org_id: int, totals: Totals) -> Sequence[int]:\n # Using the totals calculated in sum_sessions_and_releases, mark any releases as adopted if they reach a threshold.\n adopted_ids = []\n with metrics.timer(\n \"sentry.tasks.monitor_release_adoption.process_projects_with_sessions.updates\"\n ):\n for project_id, project_totals in totals.items():\n for environment, environment_totals in project_totals.items():\n total_releases = len(environment_totals[\"releases\"])\n for release_version in environment_totals[\"releases\"]:\n threshold = 0.1 / total_releases\n if (\n environment\n and environment_totals[\"total_sessions\"] != 0\n and environment_totals[\"releases\"][release_version]\n / environment_totals[\"total_sessions\"]\n >= threshold\n ):\n rpe = None\n try:\n rpe = ReleaseProjectEnvironment.objects.get(\n project_id=project_id,\n release_id=Release.objects.get(\n organization=org_id, version=release_version\n ).id,\n environment__name=environment,\n environment__organization_id=org_id,\n )\n\n updates = {}\n if rpe.adopted is None:\n updates[\"adopted\"] = timezone.now()\n\n if rpe.unadopted is not None:\n updates[\"unadopted\"] = None\n\n if updates:\n rpe.update(**updates)\n\n except (Release.DoesNotExist, ReleaseProjectEnvironment.DoesNotExist):\n metrics.incr(\"sentry.tasks.process_projects_with_sessions.creating_rpe\")\n try:\n env = Environment.objects.get_or_create(\n name=environment, organization_id=org_id\n )[0]\n try:\n release = Release.objects.get_or_create(\n organization_id=org_id,\n version=release_version,\n defaults={\n \"status\": ReleaseStatus.OPEN,\n },\n )[0]\n except IntegrityError:\n release = Release.objects.get(\n organization_id=org_id, version=release_version\n )\n except ValidationError:\n release = None\n logger.exception(\n 
\"sentry.tasks.process_projects_with_sessions.creating_rpe.ValidationError\",\n extra={\n \"org_id\": org_id,\n \"release_version\": release_version,\n },\n )\n\n if release:\n release.add_project(Project.objects.get(id=project_id))\n\n ReleaseEnvironment.objects.get_or_create(\n environment=env, organization_id=org_id, release=release\n )\n\n rpe = ReleaseProjectEnvironment.objects.create(\n project_id=project_id,\n release_id=release.id,\n environment=env,\n adopted=timezone.now(),\n )\n except (\n Project.DoesNotExist,\n Environment.DoesNotExist,\n Release.DoesNotExist,\n ReleaseEnvironment.DoesNotExist,\n ) as exc:\n metrics.incr(\n \"sentry.tasks.process_projects_with_sessions.skipped_update\"\n )\n capture_exception(exc)\n if rpe:\n adopted_ids.append(rpe.id)\n\n return adopted_ids\n\n\ndef cleanup_adopted_releases(project_ids: Sequence[int], adopted_ids: Sequence[int]) -> None:\n # Cleanup; adopted releases need to be marked as unadopted if they are not in `adopted_ids`\n with metrics.timer(\n \"sentry.tasks.monitor_release_adoption.process_projects_with_sessions.cleanup\"\n ):\n ReleaseProjectEnvironment.objects.filter(\n project_id__in=project_ids, unadopted__isnull=True\n ).exclude(Q(adopted=None) | Q(id__in=adopted_ids)).update(unadopted=timezone.now())\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
import scrapy
class Heiyan2Spider(scrapy.Spider):
    """Scrapy spider stub targeting heiyan.com.

    Crawling starts from the site root; parsing is not implemented yet.
    """
    name = 'heiyan2'
    allowed_domains = ['heiyan.com']
    start_urls = ['http://heiyan.com/']

    def parse(self, response):
        """Default response callback — currently a no-op stub."""
        pass
|
normal
|
{
"blob_id": "d13c6d71bb871496b0c6ad2451a2f561484e7c68",
"index": 9634,
"step-1": "<mask token>\n\n\nclass Heiyan2Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Heiyan2Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n pass\n",
"step-3": "<mask token>\n\n\nclass Heiyan2Spider(scrapy.Spider):\n name = 'heiyan2'\n allowed_domains = ['heiyan.com']\n start_urls = ['http://heiyan.com/']\n\n def parse(self, response):\n pass\n",
"step-4": "import scrapy\n\n\nclass Heiyan2Spider(scrapy.Spider):\n name = 'heiyan2'\n allowed_domains = ['heiyan.com']\n start_urls = ['http://heiyan.com/']\n\n def parse(self, response):\n pass\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass Heiyan2Spider(scrapy.Spider):\n name = 'heiyan2'\n allowed_domains = ['heiyan.com']\n start_urls = ['http://heiyan.com/']\n\n def parse(self, response):\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Multiple Word Palindromes
# Ex 72 extended

word = input("Word: ")

# Drop punctuation and spaces so only letters are compared.
new = [ch for ch in word if ch not in ".,?!" and ch != ' ']

# Forward and reversed copies of the cleaned characters.
o = list(new)
r = new[::-1]

print(new)
print(o)
print(r)

# Count positions where the forward and reversed sequences agree.
same_count = sum(1 for a, b in zip(o, r) if a == b)

if same_count == len(new):
    print("Palindrome")
else:
    print("Non Palindrome")
|
normal
|
{
"blob_id": "c6ab82d7f59faeee2a74e90a96c2348b046d0889",
"index": 7382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(word)):\n if word[i] in '.,?!' or word[i] == ' ':\n pass\n else:\n new.append(word[i])\nfor i in range(len(new)):\n o.append(new[i])\nfor i in range(len(new)):\n r.append(new[-i - 1])\nprint(new)\nprint(o)\nprint(r)\n<mask token>\nfor i in range(len(new)):\n if o[i] == r[i]:\n same_count += 1\n else:\n pass\nif same_count == len(new):\n print('Palindrome')\nelse:\n print('Non Palindrome')\n",
"step-3": "word = input('Word: ')\nnew = []\no = []\nr = []\nfor i in range(len(word)):\n if word[i] in '.,?!' or word[i] == ' ':\n pass\n else:\n new.append(word[i])\nfor i in range(len(new)):\n o.append(new[i])\nfor i in range(len(new)):\n r.append(new[-i - 1])\nprint(new)\nprint(o)\nprint(r)\nsame_count = 0\nfor i in range(len(new)):\n if o[i] == r[i]:\n same_count += 1\n else:\n pass\nif same_count == len(new):\n print('Palindrome')\nelse:\n print('Non Palindrome')\n",
"step-4": "#Multiple Word Palindromes\n#Ex 72 extended\n\nword = input(\"Word: \")\nnew = []\no = []\nr = []\n#canceling out the spaces\nfor i in range(len(word)):\n if word[i] in \".,?!\" or word[i] == ' ':\n pass\n else:\n new.append(word[i])\n\n#original\nfor i in range(len(new)):\n o.append(new[i])\n#reverse\nfor i in range(len(new)):\n r.append(new[-i - 1])\n\nprint(new)\nprint(o)\nprint(r)\nsame_count = 0\nfor i in range(len(new)):\n if o[i] == r[i]:\n same_count += 1\n else:\n pass\n\nif same_count == len(new):\n print(\"Palindrome\")\nelse:\n print(\"Non Palindrome\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#-*- coding: utf-8 -*-
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
import itertools
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['clustering',
'average_clustering',
'latapy_clustering',
'robins_alexander_clustering']
# functions for computing clustering of pairs
def cc_dot(nu, nv):
    """Jaccard-style pairwise coefficient: |nu ∩ nv| / |nu ∪ nv|."""
    shared = nu & nv
    return len(shared) / float(len(nu | nv))
def cc_max(nu, nv):
    """Pairwise coefficient normalized by the larger neighborhood size."""
    larger = max(len(nu), len(nv))
    return len(nu & nv) / float(larger)
def cc_min(nu, nv):
    """Pairwise coefficient normalized by the smaller neighborhood size."""
    smaller = min(len(nu), len(nv))
    return len(nu & nv) / float(smaller)
# Dispatch table mapping the `mode` argument of latapy_clustering to the
# pairwise clustering function it selects.
modes = {'dot': cc_dot,
         'min': cc_min,
         'max': cc_max}
def latapy_clustering(G, nodes=None, mode='dot'):
    r"""Compute a bipartite clustering coefficient for each node.

    For a node `u` the coefficient is the average pairwise clustering
    with its second-order neighbors [1]_:

    .. math::

       c_u = \frac{\sum_{v \in N(N(u))} c_{uv}}{|N(N(u))|}

    where `N(N(u))` is the set of nodes exactly two hops from `u`
    (excluding `u` itself) and `c_{uv}` is the pairwise coefficient
    selected by *mode*:

    ``dot`` : :math:`|N(u) \cap N(v)| / |N(u) \cup N(v)|`

    ``min`` : :math:`|N(u) \cap N(v)| / \min(|N(u)|, |N(v)|)`

    ``max`` : :math:`|N(u) \cap N(v)| / \max(|N(u)|, |N(v)|)`

    Parameters
    ----------
    G : graph
        A bipartite graph.
    nodes : list or iterable, optional
        Nodes for which to compute the coefficient.  Defaults to all
        nodes of ``G``.
    mode : string
        The pairwise bipartite clustering method; must be ``"dot"``,
        ``"max"`` or ``"min"``.

    Returns
    -------
    dict
        Mapping of node to its bipartite clustering coefficient.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)  # path graphs are bipartite
    >>> c = bipartite.clustering(G)
    >>> c[0]
    0.5
    >>> c = bipartite.clustering(G,mode='min')
    >>> c[0]
    1.0

    See Also
    --------
    robins_alexander_clustering
    square_clustering
    average_clustering

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio
       (2008). Basic notions for the analysis of large two-mode networks.
       Social Networks 30(1), 31--48.
    """
    if not nx.algorithms.bipartite.is_bipartite(G):
        raise nx.NetworkXError("Graph is not bipartite")
    try:
        pair_cc = modes[mode]
    except KeyError:
        raise nx.NetworkXError(
            "Mode for bipartite clustering must be: dot, min or max")

    if nodes is None:
        nodes = G

    coefficients = {}
    for node in nodes:
        nbrs = set(G[node])
        # Second-order neighborhood: every node two hops away, minus node.
        two_hop = {w for nbr in G[node] for w in G[nbr]} - {node}
        total = sum(pair_cc(set(G[u]), nbrs) for u in two_hop)
        coefficients[node] = total / len(two_hop) if two_hop else 0.0
    return coefficients
clustering = latapy_clustering
def average_clustering(G, nodes=None, mode='dot'):
    r"""Compute the mean bipartite clustering coefficient.

    The value is the average of the per-node coefficients,

    .. math::

       C = \frac{1}{n}\sum_{v \in G} c_v,

    where `n` is the number of nodes averaged over.  Passing exactly one
    bipartite set as *nodes* yields the average for that set only [1]_.

    Parameters
    ----------
    G : graph
        A bipartite graph.
    nodes : list or iterable, optional
        Nodes to average over: either the entire graph (the default) or
        one of the two bipartite sets.
    mode : string
        The pairwise bipartite clustering method; must be ``"dot"``,
        ``"max"`` or ``"min"``.

    Returns
    -------
    float
        The average bipartite clustering for the given nodes, or for the
        whole graph when no nodes are specified.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G=nx.star_graph(3) # star graphs are bipartite
    >>> bipartite.average_clustering(G)
    0.75
    >>> X,Y=bipartite.sets(G)
    >>> bipartite.average_clustering(G,X)
    0.0
    >>> bipartite.average_clustering(G,Y)
    1.0

    See Also
    --------
    clustering

    Notes
    -----
    To obtain correct per-set averages, the *nodes* container must hold
    all of the nodes of one bipartite set ("top" or "bottom").  See
    :mod:`bipartite documentation <networkx.algorithms.bipartite>` for
    how bipartite graphs are handled in NetworkX.

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio
       (2008). Basic notions for the analysis of large two-mode networks.
       Social Networks 30(1), 31--48.
    """
    target = G if nodes is None else nodes
    per_node = latapy_clustering(G, nodes=target, mode=mode)
    total = sum(per_node[v] for v in target)
    return float(total) / len(target)
def robins_alexander_clustering(G):
    r"""Compute the Robins--Alexander bipartite clustering of G.

    Robins and Alexander [1]_ defined the coefficient as four times the
    number of four-cycles `C_4` divided by the number of three-paths
    `L_3` in the bipartite graph:

    .. math::

       CC_4 = \frac{4 * C_4}{L_3}

    Parameters
    ----------
    G : graph
        A bipartite graph.

    Returns
    -------
    float
        The Robins and Alexander bipartite clustering for the input
        graph; ``0`` when the graph is too small to contain a three-path.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.davis_southern_women_graph()
    >>> print(round(bipartite.robins_alexander_clustering(G), 3))
    0.468

    See Also
    --------
    latapy_clustering
    square_clustering

    References
    ----------
    .. [1] Robins, G. and M. Alexander (2004). Small worlds among
       interlocking directors: Network structure and distance in
       bipartite graphs. Computational & Mathematical Organization
       Theory 10(1), 69–94.
    """
    # Fewer than 4 nodes or 3 edges cannot contain a three-path.
    if G.order() < 4 or G.size() < 3:
        return 0
    three_paths = _threepaths(G)
    if three_paths == 0:
        return 0
    return 4.0 * _four_cycles(G) / three_paths
def _four_cycles(G):
cycles = 0
for v in G:
for u, w in itertools.combinations(G[v], 2):
cycles += len((set(G[u]) & set(G[w])) - set([v]))
return cycles / 4
def _threepaths(G):
paths = 0
for v in G:
for u in G[v]:
for w in set(G[u]) - set([v]):
paths += len(set(G[w]) - set([v, u]))
# Divide by two because we count each three path twice
# one for each possible starting point
return paths / 2
|
normal
|
{
"blob_id": "a21c132ba9f24ff2c695bf66cae074705025d6b1",
"index": 8063,
"step-1": "<mask token>\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\n<mask token>\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. \n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. 
[1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\n<mask token>\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. math::\n\n c_u = \\\\frac{\\\\sum_{v \\\\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{|N(u) \\\\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". \n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. 
\n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError('Graph is not bipartite')\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n 'Mode for bipartite clustering must be: dot, min or max')\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0:\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\n<mask token>\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in (set(G[u]) - set([v])):\n paths += len(set(G[w]) - set([v, u]))\n return paths / 2\n",
"step-3": "<mask token>\n__author__ = '\\n'.join(['Jordi Torrents <jtorrents@milnou.net>',\n 'Aric Hagberg (hagberg@lanl.gov)'])\n__all__ = ['clustering', 'average_clustering', 'latapy_clustering',\n 'robins_alexander_clustering']\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\nmodes = {'dot': cc_dot, 'min': cc_min, 'max': cc_max}\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. math::\n\n c_u = \\\\frac{\\\\sum_{v \\\\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{|N(u) \\\\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". 
\n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError('Graph is not bipartite')\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n 'Mode for bipartite clustering must be: dot, min or max')\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0:\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\nclustering = latapy_clustering\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in (set(G[u]) - set([v])):\n paths += len(set(G[w]) - set([v, u]))\n return paths / 2\n",
"step-4": "import itertools\nimport networkx as nx\n__author__ = '\\n'.join(['Jordi Torrents <jtorrents@milnou.net>',\n 'Aric Hagberg (hagberg@lanl.gov)'])\n__all__ = ['clustering', 'average_clustering', 'latapy_clustering',\n 'robins_alexander_clustering']\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\nmodes = {'dot': cc_dot, 'min': cc_min, 'max': cc_max}\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. math::\n\n c_u = \\\\frac{\\\\sum_{v \\\\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{|N(u) \\\\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". 
\n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError('Graph is not bipartite')\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n 'Mode for bipartite clustering must be: dot, min or max')\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0:\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\nclustering = latapy_clustering\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in (set(G[u]) - set([v])):\n paths += len(set(G[w]) - set([v, u]))\n return paths / 2\n",
"step-5": "#-*- coding: utf-8 -*-\n# Copyright (C) 2011 by\n# Jordi Torrents <jtorrents@milnou.net>\n# Aric Hagberg <hagberg@lanl.gov>\n# All rights reserved.\n# BSD license.\nimport itertools\nimport networkx as nx\n__author__ = \"\"\"\\n\"\"\".join(['Jordi Torrents <jtorrents@milnou.net>',\n 'Aric Hagberg (hagberg@lanl.gov)'])\n__all__ = ['clustering',\n 'average_clustering',\n 'latapy_clustering',\n 'robins_alexander_clustering']\n\n# functions for computing clustering of pairs\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\nmodes = {'dot': cc_dot,\n 'min': cc_min,\n 'max': cc_max}\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n r\"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. math::\n\n c_u = \\frac{\\sum_{v \\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\frac{|N(u)\\cap N(v)|}{|N(u) \\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\frac{|N(u)\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\frac{|N(u)\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". 
\n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError(\"Graph is not bipartite\")\n\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n \"Mode for bipartite clustering must be: dot, min or max\")\n\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0: # len(nbrs2)>0\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\nclustering = latapy_clustering\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n r\"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\frac{1}{n}\\sum_{v \\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\frac{1}{|X|}\\sum_{v \\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n r\"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return (4. * C_4) / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in set(G[u]) - set([v]):\n paths += len(set(G[w]) - set([v, u]))\n # Divide by two because we count each three path twice\n # one for each possible starting point\n return paths / 2\n",
"step-ids": [
4,
8,
9,
10,
11
]
}
|
[
4,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extractLetters(sourceFolder, trainRatio=0.8, destFolder=
'./data/separateLetters'):
"""
Parameters
----------
sourceFolder : string
DESCRIPTION.
trainRatio : float, optional
DESCRIPTION. The default is 0.8.
destFolder : string, optional
DESCRIPTION. The default is './data/separateLetters'.
Returns
-------
None.
"""
letterCounts = {}
capImages = os.listdir(sourceFolder)
nImages = len(capImages)
iSplit = int(nImages * trainRatio)
trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]
with open('trainTestSplit.dat', 'wb') as f:
pickle.dump(trainTestSplit, f)
nTrain = len(trainTestSplit[0])
for iImage, capImage in enumerate(trainTestSplit[0]):
print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))
capLabel = capImage.split('.')[0]
imageData = cv2.imread(os.path.join(sourceFolder, capImage))
imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)
imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.
BORDER_REPLICATE)
letterRegions = locateLetterRegions(imageData)
if len(letterRegions) != len(capLabel):
continue
for letterRegion, letterLabel in zip(letterRegions, capLabel):
x, y, w, h = letterRegion
letterImage = imageData[y:y + h, x:x + w]
savePath = os.path.join(destFolder, letterLabel)
if not os.path.exists(savePath):
os.makedirs(savePath)
if letterLabel not in letterCounts:
letterCounts[letterLabel] = 1
else:
letterCounts[letterLabel] += 1
letterCount = letterCounts[letterLabel]
fileName = os.path.join(savePath, '{}.png'.format(str(
letterCount).zfill(6)))
cv2.imwrite(fileName, letterImage)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import cv2
import pickle
from utils import locateLetterRegions
def extractLetters(sourceFolder, trainRatio=0.8, destFolder=
'./data/separateLetters'):
"""
Parameters
----------
sourceFolder : string
DESCRIPTION.
trainRatio : float, optional
DESCRIPTION. The default is 0.8.
destFolder : string, optional
DESCRIPTION. The default is './data/separateLetters'.
Returns
-------
None.
"""
letterCounts = {}
capImages = os.listdir(sourceFolder)
nImages = len(capImages)
iSplit = int(nImages * trainRatio)
trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]
with open('trainTestSplit.dat', 'wb') as f:
pickle.dump(trainTestSplit, f)
nTrain = len(trainTestSplit[0])
for iImage, capImage in enumerate(trainTestSplit[0]):
print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))
capLabel = capImage.split('.')[0]
imageData = cv2.imread(os.path.join(sourceFolder, capImage))
imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)
imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.
BORDER_REPLICATE)
letterRegions = locateLetterRegions(imageData)
if len(letterRegions) != len(capLabel):
continue
for letterRegion, letterLabel in zip(letterRegions, capLabel):
x, y, w, h = letterRegion
letterImage = imageData[y:y + h, x:x + w]
savePath = os.path.join(destFolder, letterLabel)
if not os.path.exists(savePath):
os.makedirs(savePath)
if letterLabel not in letterCounts:
letterCounts[letterLabel] = 1
else:
letterCounts[letterLabel] += 1
letterCount = letterCounts[letterLabel]
fileName = os.path.join(savePath, '{}.png'.format(str(
letterCount).zfill(6)))
cv2.imwrite(fileName, letterImage)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 19 09:31:20 2021
@author: dclabby
"""
import os
import cv2
import pickle
from utils import locateLetterRegions
# # Constants
# sourceFolder = '/home/dclabby/Documents/Springboard/HDAIML_SEP/Semester03/MachineLearning/Project/solving_captchas_code_examples/solving_captchas_code_examples/generated_captcha_images/'
# destFolder = './data/separateLetters'
# trainRatio = 0.8 # proportion of data set that will be used for training & validation (i.e. 1 - testRatio)
def extractLetters(sourceFolder, trainRatio=0.8, destFolder='./data/separateLetters'):
"""
Parameters
----------
sourceFolder : string
DESCRIPTION.
trainRatio : float, optional
DESCRIPTION. The default is 0.8.
destFolder : string, optional
DESCRIPTION. The default is './data/separateLetters'.
Returns
-------
None.
"""
letterCounts = {}
# Get a list of all the captcha images to be processed
capImages = os.listdir(sourceFolder)
# loop over the image paths
nImages = len(capImages)
# note: the original script uses all images for training (train/test split is implemented later, but test data is actually used for validation)
# therefore, should make a train/test split here & keep the test data separate
iSplit = int(nImages*trainRatio)
trainTestSplit = [capImages[:iSplit], capImages[iSplit:]] # [train, test]
# save the list of training and test data, so that test data can be identified later
with open('trainTestSplit.dat', "wb") as f:
pickle.dump(trainTestSplit, f)
# with open('trainTestSplit.dat', "rb") as f:
# trainTestSplit = pickle.load(f)
nTrain = len(trainTestSplit[0])
for (iImage, capImage) in enumerate(trainTestSplit[0]):#enumerate(capImages):
print('Processing image ' + str(iImage+1) + ' of ' + str(nTrain))#str(nImages))
# Separate the filename from its extension, and use filename as the captcha's label (i.e. "2A2X.png" -> "2A2X")
capLabel = capImage.split('.')[0]
# Load image
# imageData = cv2.imread(sourceFolder + capImage)
imageData = cv2.imread(os.path.join(sourceFolder, capImage))
#cv2.imshow(capLabel + ' - original', imageData)
# Convert to grayscale
imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)
#cv2.imshow(capLabel + ' - gray', imageData)
# Add padding
imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
#cv2.imshow(capLabel + ' - padding', imageData)
# Locate letter regions
letterRegions = locateLetterRegions(imageData)
# If the number of contours does not equal the number of letters in the label it is concluded that letter extraction
# was not successful, and this example will not be used in training data
if len(letterRegions) != len(capLabel):
continue
# Save each letter as a separate image
for letterRegion, letterLabel in zip(letterRegions, capLabel):
# Get coordinates (x, y) and dimensions (w, h) of letter region
x, y, w, h = letterRegion
# extract the letter from the original image
letterImage = imageData[y:y + h, x:x + w]
# # extract the letter from the original image, with a 2 pixel margin
# letterImage = imageData[y - 2:y + h + 2, x - 2:x + w + 2] # note: image data arranged with rows corresponding to the vertical (y), & columns corresponding to the horizontal (x)
#cv2.imshow(letterLabel, letterImage)
# define folder path where letters will be saved & create folder if it does not exist
savePath = os.path.join(destFolder, letterLabel)
if not os.path.exists(savePath):
os.makedirs(savePath)
# initialize or increment the letterCounts dictionary for the key corresponding to the present letter
if letterLabel not in letterCounts:
letterCounts[letterLabel] = 1
else:
letterCounts[letterLabel] += 1
letterCount = letterCounts[letterLabel]
# write the letter image to a file based on its letter count
fileName = os.path.join(savePath, "{}.png".format(str(letterCount).zfill(6)))
cv2.imwrite(fileName, letterImage)
|
flexible
|
{
"blob_id": "6109efeb3462ac2c5a94a68fbfa4f2f0617dd927",
"index": 1221,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef extractLetters(sourceFolder, trainRatio=0.8, destFolder=\n './data/separateLetters'):\n \"\"\" \n\n Parameters\n ----------\n sourceFolder : string\n DESCRIPTION.\n trainRatio : float, optional\n DESCRIPTION. The default is 0.8.\n destFolder : string, optional\n DESCRIPTION. The default is './data/separateLetters'.\n\n Returns\n -------\n None.\n\n \"\"\"\n letterCounts = {}\n capImages = os.listdir(sourceFolder)\n nImages = len(capImages)\n iSplit = int(nImages * trainRatio)\n trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]\n with open('trainTestSplit.dat', 'wb') as f:\n pickle.dump(trainTestSplit, f)\n nTrain = len(trainTestSplit[0])\n for iImage, capImage in enumerate(trainTestSplit[0]):\n print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))\n capLabel = capImage.split('.')[0]\n imageData = cv2.imread(os.path.join(sourceFolder, capImage))\n imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)\n imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.\n BORDER_REPLICATE)\n letterRegions = locateLetterRegions(imageData)\n if len(letterRegions) != len(capLabel):\n continue\n for letterRegion, letterLabel in zip(letterRegions, capLabel):\n x, y, w, h = letterRegion\n letterImage = imageData[y:y + h, x:x + w]\n savePath = os.path.join(destFolder, letterLabel)\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n if letterLabel not in letterCounts:\n letterCounts[letterLabel] = 1\n else:\n letterCounts[letterLabel] += 1\n letterCount = letterCounts[letterLabel]\n fileName = os.path.join(savePath, '{}.png'.format(str(\n letterCount).zfill(6)))\n cv2.imwrite(fileName, letterImage)\n",
"step-3": "<mask token>\nimport os\nimport cv2\nimport pickle\nfrom utils import locateLetterRegions\n\n\ndef extractLetters(sourceFolder, trainRatio=0.8, destFolder=\n './data/separateLetters'):\n \"\"\" \n\n Parameters\n ----------\n sourceFolder : string\n DESCRIPTION.\n trainRatio : float, optional\n DESCRIPTION. The default is 0.8.\n destFolder : string, optional\n DESCRIPTION. The default is './data/separateLetters'.\n\n Returns\n -------\n None.\n\n \"\"\"\n letterCounts = {}\n capImages = os.listdir(sourceFolder)\n nImages = len(capImages)\n iSplit = int(nImages * trainRatio)\n trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]\n with open('trainTestSplit.dat', 'wb') as f:\n pickle.dump(trainTestSplit, f)\n nTrain = len(trainTestSplit[0])\n for iImage, capImage in enumerate(trainTestSplit[0]):\n print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))\n capLabel = capImage.split('.')[0]\n imageData = cv2.imread(os.path.join(sourceFolder, capImage))\n imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)\n imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.\n BORDER_REPLICATE)\n letterRegions = locateLetterRegions(imageData)\n if len(letterRegions) != len(capLabel):\n continue\n for letterRegion, letterLabel in zip(letterRegions, capLabel):\n x, y, w, h = letterRegion\n letterImage = imageData[y:y + h, x:x + w]\n savePath = os.path.join(destFolder, letterLabel)\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n if letterLabel not in letterCounts:\n letterCounts[letterLabel] = 1\n else:\n letterCounts[letterLabel] += 1\n letterCount = letterCounts[letterLabel]\n fileName = os.path.join(savePath, '{}.png'.format(str(\n letterCount).zfill(6)))\n cv2.imwrite(fileName, letterImage)\n",
"step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 19 09:31:20 2021\n\n@author: dclabby\n\"\"\"\nimport os\nimport cv2\nimport pickle\nfrom utils import locateLetterRegions\n\n# # Constants\n# sourceFolder = '/home/dclabby/Documents/Springboard/HDAIML_SEP/Semester03/MachineLearning/Project/solving_captchas_code_examples/solving_captchas_code_examples/generated_captcha_images/'\n# destFolder = './data/separateLetters'\n# trainRatio = 0.8 # proportion of data set that will be used for training & validation (i.e. 1 - testRatio)\n\ndef extractLetters(sourceFolder, trainRatio=0.8, destFolder='./data/separateLetters'):\n \"\"\" \n\n Parameters\n ----------\n sourceFolder : string\n DESCRIPTION.\n trainRatio : float, optional\n DESCRIPTION. The default is 0.8.\n destFolder : string, optional\n DESCRIPTION. The default is './data/separateLetters'.\n\n Returns\n -------\n None.\n\n \"\"\"\n \n letterCounts = {}\n \n # Get a list of all the captcha images to be processed\n capImages = os.listdir(sourceFolder)\n \n # loop over the image paths\n nImages = len(capImages) \n # note: the original script uses all images for training (train/test split is implemented later, but test data is actually used for validation)\n # therefore, should make a train/test split here & keep the test data separate\n iSplit = int(nImages*trainRatio)\n trainTestSplit = [capImages[:iSplit], capImages[iSplit:]] # [train, test]\n \n # save the list of training and test data, so that test data can be identified later\n with open('trainTestSplit.dat', \"wb\") as f:\n pickle.dump(trainTestSplit, f)\n # with open('trainTestSplit.dat', \"rb\") as f:\n # trainTestSplit = pickle.load(f)\n \n nTrain = len(trainTestSplit[0])\n for (iImage, capImage) in enumerate(trainTestSplit[0]):#enumerate(capImages):\n print('Processing image ' + str(iImage+1) + ' of ' + str(nTrain))#str(nImages))\n \n # Separate the filename from its extension, and use filename as the captcha's label 
(i.e. \"2A2X.png\" -> \"2A2X\")\n capLabel = capImage.split('.')[0]\n \n # Load image\n # imageData = cv2.imread(sourceFolder + capImage)\n imageData = cv2.imread(os.path.join(sourceFolder, capImage))\n #cv2.imshow(capLabel + ' - original', imageData)\n \n # Convert to grayscale\n imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)\n #cv2.imshow(capLabel + ' - gray', imageData) \n \n # Add padding\n imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.BORDER_REPLICATE)\n #cv2.imshow(capLabel + ' - padding', imageData) \n \n # Locate letter regions\n letterRegions = locateLetterRegions(imageData)\n \n # If the number of contours does not equal the number of letters in the label it is concluded that letter extraction\n # was not successful, and this example will not be used in training data\n if len(letterRegions) != len(capLabel):\n continue\n \n # Save each letter as a separate image\n for letterRegion, letterLabel in zip(letterRegions, capLabel):\n # Get coordinates (x, y) and dimensions (w, h) of letter region\n x, y, w, h = letterRegion\n \n # extract the letter from the original image\n letterImage = imageData[y:y + h, x:x + w] \n # # extract the letter from the original image, with a 2 pixel margin\n # letterImage = imageData[y - 2:y + h + 2, x - 2:x + w + 2] # note: image data arranged with rows corresponding to the vertical (y), & columns corresponding to the horizontal (x) \n #cv2.imshow(letterLabel, letterImage) \n \n # define folder path where letters will be saved & create folder if it does not exist\n savePath = os.path.join(destFolder, letterLabel)\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n \n # initialize or increment the letterCounts dictionary for the key corresponding to the present letter\n if letterLabel not in letterCounts:\n letterCounts[letterLabel] = 1\n else:\n letterCounts[letterLabel] += 1\n letterCount = letterCounts[letterLabel]\n \n # write the letter image to a file based on its letter count\n fileName = 
os.path.join(savePath, \"{}.png\".format(str(letterCount).zfill(6)))\n cv2.imwrite(fileName, letterImage)\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import csv
import glob
import random
import sys
from math import ceil, floor
from os.path import basename, exists, dirname, isfile
import numpy as np
import keras
from keras import Model, Input, regularizers
from keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
from keras.optimizers import Adam, SGD
from sklearn.metrics import confusion_matrix, accuracy_score # , classification_report
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from Dataset.Dataset_Utils.augmenter import NoAug
from Dataset.Dataset_Utils.datagen import DataGenerator as DataGen
from Dataset.Dataset_Utils.dataset_tools import print_cm
from Models.model_sharma import SharmaNet
from audio_classifier import AudioClassifier, from_arff_to_feture
from frames_classifier import FramesClassifier
from test_models import *
# Emotion class labels of the AFEW dataset, in the fixed order fed to the
# LabelBinarizer (so one-hot columns are reproducible across runs).
classes = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
def my_model():
    """Build the early-fusion network.

    Takes 16 time steps of 1024-d frame features and 1582-d audio features,
    concatenates them per step, applies a tanh bottleneck + dropout + softmax
    per step, then averages the per-step class distributions over time.

    Returns
    -------
    keras.Model
        Model with inputs ``[audio, frames]`` and a 7-way softmax output.
    """
    ff_reg = regularizers.l2(1e-5)
    logit_reg = regularizers.l2(1e-5)

    frames_in = Input(shape=(16, 1024))
    audio_in = Input(shape=(16, 1582))

    merged = Concatenate(name='fusion1')([frames_in, audio_in])
    hidden = TimeDistributed(
        Dense(100, activation='tanh', kernel_regularizer=ff_reg, name='ff_logit_lstm'))(merged)
    hidden = TimeDistributed(Dropout(0.5))(hidden)
    per_step = TimeDistributed(
        Dense(7, activation='softmax', kernel_regularizer=logit_reg, name='ff_logit'))(hidden)
    # Temporal average pooling of the per-step softmax outputs.
    averaged = Lambda(lambda t: tf.reduce_mean(t, axis=1))(per_step)

    return Model([audio_in, frames_in], averaged)
class VideoClassifier:
    """Audio/video emotion classifier for AFEW clips.

    Two fusion strategies are supported:

    * ``"early_fusion"`` — 1024-d frame features and 1582-d openSMILE audio
      features are concatenated per time step and fed to the network built by
      ``my_model`` (training fully implemented here).
    * ``"late_fusion"`` — per-modality predictions are combined afterwards
      (only evaluation via ``print_confusion_matrix`` is implemented).
    """

    def __init__(self, train_mode="late_fusion", video_model_path=None, time_step=16,
                 base_path="/user/vlongobardi/AFEW/aligned/", feature_name="emobase2010_100", stride=1):
        """If ``video_model_path`` is given, load those weights into a fresh
        ``my_model`` instance; otherwise build the feature-path CSVs from
        ``base_path`` and immediately start training."""
        self.time_step = time_step
        self.train_mode = train_mode
        self.feature_name = feature_name
        self.classes = classes
        # One-hot encoder for the 7 emotion labels.
        self.lb = LabelBinarizer()
        self.lb.fit_transform(np.array(classes))
        self.feature_num = 1582  # openSMILE emobase2010 vector length
        # Frames discarded at the clip start: half the audio window (ms) / 40 ms per frame.
        # NOTE(review): assumes feature_name embeds a numeric window size
        # ("emobase2010_100"); a "*_full" feature name would make int() raise
        # ValueError here — confirm intended usage.
        self.offset = ceil(int(self.feature_name.split("_")[1]) / 2 / 40)
        self.stride = stride
        if video_model_path is not None:
            try:
                self.model = my_model()
                self.model.load_weights(video_model_path)
                print("VideoClassifier loaded successfully", video_model_path)
            # NOTE(review): bare except hides the actual load error.
            except:
                print("Exception")
        else:
            t_files = glob.glob(base_path + "Train" + "/*/*csv")
            v_files = glob.glob(base_path + "Val" + "/*/*csv")
            self.csv_fusion = self.generate_feature(t_files, v_files)
            self.do_training()

    def do_training(self):
        """Grid-search driver: iterate over optimizer/learning-rate/model
        combinations and call ``train`` for each (currently a 1x1x1 grid).

        ``skips`` allows resuming a crashed grid search by skipping the first
        N already-completed combinations.
        """
        skips = 0
        iters = 1
        bs = 16
        ep = 150
        opts = ["SGD"]#, "Adam"]
        lrs = [0.01]
        models = [my_model]
        models_name = [x.__name__ for x in models]
        for index, model in enumerate(models):
            for opt in opts:
                for lr in lrs:
                    for iteration in range(iters):
                        if skips > 0:
                            skips -= 1
                            continue
                        # Hyper-parameters handed down to train().
                        train_infos = {
                            "iteration": iteration, "model_name": models_name[index],
                            "batch_size": bs, "epoch": ep, "lr": lr, "opt": opt
                        }
                        print(
                            "\n\n################################################################################\n"
                            "############################## ITERATION " + str(iteration + 1) + " of " + str(iters) +
                            " ###########################\n######################################################" +
                            " ########################\nepochs:", ep, "batch_size:", bs, "\nmodel:", models_name[index],
                            "in", models_name, "\nopt:", opt, "in", opts, "\nlr:", lr, "in", lrs)
                        train_infos["generator1"] = self.early_gen_train
                        train_infos["generator2"] = self.early_gen_new_val
                        t_files, v_files = self.csv_fusion["train"], self.csv_fusion["val"]
                        m = model()
                        self.train(t_files, v_files, train_infos, m)

    def generate_feature(self, t_files, v_files):
        """Return ``{"train": ..., "val": ...}`` dicts mapping clip_id to
        [ground_truth, frame_feature_path, audio_feature_path] rows, building
        the CSV cache files on first run and loading them afterwards."""
        if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):
            print("\n##### GENERATING CSV FOR EARLY FUSION... #####")
            csv_early_fusion = {
                "train": self._generate_data_for_early_fusion(t_files, "train"),
                "val": self._generate_data_for_early_fusion(v_files, "val")
            }
            print("\n##### CSV GENERATED! #####")
        else:
            csv_early_fusion = {}
            for name in ["train", "val"]:
                csv_early_fusion[name] = self.load_early_csv(name)
        return csv_early_fusion

    def load_early_csv(self, dataset):
        """Load a previously generated early-fusion CSV ("train" or "val")
        into a dict: clip_id -> list of [ground_truth, frame_label, audio_label]."""
        csv_early_fusion = {}
        print("Opening csv: features_path_early_fusion_" + dataset + "_" + self.feature_name + '.csv')
        with open('features_path_early_fusion_' + dataset + "_" + self.feature_name + '.csv', 'r') as f:
            f.readline()  # skip header row
            csv_reader = csv.reader(f)
            for clip_id, ground_truth, frame_label, audio_label in csv_reader:
                if clip_id not in csv_early_fusion:
                    csv_early_fusion[clip_id] = []
                csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])
        return csv_early_fusion

    def _generate_data_for_early_fusion(self, files, name):
        """Pair frame-feature and audio-feature files per clip, drop clips
        with fewer than 16 usable samples, and write the result to the
        'features_path_early_fusion_<name>_<feature>.csv' cache.

        Example path mapping:
        '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv'
        '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat'
        '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff'
        """
        if "full" in self.feature_name:
            frame_to_discard = 0
        else:
            # Discard frames covered by the first half audio window (40 ms/frame).
            window_size = int(self.feature_name.split("_")[1])
            frame_to_discard = ceil(window_size / 2 / 40)
        my_csv = {}
        for file in tqdm(files):
            clip_id_temp = file.split(".")[0]
            base_path = clip_id_temp.replace("AFEW/aligned", "early_feature/framefeature") + "*"
            frames_features_path = glob.glob(base_path)
            audio_features_path = glob.glob(
                base_path.replace("early_feature/framefeature", "early_feature/" + self.feature_name))
            # Sort by the trailing frame index so time steps line up.
            frames_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
            if "full" not in self.feature_name:
                audio_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
            ground_truth = basename(dirname(clip_id_temp))
            clip_id = basename(clip_id_temp)
            # discard video frames based on window size
            frames_features_path = frames_features_path[frame_to_discard:]
            if len(frames_features_path) < 16:
                continue
                # print("FRAME TOO FEW SAMPLES:", len(frames_features_path), clip_id)
            if len(audio_features_path) < 16 and "full" not in self.feature_name:
                continue
                # print("AUDIO TOO FEW SAMPLES:", len(audio_features_path), clip_id)
            for index, frame in enumerate(frames_features_path):
                if clip_id not in my_csv.keys():
                    my_csv[clip_id] = []
                if "full" not in self.feature_name:
                    my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])
                else:
                    # "full" features: one clip-level audio file shared by all frames.
                    my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])
        with open('features_path_early_fusion_' + name + "_" + self.feature_name + '.csv', 'w') as f:
            f.write("clip_id, ground_truth, frame_label, audio_label\n")
            for key in my_csv:
                for line in my_csv[key]:
                    f.write(key + "," + line[0] + "," + line[1] + "," + line[2] + "\n")
        return my_csv

    def early_gen_train(self, list_files, batch_size):
        """Infinite training generator: for each clip pick a random 16-step
        window and yield ([audio_batch, frame_batch], one_hot_labels)."""
        c = 0
        clip_ids = list(self.csv_fusion["train"].keys())
        random.shuffle(clip_ids)
        while True:
            labels = []
            features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
                        np.zeros((batch_size, self.time_step, 1024)).astype('float')]
            for i in range(c, c + batch_size):
                clip_id = clip_ids[i]
                video_info = self.csv_fusion["train"][clip_id]
                ground_truth = video_info[0][0]
                # first_frame_num = int(video_info[0][1].split("_")[-1].split(".")[0])
                # Random temporal crop of time_step consecutive samples.
                start = random.randint(0, len(video_info) - self.time_step)
                for index, elem in enumerate(video_info[start:self.time_step + start]):
                    _, frame_path, audio_path = elem
                    if not isfile(frame_path):
                        # Missing frame file: shift the window forward one step.
                        start += 1
                        if start >= len(video_info):
                            raise
                        continue
                    frame_feature = np.load(frame_path)
                    features[0][i - c][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )
                    features[1][i - c][index] = frame_feature.reshape(1024, )
                labels.append(ground_truth)
            c += batch_size
            if c + batch_size > len(clip_ids):
                # Epoch exhausted: restart from a fresh shuffle.
                c = 0
                random.shuffle(clip_ids)
            labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
            yield features, labels

    def early_gen_new_val(self, list_files, batch_size, mode="val", stride=1):
        """Validation generator: sliding windows (step = time_step // stride,
        i.e. 50% overlap at stride=2) over every clip. With mode="eval" it
        stops after one pass instead of looping forever."""
        c = 0
        labels = features = []
        clip_ids = list(list_files.keys())
        while True:
            for clip_id in tqdm(clip_ids):
                video_info = list_files[clip_id]
                ground_truth = video_info[0][0]
                for start in range(0, len(video_info) - self.time_step, self.time_step // stride):
                    if c == 0:
                        # Begin a new batch.
                        labels = []
                        features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
                                    np.zeros((batch_size, self.time_step, 1024)).astype('float')]
                    for index, elem in enumerate(video_info[start:self.time_step + start]):
                        _, frame_path, audio_path = elem
                        frame_feature = np.load(frame_path)
                        features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape(
                            self.feature_num, )
                        features[1][c][index] = frame_feature.reshape(1024, )
                    labels.append(ground_truth)
                    c += 1
                    if c == batch_size:
                        c = 0
                        labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
                        yield features, labels
            if mode == "eval":
                break

    def early_gen_test_clip(self, list_files, clip_id, stride=1):
        """Sliding-window generator over a single clip (batch size 1); must be
        invoked once per clip. Yields until the window start passes the end."""
        ground_truth = list_files[0][0]
        start = 0
        end = len(list_files) - self.time_step
        while True:
            labels = []
            features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),
                        np.zeros((1, self.time_step, 1024)).astype('float')]
            for index, elem in enumerate(list_files[start:start + self.time_step]):
                _, frame_path, audio_path = elem
                frame_feature = np.load(frame_path)
                features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )
                features[1][0][index] = frame_feature.reshape(1024, )
            labels.append(ground_truth)
            start += self.time_step // stride
            if start >= end:
                break
            labels = self.lb.transform(np.array(labels)).reshape((1, 7))
            yield features, labels

    def get_validation_dim(self):
        """Return the precomputed number of validation steps for the current
        (stride, feature_name) pair.

        NOTE(review): these counts appear to be measured against a fixed
        dataset; unlisted combinations fall through and return None implicitly
        — confirm before using new strides/features.
        """
        if self.stride == 2:
            if "full" in self.feature_name:
                return 141
            elif "600" in self.feature_name:
                return 0
            elif "300" in self.feature_name:
                return 114
            elif "100" in self.feature_name:
                return 128
        elif self.stride == 1:
            if "full" in self.feature_name:
                return 76
            elif "600" in self.feature_name:
                return 0
            elif "300" in self.feature_name:
                return 63
            elif "100" in self.feature_name:
                return 69
        elif self.stride == self.time_step:
            return 0

    def train(self, train_files, val_files, train_data, model):
        """Compile ``model`` with the optimizer/lr from ``train_data``, then
        fit it with the train/val generators, a step-wise LR schedule, per-epoch
        checkpoints, and TensorBoard logging. Prints history at the end."""
        if train_data["opt"] == "Adam":
            optimizer = Adam(lr=train_data["lr"])
        else:
            optimizer = SGD(lr=train_data["lr"])
        model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
        model.summary()

        train_gen = train_data["generator1"](train_files, train_data["batch_size"])
        no_of_training_images = len(train_files)
        no_of_val_images = self.get_validation_dim()
        print("no_of_val_images:", no_of_val_images)
        val_gen = train_data["generator2"](val_files, train_data["batch_size"])

        # stride = 1, no overlapping
        # stride = 2, overlapping: 50%
        # stride = time_step, stride: 1

        # Encode the hyper-parameters in the checkpoint file name.
        model_name = "_lr" + str(train_data["lr"]) + "_Opt" + train_data["opt"] + "_Model" + str(
            train_data["model_name"]) + "_Feature" + self.feature_name + "_" + str(
            train_data["iteration"]) + "_" + self.train_mode  # + "_modelType" + str(self.model_type)
        model_name += "stride" + str(self.stride)
        model_name += ".h5"

        def custom_scheduler(epoch):
            # Step decay: 0.1 -> 0.01 -> 0.001 -> 0.0001 at epochs 50/100/125.
            if epoch < 50:
                print(0.1)
                return 0.1
            if epoch < 100:
                print(0.01)
                return 0.01
            if epoch < 125:
                print(0.001)
                return 0.001
            else:
                print(0.0001)
                return 0.0001
            #print(0.1 / 10 ** (floor(epoch / 40) + 1))
            #return 0.1 / 10 ** (floor(epoch / 40) + 1)

        class CheckValCMCallback(keras.callbacks.Callback):
            """Optional callback: re-evaluate the full validation set after
            each epoch and print the accumulated accuracies at the end."""

            def __init__(self, m, dim, validation_files, epoch):
                super().__init__()
                self.vc = m
                self.dim = dim
                self.val_files = validation_files
                self.epoch = epoch
                self.accs = []

            def on_epoch_end(self, epoch, logs=None):
                csv_fusion = self.vc.load_early_csv("val")
                # gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval")
                # predictions = []
                # ground_truths = []
                # for x in gen:
                #     ground_truths.append(self.vc.lb.inverse_transform(x[1])[0])
                #     pred = self.model.predict(x[0])
                #     pred = self.vc.lb.inverse_transform(pred)
                #     predictions.append(pred[0])
                # self.vc.print_stats(ground_truths, predictions, "Video" + str(epoch))

                gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval")
                acc = self.model.evaluate_generator(gen, self.dim, workers=0)
                self.accs.append(acc)
                print("Evaluate:", acc)
                if self.epoch == epoch + 1:
                    print("Validation_Accuracy =", self.accs)

        cb = [ModelCheckpoint(
            filepath=str(
                "weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}" + model_name),
            monitor="val_accuracy", save_weights_only=True),
            TensorBoard(log_dir="NewFusionLogs_sched/" + self.train_mode + "/" + self.feature_name, write_graph=True,
                        write_images=True)]
        cb += [LearningRateScheduler(custom_scheduler)]
        #cb += [CheckValCMCallback(self, no_of_val_images, val_files, train_data["epoch"])]
        history = model.fit_generator(train_gen,
                                      validation_data=val_gen,
                                      epochs=train_data["epoch"],
                                      steps_per_epoch=(no_of_training_images * 2 // train_data["batch_size"]),
                                      validation_steps=(no_of_val_images),
                                      workers=0, verbose=1, callbacks=cb)

        print("\n\nTrain_Accuracy =", history.history['accuracy'])
        print("\nVal_Accuracy =", history.history['val_accuracy'])
        print("\n\nTrain_Loss =", history.history['loss'])
        print("\nVal_Loss =", history.history['val_loss'])

    def print_stats(self, ground_truths, predictions, name):
        """Print a row-normalized confusion matrix and the accuracy score for
        the given prediction list under the header ``name``."""
        cm = confusion_matrix(ground_truths, predictions, self.classes)
        print("###" + name + " Results###\n")
        # print_cm(cm, self.classes)
        # print("\n\n")
        print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes)
        print("\n\n")
        print("Accuracy score: ", accuracy_score(ground_truths, predictions), "\n\n")
        # print("Report")
        # print(classification_report(ground_truths, predictions))
        print("#################################################################end###\n\n\n")

    def print_confusion_matrix(self, stride=1):
        """Evaluate the loaded model on the validation set and print its
        confusion matrix; for late fusion, also print per-modality stats.
        (Original note: early-fusion support still marked as incomplete.)"""
        csv_fusion = {}
        predictions = []
        ground_truths = []
        if self.train_mode == "early_fusion":
            csv_fusion = self.load_early_csv("val")
            print("CSV loaded", len(csv_fusion))
            gen = self.early_gen_new_val(csv_fusion, 1, "eval", stride)
            for x in gen:
                ground_truths.append(self.lb.inverse_transform(x[1])[0])
                pred = self.model.predict(x[0])
                pred = self.lb.inverse_transform(pred)
                predictions.append(pred[0])
                # print("\ngt, pred", self.lb.inverse_transform(x[1]), pred)
            self.print_stats(ground_truths, predictions, "Video")
        else:
            with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:
                f.readline()
                csv_reader = csv.reader(f)
                for row in csv_reader:
                    csv_fusion[row[0]] = [row[1], row[2], row[3]]
            a_p = []
            f_p = []
            files = glob.glob("/user/vlongobardi/late_feature/" + self.feature_name + "/*/*csv")
            for file in files:
                clip_id = basename(file).split(".")[0]
                ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]
                # Fuse the two one-hot modality predictions into a 14-d vector.
                sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred])))
                pred = self.model.predict(sample.reshape((1, 14)))
                pred = self.lb.inverse_transform(pred)[0]
                predictions.append(pred)
                a_p.append(audio_pred)
                f_p.append(frame_pred)
                ground_truths.append(ground_truth)
            self.print_stats(ground_truths, predictions, "Video")
            self.print_stats(ground_truths, a_p, "Audio")
            self.print_stats(ground_truths, f_p, "Frame")
if __name__ == "__main__":
    # CLI entry point:
    #   argv[1] == "late"  -> evaluate late fusion for each pre-trained model
    #   argv[1] == "early" -> train early fusion with the feature set keyed
    #                         by argv[2] (e.g. "e1" -> emobase2010_100)
    if sys.argv[1] == "late":
        print("LATE")
        model_path = [
            "audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5",
            "audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5",
            "audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5",
            "audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5"
        ]
        for mp in model_path:
            # BUG FIX: VideoClassifier.__init__ has no "audio_model_path"
            # parameter, so the original keyword raised TypeError; the weight
            # file is loaded through "video_model_path".
            vc = VideoClassifier(train_mode="late_fusion", video_model_path=mp)
    elif sys.argv[1] == "early":
        print("EARLY")
        arff_paths = {"e1": "emobase2010_100", "i1": "IS09_emotion_100",
                      "e3": "emobase2010_300", "i3": "IS09_emotion_300",
                      "e6": "emobase2010_600", "i6": "IS09_emotion_600",
                      "ef": "emobase2010_full", "if": "IS09_emotion_full"}
        vc = VideoClassifier(train_mode="early_fusion", feature_name=arff_paths[sys.argv[2]])
|
normal
|
{
"blob_id": "c925bed2f4d8120e156caebbe8e6bf9d6a51ee37",
"index": 3330,
"step-1": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n <mask token>\n <mask token>\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n <mask token>\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = 
video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n <mask token>\n <mask token>\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + 
str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n <mask token>\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n 
print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n <mask token>\n <mask token>\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = 
glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] 
= np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] 
= np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, 
validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n 
csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = ['SGD']\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n if skips > 0:\n skips -= 1\n continue\n train_infos = {'iteration': iteration, 'model_name':\n models_name[index], 'batch_size': bs, 'epoch':\n ep, 'lr': lr, 'opt': opt}\n print(\n \"\"\"\n\n################################################################################\n############################## ITERATION \"\"\"\n + str(iteration + 1) + ' of ' + str(iters) +\n \"\"\" ###########################\n######################################################\"\"\"\n + \"\"\" ########################\nepochs:\"\"\", ep,\n 'batch_size:', bs, '\\nmodel:', models_name[\n index], 'in', models_name, '\\nopt:', opt, 'in',\n opts, '\\nlr:', lr, 'in', lrs)\n train_infos['generator1'] = self.early_gen_train\n train_infos['generator2'] = self.early_gen_new_val\n t_files, 
v_files = self.csv_fusion['train'\n ], self.csv_fusion['val']\n m = model()\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.\n feature_name + '.csv'):\n print('\\n##### GENERATING CSV FOR EARLY FUSION... #####')\n csv_early_fusion = {'train': self.\n _generate_data_for_early_fusion(t_files, 'train'), 'val':\n self._generate_data_for_early_fusion(v_files, 'val')}\n print('\\n##### CSV GENERATED! #####')\n else:\n csv_early_fusion = {}\n for name in ['train', 'val']:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = 
basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 
7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def 
get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = 
self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n 
predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef my_model():\n r1, r2 = regularizers.l2(1e-05), regularizers.l2(1e-05)\n frame_input = Input(shape=(16, 1024))\n audio_input = Input(shape=(16, 1582))\n x = Concatenate(name='fusion1')([frame_input, audio_input])\n x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1,\n name='ff_logit_lstm'))(x)\n x = TimeDistributed(Dropout(0.5))(x)\n x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=\n r2, name='ff_logit'))(x)\n x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)\n return Model([audio_input, frame_input], x)\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = ['SGD']\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n if skips > 0:\n skips -= 1\n continue\n train_infos = {'iteration': iteration, 'model_name':\n models_name[index], 'batch_size': bs, 'epoch':\n ep, 'lr': lr, 'opt': opt}\n print(\n 
\"\"\"\n\n################################################################################\n############################## ITERATION \"\"\"\n + str(iteration + 1) + ' of ' + str(iters) +\n \"\"\" ###########################\n######################################################\"\"\"\n + \"\"\" ########################\nepochs:\"\"\", ep,\n 'batch_size:', bs, '\\nmodel:', models_name[\n index], 'in', models_name, '\\nopt:', opt, 'in',\n opts, '\\nlr:', lr, 'in', lrs)\n train_infos['generator1'] = self.early_gen_train\n train_infos['generator2'] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion['train'\n ], self.csv_fusion['val']\n m = model()\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.\n feature_name + '.csv'):\n print('\\n##### GENERATING CSV FOR EARLY FUSION... #####')\n csv_early_fusion = {'train': self.\n _generate_data_for_early_fusion(t_files, 'train'), 'val':\n self._generate_data_for_early_fusion(v_files, 'val')}\n print('\\n##### CSV GENERATED! 
#####')\n else:\n csv_early_fusion = {}\n for name in ['train', 'val']:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n 
my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, 
self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 
0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += 
[LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = 
csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-5": "import csv\nimport glob\nimport random\nimport sys\nfrom math import ceil, floor\nfrom os.path import basename, exists, dirname, isfile\n\nimport numpy as np\nimport keras\nfrom keras import Model, Input, regularizers\nfrom keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler\nfrom keras.optimizers import Adam, SGD\nfrom sklearn.metrics import confusion_matrix, accuracy_score # , classification_report\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tqdm import tqdm\n\nfrom Dataset.Dataset_Utils.augmenter import NoAug\nfrom Dataset.Dataset_Utils.datagen import DataGenerator as DataGen\nfrom Dataset.Dataset_Utils.dataset_tools import print_cm\nfrom Models.model_sharma import SharmaNet\nfrom audio_classifier import AudioClassifier, from_arff_to_feture\nfrom frames_classifier import FramesClassifier\nfrom test_models import *\n\nclasses = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Neutral\", \"Sad\", \"Surprise\"]\n\n\ndef my_model():\n r1, r2 = regularizers.l2(1e-5), regularizers.l2(1e-5)\n frame_input = Input(shape=(16, 1024))\n audio_input = Input(shape=(16, 1582))\n x = Concatenate(name='fusion1')([frame_input, audio_input])\n x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1, name='ff_logit_lstm'))(x)\n x = TimeDistributed(Dropout(0.5))(x)\n x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=r2, name='ff_logit'))(x)\n x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)\n return Model([audio_input, frame_input], x)\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode=\"late_fusion\", video_model_path=None, time_step=16,\n base_path=\"/user/vlongobardi/AFEW/aligned/\", feature_name=\"emobase2010_100\", stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n 
self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split(\"_\")[1]) / 2 / 40)\n self.stride = stride\n\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print(\"VideoClassifier loaded successfully\", video_model_path)\n except:\n print(\"Exception\")\n else:\n t_files = glob.glob(base_path + \"Train\" + \"/*/*csv\")\n v_files = glob.glob(base_path + \"Val\" + \"/*/*csv\")\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = [\"SGD\"]#, \"Adam\"]\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n\n if skips > 0:\n skips -= 1\n continue\n\n train_infos = {\n \"iteration\": iteration, \"model_name\": models_name[index],\n \"batch_size\": bs, \"epoch\": ep, \"lr\": lr, \"opt\": opt\n }\n\n print(\n \"\\n\\n################################################################################\\n\"\n \"############################## ITERATION \" + str(iteration + 1) + \" of \" + str(iters) +\n \" ###########################\\n######################################################\" +\n \" ########################\\nepochs:\", ep, \"batch_size:\", bs, \"\\nmodel:\", models_name[index],\n \"in\", models_name, \"\\nopt:\", opt, \"in\", opts, \"\\nlr:\", lr, \"in\", lrs)\n\n train_infos[\"generator1\"] = self.early_gen_train\n train_infos[\"generator2\"] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion[\"train\"], self.csv_fusion[\"val\"]\n m = model()\n\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):\n print(\"\\n##### GENERATING CSV FOR EARLY FUSION... 
#####\")\n csv_early_fusion = {\n \"train\": self._generate_data_for_early_fusion(t_files, \"train\"),\n \"val\": self._generate_data_for_early_fusion(v_files, \"val\")\n }\n print(\"\\n##### CSV GENERATED! #####\")\n else:\n csv_early_fusion = {}\n for name in [\"train\", \"val\"]:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print(\"Opening csv: features_path_early_fusion_\" + dataset + \"_\" + self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + \"_\" + self.feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n # '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv'\n # '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat'\n # '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff'\n if \"full\" in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split(\"_\")[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split(\".\")[0]\n base_path = clip_id_temp.replace(\"AFEW/aligned\", \"early_feature/framefeature\") + \"*\"\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(\n base_path.replace(\"early_feature/framefeature\", \"early_feature/\" + self.feature_name))\n frames_features_path.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n if \"full\" not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = 
basename(clip_id_temp)\n\n # discard video frames based on window size\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n # print(\"FRAME TOO FEW SAMPLES:\", len(frames_features_path), clip_id)\n if len(audio_features_path) < 16 and \"full\" not in self.feature_name:\n continue\n # print(\"AUDIO TOO FEW SAMPLES:\", len(audio_features_path), clip_id)\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if \"full\" not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])\n with open('features_path_early_fusion_' + name + \"_\" + self.feature_name + '.csv', 'w') as f:\n f.write(\"clip_id, ground_truth, frame_label, audio_label\\n\")\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + \",\" + line[0] + \",\" + line[1] + \",\" + line[2] + \"\\n\")\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion[\"train\"].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),\n np.zeros((batch_size, self.time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion[\"train\"][clip_id]\n ground_truth = video_info[0][0]\n\n # first_frame_num = int(video_info[0][1].split(\"_\")[-1].split(\".\")[0])\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][i - 
c][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode=\"val\", stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n\n for start in range(0, len(video_info) - self.time_step, self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),\n np.zeros((batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape(\n self.feature_num, )\n features[1][c][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))\n yield features, labels\n if mode == \"eval\":\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),\n np.zeros((1, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(list_files[start:start + self.time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][0][index] = 
frame_feature.reshape(1024, )\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if \"full\" in self.feature_name:\n return 141\n elif \"600\" in self.feature_name:\n return 0\n elif \"300\" in self.feature_name:\n return 114\n elif \"100\" in self.feature_name:\n return 128\n elif self.stride == 1:\n if \"full\" in self.feature_name:\n return 76\n elif \"600\" in self.feature_name:\n return 0\n elif \"300\" in self.feature_name:\n return 63\n elif \"100\" in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data[\"opt\"] == \"Adam\":\n optimizer = Adam(lr=train_data[\"lr\"])\n else:\n optimizer = SGD(lr=train_data[\"lr\"])\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n\n train_gen = train_data[\"generator1\"](train_files, train_data[\"batch_size\"])\n no_of_training_images = len(train_files)\n\n no_of_val_images = self.get_validation_dim()\n print(\"no_of_val_images:\", no_of_val_images)\n val_gen = train_data[\"generator2\"](val_files, train_data[\"batch_size\"])\n\n # stride = 1, no overlapping\n # stride = 2, overlapping: 50%\n # stride = time_step, stride: 1\n\n model_name = \"_lr\" + str(train_data[\"lr\"]) + \"_Opt\" + train_data[\"opt\"] + \"_Model\" + str(\n train_data[\"model_name\"]) + \"_Feature\" + self.feature_name + \"_\" + str(\n train_data[\"iteration\"]) + \"_\" + self.train_mode # + \"_modelType\" + str(self.model_type)\n model_name += \"stride\" + str(self.stride)\n model_name += \".h5\"\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 
0.0001\n #print(0.1 / 10 ** (floor(epoch / 40) + 1))\n #return 0.1 / 10 ** (floor(epoch / 40) + 1)\n\n class CheckValCMCallback(keras.callbacks.Callback):\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv(\"val\")\n # gen = self.vc.early_gen_new_val(csv_fusion, 16, \"eval\")\n # predictions = []\n # ground_truths = []\n # for x in gen:\n # ground_truths.append(self.vc.lb.inverse_transform(x[1])[0])\n # pred = self.model.predict(x[0])\n # pred = self.vc.lb.inverse_transform(pred)\n # predictions.append(pred[0])\n # self.vc.print_stats(ground_truths, predictions, \"Video\" + str(epoch))\n gen = self.vc.early_gen_new_val(csv_fusion, 16, \"eval\")\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print(\"Evaluate:\", acc)\n\n if self.epoch == epoch + 1:\n print(\"Validation_Accuracy =\", self.accs)\n\n cb = [ModelCheckpoint(\n filepath=str(\n \"weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}\" + model_name),\n monitor=\"val_accuracy\", save_weights_only=True),\n TensorBoard(log_dir=\"NewFusionLogs_sched/\" + self.train_mode + \"/\" + self.feature_name, write_graph=True,\n write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n #cb += [CheckValCMCallback(self, no_of_val_images, val_files, train_data[\"epoch\"])]\n history = model.fit_generator(train_gen,\n validation_data=val_gen,\n epochs=train_data[\"epoch\"],\n steps_per_epoch=(no_of_training_images * 2 // train_data[\"batch_size\"]),\n validation_steps=(no_of_val_images),\n workers=0, verbose=1, callbacks=cb)\n print(\"\\n\\nTrain_Accuracy =\", history.history['accuracy'])\n print(\"\\nVal_Accuracy =\", history.history['val_accuracy'])\n print(\"\\n\\nTrain_Loss =\", history.history['loss'])\n print(\"\\nVal_Loss =\", 
history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print(\"###\" + name + \" Results###\\n\")\n # print_cm(cm, self.classes)\n # print(\"\\n\\n\")\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes)\n print(\"\\n\\n\")\n print(\"Accuracy score: \", accuracy_score(ground_truths, predictions), \"\\n\\n\")\n # print(\"Report\")\n # print(classification_report(ground_truths, predictions))\n print(\"#################################################################end###\\n\\n\\n\")\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == \"early_fusion\":\n csv_fusion = self.load_early_csv(\"val\")\n print(\"CSV loaded\", len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, \"eval\", stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n # print(\"\\ngt, pred\", self.lb.inverse_transform(x[1]), pred)\n self.print_stats(ground_truths, predictions, \"Video\")\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob(\"/user/vlongobardi/late_feature/\" + self.feature_name + \"/*/*csv\")\n for file in files:\n clip_id = basename(file).split(\".\")[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n 
f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n\n self.print_stats(ground_truths, predictions, \"Video\")\n self.print_stats(ground_truths, a_p, \"Audio\")\n self.print_stats(ground_truths, f_p, \"Frame\")\n\n\nif __name__ == \"__main__\":\n if sys.argv[1] == \"late\":\n print(\"LATE\")\n model_path = [\n \"audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5\",\n \"audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5\",\n \"audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5\",\n \"audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5\"\n ]\n for mp in model_path:\n vc = VideoClassifier(train_mode=\"late_fusion\", audio_model_path=mp)\n elif sys.argv[1] == \"early\":\n # mt = int(sys.argv[2])\n print(\"EARLY\") # , Model_type:\", mt)\n arff_paths = {\"e1\": \"emobase2010_100\", \"i1\": \"IS09_emotion_100\",\n \"e3\": \"emobase2010_300\", \"i3\": \"IS09_emotion_300\",\n \"e6\": \"emobase2010_600\", \"i6\": \"IS09_emotion_600\",\n \"ef\": \"emobase2010_full\", \"if\": \"IS09_emotion_full\"}\n vc = VideoClassifier(train_mode=\"early_fusion\", feature_name=arff_paths[sys.argv[2]]) # , model_type=mt)\n",
"step-ids": [
7,
11,
13,
14,
18
]
}
|
[
7,
11,
13,
14,
18
] |
import pyaudio
import numpy as np
from collections import OrderedDict
import utils
class MasterPlayer(object):
    """Mixes registered voice callbacks into one stereo pyaudio output stream.

    Each registered callback is invoked once per buffer as
    ``callback(offset, time, frame_count)`` and must return a
    ``(left, right)`` pair of sample arrays, or ``(None, right)`` when the
    voice is currently silent (such voices are skipped).
    """

    def __init__(self, volume=1., samplesPerSecond=44100):
        self.p = pyaudio.PyAudio()
        self.volume = volume
        self.samplesPerSecond = samplesPerSecond
        # Keyed by callback; OrderedDict keeps the mixing order deterministic.
        self.individual_callbacks = OrderedDict()
        self.volumes = {}

    def __del__(self):
        self.p.terminate()

    def play(self):
        """Open the output stream and start pulling buffers from the voices."""
        self.offset = 0

        def stream_callback(in_data, frame_count, time_info, status):
            # Interleaved stereo accumulator for this buffer.
            mixed = np.zeros(frame_count * 2)
            now = self.offset / float(self.samplesPerSecond)
            for voice in self.individual_callbacks:
                left, right = voice(self.offset, now, frame_count)
                if left is None:
                    # Dead voice: contributes nothing this buffer.
                    continue
                mixed += utils.to_stereo(left, right) * self.volumes[voice]
            self.offset += frame_count
            return (utils.np_to_frames(mixed * self.volume), pyaudio.paContinue)

        self.stream = self.p.open(format=self.p.get_format_from_width(2),
                                  channels=2,
                                  rate=self.samplesPerSecond,
                                  output=True,
                                  stream_callback=stream_callback)
        self.stream.start_stream()

    def stop(self):
        self.stream.stop_stream()

    def register(self, callback):
        """Add a voice callback at unity gain."""
        self.individual_callbacks[callback] = {}
        self.volumes[callback] = 1.

    def unregister(self, callback):
        """Remove a voice callback; no-op when it is not registered."""
        if callback in self.individual_callbacks:
            del self.individual_callbacks[callback]
            del self.volumes[callback]

    def set_volume(self, callback, volume):
        self.volumes[callback] = volume
# Peak sample magnitude for signed 16-bit PCM output (pyaudio sample width 2).
MAXVOLUME = 32767.
def sawtooth(x):
    """Sawtooth waveform with the same period (2*pi radians) as np.sin.

    Maps the phase *x* onto a ramp that rises linearly from 0 to 1 once
    per cycle.
    """
    cycles = x / (2 * np.pi)
    return np.mod(cycles, 1.)
class ADSR(object):
    """Linear attack/decay/sustain/release amplitude envelope.

    ``a``, ``d`` and ``r`` are durations in seconds; ``s`` is the sustain
    level in [0, 1]. Only the 'linear' mode is supported.
    """

    def __init__(self, a=0.01, d=0.1, s=0.8, r=0.5, mode='linear'):
        self.a = a
        self.d = d
        self.s = s
        self.r = r
        assert mode == 'linear'

    def get_envelope_pressed(self, delta):
        '''
        :param delta: array of times elapsed since the key was pressed
        :return: envelope values (between 0 and 1)
        '''
        t = delta.astype(float)
        # Piecewise linear: ramp 0->1 over the attack, ramp 1->s over the
        # decay, then hold the sustain level. np.select picks the first
        # matching branch per element, so the boundaries match the
        # original strict/non-strict comparisons exactly.
        return np.select(
            [t < self.a, t < self.a + self.d],
            [t / self.a,
             1 - (1 - self.s) * (t - self.a) / self.d],
            default=self.s)

    def get_envelope_released(self, delta):
        '''
        :param delta: array of times elapsed since the key was released
        :return: (envelope values, flag that every sample is past the release)
        '''
        t = delta.astype(float)
        # Linear fade from the sustain level down to zero over self.r
        # seconds; anything at or past self.r is silent.
        envelope = np.where(t < self.r, self.s * (self.r - t) / self.r, 0.0)
        all_dead = np.all(t >= self.r)
        return envelope, all_dead
class SineWavePlayer(object):
    """Single monophonic voice: an oscillator shaped by an ADSR envelope.

    Instances are used as MasterPlayer callbacks: calling one renders the
    next ``frame_count`` samples, or returns ``(None, None)`` once the
    voice has fully died out after release.
    """

    def __init__(self, freq, samplerate, adsr, motherwave=None):
        """
        :param freq: oscillator frequency in Hz
        :param samplerate: output sample rate in samples per second
        :param adsr: ADSR instance providing the amplitude envelope
        :param motherwave: waveform function of phase in radians (e.g.
            np.sin or sawtooth); defaults to sawtooth
        """
        self.freq = freq
        self.samplerate = samplerate
        self.pressed = False
        self.volume = 0.3
        if motherwave is None:
            # BUG FIX: assign the waveform function itself. The original
            # code did ``motherwave = sawtooth()``, calling sawtooth with
            # no argument, which raised TypeError whenever the default
            # was used.
            motherwave = sawtooth
        self.wave = motherwave
        self.adsr = adsr
        self.dead = True

    def __call__(self, offset, time, frame_count):
        """Render one buffer of (left, right) samples for this voice.

        State machine: Dead -> NewPress -> Pressed -> NewRelease ->
        Released -> Dead. Returns (None, None) while dead.
        """
        if self.pressed:
            if self.new_press:
                # Remember the press time so the phase stays continuous
                # across buffers (prevents clicking).
                self.onset = time
                self.new_press = False
            # Per-sample time relative to the key press.
            time_after_press = (time + np.arange(frame_count, dtype=float) / self.samplerate - self.onset)
            left = self.volume * MAXVOLUME * self.wave(time_after_press * 2*np.pi * self.freq)
            envelope = self.adsr.get_envelope_pressed(time_after_press)
            left *= envelope
            right = left
        elif not self.dead:
            if self.new_release:
                self.new_release = False
                self.release_time = time
            # Keep the oscillator phase anchored to the original press,
            # but drive the envelope from the release time.
            time_after_press = (time + np.arange(frame_count, dtype=float) / self.samplerate - self.onset)
            time_after_release = (time + np.arange(frame_count, dtype=float) / self.samplerate - self.release_time)
            left = self.volume * MAXVOLUME * self.wave(time_after_press * 2*np.pi * self.freq)
            envelope, self.dead = self.adsr.get_envelope_released(time_after_release)
            left *= envelope
            right = left
        else:
            # Fully dead voice: MasterPlayer skips None buffers.
            left = right = None
        return left, right

    def press(self):
        """Key down: restart the attack phase of the envelope."""
        self.pressed = True
        self.new_press = True
        self.dead = False

    def release(self):
        """Key up: begin the release phase of the envelope."""
        self.pressed = False
        self.new_release = True
def note_to_freq(note):
    """Convert a semitone index to a frequency in Hz (index 45 -> A440)."""
    reference_a = 45
    semitones_from_a = note - reference_a
    # Equal temperament: each semitone scales the frequency by 2**(1/12),
    # computed in log space around the 440 Hz reference.
    return np.exp(np.log(440) + semitones_from_a / 12. * np.log(2))
class NaivePoly(object):
    """Naive polyphonic instrument: one permanently-allocated voice per key.

    Builds ``octaves * 12`` SineWavePlayer voices in equal temperament
    (note index 45 = A440) and routes press/release events to them by
    key index.
    """

    def __init__(self, octaves, samplerate, adsr, motherwave):
        """
        :param octaves: number of octaves (12 voices each) to allocate
        :param samplerate: output sample rate passed to each voice
        :param adsr: shared ADSR envelope instance
        :param motherwave: waveform function passed to each voice
        """
        self.voices = []
        self.octaves = octaves
        # FIX: use range and a print() call instead of the Python-2-only
        # xrange/print statement, so this module also runs on Python 3
        # (behavior is identical on Python 2).
        for note in range(self.octaves * 12):
            # Compute frequency -> 440 Hz is note 45.
            freq = note_to_freq(note)
            self.voices.append(SineWavePlayer(freq, samplerate, adsr, motherwave))
            print('note {} freq {}'.format(note, freq))

    def register(self, master):
        """Hand every voice to the mixer once, up front."""
        for voice in self.voices:
            master.register(voice)

    def unregister(self, master):
        """Remove every voice from the mixer."""
        for voice in self.voices:
            master.unregister(voice)

    def press(self, key):
        self.voices[key].press()

    def release(self, key):
        self.voices[key].release()
|
normal
|
{
"blob_id": "c4e4e54ac93c2acdbd3a1cd22b200341a6e45688",
"index": 224,
"step-1": "import pyaudio\nimport numpy as np\nfrom collections import OrderedDict\nimport utils\n\n\nclass MasterPlayer(object):\n def __init__(self, volume=1., samplesPerSecond=44100):\n self.p = pyaudio.PyAudio()\n self.volume = volume\n self.samplesPerSecond = samplesPerSecond\n self.individual_callbacks = OrderedDict()\n self.volumes = {}\n\n def __del__(self):\n self.p.terminate()\n\n def play(self):\n\n self.offset = 0\n def callback(in_data, frame_count, time_info, status):\n total_stereo = np.zeros((frame_count*2))\n time = self.offset / float(self.samplesPerSecond)\n\n for ic in self.individual_callbacks:\n left, right = ic(self.offset, time, frame_count)\n if left is None: # dead voice\n continue\n stereo = utils.to_stereo(left, right)\n # Accumulate\n total_stereo += stereo * self.volumes[ic]\n\n self.offset += frame_count\n output = utils.np_to_frames(total_stereo * self.volume)\n return (output, pyaudio.paContinue)\n\n self.stream = self.p.open(format=self.p.get_format_from_width(2),\n channels=2,\n rate=self.samplesPerSecond,\n output=True,\n stream_callback=callback)\n self.stream.start_stream()\n\n def stop(self):\n self.stream.stop_stream()\n\n def register(self, callback):\n self.individual_callbacks[callback] = {}\n self.volumes[callback] = 1.\n\n def unregister(self, callback):\n if callback in self.individual_callbacks:\n del self.individual_callbacks[callback]\n del self.volumes[callback]\n\n def set_volume(self, callback, volume):\n self.volumes[callback] = volume\n\nMAXVOLUME = 32767.\n\n\ndef sawtooth(x):\n return np.mod(x / (2*np.pi), 1.)\n\nclass ADSR(object):\n def __init__(self, a=0.01, d=0.1, s=0.8, r=0.5, mode='linear'):\n self.a = a\n self.d = d\n self.s = s\n self.r = r\n assert mode == 'linear'\n\n def get_envelope_pressed(self, delta):\n '''\n :param delta: time after pressed\n :return: envelope (between 0 and 1)\n '''\n delta = delta.astype(float)\n #assert delta>0.\n envelope = np.zeros(len(delta))\n # attack\n attack = delta < 
self.a\n envelope[attack] = delta[attack] / self.a\n # decay\n decay = (delta < self.a + self.d) & (delta >= self.a)\n envelope[decay] = 1 - (1 - self.s) * (delta[decay] - self.a) / self.d\n # sustain\n sustain = (delta >= self.a + self.d)\n envelope[sustain] = self.s\n\n return envelope\n\n def get_envelope_released(self, delta):\n '''\n :param delta: time after released\n :return: envelope (between 0 and 1)\n '''\n delta = delta.astype(float)\n envelope = np.zeros(len(delta))\n\n # release\n release = delta < self.r\n envelope[release] = self.s * (self.r - delta[release]) / self.r\n\n # dead\n dead = delta >= self.r\n all_dead = np.all(dead)\n\n return envelope, all_dead\n\n\nclass SineWavePlayer(object):\n def __init__(self, freq, samplerate, adsr, motherwave=None):\n self.freq = freq\n self.samplerate = samplerate\n self.pressed = False\n self.volume = 0.3\n #self.wave = np.sin\n if motherwave is None:\n motherwave = sawtooth()\n self.wave = motherwave\n self.adsr = adsr\n self.dead = True\n\n def __call__(self, offset, time, frame_count):\n\n # Find out which state we are in\n # Dead/NewPress/Pressed/NewRelease/Released/Dead\n if self.pressed:\n if self.new_press:\n # Initialize phase to prevent clicking\n self.onset = time\n self.new_press = False\n # Relative time after press\n time_after_press = (time + np.arange(frame_count, dtype=float) / self.samplerate - self.onset)\n\n left = self.volume * MAXVOLUME * self.wave(time_after_press * 2*np.pi * self.freq)\n envelope = self.adsr.get_envelope_pressed(time_after_press)\n left *= envelope\n right = left\n elif not self.dead:\n if self.new_release:\n self.new_release = False\n self.release_time = time\n # Relative time after release\n time_after_press = (time + np.arange(frame_count, dtype=float) / self.samplerate - self.onset)\n time_after_release = (time + np.arange(frame_count, dtype=float) / self.samplerate - self.release_time)\n\n left = self.volume * MAXVOLUME * self.wave(time_after_press * 2*np.pi * 
self.freq)\n envelope, self.dead = self.adsr.get_envelope_released(time_after_release)\n left *= envelope\n right = left\n else:\n left = right = None\n return left, right\n\n def press(self):\n self.pressed = True\n self.new_press = True\n self.dead = False\n\n def release(self):\n self.pressed = False\n self.new_release = True\n\n\ndef note_to_freq(note):\n reference_a = 45\n return np.exp(np.log(440) + (note - reference_a) / 12. * np.log(2))\n\n\nclass NaivePoly(object):\n def __init__(self, octaves, samplerate, adsr, motherwave):\n self.voices = []\n self.octaves = octaves\n for note in xrange(self.octaves*12):\n # Compute frequency -> 440hz is note 45\n freq = note_to_freq(note)\n # Initialize voice\n self.voices.append(SineWavePlayer(freq, samplerate, adsr, motherwave))\n print 'note {} freq {}'.format(note, freq)\n\n def register(self, master):\n for voice in self.voices:\n master.register(voice)\n\n def unregister(self, master):\n for voice in self.voices:\n master.unregister(voice)\n\n def press(self, key):\n self.voices[key].press()\n\n def release(self, key):\n self.voices[key].release()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
#------------------------------------------------------------------------------
# imsrg_pairing.py
#
# author: H. Hergert
# version: 1.5.0
# date: Dec 6, 2016
#
# tested with Python v2.7
#
# Solves the pairing model for four particles in a basis of four doubly
# degenerate states by means of an In-Medium Similarity Renormalization
# Group (IMSRG) flow.
#
#------------------------------------------------------------------------------
import numpy as np
from numpy import array, dot, diag, reshape, transpose
from scipy.linalg import eigvalsh
from scipy.integrate import odeint, ode
from sys import argv
#-----------------------------------------------------------------------------------
# basis and index functions
#-----------------------------------------------------------------------------------
def construct_basis_2B(holes, particles):
  """Return the list of two-body basis states (p, q).

  States are ordered in four blocks — hole-hole, hole-particle,
  particle-hole, particle-particle — matching the index map built by
  construct_index_2B.
  """
  groups = ((holes, holes), (holes, particles),
            (particles, holes), (particles, particles))
  return [(p, q) for left, right in groups for p in left for q in right]
def construct_basis_ph2B(holes, particles):
  """Return the particle-hole two-body basis.

  Same hh, hp, ph, pp block ordering as construct_basis_2B; kept as a
  separate function so the ph basis could be changed independently.
  """
  basis = []
  for left, right in ((holes, holes), (holes, particles),
                      (particles, holes), (particles, particles)):
    for p in left:
      for q in right:
        basis.append((p, q))
  return basis
#
# We use dictionaries for the reverse lookup of state indices
#
def construct_index_2B(bas2B):
  """Return a dict mapping each two-body basis state (p, q) to its row index."""
  return {state: position for position, state in enumerate(bas2B)}
#-----------------------------------------------------------------------------------
# transform matrices to particle-hole representation
#-----------------------------------------------------------------------------------
def ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):
  """Transform a two-body matrix to the particle-hole representation.

  Gamma_ph[(a,b),(c,d)] = -Gamma[(a,d),(c,b)].
  """
  dim = len(basph2B)
  Gamma_ph = np.zeros((dim, dim))
  for row, (a, b) in enumerate(basph2B):
    for col, (c, d) in enumerate(basph2B):
      Gamma_ph[row, col] = -Gamma[idx2B[(a, d)], idx2B[(c, b)]]
  return Gamma_ph
def inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):
  """Invert ph_transform_2B back to the standard representation.

  Gamma[(a,b),(c,d)] = -Gamma_ph[(a,d),(c,b)].
  """
  dim = len(bas2B)
  Gamma = np.zeros((dim, dim))
  for row, (a, b) in enumerate(bas2B):
    for col, (c, d) in enumerate(bas2B):
      Gamma[row, col] = -Gamma_ph[idxph2B[(a, d)], idxph2B[(c, b)]]
  return Gamma
#-----------------------------------------------------------------------------------
# commutator of matrices
#-----------------------------------------------------------------------------------
def commutator(a, b):
  """Return the matrix commutator [a, b] = a.b - b.a."""
  ab = dot(a, b)
  ba = dot(b, a)
  return ab - ba
#-----------------------------------------------------------------------------------
# norms of off-diagonal Hamiltonian pieces
#-----------------------------------------------------------------------------------
def calc_fod_norm(f, user_data):
  """Frobenius norm of the off-diagonal (particle-hole) part of f."""
  particles = user_data["particles"]
  holes = user_data["holes"]

  total = sum(f[a, i]**2 + f[i, a]**2
              for a in particles for i in holes)
  return np.sqrt(total)
def calc_Gammaod_norm(Gamma, user_data):
  """Frobenius norm of the off-diagonal (pp-hh and hh-pp) part of Gamma."""
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  total = 0.0
  for a in particles:
    for b in particles:
      ab = idx2B[(a, b)]
      for i in holes:
        for j in holes:
          ij = idx2B[(i, j)]
          total += Gamma[ab, ij]**2 + Gamma[ij, ab]**2
  return np.sqrt(total)
#-----------------------------------------------------------------------------------
# occupation number matrices
#-----------------------------------------------------------------------------------
def construct_occupation_1B(bas1B, holes, particles):
  """Occupation-number vector: 1 for hole (occupied) states, 0 otherwise."""
  occ = np.zeros(len(bas1B))
  occ[list(holes)] = 1.0
  return occ
# diagonal matrix: n_a - n_b
def construct_occupationA_2B(bas2B, occ1B):
  """Diagonal matrix with entries n_a - n_b for each basis state (a, b)."""
  return np.diag([occ1B[p] - occ1B[q] for (p, q) in bas2B])
# diagonal matrix: 1 - n_a - n_b
def construct_occupationB_2B(bas2B, occ1B):
  """Diagonal matrix with entries 1 - n_a - n_b for each basis state (a, b)."""
  return np.diag([1.0 - occ1B[p] - occ1B[q] for (p, q) in bas2B])
# diagonal matrix: n_a * n_b
def construct_occupationC_2B(bas2B, occ1B):
  """Diagonal matrix with entries n_a * n_b for each basis state (a, b)."""
  return np.diag([occ1B[p] * occ1B[q] for (p, q) in bas2B])
#-----------------------------------------------------------------------------------
# generators
#-----------------------------------------------------------------------------------
def eta_brillouin(f, Gamma, user_data):
  """Brillouin generator: eta is the off-diagonal (ph / pphh) part of the
  Hamiltonian itself, antisymmetrized.

  Returns (eta1B, eta2B).
  """
  dim1B = user_data["dim1B"]  # unused; kept for symmetry with other generators
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  # one-body part: eta_{ai} = f_{ai}, eta_{ia} = -f_{ai}
  eta1B = np.zeros_like(f)
  for a in particles:
    for i in holes:
      eta1B[a, i] = f[a, i]
      eta1B[i, a] = -f[a, i]

  # two-body part: eta_{abij} = Gamma_{abij}, with antisymmetric partner
  eta2B = np.zeros_like(Gamma)
  for a in particles:
    for b in particles:
      ab = idx2B[(a, b)]
      for i in holes:
        for j in holes:
          ij = idx2B[(i, j)]
          eta2B[ab, ij] = Gamma[ab, ij]
          eta2B[ij, ab] = -Gamma[ab, ij]

  return eta1B, eta2B
def eta_imtime(f, Gamma, user_data):
  """Imaginary-time generator: off-diagonal matrix elements multiplied by
  the sign of the corresponding (Epstein-Nesbet-style) energy denominator.

  Returns (eta1B, eta2B).
  """
  dim1B = user_data["dim1B"]  # unused; kept for symmetry with other generators
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  # one-body part of the generator
  eta1B = np.zeros_like(f)
  for a in particles:
    for i in holes:
      ai = idx2B[(a, i)]
      energy_gap = f[a, a] - f[i, i] + Gamma[ai, ai]
      element = np.sign(energy_gap) * f[a, i]
      eta1B[a, i] = element
      eta1B[i, a] = -element

  # two-body part of the generator
  eta2B = np.zeros_like(Gamma)
  for a in particles:
    for b in particles:
      for i in holes:
        for j in holes:
          ab, ij = idx2B[(a, b)], idx2B[(i, j)]
          ai, aj = idx2B[(a, i)], idx2B[(a, j)]
          bi, bj = idx2B[(b, i)], idx2B[(b, j)]
          energy_gap = (f[a, a] + f[b, b] - f[i, i] - f[j, j]
                        + Gamma[ab, ab] + Gamma[ij, ij]
                        - Gamma[ai, ai] - Gamma[aj, aj]
                        - Gamma[bi, bi] - Gamma[bj, bj])
          element = np.sign(energy_gap) * Gamma[ab, ij]
          eta2B[ab, ij] = element
          eta2B[ij, ab] = -element

  return eta1B, eta2B
def eta_white(f, Gamma, user_data):
  """White generator: off-diagonal elements divided by their
  Epstein-Nesbet energy denominators.

  Returns (eta1B, eta2B). Note: a vanishing denominator would raise a
  division error, as in the original formulation.
  """
  dim1B = user_data["dim1B"]  # unused; kept for symmetry with other generators
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  # one-body part of the generator
  eta1B = np.zeros_like(f)
  for a in particles:
    for i in holes:
      ai = idx2B[(a, i)]
      denom = f[a, a] - f[i, i] + Gamma[ai, ai]
      element = f[a, i] / denom
      eta1B[a, i] = element
      eta1B[i, a] = -element

  # two-body part of the generator
  eta2B = np.zeros_like(Gamma)
  for a in particles:
    for b in particles:
      for i in holes:
        for j in holes:
          ab, ij = idx2B[(a, b)], idx2B[(i, j)]
          ai, aj = idx2B[(a, i)], idx2B[(a, j)]
          bi, bj = idx2B[(b, i)], idx2B[(b, j)]
          denom = (f[a, a] + f[b, b] - f[i, i] - f[j, j]
                   + Gamma[ab, ab] + Gamma[ij, ij]
                   - Gamma[ai, ai] - Gamma[aj, aj]
                   - Gamma[bi, bi] - Gamma[bj, bj])
          element = Gamma[ab, ij] / denom
          eta2B[ab, ij] = element
          eta2B[ij, ab] = -element

  return eta1B, eta2B
def eta_white_mp(f, Gamma, user_data):
  """White generator with plain Moller-Plesset denominators (one-body
  energies only, no Gamma shifts).

  Returns (eta1B, eta2B).
  """
  dim1B = user_data["dim1B"]  # unused; kept for symmetry with other generators
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  # one-body part of the generator
  eta1B = np.zeros_like(f)
  for a in particles:
    for i in holes:
      element = f[a, i] / (f[a, a] - f[i, i])
      eta1B[a, i] = element
      eta1B[i, a] = -element

  # two-body part of the generator
  eta2B = np.zeros_like(Gamma)
  for a in particles:
    for b in particles:
      for i in holes:
        for j in holes:
          ab, ij = idx2B[(a, b)], idx2B[(i, j)]
          denom = f[a, a] + f[b, b] - f[i, i] - f[j, j]
          element = Gamma[ab, ij] / denom
          eta2B[ab, ij] = element
          eta2B[ij, ab] = -element

  return eta1B, eta2B
def eta_white_atan(f, Gamma, user_data):
  """Arctangent-regularized White generator:
  eta = 0.5 * arctan(2 * Hod / denominator), which bounds each matrix
  element and tames small denominators.

  Returns (eta1B, eta2B).
  """
  dim1B = user_data["dim1B"]  # unused; kept for symmetry with other generators
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  # one-body part of the generator
  eta1B = np.zeros_like(f)
  for a in particles:
    for i in holes:
      ai = idx2B[(a, i)]
      denom = f[a, a] - f[i, i] + Gamma[ai, ai]
      element = 0.5 * np.arctan(2 * f[a, i] / denom)
      eta1B[a, i] = element
      eta1B[i, a] = -element

  # two-body part of the generator
  eta2B = np.zeros_like(Gamma)
  for a in particles:
    for b in particles:
      for i in holes:
        for j in holes:
          ab, ij = idx2B[(a, b)], idx2B[(i, j)]
          ai, aj = idx2B[(a, i)], idx2B[(a, j)]
          bi, bj = idx2B[(b, i)], idx2B[(b, j)]
          denom = (f[a, a] + f[b, b] - f[i, i] - f[j, j]
                   + Gamma[ab, ab] + Gamma[ij, ij]
                   - Gamma[ai, ai] - Gamma[aj, aj]
                   - Gamma[bi, bi] - Gamma[bj, bj])
          element = 0.5 * np.arctan(2 * Gamma[ab, ij] / denom)
          eta2B[ab, ij] = element
          eta2B[ij, ab] = -element

  return eta1B, eta2B
def eta_wegner(f, Gamma, user_data):
  """Wegner generator eta = [Hd, Hod]: the commutator of the diagonal
  and off-diagonal parts of the normal-ordered Hamiltonian, evaluated
  with the same IMSRG(2) commutator machinery as flow_imsrg2.

  :param f: one-body matrix
  :param Gamma: two-body matrix (in the bas2B index scheme)
  :param user_data: shared dict with bases, index maps and occupation matrices
  :return: tuple (eta1B, eta2B)
  """
  dim1B = user_data["dim1B"]
  holes = user_data["holes"]
  particles = user_data["particles"]
  bas2B = user_data["bas2B"]
  basph2B = user_data["basph2B"]
  idx2B = user_data["idx2B"]
  idxph2B = user_data["idxph2B"]
  occB_2B = user_data["occB_2B"]
  occC_2B = user_data["occC_2B"]
  occphA_2B = user_data["occphA_2B"]
  # split Hamiltonian in diagonal and off-diagonal parts
  # ("od" = particle-hole block of f and pp-hh blocks of Gamma; "d" = rest)
  fd = np.zeros_like(f)
  fod = np.zeros_like(f)
  Gammad = np.zeros_like(Gamma)
  Gammaod = np.zeros_like(Gamma)
  for a in particles:
    for i in holes:
      fod[a, i] = f[a,i]
      fod[i, a] = f[i,a]
  fd = f - fod
  for a in particles:
    for b in particles:
      for i in holes:
        for j in holes:
          Gammaod[idx2B[(a,b)], idx2B[(i,j)]] = Gamma[idx2B[(a,b)], idx2B[(i,j)]]
          Gammaod[idx2B[(i,j)], idx2B[(a,b)]] = Gamma[idx2B[(i,j)], idx2B[(a,b)]]
  Gammad = Gamma - Gammaod
  #############################
  # one-body part of the generator
  eta1B = np.zeros_like(f)
  # 1B - 1B
  eta1B += commutator(fd, fod)
  # 1B - 2B
  for p in range(dim1B):
    for q in range(dim1B):
      for i in holes:
        for a in particles:
          eta1B[p,q] += (
            fd[i,a] * Gammaod[idx2B[(a, p)], idx2B[(i, q)]]
            - fd[a,i] * Gammaod[idx2B[(i, p)], idx2B[(a, q)]]
            - fod[i,a] * Gammad[idx2B[(a, p)], idx2B[(i, q)]]
            + fod[a,i] * Gammad[idx2B[(i, p)], idx2B[(a, q)]]
          )
  # 2B - 2B
  # n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c
  GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))
  for p in range(dim1B):
    for q in range(dim1B):
      for i in holes:
        eta1B[p,q] += 0.5*(
          GammaGamma[idx2B[(i,p)], idx2B[(i,q)]]
          - transpose(GammaGamma)[idx2B[(i,p)], idx2B[(i,q)]]
        )
  GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))
  for p in range(dim1B):
    for q in range(dim1B):
      for r in range(dim1B):
        eta1B[p,q] += 0.5*(
          GammaGamma[idx2B[(r,p)], idx2B[(r,q)]]
          + transpose(GammaGamma)[idx2B[(r,p)], idx2B[(r,q)]]
        )
  #############################
  # two-body flow equation
  eta2B = np.zeros_like(Gamma)
  # 1B - 2B
  for p in range(dim1B):
    for q in range(dim1B):
      for r in range(dim1B):
        for s in range(dim1B):
          for t in range(dim1B):
            eta2B[idx2B[(p,q)],idx2B[(r,s)]] += (
              fd[p,t] * Gammaod[idx2B[(t,q)],idx2B[(r,s)]]
              + fd[q,t] * Gammaod[idx2B[(p,t)],idx2B[(r,s)]]
              - fd[t,r] * Gammaod[idx2B[(p,q)],idx2B[(t,s)]]
              - fd[t,s] * Gammaod[idx2B[(p,q)],idx2B[(r,t)]]
              - fod[p,t] * Gammad[idx2B[(t,q)],idx2B[(r,s)]]
              - fod[q,t] * Gammad[idx2B[(p,t)],idx2B[(r,s)]]
              + fod[t,r] * Gammad[idx2B[(p,q)],idx2B[(t,s)]]
              + fod[t,s] * Gammad[idx2B[(p,q)],idx2B[(r,t)]]
            )
  # 2B - 2B - particle and hole ladders
  # Gammad.occB.Gammaod
  GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))
  eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))
  # 2B - 2B - particle-hole chain
  # transform matrices to particle-hole representation and calculate
  # Gammad_ph.occA_ph.Gammaod_ph
  Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)
  Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)
  GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))
  # transform back to standard representation
  GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B, basph2B, idxph2B)
  # commutator / antisymmetrization
  work = np.zeros_like(GammaGamma)
  for i1, (i,j) in enumerate(bas2B):
    for i2, (k,l) in enumerate(bas2B):
      work[i1, i2] -= (
        GammaGamma[i1, i2]
        - GammaGamma[idx2B[(j,i)], i2]
        - GammaGamma[i1, idx2B[(l,k)]]
        + GammaGamma[idx2B[(j,i)], idx2B[(l,k)]]
      )
  GammaGamma = work
  eta2B += GammaGamma
  return eta1B, eta2B
#-----------------------------------------------------------------------------------
# derivatives
#-----------------------------------------------------------------------------------
def flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):
  """Right-hand side of the IMSRG(2) flow equations,
  dH/ds = [eta, H] truncated at the normal-ordered two-body level.

  :param eta1B, eta2B: one- and two-body parts of the generator
  :param f, Gamma: one- and two-body parts of the Hamiltonian
  :param user_data: shared dict with bases, index maps and occupation matrices
  :return: tuple (dE, df, dGamma) of flow derivatives
  """
  dim1B = user_data["dim1B"]
  holes = user_data["holes"]
  particles = user_data["particles"]
  bas2B = user_data["bas2B"]
  idx2B = user_data["idx2B"]
  basph2B = user_data["basph2B"]
  idxph2B = user_data["idxph2B"]
  occB_2B = user_data["occB_2B"]
  occC_2B = user_data["occC_2B"]
  occphA_2B = user_data["occphA_2B"]
  #############################
  # zero-body flow equation
  dE = 0.0
  for i in holes:
    for a in particles:
      dE += eta1B[i,a] * f[a,i] - eta1B[a,i] * f[i,a]
  for i in holes:
    for j in holes:
      for a in particles:
        for b in particles:
          dE += 0.5 * eta2B[idx2B[(i,j)], idx2B[(a,b)]] * Gamma[idx2B[(a,b)], idx2B[(i,j)]]
  #############################
  # one-body flow equation
  df = np.zeros_like(f)
  # 1B - 1B
  df += commutator(eta1B, f)
  # 1B - 2B
  for p in range(dim1B):
    for q in range(dim1B):
      for i in holes:
        for a in particles:
          df[p,q] += (
            eta1B[i,a] * Gamma[idx2B[(a, p)], idx2B[(i, q)]]
            - eta1B[a,i] * Gamma[idx2B[(i, p)], idx2B[(a, q)]]
            - f[i,a] * eta2B[idx2B[(a, p)], idx2B[(i, q)]]
            + f[a,i] * eta2B[idx2B[(i, p)], idx2B[(a, q)]]
          )
  # 2B - 2B
  # n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c
  etaGamma = dot(eta2B, dot(occB_2B, Gamma))
  for p in range(dim1B):
    for q in range(dim1B):
      for i in holes:
        df[p,q] += 0.5*(
          etaGamma[idx2B[(i,p)], idx2B[(i,q)]]
          + transpose(etaGamma)[idx2B[(i,p)], idx2B[(i,q)]]
        )
  etaGamma = dot(eta2B, dot(occC_2B, Gamma))
  for p in range(dim1B):
    for q in range(dim1B):
      for r in range(dim1B):
        df[p,q] += 0.5*(
          etaGamma[idx2B[(r,p)], idx2B[(r,q)]]
          + transpose(etaGamma)[idx2B[(r,p)], idx2B[(r,q)]]
        )
  #############################
  # two-body flow equation
  dGamma = np.zeros_like(Gamma)
  # 1B - 2B
  for p in range(dim1B):
    for q in range(dim1B):
      for r in range(dim1B):
        for s in range(dim1B):
          for t in range(dim1B):
            dGamma[idx2B[(p,q)],idx2B[(r,s)]] += (
              eta1B[p,t] * Gamma[idx2B[(t,q)],idx2B[(r,s)]]
              + eta1B[q,t] * Gamma[idx2B[(p,t)],idx2B[(r,s)]]
              - eta1B[t,r] * Gamma[idx2B[(p,q)],idx2B[(t,s)]]
              - eta1B[t,s] * Gamma[idx2B[(p,q)],idx2B[(r,t)]]
              - f[p,t] * eta2B[idx2B[(t,q)],idx2B[(r,s)]]
              - f[q,t] * eta2B[idx2B[(p,t)],idx2B[(r,s)]]
              + f[t,r] * eta2B[idx2B[(p,q)],idx2B[(t,s)]]
              + f[t,s] * eta2B[idx2B[(p,q)],idx2B[(r,t)]]
            )
  # 2B - 2B - particle and hole ladders
  # eta2B.occB.Gamma
  etaGamma = dot(eta2B, dot(occB_2B, Gamma))
  dGamma += 0.5 * (etaGamma + transpose(etaGamma))
  # 2B - 2B - particle-hole chain
  # transform matrices to particle-hole representation and calculate
  # eta2B_ph.occA_ph.Gamma_ph
  eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)
  Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)
  etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))
  # transform back to standard representation
  etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B, idxph2B)
  # commutator / antisymmetrization
  work = np.zeros_like(etaGamma)
  for i1, (i,j) in enumerate(bas2B):
    for i2, (k,l) in enumerate(bas2B):
      work[i1, i2] -= (
        etaGamma[i1, i2]
        - etaGamma[idx2B[(j,i)], i2]
        - etaGamma[i1, idx2B[(l,k)]]
        + etaGamma[idx2B[(j,i)], idx2B[(l,k)]]
      )
  etaGamma = work
  dGamma += etaGamma
  return dE, df, dGamma
#-----------------------------------------------------------------------------------
# derivative wrapper
#-----------------------------------------------------------------------------------
def get_operator_from_y(y, dim1B, dim2B):
  """Split the flat ODE solution vector into its (E, f, Gamma) pieces.

  Layout of y: [E | f flattened (dim1B*dim1B) | Gamma flattened (dim2B*dim2B)].
  """
  one_body_size = dim1B * dim1B
  zero_body = y[0]
  one_body = reshape(y[1:1 + one_body_size], (dim1B, dim1B))
  two_body = reshape(y[1 + one_body_size:1 + one_body_size + dim2B * dim2B],
                     (dim2B, dim2B))
  return zero_body, one_body, two_body
def derivative_wrapper(t, y, user_data):
  """Adapter between scipy.integrate.ode and the IMSRG right-hand side.

  Unpacks the flat solution vector y into (E, f, Gamma), evaluates the
  generator and the flow equations chosen via user_data["calc_eta"] /
  user_data["calc_rhs"], and returns the flattened derivative vector.
  Also writes dE and the generator norm back into user_data so the main
  loop can report them.

  :param t: flow parameter s (required by the ode interface; unused here)
  :param y: flat solution vector [E | f | Gamma]
  :param user_data: shared dict (bases, occupation matrices, callbacks)
  :return: flat derivative vector dy of the same layout as y
  """
  dim1B = user_data["dim1B"]
  dim2B = dim1B*dim1B
  # NOTE(review): the following lookups up to occphA_2B are unused locals;
  # the callbacks receive user_data and fetch what they need themselves.
  holes = user_data["holes"]
  particles = user_data["particles"]
  bas1B = user_data["bas1B"]
  bas2B = user_data["bas2B"]
  basph2B = user_data["basph2B"]
  idx2B = user_data["idx2B"]
  idxph2B = user_data["idxph2B"]
  occA_2B = user_data["occA_2B"]
  occB_2B = user_data["occB_2B"]
  occC_2B = user_data["occC_2B"]
  occphA_2B = user_data["occphA_2B"]
  calc_eta = user_data["calc_eta"]
  calc_rhs = user_data["calc_rhs"]
  # extract operator pieces from solution vector
  E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)
  # calculate the generator
  eta1B, eta2B = calc_eta(f, Gamma, user_data)
  # calculate the right-hand side
  dE, df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)
  # convert derivatives into linear array
  dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))
  # share data
  user_data["dE"] = dE
  user_data["eta_norm"] = np.linalg.norm(eta1B,ord='fro')+np.linalg.norm(eta2B,ord='fro')
  return dy
#-----------------------------------------------------------------------------------
# pairing Hamiltonian
#-----------------------------------------------------------------------------------
def pairing_hamiltonian(delta, g, user_data):
  """Construct the one- and two-body parts of the pairing Hamiltonian.

  H1B is diagonal with single-particle energies delta * floor(p / 2)
  (each doubly degenerate level holds a spin-up/spin-down pair of
  states); H2B scatters time-reversed pairs with strength -g/2.

  Returns (H1B, H2B).
  """
  bas1B = user_data["bas1B"]
  bas2B = user_data["bas2B"]
  idx2B = user_data["idx2B"]

  dim1 = len(bas1B)
  H1B = np.zeros((dim1, dim1))
  for p in bas1B:
    H1B[p, p] = delta * np.floor_divide(p, 2)

  dim2 = len(bas2B)
  H2B = np.zeros((dim2, dim2))
  # spin up states have even indices, spin down the next odd index;
  # only (even, even+1) time-reversed pairs interact
  paired = [(i, j) for (i, j) in bas2B if i % 2 == 0 and j == i + 1]
  for (i, j) in paired:
    for (k, l) in paired:
      H2B[idx2B[(i, j)], idx2B[(k, l)]] = -0.5 * g
      H2B[idx2B[(j, i)], idx2B[(k, l)]] = 0.5 * g
      H2B[idx2B[(i, j)], idx2B[(l, k)]] = 0.5 * g
      H2B[idx2B[(j, i)], idx2B[(l, k)]] = -0.5 * g
  return H1B, H2B
#-----------------------------------------------------------------------------------
# normal-ordered pairing Hamiltonian
#-----------------------------------------------------------------------------------
def normal_order(H1B, H2B, user_data):
  """Normal-order the Hamiltonian with respect to the reference state.

  :param H1B: one-body Hamiltonian matrix
  :param H2B: two-body Hamiltonian matrix (bas2B index scheme)
  :param user_data: shared dict providing bas1B, idx2B, holes
  :return: tuple (E, f, Gamma) — reference energy, normal-ordered
      one-body part, normal-ordered two-body part

  BUGFIX: the original aliased f = H1B and then updated f in place,
  silently mutating the caller's H1B (and returned H2B itself as Gamma).
  We now work on copies so the inputs are left untouched; returned
  values are unchanged.
  """
  bas1B = user_data["bas1B"]
  bas2B = user_data["bas2B"]
  idx2B = user_data["idx2B"]
  particles = user_data["particles"]
  holes = user_data["holes"]

  # 0B part: energy expectation value of the reference determinant
  E = 0.0
  for i in holes:
    E += H1B[i,i]
  for i in holes:
    for j in holes:
      E += 0.5*H2B[idx2B[(i,j)],idx2B[(i,j)]]

  # 1B part: fold the interaction with the occupied orbitals into f
  f = H1B.copy()
  for i in bas1B:
    for j in bas1B:
      for h in holes:
        f[i,j] += H2B[idx2B[(i,h)],idx2B[(j,h)]]

  # 2B part is unchanged by normal ordering at this truncation
  Gamma = H2B.copy()

  return E, f, Gamma
#-----------------------------------------------------------------------------------
# Perturbation theory
#-----------------------------------------------------------------------------------
def calc_mbpt2(f, Gamma, user_data):
  """Second-order MBPT energy correction evaluated from f and Gamma."""
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]

  correction = 0.0
  for i in holes:
    for j in holes:
      ij = idx2B[(i, j)]
      for a in particles:
        for b in particles:
          denom = f[i, i] + f[j, j] - f[a, a] - f[b, b]
          me = Gamma[idx2B[(a, b)], ij]
          # 1/4 factor from the antisymmetrized double summation
          correction += 0.25 * me * me / denom
  return correction
def calc_mbpt3(f, Gamma, user_data):
  """Third-order MBPT energy correction: sum of the particle-particle
  ladder, hole-hole ladder, and particle-hole ring contributions.

  :param f: one-body matrix (diagonal elements used as orbital energies)
  :param Gamma: two-body matrix
  :param user_data: shared dict providing particles, holes, idx2B
  :return: DE3pp + DE3hh + DE3ph
  """
  particles = user_data["particles"]
  holes = user_data["holes"]
  idx2B = user_data["idx2B"]
  # DE3 = 0.0
  DE3pp = 0.0
  DE3hh = 0.0
  DE3ph = 0.0
  # particle-particle ladder diagram
  for a in particles:
    for b in particles:
      for c in particles:
        for d in particles:
          for i in holes:
            for j in holes:
              denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[i,i] + f[j,j] - f[c,c] - f[d,d])
              me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(a,b)],idx2B[(c,d)]]*
                Gamma[idx2B[(c,d)],idx2B[(i,j)]])
              DE3pp += 0.125*me/denom
  # hole-hole ladder diagram
  for i in holes:
    for j in holes:
      for k in holes:
        for l in holes:
          for a in particles:
            for b in particles:
              denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[l,l] - f[a,a] - f[b,b])
              me = (Gamma[idx2B[(a,b)],idx2B[(k,l)]]*Gamma[idx2B[(k,l)],idx2B[(i,j)]]*
                Gamma[idx2B[(i,j)],idx2B[(a,b)]])
              DE3hh += 0.125*me/denom
  # particle-hole ring diagram
  for i in holes:
    for j in holes:
      for k in holes:
        for a in particles:
          for b in particles:
            for c in particles:
              denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[j,j] - f[a,a] - f[c,c])
              me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(k,b)],idx2B[(i,c)]]*
                Gamma[idx2B[(a,c)],idx2B[(k,j)]])
              DE3ph -= me/denom
  return DE3pp+DE3hh+DE3ph
#------------------------------------------------------------------------------
# Main program
#------------------------------------------------------------------------------
def main():
  """Solve the four-particle pairing model by an IMSRG(2) flow.

  Usage: imsrg_pairing.py <delta> <g> — level spacing and pairing
  strength are read from the command line; a convergence table is
  printed while the scipy 'vode' integrator advances the flow.
  """
  # grab delta and g from the command line
  delta = float(argv[1])
  g = float(argv[2])
  # NOTE(review): this scalar is dead code — it is overwritten by the
  # particles list a few lines below.
  particles = 4
  # setup shared data
  dim1B = 8
  # this defines the reference state
  # 1st state
  holes = [0,1,2,3]
  particles = [4,5,6,7]
  # 2nd state
  # holes = [0,1,4,5]
  # particles = [2,3,6,7]
  # 3rd state
  # holes = [0,1,6,7]
  # particles = [2,3,4,5]
  # basis definitions
  bas1B = range(dim1B)
  bas2B = construct_basis_2B(holes, particles)
  basph2B = construct_basis_ph2B(holes, particles)
  idx2B = construct_index_2B(bas2B)
  idxph2B = construct_index_2B(basph2B)
  # occupation number matrices
  occ1B = construct_occupation_1B(bas1B, holes, particles)
  occA_2B = construct_occupationA_2B(bas2B, occ1B)
  occB_2B = construct_occupationB_2B(bas2B, occ1B)
  occC_2B = construct_occupationC_2B(bas2B, occ1B)
  occphA_2B = construct_occupationA_2B(basph2B, occ1B)
  # store shared data in a dictionary, so we can avoid passing the basis
  # lookups etc. as separate parameters all the time
  user_data = {
    "dim1B": dim1B,
    "holes": holes,
    "particles": particles,
    "bas1B": bas1B,
    "bas2B": bas2B,
    "basph2B": basph2B,
    "idx2B": idx2B,
    "idxph2B": idxph2B,
    "occ1B": occ1B,
    "occA_2B": occA_2B,
    "occB_2B": occB_2B,
    "occC_2B": occC_2B,
    "occphA_2B": occphA_2B,
    "eta_norm": 0.0, # variables for sharing data between ODE solver
    "dE": 0.0, # and main routine
    "calc_eta": eta_white_atan, # specify the generator (function object)
    "calc_rhs": flow_imsrg2 # specify the right-hand side and truncation
  }
  # set up initial Hamiltonian
  H1B, H2B = pairing_hamiltonian(delta, g, user_data)
  E, f, Gamma = normal_order(H1B, H2B, user_data)
  # reshape Hamiltonian into a linear array (initial ODE vector)
  y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))
  # integrate flow equations
  solver = ode(derivative_wrapper,jac=None)
  solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)
  solver.set_f_params(user_data)
  solver.set_initial_value(y0, 0.)
  sfinal = 50
  ds = 0.1
  print("%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s"%(
    "s", "E" , "DE(2)", "DE(3)", "E+DE", "dE/ds",
    "||eta||", "||fod||", "||Gammaod||"))
  print("-" * 148)
  while solver.successful() and solver.t < sfinal:
    ys = solver.integrate(sfinal, step=True)
    dim2B = dim1B*dim1B
    E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)
    DE2 = calc_mbpt2(f, Gamma, user_data)
    DE3 = calc_mbpt3(f, Gamma, user_data)
    norm_fod = calc_fod_norm(f, user_data)
    norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)
    print("%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f"%(
      solver.t, E , DE2, DE3, E+DE2+DE3, user_data["dE"], user_data["eta_norm"], norm_fod, norm_Gammaod))
    # stop when the MBPT(2) correction is negligible relative to E
    # NOTE(review): 10e-8 is 1e-7 — possibly 1e-8 was intended; confirm.
    if abs(DE2/E) < 10e-8: break
  return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
if __name__ == "__main__":
  main()  # run the IMSRG flow only when invoked as a script
|
normal
|
{
"blob_id": "0eb86fc64b74c79cace838e2d71ed92533123229",
"index": 9910,
"step-1": "<mask token>\n\n\ndef ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):\n dim = len(basph2B)\n Gamma_ph = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(basph2B):\n for i2, (c, d) in enumerate(basph2B):\n Gamma_ph[i1, i2] -= Gamma[idx2B[a, d], idx2B[c, b]]\n return Gamma_ph\n\n\ndef inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):\n dim = len(bas2B)\n Gamma = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(bas2B):\n for i2, (c, d) in enumerate(bas2B):\n Gamma[i1, i2] -= Gamma_ph[idxph2B[a, d], idxph2B[c, b]]\n return Gamma\n\n\n<mask token>\n\n\ndef calc_fod_norm(f, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n norm = 0.0\n for a in particles:\n for i in holes:\n norm += f[a, i] ** 2 + f[i, a] ** 2\n return np.sqrt(norm)\n\n\ndef calc_Gammaod_norm(Gamma, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n norm = 0.0\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n norm += Gamma[idx2B[a, b], idx2B[i, j]] ** 2 + Gamma[\n idx2B[i, j], idx2B[a, b]] ** 2\n return np.sqrt(norm)\n\n\ndef construct_occupation_1B(bas1B, holes, particles):\n dim = len(bas1B)\n occ = np.zeros(dim)\n for i in holes:\n occ[i] = 1.0\n return occ\n\n\n<mask token>\n\n\ndef construct_occupationB_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, i1] = 1.0 - occ1B[i] - occ1B[j]\n return occ\n\n\n<mask token>\n\n\ndef eta_brillouin(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n eta1B[a, i] = f[a, i]\n eta1B[i, a] = -f[a, i]\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n val = Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = 
val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_imtime(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n dE = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = np.sign(dE) * f[a, i]\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n dE = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = np.sign(dE) * Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_mp(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = 
user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_atan(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = 0.5 * np.arctan(2 * f[a, i] / denom)\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = 0.5 * np.arctan(2 * Gamma[idx2B[a, b], idx2B[i, j\n ]] / denom)\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_wegner(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n fd = np.zeros_like(f)\n fod = np.zeros_like(f)\n Gammad = np.zeros_like(Gamma)\n Gammaod = np.zeros_like(Gamma)\n for a in particles:\n for i in holes:\n fod[a, i] = f[a, i]\n fod[i, a] = f[i, a]\n fd = f - 
fod\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n Gammaod[idx2B[a, b], idx2B[i, j]] = Gamma[idx2B[a, b],\n idx2B[i, j]]\n Gammaod[idx2B[i, j], idx2B[a, b]] = Gamma[idx2B[i, j],\n idx2B[a, b]]\n Gammad = Gamma - Gammaod\n eta1B = np.zeros_like(f)\n eta1B += commutator(fd, fod)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n eta1B[p, q] += fd[i, a] * Gammaod[idx2B[a, p], idx2B[i, q]\n ] - fd[a, i] * Gammaod[idx2B[i, p], idx2B[a, q]] - fod[\n i, a] * Gammad[idx2B[a, p], idx2B[i, q]] + fod[a, i\n ] * Gammad[idx2B[i, p], idx2B[a, q]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[i, p], idx2B[i, q]] -\n transpose(GammaGamma)[idx2B[i, p], idx2B[i, q]])\n GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(GammaGamma)[idx2B[r, p], idx2B[r, q]])\n eta2B = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n eta2B[idx2B[p, q], idx2B[r, s]] += fd[p, t] * Gammaod[\n idx2B[t, q], idx2B[r, s]] + fd[q, t] * Gammaod[\n idx2B[p, t], idx2B[r, s]] - fd[t, r] * Gammaod[\n idx2B[p, q], idx2B[t, s]] - fd[t, s] * Gammaod[\n idx2B[p, q], idx2B[r, t]] - fod[p, t] * Gammad[\n idx2B[t, q], idx2B[r, s]] - fod[q, t] * Gammad[\n idx2B[p, t], idx2B[r, s]] + fod[t, r] * Gammad[\n idx2B[p, q], idx2B[t, s]] + fod[t, s] * Gammad[\n idx2B[p, q], idx2B[r, t]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))\n Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)\n Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)\n GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))\n GammaGamma = 
inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B,\n basph2B, idxph2B)\n work = np.zeros_like(GammaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= GammaGamma[i1, i2] - GammaGamma[idx2B[j, i], i2\n ] - GammaGamma[i1, idx2B[l, k]] + GammaGamma[idx2B[j, i],\n idx2B[l, k]]\n GammaGamma = work\n eta2B += GammaGamma\n return eta1B, eta2B\n\n\ndef flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n basph2B = user_data['basph2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n dE = 0.0\n for i in holes:\n for a in particles:\n dE += eta1B[i, a] * f[a, i] - eta1B[a, i] * f[i, a]\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n dE += 0.5 * eta2B[idx2B[i, j], idx2B[a, b]] * Gamma[\n idx2B[a, b], idx2B[i, j]]\n df = np.zeros_like(f)\n df += commutator(eta1B, f)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n df[p, q] += eta1B[i, a] * Gamma[idx2B[a, p], idx2B[i, q]\n ] - eta1B[a, i] * Gamma[idx2B[i, p], idx2B[a, q]] - f[\n i, a] * eta2B[idx2B[a, p], idx2B[i, q]] + f[a, i\n ] * eta2B[idx2B[i, p], idx2B[a, q]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n df[p, q] += 0.5 * (etaGamma[idx2B[i, p], idx2B[i, q]] +\n transpose(etaGamma)[idx2B[i, p], idx2B[i, q]])\n etaGamma = dot(eta2B, dot(occC_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n df[p, q] += 0.5 * (etaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(etaGamma)[idx2B[r, p], idx2B[r, q]])\n dGamma = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n 
dGamma[idx2B[p, q], idx2B[r, s]] += eta1B[p, t\n ] * Gamma[idx2B[t, q], idx2B[r, s]] + eta1B[q, t\n ] * Gamma[idx2B[p, t], idx2B[r, s]] - eta1B[t, r\n ] * Gamma[idx2B[p, q], idx2B[t, s]] - eta1B[t, s\n ] * Gamma[idx2B[p, q], idx2B[r, t]] - f[p, t\n ] * eta2B[idx2B[t, q], idx2B[r, s]] - f[q, t\n ] * eta2B[idx2B[p, t], idx2B[r, s]] + f[t, r\n ] * eta2B[idx2B[p, q], idx2B[t, s]] + f[t, s\n ] * eta2B[idx2B[p, q], idx2B[r, t]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B,\n idxph2B)\n work = np.zeros_like(etaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= etaGamma[i1, i2] - etaGamma[idx2B[j, i], i2\n ] - etaGamma[i1, idx2B[l, k]] + etaGamma[idx2B[j, i], idx2B\n [l, k]]\n etaGamma = work\n dGamma += etaGamma\n return dE, df, dGamma\n\n\ndef get_operator_from_y(y, dim1B, dim2B):\n ptr = 0\n zero_body = y[ptr]\n ptr += 1\n one_body = reshape(y[ptr:ptr + dim1B * dim1B], (dim1B, dim1B))\n ptr += dim1B * dim1B\n two_body = reshape(y[ptr:ptr + dim2B * dim2B], (dim2B, dim2B))\n return zero_body, one_body, two_body\n\n\n<mask token>\n\n\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n dim = len(bas1B)\n H1B = np.zeros((dim, dim))\n for i in bas1B:\n H1B[i, i] = delta * np.floor_divide(i, 2)\n dim = len(bas2B)\n H2B = np.zeros((dim, dim))\n for i, j in bas2B:\n if i % 2 == 0 and j == i + 1:\n for k, l in bas2B:\n if k % 2 == 0 and l == k + 1:\n H2B[idx2B[i, j], idx2B[k, l]] = -0.5 * g\n H2B[idx2B[j, i], idx2B[k, l]] = 0.5 * g\n H2B[idx2B[i, j], idx2B[l, k]] = 0.5 * g\n H2B[idx2B[j, i], idx2B[l, k]] = -0.5 * g\n return H1B, 
H2B\n\n\n<mask token>\n\n\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i, i] + f[j, j] - f[a, a] - f[b, b]\n me = Gamma[idx2B[a, b], idx2B[i, j]]\n DE2 += 0.25 * me * me / denom\n return DE2\n\n\n<mask token>\n\n\ndef main():\n delta = float(argv[1])\n g = float(argv[2])\n particles = 4\n dim1B = 8\n holes = [0, 1, 2, 3]\n particles = [4, 5, 6, 7]\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = construct_basis_ph2B(holes, particles)\n idx2B = construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n user_data = {'dim1B': dim1B, 'holes': holes, 'particles': particles,\n 'bas1B': bas1B, 'bas2B': bas2B, 'basph2B': basph2B, 'idx2B': idx2B,\n 'idxph2B': idxph2B, 'occ1B': occ1B, 'occA_2B': occA_2B, 'occB_2B':\n occB_2B, 'occC_2B': occC_2B, 'occphA_2B': occphA_2B, 'eta_norm': \n 0.0, 'dE': 0.0, 'calc_eta': eta_white_atan, 'calc_rhs': flow_imsrg2}\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n E, f, Gamma = normal_order(H1B, H2B, user_data)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n solver = ode(derivative_wrapper, jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.0)\n sfinal = 50\n ds = 0.1\n print(\n '%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s'\n % ('s', 'E', 'DE(2)', 'DE(3)', 'E+DE', 'dE/ds', '||eta||',\n '||fod||', '||Gammaod||'))\n print('-' * 148)\n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, 
step=True)\n dim2B = dim1B * dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n print(\n '%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f'\n % (solver.t, E, DE2, DE3, E + DE2 + DE3, user_data['dE'],\n user_data['eta_norm'], norm_fod, norm_Gammaod))\n if abs(DE2 / E) < 1e-07:\n break\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef construct_basis_ph2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n for i in holes:\n for a in particles:\n basis.append((i, a))\n for a in particles:\n for i in holes:\n basis.append((a, i))\n for a in particles:\n for b in particles:\n basis.append((a, b))\n return basis\n\n\n<mask token>\n\n\ndef ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):\n dim = len(basph2B)\n Gamma_ph = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(basph2B):\n for i2, (c, d) in enumerate(basph2B):\n Gamma_ph[i1, i2] -= Gamma[idx2B[a, d], idx2B[c, b]]\n return Gamma_ph\n\n\ndef inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):\n dim = len(bas2B)\n Gamma = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(bas2B):\n for i2, (c, d) in enumerate(bas2B):\n Gamma[i1, i2] -= Gamma_ph[idxph2B[a, d], idxph2B[c, b]]\n return Gamma\n\n\n<mask token>\n\n\ndef calc_fod_norm(f, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n norm = 0.0\n for a in particles:\n for i in holes:\n norm += f[a, i] ** 2 + f[i, a] ** 2\n return np.sqrt(norm)\n\n\ndef calc_Gammaod_norm(Gamma, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n norm = 0.0\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n norm += Gamma[idx2B[a, b], idx2B[i, j]] ** 2 + Gamma[\n idx2B[i, j], idx2B[a, b]] ** 2\n return np.sqrt(norm)\n\n\ndef construct_occupation_1B(bas1B, holes, particles):\n dim = len(bas1B)\n occ = np.zeros(dim)\n for i in holes:\n occ[i] = 1.0\n return occ\n\n\n<mask token>\n\n\ndef construct_occupationB_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, i1] = 1.0 - occ1B[i] - occ1B[j]\n return occ\n\n\n<mask token>\n\n\ndef eta_brillouin(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = 
user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n eta1B[a, i] = f[a, i]\n eta1B[i, a] = -f[a, i]\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n val = Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_imtime(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n dE = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = np.sign(dE) * f[a, i]\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n dE = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = np.sign(dE) * Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n 
idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_mp(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_atan(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = 0.5 * np.arctan(2 * f[a, i] / denom)\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = 0.5 * np.arctan(2 * Gamma[idx2B[a, b], idx2B[i, j\n ]] / denom)\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_wegner(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n 
idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n fd = np.zeros_like(f)\n fod = np.zeros_like(f)\n Gammad = np.zeros_like(Gamma)\n Gammaod = np.zeros_like(Gamma)\n for a in particles:\n for i in holes:\n fod[a, i] = f[a, i]\n fod[i, a] = f[i, a]\n fd = f - fod\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n Gammaod[idx2B[a, b], idx2B[i, j]] = Gamma[idx2B[a, b],\n idx2B[i, j]]\n Gammaod[idx2B[i, j], idx2B[a, b]] = Gamma[idx2B[i, j],\n idx2B[a, b]]\n Gammad = Gamma - Gammaod\n eta1B = np.zeros_like(f)\n eta1B += commutator(fd, fod)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n eta1B[p, q] += fd[i, a] * Gammaod[idx2B[a, p], idx2B[i, q]\n ] - fd[a, i] * Gammaod[idx2B[i, p], idx2B[a, q]] - fod[\n i, a] * Gammad[idx2B[a, p], idx2B[i, q]] + fod[a, i\n ] * Gammad[idx2B[i, p], idx2B[a, q]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[i, p], idx2B[i, q]] -\n transpose(GammaGamma)[idx2B[i, p], idx2B[i, q]])\n GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(GammaGamma)[idx2B[r, p], idx2B[r, q]])\n eta2B = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n eta2B[idx2B[p, q], idx2B[r, s]] += fd[p, t] * Gammaod[\n idx2B[t, q], idx2B[r, s]] + fd[q, t] * Gammaod[\n idx2B[p, t], idx2B[r, s]] - fd[t, r] * Gammaod[\n idx2B[p, q], idx2B[t, s]] - fd[t, s] * Gammaod[\n idx2B[p, q], idx2B[r, t]] - fod[p, t] * Gammad[\n idx2B[t, q], idx2B[r, s]] - fod[q, t] * Gammad[\n idx2B[p, t], idx2B[r, s]] + fod[t, r] * Gammad[\n idx2B[p, q], idx2B[t, s]] + fod[t, s] * Gammad[\n 
idx2B[p, q], idx2B[r, t]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))\n Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)\n Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)\n GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))\n GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B,\n basph2B, idxph2B)\n work = np.zeros_like(GammaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= GammaGamma[i1, i2] - GammaGamma[idx2B[j, i], i2\n ] - GammaGamma[i1, idx2B[l, k]] + GammaGamma[idx2B[j, i],\n idx2B[l, k]]\n GammaGamma = work\n eta2B += GammaGamma\n return eta1B, eta2B\n\n\ndef flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n basph2B = user_data['basph2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n dE = 0.0\n for i in holes:\n for a in particles:\n dE += eta1B[i, a] * f[a, i] - eta1B[a, i] * f[i, a]\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n dE += 0.5 * eta2B[idx2B[i, j], idx2B[a, b]] * Gamma[\n idx2B[a, b], idx2B[i, j]]\n df = np.zeros_like(f)\n df += commutator(eta1B, f)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n df[p, q] += eta1B[i, a] * Gamma[idx2B[a, p], idx2B[i, q]\n ] - eta1B[a, i] * Gamma[idx2B[i, p], idx2B[a, q]] - f[\n i, a] * eta2B[idx2B[a, p], idx2B[i, q]] + f[a, i\n ] * eta2B[idx2B[i, p], idx2B[a, q]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n df[p, q] += 0.5 * (etaGamma[idx2B[i, p], idx2B[i, q]] +\n transpose(etaGamma)[idx2B[i, p], idx2B[i, q]])\n etaGamma = dot(eta2B, dot(occC_2B, Gamma))\n 
for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n df[p, q] += 0.5 * (etaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(etaGamma)[idx2B[r, p], idx2B[r, q]])\n dGamma = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n dGamma[idx2B[p, q], idx2B[r, s]] += eta1B[p, t\n ] * Gamma[idx2B[t, q], idx2B[r, s]] + eta1B[q, t\n ] * Gamma[idx2B[p, t], idx2B[r, s]] - eta1B[t, r\n ] * Gamma[idx2B[p, q], idx2B[t, s]] - eta1B[t, s\n ] * Gamma[idx2B[p, q], idx2B[r, t]] - f[p, t\n ] * eta2B[idx2B[t, q], idx2B[r, s]] - f[q, t\n ] * eta2B[idx2B[p, t], idx2B[r, s]] + f[t, r\n ] * eta2B[idx2B[p, q], idx2B[t, s]] + f[t, s\n ] * eta2B[idx2B[p, q], idx2B[r, t]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B,\n idxph2B)\n work = np.zeros_like(etaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= etaGamma[i1, i2] - etaGamma[idx2B[j, i], i2\n ] - etaGamma[i1, idx2B[l, k]] + etaGamma[idx2B[j, i], idx2B\n [l, k]]\n etaGamma = work\n dGamma += etaGamma\n return dE, df, dGamma\n\n\ndef get_operator_from_y(y, dim1B, dim2B):\n ptr = 0\n zero_body = y[ptr]\n ptr += 1\n one_body = reshape(y[ptr:ptr + dim1B * dim1B], (dim1B, dim1B))\n ptr += dim1B * dim1B\n two_body = reshape(y[ptr:ptr + dim2B * dim2B], (dim2B, dim2B))\n return zero_body, one_body, two_body\n\n\n<mask token>\n\n\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n dim = len(bas1B)\n H1B = np.zeros((dim, dim))\n for i in bas1B:\n H1B[i, i] = delta * np.floor_divide(i, 2)\n dim = 
len(bas2B)\n H2B = np.zeros((dim, dim))\n for i, j in bas2B:\n if i % 2 == 0 and j == i + 1:\n for k, l in bas2B:\n if k % 2 == 0 and l == k + 1:\n H2B[idx2B[i, j], idx2B[k, l]] = -0.5 * g\n H2B[idx2B[j, i], idx2B[k, l]] = 0.5 * g\n H2B[idx2B[i, j], idx2B[l, k]] = 0.5 * g\n H2B[idx2B[j, i], idx2B[l, k]] = -0.5 * g\n return H1B, H2B\n\n\n<mask token>\n\n\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i, i] + f[j, j] - f[a, a] - f[b, b]\n me = Gamma[idx2B[a, b], idx2B[i, j]]\n DE2 += 0.25 * me * me / denom\n return DE2\n\n\n<mask token>\n\n\ndef main():\n delta = float(argv[1])\n g = float(argv[2])\n particles = 4\n dim1B = 8\n holes = [0, 1, 2, 3]\n particles = [4, 5, 6, 7]\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = construct_basis_ph2B(holes, particles)\n idx2B = construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n user_data = {'dim1B': dim1B, 'holes': holes, 'particles': particles,\n 'bas1B': bas1B, 'bas2B': bas2B, 'basph2B': basph2B, 'idx2B': idx2B,\n 'idxph2B': idxph2B, 'occ1B': occ1B, 'occA_2B': occA_2B, 'occB_2B':\n occB_2B, 'occC_2B': occC_2B, 'occphA_2B': occphA_2B, 'eta_norm': \n 0.0, 'dE': 0.0, 'calc_eta': eta_white_atan, 'calc_rhs': flow_imsrg2}\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n E, f, Gamma = normal_order(H1B, H2B, user_data)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n solver = ode(derivative_wrapper, jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n 
solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.0)\n sfinal = 50\n ds = 0.1\n print(\n '%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s'\n % ('s', 'E', 'DE(2)', 'DE(3)', 'E+DE', 'dE/ds', '||eta||',\n '||fod||', '||Gammaod||'))\n print('-' * 148)\n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, step=True)\n dim2B = dim1B * dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n print(\n '%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f'\n % (solver.t, E, DE2, DE3, E + DE2 + DE3, user_data['dE'],\n user_data['eta_norm'], norm_fod, norm_Gammaod))\n if abs(DE2 / E) < 1e-07:\n break\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef construct_basis_ph2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n for i in holes:\n for a in particles:\n basis.append((i, a))\n for a in particles:\n for i in holes:\n basis.append((a, i))\n for a in particles:\n for b in particles:\n basis.append((a, b))\n return basis\n\n\n<mask token>\n\n\ndef ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):\n dim = len(basph2B)\n Gamma_ph = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(basph2B):\n for i2, (c, d) in enumerate(basph2B):\n Gamma_ph[i1, i2] -= Gamma[idx2B[a, d], idx2B[c, b]]\n return Gamma_ph\n\n\ndef inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):\n dim = len(bas2B)\n Gamma = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(bas2B):\n for i2, (c, d) in enumerate(bas2B):\n Gamma[i1, i2] -= Gamma_ph[idxph2B[a, d], idxph2B[c, b]]\n return Gamma\n\n\n<mask token>\n\n\ndef calc_fod_norm(f, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n norm = 0.0\n for a in particles:\n for i in holes:\n norm += f[a, i] ** 2 + f[i, a] ** 2\n return np.sqrt(norm)\n\n\ndef calc_Gammaod_norm(Gamma, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n norm = 0.0\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n norm += Gamma[idx2B[a, b], idx2B[i, j]] ** 2 + Gamma[\n idx2B[i, j], idx2B[a, b]] ** 2\n return np.sqrt(norm)\n\n\ndef construct_occupation_1B(bas1B, holes, particles):\n dim = len(bas1B)\n occ = np.zeros(dim)\n for i in holes:\n occ[i] = 1.0\n return occ\n\n\n<mask token>\n\n\ndef construct_occupationB_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, i1] = 1.0 - occ1B[i] - occ1B[j]\n return occ\n\n\ndef construct_occupationC_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, 
i1] = occ1B[i] * occ1B[j]\n return occ\n\n\ndef eta_brillouin(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n eta1B[a, i] = f[a, i]\n eta1B[i, a] = -f[a, i]\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n val = Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_imtime(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n dE = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = np.sign(dE) * f[a, i]\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n dE = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = np.sign(dE) * Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n 
idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_mp(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_atan(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = 0.5 * np.arctan(2 * f[a, i] / denom)\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = 0.5 * np.arctan(2 * Gamma[idx2B[a, b], idx2B[i, j\n ]] / denom)\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_wegner(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n 
holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n fd = np.zeros_like(f)\n fod = np.zeros_like(f)\n Gammad = np.zeros_like(Gamma)\n Gammaod = np.zeros_like(Gamma)\n for a in particles:\n for i in holes:\n fod[a, i] = f[a, i]\n fod[i, a] = f[i, a]\n fd = f - fod\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n Gammaod[idx2B[a, b], idx2B[i, j]] = Gamma[idx2B[a, b],\n idx2B[i, j]]\n Gammaod[idx2B[i, j], idx2B[a, b]] = Gamma[idx2B[i, j],\n idx2B[a, b]]\n Gammad = Gamma - Gammaod\n eta1B = np.zeros_like(f)\n eta1B += commutator(fd, fod)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n eta1B[p, q] += fd[i, a] * Gammaod[idx2B[a, p], idx2B[i, q]\n ] - fd[a, i] * Gammaod[idx2B[i, p], idx2B[a, q]] - fod[\n i, a] * Gammad[idx2B[a, p], idx2B[i, q]] + fod[a, i\n ] * Gammad[idx2B[i, p], idx2B[a, q]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[i, p], idx2B[i, q]] -\n transpose(GammaGamma)[idx2B[i, p], idx2B[i, q]])\n GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(GammaGamma)[idx2B[r, p], idx2B[r, q]])\n eta2B = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n eta2B[idx2B[p, q], idx2B[r, s]] += fd[p, t] * Gammaod[\n idx2B[t, q], idx2B[r, s]] + fd[q, t] * Gammaod[\n idx2B[p, t], idx2B[r, s]] - fd[t, r] * Gammaod[\n idx2B[p, q], idx2B[t, s]] - fd[t, s] * Gammaod[\n idx2B[p, q], idx2B[r, t]] - fod[p, t] * 
Gammad[\n idx2B[t, q], idx2B[r, s]] - fod[q, t] * Gammad[\n idx2B[p, t], idx2B[r, s]] + fod[t, r] * Gammad[\n idx2B[p, q], idx2B[t, s]] + fod[t, s] * Gammad[\n idx2B[p, q], idx2B[r, t]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))\n Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)\n Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)\n GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))\n GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B,\n basph2B, idxph2B)\n work = np.zeros_like(GammaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= GammaGamma[i1, i2] - GammaGamma[idx2B[j, i], i2\n ] - GammaGamma[i1, idx2B[l, k]] + GammaGamma[idx2B[j, i],\n idx2B[l, k]]\n GammaGamma = work\n eta2B += GammaGamma\n return eta1B, eta2B\n\n\ndef flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n basph2B = user_data['basph2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n dE = 0.0\n for i in holes:\n for a in particles:\n dE += eta1B[i, a] * f[a, i] - eta1B[a, i] * f[i, a]\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n dE += 0.5 * eta2B[idx2B[i, j], idx2B[a, b]] * Gamma[\n idx2B[a, b], idx2B[i, j]]\n df = np.zeros_like(f)\n df += commutator(eta1B, f)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n df[p, q] += eta1B[i, a] * Gamma[idx2B[a, p], idx2B[i, q]\n ] - eta1B[a, i] * Gamma[idx2B[i, p], idx2B[a, q]] - f[\n i, a] * eta2B[idx2B[a, p], idx2B[i, q]] + f[a, i\n ] * eta2B[idx2B[i, p], idx2B[a, q]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in 
holes:\n df[p, q] += 0.5 * (etaGamma[idx2B[i, p], idx2B[i, q]] +\n transpose(etaGamma)[idx2B[i, p], idx2B[i, q]])\n etaGamma = dot(eta2B, dot(occC_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n df[p, q] += 0.5 * (etaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(etaGamma)[idx2B[r, p], idx2B[r, q]])\n dGamma = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n dGamma[idx2B[p, q], idx2B[r, s]] += eta1B[p, t\n ] * Gamma[idx2B[t, q], idx2B[r, s]] + eta1B[q, t\n ] * Gamma[idx2B[p, t], idx2B[r, s]] - eta1B[t, r\n ] * Gamma[idx2B[p, q], idx2B[t, s]] - eta1B[t, s\n ] * Gamma[idx2B[p, q], idx2B[r, t]] - f[p, t\n ] * eta2B[idx2B[t, q], idx2B[r, s]] - f[q, t\n ] * eta2B[idx2B[p, t], idx2B[r, s]] + f[t, r\n ] * eta2B[idx2B[p, q], idx2B[t, s]] + f[t, s\n ] * eta2B[idx2B[p, q], idx2B[r, t]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B,\n idxph2B)\n work = np.zeros_like(etaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= etaGamma[i1, i2] - etaGamma[idx2B[j, i], i2\n ] - etaGamma[i1, idx2B[l, k]] + etaGamma[idx2B[j, i], idx2B\n [l, k]]\n etaGamma = work\n dGamma += etaGamma\n return dE, df, dGamma\n\n\ndef get_operator_from_y(y, dim1B, dim2B):\n ptr = 0\n zero_body = y[ptr]\n ptr += 1\n one_body = reshape(y[ptr:ptr + dim1B * dim1B], (dim1B, dim1B))\n ptr += dim1B * dim1B\n two_body = reshape(y[ptr:ptr + dim2B * dim2B], (dim2B, dim2B))\n return zero_body, one_body, two_body\n\n\ndef derivative_wrapper(t, y, user_data):\n dim1B = user_data['dim1B']\n dim2B = dim1B * dim1B\n holes = 
user_data['holes']\n particles = user_data['particles']\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n idxph2B = user_data['idxph2B']\n occA_2B = user_data['occA_2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n calc_eta = user_data['calc_eta']\n calc_rhs = user_data['calc_rhs']\n E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)\n eta1B, eta2B = calc_eta(f, Gamma, user_data)\n dE, df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)\n dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))\n user_data['dE'] = dE\n user_data['eta_norm'] = np.linalg.norm(eta1B, ord='fro') + np.linalg.norm(\n eta2B, ord='fro')\n return dy\n\n\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n dim = len(bas1B)\n H1B = np.zeros((dim, dim))\n for i in bas1B:\n H1B[i, i] = delta * np.floor_divide(i, 2)\n dim = len(bas2B)\n H2B = np.zeros((dim, dim))\n for i, j in bas2B:\n if i % 2 == 0 and j == i + 1:\n for k, l in bas2B:\n if k % 2 == 0 and l == k + 1:\n H2B[idx2B[i, j], idx2B[k, l]] = -0.5 * g\n H2B[idx2B[j, i], idx2B[k, l]] = 0.5 * g\n H2B[idx2B[i, j], idx2B[l, k]] = 0.5 * g\n H2B[idx2B[j, i], idx2B[l, k]] = -0.5 * g\n return H1B, H2B\n\n\n<mask token>\n\n\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i, i] + f[j, j] - f[a, a] - f[b, b]\n me = Gamma[idx2B[a, b], idx2B[i, j]]\n DE2 += 0.25 * me * me / denom\n return DE2\n\n\n<mask token>\n\n\ndef main():\n delta = float(argv[1])\n g = float(argv[2])\n particles = 4\n dim1B = 8\n holes = [0, 1, 2, 3]\n particles = [4, 5, 6, 7]\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = 
construct_basis_ph2B(holes, particles)\n idx2B = construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n user_data = {'dim1B': dim1B, 'holes': holes, 'particles': particles,\n 'bas1B': bas1B, 'bas2B': bas2B, 'basph2B': basph2B, 'idx2B': idx2B,\n 'idxph2B': idxph2B, 'occ1B': occ1B, 'occA_2B': occA_2B, 'occB_2B':\n occB_2B, 'occC_2B': occC_2B, 'occphA_2B': occphA_2B, 'eta_norm': \n 0.0, 'dE': 0.0, 'calc_eta': eta_white_atan, 'calc_rhs': flow_imsrg2}\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n E, f, Gamma = normal_order(H1B, H2B, user_data)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n solver = ode(derivative_wrapper, jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.0)\n sfinal = 50\n ds = 0.1\n print(\n '%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s'\n % ('s', 'E', 'DE(2)', 'DE(3)', 'E+DE', 'dE/ds', '||eta||',\n '||fod||', '||Gammaod||'))\n print('-' * 148)\n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, step=True)\n dim2B = dim1B * dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n print(\n '%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f'\n % (solver.t, E, DE2, DE3, E + DE2 + DE3, user_data['dE'],\n user_data['eta_norm'], norm_fod, norm_Gammaod))\n if abs(DE2 / E) < 1e-07:\n break\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef construct_basis_ph2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n for i in holes:\n for a in particles:\n basis.append((i, a))\n for a in particles:\n for i in holes:\n basis.append((a, i))\n for a in particles:\n for b in particles:\n basis.append((a, b))\n return basis\n\n\n<mask token>\n\n\ndef ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B):\n dim = len(basph2B)\n Gamma_ph = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(basph2B):\n for i2, (c, d) in enumerate(basph2B):\n Gamma_ph[i1, i2] -= Gamma[idx2B[a, d], idx2B[c, b]]\n return Gamma_ph\n\n\ndef inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):\n dim = len(bas2B)\n Gamma = np.zeros((dim, dim))\n for i1, (a, b) in enumerate(bas2B):\n for i2, (c, d) in enumerate(bas2B):\n Gamma[i1, i2] -= Gamma_ph[idxph2B[a, d], idxph2B[c, b]]\n return Gamma\n\n\n<mask token>\n\n\ndef calc_fod_norm(f, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n norm = 0.0\n for a in particles:\n for i in holes:\n norm += f[a, i] ** 2 + f[i, a] ** 2\n return np.sqrt(norm)\n\n\ndef calc_Gammaod_norm(Gamma, user_data):\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n norm = 0.0\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n norm += Gamma[idx2B[a, b], idx2B[i, j]] ** 2 + Gamma[\n idx2B[i, j], idx2B[a, b]] ** 2\n return np.sqrt(norm)\n\n\ndef construct_occupation_1B(bas1B, holes, particles):\n dim = len(bas1B)\n occ = np.zeros(dim)\n for i in holes:\n occ[i] = 1.0\n return occ\n\n\ndef construct_occupationA_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, i1] = occ1B[i] - occ1B[j]\n return occ\n\n\ndef construct_occupationB_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, i1] = 1.0 - occ1B[i] - 
occ1B[j]\n return occ\n\n\ndef construct_occupationC_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim, dim))\n for i1, (i, j) in enumerate(bas2B):\n occ[i1, i1] = occ1B[i] * occ1B[j]\n return occ\n\n\ndef eta_brillouin(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n eta1B[a, i] = f[a, i]\n eta1B[i, a] = -f[a, i]\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n val = Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_imtime(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n dE = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = np.sign(dE) * f[a, i]\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n dE = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = np.sign(dE) * Gamma[idx2B[a, b], idx2B[i, j]]\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n 
eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_mp(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i]\n val = f[a, i] / denom\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j]\n val = Gamma[idx2B[a, b], idx2B[i, j]] / denom\n eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_white_atan(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n eta1B = np.zeros_like(f)\n for a in particles:\n for i in holes:\n denom = f[a, a] - f[i, i] + Gamma[idx2B[a, i], idx2B[a, i]]\n val = 0.5 * np.arctan(2 * f[a, i] / denom)\n eta1B[a, i] = val\n eta1B[i, a] = -val\n eta2B = np.zeros_like(Gamma)\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = f[a, a] + f[b, b] - f[i, i] - f[j, j] + Gamma[\n idx2B[a, b], idx2B[a, b]] + Gamma[idx2B[i, j],\n idx2B[i, j]] - Gamma[idx2B[a, i], idx2B[a, i]] - Gamma[\n idx2B[a, j], idx2B[a, j]] - Gamma[idx2B[b, i],\n idx2B[b, i]] - Gamma[idx2B[b, j], idx2B[b, j]]\n val = 0.5 * np.arctan(2 * Gamma[idx2B[a, b], idx2B[i, j\n ]] / denom)\n 
eta2B[idx2B[a, b], idx2B[i, j]] = val\n eta2B[idx2B[i, j], idx2B[a, b]] = -val\n return eta1B, eta2B\n\n\ndef eta_wegner(f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n fd = np.zeros_like(f)\n fod = np.zeros_like(f)\n Gammad = np.zeros_like(Gamma)\n Gammaod = np.zeros_like(Gamma)\n for a in particles:\n for i in holes:\n fod[a, i] = f[a, i]\n fod[i, a] = f[i, a]\n fd = f - fod\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n Gammaod[idx2B[a, b], idx2B[i, j]] = Gamma[idx2B[a, b],\n idx2B[i, j]]\n Gammaod[idx2B[i, j], idx2B[a, b]] = Gamma[idx2B[i, j],\n idx2B[a, b]]\n Gammad = Gamma - Gammaod\n eta1B = np.zeros_like(f)\n eta1B += commutator(fd, fod)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n eta1B[p, q] += fd[i, a] * Gammaod[idx2B[a, p], idx2B[i, q]\n ] - fd[a, i] * Gammaod[idx2B[i, p], idx2B[a, q]] - fod[\n i, a] * Gammad[idx2B[a, p], idx2B[i, q]] + fod[a, i\n ] * Gammad[idx2B[i, p], idx2B[a, q]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[i, p], idx2B[i, q]] -\n transpose(GammaGamma)[idx2B[i, p], idx2B[i, q]])\n GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n eta1B[p, q] += 0.5 * (GammaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(GammaGamma)[idx2B[r, p], idx2B[r, q]])\n eta2B = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n eta2B[idx2B[p, q], idx2B[r, s]] += fd[p, t] * Gammaod[\n idx2B[t, q], idx2B[r, 
s]] + fd[q, t] * Gammaod[\n idx2B[p, t], idx2B[r, s]] - fd[t, r] * Gammaod[\n idx2B[p, q], idx2B[t, s]] - fd[t, s] * Gammaod[\n idx2B[p, q], idx2B[r, t]] - fod[p, t] * Gammad[\n idx2B[t, q], idx2B[r, s]] - fod[q, t] * Gammad[\n idx2B[p, t], idx2B[r, s]] + fod[t, r] * Gammad[\n idx2B[p, q], idx2B[t, s]] + fod[t, s] * Gammad[\n idx2B[p, q], idx2B[r, t]]\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n eta2B += 0.5 * (GammaGamma - transpose(GammaGamma))\n Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)\n Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)\n GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))\n GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B,\n basph2B, idxph2B)\n work = np.zeros_like(GammaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= GammaGamma[i1, i2] - GammaGamma[idx2B[j, i], i2\n ] - GammaGamma[i1, idx2B[l, k]] + GammaGamma[idx2B[j, i],\n idx2B[l, k]]\n GammaGamma = work\n eta2B += GammaGamma\n return eta1B, eta2B\n\n\ndef flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):\n dim1B = user_data['dim1B']\n holes = user_data['holes']\n particles = user_data['particles']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n basph2B = user_data['basph2B']\n idxph2B = user_data['idxph2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n dE = 0.0\n for i in holes:\n for a in particles:\n dE += eta1B[i, a] * f[a, i] - eta1B[a, i] * f[i, a]\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n dE += 0.5 * eta2B[idx2B[i, j], idx2B[a, b]] * Gamma[\n idx2B[a, b], idx2B[i, j]]\n df = np.zeros_like(f)\n df += commutator(eta1B, f)\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n df[p, q] += eta1B[i, a] * Gamma[idx2B[a, p], idx2B[i, q]\n ] - eta1B[a, i] * Gamma[idx2B[i, p], idx2B[a, q]] - f[\n i, a] * eta2B[idx2B[a, p], 
idx2B[i, q]] + f[a, i\n ] * eta2B[idx2B[i, p], idx2B[a, q]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n df[p, q] += 0.5 * (etaGamma[idx2B[i, p], idx2B[i, q]] +\n transpose(etaGamma)[idx2B[i, p], idx2B[i, q]])\n etaGamma = dot(eta2B, dot(occC_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n df[p, q] += 0.5 * (etaGamma[idx2B[r, p], idx2B[r, q]] +\n transpose(etaGamma)[idx2B[r, p], idx2B[r, q]])\n dGamma = np.zeros_like(Gamma)\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n dGamma[idx2B[p, q], idx2B[r, s]] += eta1B[p, t\n ] * Gamma[idx2B[t, q], idx2B[r, s]] + eta1B[q, t\n ] * Gamma[idx2B[p, t], idx2B[r, s]] - eta1B[t, r\n ] * Gamma[idx2B[p, q], idx2B[t, s]] - eta1B[t, s\n ] * Gamma[idx2B[p, q], idx2B[r, t]] - f[p, t\n ] * eta2B[idx2B[t, q], idx2B[r, s]] - f[q, t\n ] * eta2B[idx2B[p, t], idx2B[r, s]] + f[t, r\n ] * eta2B[idx2B[p, q], idx2B[t, s]] + f[t, s\n ] * eta2B[idx2B[p, q], idx2B[r, t]]\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B,\n idxph2B)\n work = np.zeros_like(etaGamma)\n for i1, (i, j) in enumerate(bas2B):\n for i2, (k, l) in enumerate(bas2B):\n work[i1, i2] -= etaGamma[i1, i2] - etaGamma[idx2B[j, i], i2\n ] - etaGamma[i1, idx2B[l, k]] + etaGamma[idx2B[j, i], idx2B\n [l, k]]\n etaGamma = work\n dGamma += etaGamma\n return dE, df, dGamma\n\n\ndef get_operator_from_y(y, dim1B, dim2B):\n ptr = 0\n zero_body = y[ptr]\n ptr += 1\n one_body = reshape(y[ptr:ptr + dim1B * dim1B], (dim1B, dim1B))\n ptr += dim1B * dim1B\n two_body = reshape(y[ptr:ptr + dim2B * dim2B], 
(dim2B, dim2B))\n return zero_body, one_body, two_body\n\n\ndef derivative_wrapper(t, y, user_data):\n dim1B = user_data['dim1B']\n dim2B = dim1B * dim1B\n holes = user_data['holes']\n particles = user_data['particles']\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n basph2B = user_data['basph2B']\n idx2B = user_data['idx2B']\n idxph2B = user_data['idxph2B']\n occA_2B = user_data['occA_2B']\n occB_2B = user_data['occB_2B']\n occC_2B = user_data['occC_2B']\n occphA_2B = user_data['occphA_2B']\n calc_eta = user_data['calc_eta']\n calc_rhs = user_data['calc_rhs']\n E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)\n eta1B, eta2B = calc_eta(f, Gamma, user_data)\n dE, df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)\n dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))\n user_data['dE'] = dE\n user_data['eta_norm'] = np.linalg.norm(eta1B, ord='fro') + np.linalg.norm(\n eta2B, ord='fro')\n return dy\n\n\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data['bas1B']\n bas2B = user_data['bas2B']\n idx2B = user_data['idx2B']\n dim = len(bas1B)\n H1B = np.zeros((dim, dim))\n for i in bas1B:\n H1B[i, i] = delta * np.floor_divide(i, 2)\n dim = len(bas2B)\n H2B = np.zeros((dim, dim))\n for i, j in bas2B:\n if i % 2 == 0 and j == i + 1:\n for k, l in bas2B:\n if k % 2 == 0 and l == k + 1:\n H2B[idx2B[i, j], idx2B[k, l]] = -0.5 * g\n H2B[idx2B[j, i], idx2B[k, l]] = 0.5 * g\n H2B[idx2B[i, j], idx2B[l, k]] = 0.5 * g\n H2B[idx2B[j, i], idx2B[l, k]] = -0.5 * g\n return H1B, H2B\n\n\n<mask token>\n\n\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n particles = user_data['particles']\n holes = user_data['holes']\n idx2B = user_data['idx2B']\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i, i] + f[j, j] - f[a, a] - f[b, b]\n me = Gamma[idx2B[a, b], idx2B[i, j]]\n DE2 += 0.25 * me * me / denom\n return DE2\n\n\n<mask token>\n\n\ndef main():\n delta = float(argv[1])\n g = float(argv[2])\n 
particles = 4\n dim1B = 8\n holes = [0, 1, 2, 3]\n particles = [4, 5, 6, 7]\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = construct_basis_ph2B(holes, particles)\n idx2B = construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n user_data = {'dim1B': dim1B, 'holes': holes, 'particles': particles,\n 'bas1B': bas1B, 'bas2B': bas2B, 'basph2B': basph2B, 'idx2B': idx2B,\n 'idxph2B': idxph2B, 'occ1B': occ1B, 'occA_2B': occA_2B, 'occB_2B':\n occB_2B, 'occC_2B': occC_2B, 'occphA_2B': occphA_2B, 'eta_norm': \n 0.0, 'dE': 0.0, 'calc_eta': eta_white_atan, 'calc_rhs': flow_imsrg2}\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n E, f, Gamma = normal_order(H1B, H2B, user_data)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n solver = ode(derivative_wrapper, jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.0)\n sfinal = 50\n ds = 0.1\n print(\n '%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s'\n % ('s', 'E', 'DE(2)', 'DE(3)', 'E+DE', 'dE/ds', '||eta||',\n '||fod||', '||Gammaod||'))\n print('-' * 148)\n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, step=True)\n dim2B = dim1B * dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n print(\n '%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f'\n % (solver.t, E, DE2, DE3, E + DE2 + DE3, user_data['dE'],\n user_data['eta_norm'], norm_fod, norm_Gammaod))\n if abs(DE2 / E) < 
1e-07:\n break\n return\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\n#------------------------------------------------------------------------------\n# imsrg_pairing.py\n#\n# author: H. Hergert \n# version: 1.5.0\n# date: Dec 6, 2016\n# \n# tested with Python v2.7\n# \n# Solves the pairing model for four particles in a basis of four doubly \n# degenerate states by means of an In-Medium Similarity Renormalization \n# Group (IMSRG) flow.\n#\n#------------------------------------------------------------------------------\n\nimport numpy as np\nfrom numpy import array, dot, diag, reshape, transpose\nfrom scipy.linalg import eigvalsh\nfrom scipy.integrate import odeint, ode\n\nfrom sys import argv\n\n#-----------------------------------------------------------------------------------\n# basis and index functions\n#-----------------------------------------------------------------------------------\n\ndef construct_basis_2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n\n for i in holes:\n for a in particles:\n basis.append((i, a))\n\n for a in particles:\n for i in holes:\n basis.append((a, i))\n\n for a in particles:\n for b in particles:\n basis.append((a, b))\n\n return basis\n\n\ndef construct_basis_ph2B(holes, particles):\n basis = []\n for i in holes:\n for j in holes:\n basis.append((i, j))\n\n for i in holes:\n for a in particles:\n basis.append((i, a))\n\n for a in particles:\n for i in holes:\n basis.append((a, i))\n\n for a in particles:\n for b in particles:\n basis.append((a, b))\n\n return basis\n\n\n#\n# We use dictionaries for the reverse lookup of state indices\n#\ndef construct_index_2B(bas2B):\n index = { }\n for i, state in enumerate(bas2B):\n index[state] = i\n\n return index\n\n\n\n#-----------------------------------------------------------------------------------\n# transform matrices to particle-hole representation\n#-----------------------------------------------------------------------------------\ndef ph_transform_2B(Gamma, bas2B, 
idx2B, basph2B, idxph2B):\n dim = len(basph2B)\n Gamma_ph = np.zeros((dim, dim))\n\n for i1, (a,b) in enumerate(basph2B):\n for i2, (c, d) in enumerate(basph2B):\n Gamma_ph[i1, i2] -= Gamma[idx2B[(a,d)], idx2B[(c,b)]]\n\n return Gamma_ph\n\ndef inverse_ph_transform_2B(Gamma_ph, bas2B, idx2B, basph2B, idxph2B):\n dim = len(bas2B)\n Gamma = np.zeros((dim, dim))\n\n for i1, (a,b) in enumerate(bas2B):\n for i2, (c, d) in enumerate(bas2B):\n Gamma[i1, i2] -= Gamma_ph[idxph2B[(a,d)], idxph2B[(c,b)]]\n \n return Gamma\n\n#-----------------------------------------------------------------------------------\n# commutator of matrices\n#-----------------------------------------------------------------------------------\ndef commutator(a,b):\n return dot(a,b) - dot(b,a)\n\n#-----------------------------------------------------------------------------------\n# norms of off-diagonal Hamiltonian pieces\n#-----------------------------------------------------------------------------------\ndef calc_fod_norm(f, user_data):\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n \n norm = 0.0\n for a in particles:\n for i in holes:\n norm += f[a,i]**2 + f[i,a]**2\n\n return np.sqrt(norm)\n\ndef calc_Gammaod_norm(Gamma, user_data):\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n norm = 0.0\n for a in particles: \n for b in particles:\n for i in holes:\n for j in holes:\n norm += Gamma[idx2B[(a,b)],idx2B[(i,j)]]**2 + Gamma[idx2B[(i,j)],idx2B[(a,b)]]**2\n\n return np.sqrt(norm)\n\n#-----------------------------------------------------------------------------------\n# occupation number matrices\n#-----------------------------------------------------------------------------------\ndef construct_occupation_1B(bas1B, holes, particles):\n dim = len(bas1B)\n occ = np.zeros(dim)\n\n for i in holes:\n occ[i] = 1.\n\n return occ\n\n# diagonal matrix: n_a - n_b\ndef construct_occupationA_2B(bas2B, occ1B):\n dim = len(bas2B)\n 
occ = np.zeros((dim,dim))\n\n for i1, (i,j) in enumerate(bas2B):\n occ[i1, i1] = occ1B[i] - occ1B[j]\n\n return occ\n\n\n# diagonal matrix: 1 - n_a - n_b\ndef construct_occupationB_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim,dim))\n\n for i1, (i,j) in enumerate(bas2B):\n occ[i1, i1] = 1. - occ1B[i] - occ1B[j]\n\n return occ\n\n# diagonal matrix: n_a * n_b\ndef construct_occupationC_2B(bas2B, occ1B):\n dim = len(bas2B)\n occ = np.zeros((dim,dim))\n\n for i1, (i,j) in enumerate(bas2B):\n occ[i1, i1] = occ1B[i] * occ1B[j]\n\n return occ\n\n#-----------------------------------------------------------------------------------\n# generators\n#-----------------------------------------------------------------------------------\ndef eta_brillouin(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n # (1-n_a)n_i - n_a(1-n_i) = n_i - n_a\n eta1B[a, i] = f[a,i]\n eta1B[i, a] = -f[a,i]\n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n val = Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\ndef eta_imtime(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n dE = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]\n val = np.sign(dE)*f[a,i]\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n dE = ( \n f[a,a] + f[b,b] - 
f[i,i] - f[j,j] \n + Gamma[idx2B[(a,b)],idx2B[(a,b)]] \n + Gamma[idx2B[(i,j)],idx2B[(i,j)]]\n - Gamma[idx2B[(a,i)],idx2B[(a,i)]] \n - Gamma[idx2B[(a,j)],idx2B[(a,j)]] \n - Gamma[idx2B[(b,i)],idx2B[(b,i)]] \n - Gamma[idx2B[(b,j)],idx2B[(b,j)]] \n )\n\n val = np.sign(dE)*Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\n\ndef eta_white(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n denom = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]\n val = f[a,i]/denom\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n + Gamma[idx2B[(a,b)],idx2B[(a,b)]] \n + Gamma[idx2B[(i,j)],idx2B[(i,j)]]\n - Gamma[idx2B[(a,i)],idx2B[(a,i)]] \n - Gamma[idx2B[(a,j)],idx2B[(a,j)]] \n - Gamma[idx2B[(b,i)],idx2B[(b,i)]] \n - Gamma[idx2B[(b,j)],idx2B[(b,j)]] \n )\n\n val = Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\n\ndef eta_white_mp(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n denom = f[a,a] - f[i,i]\n val = f[a,i]/denom\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n )\n\n val = 
Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\ndef eta_white_atan(f, Gamma, user_data):\n dim1B = user_data[\"dim1B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n for a in particles:\n for i in holes:\n denom = f[a,a] - f[i,i] + Gamma[idx2B[(a,i)], idx2B[(a,i)]]\n val = 0.5 * np.arctan(2 * f[a,i]/denom)\n eta1B[a, i] = val\n eta1B[i, a] = -val \n\n # two-body part of the generator\n eta2B = np.zeros_like(Gamma)\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n denom = ( \n f[a,a] + f[b,b] - f[i,i] - f[j,j] \n + Gamma[idx2B[(a,b)],idx2B[(a,b)]] \n + Gamma[idx2B[(i,j)],idx2B[(i,j)]] \n - Gamma[idx2B[(a,i)],idx2B[(a,i)]] \n - Gamma[idx2B[(a,j)],idx2B[(a,j)]] \n - Gamma[idx2B[(b,i)],idx2B[(b,i)]] \n - Gamma[idx2B[(b,j)],idx2B[(b,j)]] \n )\n\n val = 0.5 * np.arctan(2 * Gamma[idx2B[(a,b)], idx2B[(i,j)]] / denom)\n\n eta2B[idx2B[(a,b)],idx2B[(i,j)]] = val\n eta2B[idx2B[(i,j)],idx2B[(a,b)]] = -val\n\n return eta1B, eta2B\n\n\ndef eta_wegner(f, Gamma, user_data):\n\n dim1B = user_data[\"dim1B\"]\n holes = user_data[\"holes\"]\n particles = user_data[\"particles\"]\n bas2B = user_data[\"bas2B\"]\n basph2B = user_data[\"basph2B\"]\n idx2B = user_data[\"idx2B\"]\n idxph2B = user_data[\"idxph2B\"]\n occB_2B = user_data[\"occB_2B\"]\n occC_2B = user_data[\"occC_2B\"]\n occphA_2B = user_data[\"occphA_2B\"]\n\n\n # split Hamiltonian in diagonal and off-diagonal parts\n fd = np.zeros_like(f)\n fod = np.zeros_like(f)\n Gammad = np.zeros_like(Gamma)\n Gammaod = np.zeros_like(Gamma)\n\n for a in particles:\n for i in holes:\n fod[a, i] = f[a,i]\n fod[i, a] = f[i,a]\n fd = f - fod\n\n for a in particles:\n for b in particles:\n for i in holes:\n for j in holes:\n Gammaod[idx2B[(a,b)], idx2B[(i,j)]] = Gamma[idx2B[(a,b)], 
idx2B[(i,j)]]\n Gammaod[idx2B[(i,j)], idx2B[(a,b)]] = Gamma[idx2B[(i,j)], idx2B[(a,b)]]\n Gammad = Gamma - Gammaod\n\n\n ############################# \n # one-body part of the generator\n eta1B = np.zeros_like(f)\n\n # 1B - 1B\n eta1B += commutator(fd, fod)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n for a in particles:\n eta1B[p,q] += (\n fd[i,a] * Gammaod[idx2B[(a, p)], idx2B[(i, q)]] \n - fd[a,i] * Gammaod[idx2B[(i, p)], idx2B[(a, q)]] \n - fod[i,a] * Gammad[idx2B[(a, p)], idx2B[(i, q)]] \n + fod[a,i] * Gammad[idx2B[(i, p)], idx2B[(a, q)]]\n )\n\n # 2B - 2B\n # n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n eta1B[p,q] += 0.5*(\n GammaGamma[idx2B[(i,p)], idx2B[(i,q)]] \n - transpose(GammaGamma)[idx2B[(i,p)], idx2B[(i,q)]]\n )\n\n GammaGamma = dot(Gammad, dot(occC_2B, Gammaod))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n eta1B[p,q] += 0.5*(\n GammaGamma[idx2B[(r,p)], idx2B[(r,q)]] \n + transpose(GammaGamma)[idx2B[(r,p)], idx2B[(r,q)]] \n )\n\n\n ############################# \n # two-body flow equation \n eta2B = np.zeros_like(Gamma)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n eta2B[idx2B[(p,q)],idx2B[(r,s)]] += (\n fd[p,t] * Gammaod[idx2B[(t,q)],idx2B[(r,s)]] \n + fd[q,t] * Gammaod[idx2B[(p,t)],idx2B[(r,s)]] \n - fd[t,r] * Gammaod[idx2B[(p,q)],idx2B[(t,s)]] \n - fd[t,s] * Gammaod[idx2B[(p,q)],idx2B[(r,t)]]\n - fod[p,t] * Gammad[idx2B[(t,q)],idx2B[(r,s)]] \n - fod[q,t] * Gammad[idx2B[(p,t)],idx2B[(r,s)]] \n + fod[t,r] * Gammad[idx2B[(p,q)],idx2B[(t,s)]] \n + fod[t,s] * Gammad[idx2B[(p,q)],idx2B[(r,t)]]\n )\n\n \n # 2B - 2B - particle and hole ladders\n # Gammad.occB.Gammaod\n GammaGamma = dot(Gammad, dot(occB_2B, Gammaod))\n\n eta2B += 0.5 * (GammaGamma - 
transpose(GammaGamma))\n\n # 2B - 2B - particle-hole chain\n \n # transform matrices to particle-hole representation and calculate \n # Gammad_ph.occA_ph.Gammaod_ph\n Gammad_ph = ph_transform_2B(Gammad, bas2B, idx2B, basph2B, idxph2B)\n Gammaod_ph = ph_transform_2B(Gammaod, bas2B, idx2B, basph2B, idxph2B)\n\n GammaGamma_ph = dot(Gammad_ph, dot(occphA_2B, Gammaod_ph))\n\n # transform back to standard representation\n GammaGamma = inverse_ph_transform_2B(GammaGamma_ph, bas2B, idx2B, basph2B, idxph2B)\n\n # commutator / antisymmetrization\n work = np.zeros_like(GammaGamma)\n for i1, (i,j) in enumerate(bas2B):\n for i2, (k,l) in enumerate(bas2B):\n work[i1, i2] -= (\n GammaGamma[i1, i2] \n - GammaGamma[idx2B[(j,i)], i2] \n - GammaGamma[i1, idx2B[(l,k)]] \n + GammaGamma[idx2B[(j,i)], idx2B[(l,k)]]\n )\n GammaGamma = work\n\n eta2B += GammaGamma\n\n\n return eta1B, eta2B\n\n\n#-----------------------------------------------------------------------------------\n# derivatives \n#-----------------------------------------------------------------------------------\ndef flow_imsrg2(eta1B, eta2B, f, Gamma, user_data):\n\n dim1B = user_data[\"dim1B\"]\n holes = user_data[\"holes\"]\n particles = user_data[\"particles\"]\n bas2B = user_data[\"bas2B\"]\n idx2B = user_data[\"idx2B\"]\n basph2B = user_data[\"basph2B\"]\n idxph2B = user_data[\"idxph2B\"]\n occB_2B = user_data[\"occB_2B\"]\n occC_2B = user_data[\"occC_2B\"]\n occphA_2B = user_data[\"occphA_2B\"]\n\n ############################# \n # zero-body flow equation\n dE = 0.0\n\n for i in holes:\n for a in particles:\n dE += eta1B[i,a] * f[a,i] - eta1B[a,i] * f[i,a]\n\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n dE += 0.5 * eta2B[idx2B[(i,j)], idx2B[(a,b)]] * Gamma[idx2B[(a,b)], idx2B[(i,j)]]\n\n\n ############################# \n # one-body flow equation \n df = np.zeros_like(f)\n\n # 1B - 1B\n df += commutator(eta1B, f)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n 
for i in holes:\n for a in particles:\n df[p,q] += (\n eta1B[i,a] * Gamma[idx2B[(a, p)], idx2B[(i, q)]] \n - eta1B[a,i] * Gamma[idx2B[(i, p)], idx2B[(a, q)]] \n - f[i,a] * eta2B[idx2B[(a, p)], idx2B[(i, q)]] \n + f[a,i] * eta2B[idx2B[(i, p)], idx2B[(a, q)]]\n )\n\n # 2B - 2B\n # n_a n_b nn_c + nn_a nn_b n_c = n_a n_b + (1 - n_a - n_b) * n_c\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for i in holes:\n df[p,q] += 0.5*(\n etaGamma[idx2B[(i,p)], idx2B[(i,q)]] \n + transpose(etaGamma)[idx2B[(i,p)], idx2B[(i,q)]]\n )\n\n etaGamma = dot(eta2B, dot(occC_2B, Gamma))\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n df[p,q] += 0.5*(\n etaGamma[idx2B[(r,p)], idx2B[(r,q)]] \n + transpose(etaGamma)[idx2B[(r,p)], idx2B[(r,q)]] \n )\n\n\n ############################# \n # two-body flow equation \n dGamma = np.zeros_like(Gamma)\n\n # 1B - 2B\n for p in range(dim1B):\n for q in range(dim1B):\n for r in range(dim1B):\n for s in range(dim1B):\n for t in range(dim1B):\n dGamma[idx2B[(p,q)],idx2B[(r,s)]] += (\n eta1B[p,t] * Gamma[idx2B[(t,q)],idx2B[(r,s)]] \n + eta1B[q,t] * Gamma[idx2B[(p,t)],idx2B[(r,s)]] \n - eta1B[t,r] * Gamma[idx2B[(p,q)],idx2B[(t,s)]] \n - eta1B[t,s] * Gamma[idx2B[(p,q)],idx2B[(r,t)]]\n - f[p,t] * eta2B[idx2B[(t,q)],idx2B[(r,s)]] \n - f[q,t] * eta2B[idx2B[(p,t)],idx2B[(r,s)]] \n + f[t,r] * eta2B[idx2B[(p,q)],idx2B[(t,s)]] \n + f[t,s] * eta2B[idx2B[(p,q)],idx2B[(r,t)]]\n )\n\n \n # 2B - 2B - particle and hole ladders\n # eta2B.occB.Gamma\n etaGamma = dot(eta2B, dot(occB_2B, Gamma))\n\n dGamma += 0.5 * (etaGamma + transpose(etaGamma))\n\n # 2B - 2B - particle-hole chain\n \n # transform matrices to particle-hole representation and calculate \n # eta2B_ph.occA_ph.Gamma_ph\n eta2B_ph = ph_transform_2B(eta2B, bas2B, idx2B, basph2B, idxph2B)\n Gamma_ph = ph_transform_2B(Gamma, bas2B, idx2B, basph2B, idxph2B)\n\n etaGamma_ph = dot(eta2B_ph, dot(occphA_2B, Gamma_ph))\n\n # transform back to 
standard representation\n etaGamma = inverse_ph_transform_2B(etaGamma_ph, bas2B, idx2B, basph2B, idxph2B)\n\n # commutator / antisymmetrization\n work = np.zeros_like(etaGamma)\n for i1, (i,j) in enumerate(bas2B):\n for i2, (k,l) in enumerate(bas2B):\n work[i1, i2] -= (\n etaGamma[i1, i2] \n - etaGamma[idx2B[(j,i)], i2] \n - etaGamma[i1, idx2B[(l,k)]] \n + etaGamma[idx2B[(j,i)], idx2B[(l,k)]]\n )\n etaGamma = work\n\n dGamma += etaGamma\n\n\n return dE, df, dGamma\n\n\n#-----------------------------------------------------------------------------------\n# derivative wrapper\n#-----------------------------------------------------------------------------------\ndef get_operator_from_y(y, dim1B, dim2B):\n \n # reshape the solution vector into 0B, 1B, 2B pieces\n ptr = 0\n zero_body = y[ptr]\n\n ptr += 1\n one_body = reshape(y[ptr:ptr+dim1B*dim1B], (dim1B, dim1B))\n\n ptr += dim1B*dim1B\n two_body = reshape(y[ptr:ptr+dim2B*dim2B], (dim2B, dim2B))\n\n return zero_body,one_body,two_body\n\n\ndef derivative_wrapper(t, y, user_data):\n\n dim1B = user_data[\"dim1B\"]\n dim2B = dim1B*dim1B\n\n\n holes = user_data[\"holes\"]\n particles = user_data[\"particles\"]\n bas1B = user_data[\"bas1B\"]\n bas2B = user_data[\"bas2B\"]\n basph2B = user_data[\"basph2B\"]\n idx2B = user_data[\"idx2B\"]\n idxph2B = user_data[\"idxph2B\"]\n occA_2B = user_data[\"occA_2B\"]\n occB_2B = user_data[\"occB_2B\"]\n occC_2B = user_data[\"occC_2B\"]\n occphA_2B = user_data[\"occphA_2B\"]\n calc_eta = user_data[\"calc_eta\"]\n calc_rhs = user_data[\"calc_rhs\"]\n\n # extract operator pieces from solution vector\n E, f, Gamma = get_operator_from_y(y, dim1B, dim2B)\n\n\n # calculate the generator\n eta1B, eta2B = calc_eta(f, Gamma, user_data)\n\n # calculate the right-hand side\n dE, df, dGamma = calc_rhs(eta1B, eta2B, f, Gamma, user_data)\n\n # convert derivatives into linear array\n dy = np.append([dE], np.append(reshape(df, -1), reshape(dGamma, -1)))\n\n # share data\n user_data[\"dE\"] = dE\n 
user_data[\"eta_norm\"] = np.linalg.norm(eta1B,ord='fro')+np.linalg.norm(eta2B,ord='fro')\n \n return dy\n\n#-----------------------------------------------------------------------------------\n# pairing Hamiltonian\n#-----------------------------------------------------------------------------------\ndef pairing_hamiltonian(delta, g, user_data):\n bas1B = user_data[\"bas1B\"]\n bas2B = user_data[\"bas2B\"]\n idx2B = user_data[\"idx2B\"]\n\n dim = len(bas1B)\n H1B = np.zeros((dim,dim))\n\n for i in bas1B:\n H1B[i,i] = delta*np.floor_divide(i, 2)\n\n dim = len(bas2B)\n H2B = np.zeros((dim, dim))\n\n # spin up states have even indices, spin down the next odd index\n for (i, j) in bas2B:\n if (i % 2 == 0 and j == i+1):\n for (k, l) in bas2B:\n if (k % 2 == 0 and l == k+1):\n H2B[idx2B[(i,j)],idx2B[(k,l)]] = -0.5*g\n H2B[idx2B[(j,i)],idx2B[(k,l)]] = 0.5*g\n H2B[idx2B[(i,j)],idx2B[(l,k)]] = 0.5*g\n H2B[idx2B[(j,i)],idx2B[(l,k)]] = -0.5*g\n \n return H1B, H2B\n\n#-----------------------------------------------------------------------------------\n# normal-ordered pairing Hamiltonian\n#-----------------------------------------------------------------------------------\ndef normal_order(H1B, H2B, user_data):\n bas1B = user_data[\"bas1B\"]\n bas2B = user_data[\"bas2B\"]\n idx2B = user_data[\"idx2B\"]\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n\n # 0B part\n E = 0.0\n for i in holes:\n E += H1B[i,i]\n\n for i in holes:\n for j in holes:\n E += 0.5*H2B[idx2B[(i,j)],idx2B[(i,j)]] \n\n # 1B part\n f = H1B\n for i in bas1B:\n for j in bas1B:\n for h in holes:\n f[i,j] += H2B[idx2B[(i,h)],idx2B[(j,h)]] \n\n # 2B part\n Gamma = H2B\n\n return E, f, Gamma\n\n#-----------------------------------------------------------------------------------\n# Perturbation theory\n#-----------------------------------------------------------------------------------\ndef calc_mbpt2(f, Gamma, user_data):\n DE2 = 0.0\n\n particles = user_data[\"particles\"]\n holes = 
user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n for i in holes:\n for j in holes:\n for a in particles:\n for b in particles:\n denom = f[i,i] + f[j,j] - f[a,a] - f[b,b]\n me = Gamma[idx2B[(a,b)],idx2B[(i,j)]]\n DE2 += 0.25*me*me/denom\n\n return DE2\n\ndef calc_mbpt3(f, Gamma, user_data):\n particles = user_data[\"particles\"]\n holes = user_data[\"holes\"]\n idx2B = user_data[\"idx2B\"]\n\n # DE3 = 0.0\n\n DE3pp = 0.0\n DE3hh = 0.0\n DE3ph = 0.0\n\n for a in particles:\n for b in particles:\n for c in particles:\n for d in particles:\n for i in holes:\n for j in holes:\n denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[i,i] + f[j,j] - f[c,c] - f[d,d])\n me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(a,b)],idx2B[(c,d)]]*\n Gamma[idx2B[(c,d)],idx2B[(i,j)]])\n DE3pp += 0.125*me/denom\n\n for i in holes:\n for j in holes:\n for k in holes:\n for l in holes:\n for a in particles:\n for b in particles:\n denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[l,l] - f[a,a] - f[b,b])\n me = (Gamma[idx2B[(a,b)],idx2B[(k,l)]]*Gamma[idx2B[(k,l)],idx2B[(i,j)]]*\n Gamma[idx2B[(i,j)],idx2B[(a,b)]])\n DE3hh += 0.125*me/denom\n\n for i in holes:\n for j in holes:\n for k in holes:\n for a in particles:\n for b in particles:\n for c in particles:\n denom = (f[i,i] + f[j,j] - f[a,a] - f[b,b])*(f[k,k] + f[j,j] - f[a,a] - f[c,c])\n me = (Gamma[idx2B[(i,j)],idx2B[(a,b)]]*Gamma[idx2B[(k,b)],idx2B[(i,c)]]*\n Gamma[idx2B[(a,c)],idx2B[(k,j)]])\n DE3ph -= me/denom\n\n return DE3pp+DE3hh+DE3ph\n\n\n#------------------------------------------------------------------------------\n# Main program\n#------------------------------------------------------------------------------\ndef main():\n # grab delta and g from the command line\n delta = float(argv[1])\n g = float(argv[2])\n\n particles = 4\n\n # setup shared data\n dim1B = 8\n\n # this defines the reference state\n # 1st state\n holes = [0,1,2,3]\n particles = [4,5,6,7]\n\n # 2nd state\n # holes = [0,1,4,5]\n # particles = 
[2,3,6,7]\n\n # 3rd state\n # holes = [0,1,6,7]\n # particles = [2,3,4,5]\n\n # basis definitions\n bas1B = range(dim1B)\n bas2B = construct_basis_2B(holes, particles)\n basph2B = construct_basis_ph2B(holes, particles)\n\n idx2B = construct_index_2B(bas2B)\n idxph2B = construct_index_2B(basph2B)\n\n # occupation number matrices\n occ1B = construct_occupation_1B(bas1B, holes, particles)\n occA_2B = construct_occupationA_2B(bas2B, occ1B)\n occB_2B = construct_occupationB_2B(bas2B, occ1B)\n occC_2B = construct_occupationC_2B(bas2B, occ1B)\n\n occphA_2B = construct_occupationA_2B(basph2B, occ1B)\n\n # store shared data in a dictionary, so we can avoid passing the basis\n # lookups etc. as separate parameters all the time\n user_data = {\n \"dim1B\": dim1B, \n \"holes\": holes,\n \"particles\": particles,\n \"bas1B\": bas1B,\n \"bas2B\": bas2B,\n \"basph2B\": basph2B,\n \"idx2B\": idx2B,\n \"idxph2B\": idxph2B,\n \"occ1B\": occ1B,\n \"occA_2B\": occA_2B,\n \"occB_2B\": occB_2B,\n \"occC_2B\": occC_2B,\n \"occphA_2B\": occphA_2B,\n\n \"eta_norm\": 0.0, # variables for sharing data between ODE solver\n \"dE\": 0.0, # and main routine\n\n\n \"calc_eta\": eta_white_atan, # specify the generator (function object)\n \"calc_rhs\": flow_imsrg2 # specify the right-hand side and truncation\n }\n\n # set up initial Hamiltonian\n H1B, H2B = pairing_hamiltonian(delta, g, user_data)\n\n E, f, Gamma = normal_order(H1B, H2B, user_data) \n\n # reshape Hamiltonian into a linear array (initial ODE vector)\n y0 = np.append([E], np.append(reshape(f, -1), reshape(Gamma, -1)))\n\n # integrate flow equations \n solver = ode(derivative_wrapper,jac=None)\n solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)\n solver.set_f_params(user_data)\n solver.set_initial_value(y0, 0.)\n\n sfinal = 50\n ds = 0.1\n\n print(\"%-8s %-14s %-14s %-14s %-14s %-14s %-14s %-14s %-14s\"%(\n \"s\", \"E\" , \"DE(2)\", \"DE(3)\", \"E+DE\", \"dE/ds\", \n \"||eta||\", \"||fod||\", \"||Gammaod||\"))\n 
print(\"-\" * 148)\n \n while solver.successful() and solver.t < sfinal:\n ys = solver.integrate(sfinal, step=True)\n \n dim2B = dim1B*dim1B\n E, f, Gamma = get_operator_from_y(ys, dim1B, dim2B)\n\n DE2 = calc_mbpt2(f, Gamma, user_data)\n DE3 = calc_mbpt3(f, Gamma, user_data)\n\n norm_fod = calc_fod_norm(f, user_data)\n norm_Gammaod = calc_Gammaod_norm(Gamma, user_data)\n\n print(\"%8.5f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f %14.8f\"%(\n solver.t, E , DE2, DE3, E+DE2+DE3, user_data[\"dE\"], user_data[\"eta_norm\"], norm_fod, norm_Gammaod))\n if abs(DE2/E) < 10e-8: break\n\n return\n\n\n#------------------------------------------------------------------------------\n# make executable\n#------------------------------------------------------------------------------\nif __name__ == \"__main__\": \n main()\n",
"step-ids": [
17,
18,
20,
21,
29
]
}
|
[
17,
18,
20,
21,
29
] |
import pytest
from mine.models import Application
class TestApplication:
    """Unit tests for the Application model."""

    # Fixture applications; some carry a platform-specific version name.
    app1 = Application("iTunes")
    app1.versions.mac = "iTunes.app"
    app2 = Application("HipChat")
    app3 = Application("Sublime Text")
    app3.versions.linux = "sublime_text"
    app4 = Application("hipchat")

    # (expected string, application) pairs driving test_str.
    str_application = list(
        zip(
            ["iTunes", "HipChat", "Sublime Text", "hipchat"],
            [app1, app2, app3, app4],
        )
    )

    @pytest.mark.parametrize("string,application", str_application)
    def test_str(self, string, application):
        """Verify applications can be converted to strings."""
        assert str(application) == string

    def test_eq(self):
        """Verify applications can be equated."""
        assert self.app2 == self.app4
        assert self.app1 != self.app3

    def test_lt(self):
        """Verify applications can be sorted."""
        assert self.app2 < self.app1
        assert self.app3 > self.app2
|
normal
|
{
"blob_id": "f4df7688ed927e1788ada0ef11f528eab5a52282",
"index": 4899,
"step-1": "<mask token>\n\n\nclass TestApplication:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @pytest.mark.parametrize('string,application', str_application)\n def test_str(self, string, application):\n \"\"\"Verify applications can be converted to strings.\"\"\"\n assert string == str(application)\n\n def test_eq(self):\n \"\"\"Verify applications can be equated.\"\"\"\n assert self.app2 == self.app4\n assert self.app1 != self.app3\n\n def test_lt(self):\n \"\"\"Verify applications can be sorted.\"\"\"\n assert self.app2 < self.app1\n assert self.app3 > self.app2\n",
"step-2": "<mask token>\n\n\nclass TestApplication:\n <mask token>\n app1 = Application('iTunes')\n app1.versions.mac = 'iTunes.app'\n app2 = Application('HipChat')\n app3 = Application('Sublime Text')\n app3.versions.linux = 'sublime_text'\n app4 = Application('hipchat')\n str_application = [('iTunes', app1), ('HipChat', app2), ('Sublime Text',\n app3), ('hipchat', app4)]\n\n @pytest.mark.parametrize('string,application', str_application)\n def test_str(self, string, application):\n \"\"\"Verify applications can be converted to strings.\"\"\"\n assert string == str(application)\n\n def test_eq(self):\n \"\"\"Verify applications can be equated.\"\"\"\n assert self.app2 == self.app4\n assert self.app1 != self.app3\n\n def test_lt(self):\n \"\"\"Verify applications can be sorted.\"\"\"\n assert self.app2 < self.app1\n assert self.app3 > self.app2\n",
"step-3": "<mask token>\n\n\nclass TestApplication:\n \"\"\"Unit tests for the application class.\"\"\"\n app1 = Application('iTunes')\n app1.versions.mac = 'iTunes.app'\n app2 = Application('HipChat')\n app3 = Application('Sublime Text')\n app3.versions.linux = 'sublime_text'\n app4 = Application('hipchat')\n str_application = [('iTunes', app1), ('HipChat', app2), ('Sublime Text',\n app3), ('hipchat', app4)]\n\n @pytest.mark.parametrize('string,application', str_application)\n def test_str(self, string, application):\n \"\"\"Verify applications can be converted to strings.\"\"\"\n assert string == str(application)\n\n def test_eq(self):\n \"\"\"Verify applications can be equated.\"\"\"\n assert self.app2 == self.app4\n assert self.app1 != self.app3\n\n def test_lt(self):\n \"\"\"Verify applications can be sorted.\"\"\"\n assert self.app2 < self.app1\n assert self.app3 > self.app2\n",
"step-4": "import pytest\nfrom mine.models import Application\n\n\nclass TestApplication:\n \"\"\"Unit tests for the application class.\"\"\"\n app1 = Application('iTunes')\n app1.versions.mac = 'iTunes.app'\n app2 = Application('HipChat')\n app3 = Application('Sublime Text')\n app3.versions.linux = 'sublime_text'\n app4 = Application('hipchat')\n str_application = [('iTunes', app1), ('HipChat', app2), ('Sublime Text',\n app3), ('hipchat', app4)]\n\n @pytest.mark.parametrize('string,application', str_application)\n def test_str(self, string, application):\n \"\"\"Verify applications can be converted to strings.\"\"\"\n assert string == str(application)\n\n def test_eq(self):\n \"\"\"Verify applications can be equated.\"\"\"\n assert self.app2 == self.app4\n assert self.app1 != self.app3\n\n def test_lt(self):\n \"\"\"Verify applications can be sorted.\"\"\"\n assert self.app2 < self.app1\n assert self.app3 > self.app2\n",
"step-5": "import pytest\n\nfrom mine.models import Application\n\n\nclass TestApplication:\n \"\"\"Unit tests for the application class.\"\"\"\n\n app1 = Application(\"iTunes\")\n app1.versions.mac = \"iTunes.app\"\n app2 = Application(\"HipChat\")\n app3 = Application(\"Sublime Text\")\n app3.versions.linux = \"sublime_text\"\n app4 = Application(\"hipchat\")\n\n str_application = [\n (\"iTunes\", app1),\n (\"HipChat\", app2),\n (\"Sublime Text\", app3),\n (\"hipchat\", app4),\n ]\n\n @pytest.mark.parametrize(\"string,application\", str_application)\n def test_str(self, string, application):\n \"\"\"Verify applications can be converted to strings.\"\"\"\n assert string == str(application)\n\n def test_eq(self):\n \"\"\"Verify applications can be equated.\"\"\"\n assert self.app2 == self.app4\n assert self.app1 != self.app3\n\n def test_lt(self):\n \"\"\"Verify applications can be sorted.\"\"\"\n assert self.app2 < self.app1\n assert self.app3 > self.app2\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def GE_pearson(this_looper, core_list=None):
    """Correlate gravitational-energy density and density with radius.

    For every core and every quantized movie frame, compute the Pearson
    correlation over the core's tracked particles of
    log(|grad phi|^2 / 8 pi G) vs. log(r), and of log(rho) vs. log(r).

    Parameters
    ----------
    this_looper : looper-like project object exposing ``.tr`` (trackage
        record) and ``.sim_name``.
    core_list : sequence of core ids, optional
        Defaults to every unique core id in the tracker.

    Returns
    -------
    dict
        'PR'      : (ncores, nframes) Pearson r of log(GE) vs log(r)
        'PP'      : matching p-values
        'Prho'    : (ncores, nframes) Pearson r of log(rho) vs log(r)
        'T'       : frame times / free-fall time, shape (nframes, 1)
        'PeakRho' : (ncores, nframes) peak particle density per frame
    """
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)
    name = this_looper.sim_name
    thtr = this_looper.tr
    # Restrict all time series to the quantized movie frames.
    mask = movie_frames.quantized_mask(this_looper).flatten()
    times = thtr.times[mask] + 0  # "+ 0" forces a copy before reshaping
    times.shape = times.size, 1
    times = times / colors.tff
    G = colors.G
    PearsonR = np.zeros([len(core_list), len(times)])
    PearsonP = np.zeros([len(core_list), len(times)])
    PearsonRho = np.zeros([len(core_list), len(times)])
    PeakRho = np.zeros([len(core_list), len(times)])
    for nc, core_id in enumerate(core_list):
        print('GE pearson %s %d' % (name, core_id))
        ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
        # Subsample large cores (every 10th particle) to keep the
        # per-frame fits cheap; 'c' is only used by the disabled plot.
        if ms.nparticles < 1000:
            sl = slice(None)
            c = [0.5] * 4
        else:
            sl = slice(None, None, 10)
            c = [0.1] * 4
        rho = ms.density[sl]
        rho = rho[:, mask]
        PeakRho[nc, :] = rho.max(axis=0)
        gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
        gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
        gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
        # Gravitational energy density, |grad phi|^2 / (8 pi G).
        GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
        RRR = ms.r[sl][:, mask]
        for n in range(GE2.shape[1]):
            the_x = np.log(RRR[:, n])
            the_y = np.log(GE2[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonR[nc, n] = r
            PearsonP[nc, n] = p
            the_y = np.log(rho[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonRho[nc, n] = r
    if 0:
        # Disabled diagnostic: PearsonR vs. time.
        fig, ax = plt.subplots(1, 2)
        ax[0].plot(times, PearsonR)
        fig.savefig('plots_to_sort/phi_box_%s.png' % name)
    return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
            'PeakRho': PeakRho}
    # NOTE(review): an unreachable plotting block that followed this return
    # (and referenced the undefined names ge_min/ge_max) has been removed.
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(hair_dryer)
<|reserved_special_token_0|>
def GE_pearson(this_looper, core_list=None):
    """Correlate gravitational-energy density and density with radius.

    For every core and every quantized movie frame, compute the Pearson
    correlation over the core's tracked particles of
    log(|grad phi|^2 / 8 pi G) vs. log(r), and of log(rho) vs. log(r).

    Parameters
    ----------
    this_looper : looper-like project object exposing ``.tr`` (trackage
        record) and ``.sim_name``.
    core_list : sequence of core ids, optional
        Defaults to every unique core id in the tracker.

    Returns
    -------
    dict
        'PR'      : (ncores, nframes) Pearson r of log(GE) vs log(r)
        'PP'      : matching p-values
        'Prho'    : (ncores, nframes) Pearson r of log(rho) vs log(r)
        'T'       : frame times / free-fall time, shape (nframes, 1)
        'PeakRho' : (ncores, nframes) peak particle density per frame
    """
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)
    name = this_looper.sim_name
    thtr = this_looper.tr
    # Restrict all time series to the quantized movie frames.
    mask = movie_frames.quantized_mask(this_looper).flatten()
    times = thtr.times[mask] + 0  # "+ 0" forces a copy before reshaping
    times.shape = times.size, 1
    times = times / colors.tff
    G = colors.G
    PearsonR = np.zeros([len(core_list), len(times)])
    PearsonP = np.zeros([len(core_list), len(times)])
    PearsonRho = np.zeros([len(core_list), len(times)])
    PeakRho = np.zeros([len(core_list), len(times)])
    for nc, core_id in enumerate(core_list):
        print('GE pearson %s %d' % (name, core_id))
        ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
        # Subsample large cores (every 10th particle) to keep the
        # per-frame fits cheap; 'c' is only used by the disabled plot.
        if ms.nparticles < 1000:
            sl = slice(None)
            c = [0.5] * 4
        else:
            sl = slice(None, None, 10)
            c = [0.1] * 4
        rho = ms.density[sl]
        rho = rho[:, mask]
        PeakRho[nc, :] = rho.max(axis=0)
        gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
        gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
        gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
        # Gravitational energy density, |grad phi|^2 / (8 pi G).
        GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
        RRR = ms.r[sl][:, mask]
        for n in range(GE2.shape[1]):
            the_x = np.log(RRR[:, n])
            the_y = np.log(GE2[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonR[nc, n] = r
            PearsonP[nc, n] = p
            the_y = np.log(rho[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonRho[nc, n] = r
    if 0:
        # Disabled diagnostic: PearsonR vs. time.
        fig, ax = plt.subplots(1, 2)
        ax[0].plot(times, PearsonR)
        fig.savefig('plots_to_sort/phi_box_%s.png' % name)
    return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
            'PeakRho': PeakRho}
    # NOTE(review): an unreachable plotting block that followed this return
    # (and referenced the undefined names ge_min/ge_max) has been removed.
<|reserved_special_token_0|>
# Compute (or reuse, when re-running interactively in the same session)
# the per-simulation Pearson results; 'sims' and 'TL' come from earlier
# in the file.  The dir() check caches 'stuff' across re-executions.
if 'stuff' not in dir():
    stuff = {}
    for sim in sims:
        core_list = np.unique(TL.loops[sim].tr.core_ids)
        stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)
# Peak density vs. grad-phi Pearson r, one curve per core.
# NOTE(review): axbonk is presumably a project axis-formatting helper
# (from the starter2 star-import) -- confirm.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        T = stuff[sim]['T']
        rho = stuff[sim]['PeakRho']
        Rphi = stuff[sim]['PR']
        ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)
        axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)
# Peak density vs. time, one curve per core.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        T = stuff[sim]['T']
        rho = stuff[sim]['PeakRho']
        ax.plot(T, rho.transpose(), c=[0.1] * 4)
        axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)
# Disabled: 2-d histogram of R(rho) vs. R(grad phi), NaNs removed.
if 0:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        c = [0.1] * 4
        XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
        ok = ~np.isnan(XX) * ~np.isnan(YY)
        XX = XX[ok]
        YY = YY[ok]
        xbins = np.linspace(XX.min(), XX.max(), 64)
        ybins = np.linspace(YY.min(), YY.max(), 64)
        hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])
        import pcolormesh_helper as pch
        pch.helper(hist, xb, yb, ax=ax)
        fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)
# Box plots of the per-frame Pearson distributions across cores.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 2)
        Rphi = stuff[sim]['PR']
        ax[0].boxplot(Rphi)
        ax[0].plot(Rphi.mean(axis=0))
        ax[1].boxplot(stuff[sim]['Prho'])
        axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')
        axbonk(ax[1], xlabel='frame', ylabel='R rho')
        fig.savefig('plots_to_sort/Boxes_%s.png' % sim)
# Disabled: smoothed mean Pearson curves, all sims on one axis.
if 0:
    from scipy.ndimage import gaussian_filter
    fig, ax = plt.subplots()
    for sim in stuff:
        Rphi = stuff[sim]['PR']
        Rrho = stuff[sim]['Prho']
        ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'
            )
        ax.plot(Rrho.mean(axis=0), colors.color[sim])
        axbonk(ax, xlabel='frame', ylabel='Rgrad phi')
    fig.savefig('plots_to_sort/MeanR_%s.png' % sim)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(hair_dryer)
<|reserved_special_token_0|>
def GE_pearson(this_looper, core_list=None):
    """Correlate gravitational-energy density and density with radius.

    For every core and every quantized movie frame, compute the Pearson
    correlation over the core's tracked particles of
    log(|grad phi|^2 / 8 pi G) vs. log(r), and of log(rho) vs. log(r).

    Parameters
    ----------
    this_looper : looper-like project object exposing ``.tr`` (trackage
        record) and ``.sim_name``.
    core_list : sequence of core ids, optional
        Defaults to every unique core id in the tracker.

    Returns
    -------
    dict
        'PR'      : (ncores, nframes) Pearson r of log(GE) vs log(r)
        'PP'      : matching p-values
        'Prho'    : (ncores, nframes) Pearson r of log(rho) vs log(r)
        'T'       : frame times / free-fall time, shape (nframes, 1)
        'PeakRho' : (ncores, nframes) peak particle density per frame
    """
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)
    name = this_looper.sim_name
    thtr = this_looper.tr
    # Restrict all time series to the quantized movie frames.
    mask = movie_frames.quantized_mask(this_looper).flatten()
    times = thtr.times[mask] + 0  # "+ 0" forces a copy before reshaping
    times.shape = times.size, 1
    times = times / colors.tff
    G = colors.G
    PearsonR = np.zeros([len(core_list), len(times)])
    PearsonP = np.zeros([len(core_list), len(times)])
    PearsonRho = np.zeros([len(core_list), len(times)])
    PeakRho = np.zeros([len(core_list), len(times)])
    for nc, core_id in enumerate(core_list):
        print('GE pearson %s %d' % (name, core_id))
        ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
        # Subsample large cores (every 10th particle) to keep the
        # per-frame fits cheap; 'c' is only used by the disabled plot.
        if ms.nparticles < 1000:
            sl = slice(None)
            c = [0.5] * 4
        else:
            sl = slice(None, None, 10)
            c = [0.1] * 4
        rho = ms.density[sl]
        rho = rho[:, mask]
        PeakRho[nc, :] = rho.max(axis=0)
        gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
        gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
        gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
        # Gravitational energy density, |grad phi|^2 / (8 pi G).
        GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
        RRR = ms.r[sl][:, mask]
        for n in range(GE2.shape[1]):
            the_x = np.log(RRR[:, n])
            the_y = np.log(GE2[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonR[nc, n] = r
            PearsonP[nc, n] = p
            the_y = np.log(rho[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonRho[nc, n] = r
    if 0:
        # Disabled diagnostic: PearsonR vs. time.
        fig, ax = plt.subplots(1, 2)
        ax[0].plot(times, PearsonR)
        fig.savefig('plots_to_sort/phi_box_%s.png' % name)
    return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
            'PeakRho': PeakRho}
    # NOTE(review): an unreachable plotting block that followed this return
    # (and referenced the undefined names ge_min/ge_max) has been removed.
# Simulations to analyze.
sims = ['u501', 'u502', 'u503']
# Compute (or reuse, when re-running interactively in the same session)
# the per-simulation Pearson results; 'TL' is imported earlier in the
# file.  The dir() check caches 'stuff' across re-executions.
if 'stuff' not in dir():
    stuff = {}
    for sim in sims:
        core_list = np.unique(TL.loops[sim].tr.core_ids)
        stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)
# Peak density vs. grad-phi Pearson r, one curve per core.
# NOTE(review): axbonk is presumably a project axis-formatting helper
# (from the starter2 star-import) -- confirm.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        T = stuff[sim]['T']
        rho = stuff[sim]['PeakRho']
        Rphi = stuff[sim]['PR']
        ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)
        axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)
# Peak density vs. time, one curve per core.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        T = stuff[sim]['T']
        rho = stuff[sim]['PeakRho']
        ax.plot(T, rho.transpose(), c=[0.1] * 4)
        axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)
# Disabled: 2-d histogram of R(rho) vs. R(grad phi), NaNs removed.
if 0:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        c = [0.1] * 4
        XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
        ok = ~np.isnan(XX) * ~np.isnan(YY)
        XX = XX[ok]
        YY = YY[ok]
        xbins = np.linspace(XX.min(), XX.max(), 64)
        ybins = np.linspace(YY.min(), YY.max(), 64)
        hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])
        import pcolormesh_helper as pch
        pch.helper(hist, xb, yb, ax=ax)
        fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)
# Box plots of the per-frame Pearson distributions across cores.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 2)
        Rphi = stuff[sim]['PR']
        ax[0].boxplot(Rphi)
        ax[0].plot(Rphi.mean(axis=0))
        ax[1].boxplot(stuff[sim]['Prho'])
        axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')
        axbonk(ax[1], xlabel='frame', ylabel='R rho')
        fig.savefig('plots_to_sort/Boxes_%s.png' % sim)
# Disabled: smoothed mean Pearson curves, all sims on one axis.
if 0:
    from scipy.ndimage import gaussian_filter
    fig, ax = plt.subplots()
    for sim in stuff:
        Rphi = stuff[sim]['PR']
        Rrho = stuff[sim]['Prho']
        ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'
            )
        ax.plot(Rrho.mean(axis=0), colors.color[sim])
        axbonk(ax, xlabel='frame', ylabel='Rgrad phi')
    fig.savefig('plots_to_sort/MeanR_%s.png' % sim)
<|reserved_special_token_1|>
from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
import movie_frames
def GE_pearson(this_looper, core_list=None):
    """Correlate gravitational-energy density and density with radius.

    For every core and every quantized movie frame, compute the Pearson
    correlation over the core's tracked particles of
    log(|grad phi|^2 / 8 pi G) vs. log(r), and of log(rho) vs. log(r).

    Parameters
    ----------
    this_looper : looper-like project object exposing ``.tr`` (trackage
        record) and ``.sim_name``.
    core_list : sequence of core ids, optional
        Defaults to every unique core id in the tracker.

    Returns
    -------
    dict
        'PR'      : (ncores, nframes) Pearson r of log(GE) vs log(r)
        'PP'      : matching p-values
        'Prho'    : (ncores, nframes) Pearson r of log(rho) vs log(r)
        'T'       : frame times / free-fall time, shape (nframes, 1)
        'PeakRho' : (ncores, nframes) peak particle density per frame
    """
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)
    name = this_looper.sim_name
    thtr = this_looper.tr
    # Restrict all time series to the quantized movie frames.
    mask = movie_frames.quantized_mask(this_looper).flatten()
    times = thtr.times[mask] + 0  # "+ 0" forces a copy before reshaping
    times.shape = times.size, 1
    times = times / colors.tff
    G = colors.G
    PearsonR = np.zeros([len(core_list), len(times)])
    PearsonP = np.zeros([len(core_list), len(times)])
    PearsonRho = np.zeros([len(core_list), len(times)])
    PeakRho = np.zeros([len(core_list), len(times)])
    for nc, core_id in enumerate(core_list):
        print('GE pearson %s %d' % (name, core_id))
        ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)
        # Subsample large cores (every 10th particle) to keep the
        # per-frame fits cheap; 'c' is only used by the disabled plot.
        if ms.nparticles < 1000:
            sl = slice(None)
            c = [0.5] * 4
        else:
            sl = slice(None, None, 10)
            c = [0.1] * 4
        rho = ms.density[sl]
        rho = rho[:, mask]
        PeakRho[nc, :] = rho.max(axis=0)
        gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
        gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
        gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
        # Gravitational energy density, |grad phi|^2 / (8 pi G).
        GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)
        RRR = ms.r[sl][:, mask]
        for n in range(GE2.shape[1]):
            the_x = np.log(RRR[:, n])
            the_y = np.log(GE2[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonR[nc, n] = r
            PearsonP[nc, n] = p
            the_y = np.log(rho[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonRho[nc, n] = r
    if 0:
        # Disabled diagnostic: PearsonR vs. time.
        fig, ax = plt.subplots(1, 2)
        ax[0].plot(times, PearsonR)
        fig.savefig('plots_to_sort/phi_box_%s.png' % name)
    return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,
            'PeakRho': PeakRho}
    # NOTE(review): an unreachable plotting block that followed this return
    # (and referenced the undefined names ge_min/ge_max) has been removed.
# Simulations to analyze.
sims = ['u501', 'u502', 'u503']
# Compute (or reuse, when re-running interactively in the same session)
# the per-simulation Pearson results; 'TL' is imported earlier in the
# file.  The dir() check caches 'stuff' across re-executions.
if 'stuff' not in dir():
    stuff = {}
    for sim in sims:
        core_list = np.unique(TL.loops[sim].tr.core_ids)
        stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)
# Peak density vs. grad-phi Pearson r, one curve per core.
# NOTE(review): axbonk is presumably a project axis-formatting helper
# (from the starter2 star-import) -- confirm.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        T = stuff[sim]['T']
        rho = stuff[sim]['PeakRho']
        Rphi = stuff[sim]['PR']
        ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)
        axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)
# Peak density vs. time, one curve per core.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        T = stuff[sim]['T']
        rho = stuff[sim]['PeakRho']
        ax.plot(T, rho.transpose(), c=[0.1] * 4)
        axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)
# Disabled: 2-d histogram of R(rho) vs. R(grad phi), NaNs removed.
if 0:
    for sim in stuff:
        fig, ax = plt.subplots(1, 1)
        c = [0.1] * 4
        XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
        ok = ~np.isnan(XX) * ~np.isnan(YY)
        XX = XX[ok]
        YY = YY[ok]
        xbins = np.linspace(XX.min(), XX.max(), 64)
        ybins = np.linspace(YY.min(), YY.max(), 64)
        hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])
        import pcolormesh_helper as pch
        pch.helper(hist, xb, yb, ax=ax)
        fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)
# Box plots of the per-frame Pearson distributions across cores.
if 1:
    for sim in stuff:
        fig, ax = plt.subplots(1, 2)
        Rphi = stuff[sim]['PR']
        ax[0].boxplot(Rphi)
        ax[0].plot(Rphi.mean(axis=0))
        ax[1].boxplot(stuff[sim]['Prho'])
        axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')
        axbonk(ax[1], xlabel='frame', ylabel='R rho')
        fig.savefig('plots_to_sort/Boxes_%s.png' % sim)
# Disabled: smoothed mean Pearson curves, all sims on one axis.
if 0:
    from scipy.ndimage import gaussian_filter
    fig, ax = plt.subplots()
    for sim in stuff:
        Rphi = stuff[sim]['PR']
        Rrho = stuff[sim]['Prho']
        ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'
            )
        ax.plot(Rrho.mean(axis=0), colors.color[sim])
        axbonk(ax, xlabel='frame', ylabel='Rgrad phi')
    fig.savefig('plots_to_sort/MeanR_%s.png' % sim)
<|reserved_special_token_1|>
from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
import movie_frames
def GE_pearson(this_looper,core_list=None):
    """Per-core Pearson correlations of log gravitational-field energy and
    log density against log radius, one value per quantized snapshot time.

    Parameters
    ----------
    this_looper : project looper object
        Assumed to expose ``sim_name``, a track ``tr`` with ``core_ids``,
        ``times`` and ``c(core_ids, field)``, and to be compatible with
        ``trackage.mini_scrubber`` and ``movie_frames.quantized_mask``
        (project types -- TODO confirm against the looper definition).
    core_list : array-like of core ids, optional
        Cores to process; defaults to every unique core id in the track.

    Returns
    -------
    dict
        'PR'      : (n_cores, n_times) Pearson r of log r vs log[(grad phi)^2/(8 pi G)]
        'PP'      : matching p-values
        'Prho'    : (n_cores, n_times) Pearson r of log r vs log rho
        'T'       : (n_times, 1) snapshot times in free-fall units
        'PeakRho' : (n_cores, n_times) max density over (subsampled) particles

    Notes
    -----
    The original version carried unreachable plotting code after the
    ``return`` (it also referenced undefined ``ge_min``/``ge_max``) plus an
    ``if 0:`` debug block; both were dead at runtime and have been removed.
    """
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)

    name = this_looper.sim_name
    thtr = this_looper.tr
    # keep only the frames selected by the movie-frame quantization
    mask = movie_frames.quantized_mask(this_looper).flatten()
    times = thtr.times[mask] + 0  # "+0" forces a copy before reshaping
    times.shape = times.size, 1
    times = times / colors.tff    # convert to free-fall units
    G = colors.G

    PearsonR = np.zeros([len(core_list), len(times)])
    PearsonP = np.zeros([len(core_list), len(times)])
    PearsonRho = np.zeros([len(core_list), len(times)])
    PeakRho = np.zeros([len(core_list), len(times)])
    for nc, core_id in enumerate(core_list):
        print('GE pearson %s %d' % (name, core_id))
        ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)

        # subsample large particle sets (every 10th) to keep the fits cheap
        if ms.nparticles < 1000:
            sl = slice(None)
        else:
            sl = slice(None, None, 10)

        rho = ms.density[sl]
        rho = rho[:, mask]
        PeakRho[nc, :] = rho.max(axis=0)

        gx = thtr.c([core_id], 'grav_x')[sl][:, mask]
        gy = thtr.c([core_id], 'grav_y')[sl][:, mask]
        gz = thtr.c([core_id], 'grav_z')[sl][:, mask]
        # gravitational field energy density, (grad phi)^2 / (8 pi G)
        GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)

        RRR = ms.r[sl][:, mask]
        for n in range(GE2.shape[1]):
            # correlate in log-log space, one snapshot (column) at a time
            the_x = np.log(RRR[:, n])
            the_y = np.log(GE2[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonR[nc, n] = r
            PearsonP[nc, n] = p
            the_y = np.log(rho[:, n])
            r, p = scipy.stats.pearsonr(the_x, the_y)
            PearsonRho[nc, n] = r

    return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho,
            'T': times, 'PeakRho': PeakRho}
# Simulations to analyze.  Results are cached in the module-level `stuff`
# dict so an interactive re-run (e.g. ipython `run -i`) skips the slow
# per-core Pearson computation.
sims=['u501', 'u502','u503']
if 'stuff' not in dir():
    stuff={}
    for sim in sims:
        core_list = np.unique(TL.loops[sim].tr.core_ids)
        #core_list=core_list[:10]
        stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list)
# Peak density vs. the r-(grad phi)^2 Pearson r, one faint curve per core.
if 1:
    for sim in stuff:
        fig,ax=plt.subplots(1,1)
        T = stuff[sim]['T']
        rho=stuff[sim]['PeakRho']
        Rphi=stuff[sim]['PR']
        ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4)
        axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim)
# Peak density vs. time, one faint curve per core.
if 1:
    for sim in stuff:
        fig,ax=plt.subplots(1,1)
        T = stuff[sim]['T']
        rho=stuff[sim]['PeakRho']
        ax.plot(T,rho.transpose(),c=[0.1]*4)
        axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')
        fig.savefig('plots_to_sort/peak_rho_%s.png'%sim)
# Disabled: 2d histogram of density-Pearson vs. phi-Pearson (NaNs dropped).
if 0:
    for sim in stuff:
        fig,ax=plt.subplots(1,1)
        c=[0.1]*4
        #ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c)
        #ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c)
        XX,YY= stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
        ok = (~np.isnan(XX))*(~np.isnan(YY))
        XX=XX[ok]
        YY=YY[ok]
        xbins = np.linspace( XX.min(), XX.max(), 64)
        ybins = np.linspace( YY.min(), YY.max(), 64)
        hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins])
        import pcolormesh_helper as pch
        pch.helper(hist,xb,yb,ax=ax)
        fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim)
# Box plots of the per-frame distribution (over cores) of both Pearson r's.
if 1:
    for sim in stuff:
        fig,ax=plt.subplots(1,2)
        Rphi = stuff[sim]['PR']
        ax[0].boxplot( Rphi )
        ax[0].plot( Rphi.mean(axis=0))
        ax[1].boxplot( stuff[sim]['Prho'])
        axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi')
        axbonk(ax[1],xlabel='frame',ylabel='R rho')
        fig.savefig('plots_to_sort/Boxes_%s.png'%(sim))
# Disabled: smoothed per-frame means of both Pearson r's; all sims share one
# figure, so each saved file accumulates every sim plotted so far.
if 0:
    from scipy.ndimage import gaussian_filter
    fig,ax=plt.subplots()
    for sim in stuff:
        Rphi = stuff[sim]['PR']
        Rrho = stuff[sim]['Prho']
        ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--')
        ax.plot( Rrho.mean(axis=0), colors.color[sim])
        axbonk(ax,xlabel='frame',ylabel='Rgrad phi')
        fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))
|
flexible
|
{
"blob_id": "0762c5bec2d796bb7888e3de45e29fb20f88f491",
"index": 392,
"step-1": "<mask token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n 
outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\n<mask token>\n",
"step-2": "<mask token>\nreload(hair_dryer)\n<mask token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', 
ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\n<mask token>\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), 
colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n",
"step-3": "<mask token>\nreload(hair_dryer)\n<mask token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', 
ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\nsims = ['u501', 'u502', 'u503']\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n 
ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n",
"step-4": "from starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\nimport hair_dryer\nreload(hair_dryer)\nimport three_loopers_u500 as TL\nimport movie_frames\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, 
ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\nsims = ['u501', 'u502', 'u503']\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = 
stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n",
"step-5": "\nfrom starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\n\nimport hair_dryer\nreload(hair_dryer)\n\nimport three_loopers_u500 as TL\nimport movie_frames \n\ndef GE_pearson(this_looper,core_list=None):\n\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n\n name = this_looper.sim_name\n thtr=this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times=thtr.times[mask]+0 #the zero makes a copy\n times.shape=times.size,1\n times=times/colors.tff\n G = colors.G\n #gx = thtr.track_dict['grav_x']\n #gy = thtr.track_dict['grav_y']\n #gz = thtr.track_dict['grav_z']\n #GE2 = -1/(8*np.pi)*(gx*gx+gy*gy+gz*gz)\n #ge_min=GE2.min()\n #ge_max=GE2.max()\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d'%(name,core_id))\n\n \n ms = trackage.mini_scrubber(thtr,core_id, do_velocity=False)\n #ms.particle_pos(core_id)\n\n if ms.nparticles < 1000:\n sl=slice(None)\n c=[0.5]*4\n else:\n sl = slice(None,None,10)\n #c=[0,0,0,0.1]\n c=[0.1]*4\n\n rho = ms.density[sl]\n rho = rho[:,mask]\n\n PeakRho[nc,:]=rho.max(axis=0)\n\n gx = thtr.c([core_id],'grav_x')[sl][:,mask]\n gy = thtr.c([core_id],'grav_y')[sl][:,mask]\n gz = thtr.c([core_id],'grav_z')[sl][:,mask]\n GE2 = 1/(8*np.pi*G)*(gx*gx+gy*gy+gz*gz)\n\n RRR = ms.r[sl][:,mask]\n for n in range(GE2.shape[1]):\n the_x=np.log(RRR[:,n])\n the_y=np.log(GE2[:,n])\n #the_y=rho[:,n]\n r,p=scipy.stats.pearsonr(the_x,the_y)\n PearsonR[nc,n]=r\n PearsonP[nc,n]=p\n the_y=np.log(rho[:,n])\n r,p=scipy.stats.pearsonr(the_x,the_y)\n PearsonRho[nc,n]=r\n \n if 0:\n fig,ax=plt.subplots(1,2)\n ax[0].plot(times,PearsonR)\n #ax[0].boxplot(PearsonR)\n #ax[1].boxplot(PearsonRho)\n fig.savefig('plots_to_sort/phi_box_%s.png'%name)\n\n return 
{'PR':PearsonR, 'PP':PearsonP, 'Prho':PearsonRho, 'T':times, 'PeakRho':PeakRho}\n\n\n\n if 0:\n fig,ax=plt.subplots(1,1)\n ax.plot(times , GE2, c=c, linewidth=0.1)\n axbonk(ax,xlabel=r'$t/t_{ff}$', ylabel=r'$(\\nabla \\phi)^2/8 pi G$',yscale='log', ylim=[ge_min,ge_max])\n ax2=ax.twinx()\n c=[1.0,0.1,0.1,0.1]\n ax2.plot(times , rho, c=c, linewidth=0.1)\n axbonk(ax2,xlabel=r'$t/t_{ff}$', ylabel=r'$\\rho$',yscale='log')\n\n outname='plots_to_sort/%s_GE_t_c%04d.png'%(this_looper.sim_name,core_id)\n fig.savefig(outname)\n print(outname)\n\n\n\nsims=['u501', 'u502','u503']\nif 'stuff' not in dir():\n stuff={}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n #core_list=core_list[:10]\n stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n T = stuff[sim]['T']\n rho=stuff[sim]['PeakRho']\n Rphi=stuff[sim]['PR']\n ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4)\n axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n T = stuff[sim]['T']\n rho=stuff[sim]['PeakRho']\n ax.plot(T,rho.transpose(),c=[0.1]*4)\n axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png'%sim)\n\nif 0:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n c=[0.1]*4\n #ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c)\n #ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c)\n XX,YY= stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = (~np.isnan(XX))*(~np.isnan(YY))\n XX=XX[ok]\n YY=YY[ok]\n xbins = np.linspace( XX.min(), XX.max(), 64)\n ybins = np.linspace( YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins])\n import pcolormesh_helper as pch\n pch.helper(hist,xb,yb,ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,2)\n 
Rphi = stuff[sim]['PR']\n ax[0].boxplot( Rphi )\n ax[0].plot( Rphi.mean(axis=0))\n ax[1].boxplot( stuff[sim]['Prho'])\n\n\n axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi')\n axbonk(ax[1],xlabel='frame',ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png'%(sim))\n\n\nif 0:\n from scipy.ndimage import gaussian_filter\n fig,ax=plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--')\n ax.plot( Rrho.mean(axis=0), colors.color[sim])\n\n\n axbonk(ax,xlabel='frame',ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class SmallMobileNetV2(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SmallMobileNetV2Part(nn.Module):
    """Part-based facial landmark regressor.

    Runs four independent small-MobileNetV2 backbones, one per facial part
    crop (left eye, right eye, nose, mouth), each followed by its own small
    fully-connected head.  ``forward`` takes a sequence of four crops and
    returns the four per-part predictions.

    Args:
        widen_factor: channel-width multiplier applied to every conv layer.
        num_classes: total landmark output size (2 * n_points); selects the
            per-part head sizes and must be 68*2, 98*2 or 106*2.

    Raises:
        ValueError: if ``num_classes`` is not one of the supported sizes.
    """

    def __init__(self, widen_factor=1.0, num_classes=68 * 2):
        super(SmallMobileNetV2Part, self).__init__()
        self.block = InvertedResidual
        self.input_channel = 8
        self.last_channel = 64
        # one row per stage: [expand ratio t, out channels c, repeats n, stride s]
        self.interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [
            6, 16, 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6,
            64, 2, 2], [6, 80, 1, 1]]
        # Per-part head output sizes: [left eye, right eye, nose, mouth].
        # Presumably 2 * (landmarks assigned to each part); the mapping onto
        # the full annotation scheme is defined by the caller -- TODO confirm.
        if num_classes == 68 * 2:
            part_dim = [22, 22, 18, 40]
        elif num_classes == 98 * 2:
            part_dim = [36, 36, 18, 40]
        elif num_classes == 106 * 2:
            part_dim = [38, 38, 30, 40]
        else:
            # Previously an unsupported value fell through silently and the
            # constructor crashed below with an UnboundLocalError on part_dim;
            # fail fast with a clear message instead.
            raise ValueError(
                'unsupported num_classes %r: expected 136, 196 or 212' %
                (num_classes,))
        self.left_eye_net = self.make_backbone(widen_factor)
        self.right_eye_net = self.make_backbone(widen_factor)
        self.nose_net = self.make_backbone(widen_factor)
        self.mouth_net = self.make_backbone(widen_factor)
        self.left_eye_loc = self.make_pts_fc(part_dim[0])
        self.right_eye_loc = self.make_pts_fc(part_dim[1])
        self.nose_loc = self.make_pts_fc(part_dim[2])
        self.mouth_loc = self.make_pts_fc(part_dim[3])
        self._initialize_weights()

    def make_backbone(self, widen_factor):
        """Build one MobileNetV2-style feature extractor: stride-2 conv stem,
        the inverted-residual stages, then a 1x1 projection."""
        input_channel = int(self.input_channel * widen_factor)
        last_channel = int(self.last_channel * widen_factor
            ) if widen_factor > 1.0 else self.last_channel
        features = [conv_bn(3, input_channel, 2)]
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # only the first block of each stage applies the stage stride
                if i == 0:
                    features.append(self.block(input_channel,
                        output_channel, s, expand_ratio=t))
                else:
                    features.append(self.block(input_channel,
                        output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        features.append(conv_1x1_bn(input_channel, last_channel))
        return nn.Sequential(*features)

    def make_pts_fc(self, num_classes):
        """Build the small 3-layer FC regression head for one part.

        NOTE(review): the head input is ``self.last_channel`` (64) while the
        backbone's final width is ``int(64 * widen_factor)`` when
        ``widen_factor > 1.0`` -- these disagree for widen_factor > 1;
        confirm whether widen_factor > 1 is ever used.
        """
        pts_net = nn.Sequential(nn.Linear(self.last_channel, 64), nn.PReLU(
            ), nn.Linear(64, 64), nn.PReLU(), nn.Linear(64, num_classes))
        return pts_net

    def forward(self, x):
        """Run all four part branches.

        Args:
            x: sequence of four tensors -- left-eye, right-eye, nose and
               mouth crops, in that order.

        Returns:
            list of four prediction tensors, one per part (same order).
        """
        xs_1 = self.left_eye_net(x[0])
        xs_1 = torch.flatten(xs_1, 1)
        out_1 = self.left_eye_loc(xs_1)
        xs_2 = self.right_eye_net(x[1])
        xs_2 = torch.flatten(xs_2, 1)
        out_2 = self.right_eye_loc(xs_2)
        xs_3 = self.nose_net(x[2])
        xs_3 = torch.flatten(xs_3, 1)
        out_3 = self.nose_loc(xs_3)
        xs_4 = self.mouth_net(x[3])
        xs_4 = torch.flatten(xs_4, 1)
        out_4 = self.mouth_loc(xs_4)
        return [out_1, out_2, out_3, out_4]

    def _initialize_weights(self):
        """He-style normal init for convs, unit-scale/zero-shift batchnorm,
        small-normal weights and zero bias for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvertedResidual(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SmallMobileNetV2(nn.Module):
    """Single-trunk small MobileNetV2 landmark regressor: conv stem,
    inverted-residual stages, 1x1 projection, then a 3-layer FC head that
    outputs ``num_classes`` values."""

    def __init__(self, widen_factor=1.0, num_classes=1000):
        # widen_factor: channel-width multiplier for every conv layer.
        # num_classes: size of the final regression output.
        super(SmallMobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 8
        last_channel = 64
        # one row per stage: [expand ratio t, out channels c, repeats n, stride s]
        interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [6, 16,
            2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, 64, 2,
            2], [6, 80, 1, 1]]
        input_channel = int(input_channel * widen_factor)
        # widening only scales the final width up, never down
        self.last_channel = int(last_channel * widen_factor
            ) if widen_factor > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # only the first block of each stage applies the stage stride
                if i == 0:
                    self.features.append(block(input_channel,
                        output_channel, s, expand_ratio=t))
                else:
                    self.features.append(block(input_channel,
                        output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        self.features = nn.Sequential(*self.features)
        # FC head input is 4 * last_channel, i.e. the flattened feature map
        # is assumed to have 4 spatial positions (2x2) -- TODO confirm the
        # expected input resolution.
        self.pts_net = nn.Sequential(nn.Linear(4 * self.last_channel, 256),
            nn.PReLU(), nn.Linear(256, 256), nn.PReLU(), nn.Linear(256,
            num_classes))
        self._initialize_weights()

    def forward(self, x):
        # Extract features, flatten per sample, regress landmark values.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        pts = self.pts_net(x)
        return pts

    def _initialize_weights(self):
        # He-style normal init for convs, unit/zero for batchnorm, small
        # normal weights and zero bias for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
class SmallMobileNetV2Part(nn.Module):
    """Part-based facial landmark regressor.

    Runs four independent small-MobileNetV2 backbones, one per facial part
    crop (left eye, right eye, nose, mouth), each followed by its own small
    fully-connected head.  ``forward`` takes a sequence of four crops and
    returns the four per-part predictions.

    Args:
        widen_factor: channel-width multiplier applied to every conv layer.
        num_classes: total landmark output size (2 * n_points); selects the
            per-part head sizes and must be 68*2, 98*2 or 106*2.

    Raises:
        ValueError: if ``num_classes`` is not one of the supported sizes.
    """

    def __init__(self, widen_factor=1.0, num_classes=68 * 2):
        super(SmallMobileNetV2Part, self).__init__()
        self.block = InvertedResidual
        self.input_channel = 8
        self.last_channel = 64
        # one row per stage: [expand ratio t, out channels c, repeats n, stride s]
        self.interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [
            6, 16, 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6,
            64, 2, 2], [6, 80, 1, 1]]
        # Per-part head output sizes: [left eye, right eye, nose, mouth].
        # Presumably 2 * (landmarks assigned to each part); the mapping onto
        # the full annotation scheme is defined by the caller -- TODO confirm.
        if num_classes == 68 * 2:
            part_dim = [22, 22, 18, 40]
        elif num_classes == 98 * 2:
            part_dim = [36, 36, 18, 40]
        elif num_classes == 106 * 2:
            part_dim = [38, 38, 30, 40]
        else:
            # Previously an unsupported value fell through silently and the
            # constructor crashed below with an UnboundLocalError on part_dim;
            # fail fast with a clear message instead.
            raise ValueError(
                'unsupported num_classes %r: expected 136, 196 or 212' %
                (num_classes,))
        self.left_eye_net = self.make_backbone(widen_factor)
        self.right_eye_net = self.make_backbone(widen_factor)
        self.nose_net = self.make_backbone(widen_factor)
        self.mouth_net = self.make_backbone(widen_factor)
        self.left_eye_loc = self.make_pts_fc(part_dim[0])
        self.right_eye_loc = self.make_pts_fc(part_dim[1])
        self.nose_loc = self.make_pts_fc(part_dim[2])
        self.mouth_loc = self.make_pts_fc(part_dim[3])
        self._initialize_weights()

    def make_backbone(self, widen_factor):
        """Build one MobileNetV2-style feature extractor: stride-2 conv stem,
        the inverted-residual stages, then a 1x1 projection."""
        input_channel = int(self.input_channel * widen_factor)
        last_channel = int(self.last_channel * widen_factor
            ) if widen_factor > 1.0 else self.last_channel
        features = [conv_bn(3, input_channel, 2)]
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # only the first block of each stage applies the stage stride
                if i == 0:
                    features.append(self.block(input_channel,
                        output_channel, s, expand_ratio=t))
                else:
                    features.append(self.block(input_channel,
                        output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        features.append(conv_1x1_bn(input_channel, last_channel))
        return nn.Sequential(*features)

    def make_pts_fc(self, num_classes):
        """Build the small 3-layer FC regression head for one part.

        NOTE(review): the head input is ``self.last_channel`` (64) while the
        backbone's final width is ``int(64 * widen_factor)`` when
        ``widen_factor > 1.0`` -- these disagree for widen_factor > 1;
        confirm whether widen_factor > 1 is ever used.
        """
        pts_net = nn.Sequential(nn.Linear(self.last_channel, 64), nn.PReLU(
            ), nn.Linear(64, 64), nn.PReLU(), nn.Linear(64, num_classes))
        return pts_net

    def forward(self, x):
        """Run all four part branches.

        Args:
            x: sequence of four tensors -- left-eye, right-eye, nose and
               mouth crops, in that order.

        Returns:
            list of four prediction tensors, one per part (same order).
        """
        xs_1 = self.left_eye_net(x[0])
        xs_1 = torch.flatten(xs_1, 1)
        out_1 = self.left_eye_loc(xs_1)
        xs_2 = self.right_eye_net(x[1])
        xs_2 = torch.flatten(xs_2, 1)
        out_2 = self.right_eye_loc(xs_2)
        xs_3 = self.nose_net(x[2])
        xs_3 = torch.flatten(xs_3, 1)
        out_3 = self.nose_loc(xs_3)
        xs_4 = self.mouth_net(x[3])
        xs_4 = torch.flatten(xs_4, 1)
        out_4 = self.mouth_loc(xs_4)
        return [out_1, out_2, out_3, out_4]

    def _initialize_weights(self):
        """He-style normal init for convs, unit-scale/zero-shift batchnorm,
        small-normal weights and zero bias for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block.

    Optional 1x1 pointwise expansion (skipped when ``expand_ratio == 1``),
    a 3x3 depthwise convolution, then a 1x1 linear projection.  A residual
    shortcut is added when the block preserves both resolution (stride 1)
    and channel count.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        mid_channels = round(inp * expand_ratio)
        # shortcut only when spatial size and width are unchanged
        self.use_res_connect = stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            layers += [
                nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
                nn.BatchNorm2d(mid_channels),
                nn.ReLU(inplace=True),
            ]
        layers += [
            # 3x3 depthwise (groups == channels)
            nn.Conv2d(mid_channels, mid_channels, 3, stride, 1,
                      groups=mid_channels, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            # 1x1 linear projection (no activation)
            nn.Conv2d(mid_channels, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class SmallMobileNetV2(nn.Module):
    """Compact MobileNetV2 backbone with a 3-layer FC head for point regression.

    The head's first Linear expects the flattened backbone output to carry
    4 * last_channel features, i.e. a 2x2 spatial map at `last_channel` depth.
    """

    def __init__(self, widen_factor=1.0, num_classes=1000):
        super(SmallMobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 8
        last_channel = 64
        # Each row: (expand_ratio t, output channels c, repeats n, first stride s).
        interverted_residual_setting = [
            [1, 8, 1, 1],
            [6, 12, 2, 2],
            [6, 16, 2, 2],
            [6, 24, 3, 2],
            [6, 32, 3, 2],
            [6, 48, 3, 2],
            [6, 64, 2, 2],
            [6, 80, 1, 1],
        ]
        input_channel = int(input_channel * widen_factor)
        # Widen the final stage only when the factor enlarges the net.
        self.last_channel = int(last_channel * widen_factor) if widen_factor > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]  # strided stem from RGB
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # Downsample only on the first repeat of each stage.
                stride = s if i == 0 else 1
                self.features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        self.features = nn.Sequential(*self.features)
        # Regression head over the flattened feature map.
        self.pts_net = nn.Sequential(
            nn.Linear(4 * self.last_channel, 256),
            nn.PReLU(),
            nn.Linear(256, 256),
            nn.PReLU(),
            nn.Linear(256, num_classes),
        )
        self._initialize_weights()

    def forward(self, x):
        """Extract features, flatten per sample, and regress the points."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.pts_net(flat)

    def _initialize_weights(self):
        """He-normal convs, unit-gain BatchNorm, N(0, 0.01) Linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
class SmallMobileNetV2Part(nn.Module):
    """Part-based facial-landmark regressor.

    Runs four independent small MobileNetV2 backbones over crops of the left
    eye, right eye, nose and mouth, and regresses each part's landmark
    coordinates with its own small MLP head.

    Args:
        widen_factor: channel-width multiplier applied to every stage.
        num_classes: total coordinate count (2 per point); must be one of
            136 (68 pts), 196 (98 pts) or 212 (106 pts) — this selects how
            many coordinates each part head outputs.

    Raises:
        ValueError: if num_classes is not one of the supported layouts.
    """

    def __init__(self, widen_factor=1.0, num_classes=68 * 2):
        super(SmallMobileNetV2Part, self).__init__()
        self.block = InvertedResidual
        self.input_channel = 8
        self.last_channel = 64
        # Each row: (expand_ratio t, output channels c, repeats n, first stride s).
        self.interverted_residual_setting = [
            [1, 8, 1, 1],
            [6, 12, 2, 2],
            [6, 16, 2, 2],
            [6, 24, 3, 2],
            [6, 32, 3, 2],
            [6, 48, 3, 2],
            [6, 64, 2, 2],
            [6, 80, 1, 1],
        ]
        # Output size per part head: (left eye, right eye, nose, mouth).
        if num_classes == 68 * 2:
            part_dim = [22, 22, 18, 40]
        elif num_classes == 98 * 2:
            part_dim = [36, 36, 18, 40]
        elif num_classes == 106 * 2:
            part_dim = [38, 38, 30, 40]
        else:
            # Bug fix: an unsupported value previously fell through and the
            # later use of part_dim raised an opaque NameError.
            raise ValueError(
                'unsupported num_classes %r; expected 136, 196 or 212' % (num_classes,))
        # One backbone per facial part.
        self.left_eye_net = self.make_backbone(widen_factor)
        self.right_eye_net = self.make_backbone(widen_factor)
        self.nose_net = self.make_backbone(widen_factor)
        self.mouth_net = self.make_backbone(widen_factor)
        # One regression head per facial part.
        self.left_eye_loc = self.make_pts_fc(part_dim[0])
        self.right_eye_loc = self.make_pts_fc(part_dim[1])
        self.nose_loc = self.make_pts_fc(part_dim[2])
        self.mouth_loc = self.make_pts_fc(part_dim[3])
        self._initialize_weights()

    def make_backbone(self, widen_factor):
        """Build one MobileNetV2-style feature extractor scaled by widen_factor."""
        input_channel = int(self.input_channel * widen_factor)
        # Widen the final stage only when the factor enlarges the net.
        last_channel = int(self.last_channel * widen_factor) if widen_factor > 1.0 else self.last_channel
        features = [conv_bn(3, input_channel, 2)]  # strided stem from RGB
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # Downsample only on the first repeat of each stage.
                stride = s if i == 0 else 1
                features.append(self.block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        features.append(conv_1x1_bn(input_channel, last_channel))
        return nn.Sequential(*features)

    def make_pts_fc(self, num_classes):
        """Build a 3-layer MLP head mapping backbone features to num_classes values."""
        return nn.Sequential(
            nn.Linear(self.last_channel, 64),
            nn.PReLU(),
            nn.Linear(64, 64),
            nn.PReLU(),
            nn.Linear(64, num_classes),
        )

    def forward(self, x):
        """Run each part crop through its backbone and head.

        Args:
            x: sequence of four image batches ordered (left eye, right eye,
               nose, mouth).

        Returns:
            List of four per-part landmark tensors, in the same order.
        """
        nets = (self.left_eye_net, self.right_eye_net, self.nose_net, self.mouth_net)
        heads = (self.left_eye_loc, self.right_eye_loc, self.nose_loc, self.mouth_loc)
        outputs = []
        for crop, net, head in zip(x, nets, heads):
            feat = torch.flatten(net(crop), 1)
            outputs.append(head(feat))
        return outputs

    def _initialize_weights(self):
        """He-normal convs, unit-gain BatchNorm, N(0, 0.01) Linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def conv_bn(inp, oup, stride):
    """3x3 conv (pad 1, no bias) -> BatchNorm -> ReLU, mapping inp to oup channels."""
    layers = [
        nn.Conv2d(inp, oup, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1, no bias) conv -> BatchNorm -> ReLU channel mixer."""
    pointwise = nn.Conv2d(inp, oup, kernel_size=1, stride=1, padding=0, bias=False)
    return nn.Sequential(pointwise, nn.BatchNorm2d(oup), nn.ReLU(inplace=True))
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise 3x3 -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = round(inp * expand_ratio)
        # Shortcut only when spatial size and channel count are both preserved.
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # Pointwise expansion.
            layers += [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
            ]
        layers += [
            # Depthwise 3x3 (groups == channels).
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU(inplace=True),
            # Linear pointwise projection (no activation after it).
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class SmallMobileNetV2(nn.Module):
    """Compact MobileNetV2 backbone with a 3-layer FC head for point regression.

    The head's first Linear expects the flattened backbone output to carry
    4 * last_channel features, i.e. a 2x2 spatial map at `last_channel` depth.
    """

    def __init__(self, widen_factor=1.0, num_classes=1000):
        super(SmallMobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 8
        last_channel = 64
        # Each row: (expand_ratio t, output channels c, repeats n, first stride s).
        interverted_residual_setting = [
            [1, 8, 1, 1],
            [6, 12, 2, 2],
            [6, 16, 2, 2],
            [6, 24, 3, 2],
            [6, 32, 3, 2],
            [6, 48, 3, 2],
            [6, 64, 2, 2],
            [6, 80, 1, 1],
        ]
        input_channel = int(input_channel * widen_factor)
        # Widen the final stage only when the factor enlarges the net.
        self.last_channel = int(last_channel * widen_factor) if widen_factor > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]  # strided stem from RGB
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # Downsample only on the first repeat of each stage.
                stride = s if i == 0 else 1
                self.features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        self.features = nn.Sequential(*self.features)
        # Regression head over the flattened feature map.
        self.pts_net = nn.Sequential(
            nn.Linear(4 * self.last_channel, 256),
            nn.PReLU(),
            nn.Linear(256, 256),
            nn.PReLU(),
            nn.Linear(256, num_classes),
        )
        self._initialize_weights()

    def forward(self, x):
        """Extract features, flatten per sample, and regress the points."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.pts_net(flat)

    def _initialize_weights(self):
        """He-normal convs, unit-gain BatchNorm, N(0, 0.01) Linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
class SmallMobileNetV2Part(nn.Module):
    """Part-based facial-landmark regressor.

    Runs four independent small MobileNetV2 backbones over crops of the left
    eye, right eye, nose and mouth, and regresses each part's landmark
    coordinates with its own small MLP head.

    Args:
        widen_factor: channel-width multiplier applied to every stage.
        num_classes: total coordinate count (2 per point); must be one of
            136 (68 pts), 196 (98 pts) or 212 (106 pts) — this selects how
            many coordinates each part head outputs.

    Raises:
        ValueError: if num_classes is not one of the supported layouts.
    """

    def __init__(self, widen_factor=1.0, num_classes=68 * 2):
        super(SmallMobileNetV2Part, self).__init__()
        self.block = InvertedResidual
        self.input_channel = 8
        self.last_channel = 64
        # Each row: (expand_ratio t, output channels c, repeats n, first stride s).
        self.interverted_residual_setting = [
            [1, 8, 1, 1],
            [6, 12, 2, 2],
            [6, 16, 2, 2],
            [6, 24, 3, 2],
            [6, 32, 3, 2],
            [6, 48, 3, 2],
            [6, 64, 2, 2],
            [6, 80, 1, 1],
        ]
        # Output size per part head: (left eye, right eye, nose, mouth).
        if num_classes == 68 * 2:
            part_dim = [22, 22, 18, 40]
        elif num_classes == 98 * 2:
            part_dim = [36, 36, 18, 40]
        elif num_classes == 106 * 2:
            part_dim = [38, 38, 30, 40]
        else:
            # Bug fix: an unsupported value previously fell through and the
            # later use of part_dim raised an opaque NameError.
            raise ValueError(
                'unsupported num_classes %r; expected 136, 196 or 212' % (num_classes,))
        # One backbone per facial part.
        self.left_eye_net = self.make_backbone(widen_factor)
        self.right_eye_net = self.make_backbone(widen_factor)
        self.nose_net = self.make_backbone(widen_factor)
        self.mouth_net = self.make_backbone(widen_factor)
        # One regression head per facial part.
        self.left_eye_loc = self.make_pts_fc(part_dim[0])
        self.right_eye_loc = self.make_pts_fc(part_dim[1])
        self.nose_loc = self.make_pts_fc(part_dim[2])
        self.mouth_loc = self.make_pts_fc(part_dim[3])
        self._initialize_weights()

    def make_backbone(self, widen_factor):
        """Build one MobileNetV2-style feature extractor scaled by widen_factor."""
        input_channel = int(self.input_channel * widen_factor)
        # Widen the final stage only when the factor enlarges the net.
        last_channel = int(self.last_channel * widen_factor) if widen_factor > 1.0 else self.last_channel
        features = [conv_bn(3, input_channel, 2)]  # strided stem from RGB
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # Downsample only on the first repeat of each stage.
                stride = s if i == 0 else 1
                features.append(self.block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        features.append(conv_1x1_bn(input_channel, last_channel))
        return nn.Sequential(*features)

    def make_pts_fc(self, num_classes):
        """Build a 3-layer MLP head mapping backbone features to num_classes values."""
        return nn.Sequential(
            nn.Linear(self.last_channel, 64),
            nn.PReLU(),
            nn.Linear(64, 64),
            nn.PReLU(),
            nn.Linear(64, num_classes),
        )

    def forward(self, x):
        """Run each part crop through its backbone and head.

        Args:
            x: sequence of four image batches ordered (left eye, right eye,
               nose, mouth).

        Returns:
            List of four per-part landmark tensors, in the same order.
        """
        nets = (self.left_eye_net, self.right_eye_net, self.nose_net, self.mouth_net)
        heads = (self.left_eye_loc, self.right_eye_loc, self.nose_loc, self.mouth_loc)
        outputs = []
        for crop, net, head in zip(x, nets, heads):
            feat = torch.flatten(net(crop), 1)
            outputs.append(head(feat))
        return outputs

    def _initialize_weights(self):
        """He-normal convs, unit-gain BatchNorm, N(0, 0.01) Linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
<|reserved_special_token_1|>
from __future__ import division
import torch
import torch.nn as nn
import math
def conv_bn(inp, oup, stride):
    """3x3 conv (pad 1, no bias) -> BatchNorm -> ReLU, mapping inp to oup channels."""
    layers = [
        nn.Conv2d(inp, oup, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1, no bias) conv -> BatchNorm -> ReLU channel mixer."""
    pointwise = nn.Conv2d(inp, oup, kernel_size=1, stride=1, padding=0, bias=False)
    return nn.Sequential(pointwise, nn.BatchNorm2d(oup), nn.ReLU(inplace=True))
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise 3x3 -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = round(inp * expand_ratio)
        # Shortcut only when spatial size and channel count are both preserved.
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # Pointwise expansion.
            layers += [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
            ]
        layers += [
            # Depthwise 3x3 (groups == channels).
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU(inplace=True),
            # Linear pointwise projection (no activation after it).
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class SmallMobileNetV2(nn.Module):
    """Compact MobileNetV2 backbone with a 3-layer FC head for point regression.

    The head's first Linear expects the flattened backbone output to carry
    4 * last_channel features, i.e. a 2x2 spatial map at `last_channel` depth.
    """

    def __init__(self, widen_factor=1.0, num_classes=1000):
        super(SmallMobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 8
        last_channel = 64
        # Each row: (expand_ratio t, output channels c, repeats n, first stride s).
        interverted_residual_setting = [
            [1, 8, 1, 1],
            [6, 12, 2, 2],
            [6, 16, 2, 2],
            [6, 24, 3, 2],
            [6, 32, 3, 2],
            [6, 48, 3, 2],
            [6, 64, 2, 2],
            [6, 80, 1, 1],
        ]
        input_channel = int(input_channel * widen_factor)
        # Widen the final stage only when the factor enlarges the net.
        self.last_channel = int(last_channel * widen_factor) if widen_factor > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]  # strided stem from RGB
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # Downsample only on the first repeat of each stage.
                stride = s if i == 0 else 1
                self.features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        self.features = nn.Sequential(*self.features)
        # Regression head over the flattened feature map.
        self.pts_net = nn.Sequential(
            nn.Linear(4 * self.last_channel, 256),
            nn.PReLU(),
            nn.Linear(256, 256),
            nn.PReLU(),
            nn.Linear(256, num_classes),
        )
        self._initialize_weights()

    def forward(self, x):
        """Extract features, flatten per sample, and regress the points."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.pts_net(flat)

    def _initialize_weights(self):
        """He-normal convs, unit-gain BatchNorm, N(0, 0.01) Linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
class SmallMobileNetV2Part(nn.Module):
    """Part-based facial-landmark regressor.

    Runs four independent small MobileNetV2 backbones over crops of the left
    eye, right eye, nose and mouth, and regresses each part's landmark
    coordinates with its own small MLP head.

    Args:
        widen_factor: channel-width multiplier applied to every stage.
        num_classes: total coordinate count (2 per point); must be one of
            136 (68 pts), 196 (98 pts) or 212 (106 pts) — this selects how
            many coordinates each part head outputs.

    Raises:
        ValueError: if num_classes is not one of the supported layouts.
    """

    def __init__(self, widen_factor=1.0, num_classes=68 * 2):
        super(SmallMobileNetV2Part, self).__init__()
        self.block = InvertedResidual
        self.input_channel = 8
        self.last_channel = 64
        # Each row: (expand_ratio t, output channels c, repeats n, first stride s).
        self.interverted_residual_setting = [
            [1, 8, 1, 1],
            [6, 12, 2, 2],
            [6, 16, 2, 2],
            [6, 24, 3, 2],
            [6, 32, 3, 2],
            [6, 48, 3, 2],
            [6, 64, 2, 2],
            [6, 80, 1, 1],
        ]
        # Output size per part head: (left eye, right eye, nose, mouth).
        if num_classes == 68 * 2:
            part_dim = [22, 22, 18, 40]
        elif num_classes == 98 * 2:
            part_dim = [36, 36, 18, 40]
        elif num_classes == 106 * 2:
            part_dim = [38, 38, 30, 40]
        else:
            # Bug fix: an unsupported value previously fell through and the
            # later use of part_dim raised an opaque NameError.
            raise ValueError(
                'unsupported num_classes %r; expected 136, 196 or 212' % (num_classes,))
        # One backbone per facial part.
        self.left_eye_net = self.make_backbone(widen_factor)
        self.right_eye_net = self.make_backbone(widen_factor)
        self.nose_net = self.make_backbone(widen_factor)
        self.mouth_net = self.make_backbone(widen_factor)
        # One regression head per facial part.
        self.left_eye_loc = self.make_pts_fc(part_dim[0])
        self.right_eye_loc = self.make_pts_fc(part_dim[1])
        self.nose_loc = self.make_pts_fc(part_dim[2])
        self.mouth_loc = self.make_pts_fc(part_dim[3])
        self._initialize_weights()

    def make_backbone(self, widen_factor):
        """Build one MobileNetV2-style feature extractor scaled by widen_factor."""
        input_channel = int(self.input_channel * widen_factor)
        # Widen the final stage only when the factor enlarges the net.
        last_channel = int(self.last_channel * widen_factor) if widen_factor > 1.0 else self.last_channel
        features = [conv_bn(3, input_channel, 2)]  # strided stem from RGB
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * widen_factor)
            for i in range(n):
                # Downsample only on the first repeat of each stage.
                stride = s if i == 0 else 1
                features.append(self.block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        features.append(conv_1x1_bn(input_channel, last_channel))
        return nn.Sequential(*features)

    def make_pts_fc(self, num_classes):
        """Build a 3-layer MLP head mapping backbone features to num_classes values."""
        return nn.Sequential(
            nn.Linear(self.last_channel, 64),
            nn.PReLU(),
            nn.Linear(64, 64),
            nn.PReLU(),
            nn.Linear(64, num_classes),
        )

    def forward(self, x):
        """Run each part crop through its backbone and head.

        Args:
            x: sequence of four image batches ordered (left eye, right eye,
               nose, mouth).

        Returns:
            List of four per-part landmark tensors, in the same order.
        """
        nets = (self.left_eye_net, self.right_eye_net, self.nose_net, self.mouth_net)
        heads = (self.left_eye_loc, self.right_eye_loc, self.nose_loc, self.mouth_loc)
        outputs = []
        for crop, net, head in zip(x, nets, heads):
            feat = torch.flatten(net(crop), 1)
            outputs.append(head(feat))
        return outputs

    def _initialize_weights(self):
        """He-normal convs, unit-gain BatchNorm, N(0, 0.01) Linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
|
flexible
|
{
"blob_id": "be1638638c70cf761bf5d2f0eb474b44684dfa47",
"index": 4657,
"step-1": "<mask token>\n\n\nclass SmallMobileNetV2(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SmallMobileNetV2Part(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=68 * 2):\n super(SmallMobileNetV2Part, self).__init__()\n self.block = InvertedResidual\n self.input_channel = 8\n self.last_channel = 64\n self.interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [\n 6, 16, 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, \n 64, 2, 2], [6, 80, 1, 1]]\n if num_classes == 68 * 2:\n part_dim = [22, 22, 18, 40]\n elif num_classes == 98 * 2:\n part_dim = [36, 36, 18, 40]\n elif num_classes == 106 * 2:\n part_dim = [38, 38, 30, 40]\n self.left_eye_net = self.make_backbone(widen_factor)\n self.right_eye_net = self.make_backbone(widen_factor)\n self.nose_net = self.make_backbone(widen_factor)\n self.mouth_net = self.make_backbone(widen_factor)\n self.left_eye_loc = self.make_pts_fc(part_dim[0])\n self.right_eye_loc = self.make_pts_fc(part_dim[1])\n self.nose_loc = self.make_pts_fc(part_dim[2])\n self.mouth_loc = self.make_pts_fc(part_dim[3])\n self._initialize_weights()\n\n def make_backbone(self, widen_factor):\n input_channel = int(self.input_channel * widen_factor)\n last_channel = int(self.last_channel * widen_factor\n ) if widen_factor > 1.0 else self.last_channel\n features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in self.interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n features.append(self.block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n features.append(self.block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n features.append(conv_1x1_bn(input_channel, last_channel))\n return nn.Sequential(*features)\n\n def make_pts_fc(self, num_classes):\n pts_net = nn.Sequential(nn.Linear(self.last_channel, 64), nn.PReLU(\n ), nn.Linear(64, 64), nn.PReLU(), nn.Linear(64, num_classes))\n return 
pts_net\n\n def forward(self, x):\n xs_1 = self.left_eye_net(x[0])\n xs_1 = torch.flatten(xs_1, 1)\n out_1 = self.left_eye_loc(xs_1)\n xs_2 = self.right_eye_net(x[1])\n xs_2 = torch.flatten(xs_2, 1)\n out_2 = self.right_eye_loc(xs_2)\n xs_3 = self.nose_net(x[2])\n xs_3 = torch.flatten(xs_3, 1)\n out_3 = self.nose_loc(xs_3)\n xs_4 = self.mouth_net(x[3])\n xs_4 = torch.flatten(xs_4, 1)\n out_4 = self.mouth_loc(xs_4)\n return [out_1, out_2, out_3, out_4]\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n",
"step-2": "<mask token>\n\n\nclass InvertedResidual(nn.Module):\n <mask token>\n <mask token>\n\n\nclass SmallMobileNetV2(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=1000):\n super(SmallMobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 8\n last_channel = 64\n interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [6, 16,\n 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, 64, 2, \n 2], [6, 80, 1, 1]]\n input_channel = int(input_channel * widen_factor)\n self.last_channel = int(last_channel * widen_factor\n ) if widen_factor > 1.0 else last_channel\n self.features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n self.features.append(block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n self.features.append(block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n self.features.append(conv_1x1_bn(input_channel, self.last_channel))\n self.features = nn.Sequential(*self.features)\n self.pts_net = nn.Sequential(nn.Linear(4 * self.last_channel, 256),\n nn.PReLU(), nn.Linear(256, 256), nn.PReLU(), nn.Linear(256,\n num_classes))\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n pts = self.pts_net(x)\n return pts\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\nclass SmallMobileNetV2Part(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=68 * 2):\n super(SmallMobileNetV2Part, self).__init__()\n 
self.block = InvertedResidual\n self.input_channel = 8\n self.last_channel = 64\n self.interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [\n 6, 16, 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, \n 64, 2, 2], [6, 80, 1, 1]]\n if num_classes == 68 * 2:\n part_dim = [22, 22, 18, 40]\n elif num_classes == 98 * 2:\n part_dim = [36, 36, 18, 40]\n elif num_classes == 106 * 2:\n part_dim = [38, 38, 30, 40]\n self.left_eye_net = self.make_backbone(widen_factor)\n self.right_eye_net = self.make_backbone(widen_factor)\n self.nose_net = self.make_backbone(widen_factor)\n self.mouth_net = self.make_backbone(widen_factor)\n self.left_eye_loc = self.make_pts_fc(part_dim[0])\n self.right_eye_loc = self.make_pts_fc(part_dim[1])\n self.nose_loc = self.make_pts_fc(part_dim[2])\n self.mouth_loc = self.make_pts_fc(part_dim[3])\n self._initialize_weights()\n\n def make_backbone(self, widen_factor):\n input_channel = int(self.input_channel * widen_factor)\n last_channel = int(self.last_channel * widen_factor\n ) if widen_factor > 1.0 else self.last_channel\n features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in self.interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n features.append(self.block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n features.append(self.block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n features.append(conv_1x1_bn(input_channel, last_channel))\n return nn.Sequential(*features)\n\n def make_pts_fc(self, num_classes):\n pts_net = nn.Sequential(nn.Linear(self.last_channel, 64), nn.PReLU(\n ), nn.Linear(64, 64), nn.PReLU(), nn.Linear(64, num_classes))\n return pts_net\n\n def forward(self, x):\n xs_1 = self.left_eye_net(x[0])\n xs_1 = torch.flatten(xs_1, 1)\n out_1 = self.left_eye_loc(xs_1)\n xs_2 = self.right_eye_net(x[1])\n xs_2 = torch.flatten(xs_2, 1)\n out_2 = self.right_eye_loc(xs_2)\n xs_3 = self.nose_net(x[2])\n xs_3 = 
torch.flatten(xs_3, 1)\n out_3 = self.nose_loc(xs_3)\n xs_4 = self.mouth_net(x[3])\n xs_4 = torch.flatten(xs_4, 1)\n out_4 = self.mouth_loc(xs_4)\n return [out_1, out_2, out_3, out_4]\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n",
"step-3": "<mask token>\n\n\nclass InvertedResidual(nn.Module):\n\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n if expand_ratio == 1:\n self.conv = nn.Sequential(nn.Conv2d(hidden_dim, hidden_dim, 3,\n stride, 1, groups=hidden_dim, bias=False), nn.BatchNorm2d(\n hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim,\n oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup))\n else:\n self.conv = nn.Sequential(nn.Conv2d(inp, hidden_dim, 1, 1, 0,\n bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=\n True), nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,\n groups=hidden_dim, bias=False), nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, oup, 1, 1, 0,\n bias=False), nn.BatchNorm2d(oup))\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass SmallMobileNetV2(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=1000):\n super(SmallMobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 8\n last_channel = 64\n interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [6, 16,\n 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, 64, 2, \n 2], [6, 80, 1, 1]]\n input_channel = int(input_channel * widen_factor)\n self.last_channel = int(last_channel * widen_factor\n ) if widen_factor > 1.0 else last_channel\n self.features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n self.features.append(block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n self.features.append(block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n self.features.append(conv_1x1_bn(input_channel, self.last_channel))\n 
self.features = nn.Sequential(*self.features)\n self.pts_net = nn.Sequential(nn.Linear(4 * self.last_channel, 256),\n nn.PReLU(), nn.Linear(256, 256), nn.PReLU(), nn.Linear(256,\n num_classes))\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n pts = self.pts_net(x)\n return pts\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\nclass SmallMobileNetV2Part(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=68 * 2):\n super(SmallMobileNetV2Part, self).__init__()\n self.block = InvertedResidual\n self.input_channel = 8\n self.last_channel = 64\n self.interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [\n 6, 16, 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, \n 64, 2, 2], [6, 80, 1, 1]]\n if num_classes == 68 * 2:\n part_dim = [22, 22, 18, 40]\n elif num_classes == 98 * 2:\n part_dim = [36, 36, 18, 40]\n elif num_classes == 106 * 2:\n part_dim = [38, 38, 30, 40]\n self.left_eye_net = self.make_backbone(widen_factor)\n self.right_eye_net = self.make_backbone(widen_factor)\n self.nose_net = self.make_backbone(widen_factor)\n self.mouth_net = self.make_backbone(widen_factor)\n self.left_eye_loc = self.make_pts_fc(part_dim[0])\n self.right_eye_loc = self.make_pts_fc(part_dim[1])\n self.nose_loc = self.make_pts_fc(part_dim[2])\n self.mouth_loc = self.make_pts_fc(part_dim[3])\n self._initialize_weights()\n\n def make_backbone(self, widen_factor):\n input_channel = int(self.input_channel * widen_factor)\n last_channel = int(self.last_channel * widen_factor\n ) if widen_factor > 1.0 else 
self.last_channel\n features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in self.interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n features.append(self.block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n features.append(self.block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n features.append(conv_1x1_bn(input_channel, last_channel))\n return nn.Sequential(*features)\n\n def make_pts_fc(self, num_classes):\n pts_net = nn.Sequential(nn.Linear(self.last_channel, 64), nn.PReLU(\n ), nn.Linear(64, 64), nn.PReLU(), nn.Linear(64, num_classes))\n return pts_net\n\n def forward(self, x):\n xs_1 = self.left_eye_net(x[0])\n xs_1 = torch.flatten(xs_1, 1)\n out_1 = self.left_eye_loc(xs_1)\n xs_2 = self.right_eye_net(x[1])\n xs_2 = torch.flatten(xs_2, 1)\n out_2 = self.right_eye_loc(xs_2)\n xs_3 = self.nose_net(x[2])\n xs_3 = torch.flatten(xs_3, 1)\n out_3 = self.nose_loc(xs_3)\n xs_4 = self.mouth_net(x[3])\n xs_4 = torch.flatten(xs_4, 1)\n out_4 = self.mouth_loc(xs_4)\n return [out_1, out_2, out_3, out_4]\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n",
"step-4": "<mask token>\n\n\ndef conv_bn(inp, oup, stride):\n return nn.Sequential(nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.\n BatchNorm2d(oup), nn.ReLU(inplace=True))\n\n\ndef conv_1x1_bn(inp, oup):\n return nn.Sequential(nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.\n BatchNorm2d(oup), nn.ReLU(inplace=True))\n\n\nclass InvertedResidual(nn.Module):\n\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n if expand_ratio == 1:\n self.conv = nn.Sequential(nn.Conv2d(hidden_dim, hidden_dim, 3,\n stride, 1, groups=hidden_dim, bias=False), nn.BatchNorm2d(\n hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim,\n oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup))\n else:\n self.conv = nn.Sequential(nn.Conv2d(inp, hidden_dim, 1, 1, 0,\n bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=\n True), nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,\n groups=hidden_dim, bias=False), nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, oup, 1, 1, 0,\n bias=False), nn.BatchNorm2d(oup))\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass SmallMobileNetV2(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=1000):\n super(SmallMobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 8\n last_channel = 64\n interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [6, 16,\n 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, 64, 2, \n 2], [6, 80, 1, 1]]\n input_channel = int(input_channel * widen_factor)\n self.last_channel = int(last_channel * widen_factor\n ) if widen_factor > 1.0 else last_channel\n self.features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in 
range(n):\n if i == 0:\n self.features.append(block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n self.features.append(block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n self.features.append(conv_1x1_bn(input_channel, self.last_channel))\n self.features = nn.Sequential(*self.features)\n self.pts_net = nn.Sequential(nn.Linear(4 * self.last_channel, 256),\n nn.PReLU(), nn.Linear(256, 256), nn.PReLU(), nn.Linear(256,\n num_classes))\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n pts = self.pts_net(x)\n return pts\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\nclass SmallMobileNetV2Part(nn.Module):\n\n def __init__(self, widen_factor=1.0, num_classes=68 * 2):\n super(SmallMobileNetV2Part, self).__init__()\n self.block = InvertedResidual\n self.input_channel = 8\n self.last_channel = 64\n self.interverted_residual_setting = [[1, 8, 1, 1], [6, 12, 2, 2], [\n 6, 16, 2, 2], [6, 24, 3, 2], [6, 32, 3, 2], [6, 48, 3, 2], [6, \n 64, 2, 2], [6, 80, 1, 1]]\n if num_classes == 68 * 2:\n part_dim = [22, 22, 18, 40]\n elif num_classes == 98 * 2:\n part_dim = [36, 36, 18, 40]\n elif num_classes == 106 * 2:\n part_dim = [38, 38, 30, 40]\n self.left_eye_net = self.make_backbone(widen_factor)\n self.right_eye_net = self.make_backbone(widen_factor)\n self.nose_net = self.make_backbone(widen_factor)\n self.mouth_net = self.make_backbone(widen_factor)\n self.left_eye_loc = self.make_pts_fc(part_dim[0])\n self.right_eye_loc = self.make_pts_fc(part_dim[1])\n self.nose_loc = 
self.make_pts_fc(part_dim[2])\n self.mouth_loc = self.make_pts_fc(part_dim[3])\n self._initialize_weights()\n\n def make_backbone(self, widen_factor):\n input_channel = int(self.input_channel * widen_factor)\n last_channel = int(self.last_channel * widen_factor\n ) if widen_factor > 1.0 else self.last_channel\n features = [conv_bn(3, input_channel, 2)]\n for t, c, n, s in self.interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n features.append(self.block(input_channel,\n output_channel, s, expand_ratio=t))\n else:\n features.append(self.block(input_channel,\n output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n features.append(conv_1x1_bn(input_channel, last_channel))\n return nn.Sequential(*features)\n\n def make_pts_fc(self, num_classes):\n pts_net = nn.Sequential(nn.Linear(self.last_channel, 64), nn.PReLU(\n ), nn.Linear(64, 64), nn.PReLU(), nn.Linear(64, num_classes))\n return pts_net\n\n def forward(self, x):\n xs_1 = self.left_eye_net(x[0])\n xs_1 = torch.flatten(xs_1, 1)\n out_1 = self.left_eye_loc(xs_1)\n xs_2 = self.right_eye_net(x[1])\n xs_2 = torch.flatten(xs_2, 1)\n out_2 = self.right_eye_loc(xs_2)\n xs_3 = self.nose_net(x[2])\n xs_3 = torch.flatten(xs_3, 1)\n out_3 = self.nose_loc(xs_3)\n xs_4 = self.mouth_net(x[3])\n xs_4 = torch.flatten(xs_4, 1)\n out_4 = self.mouth_loc(xs_4)\n return [out_1, out_2, out_3, out_4]\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n",
"step-5": "\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport math\n\ndef conv_bn(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU(inplace=True)\n )\n\n\ndef conv_1x1_bn(inp, oup):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass SmallMobileNetV2(nn.Module):\n def __init__(self, widen_factor=1.0, num_classes=1000): #, input_size=224\n super(SmallMobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 8\n last_channel = 64\n interverted_residual_setting = [\n # t, c, n, s\n [1, 8, 1, 1],\n [6, 12, 2, 2],\n [6, 16, 2, 2],\n [6, 24, 3, 2],\n [6, 32, 3, 2],\n [6, 48, 3, 2],\n [6, 64, 2, 2],\n [6, 80, 1, 1],\n ]\n\n # building first layer\n # assert input_size % 32 == 0\n input_channel = int(input_channel * 
widen_factor)\n self.last_channel = int(last_channel * widen_factor) if widen_factor > 1.0 else last_channel\n self.features = [conv_bn(3, input_channel, 2)]\n # building inverted residual blocks\n for t, c, n, s in interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n self.features.append(block(input_channel, output_channel, s, expand_ratio=t))\n else:\n self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n self.features.append(conv_1x1_bn(input_channel, self.last_channel))\n # make it nn.Sequential\n self.features = nn.Sequential(*self.features)\n\n # building pts net\n self.pts_net = nn.Sequential(\n nn.Linear(4*self.last_channel, 256),\n nn.PReLU(),\n nn.Linear(256, 256),\n nn.PReLU(),\n nn.Linear(256, num_classes)\n )\n\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n pts = self.pts_net(x)\n return pts\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\n\n\n\nclass SmallMobileNetV2Part(nn.Module):\n def __init__(self, widen_factor=1.0, num_classes=68*2): #, input_size=224\n super(SmallMobileNetV2Part, self).__init__()\n self.block = InvertedResidual\n self.input_channel = 8\n self.last_channel = 64\n self.interverted_residual_setting = [\n # t, c, n, s\n [1, 8, 1, 1],\n [6, 12, 2, 2],\n [6, 16, 2, 2],\n [6, 24, 3, 2],\n [6, 32, 3, 2],\n [6, 48, 3, 2],\n [6, 64, 2, 2],\n [6, 80, 1, 1],\n ]\n\n if num_classes==68*2:\n part_dim = [22, 22, 18, 40]\n elif num_classes==98*2:\n part_dim = [36, 36, 18, 40]\n elif num_classes==106*2:\n part_dim = [38, 38, 30, 40]\n\n # building first layer\n # assert input_size % 32 == 0\n # Spatial transformer localization-network\n self.left_eye_net = self.make_backbone(widen_factor)\n self.right_eye_net = self.make_backbone(widen_factor)\n self.nose_net = self.make_backbone(widen_factor)\n self.mouth_net = self.make_backbone(widen_factor)\n # Regressor for the 3 * 2 affine matrix\n self.left_eye_loc = self.make_pts_fc(part_dim[0])\n self.right_eye_loc = self.make_pts_fc(part_dim[1])\n self.nose_loc = self.make_pts_fc(part_dim[2])\n self.mouth_loc = self.make_pts_fc(part_dim[3])\n\n self._initialize_weights()\n\n def make_backbone(self, widen_factor):\n # building first layer\n # assert input_size % 32 == 0\n input_channel = int(self.input_channel * widen_factor)\n last_channel = int(self.last_channel * widen_factor) if widen_factor > 1.0 else self.last_channel\n features = [conv_bn(3, input_channel, 2)]\n # building inverted residual blocks\n for t, c, n, s in self.interverted_residual_setting:\n output_channel = int(c * widen_factor)\n for i in range(n):\n if i == 0:\n features.append(self.block(input_channel, output_channel, s, 
expand_ratio=t))\n else:\n features.append(self.block(input_channel, output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n features.append(conv_1x1_bn(input_channel, last_channel))\n # make it nn.Sequential\n return nn.Sequential(*features)\n\n def make_pts_fc(self,num_classes):\n #pdb.set_trace()\n pts_net = nn.Sequential(\n nn.Linear(self.last_channel, 64),\n nn.PReLU(),\n nn.Linear(64, 64),\n nn.PReLU(),\n nn.Linear(64, num_classes)\n )\n return pts_net\n\n\n def forward(self, x):\n xs_1 = self.left_eye_net(x[0])\n xs_1 = torch.flatten(xs_1, 1)\n #pdb.set_trace()\n out_1 = self.left_eye_loc(xs_1)\n\n xs_2 = self.right_eye_net(x[1])\n xs_2 = torch.flatten(xs_2, 1)\n out_2 = self.right_eye_loc(xs_2)\n\n xs_3 = self.nose_net(x[2])\n xs_3 = torch.flatten(xs_3, 1)\n out_3 = self.nose_loc(xs_3)\n\n xs_4 = self.mouth_net(x[3])\n xs_4 = torch.flatten(xs_4, 1)\n out_4 = self.mouth_loc(xs_4)\n\n return [out_1, out_2, out_3, out_4]\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n",
"step-ids": [
7,
11,
13,
15,
17
]
}
|
[
7,
11,
13,
15,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('CRUD', '0001_initial')]
operations = [migrations.RenameField(model_name='employee', old_name=
'eAdddress', new_name='eAddress')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Employee.eAdddress (typo, triple 'd') to eAddress."""
    dependencies = [('CRUD', '0001_initial')]
    operations = [migrations.RenameField(model_name='employee', old_name=
        'eAdddress', new_name='eAddress')]
<|reserved_special_token_1|>
# Generated by Django 3.0.8 on 2020-07-12 19:05
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Employee.eAdddress (typo, triple 'd') to eAddress."""

    dependencies = [
        ('CRUD', '0001_initial'),
    ]

    operations = [
        # Pure rename: no data migration needed, Django keeps column data.
        migrations.RenameField(
            model_name='employee',
            old_name='eAdddress',
            new_name='eAddress',
        ),
    ]
|
flexible
|
{
"blob_id": "b1d8a454e590dfa4afa257ca665376c320a4acb5",
"index": 5264,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('CRUD', '0001_initial')]\n operations = [migrations.RenameField(model_name='employee', old_name=\n 'eAdddress', new_name='eAddress')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('CRUD', '0001_initial')]\n operations = [migrations.RenameField(model_name='employee', old_name=\n 'eAdddress', new_name='eAddress')]\n",
"step-5": "# Generated by Django 3.0.8 on 2020-07-12 19:05\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('CRUD', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='employee',\n old_name='eAdddress',\n new_name='eAddress',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('course', '0002_auto_20201103_1648')]
operations = [migrations.AddField(model_name='course', name=
'course_video', field=models.FileField(blank=True, max_length=255,
null=True, upload_to='video', verbose_name='封面视频')), migrations.
AlterField(model_name='course', name='brief', field=
ckeditor_uploader.fields.RichTextUploadingField(blank=True,
max_length=2048, null=True, verbose_name='详情介绍'))]
<|reserved_special_token_1|>
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Course.course_video (cover video upload) and widen Course.brief
    to a rich-text uploading field."""
    dependencies = [('course', '0002_auto_20201103_1648')]
    operations = [migrations.AddField(model_name='course', name=
        'course_video', field=models.FileField(blank=True, max_length=255,
        null=True, upload_to='video', verbose_name='封面视频')), migrations.
        AlterField(model_name='course', name='brief', field=
        ckeditor_uploader.fields.RichTextUploadingField(blank=True,
        max_length=2048, null=True, verbose_name='详情介绍'))]
<|reserved_special_token_1|>
# Generated by Django 2.2.16 on 2020-11-04 12:48
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Course.course_video (cover video upload) and widen Course.brief
    to a rich-text uploading field."""

    dependencies = [
        ('course', '0002_auto_20201103_1648'),
    ]

    operations = [
        # New optional cover-video file, stored under MEDIA_ROOT/video.
        migrations.AddField(
            model_name='course',
            name='course_video',
            field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),
        ),
        # brief becomes a CKEditor rich-text field (max 2048 chars).
        migrations.AlterField(
            model_name='course',
            name='brief',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),
        ),
    ]
|
flexible
|
{
"blob_id": "afacc2c54584c070963c4cb3cabbae64bb0e3159",
"index": 1858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('course', '0002_auto_20201103_1648')]\n operations = [migrations.AddField(model_name='course', name=\n 'course_video', field=models.FileField(blank=True, max_length=255,\n null=True, upload_to='video', verbose_name='封面视频')), migrations.\n AlterField(model_name='course', name='brief', field=\n ckeditor_uploader.fields.RichTextUploadingField(blank=True,\n max_length=2048, null=True, verbose_name='详情介绍'))]\n",
"step-4": "import ckeditor_uploader.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('course', '0002_auto_20201103_1648')]\n operations = [migrations.AddField(model_name='course', name=\n 'course_video', field=models.FileField(blank=True, max_length=255,\n null=True, upload_to='video', verbose_name='封面视频')), migrations.\n AlterField(model_name='course', name='brief', field=\n ckeditor_uploader.fields.RichTextUploadingField(blank=True,\n max_length=2048, null=True, verbose_name='详情介绍'))]\n",
"step-5": "# Generated by Django 2.2.16 on 2020-11-04 12:48\n\nimport ckeditor_uploader.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('course', '0002_auto_20201103_1648'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course',\n name='course_video',\n field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),\n ),\n migrations.AlterField(\n model_name='course',\n name='brief',\n field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import utils
def revcomp(s):
    """Return the reverse complement of the DNA string *s*.

    Raises KeyError if *s* contains a character other than A/C/G/T.
    """
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    out = []
    for base in s[::-1]:
        out.append(pairs[base])
    return ''.join(out)
def reverse_palindromes(s):
    """Return (position, length) pairs for every reverse palindrome in *s*.

    A reverse palindrome is a substring equal to its own reverse
    complement.  Positions are 1-based; lengths 4 through 12 are checked
    (the Rosalind REVP range).  Returns [] for the empty string.
    """
    results = []
    l = len(s)
    for i in range(l):
        for j in range(4, 13):
            if i + j > l:
                # j only grows, so every longer window also overruns:
                # `break` instead of the original `continue`.
                break
            s1 = s[i:i+j]
            if s1 == revcomp(s1):
                results.append((i + 1, j))
    return results
if __name__ == "__main__":
    seq = utils.load_multifasta('files/rosalind_revp.txt').values()[0]
    results = reverse_palindromes(seq)
    # Bug fix: this print was dedented outside the guard, so merely
    # importing the module raised NameError on `results`.  Parenthesized
    # single-argument print is valid in both Python 2 and 3.
    print("\n".join([' '.join(map(str, r)) for r in results]))
|
normal
|
{
"blob_id": "1f40c0ed8e449354a5a87ef18bb07978a9fb8a1c",
"index": 3368,
"step-1": "#!/usr/bin/env python\n\nimport utils\n\ndef revcomp(s):\n comp = {'A':'T', 'T':'A', 'G':'C', 'C':'G'}\n return ''.join([comp[c] for c in reversed(s)])\n\ndef reverse_palindromes(s):\n results = []\n l = len(s)\n for i in range(l):\n for j in range(4, 13):\n if i + j > l:\n continue\n s1 = s[i:i+j]\n s2 = revcomp(s1)\n if s1 == s2:\n results.append((i + 1, j))\n return results\n\nif __name__ == \"__main__\":\n\n seq = utils.load_multifasta('files/rosalind_revp.txt').values()[0]\n results = reverse_palindromes(seq)\nprint \"\\n\".join([' '.join(map(str, r)) for r in results])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import common
import student_code
class bcolors:
    """ANSI escape sequences for colored terminal output."""
    RED = "\x1b[31m"
    GREEN = "\x1b[32m"
    NORMAL = "\x1b[0m"  # reset all attributes
def check_result(title, map1, map2):
    """Print a per-cell colored comparison of map1 against map2.

    Matching cells are printed green, mismatches red; a Passed/Failed
    verdict follows.  Returns True iff every cell matches.
    """
    print(title)
    matches = True
    for y in range(0, common.constants.MAP_HEIGHT):
        row = []
        for x in range(0, common.constants.MAP_WIDTH):
            cell = str(map1[y][x])
            if map1[y][x] == map2[y][x]:
                row.append(bcolors.GREEN + cell + bcolors.NORMAL)
            else:
                matches = False
                row.append(bcolors.RED + cell + bcolors.NORMAL)
        print("".join(row))
    if matches:
        print("Test Result: " + bcolors.GREEN + "Passed" + bcolors.NORMAL)
    else:
        print("Test Result: " + bcolors.RED + "Failed" + bcolors.NORMAL)
    return matches
# Test fixtures: each string encodes a grid row-by-row (one character per
# cell); data* is the input map, gold_df* the expected map after search.
data1 = (
"100000011"
"110111011"
"111111011"
"110000003"
"111111011"
"111020000")

gold_df1 = ("100000011"
"110111011"
"111111011"
"110000555"
"111111511"
"111055540")

data2 = (
"200000011"
"011111011"
"000001011"
"111011003"
"111111011"
"111000011"
"111111011")

gold_df2 = ("555555511"
"411111511"
"444441511"
"111411555"
"111111011"
"111000011"
"111111011")

data3 = (
"100000011"
"111011011"
"000011011"
"111011003"
"110011011"
"111200011")

gold_df3 = (
"100000011"
"111011011"
"000011011"
"111411555"
"110411511"
"111555511")

all_passed = True

# Run each (input, expected) pair through the student's A* search and
# diff the mutated map against the gold map.  The original repeated this
# stanza three times and then referenced undefined cdf5/df5, which
# crashed with a NameError before the verdict; both issues are fixed.
for data, gold in ((data1, gold_df1), (data2, gold_df2), (data3, gold_df3)):
    gold_map = common.init_map()
    common.set_map(gold_map, gold)
    work_map = common.init_map()
    common.set_map(work_map, data)
    reached = student_code.astar_search(work_map)
    matched = check_result("Reachable goal:", work_map, gold_map)
    all_passed = all_passed and matched and reached

if all_passed:
    exit(0)
else:
    exit(1)
|
normal
|
{
"blob_id": "602d2c545c6e3eabe5c6285d2ab0c7f4216a00f5",
"index": 1563,
"step-1": "<mask token>\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\n<mask token>\ncommon.set_map(gold_dfmap1, gold_df1)\n<mask token>\ncommon.set_map(dfmap1, data1)\n<mask token>\ncommon.set_map(gold_dfmap2, gold_df2)\n<mask token>\ncommon.set_map(dfmap2, data2)\n<mask token>\ncommon.set_map(gold_dfmap3, gold_df3)\n<mask token>\ncommon.set_map(dfmap3, data3)\n<mask token>\nif all_passed:\n exit(0)\nelse:\n exit(1)\n",
"step-3": "<mask token>\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\ndata1 = '100000011110111011111111011110000003111111011111020000'\ngold_df1 = '100000011110111011111111011110000555111111511111055540'\ndata2 = '200000011011111011000001011111011003111111011111000011111111011'\ngold_df2 = '555555511411111511444441511111411555111111011111000011111111011'\ndata3 = '100000011111011011000011011111011003110011011111200011'\ngold_df3 = '100000011111011011000011011111411555110411511111555511'\nall_passed = True\ngold_dfmap1 = common.init_map()\ncommon.set_map(gold_dfmap1, gold_df1)\ndfmap1 = common.init_map()\ncommon.set_map(dfmap1, data1)\ndf1 = student_code.astar_search(dfmap1)\ntdf1 = 'Reachable goal:'\ncdf1 = check_result(tdf1, dfmap1, gold_dfmap1)\nall_passed = all_passed and cdf1 and df1\ngold_dfmap2 = common.init_map()\ncommon.set_map(gold_dfmap2, gold_df2)\ndfmap2 = common.init_map()\ncommon.set_map(dfmap2, data2)\ndf2 = student_code.astar_search(dfmap2)\ntdf2 = 'Reachable goal:'\ncdf2 = check_result(tdf2, dfmap2, gold_dfmap2)\nall_passed = all_passed and cdf2 and df2\ngold_dfmap3 = common.init_map()\ncommon.set_map(gold_dfmap3, gold_df3)\ndfmap3 = common.init_map()\ncommon.set_map(dfmap3, data3)\ndf3 = student_code.astar_search(dfmap3)\ntdf3 = 'Reachable goal:'\ncdf3 = check_result(tdf3, dfmap3, gold_dfmap3)\nall_passed = all_passed and cdf3 and df3\nall_passed = all_passed and cdf5 and df5\nif 
all_passed:\n exit(0)\nelse:\n exit(1)\n",
"step-4": "import common\nimport student_code\n\n\nclass bcolors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n NORMAL = '\\x1b[0m'\n\n\ndef check_result(title, map1, map2):\n result = True\n print(title)\n for y in range(0, common.constants.MAP_HEIGHT):\n v = ''\n for x in range(0, common.constants.MAP_WIDTH):\n if map1[y][x] == map2[y][x]:\n v += bcolors.GREEN + str(map1[y][x]) + bcolors.NORMAL\n else:\n result = False\n v += bcolors.RED + str(map1[y][x]) + bcolors.NORMAL\n print(v)\n if result:\n print('Test Result: ' + bcolors.GREEN + 'Passed' + bcolors.NORMAL)\n else:\n print('Test Result: ' + bcolors.RED + 'Failed' + bcolors.NORMAL)\n return result\n\n\ndata1 = '100000011110111011111111011110000003111111011111020000'\ngold_df1 = '100000011110111011111111011110000555111111511111055540'\ndata2 = '200000011011111011000001011111011003111111011111000011111111011'\ngold_df2 = '555555511411111511444441511111411555111111011111000011111111011'\ndata3 = '100000011111011011000011011111011003110011011111200011'\ngold_df3 = '100000011111011011000011011111411555110411511111555511'\nall_passed = True\ngold_dfmap1 = common.init_map()\ncommon.set_map(gold_dfmap1, gold_df1)\ndfmap1 = common.init_map()\ncommon.set_map(dfmap1, data1)\ndf1 = student_code.astar_search(dfmap1)\ntdf1 = 'Reachable goal:'\ncdf1 = check_result(tdf1, dfmap1, gold_dfmap1)\nall_passed = all_passed and cdf1 and df1\ngold_dfmap2 = common.init_map()\ncommon.set_map(gold_dfmap2, gold_df2)\ndfmap2 = common.init_map()\ncommon.set_map(dfmap2, data2)\ndf2 = student_code.astar_search(dfmap2)\ntdf2 = 'Reachable goal:'\ncdf2 = check_result(tdf2, dfmap2, gold_dfmap2)\nall_passed = all_passed and cdf2 and df2\ngold_dfmap3 = common.init_map()\ncommon.set_map(gold_dfmap3, gold_df3)\ndfmap3 = common.init_map()\ncommon.set_map(dfmap3, data3)\ndf3 = student_code.astar_search(dfmap3)\ntdf3 = 'Reachable goal:'\ncdf3 = check_result(tdf3, dfmap3, gold_dfmap3)\nall_passed = all_passed and cdf3 and df3\nall_passed = all_passed and 
cdf5 and df5\nif all_passed:\n exit(0)\nelse:\n exit(1)\n",
"step-5": "import common\r\nimport student_code\r\n\r\nclass bcolors:\r\n\tRED = \"\\x1b[31m\"\r\n\tGREEN = \"\\x1b[32m\"\r\n\tNORMAL = \"\\x1b[0m\"\r\n\r\ndef check_result(title, map1, map2):\r\n\tresult=True\r\n\tprint(title)\r\n\tfor y in range(0,common.constants.MAP_HEIGHT):\r\n\t\tv=\"\"\r\n\t\tfor x in range(0,common.constants.MAP_WIDTH):\r\n\t\t\tif (map1[y][x]==map2[y][x]):\r\n\t\t\t\tv+=bcolors.GREEN+str(map1[y][x])+bcolors.NORMAL\r\n\t\t\telse:\r\n\t\t\t\tresult = False\r\n\t\t\t\tv+=bcolors.RED+str(map1[y][x])+bcolors.NORMAL\r\n\t\tprint(v)\r\n\tif (result):\r\n\t\tprint(\"Test Result: \" + bcolors.GREEN+\"Passed\"+bcolors.NORMAL)\r\n\telse:\r\n\t\tprint(\"Test Result: \" + bcolors.RED+\"Failed\"+bcolors.NORMAL)\r\n\treturn result\r\n\r\ndata1 = (\r\n\"100000011\"\r\n\"110111011\"\r\n\"111111011\"\r\n\"110000003\"\r\n\"111111011\"\r\n\"111020000\")\r\n\r\ngold_df1 = (\"100000011\"\r\n\"110111011\"\r\n\"111111011\"\r\n\"110000555\"\r\n\"111111511\"\r\n\"111055540\")\r\n\r\ndata2 = (\r\n\"200000011\"\r\n\"011111011\"\r\n\"000001011\"\r\n\"111011003\"\r\n\"111111011\"\r\n\"111000011\"\r\n\"111111011\")\r\n\r\ngold_df2 = (\"555555511\"\r\n\"411111511\"\r\n\"444441511\"\r\n\"111411555\"\r\n\"111111011\"\r\n\"111000011\"\r\n\"111111011\")\r\n\r\n\r\ndata3 = (\r\n\"100000011\"\r\n\"111011011\"\r\n\"000011011\"\r\n\"111011003\"\r\n\"110011011\"\r\n\"111200011\")\r\n\r\ngold_df3 = (\r\n\"100000011\"\r\n\"111011011\"\r\n\"000011011\"\r\n\"111411555\"\r\n\"110411511\"\r\n\"111555511\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\nall_passed = True\r\n\r\ngold_dfmap1 = common.init_map();\r\ncommon.set_map(gold_dfmap1, gold_df1)\r\n\r\ndfmap1 = common.init_map()\r\ncommon.set_map(dfmap1, data1)\r\ndf1 = student_code.astar_search(dfmap1)\r\ntdf1 =\"Reachable goal:\"\r\ncdf1 = check_result(tdf1,dfmap1,gold_dfmap1)\r\n\r\nall_passed = all_passed and cdf1 and df1 \r\n\r\ngold_dfmap2 = common.init_map();\r\ncommon.set_map(gold_dfmap2, gold_df2)\r\n\r\ndfmap2 = 
common.init_map()\r\ncommon.set_map(dfmap2, data2)\r\ndf2 = student_code.astar_search(dfmap2)\r\ntdf2 =\"Reachable goal:\"\r\ncdf2 = check_result(tdf2,dfmap2,gold_dfmap2)\r\n\r\nall_passed = all_passed and cdf2 and df2 \r\n\r\ngold_dfmap3 = common.init_map();\r\ncommon.set_map(gold_dfmap3, gold_df3)\r\n\r\ndfmap3 = common.init_map()\r\ncommon.set_map(dfmap3, data3)\r\ndf3 = student_code.astar_search(dfmap3)\r\ntdf3 =\"Reachable goal:\"\r\ncdf3 = check_result(tdf3,dfmap3,gold_dfmap3)\r\n\r\n\r\nall_passed = all_passed and cdf3 and df3 \r\n\r\n\r\n\r\n\r\nall_passed = all_passed and cdf5 and df5\r\n\r\nif all_passed:\r\n\texit(0)\r\nelse:\r\n\texit(1)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def MLmodel(data):
    """Score one tweet's sentiment with the pickled model stored in GCS.

    data: the raw tweet text (one pipeline element).
    Side effects: downloads two pickle files into the working directory
    and prints the resulting row list; returns None.
    """
    import pickle
    import numpy as np  # NOTE(review): unused in this function — confirm before removing
    from google.cloud import storage
    storage_client = storage.Client()
    bucket = storage_client.get_bucket('testing-gcp-mandar')
    # Trained classifier: GCS object 'model.pkl' -> local file.
    blob = bucket.blob('model.pkl')
    model_local = 'TwitterSA_model.pkl'
    blob.download_to_filename(model_local)
    pickle_in = open('TwitterSA_model.pkl', 'rb')  # NOTE(review): file handle never closed
    model = pickle.load(pickle_in)
    # Fitted preprocessor/vectorizer: GCS object 'prep.pkl' -> local file.
    blob = bucket.blob('prep.pkl')
    model_local = 'TwitterSA_prep.pkl'
    blob.download_to_filename(model_local)
    pickle_prep = open('TwitterSA_prep.pkl', 'rb')  # NOTE(review): file handle never closed
    prep = pickle.load(pickle_prep)
    ntweet = [data]
    x = prep.transform(ntweet)
    pred = model.predict(x)
    l1 = []
    temp = {}
    temp['Tweet'] = str(data)
    # str(pred) renders like '[label]'; strip the brackets for the row value.
    temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')
    l1.append(temp)
    print(l1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')
<|reserved_special_token_0|>
def MLmodel(data):
import pickle
import numpy as np
from google.cloud import storage
storage_client = storage.Client()
bucket = storage_client.get_bucket('testing-gcp-mandar')
blob = bucket.blob('model.pkl')
model_local = 'TwitterSA_model.pkl'
blob.download_to_filename(model_local)
pickle_in = open('TwitterSA_model.pkl', 'rb')
model = pickle.load(pickle_in)
blob = bucket.blob('prep.pkl')
model_local = 'TwitterSA_prep.pkl'
blob.download_to_filename(model_local)
pickle_prep = open('TwitterSA_prep.pkl', 'rb')
prep = pickle.load(pickle_prep)
ntweet = [data]
x = prep.transform(ntweet)
pred = model.predict(x)
l1 = []
temp = {}
temp['Tweet'] = str(data)
temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')
l1.append(temp)
print(l1)
<|reserved_special_token_0|>
p.run().wait_until_finish()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob',
'--temp_location=gs://testing-gcp-mandar/temp']
dataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')
options = PipelineOptions(dataflow_options)
gcloud_options = options.view_as(GoogleCloudOptions)
options.view_as(StandardOptions).runner = 'Directrunner'
options.view_as(StandardOptions).streaming = True
table_schema = {'fields': [{'name': 'Tweet', 'type': 'STRING', 'mode':
'NULLABLE'}, {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}]}
def MLmodel(data):
import pickle
import numpy as np
from google.cloud import storage
storage_client = storage.Client()
bucket = storage_client.get_bucket('testing-gcp-mandar')
blob = bucket.blob('model.pkl')
model_local = 'TwitterSA_model.pkl'
blob.download_to_filename(model_local)
pickle_in = open('TwitterSA_model.pkl', 'rb')
model = pickle.load(pickle_in)
blob = bucket.blob('prep.pkl')
model_local = 'TwitterSA_prep.pkl'
blob.download_to_filename(model_local)
pickle_prep = open('TwitterSA_prep.pkl', 'rb')
prep = pickle.load(pickle_prep)
ntweet = [data]
x = prep.transform(ntweet)
pred = model.predict(x)
l1 = []
temp = {}
temp['Tweet'] = str(data)
temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')
l1.append(temp)
print(l1)
p = beam.Pipeline(options=options)
lines = p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription=
'projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(
bytes) | 'predict' >> beam.ParDo(MLmodel
) | 'storing in bigQ' >> beam.io.WriteToBigQuery(schema=table_schema,
table='lofty-shine-248403:my_new_datasset.TweetLiveSentiment')
p.run().wait_until_finish()
<|reserved_special_token_1|>
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
dataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob',
'--temp_location=gs://testing-gcp-mandar/temp']
dataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')
options = PipelineOptions(dataflow_options)
gcloud_options = options.view_as(GoogleCloudOptions)
options.view_as(StandardOptions).runner = 'Directrunner'
options.view_as(StandardOptions).streaming = True
table_schema = {'fields': [{'name': 'Tweet', 'type': 'STRING', 'mode':
'NULLABLE'}, {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}]}
def MLmodel(data):
import pickle
import numpy as np
from google.cloud import storage
storage_client = storage.Client()
bucket = storage_client.get_bucket('testing-gcp-mandar')
blob = bucket.blob('model.pkl')
model_local = 'TwitterSA_model.pkl'
blob.download_to_filename(model_local)
pickle_in = open('TwitterSA_model.pkl', 'rb')
model = pickle.load(pickle_in)
blob = bucket.blob('prep.pkl')
model_local = 'TwitterSA_prep.pkl'
blob.download_to_filename(model_local)
pickle_prep = open('TwitterSA_prep.pkl', 'rb')
prep = pickle.load(pickle_prep)
ntweet = [data]
x = prep.transform(ntweet)
pred = model.predict(x)
l1 = []
temp = {}
temp['Tweet'] = str(data)
temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')
l1.append(temp)
print(l1)
p = beam.Pipeline(options=options)
lines = p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription=
'projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(
bytes) | 'predict' >> beam.ParDo(MLmodel
) | 'storing in bigQ' >> beam.io.WriteToBigQuery(schema=table_schema,
table='lofty-shine-248403:my_new_datasset.TweetLiveSentiment')
p.run().wait_until_finish()
<|reserved_special_token_1|>
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions,SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions

# Pipeline options: GCP project, job name, and temp/staging buckets used by
# Dataflow when this job is submitted there.
dataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob', '--temp_location=gs://testing-gcp-mandar/temp']
dataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')
options = PipelineOptions(dataflow_options)
gcloud_options = options.view_as(GoogleCloudOptions)

# Dataflow runner
# NOTE(review): Beam resolves runner names case-insensitively, so
# 'Directrunner' maps to the local DirectRunner; the canonical spelling is
# 'DirectRunner' -- confirm before normalizing.
options.view_as(StandardOptions).runner = 'Directrunner'
# options.view_as(SetupOptions).save_main_session = True
# Streaming mode: the pipeline tails the Pub/Sub subscription indefinitely.
options.view_as(StandardOptions).streaming = True

# BigQuery schema for the output table: one row per tweet with its predicted
# sentiment, both stored as strings.
table_schema = {'fields': [
    {'name': 'Tweet', 'type': 'STRING', 'mode': 'NULLABLE'},
    {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}
]}
def MLmodel(data):
    """Score one tweet and emit a BigQuery row for WriteToBigQuery.

    Downloads the pickled classifier (``model.pkl``) and text preprocessor
    (``prep.pkl``) from the ``testing-gcp-mandar`` GCS bucket, predicts the
    sentiment of ``data``, and returns a one-element list of dicts matching
    ``table_schema``: ``{'Tweet': ..., 'Sentiment': ...}``.

    NOTE(review): the model is re-downloaded from GCS for every element;
    moving the loading into a ``beam.DoFn.setup()`` (or a module-level
    cache) would avoid the per-element round trip -- confirm before
    restructuring.
    """
    import pickle
    from google.cloud import storage

    storage_client = storage.Client()
    bucket = storage_client.get_bucket("testing-gcp-mandar")

    # Fetch and load the trained classifier.
    blob = bucket.blob("model.pkl")
    model_local = "TwitterSA_model.pkl"
    blob.download_to_filename(model_local)
    with open(model_local, "rb") as pickle_in:  # close handle deterministically
        model = pickle.load(pickle_in)

    # Fetch and load the text preprocessor (vectorizer).
    blob = bucket.blob("prep.pkl")
    prep_local = "TwitterSA_prep.pkl"
    blob.download_to_filename(prep_local)
    with open(prep_local, "rb") as pickle_prep:
        prep = pickle.load(pickle_prep)

    x = prep.transform([data])
    pred = model.predict(x)

    temp = {}
    temp['Tweet'] = str(data)
    # str(pred) renders e.g. "[1]"; strip the numpy-array brackets.
    temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')
    l1 = [temp]
    print(l1)
    # BUG FIX: a plain callable used with beam.ParDo must return an iterable
    # of output elements. The original returned None, so the downstream
    # WriteToBigQuery step never received any rows.
    return l1
p = beam.Pipeline(options=options)

# Streaming graph: Pub/Sub subscription -> per-element prediction -> BigQuery.
# The 'decode' step is commented out, so MLmodel receives the raw message
# bytes rather than a decoded utf-8 string.
lines = \
    (p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription='projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(bytes)
    # | 'decode' >> beam.Map(lambda x: x.decode('utf-8'))
    | 'predict' >> beam.ParDo(MLmodel)
    | 'storing in bigQ' >> beam.io.WriteToBigQuery(
    schema=table_schema,
    table="lofty-shine-248403:my_new_datasset.TweetLiveSentiment")

    )

# Blocks forever in streaming mode (until the job is cancelled).
p.run().wait_until_finish()
|
flexible
|
{
"blob_id": "93a2385d9ebdbc1a7a88185c0a0d5d1f227e46a3",
"index": 8159,
"step-1": "<mask token>\n\n\ndef MLmodel(data):\n import pickle\n import numpy as np\n from google.cloud import storage\n storage_client = storage.Client()\n bucket = storage_client.get_bucket('testing-gcp-mandar')\n blob = bucket.blob('model.pkl')\n model_local = 'TwitterSA_model.pkl'\n blob.download_to_filename(model_local)\n pickle_in = open('TwitterSA_model.pkl', 'rb')\n model = pickle.load(pickle_in)\n blob = bucket.blob('prep.pkl')\n model_local = 'TwitterSA_prep.pkl'\n blob.download_to_filename(model_local)\n pickle_prep = open('TwitterSA_prep.pkl', 'rb')\n prep = pickle.load(pickle_prep)\n ntweet = [data]\n x = prep.transform(ntweet)\n pred = model.predict(x)\n l1 = []\n temp = {}\n temp['Tweet'] = str(data)\n temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')\n l1.append(temp)\n print(l1)\n\n\n<mask token>\n",
"step-2": "<mask token>\ndataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')\n<mask token>\n\n\ndef MLmodel(data):\n import pickle\n import numpy as np\n from google.cloud import storage\n storage_client = storage.Client()\n bucket = storage_client.get_bucket('testing-gcp-mandar')\n blob = bucket.blob('model.pkl')\n model_local = 'TwitterSA_model.pkl'\n blob.download_to_filename(model_local)\n pickle_in = open('TwitterSA_model.pkl', 'rb')\n model = pickle.load(pickle_in)\n blob = bucket.blob('prep.pkl')\n model_local = 'TwitterSA_prep.pkl'\n blob.download_to_filename(model_local)\n pickle_prep = open('TwitterSA_prep.pkl', 'rb')\n prep = pickle.load(pickle_prep)\n ntweet = [data]\n x = prep.transform(ntweet)\n pred = model.predict(x)\n l1 = []\n temp = {}\n temp['Tweet'] = str(data)\n temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')\n l1.append(temp)\n print(l1)\n\n\n<mask token>\np.run().wait_until_finish()\n",
"step-3": "<mask token>\ndataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob',\n '--temp_location=gs://testing-gcp-mandar/temp']\ndataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')\noptions = PipelineOptions(dataflow_options)\ngcloud_options = options.view_as(GoogleCloudOptions)\noptions.view_as(StandardOptions).runner = 'Directrunner'\noptions.view_as(StandardOptions).streaming = True\ntable_schema = {'fields': [{'name': 'Tweet', 'type': 'STRING', 'mode':\n 'NULLABLE'}, {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}]}\n\n\ndef MLmodel(data):\n import pickle\n import numpy as np\n from google.cloud import storage\n storage_client = storage.Client()\n bucket = storage_client.get_bucket('testing-gcp-mandar')\n blob = bucket.blob('model.pkl')\n model_local = 'TwitterSA_model.pkl'\n blob.download_to_filename(model_local)\n pickle_in = open('TwitterSA_model.pkl', 'rb')\n model = pickle.load(pickle_in)\n blob = bucket.blob('prep.pkl')\n model_local = 'TwitterSA_prep.pkl'\n blob.download_to_filename(model_local)\n pickle_prep = open('TwitterSA_prep.pkl', 'rb')\n prep = pickle.load(pickle_prep)\n ntweet = [data]\n x = prep.transform(ntweet)\n pred = model.predict(x)\n l1 = []\n temp = {}\n temp['Tweet'] = str(data)\n temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')\n l1.append(temp)\n print(l1)\n\n\np = beam.Pipeline(options=options)\nlines = p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription=\n 'projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(\n bytes) | 'predict' >> beam.ParDo(MLmodel\n ) | 'storing in bigQ' >> beam.io.WriteToBigQuery(schema=table_schema,\n table='lofty-shine-248403:my_new_datasset.TweetLiveSentiment')\np.run().wait_until_finish()\n",
"step-4": "import apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions, SetupOptions\nfrom apache_beam.options.pipeline_options import GoogleCloudOptions\nfrom apache_beam.options.pipeline_options import StandardOptions\ndataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob',\n '--temp_location=gs://testing-gcp-mandar/temp']\ndataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')\noptions = PipelineOptions(dataflow_options)\ngcloud_options = options.view_as(GoogleCloudOptions)\noptions.view_as(StandardOptions).runner = 'Directrunner'\noptions.view_as(StandardOptions).streaming = True\ntable_schema = {'fields': [{'name': 'Tweet', 'type': 'STRING', 'mode':\n 'NULLABLE'}, {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}]}\n\n\ndef MLmodel(data):\n import pickle\n import numpy as np\n from google.cloud import storage\n storage_client = storage.Client()\n bucket = storage_client.get_bucket('testing-gcp-mandar')\n blob = bucket.blob('model.pkl')\n model_local = 'TwitterSA_model.pkl'\n blob.download_to_filename(model_local)\n pickle_in = open('TwitterSA_model.pkl', 'rb')\n model = pickle.load(pickle_in)\n blob = bucket.blob('prep.pkl')\n model_local = 'TwitterSA_prep.pkl'\n blob.download_to_filename(model_local)\n pickle_prep = open('TwitterSA_prep.pkl', 'rb')\n prep = pickle.load(pickle_prep)\n ntweet = [data]\n x = prep.transform(ntweet)\n pred = model.predict(x)\n l1 = []\n temp = {}\n temp['Tweet'] = str(data)\n temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')\n l1.append(temp)\n print(l1)\n\n\np = beam.Pipeline(options=options)\nlines = p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription=\n 'projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(\n bytes) | 'predict' >> beam.ParDo(MLmodel\n ) | 'storing in bigQ' >> beam.io.WriteToBigQuery(schema=table_schema,\n 
table='lofty-shine-248403:my_new_datasset.TweetLiveSentiment')\np.run().wait_until_finish()\n",
"step-5": "import apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions,SetupOptions\nfrom apache_beam.options.pipeline_options import GoogleCloudOptions\nfrom apache_beam.options.pipeline_options import StandardOptions\n\ndataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob', '--temp_location=gs://testing-gcp-mandar/temp']\ndataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')\noptions = PipelineOptions(dataflow_options)\ngcloud_options = options.view_as(GoogleCloudOptions)\n\n# Dataflow runner\noptions.view_as(StandardOptions).runner = 'Directrunner'\n# options.view_as(SetupOptions).save_main_session = True\noptions.view_as(StandardOptions).streaming = True\n\ntable_schema = {'fields': [\n {'name': 'Tweet', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}\n]}\n\ndef MLmodel(data):\n import pickle\n import numpy as np\n from google.cloud import storage\n\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(\"testing-gcp-mandar\")\n\n blob = bucket.blob(\"model.pkl\")\n model_local = \"TwitterSA_model.pkl\"\n blob.download_to_filename(model_local)\n pickle_in = open(\"TwitterSA_model.pkl\", \"rb\")\n model = pickle.load(pickle_in)\n\n blob = bucket.blob(\"prep.pkl\")\n model_local = \"TwitterSA_prep.pkl\"\n blob.download_to_filename(model_local)\n pickle_prep = open(\"TwitterSA_prep.pkl\", \"rb\")\n prep = pickle.load(pickle_prep)\n\n # print tweet\n ntweet = [data]\n x = prep.transform(ntweet)\n pred = model.predict(x)\n l1 = []\n temp = {}\n temp['Tweet'] = str(data)\n temp['Sentiment'] = str(pred).replace('[', '').replace(']', '')\n l1.append(temp)\n print(l1)\n\np = beam.Pipeline(options=options)\n\nlines = \\\n (p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription='projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(bytes)\n # | 'decode' >> beam.Map(lambda x: x.decode('utf-8'))\n | 
'predict' >> beam.ParDo(MLmodel)\n | 'storing in bigQ' >> beam.io.WriteToBigQuery(\n schema=table_schema,\n table=\"lofty-shine-248403:my_new_datasset.TweetLiveSentiment\")\n\n )\n\np.run().wait_until_finish()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
a = []
dcc.append(a)
<|reserved_special_token_0|>
for i in range(m):
x = input().split(' ')
a = int(x[0])
b = int(x[1])
available.append([a, b])
dcc[a - 1].append(b)
dcc[b - 1].append(a)
for i in range(q):
x = input().split(' ')
l = int(x[0])
r = int(x[1])
s = int(x[2])
t = int(x[3])
target = []
target.append(s)
for j in range(l - 1, r):
x = []
for a in target:
x.append(a)
for y in dcc[a - 1]:
if [a, y] in available:
if available.index([a, y]) == j:
x.append(y)
if [y, a] in available:
if available.index([y, a]) == j:
x.append(y)
target = x
print(target)
<|reserved_special_token_1|>
n_m_q = input().split(' ')
n = int(n_m_q[0])
m = int(n_m_q[1])
q = int(n_m_q[2])
dcc = []
for i in range(n):
a = []
dcc.append(a)
available = []
for i in range(m):
x = input().split(' ')
a = int(x[0])
b = int(x[1])
available.append([a, b])
dcc[a - 1].append(b)
dcc[b - 1].append(a)
for i in range(q):
x = input().split(' ')
l = int(x[0])
r = int(x[1])
s = int(x[2])
t = int(x[3])
target = []
target.append(s)
for j in range(l - 1, r):
x = []
for a in target:
x.append(a)
for y in dcc[a - 1]:
if [a, y] in available:
if available.index([a, y]) == j:
x.append(y)
if [y, a] in available:
if available.index([y, a]) == j:
x.append(y)
target = x
print(target)
<|reserved_special_token_1|>
# Reads an undirected graph and answers reachability-style queries that are
# restricted to a contiguous window of edge indices.
#
# Input format (all 1-indexed):
#   n m q             -- vertex count, edge count, query count
#   m lines "a b"     -- edges in index order
#   q lines "l r s t" -- walk edges with index l..r (in order) starting from
#                        vertex s; t is read but unused in this version.
n_m_q=input().split(" ")
n=int(n_m_q[0])
m=int(n_m_q[1])
q=int(n_m_q[2])
# dcc[v-1] = adjacency list of vertex v.
dcc=[]
for i in range(n):
    a=[]
    dcc.append(a)
# available[j] = the j-th edge as [a, b], preserving input order so that
# list.index() below recovers an edge's index.
available=[]
for i in range(m):
    x=input().split(" ")
    a=int(x[0])
    b=int(x[1])
    available.append([a,b])
    dcc[a-1].append(b)
    dcc[b-1].append(a)
for i in range(q):
    x=input().split(" ")
    l=int(x[0])
    r=int(x[1])
    s=int(x[2])
    t=int(x[3])
    # target = multiset of vertices reachable so far; grows as each edge in
    # the window [l, r] is "activated" in index order.
    target=[]
    target.append(s)
    for j in range(l-1,r):
        x=[]
        for a in target:
            x.append(a)
            # If edge j touches a (in either orientation), its other endpoint
            # also becomes reachable at this step.
            # NOTE(review): available.index(...) returns the FIRST matching
            # edge, so duplicate edges never match an index j beyond their
            # first occurrence -- confirm whether duplicate edges can occur.
            for y in dcc[a-1]:
                if [a,y] in available:
                    if available.index([a,y])==j:
                        x.append(y)
                if [y,a] in available:
                    if available.index([y,a])==j:
                        x.append(y)
        target=x
    # Vertices may appear multiple times; t is not used to filter the output.
    print(target)
|
flexible
|
{
"blob_id": "062b6133ba4de24f7eaf041e4b6c039501b47b9a",
"index": 8873,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n a = []\n dcc.append(a)\n<mask token>\nfor i in range(m):\n x = input().split(' ')\n a = int(x[0])\n b = int(x[1])\n available.append([a, b])\n dcc[a - 1].append(b)\n dcc[b - 1].append(a)\nfor i in range(q):\n x = input().split(' ')\n l = int(x[0])\n r = int(x[1])\n s = int(x[2])\n t = int(x[3])\n target = []\n target.append(s)\n for j in range(l - 1, r):\n x = []\n for a in target:\n x.append(a)\n for y in dcc[a - 1]:\n if [a, y] in available:\n if available.index([a, y]) == j:\n x.append(y)\n if [y, a] in available:\n if available.index([y, a]) == j:\n x.append(y)\n target = x\n print(target)\n",
"step-3": "n_m_q = input().split(' ')\nn = int(n_m_q[0])\nm = int(n_m_q[1])\nq = int(n_m_q[2])\ndcc = []\nfor i in range(n):\n a = []\n dcc.append(a)\navailable = []\nfor i in range(m):\n x = input().split(' ')\n a = int(x[0])\n b = int(x[1])\n available.append([a, b])\n dcc[a - 1].append(b)\n dcc[b - 1].append(a)\nfor i in range(q):\n x = input().split(' ')\n l = int(x[0])\n r = int(x[1])\n s = int(x[2])\n t = int(x[3])\n target = []\n target.append(s)\n for j in range(l - 1, r):\n x = []\n for a in target:\n x.append(a)\n for y in dcc[a - 1]:\n if [a, y] in available:\n if available.index([a, y]) == j:\n x.append(y)\n if [y, a] in available:\n if available.index([y, a]) == j:\n x.append(y)\n target = x\n print(target)\n",
"step-4": "n_m_q=input().split(\" \")\nn=int(n_m_q[0])\nm=int(n_m_q[1])\nq=int(n_m_q[2])\ndcc=[]\nfor i in range(n):\n a=[]\n dcc.append(a)\navailable=[]\nfor i in range(m):\n x=input().split(\" \")\n a=int(x[0])\n b=int(x[1])\n available.append([a,b])\n dcc[a-1].append(b)\n dcc[b-1].append(a)\nfor i in range(q):\n x=input().split(\" \")\n l=int(x[0])\n r=int(x[1])\n s=int(x[2])\n t=int(x[3])\n target=[]\n target.append(s)\n for j in range(l-1,r):\n x=[]\n for a in target:\n x.append(a)\n for y in dcc[a-1]:\n if [a,y] in available:\n if available.index([a,y])==j:\n x.append(y)\n if [y,a] in available:\n if available.index([y,a])==j:\n x.append(y)\n target=x\n print(target)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import configparser
import shutil


def get_imagemagick_path():
    """Return the path to ImageMagick's ``convert`` binary.

    Prefers the ``convert`` entry in the ``[commands]`` section of
    ``settings/settings.ini``; falls back to looking ``convert`` up on
    PATH via :func:`shutil.which`. Returns ``None`` when neither source
    provides a path.
    """
    config = configparser.ConfigParser()
    # ConfigParser.read() silently ignores a missing file, leaving the
    # parser empty.
    config.read("settings/settings.ini")
    if "commands" in config:
        return config["commands"].get("convert", shutil.which("convert"))
    # Robustness fix: the original raised KeyError when the [commands]
    # section (or the whole settings file) was absent, defeating the
    # intended PATH fallback.
    return shutil.which("convert")
|
normal
|
{
"blob_id": "5fa9c9908d4aea507cf0ca8287a6b8e5b391470a",
"index": 9297,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_imagemagick_path():\n config = configparser.ConfigParser()\n config.read('settings/settings.ini')\n return config['commands'].get('convert', shutil.which('convert'))\n",
"step-3": "import configparser\nimport shutil\n\n\ndef get_imagemagick_path():\n config = configparser.ConfigParser()\n config.read('settings/settings.ini')\n return config['commands'].get('convert', shutil.which('convert'))\n",
"step-4": "import configparser\nimport shutil\n\n\ndef get_imagemagick_path():\n config = configparser.ConfigParser()\n config.read(\"settings/settings.ini\")\n return config['commands'].get('convert', shutil.which(\"convert\"))\n\n\n# try:\n# except KeyError:\n# EXIV2_PATH = shutil.which(\"exiv2\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.