seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
10815957206 | day = "Friday"
temperature = 30
raining = False
if day == "Saturday" and temperature > 27 and not raining:
print("Go swimming")
else:
print("Learn Python")
if (day == "Saturday" and temperature > 27) or not raining:
print("Go swimming")
else:
print("Learn Python")
#() are added because "and" has higher precedence than "or"
#it is easier for us to read
| btemovska/Section4 | TrueFalse.py | TrueFalse.py | py | 368 | python | en | code | 0 | github-code | 90 |
18331860249 | n = int(input())
l = sorted(map(int, input().split()))
cnt = 0
m = n-1
g = []
for i in range(n):
for j in range(i):
g.append(l[i]+l[j])
g = sorted(g,reverse = True)
for i in g:
while l[m] >= i:
m-=1
cnt += n-m-1
print(n*(n-1)*(n-2)//6-cnt) | Aasthaengg/IBMdataset | Python_codes/p02888/s621464113.py | s621464113.py | py | 253 | python | en | code | 0 | github-code | 90 |
6137487690 | from pathlib import Path
import numpy as np
import xarray as xr
from datetime import timedelta
# params
max_nan_consecutive = 31 # 最大连续缺测
max_nan_rate = 0.05 # 最大缺测占比
time_period = [1961, 2020] # 提取时间段
path = Path.cwd()
filename_output = 'observation_interpolation' + '_' + str(max_nan_consecutive) + '_' + str(
int(max_nan_rate * 100)) + '_' + str(time_period[0]) + '-' + str(time_period[-1])
path_out_root = path.joinpath(filename_output)
path_out_root.mkdir(exist_ok=True)
ds = xr.open_dataset('china_observation.nc')
n = ds.time.size # the number of sample
for i in ds.variables:
if i in ['id', 'time']:
continue
else:
path_out = path_out_root.joinpath(i)
path_out.mkdir(exist_ok=True)
data_arr = ds[i]
years = data_arr['time.year']
data_arr = data_arr.sel(time=((years >= time_period[0]) & (years <= time_period[1])))
data_arr = data_arr.interpolate_na(dim='time', max_gap=timedelta(days=int(np.floor(n * max_nan_rate))), limit=max_nan_consecutive)
data_arr = data_arr.dropna(dim='id')
if data_arr.size == 0:
print(f'{i} Too many nan, continue.')
continue
else:
for j in data_arr.id:
data_arr_ = data_arr.sel(id=j)
data_arr_ = data_arr_.to_series()
data_arr_.to_csv(path_out.joinpath(str(j.values) + '.csv'))
print(f'{i}, {j.values}, successful.')
| Koni2020/SWFU | Python/Spatial analysis/Batch extraction/extract_observation.py | extract_observation.py | py | 1,487 | python | en | code | 2 | github-code | 90 |
def insertionSort(A, n, g):
    """Gap-g insertion sort of A[:n] in place; returns the number of element shifts."""
    shifts = 0
    for pos in range(g, n):
        value = A[pos]
        slot = pos - g
        # Walk left in steps of g, moving larger elements one gap to the right.
        while slot >= 0 and A[slot] > value:
            A[slot + g] = A[slot]
            slot -= g
            shifts += 1
        A[slot + g] = value
    return shifts
def shellSort(A, n):
    """Shell sort A[:n] in place using the Knuth gap sequence (1, 4, 13, ...).

    Returns a tuple (cnt, m, G) where cnt is the total number of element
    shifts performed by the gapped insertion sorts, m is the number of gaps
    used, and G is the gap sequence in decreasing order.
    """
    cnt = 0
    # Build the Knuth sequence g_{k+1} = 3*g_k + 1 up to n, largest gap first.
    # (Removed the unused local `nn = n` from the original.)
    G = []
    g = 1
    while g <= n:
        G.insert(0, g)
        g = g * 3 + 1
    m = len(G)
    for g in G:
        cnt += insertionSort(A, n, g)
    return cnt, m, G
n = int(input())
A = [int(input()) for i in range(n)]
cnt, m, G = shellSort(A, n)
print(m)
print(" ".join(map(str, G)))
print(cnt)
for a in A:
print(a)
| Aasthaengg/IBMdataset | Python_codes/p02262/s138951593.py | s138951593.py | py | 614 | python | en | code | 0 | github-code | 90 |
18032767219 | import heapq
n,m = map(int,input().split())
abc = [list(map(int,input().split())) for _ in range(m)]
edges = [[] for _ in range(n)]
for a,b,dis in abc:
edges[a-1].append((b-1, dis))
edges[b-1].append((a-1, dis))
def dijkstra(edges, s):
    """Single-source shortest paths on a weighted adjacency list.

    edges[u] is a list of (v, weight) pairs.  Returns a list d where d[v]
    is the shortest distance from s to v, or -1 if v is unreachable.
    """
    # The original read the vertex count from a module-level global `n`;
    # deriving it from the adjacency list makes the function self-contained
    # without changing behavior for existing callers.
    n = len(edges)
    hq = []
    d = [-1] * n
    d[s] = 0
    heapq.heappush(hq, (0, s))
    while hq:
        d1, p = heapq.heappop(hq)
        for p2, d2 in edges[p]:
            # Relax edge p -> p2 and push the improved distance.
            if d[p2] == -1 or d[p2] > d1 + d2:
                d[p2] = d1 + d2
                heapq.heappush(hq, (d1 + d2, p2))
    return d
d = []
for i in range(n):
d.append(dijkstra(edges, i))
ans = 0
for a,b,dis in abc:
if dis > d[a-1][b-1]:
ans += 1
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03837/s142308561.py | s142308561.py | py | 686 | python | en | code | 0 | github-code | 90 |
1442175997 | import boto3
from typing import Tuple
from enum import Enum
from datetime import datetime
from time import mktime
from config import Config
from aws_xray_sdk.core import xray_recorder
ddb_client = boto3.client('dynamodb', region_name=Config.REGION_NAME)
class DownloadStatus(Enum):
    """Lifecycle states of a download; values mirror the strings stored in DynamoDB."""
    # BUG FIX: the original had a trailing comma after `NONE='NONE',`, which
    # made that member's value the 1-tuple ('NONE',) instead of the string
    # 'NONE'.  That breaks round-tripping via DownloadStatus(status_string)
    # and produces an invalid {'S': ('NONE',)} attribute when written to
    # DynamoDB by set_stream_status/set_video_status.
    NONE = 'NONE'
    ERROR = 'ERROR'
    IN_PROGRESS = 'IN_PROGRESS'
    COMPLETE = 'COMPLETE'
class StatusTable:
    """Thin wrapper around the DynamoDB status table for video downloads.

    Rows are keyed by (VideoId, SortKey); the SortKey namespaces per-stream
    metadata/status ('Stream::Format::<key>', 'Stream::Status::<key>')
    versus whole-file status ('File::Status').
    """
    @property
    def table_name(self)->str:
        # Name supplied at construction time.  NOTE(review): the data-plane
        # calls below use Config.STATUS_TABLE instead of this value -- confirm
        # whether the constructor argument is still meaningful.
        return self.__table_name
    def __init__(self, table_name:str) -> None:
        assert table_name is not None, "Missing table_name parameter"
        self.__table_name = table_name
    @xray_recorder.capture('write_stream_metadata')
    def write_stream_metadata(self,video_id:str,key:str,definition:dict):
        """Persist one stream-format definition under 'Stream::Format::<key>'."""
        assert video_id is not None, "Missing video_id"
        assert key is not None, "Missing s3_key"
        assert definition is not None, "Missing definition"
        item = {
            'VideoId': { 'S': video_id },
            'SortKey': {'S': 'Stream::Format::%s' % key}
        }
        # NOTE(review): this loop variable shadows the `key` parameter; safe
        # only because the parameter is not used again past this point.
        for key in definition.keys():
            value = definition[key]
            if isinstance(value, str):
                item[key] = {'S': value}
            elif isinstance(value,int):
                item[key]= {'N': str(value)}
            elif isinstance(value,dict):
                # NOTE(review): DynamoDB nested maps are normally written as
                # {'M': {...}}; confirm this flattened form is intentional.
                item[key] = {k: {'S':str(v)} for (k,v) in value.items()}
        ddb_client.put_item(
            TableName= Config.STATUS_TABLE,
            Item=item)
    @xray_recorder.capture('get_stream_status')
    def get_stream_status(self,video_id:str, key:str)->Tuple[DownloadStatus, datetime]:
        """Fetch (status, lastUpdated) for one stream; (NONE, None) if no row exists."""
        response = ddb_client.get_item(
            TableName=Config.STATUS_TABLE,
            Key={
                'VideoId': {'S': video_id},
                'SortKey': {'S': 'Stream::Status::%s' % key}
            },
            AttributesToGet=[
                'downloadStatus','lastUpdated'
            ])
        if not 'Item' in response:
            xray_recorder.put_annotation('stream_status','None')
            return (DownloadStatus.NONE, None)
        status = response['Item']['downloadStatus']['S']
        lastUpdated = response['Item']['lastUpdated']['N']
        # Annotations make the status searchable in X-Ray traces.
        xray_recorder.put_annotation('stream_status',status)
        xray_recorder.put_annotation('stream_lastUpdated',lastUpdated)
        return (DownloadStatus(status),datetime.fromtimestamp(float(lastUpdated)))
    @xray_recorder.capture('set_stream_status')
    def set_stream_status(self, video_id:str, key:str, status:DownloadStatus)->None:
        """Upsert the per-stream status row with the current UTC timestamp."""
        response = ddb_client.update_item(
            TableName=Config.STATUS_TABLE,
            Key={
                'VideoId': {'S': video_id},
                'SortKey': {'S': 'Stream::Status::%s' % key}
            },
            UpdateExpression="SET downloadStatus=:downloadStatus, lastUpdated=:lastUpdated",
            ExpressionAttributeValues={
                ':downloadStatus': {'S': status.value},
                ':lastUpdated': {'N': str(mktime(datetime.utcnow().timetuple())) }
            })
    @xray_recorder.capture('get_video_status')
    def get_video_status(self,video_id:str)->Tuple[DownloadStatus, datetime]:
        """Fetch (status, lastUpdated) for the whole file; (NONE, None) if no row exists."""
        response = ddb_client.get_item(
            TableName=Config.STATUS_TABLE,
            Key={
                'VideoId': {'S': video_id},
                'SortKey': {'S': 'File::Status'}
            },
            AttributesToGet=[
                'downloadStatus','lastUpdated'
            ])
        if not 'Item' in response:
            return (DownloadStatus.NONE, None)
        status = response['Item']['downloadStatus']['S']
        lastUpdated = response['Item']['lastUpdated']['N']
        return (DownloadStatus(status),datetime.fromtimestamp(float(lastUpdated)))
    @xray_recorder.capture('set_video_status')
    def set_video_status(self, video_id:str, status:DownloadStatus)->None:
        """Upsert the whole-file status row with the current UTC timestamp."""
        response = ddb_client.update_item(
            TableName=Config.STATUS_TABLE,
            Key={
                'VideoId': {'S': video_id},
                'SortKey': {'S': 'File::Status'}
            },
            UpdateExpression="SET downloadStatus=:downloadStatus, lastUpdated=:lastUpdated",
            ExpressionAttributeValues={
                ':downloadStatus': {'S': status.value},
                ':lastUpdated': {'N': str(mktime(datetime.utcnow().timetuple())) }
            })
| dr-natetorious/Dissertation | cdk/src/pipeline/collection/status.py | status.py | py | 3,958 | python | en | code | 0 | github-code | 90 |
class Solution:
    def removeDuplicates(self, nums):
        """
        Remove duplicates from a sorted list in place (LeetCode 26).

        :type nums: List[int]
        :rtype: int -- k, the count of unique values; nums[:k] holds them in order
        """
        # BUG FIX: the original swap-based version could keep a duplicate of
        # the first element (it returned 6 for [0,0,1,1,1,2,2,3,3,4] with the
        # prefix [0,0,1,2,3,4]; the correct answer is 5 with prefix
        # [0,1,2,3,4]).  Standard two-pointer: `write` is the index of the
        # last unique value copied so far.
        if not nums:
            return 0
        write = 0
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
        return write + 1
solution = Solution()
nums = [0,0,1,1,1,2,2,3,3,4]
res = solution.removeDuplicates(nums)
print(res)
print(nums)
| wwg377655460/DataStructureToLeetCode | problem_26.py | problem_26.py | py | 619 | python | en | code | 0 | github-code | 90 |
class Solution(object):
    def isSubsequence(self, s, t):
        """
        Return True if s is a subsequence of t.

        :type s: str
        :type t: str
        :rtype: bool
        """
        # BUG FIX: the original DP indexed s with the loop variable that
        # ranged over len(t) (s[j-1] with j in 1..len(t)) and t with the one
        # ranging over len(s), raising IndexError whenever len(t) > len(s);
        # it also used Python-2-only xrange.  A greedy scan is sufficient:
        # each character of s must appear in t, in order.  `ch in it`
        # consumes the shared iterator, so order is enforced.
        it = iter(t)
        return all(ch in it for ch in s)
| sangreal/PyLintcode | py/IsSubsequence.py | IsSubsequence.py | py | 433 | python | en | code | 0 | github-code | 90 |
20616060295 | from itertools import product
text = input()
k,l,t = map(int,input().split(" "))
chars = "ACGT"
def doesFormClump(pattern):
    # Return True if `pattern` occurs at least t times inside some length-l
    # window of the module-level `text` (clump finding over k-mers).
    # NOTE(review): str.count only counts NON-overlapping occurrences, so
    # overlapping matches (e.g. "AA" in "AAA" counts once, not twice) are
    # undercounted -- confirm whether overlapping occurrences should count
    # toward the threshold t.
    for i in range(0,len(text)-l+1):
        subText = text[i:i+l]
        freq = subText.count(pattern)
        if freq>=t:
            return True
    return False
for i in product(chars,repeat=k):
kmer = "".join(i)
if doesFormClump(kmer):
print(kmer)
| Shadat-tonmoy/BioinformaticsRosalindProblems | Lab Tasks/Day - 03/Subtask1.py | Subtask1.py | py | 399 | python | en | code | 0 | github-code | 90 |
18301530499 | from collections import deque
N = int(input())
A = deque(list(map(int, input().split())))
ans = 0
cnt = 1
while A:
a = A.popleft()
if cnt == a:
cnt += 1
continue
else:
ans += 1
if ans == N:
print(-1)
else:
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02832/s064507519.py | s064507519.py | py | 261 | python | en | code | 0 | github-code | 90 |
32819140385 | import pygame
from pygame.locals import *
from random import randint
def randpoint(screen):
    """Return a uniformly random (x, y) point inside the given surface.

    BUG FIX: pygame's Surface.get_size() returns (width, height); the
    original unpacked it as `h, w`, so on a non-square window the points
    were drawn from a (height x width) rectangle -- part of the window was
    never covered and points could fall outside it.
    """
    w, h = screen.get_size()
    return randint(0, w - 1), randint(0, h - 1)
def main():
    """Open a 640x480 window, draw random red lines, and wait for quit/Escape."""
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    pygame.display.set_caption("My first drawing.")
    screen.fill((200, 200, 255))
    # range(1, 20) draws 19 lines (not 20) -- presumably intentional; verify.
    for i in range(1,20):
        start = randpoint(screen)
        stop = randpoint(screen)
        pygame.draw.line(screen, (255,0,0), start, stop, 1)
    pygame.display.flip()
    # Block until the window is closed or Escape is pressed.
    while 1:
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                return
if __name__ == '__main__':
try:
main()
finally:
pygame.quit()
| geofmatthews/csci321 | PygameDemos/0100lines/drawlines.py | drawlines.py | py | 819 | python | en | code | 2 | github-code | 90 |
35865584214 | import datetime
import csv
def parseLog(filename, searchToken):
    """Count lines of a log file containing `searchToken`, bucketed by hour.

    Returns a dict mapping the text before the first ':' of each matching
    line (here 'YYYY-MM-DD HH') to the number of matching lines.
    """
    results = {}
    # BUG FIX: the original opened the file without ever closing it; a
    # context manager releases the handle even if iteration raises.
    with open(filename) as file:
        for line in file:
            if searchToken in line:
                components = line.split(":")
                date = components[0]
                results[date] = results.get(date, 0) + 1
    return results
def sortDates(dateStrings):
    """Chronologically sort 'YYYY-MM-DD HH' strings and return them re-formatted."""
    fmt = "%Y-%m-%d %H"
    parsed = sorted(datetime.datetime.strptime(stamp, fmt) for stamp in dateStrings)
    return [moment.strftime(fmt) for moment in parsed]
# MAIN PROCESS STARTS HERE
resultsForConnectionError = parseLog("master.log", "Error connecting to loggly")
resultsForServerError = parseLog("master.log", "[error]: <html>")
nonUniqueDates = list(resultsForConnectionError.keys()) + list(resultsForServerError.keys())
uniqueDates = list(set(nonUniqueDates))
sortedDates = sortDates(uniqueDates)
with open("Error Counter.csv", "w") as output:
writer = csv.writer(output)
writer.writerow(("date", "connection errors", "server errors"))
for date in sortedDates:
writer.writerow((date, resultsForConnectionError.get(date, 0), resultsForServerError.get(date, 0)))
output.close() | Garethh-M/Bug-Counter | bug finder.py | bug finder.py | py | 1,218 | python | en | code | 0 | github-code | 90 |
18103285418 | from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import matplotlib.pyplot as plt
# here we are working on Tensorflow version 2.1.0 so we need to write tensorflow.keras.
#keras is in built function in Tensorflow.
import os
import tensorflow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
from IPython.display import SVG, Image
train_location = "/content/drive/MyDrive/Hand Written/DataSet"
test_location = "/content/drive/MyDrive/Hand Written/DataSet"
filepath = '/content/drive/MyDrive/Hand Written/CNN/VGG16/Model/Hand_written_VGG16_model1.h5'
#from tensorflow.keras.models import load_model
#Detection=load_model(filepath)
preprocess_input = tensorflow.keras.applications.mobilenet.preprocess_input
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
img_size=224
batch_size=25
num_class=49
# Complete Dataset images can be loaded using ImageDataGenerator function
datagen_train=ImageDataGenerator(horizontal_flip=True)
train_generator=datagen_train.flow_from_directory(train_location,target_size=(img_size,img_size),batch_size=batch_size,class_mode='categorical',shuffle=True)
datagen_test=ImageDataGenerator(horizontal_flip=True)
validation_generator=datagen_test.flow_from_directory(test_location,target_size=(img_size,img_size),batch_size=batch_size,class_mode='categorical',shuffle=True)
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
vgg = VGG16(input_shape=[img_size,img_size] + [3], weights='imagenet', include_top=False)
vgg.summary()
for layer in vgg.layers:
layer.trainable = False
x = Flatten()(vgg.output)
prediction = Dense(num_class, activation='softmax')(x)
detection = Model(inputs=vgg.input, outputs=prediction)
detection.summary()
optimum=Adam(learning_rate=0.005)
detection.compile(optimizer=optimum,loss='categorical_crossentropy',metrics=['accuracy'])
print(train_generator.class_indices)
TRAIN_STEPS=train_generator.n//train_generator.batch_size
TRAIN_STEPS
VALIDATION_STEPS=validation_generator.n//validation_generator.batch_size
VALIDATION_STEPS
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
factor=0.5,
patience=2,
verbose=1,
mode='max',
min_lr=0.00001)
callbacks_list = [checkpoint, reduce_lr]
#callbacks_list = [checkpoint]
#history = detection.fit_generator(train_generator,
history = detection.fit(train_generator,
steps_per_epoch=TRAIN_STEPS,
#class_weight=class_weights,
validation_data=validation_generator,
validation_steps=VALIDATION_STEPS,
epochs=5,
verbose=1,
callbacks=callbacks_list
)
# get the metric names so I can use evaulate_generator
detection.metrics_names
# here the the last epoch will be used.
detection.evaluate_generator(validation_generator,steps=TRAIN_STEPS)
# display the loss and accuracy curves
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.figure()
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.show()
epochs
loss
val_loss
acc
val_acc
| sanal-l-s/handwritten_equation_solver | Backend/Model/CNN_VGG16_Hand_written.py | CNN_VGG16_Hand_written.py | py | 4,384 | python | en | code | 0 | github-code | 90 |
41900913864 | from collections import deque
import sys
def maxcost(graph, src, t):
    """Longest-path cost from src to t in a DAG.

    graph is an adjacency list of (vertex, edge_cost) pairs.  Returns -1
    when t is unreachable from src.
    """
    visited = set()
    postorder = deque()

    def dfs(node):
        # Post-order DFS: a vertex is appended only after all its successors.
        visited.add(node)
        for succ, _ in graph[node]:
            if succ not in visited:
                dfs(succ)
        postorder.append(node)

    dfs(src)
    best = [-1] * len(graph)
    best[src] = 0
    # Popping from the right yields reverse post-order, i.e. topological order.
    while postorder:
        node = postorder.pop()
        if node == t:
            return best[node]
        if best[node] != -1:
            for succ, weight in graph[node]:
                candidate = best[node] + weight
                if best[succ] < candidate:
                    best[succ] = candidate
    return best[t]
if __name__ == '__main__':
inp = sys.stdin.readline
n, m = map(int, inp().split())
graph = [[] for _ in range(n)]
for i in range(m):
u, v, c = map(int, inp().split())
graph[u-1].append((v-1, c))
sys.stdout.write(str(maxcost(graph, 0, n-1)))
| SingularityUrBrain/math-programming | MaxCostPath/solution.py | solution.py | py | 886 | python | en | code | 2 | github-code | 90 |
13674482940 | """
The "report.py" saves the optimization results in given path as spreadsheet
"""
__author__ = "Zhengjie You"
__copyright__ = "2020 TUM-EWK"
__credits__ = []
__license__ = "GPL v3.0"
__version__ = "1.0"
__maintainer__ = "Zhengjie You"
__email__ = "zhengjie.you@tum.de"
__status__ = "Development"
from datetime import datetime
import os
import pandas as pd
def save_results(ems, path):
    """Save the optimization results of an ems model as an Excel spreadsheet.

    Args:
        - ems: ems model instance (dict-like with an 'optplan' entry)
        - path: directory where the result file is saved, e.g. path = r'tests\data'
    """
    try:
        os.mkdir(path)
    except OSError:
        # Directory already exists (or could not be created) -- reuse it.
        # (Fixed the "Opmtization result are" typo in the original message.)
        print("Optimization results are being saved in %s" % path)
    else:
        print("Successfully created the directory %s " % path)
    now = datetime.now().strftime('%Y%m%dT%H%M')
    resultfile = os.path.join(path, 'result_optimization_{}.xlsx'.format(now))
    df = pd.DataFrame(data=ems['optplan'])
    # Context manager replaces the deprecated ExcelWriter.save() (removed in
    # pandas 2.0) and guarantees the file is closed even if to_excel raises.
    with pd.ExcelWriter(resultfile) as writer:
        df.to_excel(writer, sheet_name='operation_plan', merge_cells=False)
| tum-ewk/OpenTUMFlex | opentumflex/optimization/report.py | report.py | py | 1,087 | python | en | code | 20 | github-code | 90 |
33672553649 | from collections import OrderedDict
from cab_driver import CabDriver
from cab_rider import CabRider
if __name__ == "__main__":
# Suppose entries are comma separated values
# Take Data inputs
entries = int(input())
drivers = OrderedDict()
users = OrderedDict()
for _ in range(entries):
driver_name, driver_rating, user_name, user_rating = map(str, input().split(" "))
driver_rating = int(driver_rating)
user_rating = int(user_rating)
# Get the driver object for {driver_name}
curr_driver = drivers.get(driver_name, None)
if (curr_driver is None):
curr_driver = drivers[driver_name] = CabDriver(driver_name)
# Get the user object for {user_name}
curr_user = users.get(user_name, None)
if curr_user is None:
curr_user = users[user_name] = CabRider(user_name)
# Now update the rating of both {curr_driver} and {curr_user}
curr_driver.update_rating(user_rating)
curr_user.update_rating(driver_rating)
# Add bad drivers or users
if driver_rating == 1 or user_rating == 1:
curr_user.add_bad_driver(driver_name)
curr_driver.add_bad_user(user_name)
for user in users.values():
print(str(user))
for driver in drivers.values():
print(str(driver)) | jalotra/Youtube-LLD | src/main.py | main.py | py | 1,409 | python | en | code | 0 | github-code | 90 |
21459071176 | from importlib.resources import is_resource
import astropy.io.ascii as asc
import pandas as pd
import numpy as np
from scipy.stats import norm
import astropy.units as u
import dgf.galaxy_utils.isochrone as isochroneModel
from IPython import embed
# ------- DEFAULT VARIABLES ------- #
# ic_file = "Isochrones/iso_age_12_feh_-1.8.txt"
# lf_file = "Isochrones/lf_age_12_feh_-1.8_hires.txt"
# fg_file = "Foregrounds/formatted_gcd_dwarf.h5"
# -------------------------------- #
def create_dwarf(
x,
y,
ic_file,
lf_file,
distance=100 * u.kpc,
coordinates=(0, 0),
hrv=0,
dhrv=2,
dhrv_scale=0.4,
metal=-1.8,
metal_scale=0.2,
):
N = len(x)
x = x.to(u.kpc)
y = y.to(u.kpc)
distance = distance.to(u.kpc)
# Generate dwarf components
relative_dec = ((np.arctan((y.value / (2 * (distance.value))))) * u.rad).to(
u.degree
)
relative_ra = (
(np.arctan((x.value / (2 * (distance.to(u.kpc).value)))))
* np.cos(relative_dec)
* u.rad
).to(u.degree)
ra = coordinates[0] + relative_ra
dec = coordinates[1] + relative_dec
isochrone = asc.read(ic_file, format="commented_header", header_start=13)
lum_func = asc.read(lf_file, format="commented_header", header_start=13)
cmd = isochroneModel.sample(isochrone, lum_func, N, noise=0.05, dist=distance)
rmag = np.array(cmd["mag"])
color = np.array(cmd["color"])
gmag = np.array(cmd["mag"]) + np.array(cmd["color"])
df = pd.DataFrame(
{
"RA": ra,
"DEC": dec,
"HRV": hrv + norm.rvs(0, dhrv, N),
"dHRV": norm.rvs(dhrv, dhrv_scale, N),
"[Fe/H]": norm.rvs(metal, metal_scale, N),
"d[Fe/H]": 0.1 * np.ones(N),
"r": rmag,
"g": gmag,
"color": color,
"member": np.ones(N),
}
)
return df
def create_observation(
dwarf_df,
fg_file,
dwarf_coord,
dhrv,
dhrv_scale,
slit=None,
mag_limit=None,
cmd_window=False,
ic_file=None,
window=0.2,
distance=100 * u.kpc,
slit_sample_count=None,
):
fg_data = asc.read(fg_file, format="commented_header")
mw_ra = fg_data["RAJ2000"]
mw_dec = fg_data["DECJ2000"]
mw_hrv = fg_data["HRV"]
mw_dhrv = fg_data["errHrv"]
mw_c = fg_data["g-r"]
mw_r = mw_g = None
if "r" in fg_data.colnames:
mw_r = fg_data["r"]
mw_g = fg_data["g-r"] + mw_r
elif "g" in fg_data.colnames:
mw_g = fg_data["g"]
mw_r = mw_g - fg_data["g-r"]
mw_feh = fg_data["[M/H]"]
mw_feh_err = fg_data["errMet"]
N = len(mw_ra)
mw_df = pd.DataFrame(
{
"RA": mw_ra,
"DEC": mw_dec,
"HRV": mw_hrv,
"dHRV": mw_dhrv,
"[Fe/H]": mw_feh,
"d[Fe/H]": mw_feh_err,
"r": mw_r,
"g": mw_g,
"color": mw_c,
"member": np.zeros(N),
}
)
merged_df = dwarf_df.merge(mw_df, how="outer")
if slit != None:
merged_df = merged_df.loc[
(merged_df.RA < (dwarf_coord[0] + (slit[0].to(u.deg))))
& (merged_df.RA > (dwarf_coord[0] - (slit[0].to(u.deg))))
& (merged_df.DEC < (dwarf_coord[1] + (slit[1].to(u.deg))))
& (merged_df.DEC > (dwarf_coord[1] - (slit[1].to(u.deg))))
]
if mag_limit != None:
merged_df = merged_df.loc[merged_df.r < mag_limit]
if cmd_window:
isochrone = asc.read(ic_file, format="commented_header", header_start=13)
iso_r = 5 * np.log10(distance.to(u.pc).value) - 5 + isochrone["rmag"]
iso_c = isochrone["gmag"] - isochrone["rmag"]
mask = np.zeros(len(merged_df.color))
for i in np.arange(len(merged_df.color)):
distances = np.sqrt(
(merged_df.iloc[i]["r"] - iso_r) ** 2
+ (merged_df.iloc[i]["color"] - iso_c) ** 2
)
if np.min(distances) < window:
mask[i] = 1
merged_df = merged_df.iloc[mask == 1]
if slit_sample_count != None:
random_indices = np.random.randint(
low=0, high=len(merged_df.r), size=slit_sample_count
)
merged_df = merged_df.iloc[random_indices]
return merged_df
### Gaia Challenge Data Helper Functions ###
def format_gcd(data_path, outfile="formatted_data.hdf"):
data = asc.read(data_path)
# err = asc.read(err_path)
data.rename_columns(
["col1", "col2", "col3", "col4", "col5", "col6"],
["x", "y", "z", "vx", "vy", "vz"],
)
# err.rename_columns(
# ["col1", "col2", "col3", "col4", "col5", "col6"],
# ["x_err", "y_err", "z_err", "vx_err", "vy_err", "vz_err"],
# )
data.write(outfile, overwrite=True)
### Misc
def diameterToAngle(diameter, distance, dec=0 * u.deg):
    # Angle on the sky subtended by an object of physical `diameter` at
    # `distance`, scaled by cos(dec) (an RA-like angle at declination `dec`).
    # NOTE(review): arctan(diameter / (2*distance)) is the HALF-angle of the
    # object -- confirm whether the full angular diameter (2x this) was
    # intended; the same convention is used when this module builds dwarf
    # coordinates, so the two at least agree with each other.
    angular_size = (
        (np.arctan((diameter / (2 * (distance.to(u.kpc)))).decompose().value))
        * np.cos(dec)
        * u.rad
    ).to(u.degree)
    return angular_size
| jaybaptista/satellites | dgf/galaxy_utils/generate.py | generate.py | py | 5,080 | python | en | code | 0 | github-code | 90 |
74049300457 | import unittest
import onitama as oni
import ai
from constants import *
from evaluators import *
class TestGame(unittest.TestCase):
def setUp(self):
self.game = oni.Game([oni.TIGER, oni.TIGER, oni.TIGER, oni.TIGER, oni.TIGER])
self.ai = ai.create_ai(version='unmove', game=self.game)
self.ai.set_game_as_root(self.game)
self.ai2 = ai.create_ai(version='copy')
self.ai2.set_game_as_root(self.game)
def test_set_root(self):
for card in oni.ALL_CARDS:
game = oni.Game([card]*5)
self.ai.set_game_as_root(game)
self.ai2.set_game_as_root(game)
if game.active_player.color() == 'red':
start_player = ai.RED
elif game.active_player.color() == 'blue':
start_player = ai.BLUE
else:
raise Exception
self.assertEqual(start_player,self.ai.active_player)
self.assertEqual(card.name(),self.ai.card_data[0].name)
moves = self.ai2.next_moves(self.ai2.root)
for move in moves:
self.assertEqual(start_player,move.player)
def test_search(self):
self.ai.mock_search(depth=3)
self.ai2.mock_search(depth=3)
for a in [self.ai, self.ai2]:
self.assertEqual(len(a.get_nodes(depth=0)), 1)
self.assertEqual(len(a.get_nodes(depth=1)), 10)
self.assertEqual(len(a.get_nodes(depth=2)), 100)
self.assertEqual(len(a.get_nodes(depth=3)), 80*12 + 16*8)
self.assertEqual(
len(list(filter(lambda x: x.end, a.get_nodes(depth=2)))), 4
)
def test_piece_set(self):
def all_pieces():
return self.ai.pieces[REDPAWN]|self.ai.pieces[BLUEPAWN]|self.ai.pieces[REDKING]|self.ai.pieces[BLUEKING]
for move in self.ai.next_moves():
self.ai.do_move(move, self.ai.root)
# check pieces
for i, piece in enumerate(self.ai.board):
if piece != EMPTY:
self.assertTrue(i in self.ai.pieces[piece])
else:
i not in all_pieces()
self.ai.undo_move(move)
for i, piece in enumerate(self.ai.board):
if piece != EMPTY:
self.assertTrue(i in self.ai.pieces[piece])
else:
i not in all_pieces()
def test_mobility_eval(self):
game = oni.Game([oni.TIGER, oni.MONKEY, oni.CRAB, oni.BOAR, oni.MANTIS])
self.ai.set_game_as_root(game)
eval = get_evaluator(self.ai)
eval.true_mobility_factor = 2.0
# 5 moves for TIGER
# 8 moves for MONKEY
# 5 moves for CRAB
# 5 moves for BOAR
# 8 moves for MANTIS
# RED: 2*13 + 5+5+8 = 44
# BLUE: 2*10 + 5+8+8 = 41
self.assertEqual(eval.mobility(), 3.0)
eval.pawn_weight, eval.mobility_weight = 1,1
self.assertEqual(eval.evaluate(RED), 3.0)
self.assertEqual(eval.evaluate(BLUE), -3.0)
def test_negamax(self):
game = oni.Game([oni.TIGER, oni.MONKEY, oni.CRAB, oni.BOAR, oni.MANTIS])
self.ai.set_game_as_root(game)
score = self.ai.negamax(node=self.ai.root, depth=5)
curr = self.ai.root
# Climb down our generated search tree to verify
# the correctness of negamax
for i in range(5):
curr = max(curr.children, key=lambda x: -x.eval)
sign = -1 if i % 2 == 0 else 1
self.assertEqual(score, sign*curr.eval)
def test_alphabeta(self):
game = oni.Game([oni.TIGER, oni.MONKEY, oni.CRAB, oni.BOAR, oni.MANTIS])
self.ai.set_game_as_root(game)
score = self.ai.alphabeta(
alpha=-float('inf'),
beta=float('inf'),
depth=4,
node=self.ai.root,
)
curr = self.ai.root
path = [curr]
for i in range(4):
children = [node for node in curr.children if node.eval != None]
curr = max(children, key=lambda x: -x.eval)
path.append(curr)
nega_score = self.ai.negamax(
node=self.ai.root,
depth=4
)
curr = self.ai.root
nega_path = [curr]
for i in range(4):
curr = max(curr.children, key=lambda x: -x.eval)
nega_path.append(curr)
self.assertEqual(score, nega_score)
def equal(move1, move2):
if move1 is None:
return move2 is None
elif move2 is None:
return move1 is None
else:
for attr in move1.__slots__:
if not getattr(move1, attr) == getattr(move2, attr):
return False
return True
for i in range(5):
if path[i].prev_move is None:
self.assertTrue(nega_path[i].prev_move is None)
elif nega_path[i].prev_move is None:
self.assertTrue(path[i].prev_move is None)
else:
self.assertTrue(equal(path[i].prev_move, nega_path[i].prev_move))
move = self.ai.find_move(depth=3)
if __name__ == '__main__':
unittest.main()
| arduy/onitama | aitests.py | aitests.py | py | 5,248 | python | en | code | 4 | github-code | 90 |
29102168171 | from discord.ext import commands
import time
class Utility(commands.Cog):
    """Utility commands cog (currently just a latency check)."""
    def __init__(self, client):
        self.client = client
    @commands.command()
    async def ping(self, ctx):
        # "http" latency: wall-clock time for the ctx.send round trip.
        start = time.perf_counter()
        message = await ctx.send("Ping...")
        end = time.perf_counter()
        duration = (end - start) * 1000
        # "WS" latency: gateway heartbeat latency reported by the client, in ms.
        await message.edit(content=f'Pong! \nhttp - {round(duration)}ms \nWS - {round(self.client.latency * 1000)} ms')
async def setup(client):
await client.add_cog(Utility(client))
| nipunrautela/KoDS-Bot | cogs/utility.py | utility.py | py | 537 | python | en | code | 1 | github-code | 90 |
25168395565 | import csv
from decimal import Decimal
from django.contrib.auth.decorators import login_required
from django.template.loader import get_template
from django.views.generic import ListView
from .models import *
from .forms import *
from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
from django.db.models import Sum
from django.http import HttpResponse
from django.utils import timezone
from .utils import render_to_pdf
now = timezone.now()
def home(request):
return render(request, 'crm/home.html',
{'crm': home})
@login_required
def customer_list(request):
customer = Customer.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/customer_list.html',
{'customers': customer})
@login_required
def customer_edit(request, pk):
customer = get_object_or_404(Customer, pk=pk)
if request.method == "POST":
# update
form = CustomerForm(request.POST, instance=customer)
if form.is_valid():
customer = form.save(commit=False)
customer.updated_date = timezone.now()
customer.save()
customer = Customer.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/customer_list.html',
{'customers': customer})
else:
# edit
form = CustomerForm(instance=customer)
return render(request, 'crm/customer_edit.html', {'form': form})
@login_required
def customer_delete(request, pk):
customer = get_object_or_404(Customer, pk=pk)
customer.delete()
return redirect('crm:customer_list')
@login_required
def customer_new(request):
if request.method == "POST":
form = CustomerForm(request.POST)
if form.is_valid():
customer = form.save(commit=False)
customer.created_date = timezone.now()
customer.save()
customer = Customer.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/customer_list.html',
{'customers': customer})
else:
form = CustomerForm()
# print("Else")
return render(request, 'crm/customer_new.html', {'form': form})
@login_required
def service_list(request):
services = Service.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/service_list.html', {'services': services})
@login_required
def service_new(request):
if request.method == "POST":
form = ServiceForm(request.POST)
if form.is_valid():
service = form.save(commit=False)
service.created_date = timezone.now()
service.save()
services = Service.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/service_list.html',
{'services': services})
else:
form = ServiceForm()
# print("Else")
return render(request, 'crm/service_new.html', {'form': form})
@login_required
def service_edit(request, pk):
service = get_object_or_404(Service, pk=pk)
if request.method == "POST":
form = ServiceForm(request.POST, instance=service)
if form.is_valid():
service = form.save()
# service.customer = service.id
service.updated_date = timezone.now()
service.save()
services = Service.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/service_list.html', {'services': services})
else:
# print("else")
form = ServiceForm(instance=service)
return render(request, 'crm/service_edit.html', {'form': form})
@login_required
def service_delete(request, pk):
service = get_object_or_404(Service, pk=pk)
service.delete()
return redirect('crm:service_list')
@login_required
def product_list(request):
product = Product.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/product_list.html', {'products': product})
@login_required
def product_new(request):
if request.method == "POST":
form = ProductForm(request.POST)
if form.is_valid():
product = form.save(commit=False)
product.created_date = timezone.now()
product.save()
products = Product.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/product_list.html',
{'products': products})
else:
form = ProductForm()
# print("Else")
return render(request, 'crm/product_new.html', {'form': form})
@login_required
def product_edit(request, pk):
    """Edit an existing Product; on valid POST save it and render the product list.

    On GET (or invalid POST) render the edit form bound to the product.
    """
    # BUG FIX: the original looked up the Product pk in the *Service* model
    # (get_object_or_404(Service, pk=pk)), so edits either 404'd or bound the
    # ProductForm to the wrong object.
    product = get_object_or_404(Product, pk=pk)
    if request.method == "POST":
        form = ProductForm(request.POST, instance=product)
        if form.is_valid():
            product = form.save()
            product.updated_date = timezone.now()
            product.save()
            products = Product.objects.filter(created_date__lte=timezone.now())
            return render(request, 'crm/product_list.html', {'products': products})
    else:
        form = ProductForm(instance=product)
    return render(request, 'crm/product_edit.html', {'form': form})
@login_required
def product_delete(request, pk):
product = get_object_or_404(Product, pk=pk)
product.delete()
return redirect('crm:product_list')
@login_required
def summary(request, pk):
customer = get_object_or_404(Customer, pk=pk)
customers = Customer.objects.filter(created_date__lte=timezone.now())
services = Service.objects.filter(cust_name=pk)
products = Product.objects.filter(cust_name=pk)
sum_service_charge = Service.objects.filter(cust_name=pk).aggregate(Sum('service_charge'))
sum_product_charge = Product.objects.filter(cust_name=pk).aggregate(Sum('charge'))
return render(request, 'crm/summary.html', {'customers': customers,
'products': products,
'services': services,
'sum_service_charge': sum_service_charge,
'sum_product_charge': sum_product_charge})
class GeneratePDF(ListView):
def get(self, request, *args, **kwargs):
template = get_template('crm/invoice.html')
context = {
"invoice_id": 123,
}
html = template.render(context)
pdf = render_to_pdf('crm/invoice.html', context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
filename = "Invoice_%s.pdf" % ("12341231")
content = "inline; filename='%s'" % (filename)
download = request.GET.get("download")
if download:
content = "attachment; filename='%s'" % (filename)
response['Content-Disposition'] = content
return response
return HttpResponse("Not found")
def get_csv(request):
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response)
writer.writerow(['Customer Name', 'Organization', 'Role', 'Phone Number', 'Email', 'Building and Room',
'Account Number'])
for customer in Customer.objects.all().values_list('cust_name', 'organization', 'role', 'phone_number', 'email',
'bldgroom', 'account_number'):
writer.writerow(customer)
response['Content-Disposition'] = 'attachment; filename = customers.csv'
return response
'''
def getPDF():
customer = Customer.objects.all()
service = Service.objects.all()
product = Product.objects.all()
today = timezone.now()
params = {
'today': today,
'customer': customer,
'service': service,
'product': product
}
return Render.render('pdf.html', params)
'''
| lgkiemde/Maverick-Food-Service | crm/views.py | views.py | py | 8,040 | python | en | code | 0 | github-code | 90 |
18443597469 | def gcd(x,y):
if x < y:
x,y = y,x
if x%y == 0:
return y
return gcd(y,x%y)
n=int(input())
a=list(map(int,input().split()))
ans=a[0]
for i in range(1,len(a)):
ans = gcd(ans,a[i])
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03127/s086401964.py | s086401964.py | py | 239 | python | en | code | 0 | github-code | 90 |
37930850394 | from typing import List
class Solution:
# Time Complexity:
# O(logn) in best case, and O(n) in worst case.
# With duplicates, sometimes we do not know which way to explore.
# Worst Case: All equal in nums and target does not belong to nums.
# Space Complexity: O(1).
def search(self, nums: List[int], target: int) -> bool:
def _is_in_first(elem: int):
return elem >= nums[0]
def _check_and_update(mid_index: int, elem: int, start: int, end: int):
if elem < nums[mid_index]:
end = mid_index - 1
else:
start = mid_index + 1
return start, end
start, end, linear_search = 0, len(nums) - 1, False
while start <= end:
mid = end - (end - start) // 2
if nums[mid] == target:
return True
# We do not know which way to explore if since mid could be both in
# first and second sorted array.
if nums[mid] == nums[0]:
linear_search = True
break
mid_in_first, target_in_first = (
_is_in_first(elem=nums[mid]),
_is_in_first(elem=target),
)
if mid_in_first and target_in_first:
start, end = _check_and_update(
mid_index=mid, elem=target, start=start, end=end
)
elif mid_in_first and not target_in_first:
start = mid + 1
elif not mid_in_first and target_in_first:
end = mid - 1
else:
# mid in second, and target in second
start, end = _check_and_update(
mid_index=mid, elem=target, start=start, end=end
)
if linear_search:
# Include end also.
for i in range(start, end + 1):
if nums[i] == target:
return True
return False
| saubhik/leetcode | problems/search_in_rotated_sorted_array_ii.py | search_in_rotated_sorted_array_ii.py | py | 1,999 | python | en | code | 3 | github-code | 90 |
17634544917 | import re,urllib
from resources.lib.libraries import client
def resolve(url):
try:
id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
result = client.request('http://www.youtube.com/watch?v=%s' % id)
message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
message = ''.join(message)
alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
if re.search('LIVE_WATCHING_NOW', result):
url = live(result, id)
if not url == None: return url
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
def live(result, id):
try:
hls = re.compile('"hlsvp" *: *"(.+?)"').findall(result)
if len(hls) == 0:
url = 'https://www.youtube.com/watch?v=%s' % id
url = 'http://translate.googleusercontent.com/translate_c?anno=2&hl=en&sl=mt&tl=en&u=%s' % url
hls = client.request(url)
hls = re.compile('"hlsvp" *: *"(.+?)"').findall(hls)
url = urllib.unquote(hls[0]).replace('\\/', '/')
result = client.request(url)
result = result.replace('\n','')
url = re.compile('RESOLUTION *= *(\d*)x\d{1}.+?(http.+?\.m3u8)').findall(result)
url = [(int(i[0]), i[1]) for i in url]
url.sort()
url = url[-1][1]
return url
except:
return
| mrknow/filmkodi | plugin.video.fanfilm/resources/lib/resolvers/youtube.py | youtube.py | py | 1,569 | python | en | code | 66 | github-code | 90 |
72092532776 | from kiwoom import Kiwoom
from dbwrapper import MongoDB
from pdreader import PDReader
from webscraper import SejongScraper
from processtracker import ProcessTracker, timeit
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import os, time, json
import _pickle as pickle
from pathlib import Path
class Gobble(ProcessTracker):
@timeit
def __init__(self):
super().__init__() # initialize ProcessTracker
self.starting()
self.app = QApplication(["kiwoom.py"])
self.kiwoom = Kiwoom()
self.kiwoom.comm_connect()
@timeit
def start_db(self):
pickle_in = open("db-info.pickle", "rb")
db_info = pickle.load(pickle_in)
user = db_info["USER"]
pw = db_info["PW"]
ip = db_info["IP"]
db = db_info["DB"]
self.connecting_db()
self.db = MongoDB(user, pw, ip, db)
self.connect_successful()
@timeit
def step_one_kiwoom(self):
self.step_one()
for market_type in ["0", "10"]:
pickle_name = "kospi-dict.pickle" if market_type == "0" else "kosdaq-dict.pickle"
code_list = self.kiwoom.get_code_list_by_market(market_type)
name_list = [self.kiwoom.get_master_code_name(code) for code in code_list]
market_dict = dict(zip(code_list, name_list))
pickle_out = open("./data/" + pickle_name, "wb")
pickle.dump(market_dict, pickle_out)
pickle_out.close()
self.step_one_finish()
@timeit
def start_pdreader(self, start_date, end_date):
self.starting_pdreader()
dict_pickle = Path("./data/kospi-dict.pickle")
if not dict_pickle.exists():
self.step_one_skipped()
self.step_one_kiwoom()
self.pr = PDReader(start_date, end_date)
self.pr.set_task()
self.pdreader_started()
@timeit
def save_kospi_ohlcv(self):
# task done by: pdreader.PDReader
# roughly 35 minutes
pr = self.pr
self.saving_kospi_ohlcv()
notsaved = list()
for code, name in pr.task.items():
try:
self.starting_request(code, name)
df = pr.request_df(code)
ohlcv = pr.create_ohlcv(df)
db_initializer = pr.get_db_initializer(code, name, ohlcv)
with open("./data/stock/" + code + ".json", "w") as f:
json.dump(db_initializer, f)
self.data_saved()
except:
self.skipped_data(code, name)
notsaved.append(code)
pickle_out = open("./data/kospi-notsaved.pickle", "wb")
pickle.dump(notsaved, pickle_out)
self.data_saved()
@timeit
def start_sejongscraper(self):
self.ss = SejongScraper()
self.ss.set_tasks()
@timeit
def save_financial_sejong(self, market_type):
# task done by: webscraper.SejongScraper
# do after saving kospi ohlcv
ss = self.ss
notsaved = list()
task = ss.kospi_task if market_type == "kospi" else ss.kosdaq_task
for code, name in task.items():
try:
value_dict = ss.create_value(code)
with open("./data/stock/" + code + ".json") as f:
data = json.load(f)
data["annual"] = value_dict["annual"]
data["quarter"] = value_dict["quarter"]
json.dump(data, f)
except:
continue
notsaved.append(code)
pickle_out = open("./data/financial-notsaved.pickle", "wb")
pickle.dump(notsaved, pickle_out)
| ppark9553/safer | Gobble/gobble.py | gobble.py | py | 3,706 | python | en | code | 0 | github-code | 90 |
18259187209 | def main():
s = str(input())
q = int(input())
lst = [list(map(str, input().split())) for _ in range(q)]
switch = 0 # 0が通常 1が前
str_lst = [s]
front_lst = []
for i in range(q):
if lst[i][0] == '1':
switch = 1 - switch
else:
f = lst[i][1]
c = lst[i][2]
if f == '1': # 先頭に追加
if switch == 0:
front_lst.append(c)
else:
str_lst.append(c)
else:
if switch == 0:
str_lst.append(c)
else:
front_lst.append(c)
front = ''.join(front_lst)
front = front[::-1]
after = ''.join(str_lst)
answer = front + after
if switch == 1:
answer = answer[::-1]
print(answer)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02756/s556793551.py | s556793551.py | py | 899 | python | en | code | 0 | github-code | 90 |
37118900793 | class Solution(object):
def search(self, array, target):
"""
input: int[] array, int target
return: int
"""
# write your solution here
if len(array) == 0:
return -1
left, right = 0, len(array)-1
while left+1 < right:
mid = left+(right-left)//2
if array[left] > array[mid]:
if array[mid] < target and target < array[right]:
left = mid
else:
right = mid
else:
if array[left] < target and target <= array[mid]:
right = mid
else:
left = mid
if array[left] == target:
return left
if array[right] == target:
return right
return -1
num_list = [3, 1, 1, 1, 1, 3]
s = Solution()
print(s.search(num_list, 3))
| nanw01/python-algrothm | Python Algrothm Advanced/practice/050104findinrotatedarray copy 2.py | 050104findinrotatedarray copy 2.py | py | 915 | python | en | code | 1 | github-code | 90 |
25521928885 | import webob
from oslo_config import cfg
from oslo_log import log as logging
from guts.api import extensions
from guts.api.openstack import wsgi
from guts import exception
from guts import objects
from guts import rpc
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
authorize = extensions.extension_authorizer('migration', 'instances')
class InstancesController(wsgi.Controller):
"""The instance API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.ext_mgr = ext_mgr
super(InstancesController, self).__init__()
def _notify_source_error(self, ctxt, method, err,
source=None, id=None, name=None):
payload = dict(sources=source, name=name, id=id, error_message=err)
rpc.get_notifier('source').error(ctxt, method, payload)
def _notify_source_info(self, ctxt, method, source):
payload = dict(sources=source)
rpc.get_notifier('source').info(ctxt, method, payload)
def index(self, req):
"""Returns the list of Instances."""
context = req.environ['guts.context']
db_instances = objects.ResourceList.get_all_by_type(context,
'instance')
instances = []
for i in db_instances:
instance = {}
instance['id'] = i.id
instance['name'] = i.name
instance['hypervisor_name'] = i.source_hypervisor
instance['migrated'] = i.migrated
instances.append(instance)
return dict(instances=instances)
def show(self, req, id):
"""Returns data about given instance."""
context = req.environ['guts.context']
try:
inst = objects.Resource.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound()
instance = {}
instance['id'] = inst.id
instance['name'] = inst.name
instance['migrated'] = inst.migrated
instance['source'] = inst.source_hypervisor
instance['properties'] = inst.properties
return {'instance': instance}
def create_resource(ext_mgr):
return wsgi.Resource(InstancesController(ext_mgr))
| th3architect/guts | guts/api/v1/instances.py | instances.py | py | 2,220 | python | en | code | null | github-code | 90 |
13656489605 | # -*- encoding:utf-8 -*-
"""
This script provides an exmaple to wrap UER-py for classification.
"""
import torch
import json
import random
import argparse
import collections
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils.tokenizer import *
from uer.model_builder import build_model
from uer.utils.optimizers import BertAdam
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_saver import save_model
class BertClassifier(nn.Module):
def __init__(self, args, model):
super(BertClassifier, self).__init__()
self.embedding = model.embedding
self.encoder = model.encoder
self.labels_num = args.labels_num
self.pooling = args.pooling
self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
self.output_layer_2 = nn.Linear(args.hidden_size, args.labels_num)
self.softmax = nn.LogSoftmax(dim=-1)
self.criterion = nn.NLLLoss()
def forward(self, src, label, mask):
"""
Args:
src: [batch_size x seq_length]
label: [batch_size]
mask: [batch_size x seq_length]
"""
# Embedding.
emb = self.embedding(src, mask)
# Encoder.
output = self.encoder(emb, mask)
# Target.
if self.pooling == "mean":
output = torch.mean(output, dim=1)
elif self.pooling == "max":
output = torch.max(output, dim=1)[0]
elif self.pooling == "last":
output = output[:, -1, :]
else:
output = output[:, 0, :]
output = torch.tanh(self.output_layer_1(output))
logits = self.output_layer_2(output)
loss = self.criterion(self.softmax(logits.view(-1, self.labels_num)), label.view(-1))
return loss, logits
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Path options.
parser.add_argument("--pretrained_model_path", default=None, type=str,
help="Path of the pretrained model.")
parser.add_argument("--output_model_path", default="./models/classifier_model.bin", type=str,
help="Path of the output model.")
parser.add_argument("--vocab_path", default="./models/google_vocab.txt", type=str,
help="Path of the vocabulary file.")
parser.add_argument("--train_path", type=str, required=True,
help="Path of the trainset.")
parser.add_argument("--dev_path", type=str, required=True,
help="Path of the devset.")
parser.add_argument("--test_path", type=str, required=True,
help="Path of the testset.")
parser.add_argument("--config_path", default="./models/google_config.json", type=str,
help="Path of the config file.")
# Model options.
parser.add_argument("--batch_size", type=int, default=64,
help="Batch size.")
parser.add_argument("--seq_length", type=int, default=128,
help="Sequence length.")
parser.add_argument("--encoder", choices=["bert", "lstm", "gru", \
"cnn", "gatedcnn", "attn", \
"rcnn", "crnn", "gpt", "bilstm"], \
default="bert", help="Encoder type.")
parser.add_argument("--bidirectional", action="store_true", help="Specific to recurrent model.")
parser.add_argument("--pooling", choices=["mean", "max", "first", "last"], default="first",
help="Pooling type.")
# Subword options.
parser.add_argument("--subword_type", choices=["none", "char"], default="none",
help="Subword feature type.")
parser.add_argument("--sub_vocab_path", type=str, default="models/sub_vocab.txt",
help="Path of the subword vocabulary file.")
parser.add_argument("--subencoder", choices=["avg", "lstm", "gru", "cnn"], default="avg",
help="Subencoder type.")
parser.add_argument("--sub_layers_num", type=int, default=2, help="The number of subencoder layers.")
# Tokenizer options.
parser.add_argument("--tokenizer", choices=["bert", "char", "space"], default="bert",
help="Specify the tokenizer."
"Original Google BERT uses bert tokenizer on Chinese corpus."
"Char tokenizer segments sentences into characters."
"Word tokenizer supports online word segmentation based on jieba segmentor."
"Space tokenizer segments sentences into words according to space."
)
# Optimizer options.
parser.add_argument("--learning_rate", type=float, default=2e-5,
help="Learning rate.")
parser.add_argument("--warmup", type=float, default=0.1,
help="Warm up value.")
# Training options.
parser.add_argument("--dropout", type=float, default=0.5,
help="Dropout.")
parser.add_argument("--epochs_num", type=int, default=3,
help="Number of epochs.")
parser.add_argument("--report_steps", type=int, default=100,
help="Specific steps to print prompt.")
parser.add_argument("--seed", type=int, default=7,
help="Random seed.")
# Evaluation options.
parser.add_argument("--mean_reciprocal_rank", action="store_true", help="Evaluation metrics for DBQA dataset.")
args = parser.parse_args()
# Load the hyperparameters from the config file.
args = load_hyperparam(args)
set_seed(args.seed)
# Count the number of labels.
labels_set = set()
columns = {}
with open(args.train_path, mode="r", encoding="utf-8") as f:
for line_id, line in enumerate(f):
try:
line = line.strip().split("\t")
if line_id == 0:
for i, column_name in enumerate(line):
columns[column_name] = i
continue
label = int(line[columns["label"]])
labels_set.add(label)
except:
pass
args.labels_num = len(labels_set)
# Load vocabulary.
vocab = Vocab()
vocab.load(args.vocab_path)
args.vocab = vocab
# Build bert model.
# A pseudo target is added.
args.target = "bert"
model = build_model(args)
# Load or initialize parameters.
if args.pretrained_model_path is not None:
# Initialize with pretrained model.
model.load_state_dict(torch.load(args.pretrained_model_path), strict=False)
else:
# Initialize with normal distribution.
for n, p in list(model.named_parameters()):
if 'gamma' not in n and 'beta' not in n:
p.data.normal_(0, 0.02)
# Build classification model.
model = BertClassifier(args, model)
# For simplicity, we use DataParallel wrapper to use multiple GPUs.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() > 1:
print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
model = nn.DataParallel(model)
model = model.to(device)
# Datset loader.
def batch_loader(batch_size, input_ids, label_ids, mask_ids):
instances_num = input_ids.size()[0]
for i in range(instances_num // batch_size):
input_ids_batch = input_ids[i*batch_size: (i+1)*batch_size, :]
label_ids_batch = label_ids[i*batch_size: (i+1)*batch_size]
mask_ids_batch = mask_ids[i*batch_size: (i+1)*batch_size, :]
yield input_ids_batch, label_ids_batch, mask_ids_batch
if instances_num > instances_num // batch_size * batch_size:
input_ids_batch = input_ids[instances_num//batch_size*batch_size:, :]
label_ids_batch = label_ids[instances_num//batch_size*batch_size:]
mask_ids_batch = mask_ids[instances_num//batch_size*batch_size:, :]
yield input_ids_batch, label_ids_batch, mask_ids_batch
# Build tokenizer.
tokenizer = globals()[args.tokenizer.capitalize() + "Tokenizer"](args)
# Read dataset.
def read_dataset(path):
dataset = []
with open(path, mode="r", encoding="utf-8") as f:
for line_id, line in enumerate(f):
if line_id == 0:
continue
try:
line = line.strip().split('\t')
if len(line) == 2:
label = int(line[columns["label"]])
text = line[columns["text_a"]]
tokens = [vocab.get(t) for t in tokenizer.tokenize(text)]
tokens = [CLS_ID] + tokens
mask = [1] * len(tokens)
if len(tokens) > args.seq_length:
tokens = tokens[:args.seq_length]
mask = mask[:args.seq_length]
while len(tokens) < args.seq_length:
tokens.append(0)
mask.append(0)
dataset.append((tokens, label, mask))
elif len(line) == 3: # For sentence pair input.
label = int(line[columns["label"]])
text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
tokens_a = [vocab.get(t) for t in tokenizer.tokenize(text_a)]
tokens_a = [CLS_ID] + tokens_a + [SEP_ID]
tokens_b = [vocab.get(t) for t in tokenizer.tokenize(text_b)]
tokens_b = tokens_b + [SEP_ID]
tokens = tokens_a + tokens_b
mask = [1] * len(tokens_a) + [2] * len(tokens_b)
if len(tokens) > args.seq_length:
tokens = tokens[:args.seq_length]
mask = mask[:args.seq_length]
while len(tokens) < args.seq_length:
tokens.append(0)
mask.append(0)
dataset.append((tokens, label, mask))
elif len(line) == 4: # For dbqa input.
qid=int(line[columns["qid"]])
label = int(line[columns["label"]])
text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
tokens_a = [vocab.get(t) for t in tokenizer.tokenize(text_a)]
tokens_a = [CLS_ID] + tokens_a + [SEP_ID]
tokens_b = [vocab.get(t) for t in tokenizer.tokenize(text_b)]
tokens_b = tokens_b + [SEP_ID]
tokens = tokens_a + tokens_b
mask = [1] * len(tokens_a) + [2] * len(tokens_b)
if len(tokens) > args.seq_length:
tokens = tokens[:args.seq_length]
mask = mask[:args.seq_length]
while len(tokens) < args.seq_length:
tokens.append(0)
mask.append(0)
dataset.append((tokens, label, mask, qid))
else:
pass
except:
pass
return dataset
# Evaluation function.
def evaluate(args, is_test):
if is_test:
dataset = read_dataset(args.test_path)
else:
dataset = read_dataset(args.dev_path)
input_ids = torch.LongTensor([sample[0] for sample in dataset])
label_ids = torch.LongTensor([sample[1] for sample in dataset])
mask_ids = torch.LongTensor([sample[2] for sample in dataset])
batch_size = args.batch_size
instances_num = input_ids.size()[0]
if is_test:
print("The number of evaluation instances: ", instances_num)
correct = 0
# Confusion matrix.
confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long)
model.eval()
if not args.mean_reciprocal_rank:
for i, (input_ids_batch, label_ids_batch, mask_ids_batch) in enumerate(batch_loader(batch_size, input_ids, label_ids, mask_ids)):
input_ids_batch = input_ids_batch.to(device)
label_ids_batch = label_ids_batch.to(device)
mask_ids_batch = mask_ids_batch.to(device)
with torch.no_grad():
loss, logits = model(input_ids_batch, label_ids_batch, mask_ids_batch)
logits = nn.Softmax(dim=1)(logits)
pred = torch.argmax(logits, dim=1)
gold = label_ids_batch
for j in range(pred.size()[0]):
confusion[pred[j], gold[j]] += 1
correct += torch.sum(pred == gold).item()
if is_test:
print("Confusion matrix:")
print(confusion)
print("Report precision, recall, and f1:")
for i in range(confusion.size()[0]):
p = confusion[i,i].item()/confusion[i,:].sum().item()
r = confusion[i,i].item()/confusion[:,i].sum().item()
f1 = 2*p*r / (p+r)
if is_test:
print("Label {}: {:.3f}, {:.3f}, {:.3f}".format(i,p,r,f1))
print("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct/len(dataset), correct, len(dataset)))
return correct/len(dataset)
else:
for i, (input_ids_batch, label_ids_batch, mask_ids_batch) in enumerate(batch_loader(batch_size, input_ids, label_ids, mask_ids)):
input_ids_batch = input_ids_batch.to(device)
label_ids_batch = label_ids_batch.to(device)
mask_ids_batch = mask_ids_batch.to(device)
with torch.no_grad():
loss, logits = model(input_ids_batch, label_ids_batch, mask_ids_batch)
logits = nn.Softmax(dim=1)(logits)
if i == 0:
logits_all=logits
if i >= 1:
logits_all=torch.cat((logits_all,logits),0)
order = -1
gold = []
for i in range(len(dataset)):
qid = dataset[i][3]
label = dataset[i][1]
if qid == order:
j += 1
if label == 1:
gold.append((qid,j))
else:
order = qid
j = 0
if label == 1:
gold.append((qid,j))
label_order = []
order = -1
for i in range(len(gold)):
if gold[i][0] == order:
templist.append(gold[i][1])
elif gold[i][0] != order:
order=gold[i][0]
if i > 0:
label_order.append(templist)
templist = []
templist.append(gold[i][1])
label_order.append(templist)
order = -1
score_list = []
for i in range(len(logits_all)):
score = float(logits_all[i][1])
qid=int(dataset[i][3])
if qid == order:
templist.append(score)
else:
order = qid
if i > 0:
score_list.append(templist)
templist = []
templist.append(score)
score_list.append(templist)
rank = []
pred = []
for i in range(len(score_list)):
if len(label_order[i])==1:
if label_order[i][0] < len(score_list[i]):
true_score = score_list[i][label_order[i][0]]
score_list[i].sort(reverse=True)
for j in range(len(score_list[i])):
if score_list[i][j] == true_score:
rank.append(1 / (j + 1))
else:
rank.append(0)
else:
true_rank = len(score_list[i])
for k in range(len(label_order[i])):
if label_order[i][k] < len(score_list[i]):
true_score = score_list[i][label_order[i][k]]
temp = sorted(score_list[i],reverse=True)
for j in range(len(temp)):
if temp[j] == true_score:
if j < true_rank:
true_rank = j
if true_rank < len(score_list[i]):
rank.append(1 / (true_rank + 1))
else:
rank.append(0)
MRR = sum(rank) / len(rank)
print(MRR)
return MRR
# Training phase.
print("Start training.")
trainset = read_dataset(args.train_path)
random.shuffle(trainset)
instances_num = len(trainset)
batch_size = args.batch_size
input_ids = torch.LongTensor([example[0] for example in trainset])
label_ids = torch.LongTensor([example[1] for example in trainset])
mask_ids = torch.LongTensor([example[2] for example in trainset])
train_steps = int(instances_num * args.epochs_num / batch_size) + 1
print("Batch size: ", batch_size)
print("The number of training instances:", instances_num)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup, t_total=train_steps)
total_loss = 0.
result = 0.0
best_result = 0.0
for epoch in range(1, args.epochs_num+1):
model.train()
for i, (input_ids_batch, label_ids_batch, mask_ids_batch) in enumerate(batch_loader(batch_size, input_ids, label_ids, mask_ids)):
model.zero_grad()
input_ids_batch = input_ids_batch.to(device)
label_ids_batch = label_ids_batch.to(device)
mask_ids_batch = mask_ids_batch.to(device)
loss, _ = model(input_ids_batch, label_ids_batch, mask_ids_batch)
if torch.cuda.device_count() > 1:
loss = torch.mean(loss)
total_loss += loss.item()
if (i + 1) % args.report_steps == 0:
print("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i+1, total_loss / args.report_steps))
total_loss = 0.
loss.backward()
optimizer.step()
result = evaluate(args, False)
if result > best_result:
best_result = result
save_model(model, args.output_model_path)
else:
continue
# Evaluation phase.
print("Start evaluation.")
if torch.cuda.device_count() > 1:
model.module.load_state_dict(torch.load(args.output_model_path))
else:
model.load_state_dict(torch.load(args.output_model_path))
evaluate(args, True)
if __name__ == "__main__":
main()
| LuoXukun/Bert_LSTM_CRF | run_classifier.py | run_classifier.py | py | 20,048 | python | en | code | 3 | github-code | 90 |
43356924871 | from typing import List
from enum import Enum, auto
from time import time
import random
from Models.RL.blackjack_simple import MCAgent
from Models.RL.Envs.blackjack_splitting import BlackjackEnvSplit, sum_hand, usable_ace
from Game_Engines.base_engine import BaseEngine
class BJStates(Enum):
"""
State-machine like enums for transitions. Usual execution flow goes like:
RESETTING->THINKING->H\ST\D\SP->T->H\ST\D\SP->...->W->D->Resetting
it's possible to skip the W->D chaing by busting, in which case we have:
R->T->...->R directly
in the case of a blackjack, we'd have:
R->T->R, since we instantly win -> also skip calling the agent in this case
"""
THINKING = auto() # initial state it is at the start of the game or after hitting\splitting\doubling
HITTING = auto()
DOUBLING = auto()
SPLITTING = auto()
WAITING_FOR_DEALER = auto() # waiting for dealer to reach a score of 17
DECIDING = auto() # state it switches to in the instant that the dealer passes (or reaches) 17
RESETTING = auto() # state in-between games
class BlackjackEngine(BaseEngine):
    """State-machine engine that plays blackjack from card detections.

    Detections arrive through update_detections(); act() advances a
    BJStates machine: RESETTING -> THINKING -> (HITTING | SPLITTING |
    DOUBLING | WAITING_FOR_DEALER) -> DECIDING -> RESETTING.
    All play decisions are delegated to ``bj_agent.get_action(state)``.
    """
    def __init__(self, bj_agent, statistics: bool=True):
        super().__init__()
        self.agent = bj_agent  # decision policy: get_action(state) -> action 0..4
        self.state = BJStates.RESETTING
        self.player_hand = []
        self.dealer_hand = []
        # check if a valid change has just been made to the deck
        self.valid_change = False
        # for keeping track of splits
        self.splits_left = 0   # extra hands still to be played after split(s)
        self.split_values = [] # final sums of each split hand, resolved in DECIDING
        self.finished_time = 0 # timestamp of the last finished hand/turn
        self.WAIT_PERIOD = 8 # wait period between turns in seconds
        # cool statistics
        self.statistics = statistics
        self.total_wins = 0
        self.total_matches = 0
        self.total_draws = 0
        self.total_losses = 0
    def update_detections(self, detected_player_hand: List, detected_card_pot: List):
        """
        Function that handles updating the engine's detections.
        :param detected_player_hand: list of labels of detected player hand
        :param detected_card_pot: list of labels of detected card pot
        :return: nothing
        """
        # Debounce: ignore detections during the cool-down after a finished hand.
        if time() - self.finished_time < self.WAIT_PERIOD:
            return
        # Strip the trailing suit character; aces become the int 1, every other
        # rank stays a string ("2".."10", "K", "Q", "J").
        # NOTE(review): hands therefore mix int and str values — assumes
        # sum_hand()/usable_ace() accept both; confirm against those helpers.
        detected_player_hand = [1 if card[:-1] == "A" else card[:-1] for card in detected_player_hand]
        detected_card_pot = [1 if card[:-1] == "A" else card[:-1] for card in detected_card_pot]
        # for blackjack, we only need to check the hand each time a new card is drawn (either by us or the dealer)
        if (len(self.player_hand) != len(detected_player_hand) or len(self.dealer_hand) != len(detected_card_pot)\
            or self.player_hand != detected_player_hand or self.dealer_hand != detected_card_pot) \
                and self.state != BJStates.DECIDING: # for speed
            self.player_hand = detected_player_hand
            if detected_card_pot != self.dealer_hand:
                self.dealer_hand = detected_card_pot
                # A dealer-pot change outside the dealer's turn (or reset) is a
                # mis-detection / new shoe — restart the hand.
                if self.state != BJStates.WAITING_FOR_DEALER and self.state != BJStates.RESETTING:
                    print(f"Bad value for dealer detected.")
                    print("-"*75)
                    self.state = BJStates.RESETTING
            self.valid_change = True
    def act(self):
        """
        Main function of the engine, here it decides what to do, based on the state it is in and the detections it has
        received.
        :return: nothing
        """
        # a game just ended or beginning first game, need to reset everything basically
        if self.state == BJStates.RESETTING:
            if time() - self.finished_time < self.WAIT_PERIOD:
                return
            if len(self.player_hand) == 0 or len(self.dealer_hand) == 0 or not self.valid_change:
                # if there's no (new) detection, don't do anything yet (maybe the dealer is flushing the deck etc.)
                return
            print(f"New hand.")
            print(f"My cards are {self.player_hand}.")
            print(f"Dealer cards are {self.dealer_hand}.")
            # Consume one pending split hand, or clear split bookkeeping when
            # this is a fresh (non-split) hand.
            if self.splits_left > 0:
                self.splits_left -= 1
            else:
                self.split_values = []
            self.state = BJStates.THINKING
            self.total_matches += 1
            self.valid_change = False # there will always be a valid change at the beginning of a hand
        # -----------------------------------
        elif self.state == BJStates.THINKING:
            # we end up in this state whenever we need to take a new decision
            splittable = None
            if sum_hand(self.player_hand) > 21:
                print(f"Busted.")
                self.total_losses += 1
                self.finished_time = time()
                self.state = BJStates.RESETTING
                return
            # A pair on the initial two cards is split-eligible; record its
            # per-card value for the agent's state tuple.
            if len(self.player_hand) == 2 and self.player_hand[0] == self.player_hand[1]:
                splittable = 10 if self.player_hand[0] in {"K", "Q", "J"} else int(self.player_hand[0])
            sum_player = sum_hand(self.player_hand)
            if len(self.player_hand) == 2 and sum_player == 21: # natural blackjack
                if self.splits_left == 0:
                    self.state = BJStates.DECIDING
                else:
                    print(f"Blackjack - from split. change hand")
                    self.split_values.append(sum_hand(self.player_hand))
                    self.finished_time = time()
                    self.state = BJStates.RESETTING
                return
            ace = usable_ace(self.player_hand)
            # Agent state: (pair value or None, player sum, dealer upcard value, usable ace)
            state = (splittable, sum_player, 10 if self.dealer_hand[0] in {"K", "Q", "J"}
                     else int(self.dealer_hand[0]), ace)
            action = self.agent.get_action(state)
            if action == 0: # STAND
                print(f"I'm standing.")
                print(f"My cards are {self.player_hand}.")
                print(f"Dealer cards are {self.dealer_hand}.")
                self.state = BJStates.WAITING_FOR_DEALER
            elif action == 1: # HIT
                print(f"Hit me.")
                print(f"My cards are {self.player_hand}.")
                print(f"Dealer cards are {self.dealer_hand}.")
                self.state = BJStates.HITTING
            elif action == 2: # SPLIT
                print(f"Split.")
                print(f"My cards are {self.player_hand}.")
                print(f"Dealer cards are {self.dealer_hand}.")
                self.state = BJStates.SPLITTING
            elif action == 3: # DOUBLE OR HIT
                print(f"Double if allowed, otherwise hit")
                print(f"My cards are {self.player_hand}.")
                print(f"Dealer cards are {self.dealer_hand}.")
                # Doubling is only allowed on the initial two cards.
                if len(self.player_hand) == 2:
                    self.state = BJStates.DOUBLING
                else:
                    self.state = BJStates.HITTING
            elif action == 4: # DOUBLE OR STAND
                print(f"Double if allowed, otherwise stand.")
                print(f"My cards are {self.player_hand}.")
                print(f"Dealer cards are {self.dealer_hand}.")
                if len(self.player_hand) == 2:
                    self.state = BJStates.DOUBLING
                else:
                    self.state = BJStates.WAITING_FOR_DEALER
            # While split hands remain, a stand defers resolution: record is
            # settled later, move on to the next split hand.
            if self.splits_left > 0 and self.state == BJStates.WAITING_FOR_DEALER:
                self.finished_time = time()
                self.state = BJStates.RESETTING
        # ---------------------------------------------
        elif self.state == BJStates.WAITING_FOR_DEALER:
            # just waiting for the dealer to reach a sum of 17
            # (dealer hits a soft 17, i.e. 17 with a usable ace)
            dealer_sum = sum_hand(self.dealer_hand)
            if dealer_sum > 17 or (dealer_sum == 17 and not usable_ace(self.dealer_hand)):
                self.state = BJStates.DECIDING
                print(f"Dealer reached over 17.")
                print(f"My cards are {self.player_hand}.")
                print(f"Dealer cards are {self.dealer_hand}.")
        # ---------------------------------
        elif self.state == BJStates.HITTING:
            # Wait until the newly drawn card shows up in the detections.
            if self.valid_change:
                self.valid_change = False
                sum_player = sum_hand(self.player_hand)
                if sum_player > 21:
                    print(f"Busted.")
                    self.total_losses += 1
                    self.finished_time = time()
                    self.state = BJStates.RESETTING
                else:
                    self.state = BJStates.THINKING
        # ------------------------------------
        elif self.state == BJStates.SPLITTING:
            self.splits_left += 2 - (self.splits_left == 0) # if we already split, then a new split in fact adds only one new hand
            self.finished_time = time()
            self.state = BJStates.RESETTING
        # -----------------------------------
        elif self.state == BJStates.DOUBLING:
            # Like HITTING, but exactly one card is taken and then we stand.
            if self.valid_change:
                self.valid_change = False
                sum_player = sum_hand(self.player_hand)
                if sum_player > 21:
                    print(f"Busted.")
                    self.total_losses += 1
                    self.finished_time = time()
                    self.state = BJStates.RESETTING
                else:
                    self.state = BJStates.WAITING_FOR_DEALER
                if self.splits_left > 0 and self.state == BJStates.WAITING_FOR_DEALER:
                    self.finished_time = time()
                    self.state = BJStates.RESETTING
        # -----------------------------------
        elif self.state == BJStates.DECIDING:
            # Resolve the hand(s) against the dealer's final sum.
            if len(self.split_values) != 0:
                self.split_values.append(sum_hand(self.player_hand))
            sum_player = sum_hand(self.player_hand)
            sum_dealer = sum_hand(self.dealer_hand)
            if len(self.split_values) == 0:
                if sum_dealer > 21:
                    print(f"Dealer busted - no split.")
                    print(f"My cards are {self.player_hand}.")
                    print(f"Dealer cards are {self.dealer_hand}.")
                    self.total_wins += 1
                elif sum_player > sum_dealer:
                    print(f"I won.")
                    print(f"My cards are {self.player_hand}.")
                    print(f"Dealer cards are {self.dealer_hand}.")
                    self.total_wins += 1
                elif sum_player == sum_dealer:
                    print(f"Draw.")
                    print(f"My cards are {self.player_hand}.")
                    print(f"Dealer cards are {self.dealer_hand}.")
                    self.total_draws += 1
                elif sum_player < sum_dealer:
                    print(f"I lost.")
                    print(f"My cards are {self.player_hand}.")
                    print(f"Dealer cards are {self.dealer_hand}.")
                    self.total_losses += 1
            else:
                # NOTE(review): busted split hands are skipped here — they were
                # already counted as losses when they busted in THINKING/HITTING.
                for sum_player in self.split_values:
                    if sum_player > 21:
                        continue
                    if sum_dealer > 21:
                        print(f"Dealer busted. - split")
                        self.total_wins += 1
                    elif sum_player > sum_dealer:
                        print(f"I won.")
                        print(f"My cards are {self.player_hand}.")
                        print(f"Dealer cards are {self.dealer_hand}.")
                        self.total_wins += 1
                    elif sum_player == sum_dealer:
                        print(f"Draw.")
                        print(f"My cards are {self.player_hand}.")
                        print(f"Dealer cards are {self.dealer_hand}.")
                        self.total_draws += 1
                    elif sum_player < sum_dealer:
                        print(f"I lost.")
                        print(f"My cards are {self.player_hand}.")
                        print(f"Dealer cards are {self.dealer_hand}.")
                        self.total_losses += 1
            print("-"*75)
            print("-"*75)
            self.state = BJStates.RESETTING
            self.finished_time = time()
| dragosconst/licenta | code/Game_Engines/bj_enginge.py | bj_enginge.py | py | 12,215 | python | en | code | 0 | github-code | 90 |
20594109817 | from transitions import Machine
from ClassDiagramWally import*
from Robot_Wally import*
# Instantiate the robot model that the state machine will drive.
accionamiento = Robot_Wally()

# All state names use underscores. Every 'source'/'dest' below must reference
# one of these exact names, otherwise transitions.Machine fails at setup time.
# Fixed: the original list misspelled 'verificar_reecoleccion', and several
# transitions used space-separated names that matched no declared state.
states = ['home', 'exploracion', 'reconocimiento_objetos', 'deteccion',
          'posicionar_garra', 'orientar_garra', 'medir_distancia', 'mover_garra',
          'agarrar', 'depositar', 'verificar_recoleccion', 'siguiente_categoria',
          'finalizacion', 'mover_zona']

transitions = [
    # Nominal collection cycle
    {'trigger': 'explorar_terreno', 'source': 'home', 'dest': 'exploracion'},
    {'trigger': 'capturar_imagen', 'source': 'exploracion', 'dest': 'reconocimiento_objetos'},
    {'trigger': 'identifico_objetos', 'source': 'reconocimiento_objetos', 'dest': 'deteccion'},
    {'trigger': 'posiciona', 'source': 'deteccion', 'dest': 'posicionar_garra'},
    {'trigger': 'dirige_garra', 'source': 'posicionar_garra', 'dest': 'orientar_garra'},
    {'trigger': 'medir_distancia_og', 'source': 'orientar_garra', 'dest': 'medir_distancia'},
    {'trigger': 'mover', 'source': 'medir_distancia', 'dest': 'mover_garra'},
    {'trigger': 'agarrar', 'source': 'mover_garra', 'dest': 'agarrar'},
    {'trigger': 'mover_contenedor', 'source': 'agarrar', 'dest': 'depositar'},
    {'trigger': 'verificar_recoleccion', 'source': 'depositar', 'dest': 'verificar_recoleccion'},
    {'trigger': 'no_hay_objetos_categoria', 'source': 'verificar_recoleccion', 'dest': 'siguiente_categoria'},
    {'trigger': 'finaliza_recoleccion', 'source': 'siguiente_categoria', 'dest': 'finalizacion'},
    {'trigger': 'mover_nueva_zona', 'source': 'finalizacion', 'dest': 'mover_zona'},
    {'trigger': 'estado_inicial', 'source': 'mover_zona', 'dest': 'home'},
    # Fault / recovery transitions
    {'trigger': 'falla_robot', 'source': 'reconocimiento_objetos', 'dest': 'home'},
    {'trigger': 'objeto_no_recogido', 'source': 'agarrar', 'dest': 'reconocimiento_objetos'},
    {'trigger': 'alcance_limitado', 'source': 'medir_distancia', 'dest': 'reconocimiento_objetos'}
]

statemachineRobot_Wally = Machine(model=accionamiento, states=states,
                                  transitions=transitions, initial='home')

# Smoke test: fire the first trigger and show the state before/after.
print(accionamiento.state)
accionamiento.explorar_terreno()
print(accionamiento.state)
45139472113 | from kivy.core.window import Window
from kivy.utils import get_color_from_hex as hex
from kivy.uix.button import Button
from kivymd.uix.screen import MDScreen
from kivymd.uix.floatlayout import MDFloatLayout
from kivymd.uix.textfield import MDTextField
from kivymd.uix.label import MDLabel
from kivymd.uix.button import MDRaisedButton
from screens.accounts_screens.choose_account_screen import ChooseAccountScreen
# App color palette, converted from hex strings to Kivy RGBA tuples.
# NOTE(review): the import alias `hex` (get_color_from_hex) shadows the
# builtin hex() in this module — consider renaming the alias.
red = hex("#E63946")
cream = hex("#F1FAEE")
light_teal = hex("#A8DADC")
blue = hex("#457B9D")
dark_blue = hex("#1D3557")
green = hex("#84a98c")
class EditRecurringTransferScreen(MDScreen):
    """Screen for editing (or removing) an existing recurring transfer.

    Builds the form pre-filled from ``recurring_transfer``, validates the
    edited values on confirm, persists the changes through
    ``app.recurring_acts_screen`` and returns to the recurring-acts screen.
    """

    def __init__(self, app, recurring_transfer, **kwargs):
        super(EditRecurringTransferScreen, self).__init__(**kwargs)
        self.layout = MDFloatLayout()
        self.app = app
        self.recurring_transfer = recurring_transfer
        self.window_width, self.window_height = Window.size
        # Create edit recurring transfer Label
        self.edit_recurring_transfer_label = MDLabel(
            text="Edit Recurring Transfer",
            pos_hint = {"x": 0.3, "y": 0.85},
            size_hint = (0.4, 0.05),
            halign = "center"
        )
        self.layout.add_widget(self.edit_recurring_transfer_label)
        # Create Text Fields, each pre-filled from the transfer being edited.
        self.name_text_field = MDTextField(
            hint_text = "Name",
            mode = "rectangle",
            helper_text = "Name already exists.",
            helper_text_mode = "on_error",
            pos_hint = {"x": 0.1, "y": 0.78},
            size_hint = (0.8, 0.05)
        )
        self.layout.add_widget(self.name_text_field)
        self.name_text_field.text = recurring_transfer.name
        self.value_text_field = MDTextField(
            hint_text = "Value",
            mode = "rectangle",
            helper_text = "Invalid Number",
            helper_text_mode = "on_error",
            pos_hint = {"x": 0.1, "y": 0.68},
            size_hint = (0.8, 0.05)
        )
        self.layout.add_widget(self.value_text_field)
        self.value_text_field.text = str(recurring_transfer.value)
        self.start_date_text_field = MDTextField(
            hint_text = "Start Date (dd/mm/yyyy)",
            mode = "rectangle",
            helper_text = "Invalid Date",
            helper_text_mode = "on_error",
            pos_hint = {"x": 0.1, "y": 0.58},
            size_hint = (0.8, 0.1)
        )
        day = self.recurring_transfer.start_date.day
        month = self.recurring_transfer.start_date.month
        year = self.recurring_transfer.start_date.year
        self.start_date_text_field.text = f"{day}/{month}/{year}"
        self.layout.add_widget(self.start_date_text_field)
        self.end_date_text_field = MDTextField(
            hint_text = "End Date (dd/mm/yyyy)",
            mode = "rectangle",
            # Fixed copy-paste bug: helper text previously read "Invalid Hour".
            helper_text = "Invalid Date",
            helper_text_mode = "on_error",
            pos_hint = {"x": 0.1, "y": 0.48},
            size_hint = (0.8, 0.1)
        )
        # End date is optional; leave the field empty when it is unset.
        self.end_date_text_field.text = ""
        if self.recurring_transfer.end_date is not None:
            day = self.recurring_transfer.end_date.day
            month = self.recurring_transfer.end_date.month
            year = self.recurring_transfer.end_date.year
            self.end_date_text_field.text = f"{day}/{month}/{year}"
        self.layout.add_widget(self.end_date_text_field)
        self.month_day_text_field = MDTextField(
            hint_text = "Month Day",
            mode = "rectangle",
            helper_text = "Invalid Day",
            helper_text_mode = "on_error",
            pos_hint = {"x": 0.1, "y": 0.38},
            size_hint = (0.8, 0.1)
        )
        self.month_day_text_field.text = str(recurring_transfer.month_day)
        self.layout.add_widget(self.month_day_text_field)
        self.note_text_field = MDTextField(
            hint_text = "Note",
            mode = "rectangle",
            pos_hint = {"x": 0.1, "y": 0.28},
            size_hint = (0.8, 0.1)
        )
        self.note_text_field.text = recurring_transfer.note
        self.layout.add_widget(self.note_text_field)
        # create account option (sending side)
        self.account_sending_label = MDLabel(
            text="Account Sending:",
            pos_hint = {"x": 0, "y": 0.21},
            size_hint = (0.5, 0.05),
            halign = "center"
        )
        self.layout.add_widget(self.account_sending_label)
        account_sending_text = "Choose Account"
        if recurring_transfer.account_sending is not None:
            account_sending_text = f"{recurring_transfer.account_sending.number}. {recurring_transfer.account_sending.name}"
        self.choose_account_sending_btn = Button(
            text = account_sending_text,
            color = (1, 1, 1, 1),
            background_color = blue,
            pos_hint = {"x": 0.525, "y": 0.21},
            size_hint = (0.45, 0.05),
            background_normal = ""
        )
        self.layout.add_widget(self.choose_account_sending_btn)
        self.choose_account_sending_btn.bind(on_press=self.change_account())
        # create account option (receiving side)
        self.account_receiving_label = MDLabel(
            text="Account Receiving:",
            pos_hint = {"x": 0, "y": 0.155},
            size_hint = (0.5, 0.05),
            halign = "center"
        )
        self.layout.add_widget(self.account_receiving_label)
        account_receiving_text = "Choose Account"
        if recurring_transfer.account_receiving is not None:
            account_receiving_text = f"{recurring_transfer.account_receiving.number}. {recurring_transfer.account_receiving.name}"
        self.choose_account_receiving_btn = Button(
            text = account_receiving_text,
            color = (1, 1, 1, 1),
            background_color = blue,
            pos_hint = {"x": 0.525, "y": 0.155},
            size_hint = (0.45, 0.05),
            background_normal = ""
        )
        self.layout.add_widget(self.choose_account_receiving_btn)
        self.choose_account_receiving_btn.bind(on_press=self.change_account())
        # create error messages (added to the layout only when triggered)
        self.error_account_label = MDLabel(
            text="No accounts chosen.",
            theme_text_color = "Custom",
            halign = "center",
            text_color = red,
            size_hint=(0.5, 0.05),
            pos_hint={"x": 0.25, "y": 0.1175},
        )
        # Create Buttons
        self.cancel_btn = MDRaisedButton(
            text="Cancel",
            md_bg_color = blue,
            size_hint=(0.45, 0.04),
            pos_hint={"x": 0.025, "y": 0.005},
            on_press = self.cancel_pressed(app)
        )
        self.layout.add_widget(self.cancel_btn)
        self.confirm_changes_btn = MDRaisedButton(
            text="Confirm",
            md_bg_color = blue,
            size_hint=(0.45, 0.04),
            pos_hint={"x": 0.525, "y": 0.005},
            on_press = self.confirm_changes()
        )
        self.layout.add_widget(self.confirm_changes_btn)
        self.remove_btn = MDRaisedButton(
            text="Remove Recurring Transfer",
            md_bg_color = red,
            size_hint=(0.5, 0.04),
            pos_hint={"x": 0.25, "y": 0.075},
            on_press = self.remove_act(app)
        )
        self.layout.add_widget(self.remove_btn)
        # Add layout to Add Account Screen
        self.add_widget(self.layout)

    def remove_act(self, app):
        """Return an on_press callback that deletes this recurring transfer
        and tears the screen down."""
        def remove(instance):
            app.recurring_acts_screen.remove_recurring_act(self.recurring_transfer)
            app.switch_screen("recurring_acts_screen")(instance)
            app.transition_diagram.remove_node("edit_recurring_transfer_screen")
            app.screen_manager.remove_widget(app.edit_recurring_transfer_screen)
        return remove

    def change_account(self):
        """Return an on_press callback that opens the account chooser for the
        pressed button (sending or receiving)."""
        def change(instance):
            self.app.choose_account_screen = ChooseAccountScreen(self.app, "edit_recurring_transfer_screen", instance, name="choose_account_screen")
            self.app.screen_manager.add_widget(self.app.choose_account_screen)
            # add screen to transition diagram
            self.app.transition_diagram.add_node("choose_account_screen", root_screen_node = self.app.home_screen_node, left_node = self.app.home_screen_node)
            self.app.switch_screen("choose_account_screen")(instance)
        return change

    def confirm_changes(self):
        """Return an on_press callback that validates the form, persists the
        edited transfer on success, and shows per-field errors otherwise."""
        def confirm(instance):
            errors = []
            new_name = self.name_text_field.text
            # Renaming to an existing transfer's name is rejected; keeping the
            # current name is fine.
            if new_name in self.app.recurring_acts_screen.recurring_acts_dict:
                if new_name != self.recurring_transfer.name:
                    errors.append("name_already_exists")
            value = self.value_text_field.text
            try:
                value = float(value)
                value = round(value, 2)
                if value < 0:
                    errors.append("invalid_value")
            except:
                errors.append("invalid_value")
            simple_start_date = self.start_date_text_field.text
            if not self._validate_date(simple_start_date):
                errors.append("invalid_start_date")
            simple_end_date = self.end_date_text_field.text
            # End date is optional (empty string means "no end date").
            if simple_end_date != "" and not self._validate_date(simple_end_date):
                errors.append("invalid_end_date")
            month_day = self.month_day_text_field.text
            try:
                month_day = int(month_day)
                # Day of month is capped at 28 so it exists in every month.
                if month_day < 1 or month_day > 28:
                    errors.append("invalid_month_day")
            except:
                errors.append("invalid_month_day")
            note = self.note_text_field.text
            if self.choose_account_sending_btn.text == "Choose Account" and self.choose_account_receiving_btn.text == "Choose Account":
                errors.append("no_accounts_chosen")
            if not errors:
                # create new recurring_transfer object
                if simple_end_date != "":
                    end_date = self.app.home_screen.date.parse_string(simple_end_date + " 00:00:00")
                else:
                    end_date = None
                # Account buttons encode "<number>. <name>"; look the account
                # up by its number.
                if self.choose_account_sending_btn.text != "Choose Account":
                    account_sending = self.app.accounts_screen.accounts_dict[int(self.choose_account_sending_btn.text.split(".")[0])]
                else:
                    account_sending = None
                if self.choose_account_receiving_btn.text != "Choose Account":
                    account_receiving = self.app.accounts_screen.accounts_dict[int(self.choose_account_receiving_btn.text.split(".")[0])]
                else:
                    account_receiving = None
                old_name = self.recurring_transfer.name
                if new_name != self.recurring_transfer.name:
                    # update recurring_act location in recurring_acts_dict
                    del self.app.recurring_acts_screen.recurring_acts_dict[self.recurring_transfer.name]
                    if self.recurring_transfer.name in self.app.recurring_acts_screen.displayed_recurring_acts:
                        del self.app.recurring_acts_screen.displayed_recurring_acts[self.recurring_transfer.name]
                    self.app.recurring_acts_screen.displayed_recurring_acts[new_name] = self.recurring_transfer
                    self.recurring_transfer.name = new_name
                    self.app.recurring_acts_screen.recurring_acts_dict[new_name] = self.recurring_transfer
                self.recurring_transfer.value = value
                self.recurring_transfer.start_date = self.app.home_screen.date.parse_string(simple_start_date + " 00:00:00")
                self.recurring_transfer.end_date = end_date
                self.recurring_transfer.month_day = month_day
                self.recurring_transfer.note = note
                self.recurring_transfer.account_sending = account_sending
                self.recurring_transfer.account_receiving = account_receiving
                # modify recurring transfer in storage (delete under the old
                # key, re-store under the possibly-new name)
                self.app.recurring_acts_screen.recurring_transfers_store.delete(old_name)
                self.app.recurring_acts_screen.store_recurring_transfer(self.recurring_transfer)
                self.app.recurring_acts_screen.refresh_row_widgets()
                self.app.switch_screen("recurring_acts_screen")(instance)
                self.app.transition_diagram.remove_node("edit_recurring_transfer_screen")
                self.app.screen_manager.remove_widget(self.app.edit_recurring_transfer_screen)
            # Sync per-field error flags with the validation result.
            self._set_field_error(self.name_text_field, "name_already_exists" in errors)
            self._set_field_error(self.value_text_field, "invalid_value" in errors)
            self._set_field_error(self.start_date_text_field, "invalid_start_date" in errors)
            self._set_field_error(self.end_date_text_field, "invalid_end_date" in errors)
            self._set_field_error(self.month_day_text_field, "invalid_month_day" in errors)
            if "no_accounts_chosen" in errors:
                if self.error_account_label not in self.layout.children:
                    self.layout.add_widget(self.error_account_label)
            elif self.error_account_label in self.layout.children:
                self.layout.remove_widget(self.error_account_label)
        return confirm

    @staticmethod
    def _set_field_error(field, has_error):
        """Set a text field's error flag, writing only on change so Kivy
        property events fire only when the flag actually flips."""
        if bool(field.error) != has_error:
            field.error = has_error

    def cancel_pressed(self, app):
        """Return an on_press callback that discards the edit and tears the
        screen down."""
        def cancel(instance):
            app.switch_screen("recurring_acts_screen")(instance)
            app.transition_diagram.remove_node("edit_recurring_transfer_screen")
            app.screen_manager.remove_widget(app.edit_recurring_transfer_screen)
        return cancel

    @staticmethod
    def _is_leap_year(year):
        # checks if a year int between 0 and 9999 is a leap year
        if year % 4 == 0:
            if year % 100 == 0:
                if year % 400 == 0:
                    return True
                return False
            return True
        return False

    def _validate_date(self, date):
        # return true if date is valid, ie has the format "dd/mm/yyyy" and corresponds to a real date
        # for example, 29/02/2001 is not a real date because 2001 was not a leap year
        calendar = {
            1: 31,
            2: 28,
            3: 31,
            4: 30,
            5: 31,
            6: 30,
            7: 31,
            8: 31,
            9: 30,
            10: 31,
            11: 30,
            12: 31
        }
        leap_calendar = {
            1: 31,
            2: 29,
            3: 31,
            4: 30,
            5: 31,
            6: 30,
            7: 31,
            8: 31,
            9: 30,
            10: 31,
            11: 30,
            12: 31
        }
        split_date = date.split("/")
        try:
            days = split_date[0]
            month = split_date[1]
            year = split_date[2]
            # Require strict zero-padded dd/mm/yyyy widths.
            if len(days) != 2 or len(month) != 2 or len(year) != 4:
                return False
            days = int(days)
            month = int(month)
            year = int(year)
            if year < 0 or year > 9999:
                return False
            if month < 1 or month > 12:
                return False
            if self._is_leap_year(year):
                if days < 1 or days > leap_calendar[month]:
                    return False
            if not self._is_leap_year(year):
                if days < 1 or days > calendar[month]:
                    return False
            return True
        except:
            return False
| Rodrigo-Duarte-8128/expenses-tracker | screens/transfers_screens/edit_recurring_transfer_screen.py | edit_recurring_transfer_screen.py | py | 17,765 | python | en | code | 0 | github-code | 90 |
18314932079 | from collections import defaultdict as dd
# Contest-style snippet: reads N, K and array a, then counts index pairs via
# prefix sums of (a_i - 1) modulo K over a sliding window of width min(N, K-1).
N, K = map(int, input().split())
a = list(map(int, input().split()))
# NOTE(review): b is computed but never used below.
b = [val-1 for val in a]
# c[i] = (sum of (a_j - 1) for the first i elements) mod K
c = [0]*(N+1)
for i,val in enumerate(a):
    c[i+1] = (c[i] + val-1)%K
dic = dd(int)
K2 = min(N,K-1)
right = K2
# Seed the counter with the first K2 prefix values (window start).
for k,val in enumerate(c[1:K2+1]):
    dic[val] += 1
res = 0
# move the left end forward one step at a time
prev = 0
for val in c[1:]:
    tgt = prev
    # count window entries matching the prefix value just left of the window
    res += dic[tgt]
    if right!=N:
        right += 1
        dic[c[right]] += 1
    # drop the element leaving the window on the left
    dic[val] = max(0,dic[val]-1)
    prev = val
print(res)
18383026739 | import sys
# Fast-input aliases (contest boilerplate); only `read` is used below.
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60  # NOTE(review): unused in this snippet
MOD = 1000000007  # NOTE(review): unused in this snippet
def main():
    """Read N, X and the lengths L from stdin; print 1 plus the number of
    prefix sums of L that stay <= X (stops at the first overflow)."""
    N, X, *L = map(int, read().split())
    ans = 1  # the starting point always counts
    d = 0    # running prefix sum of L
    for l in L:
        d += l
        if d <= X:
            ans += 1
        else:
            break
    print(ans)
    return
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03000/s033764393.py | s033764393.py | py | 413 | python | en | code | 0 | github-code | 90 |
33411077577 | from flask import Flask, request, render_template
# from random import choice, sample
from flask_debugtoolbar import DebugToolbarExtension
from stories import Story
app = Flask(__name__)
app.config['SECRET_KEY'] = "oh-so-secret"  # required for sessions / debug toolbar
debug = DebugToolbarExtension(app)
@app.route('/')
def index():
    """Return homepage."""
    return render_template("base.html")
# Single shared madlib template: `prompts` drives the form fields and
# generate() fills the text from the submitted answers.
story_template = Story(
    ["place", "noun", "verb", "adjective", "plural_noun"],
    """Once upon a time in a long-ago {place}, there lived a
    large {adjective} {noun}. It loved to {verb} {plural_noun}."""
)
@app.route('/form')
def render_form():
    """Return form."""
    prompts = story_template.prompts
    return render_template("form.html", prompts=prompts)
@app.route('/story')
def get_story():
    """Return the generated story."""
    result_story = story_template.generate(request.args)
    return render_template("story.html", result_story=result_story)
| ninadel/Springboard-SWE-Exercises | ex_24-2_flaskjinja/flask-madlibs/app.py | app.py | py | 926 | python | en | code | 0 | github-code | 90 |
26500992130 | game_board = []
start = None  # (row, col) of the 'S' cell
goal = None   # (row, col) of the 'E' cell
# read 12.txt into game_board, the board should be a list of characters
# each line should be a list of characters
with open('12.txt', 'r') as f:
    for line in f:
        game_board.append(list(line.strip()))
        if 'S' in line:
            start = (len(game_board) - 1, line.index('S'))
            game_board[-1][line.index('S')] = 'a'
        if 'E' in line:
            goal = (len(game_board) - 1, line.index('E'))
            game_board[-1][line.index('E')] = 'z'
        # convert each row from letters to heights 0..25
        game_board[-1] = [ord(char)-97 for char in game_board[-1]]
starts = lambda x: (x, start, set()) #[(i, j) for i in range(len(x)) for j in range(len(x[0])) if x[i][j] == 0])
# all cells of height 0 (candidate starting points)
starts2 = lambda x: [(i, j) for i in range(len(x)) for j in range(len(x[0])) if x[i][j] == 0]
# Find legal moves for a cell (i,j) in game board gb
neighbors = lambda cell, gb: [x for x in [(cell[0] - 1, cell[1]), (cell[0] + 1, cell[1]), (cell[0], cell[1] - 1), (cell[0], cell[1] + 1)] \
    if (x[0] >= 0 and x[0] < len(gb) and x[1] >= 0 and x[1] < len(gb[0])) and (gb[x[0]][x[1]] == gb[cell[0]][cell[1]] + 1\
    or gb[x[0]][x[1]] <= gb[cell[0]][cell[1]])]
# Re-read the input in lambda form (S -> 0, E -> 25, letters -> 0..25).
# NOTE(review): the files opened here are never closed explicitly.
gb = starts([[ord(x)-97 if x not in ['S', 'E'] else 0 if x=='S' else 25 for x in list(line.strip())] for line in open("12.txt")])
s2 = starts2([[ord(x)-97 if x not in ['S', 'E'] else 0 if x=='S' else 25 for x in list(line.strip())] for line in open("12.txt")])
# Depth-limited recursive search (cap 400); 10000 acts as "no path" sentinel.
bfs = lambda cell, visited, gb, s: [(1, bfs(cell, visited+[cell], gb, s+1))[1] if s < 400 else (s+1 if gb[cell[0]][cell[1]] == 25 else 10000) for cell in neighbors(cell, gb) if cell not in visited]
flatten_nested = lambda x: [item for sublist in x for item in sublist]
recursive_flatten = lambda x: [item for sublist in x for item in (recursive_flatten(sublist) if isinstance(sublist, list) else [sublist])]
print(min([min(recursive_flatten(bfs(start_cell, [], gb[0], 1))) for start_cell in s2]))
# for each cell, check updownleftright, if legal, do recursive call
# if not legal, return 0
exit()
# NOTE(review): everything below is dead code — exit() above always stops the
# script before this earlier BFS implementation runs.
# print game board
print("\n".join(["".join([chr(char+97) for char in line]) for line in game_board]))
# exit()
current_step = {start}
# Add all cells that have value 0 to current_step
current_step.update([(i, j) for i in range(len(game_board)) for j in range(len(game_board[0])) if game_board[i][j] == 0])
visited = set()
steps = 0
found = False
while not found:
    if found: break
    steps+= 1
    next_step = set()
    for cell in current_step:
        # check cells up, dpwn, left, right
        for coord in [(cell[0] - 1, cell[1]), (cell[0] + 1, cell[1]), (cell[0], cell[1] - 1), (cell[0], cell[1] + 1)]:
            if coord in visited:
                continue
            if coord[0] < 0 or coord[0] >= len(game_board) or coord[1] < 0 or coord[1] >= len(game_board[0]):
                continue
            if (game_board[coord[0]][coord[1]] == game_board[cell[0]][cell[1]] + 1) or (game_board[coord[0]][coord[1]] <= game_board[cell[0]][cell[1]]):
                next_step.add(coord)
                if coord == goal:
                    found = True
                    break
    [visited.add(coord) for coord in current_step]
    current_step = next_step
print(steps)
27654386035 | from typing import List
import os
import logging
from commons import utils, bq_client
from inference.nn_model_training import net_training_fn
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder, LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split
import optuna
import lightgbm as lgb
import datetime
import numpy as np
import pandas as pd
import math
# Module-level logger; level comes from the LOG_LEVEL env var (default INFO).
log = logging.getLogger(__name__)
log.setLevel(os.environ.get("LOG_LEVEL", "INFO"))
# NOTE(review): captured once at import time and apparently unused in this
# module — consider removing or moving it inside training_fn.
date_ = datetime.datetime.now()
def training_fn(bq_project: str,
                bq_dataset: str,
                train_table_id: str,
                model_name: str,
                embedding_features: List[str],
                metadata_table_id: str,
                filter_list: List[str],
                git_branch: str,
                batch_size: int,
                epochs: int,
                data_limit: int = None,
                # regression_labels: List[str] = None,
                regression_labels: List[str] = None,
                hyperparameter_tuning=True):
    """Run neural-network training against the given BigQuery table.

    Pulls the data, fits a ColumnTransformer (impute+scale numerics, one-hot
    categoricals, ordinal-encode embedding features), splits TRAIN/TEST/EVAL
    by the DATA_LABEL column and hands the arrays to ``net_training_fn``.

    :param bq_project: BigQuery project ID where the dataset resides
    :param bq_dataset: BigQuery dataset ID where the table resides
    :param train_table_id: table ID holding the training data
    :param model_name: base name used when saving model artifacts
    :param embedding_features: list of features for entity embeddings
    :param metadata_table_id: table ID that holds training metadata
    :param filter_list: list of columns to filter from data
    :param git_branch: git branch associated with this code/experiment
    :param batch_size: training batch size forwarded to net_training_fn
    :param epochs: number of epochs forwarded to net_training_fn
    :param data_limit: number of records to run training on (None = all)
    :param regression_labels: list of labels used for regression targets
    :param hyperparameter_tuning: whether hyperparameter tuning will be done
        (NOTE(review): currently unused in this function body)
    """
    labels = regression_labels
    print(f"Starting training: "
          f"| project: {bq_project} "
          f"| dataset: {bq_dataset} "
          f"| table: {train_table_id}")
    # Experiment name: unix timestamp + branch, used for artifact paths.
    exp_name = str(math.floor(datetime.datetime.now().timestamp())) + f'_{git_branch}'
    print(f'Experiment: {exp_name}')
    train_table = f'{bq_project}.{bq_dataset}.{train_table_id}'
    metadata_table = f'{bq_project}.{bq_dataset}.{metadata_table_id}_{git_branch}'
    # Pull data
    data = utils.query_data(train_table, reduce_mem=True, predict_data=False, limit=data_limit)
    #data = pd.read_csv('train_data.csv', nrows=100000)
    # Get features and type
    features = utils.get_features(train_table, filter_list)
    categorical_cols, numeric_cols = utils.split_column_type(features, exclude_cols=labels)
    # embedding features are encoded separately, not one-hot with the rest
    categorical_cols = list(set(categorical_cols) - set(embedding_features))
    print(f'categorical features: {categorical_cols}')
    print(f'numerical features: {numeric_cols}')
    print(f'embedding features: {embedding_features}')
    # Preprocess
    #data.to_csv('train_data.csv')
    # Reserve one ordinal value beyond the training vocabulary for unknowns.
    unknown_val = len(np.unique(data.loc[data['DATA_LABEL'] == 'TRAIN'][embedding_features])) + 1
    print(unknown_val)
    # column transformer
    num_pipe = Pipeline([('imputer', SimpleImputer()), ('normalize', StandardScaler())])
    transformer = ColumnTransformer(transformers=[('num', num_pipe, numeric_cols),
                                                  ('cat', OneHotEncoder(handle_unknown='ignore', sparse=False,
                                                                        dtype=np.float32), categorical_cols),
                                                  ('embed', OrdinalEncoder(handle_unknown='use_encoded_value',
                                                                           unknown_value=unknown_val, dtype=np.float32),
                                                   embedding_features)
                                                  ],
                                    # one hot encode all categoricals
                                    sparse_threshold=0,
                                    remainder='passthrough'
                                    )
    print(f"Labels: {labels}")
    # Fit the transformer on TRAIN only; TEST/EVAL reuse the fitted transformer.
    x_train, y_train = utils.preprocess(data.loc[data['DATA_LABEL'] == 'TRAIN'],
                                        numeric_cols,
                                        categorical_cols,
                                        embedding_features,
                                        label=labels,
                                        dense=True,
                                        transformer=transformer,
                                        train=True)
    x_test, y_test = utils.preprocess(data.loc[data['DATA_LABEL'] == 'TEST'],
                                      numeric_cols,
                                      categorical_cols,
                                      embedding_features,
                                      label=labels,
                                      dense=True,
                                      transformer=transformer,
                                      train=False)
    x_eval, y_eval = utils.preprocess(data.loc[data['DATA_LABEL'] == 'EVAL'],
                                      numeric_cols,
                                      categorical_cols,
                                      embedding_features,
                                      label=labels,
                                      dense=True,
                                      transformer=transformer,
                                      train=False)
    # Persist the fitted transformer alongside the experiment artifacts.
    ohe_path = utils.save_model(transformer, f'{model_name}_transformer', 'store-ops-ml', exp_name)
    print(f'Train shape: {x_train.shape}, {y_train.shape}')
    print(f'Test shape: {x_test.shape}, {y_test.shape}')
    print(f'Eval shape: {x_eval.shape}, {y_eval.shape}')
    feature_names = utils.get_feature_names(transformer)
    # find embedding features and exclude them from the lgbm modeling
    # NOTE(review): assumes the embedding column is the last one emitted by the
    # transformer, since everything after its index is dropped — confirm.
    embed_bay_idx = feature_names.index('embed__BAY_LOC')
    print(f'Embedding col index: {embed_bay_idx}')
    bay_embed_train = x_train[:, embed_bay_idx]
    bay_embed_test = x_test[:, embed_bay_idx]
    bay_embed_eval = x_eval[:, embed_bay_idx]
    num_bay_tokens = len(np.unique(bay_embed_train))
    print(num_bay_tokens)
    # Split off the embedding column; the network takes it as a separate input.
    x_train = x_train[:, :embed_bay_idx]
    x_test = x_test[:, :embed_bay_idx]
    x_eval = x_eval[:, :embed_bay_idx]
    print(x_train.shape, bay_embed_train.shape)
    print(x_test.shape, bay_embed_test.shape)
    print(x_eval.shape, bay_embed_eval.shape)
    net_training_fn(train_data=(x_train, bay_embed_train, y_train),
                    test_data=(x_test, bay_embed_test, y_test),
                    eval_data=(x_eval, bay_embed_eval, y_eval),
                    data_features=[*categorical_cols, *numeric_cols, *embedding_features],
                    model_name=model_name,
                    num_tokens=num_bay_tokens,
                    batch_size=batch_size,
                    epochs=epochs,
                    metadata_table=metadata_table,
                    exp_name=exp_name
                    )
| thorrester/ML_EXAMPLE | entry_points/inference/training.py | training.py | py | 7,243 | python | en | code | 0 | github-code | 90 |
46181672270 | '''chat_client.py.'''
import sys
import socket
import select
def chat_client():
    '''Connect to a chat server and relay traffic between stdin and the socket.

    Reads host and port from sys.argv, then multiplexes with select() between
    keyboard input (sent to the server) and server data (echoed to stdout).
    Exits the process on usage error, connection failure, or disconnect.
    '''
    if len(sys.argv) < 3:
        print("Usage : python chat_client.py hostname port")
        sys.exit()
    host = sys.argv[1]
    port = int(sys.argv[2])
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.settimeout(2)
    # connect to remote host
    try:
        soc.connect((host, port))
    except OSError:
        # OSError covers socket.error, timeouts and DNS failures; a bare
        # except also swallowed SystemExit/KeyboardInterrupt (bug).
        print("Unable to connect")
        sys.exit()
    print("Connected to remote host. You can start sending messages")
    sys.stdout.write('[Me] ')
    sys.stdout.flush()
    while True:
        socket_list = [sys.stdin, soc]
        # Block until either stdin or the socket is readable.
        ready_to_read, _, _ = select.select(socket_list, [], [])
        for sock in ready_to_read:
            if sock == soc:
                # incoming message from remote server
                data = sock.recv(4096)
                if not data:
                    print("\nDisconnected from chat server")
                    sys.exit()
                else:
                    # BUG FIX: recv() returns bytes on Python 3 -- decode
                    # before writing to the text-mode stdout.
                    sys.stdout.write(data.decode('utf-8', errors='replace'))
                    sys.stdout.write('[Me] ')
                    sys.stdout.flush()
            else:
                # user entered a message
                msg = sys.stdin.readline()
                # BUG FIX: sockets require bytes, not str, on Python 3.
                soc.send(msg.encode('utf-8'))
                sys.stdout.write('[Me] ')
                sys.stdout.flush()
# Run the client only when executed as a script (not on import).
if __name__ == "__main__":
    sys.exit(chat_client())
| dhanraju/python | sock_prog/cli_serv/chat_app/chat_client.py | chat_client.py | py | 1,597 | python | en | code | 0 | github-code | 90 |
39767488242 | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, BooleanField, SelectField
from wtforms.validators import InputRequired, URL, Optional, NumberRange
class AddPetForm(FlaskForm):
    """Form for adding a new pet to the adoption listing."""

    name = StringField('Pet Name', validators=[InputRequired(message="Name is required")])
    # NOTE(review): no choices are declared here -- presumably the view
    # populates SelectField.choices before rendering; confirm in the route.
    species = SelectField('Species')
    photo_url = StringField('Photo URL', validators=[URL(), Optional()])
    age = IntegerField('Age in Years', validators=[NumberRange(min=0, max=30, message='Please enter a number from 0 to 30')])
    notes = StringField('Add details about personality and temperament')
class EditPetForm(FlaskForm):
    """Form for editing an existing pet's mutable details."""

    photo_url = StringField('Photo URL', validators=[URL(), Optional()])
    notes = StringField('Notes')
    available = BooleanField('Available for adoption')
| TaraDenniston/adopt | forms.py | forms.py | py | 801 | python | en | code | 0 | github-code | 90 |
9891821008 | import time
from dataclasses import dataclass
from email.utils import formatdate, mktime_tz, parsedate_tz
from typing import Iterable, Mapping, Optional, Tuple, Union
from seleniumwire.thirdparty.mitmproxy.coretypes import multidict
from seleniumwire.thirdparty.mitmproxy.net.http import cookies, status_codes, message
from seleniumwire.thirdparty.mitmproxy.net.http.headers import Headers
from seleniumwire.thirdparty.mitmproxy.utils import human, strutils
from seleniumwire.thirdparty.mitmproxy.utils.strutils import always_bytes
@dataclass
class ResponseData(message.MessageData):
    # Wire-level fields specific to an HTTP response status line; the shared
    # fields (headers, content, timestamps, ...) live in MessageData.
    status_code: int
    reason: bytes
class Response(message.Message):
    """
    An HTTP response.
    """
    data: ResponseData

    def __init__(
        self,
        http_version: bytes,
        status_code: int,
        reason: bytes,
        headers: Union[Headers, Tuple[Tuple[bytes, bytes], ...]],
        content: Optional[bytes],
        trailers: Union[None, Headers, Tuple[Tuple[bytes, bytes], ...]],
        timestamp_start: float,
        timestamp_end: Optional[float],
    ):
        # auto-convert invalid types to retain compatibility with older code.
        if isinstance(http_version, str):
            http_version = http_version.encode("ascii", "strict")
        if isinstance(reason, str):
            reason = reason.encode("ascii", "strict")
        if isinstance(content, str):
            raise ValueError("Content must be bytes, not {}".format(type(content).__name__))
        if not isinstance(headers, Headers):
            headers = Headers(headers)
        if trailers is not None and not isinstance(trailers, Headers):
            trailers = Headers(trailers)

        self.data = ResponseData(
            http_version=http_version,
            status_code=status_code,
            reason=reason,
            headers=headers,
            content=content,
            trailers=trailers,
            timestamp_start=timestamp_start,
            timestamp_end=timestamp_end,
        )

    def __repr__(self) -> str:
        # Summarize content type/size for debugging without dumping the body.
        if self.raw_content:
            ct = self.headers.get("content-type", "unknown content type")
            size = human.pretty_size(len(self.raw_content))
            details = f"{ct}, {size}"
        else:
            details = "no content"
        return f"Response({self.status_code}, {details})"

    @classmethod
    def make(
        cls,
        status_code: int = 200,
        content: Union[bytes, str] = b"",
        headers: Union[Headers, Mapping[str, Union[str, bytes]], Iterable[Tuple[bytes, bytes]]] = ()
    ) -> "Response":
        """
        Simplified API for creating response objects.
        """
        # Normalise the accepted header representations to a Headers object.
        if isinstance(headers, Headers):
            headers = headers
        elif isinstance(headers, dict):
            headers = Headers(
                (always_bytes(k, "utf-8", "surrogateescape"),
                 always_bytes(v, "utf-8", "surrogateescape"))
                for k, v in headers.items()
            )
        elif isinstance(headers, Iterable):
            headers = Headers(headers)
        else:
            raise TypeError("Expected headers to be an iterable or dict, but is {}.".format(
                type(headers).__name__
            ))

        resp = cls(
            b"HTTP/1.1",
            status_code,
            status_codes.RESPONSES.get(status_code, "").encode(),
            headers,
            None,
            None,
            time.time(),
            time.time(),
        )

        # Assign this manually to update the content-length header.
        if isinstance(content, bytes):
            resp.content = content
        elif isinstance(content, str):
            resp.text = content
        else:
            raise TypeError(f"Expected content to be str or bytes, but is {type(content).__name__}.")

        return resp

    @property
    def status_code(self) -> int:
        """
        HTTP Status Code, e.g. ``200``.
        """
        return self.data.status_code

    @status_code.setter
    def status_code(self, status_code: int) -> None:
        self.data.status_code = status_code

    @property
    def reason(self) -> str:
        """
        HTTP Reason Phrase, e.g. "Not Found".
        HTTP/2 responses do not contain a reason phrase, an empty string will be returned instead.
        """
        # Encoding: http://stackoverflow.com/a/16674906/934719
        return self.data.reason.decode("ISO-8859-1")

    @reason.setter
    def reason(self, reason: Union[str, bytes]) -> None:
        self.data.reason = strutils.always_bytes(reason, "ISO-8859-1")

    def _get_cookies(self):
        # Parse every Set-Cookie header into (name, (value, attrs)) tuples.
        h = self.headers.get_all("set-cookie")
        all_cookies = cookies.parse_set_cookie_headers(h)
        return tuple(
            (name, (value, attrs))
            for name, value, attrs in all_cookies
        )

    def _set_cookies(self, value):
        # Re-serialize (name, (value, attrs)) tuples back into Set-Cookie headers.
        cookie_headers = []
        for k, v in value:
            header = cookies.format_set_cookie_header([(k, v[0], v[1])])
            cookie_headers.append(header)
        self.headers.set_all("set-cookie", cookie_headers)

    @property
    def cookies(self) -> multidict.MultiDictView:
        """
        The response cookies. A possibly empty
        :py:class:`~seleniumwire.thirdparty.mitmproxy.net.multidict.MultiDictView`,
        where the keys are cookie name strings, and values are (value, attr) tuples.
        Value is a string, and attr is an MultiDictView containing cookie attributes.
        Within attrs, unary attributes (e.g. HTTPOnly) are indicated by a Null value.
        Caveats:
            Updating the attr
        """
        # NOTE(review): the "Caveats" sentence above is truncated as inherited
        # from upstream mitmproxy; left unchanged deliberately.
        return multidict.MultiDictView(
            self._get_cookies,
            self._set_cookies
        )

    @cookies.setter
    def cookies(self, value):
        self._set_cookies(value)

    def refresh(self, now=None):
        """
        This fairly complex and heuristic function refreshes a server
        response for replay.
        - It adjusts date, expires and last-modified headers.
        - It adjusts cookie expiration.
        """
        if not now:
            now = time.time()
        delta = now - self.timestamp_start
        refresh_headers = [
            "date",
            "expires",
            "last-modified",
        ]
        for i in refresh_headers:
            if i in self.headers:
                d = parsedate_tz(self.headers[i])
                if d:
                    # Shift the header's timestamp forward by the replay delay.
                    new = mktime_tz(d) + delta
                    self.headers[i] = formatdate(new, usegmt=True)
        c = []
        for set_cookie_header in self.headers.get_all("set-cookie"):
            try:
                refreshed = cookies.refresh_set_cookie_header(set_cookie_header, delta)
            except ValueError:
                # Keep unparseable cookies untouched rather than dropping them.
                refreshed = set_cookie_header
            c.append(refreshed)
        if c:
            self.headers.set_all("set-cookie", c)
| wkeeling/selenium-wire | seleniumwire/thirdparty/mitmproxy/net/http/response.py | response.py | py | 6,967 | python | en | code | 1,689 | github-code | 90 |
18174441449 | import os
import sys
import math
import heapq
from decimal import *
from io import BytesIO, IOBase
from collections import defaultdict, deque
def r():
    # Read one integer from stdin.
    return int(input())
def rm():
    # Read one line of whitespace-separated integers as a map iterator.
    return map(int,input().split())
def rl():
    # Read one line of whitespace-separated integers as a list.
    return list(map(int,input().split()))
def chk(mid, a, n, k):
    """Feasibility test for the binary search: can every log in `a` be
    shortened to length <= mid using at most k cuts in total?

    A log of length L > mid needs ceil(L / mid) - 1 cuts; shorter logs need none.
    """
    needed = 0
    for length in a:
        if length > mid:
            needed += -(-length // mid) - 1  # ceil(length / mid) - 1
    return needed <= k
# Binary search on the answer: smallest maximum piece length `mid` such that
# all logs can be shortened to <= mid using at most k cuts.
n,k = rm()
a = rl()
lo = 1
hi = 10**9
ans = 10**9
while lo < hi :
    mid = (lo+hi)//2
    if chk(mid,a,n,k):
        # Feasible: remember it and try a smaller maximum length.
        hi=mid
        ans=mid
    else:
        lo = mid+1
print(ans)
21358537955 | # requires: gtts
import os
from gtts import gTTS
from telethon.tl.types import DocumentAttributeAudio
from telethon.errors import MessageEmptyError, TimeoutError
from telethon import events
from .. import loader, utils
def register(cb):
    # Entry point called by the userbot loader to register this module.
    cb(SayTextMod())
class SayTextMod(loader.Module):
    """Text-to-speech module: converts text to a Telegram voice note via gTTS.

    The command docstring below is kept in Russian because the loader shows it
    to users as runtime help text.
    """
    strings = {"name": "SayText"}

    async def saycmd(self, message):
        """.say <текст> - преобразует текст в голосовое сообщение."""
        # Take text from the command arguments, falling back to the replied-to
        # message's text.
        text = utils.get_args_raw(message)
        if not text:
            reply = await message.get_reply_message()
            if reply and reply.message:
                text = reply.message
            else:
                return await utils.answer(message, "<b>Отсутствует текст или ответное сообщение.</b>")
        sent_message = await utils.answer(message, "<b>Генерация голосового сообщения...</b>")
        try:
            tts = gTTS(text, lang="ru")
            tts.save("say.ogg")
            voice = await message.client.upload_file("say.ogg")
            await message.client.send_file(
                message.chat_id,
                voice,
                voice_note=True,
                reply_to=message.id,
                attributes=[DocumentAttributeAudio(duration=0)],
                timeout=60,
            )
        except (MessageEmptyError, TimeoutError):
            return await utils.answer(message, "<b>Не удалось отправить голосовое сообщение.</b>")
        finally:
            # BUG FIX: if gTTS fails before save() the file never exists, and an
            # unconditional os.remove() raised FileNotFoundError here, masking
            # the original exception. Only delete the file if it was created.
            if os.path.exists("say.ogg"):
                os.remove("say.ogg")
        await sent_message[0].delete()
70764020136 | import pygame as pg
import pytmx
# import sys
# from os import path
# # ---- #
# import pygame
# import pytmx
# # import cv2
# # ---- #
#
# from strings import *
#
# # ---- #
#
# def collide_hit_rect(one, two):
# return one.hit_rect.colliderect(two.rect)
class TiledMap:
    """Wraps a Tiled (.tmx) map and renders its visible tile layers."""

    def __init__(self, filename):
        tiled_map = pytmx.load_pygame(filename, pixelalpha=True)
        self.width = tiled_map.width * tiled_map.tilewidth
        self.height = tiled_map.height * tiled_map.tileheight
        self.tmxdata = tiled_map

    def render(self, surface):
        """Blit every tile of every visible tile layer onto `surface`."""
        get_tile = self.tmxdata.get_tile_image_by_gid
        tile_w = self.tmxdata.tilewidth
        tile_h = self.tmxdata.tileheight
        for layer in self.tmxdata.visible_layers:
            if not isinstance(layer, pytmx.TiledTileLayer):
                continue
            for col, row, gid in layer:
                image = get_tile(gid)
                if image:
                    surface.blit(image, (col * tile_w, row * tile_h))

    def make_map(self):
        """Return a new Surface with the whole map pre-rendered onto it."""
        canvas = pg.Surface((self.width, self.height))
        self.render(canvas)
        return canvas
| AnthonyMc0525/PokemonGame | src/tiledmap.py | tiledmap.py | py | 1,414 | python | en | code | 0 | github-code | 90 |
9178615925 | import sys
import os
import os.path as osp
import math
import time
import requests
import zipfile, tarfile, gzip
import torch
from glob import glob
from tqdm import tqdm
def download_file(url, filepath):
    """Download *url* to *filepath*, streaming in 1 MB chunks with a tqdm bar.

    Exits the process with status 1 if the number of bytes written does not
    match the server's Content-Length header.
    """
    print("Downloading %s..." % url)
    r = requests.get(url, stream=True)
    total_size = int(r.headers.get('content-length', 0))
    block_size = 1024 * 1024  # 1 MB per chunk
    wrote = 0
    with open(filepath, 'wb') as f:
        # BUG FIX: the chunk total used `total_size // block_size` inside
        # math.ceil(), which truncated before the ceil and under-counted by one
        # for any non-multiple size; true division gives the correct count.
        for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size / block_size), unit='MB'):
            wrote = wrote + len(data)
            f.write(data)
    if total_size != 0 and wrote != total_size:
        print("Downloading failed")
        sys.exit(1)
def extract_gzfile(filepath, dstdir='data'):
    """Extract a single-file gzip archive into *dstdir* (created if missing).

    The output file keeps the archive's base name minus its '.gz' suffix.
    """
    os.makedirs(dstdir, exist_ok=True)
    filename = osp.basename(filepath)
    print('Extracting {}...'.format(filename))
    filename = filename.replace('.gz', '')
    # BUG FIX: gz.read() returns bytes, but the output was opened in text mode
    # ('w+'), raising TypeError; neither handle was ever closed. Binary mode
    # plus context managers fix both.
    with gzip.GzipFile(filepath, 'r') as gz:
        with open(osp.join(dstdir, filename), 'wb') as out:
            out.write(gz.read())
def extract_zipfile(filepath, dstdir='data'):
    """Unpack a .zip archive into *dstdir* (created if missing)."""
    os.makedirs(dstdir, exist_ok=True)
    archive_name = osp.basename(filepath)
    print('Extracting {}...'.format(archive_name))
    with zipfile.ZipFile(filepath, 'r') as archive:
        archive.extractall(dstdir)
def extract_tarfile(filepath, dstdir='data'):
    """Unpack a tar archive (plain or gzip/bzip2/xz compressed) into *dstdir*."""
    os.makedirs(dstdir, exist_ok=True)
    filename = osp.basename(filepath)
    print('Extracting {}...'.format(filename))
    # BUG FIX: tarfile.TarFile() only reads uncompressed tars and fails on
    # .tar.gz/.tar.bz2/.tar.xz; tarfile.open() auto-detects the compression
    # and still handles plain .tar files. The context manager closes the handle.
    with tarfile.open(filepath, 'r') as tar:
        tar.extractall(dstdir)
def get_last_checkpoint(dstdir):
    """Return the lexicographically last '*.pth' path in *dstdir*, or None."""
    checkpoints = sorted(glob(osp.join(dstdir, '*.pth')))
    if not checkpoints:
        return None
    return checkpoints[-1]
def save_checkpoint(logdir, epoch, epochs_since_improvement, model, optimizer, loss, is_best):
    """Serialize training state to '<logdir>/final_checkpoint.pth'; when
    *is_best* is truthy, also copy it to 'best_checkpoint.pth'."""
    state_dict = {
        'epoch': epoch,
        'epochs_since_improvement': epochs_since_improvement,
        'loss': loss,
        'model': model,
        'optimizer': optimizer
    }
    checkpoint_file_name = 'final_checkpoint.pth'
    torch.save(state_dict, osp.join(logdir, checkpoint_file_name))
    print(f"Saved the checkpoint (epoch={epoch:04d}) to '{checkpoint_file_name}'")
    # If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint
    if is_best:
        torch.save(state_dict, osp.join(logdir, 'best_checkpoint.pth'))
        print(f"Saved the checkpoint (epoch={epoch:04d}) to 'best_checkpoint.pth'")
def load_checkpoint(logdir, checkpoint_file_name=None):
    """Load a training checkpoint saved by save_checkpoint().

    Args:
        logdir: directory containing the checkpoint files.
        checkpoint_file_name: file to load; defaults to 'final_checkpoint.pth'.

    Returns:
        (epoch, epochs_since_improvement, model, optimizer, loss), where epoch
        is the stored epoch + 1 (i.e. the next epoch to run).
    """
    # BUG FIX: the original condition was inverted ("... if checkpoint_file_name
    # is None else 'final_checkpoint.pth'"): the default path crashed in
    # osp.join(logdir, None), and an explicitly passed name was silently
    # replaced by the default.
    if checkpoint_file_name is None:
        checkpoint_file_name = 'final_checkpoint.pth'
    checkpoint = torch.load(osp.join(logdir, checkpoint_file_name))
    epoch = checkpoint['epoch'] + 1
    epochs_since_improvement = checkpoint['epochs_since_improvement']
    loss = checkpoint['loss']
    model = checkpoint['model']
    optimizer = checkpoint['optimizer']
    print(f"Loaded the checkpoint (epoch={epoch:04d}) from '{checkpoint_file_name}'")
    return epoch, epochs_since_improvement, model, optimizer, loss
| atomicoo/Tacotron2-PyTorch | utils/common.py | common.py | py | 3,274 | python | en | code | 12 | github-code | 90 |
28042448127 | '''Display images and predicted masks using streamlit'''
import torch
import torchvision.transforms as transforms
import numpy as np
import streamlit as st
import os
import argparse
import skimage.io as io
import src.utils as utils
import torch
import numpy as np
from PIL import Image
from src.models import ResNetModel
from src.post_process import CleanUp
def show_header(name, avatar_image_url, **links):
    # Render the page header: a round avatar image, the given name, and one
    # "label | label" row of markdown links built from the keyword arguments.
    links = ' | '.join('[%s](%s)' % (key, url) for key, url in links.items())
    st.write(
        """
        <img src="%s" style="border-radius:50%%;height:100px;vertical-align:text-bottom;padding-bottom:10px"/>
        <span style="display:inline-block;padding-left:10px;padding-bottom:20px;font-size:3rem;vertical-align:bottom">%s</span>
        %s
        """ % (avatar_image_url, name, links))
show_header(
avatar_image_url="https://hongshan-public.s3-us-west-2.amazonaws.com/hongshan_headshot_icon.png",
name="Hongshan Li",
github='https://github.com/HongshanLi/TreeDetector',
linkedin='https://www.linkedin.com/in/hongshanli/',
)
st.markdown("# Welcome to TreeDetector")
st.write(
"This is the Streamlit demo of the deep project I completed as an Artificial Intelligence Fellow at Insight Data Science. \
The goal of the project is to train a deep learning model that can segment \
trees from 2D aerial imagery. My best performing model uses ResNet152 as backbone feature extractor.\
You can play with the model and see it in action here.")
@st.cache
def load_image(filename):
    """Load an RGB-IR tif and return (full-res RGB array, <=600px thumbnail).

    BUG FIX: the *filename* argument was ignored in favor of a hard-coded path,
    which also defeated st.cache keying on the argument. The only existing call
    site passes that same path, so behavior for it is unchanged.
    """
    img = io.imread(filename)
    large_image = img[:, :, 0:3]  # drop the IR channel, keep RGB
    small_image = Image.fromarray(large_image)
    small_image.thumbnail((600, 600))  # in-place downscale, aspect preserved
    small_image = np.array(small_image)
    return large_image, small_image
@st.cache
def init_clean_up():
    # Build a default-threshold CleanUp post-processor (cached by streamlit).
    # NOTE(review): the script below constructs CleanUp(threshold=0.5) directly,
    # so this helper appears unused -- confirm before removing.
    return CleanUp()
cleanup = CleanUp(threshold=0.5)
img, thumbnail = load_image("sample_raw_data/037185-0_RGB-Ir.tif")
#st.write(img.shape)
#st.write(thumbnail.shape)
#st.image(img, width=600)
st.write("The image below comes from the test set:")
st.image(thumbnail, use_column_width=True, caption="sample image from test set (not used in training)")
st.write("You can crop a 250 x 250 sub-image from it by moving the slide bar below. The x and y value from the slide bar will be the x and y offsets (the coordinates of the top-left corner) of the sub-image:")
x = st.slider('X offset', 0, 0, 1000, 1)
y = st.slider('Y offset', 0, 0, 1000, 1)
st.write("Once you cropped the image, the model will draw a contour (in red) around the place, where it thinks has trees.")
sub_img = img[y:y+250, x:x+250, :]
result_caption="Running the detector in realtime."
result = st.image(sub_img, width=250, caption=result_caption)
device = torch.device('cuda:0' if torch.cuda.is_available()
else 'cpu')
@st.cache
def load_model():
    # Restore the trained ResNet segmentation model from its checkpoint and
    # move it to `device`; cached so streamlit reruns don't reload the weights.
    model = ResNetModel(pretrained=False,use_lidar=False)
    model.load_state_dict(
        torch.load('resnet_real_ckps/model_9.pth', map_location=device))
    model.to(device)
    return model
# normalize the image
model = load_model()
x = sub_img.astype(np.float32)
transform = transforms.Compose([
transforms.ToTensor()
])
x = transform(x)
mean = torch.mean(x, dim=(1,2))
std = torch.std(x, dim=(1,2))
mean = mean.view(3, 1, 1)
std = std.view(3, 1, 1)
x = (x - mean) / std
x = x.unsqueeze(0)
x = x.to(device)
mask = model(x)
_,_,h,w = mask.shape
mask = mask.view(h,w)
mask = mask.detach().cpu().numpy()
mask = cleanup(mask)
mask = np.array(mask)
mask = mask[:, :, 0] / 255
mask = np.array([mask, mask, mask]).transpose((1,2,0))
sub_img = sub_img / 255
red = np.zeros(sub_img.shape)+[1,0,0]
#st.image(red)
mask = 0.5*(1 + mask)
composite = sub_img * mask + red*(1- mask)
result.image(composite, width=250, caption=result_caption)
# stack mask on top of image
#st.image([mask])
#st.button(label="test")
#x = st.slider(label="x coordinate of the crop center")
#y = st.slider(label="y coordinate of the crop center")
#st.write(x, y)
def pixelwise_accuracy(mask, target):
    '''Fraction of pixels on which `mask` and `target` agree exactly.

    Args:
        mask (np.float32): black and white mask
            black = object, white = background
        target (np.float32): ground-truth mask in the same format
    '''
    n_pixels = mask.shape[0] * mask.shape[1]
    n_correct = np.sum((mask == target).astype(np.float32))
    return (n_correct / n_pixels).item()
def compute_iou(mask, target):
'''compute intersection over union
Args:
mask (np.float32): black and white mask
black = object, white = background
target (np.float32): ...
'''
# make object have pixel value 1
mask = (mask == 0).astype(np.float32)
target = (target == 0).astype(np.float32)
intersection = mask*target
union = mask + target - intersection
iou = np.sum(intersection) / np.sum(union)
return iou.item()
def get_background(img):
    '''
    Args:
        img (np.uint8): input image
    Return (np.float32): background mask of the image --
        1.0 where the pixel is pure white (background), 0.0 everywhere else.
    '''
    gray = np.mean(img.astype(np.float32), axis=2) / 255
    return (gray == 1).astype(np.float32)
| HongshanLi/TreeDetector | streamlit_proj.py | streamlit_proj.py | py | 5,284 | python | en | code | 5 | github-code | 90 |
4317804831 | class Graph():
def __init__(self,v,g):
self.v = v
self.g = g
def dijkstra(self,src):
dist=[9999]*self.v
mst=[False]*self.v
dist[src]=0
for cout in range(self.v-1):
u = self.mindist(dist, mst)
mst[u] = True
for v in range(self.v):
if self.g[u][v]!=0 and mst[v] == False and dist[v] > dist[u] + self.g[u][v]:
dist[v] = dist[u] + self.g[u][v]
self.printres(dist)
def printres(self,dist):
for node in range(self.v):
print('{} to {} is {}\n'.format(src,node,dist[node]))
def mindist(self, dist, mst):
min=9999
for v in range(self.v):
if dist[v] < min and mst[v] == False:
min = dist[v]
min_index = v
return min_index
v=int(input("Enter no. of vertex::"))
g=[[0, 4, 0, 0, 0, 0, 0, 8, 0],
[4, 0, 8, 0, 0, 0, 0, 11, 0],
[0, 8, 0, 7, 0, 4, 0, 0, 2],
[0, 0, 7, 0, 9, 14, 0, 0, 0],
[0, 0, 0, 9, 0, 10, 0, 0, 0],
[0, 0, 4, 14, 10, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 1, 6],
[8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0] ]
'''
for i in range(v):
a=[]
for j in range(v):
a.append(int(input('{} to {} weight::'.format(i,j)))) # incase of dynamic input
g.append(a)
'''
graph=Graph(v,g)
src=int(input("source:"))
graph.dijkstra(src)
| saikat519/Algorithms | dijkstra.py | dijkstra.py | py | 1,526 | python | en | code | 0 | github-code | 90 |
71183789737 | '''
Sorts title candidates for a given document
'''
# built-in
import os
import argparse
import pdb
import json
import pickle
import logging
# external
import pandas as pd
# customs
import data
import engine
import utils
logging.basicConfig(level=logging.DEBUG)
def main(
        data_path,
        train_data_path,
        val_data_path,
        test_data_path,
        output_path,
        prediction_name='suggestion.json',
        cache_dir=None,
        model_type='lda',
):
    '''
    train a model and make a prediction

    Args:
        data_path: path to the data json file
        train_data_path: path to the train data
        val_data_path: path to the val data
        test_data_path: path to the test data
        output_path: path to the output dir
        prediction_name: the name of prediction output file
        cache_dir: where to save cache
        model_type: which model to use ('lda' or 'doc2vec')

    Returns:
        None
    '''
    # load data
    print('Loading data')
    documents, titles = data.load_doc_title(
        data_path,
        cache_path=os.path.join(cache_dir, 'preproccessed') if cache_dir is not None else None,
    )
    train_data = data.load_train(train_data_path)
    val_data = data.load_val(val_data_path)
    test_data = data.load_test(test_data_path)

    # convert to corpus if needed (LDA works on bag-of-words vectors)
    if model_type in ('lda', ):
        print('Preparing corpus')
        dictionary = utils.make_dictionary(
            documents.content,
            cache_path=os.path.join(cache_dir, 'dictionary') if cache_dir is not None else None,
            filter_=False,
        )
        documents['bow'] = utils.make_corpus(documents.content, dictionary)
        titles['bow'] = utils.make_corpus(titles.content, dictionary)

    # train
    print('Training model')
    if model_type == 'lda':
        model = engine.CustomLDA(documents, titles, dictionary)
        model = model.train(train_data, val_data, output_path)
    elif model_type == 'doc2vec':
        model = engine.CustomDoc2vec(documents, titles)
        model = model.train(train_data, val_data, output_path)
    else: raise ValueError(model_type)

    # inference: rank title candidates for the test set and dump to JSON
    prediction = model.predict(test_data)
    prediction_output = os.path.join(output_path, prediction_name)
    data.dump_prediction(prediction, prediction_output)
    return
if __name__ == '__main__':
    # CLI wrapper: every option mirrors a keyword argument of main().
    parser = argparse.ArgumentParser(
        prog='python3 main.py',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        '--data_path', default='data/exam_data1.json',
        help='Path to the doc/title data file.\n'
        'Default: %(default)s'
    )
    parser.add_argument(
        '--train_data_path', default='data/train_q.json',
        help='Path to the train data file.\n'
        'Default: %(default)s'
    )
    parser.add_argument(
        '--val_data_path', default='data/val_q.json',
        help='Path to the validation data file.\n'
        'Default: %(default)s'
    )
    parser.add_argument(
        '--test_data_path', default='data/test_q.json',
        help='Path to the test data file.\n'
        'Default: %(default)s'
    )
    parser.add_argument(
        '--output_path', default='./temp_output',
        help='Path to the model_output dir.\n'
        'Default: %(default)s'
    )
    # NOTE(review): "Wehre" typo in the help string below is user-visible;
    # left unchanged here because help text is runtime output.
    parser.add_argument(
        '--cache_dir', default=None,
        help='Wehre to store/load the cache directory.\n'
        'Default: disable cache'
    )
    parser.add_argument(
        '--model_type', default='lda',
        help='Mdoel selection.. [lda, doc2vec]\n'
        'Default: %(default)s'
    )
    args = parser.parse_args()
    main(**vars(args))
35788935275 | # from kinematic import *
# from DDkinematic_final import *
from uproot import open
from os import listdir
from fnmatch import filter
from numpy import ravel, unique, array, empty, concatenate, ones, logical_and
from numpy import abs as np_abs
from numpy.random import choice
# from DD_utils_final import isolate_int, count_tauh, call_dict_with_list, replace_prefix_in_list, flatten_2D_list, RandomGenerate_count_tauh
from copy import deepcopy
import numpy as np
from scipy.optimize import brentq
from functools import reduce
from operator import iconcat
from numbers import Number
from pandas import DataFrame
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor
import os
import sys
sys.path.append('./FeatureRegression/')
from kinematic_custom import *
# /home/ddemler/HNLclassifier/fnn_FeatureRegression/All_particles/kinematic_custom.py
# p4calc, motherpair_vals, Energy_tot
# np.random.seed(39)
# np.rand
# import yaml
# Global variables
output_vars_v1 = ['event', 'genWeight', 'deltaR_12', 'deltaR_13', 'deltaR_23', 'pt_123', 'mt_12', 'mt_13', 'mt_23', 'Mt_tot', 'n_tauh']
output_vars_v2 = ['event', 'genWeight', 'deltaphi_12', 'deltaphi_13', 'deltaphi_23', 'deltaeta_12', 'deltaeta_13', 'deltaeta_23',
'deltaR_12', 'deltaR_13', 'deltaR_23', 'pt_123', 'mt_12', 'mt_13', 'mt_23', 'Mt_tot', 'n_tauh']
output_vars_v3 = ['event', 'genWeight', 'deltaphi_12', 'deltaphi_13', 'deltaphi_23', 'deltaeta_12', 'deltaeta_13', 'deltaeta_23',
'deltaR_12', 'deltaR_13', 'deltaR_23', 'pt_123', 'mt_12', 'mt_13', 'mt_23', 'Mt_tot',
['HNL_CM_angle_with_MET_1', 'HNL_CM_angle_with_MET_2'], ['W_CM_angle_HNL_1', 'W_CM_angle_HNL_2'],
['W_CM_angle_HNL_with_MET_1', 'W_CM_angle_HNL_with_MET_2'], ['HNL_CM_mass_1', 'HNL_CM_mass_2'],
['HNL_CM_mass_with_MET_1', 'HNL_CM_mass_with_MET_2'], 'n_tauh']
output_vars_v4 = ['event', 'genWeight',
'charge_1', 'charge_2', 'charge_3',
'pt_1', 'pt_2', 'pt_3', 'pt_MET',
'eta_1', 'eta_2', 'eta_3',
'mass_1', 'mass_2', 'mass_3',
'phi_1', 'phi_2', 'phi_3', 'phi_MET',
'deltaphi_12', 'deltaphi_13', 'deltaphi_23',
'deltaphi_1MET', 'deltaphi_2MET', 'deltaphi_3MET',
['deltaphi_1(23)', 'deltaphi_2(13)', 'deltaphi_3(12)',
'deltaphi_MET(12)', 'deltaphi_MET(13)', 'deltaphi_MET(23)',
'deltaphi_1(2MET)', 'deltaphi_1(3MET)', 'deltaphi_2(1MET)', 'deltaphi_2(3MET)', 'deltaphi_3(1MET)', 'deltaphi_3(2MET)'],
'deltaeta_12', 'deltaeta_13', 'deltaeta_23',
['deltaeta_1(23)', 'deltaeta_2(13)', 'deltaeta_3(12)'],
'deltaR_12', 'deltaR_13', 'deltaR_23',
['deltaR_1(23)', 'deltaR_2(13)', 'deltaR_3(12)'],
'pt_123',
'mt_12', 'mt_13', 'mt_23',
'mt_1MET', 'mt_2MET', 'mt_3MET',
['mt_1(23)', 'mt_2(13)', 'mt_3(12)',
'mt_MET(12)', 'mt_MET(13)', 'mt_MET(23)',
'mt_1(2MET)', 'mt_1(3MET)', 'mt_2(1MET)', 'mt_2(3MET)', 'mt_3(1MET)', 'mt_3(2MET)'],
'mass_12', 'mass_13', 'mass_23',
'mass_123',
'Mt_tot',
['HNL_CM_angle_with_MET_1', 'HNL_CM_angle_with_MET_2'],
['W_CM_angle_to_plane_1', 'W_CM_angle_to_plane_2'], ['W_CM_angle_to_plane_with_MET_1', 'W_CM_angle_to_plane_with_MET_2'],
['HNL_CM_mass_1', 'HNL_CM_mass_2'],
['HNL_CM_mass_with_MET_1', 'HNL_CM_mass_with_MET_2'],
['W_CM_angle_12','W_CM_angle_13', 'W_CM_angle_23', 'W_CM_angle_1MET', 'W_CM_angle_2MET', 'W_CM_angle_3MET'],
'n_tauh']
output_vars_v5 = ['event', 'genWeight',
'charge_1', 'charge_2', 'charge_3',
'pt_1', 'pt_2', 'pt_3', 'pt_MET',
'eta_1', 'eta_2', 'eta_3',
'mass_1', 'mass_2', 'mass_3',
'phi_1', 'phi_2', 'phi_3', 'phi_MET',
'deltaphi_12', 'deltaphi_13', 'deltaphi_23',
'deltaphi_1MET', 'deltaphi_2MET', 'deltaphi_3MET',
['deltaphi_1(23)', 'deltaphi_2(13)', 'deltaphi_3(12)',
'deltaphi_MET(12)', 'deltaphi_MET(13)', 'deltaphi_MET(23)',
'deltaphi_1(2MET)', 'deltaphi_1(3MET)', 'deltaphi_2(1MET)', 'deltaphi_2(3MET)', 'deltaphi_3(1MET)', 'deltaphi_3(2MET)'],
'deltaeta_12', 'deltaeta_13', 'deltaeta_23',
['deltaeta_1(23)', 'deltaeta_2(13)', 'deltaeta_3(12)'],
'deltaR_12', 'deltaR_13', 'deltaR_23',
['deltaR_1(23)', 'deltaR_2(13)', 'deltaR_3(12)'],
'pt_123',
'mt_12', 'mt_13', 'mt_23',
'mt_1MET', 'mt_2MET', 'mt_3MET',
['mt_1(23)', 'mt_2(13)', 'mt_3(12)',
'mt_MET(12)', 'mt_MET(13)', 'mt_MET(23)',
'mt_1(2MET)', 'mt_1(3MET)', 'mt_2(1MET)', 'mt_2(3MET)', 'mt_3(1MET)', 'mt_3(2MET)'],
'mass_12', 'mass_13', 'mass_23',
'mass_123',
'Mt_tot',
['HNL_CM_angle_with_MET_1', 'HNL_CM_angle_with_MET_2', 'HNL_CM_angle_with_MET_3'],
['W_CM_angle_to_plane_1', 'W_CM_angle_to_plane_2', 'W_CM_angle_to_plane_3'], ['W_CM_angle_to_plane_with_MET_1', 'W_CM_angle_to_plane_with_MET_2', 'W_CM_angle_to_plane_with_MET_3'],
['HNL_CM_mass_1', 'HNL_CM_mass_2', 'HNL_CM_mass_3'],
['HNL_CM_mass_with_MET_1', 'HNL_CM_mass_with_MET_2', 'HNL_CM_mass_with_MET_3'],
['W_CM_angle_12','W_CM_angle_13', 'W_CM_angle_23', 'W_CM_angle_1MET', 'W_CM_angle_2MET', 'W_CM_angle_3MET'],
'n_tauh',
['px_1', 'py_1', 'pz_1', 'E_1', 'px_2', 'py_2', 'pz_2', 'E_2', 'px_3', 'py_3', 'pz_3', 'E_3'],
['moth_mass_12', 'moth_mass_13', 'moth_mass_23', 'moth_pt_12', 'moth_pt_13', 'moth_pt_23', 'moth_eta_12', 'moth_eta_13', 'moth_eta_23', 'moth_phi_12', 'moth_phi_13', 'moth_phi_23', 'moth_px_12', 'moth_px_13', 'moth_px_23', 'moth_py_12', 'moth_py_13', 'moth_py_23', 'moth_pz_12', 'moth_pz_13', 'moth_pz_23', 'moth_E_12', 'moth_E_13', 'moth_E_23'],
'E_tot']
#===================================================================================================
class Data_extractor():
"""
A Data_extractor extracts data from a folder of root files containing the anatuples.
It takes a channel as argument : channel = "tee" "tem" "tmm" "tte" or "ttm"
When called, it returns the variables of interest for the DNN training
"""
def __init__(self, channel, raw_vars_general, raw_vars_lepton1, raw_vars_lepton2, raw_vars_lepton3, output_vars, functions, input_vars):
"""
-channel : flavour of the 3 prompt leptons present in the decay. channel = "tee" "tem" "tmm" "tte" or "ttm"
-raw_vars_general : names of variables in the root files that will be loaded and which are present only once, and not for each lepton
-raw_vars_lepton(1,2,3) : end of names of variables in the root files that will be loaded and which are defined for a specific lepton.
The naming convention for such variables is L_X where L = Electron(1,2), Muon(1,2), Tau(1,2). Only specify
_X, since L will be deduced from the channel
-output_vars : names of variable of interest that will be created by the data extractor
-functions : functions that will be used to compute the output_vars (one function for each output_vars in the right order). If the
corresponding output variable is already present as raw variable, put None as a function.
-input_vars : list of lists of variables that are passed to the functions to compute the output_vars. If the variable in question
is specific to one lepton, then "(1,2,3)_X" will be converted to lepton(1,2,3)_X.
For example, in tee channel "3_mass"->"Electron2_mass"
"""
self.channel = channel
if self.channel == "tee":
self.n_taus = 1
self.lepton1 = "Tau"
self.lepton2 = "Electron1"
self.lepton3 = "Electron2"
elif self.channel == "tem":
self.n_taus = 1
self.lepton1 = "Tau"
self.lepton2 = "Electron"
self.lepton3 = "Muon"
elif self.channel == "tmm":
self.n_taus = 1
self.lepton1 = "Tau"
self.lepton2 = "Muon1"
self.lepton3 = "Muon2"
elif self.channel == "tte":
self.n_taus = 2
self.lepton1 = "Tau1"
self.lepton2 = "Tau2"
self.lepton3 = "Electron"
elif self.channel == "ttm":
self.n_taus = 2
self.lepton1 = "Tau1"
self.lepton2 = "Tau2"
self.lepton3 = "Muon"
else:
raise ValueError("The channel name \""+channel+"\" is not valid")
self.raw_vars = raw_vars_general
for var in raw_vars_lepton1:
self.raw_vars.append(self.lepton1+var)
for var in raw_vars_lepton2:
self.raw_vars.append(self.lepton2+var)
for var in raw_vars_lepton3:
self.raw_vars.append(self.lepton3+var)
self.input_vars = replace_prefix_in_list(input_vars, to_replace=['1','2','3'], replace_by=[self.lepton1, self.lepton2, self.lepton3])
self.functions = functions
self.output_vars = output_vars
self.flat_output_vars = flatten_2D_list(output_vars)
def __call__(self, path, signal_prefix = ['HNL'], real_data_prefix = ['EGamma', 'SingleMuon', 'Tau'], data = None, file_list = None, with_mass_hyp = True):
    """
    Extract the variables of interest from the root files found in ``path``.

    Arguments :
        -path : the path to the root files
        -signal_prefix : beginning of names of the files containing the signal (here "HNL"). It can be a string or a list of strings
        -real_data_prefix : beginning of filenames that correspond to real data, and that will be ignored
        -data : dictionnary to which the extracted data will be appended (if None, the dictionary will be created)
        -file_list : list of root files from which data will be extracted (if None, all root files present in path will be used).
        -with_mass_hyp : if True, the data will contain the HNL mass hypothesis in GeV for the signal events, and a random choice
            among the different hypothesis for background events
    Output :
        -data : dictionary containing the event indices, the variables of interest, the label of the event, and the type of event.
            By default, data will contain the entries "signal_label" (1 for signal, 0 for background), "channel" and "event_type" (name of the
            file in which the events were taken)
    """
    # Build the expected key set of the output dictionary.
    total_keys = deepcopy(self.flat_output_vars)
    total_keys.extend(['signal_label', 'channel', 'event_type'])
    if with_mass_hyp:
        total_keys.append('mass_hyp')
    # BUGFIX: a caller-supplied ``data`` dictionary used to be overwritten unconditionally
    # (and 'mass_hyp' was appended to total_keys twice). A fresh dictionary is now built
    # only when none is supplied, so extraction really appends to ``data`` as documented.
    if data is None:
        data = {var: empty((0,)) for var in self.flat_output_vars}
        if with_mass_hyp:
            data['mass_hyp'] = []
        data['signal_label'] = []
        data['channel'] = []
        data['event_type'] = []
    if set(data.keys()) != set(total_keys):
        raise KeyError("The data keys don't match the names of the variable created by the data extractor : ", list(data.keys()), total_keys)
    if file_list is None:
        file_list = filter(listdir(path), '*.root')
    # Create a list of all considered HNL mass hypothesis
    if type(signal_prefix) != list:
        signal_prefix = [signal_prefix]
    mass_hyps = []
    if with_mass_hyp:
        for filename in file_list:
            for prefix in signal_prefix:
                if filename.startswith(prefix):
                    # the mass hypothesis is encoded as the first integer of the filename
                    mass_hyps.append(isolate_int(filename, separators=['-', '_'])[0])
        mass_hyps = unique(array(mass_hyps))
    weightsum1 = 0  # sum of generator weights before the selection cut
    weightsum2 = 0  # sum of generator weights after the selection cut
    numsum2 = 0     # number of events surviving the selection cut
    for filename in file_list:
        # Real-data files are skipped : only simulated signal/background is extracted.
        if any(filename.startswith(prefix) for prefix in real_data_prefix):
            continue
        # Raw data loading with a channel-dependent selection : total charge, DeepTau
        # vs-jet discriminator on hadronic taus, and isolation on electrons/muons.
        limit_charge = 3
        limit_tau_jet = 5
        limit_em_iso = 0.15
        cut = ''
        if self.channel == 'tte':
            cut = '(abs(Tau1_charge + Tau2_charge + Electron_charge) < {}) & (Tau1_idDeepTau2018v2p5VSjet >= {}) & (Tau2_idDeepTau2018v2p5VSjet >= {}) & (Electron_pfRelIso03_all < {})'.format(limit_charge, limit_tau_jet, limit_tau_jet, limit_em_iso)
        if self.channel == 'tee':
            cut = '(abs(Tau_charge + Electron1_charge + Electron2_charge) < {}) & (Tau_idDeepTau2018v2p5VSjet >= {}) & (Electron1_pfRelIso03_all < {}) & (Electron2_pfRelIso03_all < {})'.format(limit_charge, limit_tau_jet, limit_em_iso, limit_em_iso)
        if self.channel == 'tem':
            cut = '(abs(Tau_charge + Electron_charge + Muon_charge) < {}) & (Tau_idDeepTau2018v2p5VSjet >= {}) & (Electron_pfRelIso03_all < {}) & (Muon_pfRelIso03_all < {})'.format(limit_charge, limit_tau_jet, limit_em_iso, limit_em_iso)
        if self.channel == 'tmm':
            cut = '(abs(Tau_charge + Muon1_charge + Muon2_charge) < {}) & (Tau_idDeepTau2018v2p5VSjet >= {}) & (Muon1_pfRelIso03_all < {}) & (Muon2_pfRelIso03_all < {})'.format(limit_charge, limit_tau_jet, limit_em_iso, limit_em_iso)
        if self.channel == 'ttm':
            cut = '(abs(Tau1_charge + Tau2_charge + Muon_charge) < {}) & (Tau1_idDeepTau2018v2p5VSjet >= {}) & (Tau2_idDeepTau2018v2p5VSjet >= {}) & (Muon_pfRelIso03_all < {})'.format(limit_charge, limit_tau_jet, limit_tau_jet, limit_em_iso)
        anatuple_before_cut = open(path+filename)['Event;1'].arrays(self.raw_vars, library='np')  # type: ignore
        weightsum1 += anatuple_before_cut['genWeight'].sum()
        anatuple = open(path+filename)['Event;1'].arrays(self.raw_vars, cut=cut, library='np')  # type: ignore
        weightsum2 += anatuple['genWeight'].sum()
        numsum2 += len(anatuple['genWeight'])
        n = len(anatuple[list(anatuple.keys())[0]])
        if n == 0:
            continue
        anatuple['channel'] = [self.channel]*n
        # Creation of the data : each output variable is either copied straight from the
        # anatuple (function is None) or computed by the associated function.
        for i, var in enumerate(self.output_vars):
            if self.functions[i] is None:
                data[var] = concatenate((data[var], anatuple[self.input_vars[i][0]]))
            else:
                outputs = self.functions[i](*call_dict_with_list(anatuple, self.input_vars[i]))
                if type(var) == list:
                    # the function returned several output variables at once
                    for j, v in enumerate(var):
                        data[v] = concatenate((data[v], outputs[j]))
                else:
                    data[var] = concatenate((data[var], outputs))
        # Label the events : 1 for signal files, 0 for background files.
        label = 0
        mass = ones((n,))
        for prefix in signal_prefix:
            if filename.startswith(prefix):
                label = 1
                if with_mass_hyp:
                    mass *= isolate_int(filename, separators=['-', '_'])[0]
        if label == 0 and with_mass_hyp:
            # background events receive a random mass hypothesis among the signal ones
            mass = choice(mass_hyps, n)
        # Add mass hypothesis
        if with_mass_hyp:
            if 'mass_hyp' in data.keys():
                data['mass_hyp'] = concatenate((data['mass_hyp'], mass))
            else:
                data['mass_hyp'] = mass
        # Add signal label (by default)
        if 'signal_label' in data.keys():
            data['signal_label'] = concatenate((data['signal_label'], ones((n,))*label))
        else:
            data['signal_label'] = ones((n,))*label
        # Add channel (by default)
        if 'channel' in data.keys():
            data['channel'].extend([self.channel]*n)
        else:
            data['channel'] = [self.channel]*n
        # Add event type (by default)
        if 'event_type' in data.keys():
            data['event_type'].extend([filename.replace('.root','')]*n)
        else:
            data['event_type'] = [filename.replace('.root','')]*n
    return data
#===================================================================================================
class Data_extractor_test(Data_extractor):
    """Minimal extractor configuration used to exercise the Data_extractor machinery
    with dummy variables (no per-lepton raw variables)."""
    def __init__(self):
        # 'test1' is copied through unchanged, 'test2' is doubled, and the mixed pair
        # is produced by a single function returning two outputs at once.
        super().__init__(
            channel='tte',
            raw_vars_general=['test1', 'test2'],
            raw_vars_lepton1=[],
            raw_vars_lepton2=[],
            raw_vars_lepton3=[],
            output_vars=['test1', ['test_mix1', 'test_mix2'], 'test2'],
            functions=[None, lambda a : (a[0]*a[1], a[0]+a[1]), lambda a : 2*a],
            input_vars=[['test1'], ['test1', 'test2'], ['test2']],
        )
class Data_extractor_v1(Data_extractor):
    """Extractor configuration building the v1 feature set: pairwise deltaR between the
    three leptons, scalar pt sum, pairwise transverse masses, the total transverse mass
    (including MET), and the hadronic-tau count."""
    def __init__(self, channel):
        # Names of the produced variables (output_vars_v1 is defined elsewhere in this file).
        output_vars = deepcopy(output_vars_v1)
        # One entry per output variable; None means the raw variable is copied unchanged.
        functions =[None, None, deltaR, deltaR, deltaR, sum_pt, transverse_mass, transverse_mass, transverse_mass, total_transverse_mass, count_tauh]
        raw_vars_general = ['event', 'genWeight', 'MET_pt', 'MET_phi']
        raw_vars_lepton1=['_eta', '_mass', '_phi', '_pt', '_genPartFlav']
        raw_vars_lepton2=['_eta', '_mass', '_phi', '_pt', '_genPartFlav']
        raw_vars_lepton3=['_eta', '_mass', '_phi', '_pt', '_genPartFlav']
        # Inputs for each function; prefixes '1'/'2'/'3' are replaced by the lepton names
        # of the chosen channel in Data_extractor.__init__. Order must match ``functions``.
        input_vars = [['event'], ['genWeight'], ['1_eta', '2_eta', '1_phi', '2_phi'], ['1_eta', '3_eta', '1_phi', '3_phi'],
                      ['2_eta', '3_eta', '2_phi', '3_phi'], [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'],
                      ['1_mass', '2_mass', '3_mass']], ['1_pt', '2_pt', '1_phi', '2_phi'], ['1_pt', '3_pt', '1_phi', '3_phi'],
                      ['2_pt', '3_pt', '2_phi', '3_phi'], ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi'],
                      ['channel', '1_genPartFlav', '2_genPartFlav', '3_genPartFlav']]
        super().__init__(channel, raw_vars_general=raw_vars_general, raw_vars_lepton1=raw_vars_lepton1, raw_vars_lepton2=raw_vars_lepton2,
                         raw_vars_lepton3=raw_vars_lepton3, output_vars=output_vars, functions=functions, input_vars=input_vars)
class Data_extractor_v2(Data_extractor):
    """Extractor configuration for the v2 feature set: v1 plus pairwise deltaphi and
    deltaeta between the three leptons."""
    def __init__(self, channel):
        # Names of the produced variables (output_vars_v2 is defined elsewhere in this file).
        output_vars = deepcopy(output_vars_v2)
        # One entry per output variable; None means the raw variable is copied unchanged.
        functions =[None, None, deltaphi, deltaphi, deltaphi, deltaeta, deltaeta, deltaeta, deltaR, deltaR, deltaR, sum_pt, transverse_mass, transverse_mass, transverse_mass, total_transverse_mass, count_tauh]
        raw_vars_general = ['event', 'genWeight', 'MET_pt', 'MET_phi']
        raw_vars_lepton1=['_eta', '_mass', '_phi', '_pt', '_genPartFlav']
        raw_vars_lepton2=['_eta', '_mass', '_phi', '_pt', '_genPartFlav']
        raw_vars_lepton3=['_eta', '_mass', '_phi', '_pt', '_genPartFlav']
        # Inputs for each function, in the same order as ``functions``.
        input_vars = [['event'], ['genWeight'], ['1_phi', '2_phi'], ['1_phi', '3_phi'], ['2_phi', '3_phi'], ['1_eta', '2_eta'],
                      ['1_eta', '3_eta'], ['2_eta', '3_eta'], ['1_eta', '2_eta', '1_phi', '2_phi'], ['1_eta', '3_eta', '1_phi', '3_phi'],
                      ['2_eta', '3_eta', '2_phi', '3_phi'], [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'],
                      ['1_mass', '2_mass', '3_mass']], ['1_pt', '2_pt', '1_phi', '2_phi'], ['1_pt', '3_pt', '1_phi', '3_phi'],
                      ['2_pt', '3_pt', '2_phi', '3_phi'], ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi'],
                      ['channel', '1_genPartFlav', '2_genPartFlav', '3_genPartFlav']]
        super().__init__(channel, raw_vars_general=raw_vars_general, raw_vars_lepton1=raw_vars_lepton1, raw_vars_lepton2=raw_vars_lepton2,
                         raw_vars_lepton3=raw_vars_lepton3, output_vars=output_vars, functions=functions, input_vars=input_vars)
class Data_extractor_v3(Data_extractor):
    """Extractor configuration for the v3 feature set: v2 plus center-of-mass observables
    (HNL_CM_* masses/angles and W_CM_* angles, computed with and without the MET), which
    additionally require the lepton charges."""
    def __init__(self, channel):
        # Names of the produced variables (output_vars_v3 is defined elsewhere in this file).
        output_vars = deepcopy(output_vars_v3)
        # One entry per output variable; None means the raw variable is copied unchanged.
        functions =[None, None, deltaphi, deltaphi, deltaphi, deltaeta, deltaeta, deltaeta, deltaR, deltaR, deltaR, sum_pt, transverse_mass,
                    transverse_mass, transverse_mass, total_transverse_mass, HNL_CM_angles_with_MET, W_CM_angles_to_plane,
                    W_CM_angles_to_plane_with_MET, HNL_CM_masses, HNL_CM_masses_with_MET, count_tauh]
        raw_vars_general = ['event', 'genWeight', 'MET_pt', 'MET_phi']
        # The same per-lepton raw-variable suffixes are used for all three leptons.
        lepton_specific = ['_eta', '_mass', '_phi', '_pt', '_charge', '_genPartFlav']
        raw_vars_lepton1 = lepton_specific
        raw_vars_lepton2 = lepton_specific
        raw_vars_lepton3 = lepton_specific
        # Inputs for each function, in the same order as ``functions``.
        input_vars = [['event'], ['genWeight'], ['1_phi', '2_phi'], ['1_phi', '3_phi'], ['2_phi', '3_phi'], ['1_eta', '2_eta'],
                      ['1_eta', '3_eta'], ['2_eta', '3_eta'], ['1_eta', '2_eta', '1_phi', '2_phi'], ['1_eta', '3_eta', '1_phi', '3_phi'],
                      ['2_eta', '3_eta', '2_phi', '3_phi'], [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'],
                      ['1_mass', '2_mass', '3_mass']], ['1_pt', '2_pt', '1_phi', '2_phi'], ['1_pt', '3_pt', '1_phi', '3_phi'],
                      ['2_pt', '3_pt', '2_phi', '3_phi'], ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['channel', '1_genPartFlav', '2_genPartFlav', '3_genPartFlav']]
        super().__init__(channel, raw_vars_general=raw_vars_general, raw_vars_lepton1=raw_vars_lepton1, raw_vars_lepton2=raw_vars_lepton2,
                         raw_vars_lepton3=raw_vars_lepton3, output_vars=output_vars, functions=functions, input_vars=input_vars)
class Data_extractor_v4(Data_extractor):
    """Extractor configuration for the v4 feature set: raw kinematics (charges, pts, etas,
    masses, phis and MET) passed through unchanged, plus an extended set of derived
    variables — pairwise and three-body deltaphi/deltaeta/deltaR (including MET),
    transverse and invariant masses, total transverse mass, center-of-mass angles/masses,
    and the hadronic-tau count."""
    def __init__(self, channel):
        # Names of the produced variables (output_vars_v4 is defined elsewhere in this file).
        output_vars = deepcopy(output_vars_v4)
        # One entry per output variable; None means the raw variable is copied unchanged.
        # The trailing comments indicate which output variables the None entries map to.
        functions =[None, None, # event, genWeight
                    None, None, None, # charges
                    None, None, None, None, # pts
                    None, None, None, # etas
                    None, None, None, # masses
                    None, None, None, None, # phis
                    deltaphi, deltaphi, deltaphi,
                    deltaphi, deltaphi, deltaphi,
                    deltaphi3,
                    deltaeta, deltaeta, deltaeta,
                    deltaeta3,
                    deltaR, deltaR, deltaR,
                    deltaR3,
                    sum_pt,
                    transverse_mass, transverse_mass, transverse_mass,
                    transverse_mass, transverse_mass, transverse_mass,
                    transverse_mass3,
                    invariant_mass, invariant_mass, invariant_mass,
                    invariant_mass,
                    total_transverse_mass,
                    HNL_CM_angles_with_MET,
                    W_CM_angles_to_plane, W_CM_angles_to_plane_with_MET,
                    HNL_CM_masses,
                    HNL_CM_masses_with_MET,
                    W_CM_angles,
                    count_tauh]
        raw_vars_general = ['event', 'genWeight', 'MET_pt', 'MET_phi']
        # The same per-lepton raw-variable suffixes are used for all three leptons.
        lepton_specific = ['_eta', '_mass', '_phi', '_pt', '_charge', '_genPartFlav']
        raw_vars_lepton1 = lepton_specific
        raw_vars_lepton2 = lepton_specific
        raw_vars_lepton3 = lepton_specific
        # Inputs for each function, in the same order as ``functions``.
        input_vars = [['event'], ['genWeight'],
                      ['1_charge'], ['2_charge'], ['3_charge'],
                      ['1_pt'], ['2_pt'], ['3_pt'], ['MET_pt'],
                      ['1_eta'], ['2_eta'], ['3_eta'],
                      ['1_mass'], ['2_mass'], ['3_mass'],
                      ['1_phi'], ['2_phi'], ['3_phi'], ['MET_phi'],
                      ['1_phi', '2_phi'], ['1_phi', '3_phi'], ['2_phi', '3_phi'],
                      ['1_phi', 'MET_phi'], ['2_phi', 'MET_phi'], ['3_phi', 'MET_phi'],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_eta', '2_eta'], ['1_eta', '3_eta'], ['2_eta', '3_eta'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_eta', '2_eta', '1_phi', '2_phi'], ['1_eta', '3_eta', '1_phi', '3_phi'], ['2_eta', '3_eta', '2_phi', '3_phi'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'], ['1_mass', '2_mass', '3_mass']],
                      ['1_pt', '2_pt', '1_phi', '2_phi'], ['1_pt', '3_pt', '1_phi', '3_phi'], ['2_pt', '3_pt', '2_phi', '3_phi'],
                      ['1_pt', 'MET_pt', '1_phi', 'MET_phi'], ['2_pt', 'MET_pt', '2_phi', 'MET_phi'], ['3_pt', 'MET_pt', '3_phi', 'MET_phi'],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [['1_pt', '2_pt'],['1_phi', '2_phi'],['1_eta', '2_eta'], ['1_mass', '2_mass']], [['1_pt', '3_pt'],['1_phi', '3_phi'],['1_eta', '3_eta'], ['1_mass', '3_mass']], [['2_pt', '3_pt'],['2_phi', '3_phi'],['2_eta', '3_eta'], ['2_mass', '3_mass']],
                      [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'], ['1_mass', '2_mass', '3_mass']],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'], ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['channel', '1_genPartFlav', '2_genPartFlav', '3_genPartFlav']]
        super().__init__(channel, raw_vars_general=raw_vars_general, raw_vars_lepton1=raw_vars_lepton1, raw_vars_lepton2=raw_vars_lepton2,
                         raw_vars_lepton3=raw_vars_lepton3, output_vars=output_vars, functions=functions, input_vars=input_vars)
class Data_extractor_v5(Data_extractor):
    """Extractor configuration for the v5 feature set: the v4 variables plus four-vector
    derived quantities (p4calc, motherpair_vals) and the total energy (Energy_tot).
    NOTE(review): unlike v4, the CM-observable entries here do not receive the lepton
    charges in their inputs — confirm the corresponding functions expect that signature."""
    def __init__(self, channel):
        # Names of the produced variables (output_vars_v5 is defined elsewhere in this file).
        output_vars = deepcopy(output_vars_v5)
        # One entry per output variable; None means the raw variable is copied unchanged.
        # The trailing comments indicate which output variables the None entries map to.
        functions =[None, None, # event, genWeight
                    None, None, None, # charges
                    None, None, None, None, # pts
                    None, None, None, # etas
                    None, None, None, # masses
                    None, None, None, None, # phis
                    deltaphi, deltaphi, deltaphi,
                    deltaphi, deltaphi, deltaphi,
                    deltaphi3,
                    deltaeta, deltaeta, deltaeta,
                    deltaeta3,
                    deltaR, deltaR, deltaR,
                    deltaR3,
                    sum_pt,
                    transverse_mass, transverse_mass, transverse_mass,
                    transverse_mass, transverse_mass, transverse_mass,
                    transverse_mass3,
                    invariant_mass, invariant_mass, invariant_mass,
                    invariant_mass,
                    total_transverse_mass,
                    HNL_CM_angles_with_MET,
                    W_CM_angles_to_plane, W_CM_angles_to_plane_with_MET,
                    HNL_CM_masses,
                    HNL_CM_masses_with_MET,
                    W_CM_angles,
                    count_tauh,
                    p4calc,
                    motherpair_vals,
                    Energy_tot]
        raw_vars_general = ['event', 'genWeight', 'MET_pt', 'MET_phi']
        # The same per-lepton raw-variable suffixes are used for all three leptons.
        lepton_specific = ['_eta', '_mass', '_phi', '_pt', '_charge', '_genPartFlav']
        raw_vars_lepton1 = lepton_specific
        raw_vars_lepton2 = lepton_specific
        raw_vars_lepton3 = lepton_specific
        # Inputs for each function, in the same order as ``functions``.
        input_vars = [['event'], ['genWeight'],
                      ['1_charge'], ['2_charge'], ['3_charge'],
                      ['1_pt'], ['2_pt'], ['3_pt'], ['MET_pt'],
                      ['1_eta'], ['2_eta'], ['3_eta'],
                      ['1_mass'], ['2_mass'], ['3_mass'],
                      ['1_phi'], ['2_phi'], ['3_phi'], ['MET_phi'],
                      ['1_phi', '2_phi'], ['1_phi', '3_phi'], ['2_phi', '3_phi'],
                      ['1_phi', 'MET_phi'], ['2_phi', 'MET_phi'], ['3_phi', 'MET_phi'],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_eta', '2_eta'], ['1_eta', '3_eta'], ['2_eta', '3_eta'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_eta', '2_eta', '1_phi', '2_phi'], ['1_eta', '3_eta', '1_phi', '3_phi'], ['2_eta', '3_eta', '2_phi', '3_phi'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'], ['1_mass', '2_mass', '3_mass']],
                      ['1_pt', '2_pt', '1_phi', '2_phi'], ['1_pt', '3_pt', '1_phi', '3_phi'], ['2_pt', '3_pt', '2_phi', '3_phi'],
                      ['1_pt', 'MET_pt', '1_phi', 'MET_phi'], ['2_pt', 'MET_pt', '2_phi', 'MET_phi'], ['3_pt', 'MET_pt', '3_phi', 'MET_phi'],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [['1_pt', '2_pt'],['1_phi', '2_phi'],['1_eta', '2_eta'], ['1_mass', '2_mass']], [['1_pt', '3_pt'],['1_phi', '3_phi'],['1_eta', '3_eta'], ['1_mass', '3_mass']], [['2_pt', '3_pt'],['2_phi', '3_phi'],['2_eta', '3_eta'], ['2_mass', '3_mass']],
                      [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'], ['1_mass', '2_mass', '3_mass']],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi'],
                      [ '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [ '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'], ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [ '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      [ '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['channel', '1_genPartFlav', '2_genPartFlav', '3_genPartFlav'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                      ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass', 'MET_pt']
                      ]
        super().__init__(channel, raw_vars_general=raw_vars_general, raw_vars_lepton1=raw_vars_lepton1, raw_vars_lepton2=raw_vars_lepton2,
                         raw_vars_lepton3=raw_vars_lepton3, output_vars=output_vars, functions=functions, input_vars=input_vars)
class Data_generator():
    """
    Generates a dictionary of fake events whose raw kinematics are drawn at random
    (uniform for angles/masses, inverse-CDF exponential for pts) and from which the
    same derived variables as Data_extractor_v4 are computed.
    """
    def __init__(self, numevents, normalize=False):
        """
        Arguments :
            -numevents : number of fake events to generate
            -normalize : if True, also add the outlier-normalized transverse-mass features
        """
        self.output_vars = deepcopy(output_vars_v4)
        # One entry per output variable; None means the raw variable is copied unchanged.
        # NOTE(review): compared to Data_extractor_v4 the four phi pass-through entries
        # ("None, None, None, None, # phis") and the matching singleton phi input lists
        # are missing here while output_vars_v4 is reused unchanged — functions and
        # input_vars look shifted by 4 relative to output_vars after the masses; confirm
        # the alignment against output_vars_v4.
        self.functions =[None, None, # event, genWeight
                         None, None, None, # charges
                         None, None, None, None, # pts
                         None, None, None, # etas
                         None, None, None, # masses
                         deltaphi, deltaphi, deltaphi,
                         deltaphi, deltaphi, deltaphi,
                         deltaphi3,
                         deltaeta, deltaeta, deltaeta,
                         deltaeta3,
                         deltaR, deltaR, deltaR,
                         deltaR3,
                         sum_pt,
                         transverse_mass, transverse_mass, transverse_mass,
                         transverse_mass, transverse_mass, transverse_mass,
                         transverse_mass3,
                         invariant_mass, invariant_mass, invariant_mass,
                         invariant_mass,
                         total_transverse_mass,
                         HNL_CM_angles_with_MET,
                         W_CM_angles_to_plane, W_CM_angles_to_plane_with_MET,
                         HNL_CM_masses,
                         HNL_CM_masses_with_MET,
                         W_CM_angles,
                         RandomGenerate_count_tauh]
        self.raw_vars_general = ['event', 'genWeight', 'MET_pt', 'MET_phi']
        # NOTE(review): lepton_specific and raw_vars_lepton1..3 are unused in this class
        # (no call to a Data_extractor __init__).
        lepton_specific = ['_eta', '_mass', '_phi', '_pt', '_charge', '_genPartFlav']
        raw_vars_lepton1 = lepton_specific
        raw_vars_lepton2 = lepton_specific
        raw_vars_lepton3 = lepton_specific
        # Inputs for each function, in the same order as ``self.functions``.
        self.input_vars = [['event'], ['genWeight'],
                           ['1_charge'], ['2_charge'], ['3_charge'],
                           ['1_pt'], ['2_pt'], ['3_pt'], ['MET_pt'],
                           ['1_eta'], ['2_eta'], ['3_eta'],
                           ['1_mass'], ['2_mass'], ['3_mass'],
                           ['1_phi', '2_phi'], ['1_phi', '3_phi'], ['2_phi', '3_phi'],
                           ['1_phi', 'MET_phi'], ['2_phi', 'MET_phi'], ['3_phi', 'MET_phi'],
                           ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           ['1_eta', '2_eta'], ['1_eta', '3_eta'], ['2_eta', '3_eta'],
                           ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           ['1_eta', '2_eta', '1_phi', '2_phi'], ['1_eta', '3_eta', '1_phi', '3_phi'], ['2_eta', '3_eta', '2_phi', '3_phi'],
                           ['1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'], ['1_mass', '2_mass', '3_mass']],
                           ['1_pt', '2_pt', '1_phi', '2_phi'], ['1_pt', '3_pt', '1_phi', '3_phi'], ['2_pt', '3_pt', '2_phi', '3_phi'],
                           ['1_pt', 'MET_pt', '1_phi', 'MET_phi'], ['2_pt', 'MET_pt', '2_phi', 'MET_phi'], ['3_pt', 'MET_pt', '3_phi', 'MET_phi'],
                           ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           [['1_pt', '2_pt'],['1_phi', '2_phi'],['1_eta', '2_eta'], ['1_mass', '2_mass']], [['1_pt', '3_pt'],['1_phi', '3_phi'],['1_eta', '3_eta'], ['1_mass', '3_mass']], [['2_pt', '3_pt'],['2_phi', '3_phi'],['2_eta', '3_eta'], ['2_mass', '3_mass']],
                           [['1_pt', '2_pt', '3_pt'],['1_phi', '2_phi', '3_phi'],['1_eta', '2_eta', '3_eta'], ['1_mass', '2_mass', '3_mass']],
                           ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi'],
                           ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'], ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', '1_phi', '2_phi', '3_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           ['1_charge', '2_charge', '3_charge', '1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           ['1_pt', '2_pt', '3_pt', 'MET_pt', '1_phi', '2_phi', '3_phi', 'MET_phi', '1_eta', '2_eta', '3_eta', '1_mass', '2_mass', '3_mass'],
                           [ '1_genPartFlav', '2_genPartFlav', '3_genPartFlav']]
        # ['channel', '1_genPartFlav', '2_genPartFlav', '3_genPartFlav']]
        self.data = self.generate_fake_data2(numevents)
        # NOTE(review): only charge/pt/eta/mass (+MET_pt) keys are remapped below;
        # phi and genPartFlav keys keep the "1_phi" style — confirm this is intended.
        old_keys = [f"{i}_{var}" for i in range(1, 4) for var in ['charge', 'pt', 'eta', 'mass']] + ['MET_pt']
        new_keys = [f"{var}_{i}" for i in range(1, 4) for var in ['charge', 'pt', 'eta', 'mass']] + ['pt_MET']
        # Convert key names from "1_charge" to "charge_1" etc.
        for old_key, new_key in zip(old_keys, new_keys):
            if old_key in self.data:
                self.data[new_key] = self.data[old_key]
                del self.data[old_key] # Remove old key-value pair from the dictionary
        # return data
        # self.cleanup_data()
        # print(data.keys())
        if normalize:
            self.add_norm_features()
    def getData(self):
        """Return the dictionary of generated (and possibly normalized) variables."""
        return self.data
    @staticmethod
    def worker(instance, start, end):
        """Generate the raw random variables for events [start, end); returns the
        per-chunk data and function-input dictionaries. Runs in a worker process."""
        data_chunk={var: [] for var in (instance.raw_vars_general + [f'{i}_{var}' for i in range(1, 4) for var in ['eta', 'mass', 'phi', 'pt', 'charge', 'genPartFlav']] + instance.flat_output_vars)}
        genPartFlav_options = [1,2,3,4] # Define the possible values for genPartFlav
        inputs_chunk= {var: [] for sublist in instance.input_vars for var in (sublist if isinstance(sublist[0], str) else sublist[0])}
        # Exponential-CDF parameters (lambd, c) for each pt variable, used by generate_random_data.
        pt_dict={'pt_1': [0.02536545873792836, 0.4934279110259645], 'pt_2': [0.019151151336495566, 0.3995434049215345], 'pt_3': [0.023038543045718854, 0.31375795899486003], 'pt_MET': [0.014081741982300087, 0.13542242088536358]}
        for i in range(start, end):
            sample = {}
            for var in instance.raw_vars_general:
                if var == 'event':
                    sample[var] = np.random.randint(0, 10000)
                elif var == 'genWeight':
                    sample[var] = np.random.uniform(-1, 1)
                elif var == 'MET_pt':
                    sample[var] = generate_random_data(pt_dict['pt_MET'][0], pt_dict['pt_MET'][1])
                elif var == 'MET_phi':
                    sample[var] = np.random.uniform(-np.pi, np.pi) # Assuming 'MET_phi' ranges from -pi to pi
            eta_low, eta_high = -2.5, 2.5
            mass_low, mass_high = 0, 11
            phi_low, phi_high = -np.pi, np.pi
            # pt_low, pt_high = 0, 1000
            # NOTE(review): this inner loop variable shadows the outer event index i
            # (harmless since the outer i is not used afterwards).
            for i in range(1, 4): # For three leptons
                eta = np.random.uniform(low=eta_low, high=eta_high)
                mass = np.random.uniform(low=mass_low, high=mass_high)
                phi = np.random.uniform(low=phi_low, high=phi_high)
                # pt = np.random.uniform(low=pt_low, high=pt_high)
                pt=generate_random_data(pt_dict[f'pt_{i}'][0], pt_dict[f'pt_{i}'][1])
                charge = np.random.choice([1, -1])
                genPartFlav = np.random.choice(genPartFlav_options)
                sample[f'{i}_eta'] = eta
                sample[f'{i}_mass'] = mass
                sample[f'{i}_phi'] = phi
                sample[f'{i}_pt'] = pt
                sample[f'{i}_charge'] = charge
                sample[f'{i}_genPartFlav'] = genPartFlav
            # Forbid three same-sign leptons by flipping one charge at random.
            if sample['1_charge']== sample['2_charge'] == sample['3_charge']:
                numflip = np.random.randint(1,4)
                sample[f'{numflip}_charge'] = -sample[f'{numflip}_charge']
            # Initialize empty lists for the output variables
            # for var in self.output_vars:
            #     if isinstance(var, list):
            #         for v in var:
            #             data[v] = []
            #     else:
            #         data[var] = []
            for key in sample:
                inputs_chunk[key].append(sample[key])
            for key, value in sample.items():
                data_chunk[key].append(value)
        return data_chunk, inputs_chunk
    def generate_fake_data2(self, num_samples):
        """Parallel version of generate_fake_data: raw variables are produced by
        worker() in a process pool, then the derived variables are computed."""
        # Flatten output_vars (some entries are lists of several variables).
        self.flat_output_vars=[]
        for sublist in self.output_vars:
            if isinstance(sublist, list):
                for item in sublist:
                    self.flat_output_vars.append(item)
            else:
                self.flat_output_vars.append(sublist)
        data = {var: [] for var in (self.raw_vars_general + [f'{i}_{var}' for i in range(1, 4) for var in ['eta', 'mass', 'phi', 'pt', 'charge', 'genPartFlav']] + self.flat_output_vars)}
        # data = {var: [] for var in (self.raw_vars_general + [f'{i}_{var}' for i in range(1, 4) for var in ['eta', 'mass', 'phi', 'pt', 'charge', 'genPartFlav']] + self.output_vars)}
        genPartFlav_options = [1,2,3,4] # Define the possible values for genPartFlav
        inputs = {var: [] for sublist in self.input_vars for var in (sublist if isinstance(sublist[0], str) else sublist[0])}
        # NOTE(review): genPartFlav_options and pt_dict are unused here (generation
        # happens inside worker()).
        pt_dict={'pt_1': [0.02536545873792836, 0.4934279110259645], 'pt_2': [0.019151151336495566, 0.3995434049215345], 'pt_3': [0.023038543045718854, 0.31375795899486003], 'pt_MET': [0.014081741982300087, 0.13542242088536358]}
        num_chunks = os.cpu_count() # or any other number based on your preference
        if num_chunks > 15: num_chunks = num_chunks - 5
        print(f'Using {num_chunks} workers')
        chunk_size = num_samples // num_chunks
        futures = []
        # seeds=[1,2,3,5,6,7]
        with ProcessPoolExecutor() as executor:
            for i in range(num_chunks):
                start = i * chunk_size
                # the last chunk absorbs the remainder of the integer division
                end = (i + 1) * chunk_size if i != num_chunks - 1 else num_samples
                futures.append(executor.submit(self.worker, self, start, end))
            # Collect results from all workers
            for future in tqdm(futures, desc='Collecting results'):
                chunk_data, chunk_inputs = future.result()
                for key, value in chunk_data.items():
                    data[key].extend(value)
                for key, value in chunk_inputs.items():
                    inputs[key].extend(value)
        # Compute the derived variables from the collected raw variables.
        tq2=tqdm(enumerate(self.functions), desc='Applying functions')
        for i, func in tq2:
            if func is not None:
                func_inputs = [np.array(call_dict_with_list(inputs, var)) for var in self.input_vars[i]]
                func_outputs = func(*func_inputs)
                # Add outputs to data
                if isinstance(self.output_vars[i], list):
                    for j, v in enumerate(self.output_vars[i]):
                        if len(data[v]) == 0:
                            data[v] = func_outputs[j]
                        else:
                            data[v] = np.concatenate((data[v], func_outputs[j]))
                else:
                    if len(data[self.output_vars[i]]) == 0:
                        data[self.output_vars[i]] = func_outputs
                    else:
                        data[self.output_vars[i]] = np.concatenate((data[self.output_vars[i]], func_outputs))
        # for key in sample:
        #     data[key].append(sample[key])
        for key in data:
            data[key] = np.array(data[key])
        return data
    def generate_fake_data(self, num_samples):
        """Sequential (single-process) version of generate_fake_data2."""
        # Initialize a dictionary with each key being a variable and each value being an empty list
        # Flatten the list
        flat_output_vars=[]
        for sublist in self.output_vars:
            if isinstance(sublist, list):
                for item in sublist:
                    flat_output_vars.append(item)
            else:
                flat_output_vars.append(sublist)
        # flat_output_vars = [item for sublist in self.output_vars for item in sublist]
        # Use the flattened list in your dictionary comprehension
        data = {var: [] for var in (self.raw_vars_general + [f'{i}_{var}' for i in range(1, 4) for var in ['eta', 'mass', 'phi', 'pt', 'charge', 'genPartFlav']] + flat_output_vars)}
        # data = {var: [] for var in (self.raw_vars_general + [f'{i}_{var}' for i in range(1, 4) for var in ['eta', 'mass', 'phi', 'pt', 'charge', 'genPartFlav']] + self.output_vars)}
        genPartFlav_options = [1,2,3,4] # Define the possible values for genPartFlav
        inputs = {var: [] for sublist in self.input_vars for var in (sublist if isinstance(sublist[0], str) else sublist[0])}
        # Exponential-CDF parameters (lambd, c) for each pt variable, used by generate_random_data.
        pt_dict={'pt_1': [0.02536545873792836, 0.4934279110259645], 'pt_2': [0.019151151336495566, 0.3995434049215345], 'pt_3': [0.023038543045718854, 0.31375795899486003], 'pt_MET': [0.014081741982300087, 0.13542242088536358]}
        tq = tqdm(range(num_samples), desc='Generating raw data')
        for j in tq:
            sample = {}
            for var in self.raw_vars_general:
                if var == 'event':
                    sample[var] = np.random.randint(0, 10000)
                elif var == 'genWeight':
                    sample[var] = np.random.uniform(-1, 1)
                elif var == 'MET_pt':
                    sample[var] = generate_random_data(pt_dict['pt_MET'][0], pt_dict['pt_MET'][1])
                elif var == 'MET_phi':
                    sample[var] = np.random.uniform(-np.pi, np.pi) # Assuming 'MET_phi' ranges from -pi to pi
            eta_low, eta_high = -2.5, 2.5
            mass_low, mass_high = 0, 11
            phi_low, phi_high = -np.pi, np.pi
            # pt_low, pt_high = 0, 1000
            for i in range(1, 4): # For three leptons
                eta = np.random.uniform(low=eta_low, high=eta_high)
                mass = np.random.uniform(low=mass_low, high=mass_high)
                phi = np.random.uniform(low=phi_low, high=phi_high)
                # pt = np.random.uniform(low=pt_low, high=pt_high)
                pt=generate_random_data(pt_dict[f'pt_{i}'][0], pt_dict[f'pt_{i}'][1])
                charge = np.random.choice([1, -1])
                genPartFlav = np.random.choice(genPartFlav_options)
                sample[f'{i}_eta'] = eta
                sample[f'{i}_mass'] = mass
                sample[f'{i}_phi'] = phi
                sample[f'{i}_pt'] = pt
                sample[f'{i}_charge'] = charge
                sample[f'{i}_genPartFlav'] = genPartFlav
            # Forbid three same-sign leptons by flipping one charge at random.
            if sample['1_charge']== sample['2_charge'] == sample['3_charge']:
                numflip = np.random.randint(1,4)
                sample[f'{numflip}_charge'] = -sample[f'{numflip}_charge']
            # Initialize empty lists for the output variables
            # for var in self.output_vars:
            #     if isinstance(var, list):
            #         for v in var:
            #             data[v] = []
            #     else:
            #         data[var] = []
            for key in sample:
                inputs[key].append(sample[key])
                # data[key].append(sample[key])
            for key, value in sample.items():
                data[key].append(value)
        # Compute the derived variables from the collected raw variables.
        tq2=tqdm(enumerate(self.functions), desc='Applying functions')
        for i, func in tq2:
            if func is not None:
                func_inputs = [np.array(call_dict_with_list(inputs, var)) for var in self.input_vars[i]]
                func_outputs = func(*func_inputs)
                # Add outputs to data
                if isinstance(self.output_vars[i], list):
                    for j, v in enumerate(self.output_vars[i]):
                        if len(data[v]) == 0:
                            data[v] = func_outputs[j]
                        else:
                            data[v] = np.concatenate((data[v], func_outputs[j]))
                else:
                    if len(data[self.output_vars[i]]) == 0:
                        data[self.output_vars[i]] = func_outputs
                    else:
                        data[self.output_vars[i]] = np.concatenate((data[self.output_vars[i]], func_outputs))
        # for key in sample:
        #     data[key].append(sample[key])
        for key in data:
            data[key] = np.array(data[key])
        return data
    def add_norm_features(self):
        """Add 'norm_' variants of the transverse-mass features, each divided by the
        pt sum via outlier_normalization."""
        feat_toadd=['norm_mt_1(23)', 'norm_mt_2(13)', 'norm_mt_3(12)',
                    'norm_mt_MET(12)', 'norm_mt_MET(13)', 'norm_mt_MET(23)',
                    'norm_mt_1(2MET)', 'norm_mt_1(3MET)', 'norm_mt_2(1MET)', 'norm_mt_2(3MET)', 'norm_mt_3(1MET)', 'norm_mt_3(2MET)', 'norm_mt_12', 'norm_mt_13', 'norm_mt_23']
        # The source features are the same names without the 'norm_' prefix.
        feat_orig=feat_toadd.copy()
        feat_orig = [i.replace('norm_', '') for i in feat_orig]
        for i, feat in enumerate(feat_toadd):
            self.data[feat] = outlier_normalization(self.data['pt_1'], self.data['pt_2'], self.data['pt_3'], self.data['pt_MET'], self.data[feat_orig[i]])
        return
# def inverted_exponential_cdf(p, lambd, c):
# """Inverted exponential cumulative distribution function."""
# a = 0
# b = 10
# while np.sign(exponential_cdf(a, lambd, c) - p) == np.sign(exponential_cdf(b, lambd, c) - p):
# b *= 2
# return brentq(lambda x: exponential_cdf(x, lambd, c) - p, a, b)
def inverted_exponential_cdf(p, lambd, c):
    """Closed-form inverse of exponential_cdf: return x with cdf(x) == p."""
    shifted = c - np.log(1 - p)
    return shifted / lambd
def generate_random_data(lambd, c):
    """Draw one sample from the shifted-exponential law via inverse-transform sampling."""
    u = np.random.uniform(0, 1)
    return inverted_exponential_cdf(u, lambd, c)
def exponential_cdf(x, lambd, c):
    """Shifted exponential cumulative distribution function: 1 - exp(c - lambd*x)."""
    return 1 - np.exp(c - lambd * x)
def outlier_normalization(Pt_1, Pt_2, Pt_3, MET, Xvar):
    """Divide Xvar by the total of the four pt inputs.

    NOTE(review): np.sum over the stacked arrays reduces across *all* events
    to one scalar; if a per-event sum was intended this should instead be
    ``Pt_1 + Pt_2 + Pt_3 + MET`` — confirm against the caller.
    """
    total = np.sum([Pt_1, Pt_2, Pt_3, MET])
    return Xvar / total
def remove_outliers(data, feature_name, limits):
    """Return one feature's values trimmed to its configured percentile window.

    limits[feature_name] may define 'lower_percentile' / 'upper_percentile'
    (defaults 0.03 and 99.7) or set 'do_not_cut' to skip trimming entirely.
    """
    cfg = limits.get(feature_name)
    if cfg is not None and cfg.get('do_not_cut'):
        return data[feature_name]
    if cfg is None:
        low_pct, high_pct = 0.03, 99.7
    else:
        low_pct = cfg.get('lower_percentile', 0.03)
        high_pct = cfg.get('upper_percentile', 99.7)
    values = data[feature_name]
    low_val, high_val = np.percentile(values, [low_pct, high_pct])
    keep = (values >= low_val) & (values <= high_val)
    return values[keep]
def remove_all_outliers(data, limits):
    """Trim every feature with remove_outliers, in place.

    NOTE(review): each feature is filtered independently, so the surviving
    arrays can end up with different lengths (rows are no longer aligned).
    """
    for name in data:
        data[name] = remove_outliers(data, name, limits)
    return data
def flatten_2D_list(multi_dim_list):
    """Flatten one level deep: scalars are kept, plain lists are spliced in."""
    flat = []
    for element in multi_dim_list:
        # `type(...) is list` (not isinstance) mirrors the original: list
        # subclasses are treated as scalars.
        if type(element) is list:
            flat.extend(element)
        else:
            flat.append(element)
    return flat
def normalize(dataframe, key, sum, weight_name='genWeight'):
    """Rescale weight_name so each class of dataframe[key] sums to its target.

    `sum` is either a single number (same target for every class) or a
    dict mapping class -> target total. Mutates and returns the dataframe.
    """
    classes = dataframe[key].unique()
    if isinstance(sum, Number):
        sum = {c: sum for c in classes}
    if len(sum) != len(classes):
        raise ValueError("The number of elements in sum doesn't match the number of classes in the dataframe")
    for c in classes:
        selector = dataframe[key] == c
        class_total = dataframe.loc[selector, weight_name].sum()
        dataframe.loc[selector, weight_name] *= sum[c] / class_total
    return dataframe
def bucketize(dataframe, key, return_dict=True):
    """Replace the class labels in dataframe[key] by integer indices.

    Accepts a pandas dataframe or a plain dict (converted internally and
    converted back before returning). When return_dict is True, also return
    the {class_name: index} mapping used for the translation.
    """
    was_dict = type(dataframe) is dict
    if was_dict:
        dataframe = pd.DataFrame(dataframe)
    class_names = {}
    # Assign indices in order of first appearance.
    for class_name in dataframe[key]:
        if class_name not in class_names:
            class_names[class_name] = len(class_names)
    output = dataframe.copy()
    output[key].replace(list(class_names.keys()), list(class_names.values()), inplace=True)
    if was_dict:
        output = output.to_dict()
    if return_dict:
        return output, class_names
    return output
def count_tauh(channel, genPartFlavs_1, genPartFlavs_2, genPartFlavs_3):
    """
    Count the hadronic taus present in each event.

    Input :
        -channel : per-event channel strings; only the first entry is used
         (e.g. 'tte'), with 't' marking a hadronic-tau slot among the three
         prompt leptons
        -genPartFlavs_1/2/3 : genParticle flavour codes for each lepton
         (python list or numpy array); code 5 marks a genuine hadronic tau
    Output :
        -number of hadronic taus per event (0, 1 or 2), as a python list when
         the inputs were lists, otherwise as a numpy int64 array
    """
    channel = channel[0]
    is_list = type(genPartFlavs_1) == list
    # Bug fix: the original loop rebound its loop variable instead of storing
    # the converted arrays, so plain-list inputs crashed on `== 5` below.
    genPartFlavs = [np.asarray(flavs) for flavs in
                    (genPartFlavs_1, genPartFlavs_2, genPartFlavs_3)]
    n_tauh = np.zeros_like(genPartFlavs[0]).astype('int64')
    for i, lepton_flav in enumerate(genPartFlavs):
        if channel[i] == 't':
            n_tauh += (lepton_flav == 5).astype('int64')
    if is_list:
        n_tauh = n_tauh.tolist()
    return n_tauh
def replace_prefix_in_list(list_, to_replace, replace_by):
    """Recursively replace known prefixes in every string of a (nested) list.

    Each entry of to_replace whose text starts a given string triggers a
    substitution with the matching entry of replace_by; the checks are applied
    in order, on the already-rewritten string.
    """
    if type(list_) == list:
        return [replace_prefix_in_list(item, to_replace, replace_by) for item in list_]
    result = list_
    for idx, prefix in enumerate(to_replace):
        if result[:len(prefix)] == prefix:
            # NOTE: str.replace substitutes *every* occurrence of the matched
            # prefix, not only the leading one — kept from the original.
            result = result.replace(result[:len(prefix)], replace_by[idx])
    return result
def isolate_int(string, separators):
    """Split on any of the separators and return the purely-numeric pieces as ints."""
    if type(separators) != list:
        separators = [separators]
    primary = separators[0]
    # Collapse every separator onto the first one, then split once.
    for extra in separators[1:]:
        string = string.replace(extra, primary)
    return [int(token) for token in string.split(primary) if token.isdigit()]
def call_dict_with_list(dictionary, list_):
    """Look up a (possibly nested) list of keys, preserving the list structure."""
    if type(list_) == list:
        return [call_dict_with_list(dictionary, key) for key in list_]
    return dictionary[list_]
def RandomGenerate_count_tauh(genPartFlavs_1, genPartFlavs_2, genPartFlavs_3):
    """
    Count hadronic taus for a decay channel drawn uniformly at random.

    Input :
        -genPartFlavs_1/2/3 : genParticle flavour codes for each lepton
         (python list or numpy array); code 5 marks a genuine hadronic tau
    Output :
        -number of hadronic taus per event for the randomly chosen channel,
         as a python list when the inputs were lists, else a numpy array
    """
    channels = ['tee', 'tem', 'tmm', 'tte', 'ttm']
    channel = random.choice(channels)
    is_list = type(genPartFlavs_1) == list
    # Bug fix: convert the stored inputs themselves (the original rebound the
    # loop variable, leaving plain lists that crash on `== 5` below).
    genPartFlavs = [np.asarray(flavs) for flavs in
                    (genPartFlavs_1, genPartFlavs_2, genPartFlavs_3)]
    n_tauh = np.zeros_like(genPartFlavs[0]).astype('int64')
    for i, lepton_flav in enumerate(genPartFlavs):
        if channel[i] == 't':
            n_tauh += (lepton_flav == 5).astype('int64')
    if is_list:
        n_tauh = n_tauh.tolist()
    return n_tauh
def split_dataset(data, ratio_train=0.75, shuffle=True, print_sizes=True):
    """Split events into train/validation/test/measurement sets.

    Events with event % 4 in {0, 1} form the train+validation pool (split
    according to ratio_train); event % 4 == 2 is the test set and
    event % 4 == 3 the measurement set.

    Returns (data_train, data_val, data_test, data_meas) as pandas dataframes.
    """
    df = DataFrame.from_dict(data)
    pool = df.query("(event % 4 == 0) or (event % 4 == 1)")
    data_test = df.query("event % 4 == 2").reset_index(drop=True)
    data_meas = df.query("event % 4 == 3").reset_index(drop=True)
    if shuffle:
        pool = pool.sample(frac=1).reset_index(drop=True)
    data_train = pool.sample(frac=ratio_train)
    data_val = pool.drop(data_train.index)
    if print_sizes:
        N = len(df)
        print("Total number of events : ", N)
        print("Train set : {:.2f} %".format(100*len(data_train)/N))
        print("Validation set : {:.2f} %".format(100*len(data_val)/N))
        print("Test set : {:.2f} %".format(100*len(data_test)/N))
        print("Measurement set : {:.2f} %".format(100*len(data_meas)/N))
    return data_train, data_val, data_test, data_meas
def split_dataset2(data, ratio_train=0.5, ratio_val=0.1, shuffle=True, print_sizes=True):
    """Randomly split events into train/validation/test sets.

    ratio_train and ratio_val are fractions of the *whole* dataset; the
    remainder becomes the test set.

    Returns (data_train, data_val, data_test) as pandas dataframes.
    """
    df = DataFrame.from_dict(data)
    total = len(df)
    if shuffle:
        df = df.sample(frac=1).reset_index(drop=True)
    data_train = df.sample(frac=ratio_train)
    remainder = df.drop(data_train.index)
    # Rescale ratio_val to the remaining fraction so it stays a global ratio.
    data_val = remainder.sample(frac=ratio_val / (1 - ratio_train))
    data_test = remainder.drop(data_val.index)
    if print_sizes:
        print("Total number of events : ", total)
        print("Train set : {:.2f} %".format(100*len(data_train)/total))
        print("Validation set : {:.2f} %".format(100*len(data_val)/total))
        print("Test set : {:.2f} %".format(100*len(data_test)/total))
    return data_train, data_val, data_test
def split_dataset_multitrain(data, ratio_train1=0.4, ratio_train2=0.4, ratio_val1=0.1, ratio_val2=0.1, shuffle=True, print_sizes=True):
    """Randomly split events into two train sets and two validation sets.

    All four ratios are fractions of the whole dataset; whatever remains after
    train1, train2 and val1 are drawn becomes val2.

    Returns (data_train1, data_train2, data_val1, data_val2) as dataframes.
    """
    df = DataFrame.from_dict(data)
    total = len(df)
    if shuffle:
        df = df.sample(frac=1).reset_index(drop=True)
    data_train1 = df.sample(frac=ratio_train1)
    rest = df.drop(data_train1.index)
    # Each later fraction is rescaled to what is still available.
    data_train2 = rest.sample(frac=ratio_train2 / (1 - ratio_train1))
    rest = rest.drop(data_train2.index)
    data_val1 = rest.sample(frac=ratio_val1 / (1 - ratio_train1 - ratio_train2))
    data_val2 = rest.drop(data_val1.index)
    if print_sizes:
        print("Total number of events:", total)
        print("Train1 set: {:.2f} %".format(100*len(data_train1)/total))
        print("Train2 set: {:.2f} %".format(100*len(data_train2)/total))
        print("Validation1 set: {:.2f} %".format(100*len(data_val1)/total))
        print("Validation2 set: {:.2f} %".format(100*len(data_val2)/total))
    return data_train1, data_train2, data_val1, data_val2
| DimaPdemler/HNLclassifier | utils/DD_data_extractor_git.py | DD_data_extractor_git.py | py | 63,747 | python | en | code | 0 | github-code | 90 |
28062940628 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
class Customer(object):
    """A store customer: contact details plus an optional frequent-mileage number."""
    def __init__(self, first_name = '', last_name = '', phone_num = None,
                zip_code = None, freq_mil_num = None):
        """Store the customer's details.

        zip_code / freq_mil_num are coerced to int when provided; the original
        called int() unconditionally, which crashed on the None defaults.
        """
        self.first_name = first_name
        self.last_name = last_name
        self.phone_num = phone_num
        self.zip_code = int(zip_code) if zip_code is not None else None
        self.freq_mil_num = int(freq_mil_num) if freq_mil_num is not None else None
    def find_store(self, stores):
        """Return the stores whose address zip code matches this customer's.

        Bug fixes vs. the original: added the missing ``self`` parameter,
        removed the never-terminating ``while`` loop, compared each store's
        zip code instead of the container's, and appended the store itself
        rather than the result list.
        """
        if self.zip_code is None:
            return []
        return [store for store in stores
                if store.address.zip_code == self.zip_code]
    def __str__(self):
        cust_str = """
        Name: {name}
        Contact: {phone_num}
        Address: {zip_code}
        Frequent Milleage Number: {freq_mil_num}""".format(name = self.first_name + self.last_name,
               phone_num = self.phone_num, zip_code = self.zip_code,
               freq_mil_num = self.freq_mil_num)
        return cust_str
| les1smore/Pizza-Ordering-in-OOP | 5_Customer.py | 5_Customer.py | py | 1,113 | python | en | code | 2 | github-code | 90 |
72318645418 | from django.conf.urls import patterns, url
from sharetools import views
urlpatterns = patterns('sharetools.views',
#User/Profile -----------------------------------------------------------
url(r'^register/$', views.RegisterView.as_view(), name='register'),
url(r'^login/$', views.LoginView.as_view(), name='login'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^profile/$', views.my_profile_view, name='myProfile'),
url(r'^profile/edit$', views.EditProfileView.as_view(), name='editProfile'),
url(r'^profile/(\w+)$', views.ProfileView.as_view(), name='profile'),
#Sheds -------------------------------------------------------------------
url(r'^sheds/$', views.MyShedsView.as_view(), name='mySheds'),
url(r'^sheds/(\d+)$', views.ShedView.as_view(), name='shed'),
url(r'^sheds/create$', views.shed_create_view, name='makeShed'),
url(r'^sheds/delete/(\d+)+$', views.shed_delete_view, name='shedDeletion'),
url(r'^sheds/(\d+)/admin$', views.ShedModView.as_view(), name='shedAdmin'),
url(r'^sheds/(\d+)/move$', views.tool_move_view, name='moveTool'),
url(r'^sheds/(\d+)/admin/approve/(\d)', views.approve_membership_view, name='approveMem'),
#Tools -------------------------------------------------------------------
url(r'^tools/$', views.my_tools_view, name='myTools'),
url(r'^tools/new$', views.make_tool_view,name="newTool"),
url(r'^tools/all$', views.all_tools_view,name="allTool"),
url(r'^tools/all/(\w+)$', views.all_tools_view,name="allTool"),
url(r'^tools/(\d+)/edit/$',views.tool_edit_view,name='toolEdit'),
url(r'^tools/(\d+)$', views.ToolView.as_view(), name='tool'),
#Shares ----------------------------------------------------------
url(r'^shares/$', views.shares_view, name='shares'),
url(r'^shares/new/(\d+)+$', views.MakeShareView.as_view(), name='makeShareContract'),
url(r'^$', views.IndexView.as_view(), name='index'),
)
| dxslly/toolshare | sharetools/urls.py | urls.py | py | 1,886 | python | en | code | 0 | github-code | 90 |
30200516846 | import hashlib # for hashlib.md5
# Advent of Code 2015 day 4: find the smallest i such that md5(key + str(i))
# starts with five zeros (part 1; part 2 needs six zeros instead).
key = 'bgvyzdsv'

i = 0
while True:
    candidate = (key + str(i)).encode('utf-8')
    digest = hashlib.md5(candidate).hexdigest()
    if digest.startswith('00000'):  # part1 (use '000000' for part2)
        break
    i += 1

print(f'The answer is {i}')
| MarcinKozak005/AdventOfCode | 2015/04.py | 04.py | py | 297 | python | en | code | 0 | github-code | 90 |
class Factorial():
    """Factorials, inverse factorials and binomials modulo a prime.

    Both tables are cached and grown lazily, so repeated queries only extend
    them as far as actually needed.
    """
    def __init__(self, mod=10**9 + 7):
        self.mod = mod
        self._fact = [1]       # _fact[k] == k! % mod, filled lazily
        self._fact_len = 1
        self._inv = [1]        # _inv[k] == (k!)^-1 % mod, filled lazily
        self._inv_len = 1
    def _extend_fact(self, n):
        # Grow the factorial cache so that index n is valid.
        while self._fact_len < n + 1:
            self._fact.append(self._fact[-1] * self._fact_len % self.mod)
            self._fact_len += 1
    def fact(self, n):
        ''' n! % mod '''
        if n >= self.mod:
            # n! then contains the prime `mod` as a factor, so it is 0.
            return 0
        self._extend_fact(n)
        return self._fact[n]
    def fact_inv(self, n):
        ''' n!^-1 % mod '''
        if n >= self.mod:
            raise ValueError('Modinv is not exist! arg={}'.format(n))
        self._extend_fact(n)
        while self._inv_len < n + 1:
            self._inv.append(self.modinv(self._fact[self._inv_len]))
            self._inv_len += 1
        return self._inv[n]
    def comb(self, n, r):
        ''' nCr % mod '''
        if r > n:
            return 0
        return self.fact(n) * self.fact_inv(n - r) % self.mod * self.fact_inv(r) % self.mod
    @staticmethod
    def xgcd(a, b):
        '''
        Return (gcd(a, b), x, y) such that a*x + b*y = gcd(a, b)
        '''
        x0, x1, y0, y1 = 0, 1, 1, 0
        while a != 0:
            q = b // a
            b, a = a, b - q * a
            y0, y1 = y1, y0 - q * y1
            x0, x1 = x1, x0 - q * x1
        return b, x0, y0
    def modinv(self, n):
        """Modular inverse of n; raises ValueError when gcd(n, mod) != 1."""
        g, x, _ = self.xgcd(n, self.mod)
        if g != 1:
            raise ValueError('Modinv is not exist! arg={}'.format(n))
        return x % self.mod
# Competitive-programming driver: read two counts from stdin (sorted so x <= y).
x, y = sorted(map(int, input().split()))
# The answer is nonzero only when (x + y) is divisible by 3.
q, r = divmod(x+y, 3)
if r != 0:
    print(0)
else:
    fact = Factorial()
    # C(q, y-q) modulo 10**9+7 via the cached factorial tables.
    print(fact.comb(q, y-q))
| Aasthaengg/IBMdataset | Python_codes/p02862/s518924964.py | s518924964.py | py | 1,937 | python | en | code | 0 | github-code | 90 |
def pal(n):
    """Print whether n is a palindrome and return its digit reversal."""
    original = n
    reversed_num = 0
    while n != 0:
        reversed_num = reversed_num * 10 + n % 10
        n //= 10
    if reversed_num == original:
        print(reversed_num, " is palindrome")
    else:
        print("not a palindrome")
    return reversed_num
# Read a number from stdin and report whether it reads the same backwards.
num=int(input("enter a number"))
result=pal(num)
print()
2934501919 | import os
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import resource_loader
from tflite_micro.python.tflite_micro.signal.ops import framer_op
from tflite_micro.python.tflite_micro.signal.utils import util
class FramerOpTest(tf.test.TestCase):
_PREFIX_PATH = resource_loader.get_path_to_datafile('')
def GetResource(self, filepath):
full_path = os.path.join(self._PREFIX_PATH, filepath)
with open(full_path, 'rt') as f:
file_text = f.read()
return file_text
def SingleFramerTest(self, filename):
lines = self.GetResource(filename).splitlines()
args = lines[0].split()
frame_size = int(args[0])
frame_step = int(args[1])
prefill = bool(int(args[2]))
func = tf.function(framer_op.framer)
input_size = len(lines[1].split())
concrete_function = func.get_concrete_function(
tf.TensorSpec(input_size, dtype=tf.int16), frame_size, frame_step,
prefill)
interpreter = util.get_tflm_interpreter(concrete_function, func)
# Skip line 0, which contains the configuration params.
# Read lines in triplets <input, expected output, expected valid>
i = 1
while i < len(lines):
in_block = np.array([int(j) for j in lines[i].split()], dtype=np.int16)
out_frame_exp = [[int(j) for j in lines[i + 1].split()]]
out_valid_exp = [int(j) for j in lines[i + 2].split()]
# TFLM
interpreter.set_input(in_block, 0)
interpreter.invoke()
out_frame = interpreter.get_output(0)
out_valid = interpreter.get_output(1)
self.assertEqual(out_valid, out_valid_exp)
if out_valid:
self.assertAllEqual(out_frame, out_frame_exp)
# TF
out_frame, out_valid = self.evaluate(
framer_op.framer(in_block, frame_size, frame_step, prefill))
self.assertEqual(out_valid, out_valid_exp)
if out_valid:
self.assertAllEqual(out_frame, out_frame_exp)
i += 3
def MultiFrameRandomInputFramerTest(self, n_frames):
# Terminonlogy: input is in blocks, output is in frames
frame_step = 160
frame_size = 400
prefill = True
block_num = 10
block_size = frame_step * n_frames
test_input = np.random.randint(np.iinfo('int16').min,
np.iinfo('int16').max,
block_size * block_num,
dtype=np.int16)
expected_output = np.concatenate((np.zeros(frame_size - frame_step,
dtype=np.int16), test_input))
func = tf.function(framer_op.framer)
concrete_function = func.get_concrete_function(
tf.TensorSpec(block_size, dtype=tf.int16), frame_size, frame_step,
prefill)
interpreter = util.get_tflm_interpreter(concrete_function, func)
block_index = 0
frame_index = 0
while block_index < block_num:
in_block = test_input[(block_index * block_size):((block_index + 1) *
block_size)]
expected_valid = 1
expected_frame = [
expected_output[((frame_index + i) *
frame_step):((frame_index + i) * frame_step +
frame_size)] for i in range(n_frames)
]
# TFLM
interpreter.set_input(in_block, 0)
interpreter.invoke()
out_frame = interpreter.get_output(0)
out_valid = interpreter.get_output(1)
self.assertEqual(out_valid, expected_valid)
if out_valid:
self.assertAllEqual(out_frame, expected_frame)
# TF
out_frame, out_valid = self.evaluate(
framer_op.framer(in_block, frame_size, frame_step, prefill))
frame_index += n_frames
self.assertEqual(out_valid, expected_valid)
self.assertAllEqual(out_frame, expected_frame)
block_index += 1
def testFramerVectors(self):
self.SingleFramerTest('testdata/framer_test1.txt')
def testFramerRandomInput(self):
self.MultiFrameRandomInputFramerTest(1)
def testFramerRandomInputNframes2(self):
self.MultiFrameRandomInputFramerTest(2)
def testFramerRandomInputNframes4(self):
self.MultiFrameRandomInputFramerTest(4)
def testStepSizeTooLarge(self):
framer_input = np.zeros(160, dtype=np.int16)
with self.assertRaises((tf.errors.InvalidArgumentError, ValueError)):
self.evaluate(framer_op.framer(framer_input, 128, 129))
def testStepSizeNotEqualInputSize(self):
framer_input = np.zeros(122, dtype=np.int16)
with self.assertRaises((tf.errors.InvalidArgumentError, ValueError)):
self.evaluate(framer_op.framer(framer_input, 321, 123))
if __name__ == '__main__':
  np.random.seed(0)  # make the random-input tests deterministic
  tf.test.main()
| tensorflow/tflite-micro | python/tflite_micro/signal/ops/framer_op_test.py | framer_op_test.py | py | 4,721 | python | en | code | 1,398 | github-code | 90 |
t1 = ("Ayush", "Tripathi")
t2 = ("Sakshi", "Shete")

# Flatten both name tuples into one list: [first1, last1, first2, last2].
list1 = [*t1, *t2]
def Convert(a):
    """Turn a flat sequence [k1, v1, k2, v2, ...] into {k1: v1, k2: v2, ...}."""
    # zip-ping one iterator against itself pairs up consecutive elements.
    stream = iter(a)
    return dict(zip(stream, stream))
# Show the resulting mapping, e.g. {'Ayush': 'Tripathi', 'Sakshi': 'Shete'}
print(Convert(list1))
| ayush-t02/python-practice | tuple-list-dict.py | tuple-list-dict.py | py | 285 | python | en | code | 0 | github-code | 90 |
3013248906 | import importlib
import inspect
from typing import Callable, Mapping, TypedDict, TypeVar
# Generic parameters for a one-argument callable alias.
ArgType = TypeVar("ArgType")
ReturnType = TypeVar("ReturnType")
# FunctionType ~ Callable[[ArgType], ReturnType]
FunctionType = Callable[[ArgType], ReturnType]
# Public names exported on star-import.
__all__ = ["build_fn_kwargs", "smart_instantiate"]
def _is_var_kwargs(p: inspect.Parameter):
return p.kind == inspect.Parameter.VAR_KEYWORD
def build_fn_kwargs(
    function: "FunctionType", *dict_args: Mapping, **kwargs
) -> Mapping:
    """Build the kwargs dict that *function* actually accepts.

    Merges all *dict_args* (left to right) and then **kwargs (highest
    precedence), then filters down to *function*'s declared parameters:

    - a function declaring ``**kwargs`` receives everything;
    - a leading ``self`` parameter is ignored;
    - declared defaults are filled in for keys that were not supplied.

    :param function: callable whose signature is inspected
    :param dict_args: mappings of candidate keyword arguments
    :param kwargs: extra candidate keyword arguments (override dict_args)
    :return: dict of keyword arguments safe to pass to *function*
    """
    full_kwargs = {}
    for dict_arg in dict_args:
        full_kwargs.update(dict_arg)
    full_kwargs.update(kwargs)
    parameters = inspect.signature(function).parameters
    if len(parameters) == 0:
        return {}
    # A **kwargs catch-all accepts anything: pass the merged dict through.
    # (The original's `any(p for p in ... if _is_var_kwargs(p))` worked only
    # because Parameter objects are truthy; this is the direct form.)
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in parameters.values()):
        return full_kwargs
    needed_args = list(parameters)
    if needed_args[0] == "self":
        needed_args = needed_args[1:]
    output = {k: v for k, v in full_kwargs.items() if k in needed_args}
    # Fill in declared defaults, using the public Parameter.empty sentinel
    # instead of the private inspect._empty the original relied on.
    for name, p in parameters.items():
        if p.default is not inspect.Parameter.empty:
            output.setdefault(name, p.default)
    return output
class SupportsInstantiate(TypedDict, total=False):
    # Config mapping that may carry a "_class" dotted path
    # ("pkg.module.ClassName") for smart_instantiate to resolve and build.
    _class: str
def instantiatable(dict: Mapping):
    """Return True when *dict* is a mapping carrying a "_class" entry."""
    return isinstance(dict, Mapping) and "_class" in dict
def smart_instantiate(dict_obj: SupportsInstantiate, **kwargs):
    """Recursively build the object described by *dict_obj*.

    Pops the "_class" dotted path, imports the class, derives its accepted
    constructor kwargs from **kwargs via build_fn_kwargs, then copies the
    remaining entries over (recursing into nested instantiatable mappings).
    Note: mutates dict_obj — the "_class" key is removed.
    """
    assert instantiatable(dict_obj)
    dotted_path = dict_obj.pop("_class")
    module_name, class_name = dotted_path.rsplit(".", 1)
    clazz = getattr(importlib.import_module(module_name), class_name)
    init_kwargs = build_fn_kwargs(clazz, kwargs)
    for key, value in dict_obj.items():
        init_kwargs[key] = (
            smart_instantiate(value, **kwargs) if instantiatable(value) else value
        )
    return clazz(**init_kwargs)
def smart_call(func_or_clazz, *dict_args: Mapping, **kwargs):
    """Invoke *func_or_clazz* with only the keyword arguments it accepts."""
    accepted = build_fn_kwargs(func_or_clazz, *dict_args, **kwargs)
    return func_or_clazz(**accepted)
| shichao-wang/CRNet-ISWC2022 | src/molurus/functions.py | functions.py | py | 2,128 | python | en | code | 0 | github-code | 90 |
15469135351 | import cv2
from keras.models import load_model
import numpy as np
# Trained classifier (Teachable-Machine-style Keras export) and default webcam.
model = load_model('keras_model.h5')
capture = cv2.VideoCapture(0)
# Single-image input batch in the float range the model expects.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

while True:
    ret, frame = capture.read()
    # The model takes 224x224 images with pixel values scaled to about [-1, 1].
    resized = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
    data[0] = (np.asarray(resized).astype(np.float32) / 127.0) - 1
    prediction = model.predict(data)
    cv2.imshow('frame', frame)
    print(prediction)
    # Quit when the user presses 'q' in the preview window.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
| kumar2020/RPS | RPS_model.py | RPS_model.py | py | 671 | python | en | code | 0 | github-code | 90 |
21473387425 | # Shows example of SimpleSpriteSheetAnimation object
# 1 - Import library
import pygame
from pygame.locals import *
import sys
import pygwidgets
from SimpleSpriteSheetAnimation import *
# 2 Define constants
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
FRAMES_PER_SECOND = 30   # main-loop frame-rate cap
BGCOLOR = (0, 128, 128)  # teal window background
# 3 - Initialize the world
pygame.init()
window = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])
clock = pygame.time.Clock()
# 4 - Load assets: images(s), sounds, etc.
# 5 - Initialize variables
# Sprite sheet at (22, 140): 50 frames of 192x192 px, shown at .05 s per frame.
oWaterAnimation = SimpleSpriteSheetAnimation(window, (22, 140), 'images/water_003.png', 50, 192, 192, .05)
oPlayButton = pygwidgets.TextButton(window, (60, 320), "Play")
# 6 - Loop forever
while True:
    # 7 - Check for and handle events
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        # Clicking the Play button (re)starts the water animation.
        if oPlayButton.handleEvent(event):
            oWaterAnimation.play()
    # 8 - Do any "per frame" actions
    oWaterAnimation.update()
    # 9 - Clear the window
    window.fill(BGCOLOR)
    # 10 - Draw all window elements
    oWaterAnimation.draw()
    oPlayButton.draw()
    # 11 - Update the window
    pygame.display.update()
    # 12 - Slow things down a bit
    clock.tick(FRAMES_PER_SECOND) # make PyGame wait the correct amount
| IrvKalb/Object-Oriented-Python-Code | Chapter_14/SimpleSpriteSheetAnimation/Main_SimpleSpriteSheetAnimation.py | Main_SimpleSpriteSheetAnimation.py | py | 1,315 | python | en | code | 207 | github-code | 90 |
26112034269 | from libqtile import bar, layout, widget
from libqtile.config import Click, Drag, Group, Key, Match, Screen
from libqtile.lazy import lazy
#from libqtile.utils import guess_terminal
import subprocess
import os
# ================================ CONSTANTES SECTIONS ===================================================
# CONSTANTS
mod = "mod4" # key to use mod (mod4 is a windows key)
terminal = 'terminator'#guess_terminal() # guess_terminal use default terminal
SHIFT_KEY="shift"
CONTROL_KEY="control"
FONT="MesloLGS NF"
# Named palette; each entry is a two-item color list (qtile accepts lists of
# colors, used here with identical values).
COLORS = {
    "dark": ["#292d3e", "#292d3e"],
    "grey": ["#434758", "#434758"],
    "light": ["#ffffff", "#ffffff"],
    "text": ["#292d3e", "#292d3e"],
    "focus": ["#A77AC4", "#A77AC4"],
    "urgent": ["#ff5555", "#ff5555"],
    "active": ["#f1ffff", "#f1ffff"],
    "inactive": ["#4c566a", "#4c566a"],
    "color1": ["#ff5555", "#ff5555"],
    "color2": ["#A77AC4", "#A77AC4"],
    "color3": ["#7197E7", "#7197E7"],
    "color4": ["#ffb86c", "#ffb86c"]
}
# ========================================================================================================
# ================================ KEYS SECTIONS =========================================================
# SHORTCUTS
# Global key bindings; group-switching bindings are appended further below.
keys = [
    # Switch between windows
    Key([mod], "h", lazy.layout.left(), desc="Move focus to left"),
    Key([mod], "l", lazy.layout.right(), desc="Move focus to right"),
    Key([mod], "j", lazy.layout.down(), desc="Move focus down"),
    Key([mod], "k", lazy.layout.up(), desc="Move focus up"),
    Key([mod], "space", lazy.layout.next(), desc="Move window focus to other window"),
    # Move windows between left/right columns or move up/down in current stack.
    Key([mod, SHIFT_KEY], "h", lazy.layout.shuffle_left(), desc="Move window to the left"),
    Key([mod, SHIFT_KEY], "l", lazy.layout.shuffle_right(), desc="Move window to the right"),
    Key([mod, SHIFT_KEY], "j", lazy.layout.shuffle_down(), desc="Move window down"),
    Key([mod, SHIFT_KEY], "k", lazy.layout.shuffle_up(), desc="Move window up"),
    # Grow windows. If current window is on the edge of screen and direction
    Key([mod, CONTROL_KEY], "h", lazy.layout.grow_left(), desc="Grow window to the left"),
    Key([mod, CONTROL_KEY], "l", lazy.layout.grow_right(), desc="Grow window to the right"),
    Key([mod, CONTROL_KEY], "j", lazy.layout.grow_down(), desc="Grow window down"),
    Key([mod, CONTROL_KEY], "k", lazy.layout.grow_up(), desc="Grow window up"),
    # RESET ALL WINDOWS
    Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"),
    # Toggle between split and unsplit sides of stack.
    Key(
        [mod, SHIFT_KEY],
        "Return",
        lazy.layout.toggle_split(),
        desc="Toggle between split and unsplit sides of stack",
    ),
    Key([mod], "Return", lazy.spawn(terminal), desc="Launch terminal"),
    # Toggle between different layouts as defined below
    Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
    Key([mod], "w", lazy.window.kill(), desc="Kill focused window"),
    Key([mod, CONTROL_KEY], "r", lazy.reload_config(), desc="Reload the config"),
    Key([mod, CONTROL_KEY], "q", lazy.shutdown(), desc="Shutdown Qtile"),
    Key([mod], "r", lazy.spawncmd(), desc="Spawn a command using a prompt widget"),
    # Custom
    Key([mod], "b", lazy.spawn("brave-browser"), desc="Launch Brave Browser"),
    Key([mod], "m", lazy.spawn("rofi -show drun"), desc="Launch Application Explorer"),
    Key([mod, SHIFT_KEY], "m", lazy.spawn("rofi -show"), desc="Launch Application Explorer Current Group"),
    # Hardware: PulseAudio volume control via pactl
    Key([], "XF86AudioRaiseVolume", lazy.spawn("pactl set-sink-volume @DEFAULT_SINK@ +5%")),
    Key([], "XF86AudioLowerVolume", lazy.spawn("pactl set-sink-volume @DEFAULT_SINK@ -5%")),
    Key([], "XF86AudioMute", lazy.spawn("pactl set-sink-mute @DEFAULT_SINK@ toggle")),
    # NOTE(review): mod+o powers the machine off immediately — easy to hit by accident.
    Key([mod], "o", lazy.spawn("shutdown now")),
    # Screens
    Key([mod, SHIFT_KEY], "comma", lazy.prev_screen()),
    Key([mod], "comma", lazy.next_screen()),
]
# ========================================================================================================
# ================================ GROUPS SECTIONS =======================================================
# ORDERS
# (nf-dev-terminal) Terminal
# (nf-fa-code) Code
# (nf-fa-code) Rest
# (nf-fa-code) Navigator
# (nf-fa-code) Databases - Others
# (nf-fa-code) Databases - Others
# (name, Group kwargs) pairs: label glyph, layout, and window-class matches
# that auto-assign applications to each workspace.
group_configure = [
    ("1", { 'label': '', 'layout': 'columns', 'matches': [Match(wm_class=["terminator"])], 'init': True }),
    ("2", { 'label': '', 'layout': 'columns', 'matches': [Match(wm_class=["code"])] }),
    ("3", { 'label': 'ﱲ', 'layout': 'columns', 'matches': [] }),
    ("4", { 'label': '', 'layout': 'columns', 'matches': [Match(wm_class=["brave-browser"])] }),
    ("5", { 'label': '', 'layout': 'columns', 'matches': [Match(wm_class=["Archivos", "beekeeper-studio"])] }),
    ("6", { 'label': 'ﭮ', 'layout': 'columns', 'matches': [Match(wm_class=["discord", "slack"])] })
]
groups = [Group(name, **args) for name, args in group_configure]
# Bind mod+N to switch to group N and mod+shift+N to send the focused window there.
for i, (name, args) in enumerate(group_configure, 1):
    keys.append(Key([mod], str(i), lazy.group[name].toscreen()))
    keys.append(Key([mod, SHIFT_KEY], str(i), lazy.window.togroup(name, switch_group=True)))
# ========================================================================================================
# ================================ LAYOUT SECTIONS =======================================================
# Options shared by every enabled layout.
# NOTE(review): 'border_width' is a float (0.2) — border widths are pixel
# counts; confirm whether 0 or 1 was intended.
layout_general_configure = {
    'border_focus_stack': ["#d75f5f", "#8f3d3d"],
    'border_width': 0.2,
    'margin': 10
}
# LAYOUTS | FORM TO WINDOWS SPLITTER
layouts = [
    layout.Columns(**layout_general_configure),
    #layout.Max(),
    # Try more layouts by unleashing below layouts.
    # layout.Stack(num_stacks=2),
    # layout.Bsp(),
    # layout.Matrix(),
    #layout.MonadTall(**layout_general_configure),
    # layout.MonadWide(),
    # layout.RatioTile(),
    # layout.Tile(),
    # layout.TreeTab(),
    # layout.VerticalTile(),
    # layout.Zoomy(),
]
# Windows matching these rules are floated instead of tiled.
floating_layout = layout.Floating(
    float_rules=[
        # Run the utility of `xprop` to see the wm class and name of an X client.
        *layout.Floating.default_float_rules,
        Match(wm_class="confirmreset"),  # gitk
        Match(wm_class="makebranch"),  # gitk
        Match(wm_class="maketag"),  # gitk
        Match(wm_class="ssh-askpass"),  # ssh-askpass
        Match(title="branchdialog"),  # gitk
        Match(title="pinentry"),  # GPG key password entry
    ]
)
# ========================================================================================================
# Defaults applied to every bar widget unless overridden per-widget.
widget_defaults = dict(
    font=FONT,
    fontsize=14,
    padding=7,
)
extension_defaults = widget_defaults.copy()
# ================================ SCREEN SECTIONS =======================================================
def base(fg='text', bg='dark'):
    """Translate palette names into the foreground/background kwargs widgets take."""
    fg_color, bg_color = COLORS[fg], COLORS[bg]
    return {'foreground': fg_color, 'background': bg_color}
def separator():
    """A thin, invisible spacer between bar widgets."""
    style = base()
    return widget.Sep(linewidth=0, padding=5, **style)
def icon(fg='text', bg='dark', fontsize=16, text="?", padding=3):
    """A TextBox used to render a single glyph with configurable padding."""
    return widget.TextBox(text=text, fontsize=fontsize, padding=padding, **base(fg, bg))
def texto(fg='text', bg='dark', fontsize=16, text="?"):
    """A TextBox for plain text, keeping the widget's default padding."""
    return widget.TextBox(text=text, fontsize=fontsize, **base(fg, bg))
def powerline(fg='text', bg='dark'):
return widget.TextBox(**base(fg, bg), text="", fontsize=40, padding=0)
def dockerVersion():
    """Return the Docker server version string, or the raw error text on failure."""
    result = subprocess.run(
        'docker version --format "{{.Server.Version}}"',
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    if result.returncode == 0:
        return result.stdout.decode("UTF-8").strip()
    # Non-zero exit (daemon down, docker missing): surface stderr verbatim.
    return result.stderr.decode("UTF-8")
def groupSpace():
    """Return the widget list for the group selector + current window title."""
    return [
        separator(),
        widget.GroupBox(
            **base(fg='light'),
            font=FONT,
            fontsize=25,
            margin_y=3,
            margin_x=0,
            padding_y=8,
            padding_x=5,
            borderwidth=2,
            active=COLORS["active"],
            inactive=COLORS["inactive"],
            rounded=True,
            highlight_method='block',
            urgent_alert_method='block',
            urgent_border=COLORS["urgent"],
            this_current_screen_border=COLORS["focus"],
            this_screen_border=COLORS["grey"],
            other_current_screen_border=COLORS["dark"],
            other_screen_border=COLORS["dark"],
            disable_drag=True
        ),
        separator(),
        # Focused window title, truncated to 50 chars.
        widget.WindowName(**base(fg='focus'), format='{name}', max_chars=50,fontsize=14, padding=5),
        #widget.Spacer(**base(fg='focus'), length=650),
        separator(),
    ]
primaryScreenBar = [
# Ubuntu Log Section
icon(bg='color1',fg='dark', fontsize=30, text=""),
separator(),
# Group Section
*groupSpace(),
separator(),
# RAM Section
#powerline('color4', 'dark'),
#icon(bg='color4', text='', fontsize=30, padding=0),
#widget.Memory(**base(bg="color4"), measure_mem='G'),
# Docker Section
powerline('color3', 'dark'),
icon(bg='color3', text='', fontsize=35, padding=0),
texto(bg='color3', text=dockerVersion(), fontsize=14),
# Layout Section
#powerline('color2', 'color3'),
#widget.CurrentLayoutIcon(**base(bg="color2"), scale=0.65),
#widget.CurrentLayout(**base(bg="color2"), padding=5),
# Hour Section
powerline('dark', 'color3'),
icon(bg="dark", fg="light", text='', fontsize=30, padding=0),
widget.Memory(**base(bg="dark", fg="light"), measure_mem='G'),
#icon(bg="dark", fg="light", text='', fontsize=18, padding=0),
widget.Battery(**base(bg="dark", fg="light"),
format='{char} {percent:2.0%} {hour:d}:{min:02d}',
charge_char='',
discharge_char='',
font="MesloLGS NF"),
# Utils Sections
powerline('color1', 'dark'),
icon(bg='color1', text='', fontsize=25),
widget.Clock(**base(bg="color1"), format='%d/%m/%Y - %H:%M ')
]
secondaryScreenBar = [
# Ubuntu Log Section
icon(bg='color1',fg='dark', fontsize=30, text=""),
separator(),
# Group Section
*groupSpace(),
separator(),
# RAM Section
#powerline('color4', 'dark'),
#icon(bg='color4', text='', fontsize=30, padding=0),
#widget.Memory(**base(bg="color4"), measure_mem='G'),
# Layout Section
powerline('color1', 'dark'),
widget.CurrentLayoutIcon(**base(bg="color1"), scale=0.65),
widget.CurrentLayout(**base(bg="color1"), padding=5),
# Docker Section
powerline('color3', 'color1'),
icon(bg='color3', text='', fontsize=40, padding=0),
texto(bg='color3', text=dockerVersion(), fontsize=14),
# Hour Section
powerline('dark', 'color3'),
icon(bg="dark", fg="light", text='', fontsize=30, padding=0),
widget.Memory(**base(bg="dark", fg="light"), measure_mem='G'),
#icon(bg="dark", fg="light", text='', fontsize=18, padding=0),
widget.Battery(**base(bg="dark", fg="light"),
format='{char} {percent:2.0%} {hour:d}:{min:02d}',
charge_char='',
discharge_char='',
font="MesloLGS NF"),
# Utils Sections
powerline('color1', 'dark'),
icon(bg='color1', text='', fontsize=25),
widget.Clock(**base(bg="color1"), format='%d/%m/%Y - %H:%M ')
]
originalBarr = [
widget.TextBox("", fontsize=30),
#widget.CurrentLayout(),
widget.GroupBox(fontsize=25),
widget.Prompt(),
widget.WindowName(),
widget.Chord(
chords_colors={
"launch": ("#ff0000", "#ffffff"),
},
name_transform=lambda name: name.upper(),
),
widget.TextBox("default config", name="default"),
widget.TextBox("Press <M-r> to spawn", foreground="#d75f5f"),
widget.Systray(),
widget.Clock(format="%Y-%m-%d %a %I:%M %p"),
widget.Battery(
charge_char='',
discharge_char='',
font="MesloLGS NF"
)
]
def detectSecondMonitor():
    """Count connected displays via xrandr; fall back to 1 if the command fails."""
    xrandr_pipeline = "xrandr | grep -w 'connected' | cut -d ' ' -f 2 | wc -l"
    result = subprocess.run(xrandr_pipeline, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode == 0:
        return int(result.stdout.decode("UTF-8"))
    print(result.stderr.decode("UTF-8"))
    return 1
monitors = detectSecondMonitor()
# One Screen (with its own bar) per detected monitor.
screens = [
    Screen(
        top=bar.Bar(primaryScreenBar, 27, opacity=0.92),
    )
]
if monitors > 1:
    screens.append(
        Screen(
            top=bar.Bar(secondaryScreenBar, 27, opacity=0.92),
        )
    )
# ========================================================================================================
# ================================ MOUSE SECTIONS ========================================================
# Drag floating layouts.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front()),
]
# ========================================================================================================
# ================================ GENERAL SECTIONS ======================================================
# Standard qtile top-level configuration switches.
dgroups_key_binder = None
dgroups_app_rules = []  # type: list
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
auto_fullscreen = True
focus_on_window_activation = "smart"
reconfigure_screens = True
auto_minimize = True
wl_input_rules = None
# Pretend to be the LG3D window manager so Java apps render correctly.
wmname = "LG3D"
# ========================================================================================================
# Shell commands run once at startup (monitor layout + wallpaper).
commandsToExecuteWhenStart = []
if monitors > 1:
    commandsToExecuteWhenStart.append(
        "xrandr --auto --output eDP-1 --mode 1366x768 --primary --left-of HDMI-1 --output HDMI-1 --mode 2560x1080"
    )
commandsToExecuteWhenStart.append("feh --bg-center ~/Imágenes/bosques-uw2.jpg")
#commandsToExecuteWhenStart = [
    #"xrandr --auto --output HDMI-1 --mode 2560x1080 --primary --right-of eDP-1 --output eDP-1 --mode 1366-768"
    #"feh --bg-center ~/Imagenes/luces_noche.jpg",
    #"xrandr --auto --output HDMI-1 --mode 1920x1080 --primary --right-of LVDS-1 --output LVDS-1 --mode 1366x768",
    #"xrandr --auto --output eDP-1 --mode 1366x768 --primary --right-of HDMI-1 --output HDMI-1 --mode 2560x1080",
    #"xrandr --auto --output HDMI-1 --mode 1920x1080 --primary --right-of LVDS-1"
    #"xrandr --auto --output LVDS-1 --mode 1366x768 --right-of HDMI-1" # in case laptop primary screen
#]
for command in commandsToExecuteWhenStart:
    os.system(command)
| moiseR29/.dotfiles | .config/qtile/config-old.py | config-old.py | py | 15,375 | python | en | code | 0 | github-code | 90 |
27020818406 | import csv
import importlib
import subprocess
import webbrowser
import os
import random
import requests
import re
import sys
import pandas as pd
import numpy as np
from PyQt5.QtWidgets import (QTableView, QHeaderView , QMessageBox, QApplication, QMainWindow, QFileDialog, QAction, QTableWidget, QTextEdit, QTableWidgetItem, QAbstractItemView, QWidget, QLineEdit, QPushButton, QSlider, QLabel, QHBoxLayout, QVBoxLayout, QProxyStyle, QStyle, qApp, QCheckBox)
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant, QModelIndex, QCoreApplication
version = "2023.12.21" # Replace with your actual version number
class GrowingTextEdit(QTextEdit):
    """Text edit that grows its minimum height to fit its document, up to a cap."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.document().contentsChanged.connect(self.sizeChange)
        self.heightMin = 0
        self.heightMax = 8

    def sizeChange(self):
        # Track the document height while it stays inside [heightMin, heightMax].
        doc_height = self.document().size().height()
        if self.heightMin <= doc_height <= self.heightMax:
            self.setMinimumHeight(int(doc_height))
class PandasModel(QAbstractTableModel):
    """Qt table model backed by a pandas DataFrame (read/edit via the view)."""
    # Class-level defaults; replaced per-instance in __init__.
    _df = pd.DataFrame()
    _changed = False
    def __init__(self, df=pd.DataFrame(), parent=None):
        # NOTE(review): the default DataFrame is created once at definition
        # time and stored without copying, so default-constructed models share
        # one object — harmless only while it stays empty.
        QAbstractTableModel.__init__(self, parent=parent)
        self._df = df
        self._changed = False
        self._filters = {}
        self._sortBy = []
        self._sortDirection = []
    def headerData(self, section, orientation, role=Qt.DisplayRole):
        """Column names for horizontal headers, index labels for vertical ones."""
        if role != Qt.DisplayRole:
            return QVariant()
        if orientation == Qt.Horizontal:
            try:
                return self._df.columns.tolist()[section]
            except (IndexError,):
                return QVariant()
        elif orientation == Qt.Vertical:
            try:
                # return self.df.index.tolist()
                return self._df.index.tolist()[section]
            except (IndexError,):
                return QVariant()
    def data(self, index, role):
        """Cell text for display/edit roles.

        NOTE(review): the ``self._struct``/``self._data`` branch appears dead —
        neither attribute is ever assigned in this class, so the bare ``except``
        always swallows an AttributeError and the final ``iloc`` return is the
        effective behaviour. Confirm before removing.
        """
        if role == Qt.DisplayRole or role == Qt.EditRole:
            try:
                row = index.row()
                col = index.column()
                name = self._struct[col]['name']
                return self._data[row][name]
            except:
                pass
        elif role == Qt.CheckStateRole:
            return None
        return QVariant(str(self._df.iloc[index.row(), index.column()]))
    def flags(self, index):
        # Every cell is selectable and editable.
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
    '''
    def setData(self, index, value, role=Qt.EditRole):
        row = index.row()
        col = index.column()
        name = self._struct[col]['name']
        self._data[row][name] = value
        self.emit(SIGNAL('dataChanged()'))
        return True
    '''
    def setData(self, index, value, role=Qt.EditRole):
        """Write an edited cell back into the DataFrame, coercing to the column dtype."""
        row = self._df.index[index.row()]
        col = self._df.columns[index.column()]
        if hasattr(value, 'toPyObject'):
            # PyQt4 gets a QVariant
            value = value.toPyObject()
        else:
            # PySide gets an unicode
            dtype = self._df[col].dtype
            if dtype != object:
                value = None if value == '' else dtype.type(value)
        # self._df.set_value(row, col, value)
        self._df.at[row, col] = value
        self._changed = True
        # self.emit(SIGNAL('dataChanged()'))
        return True
    def rowCount(self, parent=QModelIndex()):
        return len(self._df.index)
    def columnCount(self, parent=QModelIndex()):
        return len(self._df.columns)
    def sort(self, column, order):
        """Sort the backing DataFrame in place by the clicked column."""
        colname = self._df.columns.tolist()[column]
        index = self._df.index.tolist()
        self.layoutAboutToBeChanged.emit()
        # self._df.sort_values(colname, ascending=order == Qt.AscendingOrder, inplace=True)
        # self._df.reset_index(inplace=True, drop=True)
        # Failures (e.g. unorderable mixed types) are deliberately ignored so a
        # bad column doesn't crash the view.
        try:
            self._df.sort_values(colname, ascending=order == Qt.AscendingOrder, inplace=True)
        except:
            pass
        try:
            self._df.reset_index(inplace=True, drop=True)
        except:
            pass
        self.layoutChanged.emit()
class CustomQTableView(QTableView):
    """Plain table view with keyboard input disabled (mouse/double-click only)."""
    # Class-level scratch DataFrame; not used by the methods below.
    df = pd.DataFrame()
    def __init__(self, *args):
        super().__init__(*args)
        self.resize(800, 600)
        self.setEditTriggers(QAbstractItemView.NoEditTriggers |
                             QAbstractItemView.DoubleClicked)
    def keyPressEvent(self, event):  # Reimplement the event here
        # Swallow all key presses so rows cannot be edited/navigated by keyboard.
        return
class PoweredQTableView(QTableView):
    """Table view that accepts CSV/Excel files via drag-and-drop.

    BUG FIX: the original class defined ``__init__`` twice; the first
    definition (which also called the nonexistent ``QTableView.setAlignment``)
    was silently shadowed by the second and therefore dead code. Only the
    effective initializer is kept, so runtime behaviour is unchanged.
    """

    # Last file path received via drag-and-drop.
    path = ''

    def __init__(self, *args):
        super().__init__(*args)
        self.setAcceptDrops(True)
        self.resize(800, 600)
        self.setEditTriggers(QAbstractItemView.NoEditTriggers |
                             QAbstractItemView.DoubleClicked)

    def setColumnAlignment(self, column, alignment):
        """Configure header resize modes for *column* and the default alignment."""
        self.horizontalHeader().setSectionResizeMode(column, QHeaderView.Interactive)  # Enable interactive resizing
        self.horizontalHeader().setSectionResizeMode(column, QHeaderView.Stretch)  # Stretch the column width
        self.horizontalHeader().setSectionResizeMode(column, QHeaderView.ResizeToContents)  # Resize the column width to contents
        self.horizontalHeader().setDefaultAlignment(alignment)  # Set the alignment for the column

    def keyPressEvent(self, event):
        # Swallow all key presses; the view is mouse/drag driven only.
        return

    def dragEnterEvent(self, event):
        """Load the first dragged CSV/Excel file into the parent's ``raw`` DataFrame."""
        if event.mimeData().hasUrls():
            files = [(u.toLocalFile()) for u in event.mimeData().urls()]
            for f in files:
                if 'csv' in f or 'xls' in f:
                    print('Drag', f)
                    self.path = f
                    if ('csv' in f):
                        self.parent().raw = pd.read_csv(f,engine='python')
                    elif ('xls' in f):
                        self.parent().raw = pd.read_excel(f,engine='openpyxl')
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        # The file is already loaded in dragEnterEvent; just log the drop.
        files = [(u.toLocalFile()) for u in event.mimeData().urls()]
        for f in files:
            print('Drop')
class AppWindow(QMainWindow):
    """Main window: menu bar plus a sortable table view over a pandas DataFrame."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle('GeoPyLite')
        self.setGeometry(100, 100, 800, 600)
        self.df = pd.DataFrame()
        self.DataFileInputPath =''
        self.DataFileOutputPath =''
        self.create_menu()
        self.create_main_frame()
    def create_menu(self):
        """Build File / Result / Help menus and wire their actions."""
        menu_bar = self.menuBar()
        # File menu
        file_menu = menu_bar.addMenu('File')
        open_file_action = self.create_action('Open File', self.open_file)
        save_file_action = self.create_action('Save File', self.save_file)
        file_menu.addAction(open_file_action)
        file_menu.addAction(save_file_action)
        # Result menu
        result_menu = menu_bar.addMenu('Result')
        generate_result_action = self.create_action('Generate Result', self.generate_result)
        result_menu.addAction(generate_result_action)
        # Help menu
        help_menu = menu_bar.addMenu('Help')
        version_action = self.create_action('Version', self.show_version)
        help_menu.addAction(version_action)
    def create_action(self, text, slot=None):
        """Create a QAction labelled *text*, optionally connected to *slot*."""
        action = QAction(text, self)
        if slot is not None:
            action.triggered.connect(slot)
        return action
    def create_main_frame(self):
        """Lay out the central table view with Open/Save buttons underneath."""
        self.main_frame = QWidget()
        # self.setCentralWidget(self.table_view)
        self.table_view = PoweredQTableView(self.main_frame)
        self.table_view.setObjectName('tableView')
        self.table_view.setSortingEnabled(True)
        self.open_button = QPushButton('&Open')
        self.open_button.clicked.connect(self.open_file)
        self.save_button = QPushButton('&Save')
        self.save_button.clicked.connect(self.save_file)
        self.vbox = QVBoxLayout()
        self.vbox.addWidget(self.table_view)
        self.hbox = QHBoxLayout()
        for w in [self.open_button,self.save_button,]:
            self.hbox.addWidget(w)
        self.vbox.addLayout(self.hbox)
        self.main_frame.setLayout(self.vbox)
        self.setCentralWidget(self.main_frame)
        self.model = PandasModel(self.df)
        self.table_view.setModel(self.model)
    def open_file(self):
        """Prompt for a CSV/Excel file and load it into the table model."""
        # NOTE(review): 'Opne File' is a user-visible typo in the dialog title;
        # left unchanged here (runtime string), fix separately.
        DataFileInput, filetype = QFileDialog.getOpenFileName(self,'Opne File',
                                                              './',
                                                              'CSV Files (*.csv);;Excel Files (*.xlsx);;Excel 2003 Files (*.xls)')  # filter by extension; entries are separated by double semicolons
        print(DataFileInput)
        if ('csv' in DataFileInput):
            self.df = pd.read_csv(DataFileInput, engine='python')
        elif ('xls' in DataFileInput):
            self.df = pd.read_excel(DataFileInput,engine='openpyxl')
        self.model = PandasModel(self.df)
        self.table_view.setModel(self.model)
    def save_file(self):
        """Prompt for a destination and write the current DataFrame as CSV/Excel."""
        DataFileOutput, filetype = QFileDialog.getSaveFileName(self,
                                                               'Save File',
                                                               './',
                                                               'CSV Files (*.csv);;Excel Files (*.xlsx)')
        if ('csv' in DataFileOutput):
            self.df.to_csv(DataFileOutput, sep=',', encoding='utf-8',index=False)
            QMessageBox.information(self, "File Saved", f"Your file saved as: {DataFileOutput}.")
        elif ('xls' in DataFileOutput):
            self.df.to_excel(DataFileOutput,index=False)
            QMessageBox.information(self, "File Saved", f"Your file saved as: {DataFileOutput}.")
        else:
            # Dialog cancelled or unrecognised extension: do nothing.
            pass
    def generate_result(self):
        # Implement your logic to generate result here
        pass
    def show_version(self):
        # Implement your logic to show version here
        QMessageBox.information(self, "Version", f"Current version: {version}")
    def ErrorEvent(self, text=''):
        # Implement your error handling logic here
        pass
def main():
    """Start the Qt event loop with a single AppWindow and exit with its status."""
    application = QApplication(sys.argv)
    main_window = AppWindow()
    main_window.show()
    sys.exit(application.exec_())
if __name__ == '__main__':
    # Strip installer-added "-script.pyw"/".exe" suffixes from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| GeoPyTool/GeoPyLite | Basement.py | Basement.py | py | 10,729 | python | en | code | 0 | github-code | 90 |
5969801161 | # This sample code uses the Appium python client v2
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
from appium import webdriver
from appium.webdriver.common.appiumby import AppiumBy
from time import sleep
# For W3C actions
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.actions import interaction
from selenium.webdriver.common.actions.action_builder import ActionBuilder
from selenium.webdriver.common.actions.pointer_input import PointerInput
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from appium.webdriver.common.touch_action import TouchAction
# Appium desired capabilities for the KFC China Android app (daily check-in bot).
caps = {}
caps["platformName"] = "Android"
caps["appium:deviceName"] = "M2012K11AC"
caps["appium:appPackage"] = "com.yek.android.kfc.activitys"
caps["appium:appActivity"] = "com.yum.brandkfc.SplashAct"
caps["appium:platformVersion"] = "13"
caps["appium:noReset"] = True
# caps["appium:unicodeKeyboard"] = True
# caps["appium:resetKeyboard"] = True
caps["appium:dontStopAppOnReset"] = False
caps["appium:ensureWebviewsHavePages"] = True
caps["appium:nativeWebScreenshot"] = True
caps["appium:newCommandTimeout"] = 3600
caps["appium:connectHardwareKeyboard"] = True
driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", caps)
wait = WebDriverWait(driver, 15)
# Step 1: dismiss the splash-screen advertisement, if one appears.
try:
    button = wait.until(EC.presence_of_element_located((By.ID, 'com.yek.android.kfc.activitys:id/common_iv_close')))
    button.click()
except TimeoutException:
    print("超时没找广告关闭按钮")
# Step 2: open the check-in entry on the home screen.
try:
    button = wait.until(EC.presence_of_element_located((By.ID, 'com.yek.android.kfc.activitys:id/homev2_view_me_iv_12')))
    button.click()
except TimeoutException:
    print("超时没找主页签到按钮")
sleep(3)
# Step 3: find the TextView labelled 签到 ("check in") and tap its centre.
try:
    elements = driver.find_elements(By.CLASS_NAME ,'android.widget.TextView')
    for i in elements:
        if '签到' == i.text:
            location = i.location
            size = i.size
            x = location['x'] + size['width'] / 2
            y = location['y'] + size['height'] / 2
            action = TouchAction(driver)
            action.tap(x=x, y=y).perform()
            break
        # NOTE(review): this `continue` is redundant (end of loop body), and
        # find_elements does not raise NoSuchElementException — the except
        # below likely never fires; confirm before relying on it.
        continue
except NoSuchElementException:
    # Handle the element-not-found case.
    print("超时没找签到按钮")
# wait = WebDriverWait(driver, 5)
# try:
#     element = wait.until(EC.element_to_be_clickable((By.XPATH, '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[1]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[2]/android.view.ViewGroup[3]/android.view.ViewGroup[2]')))
#     location = element.location
#     size = element.size
#     x = location['x'] + size['width'] / 2
#     y = location['y'] + size['height'] / 2
#     action = TouchAction(driver)
#     action.tap(x=x, y=y).perform()
# except TimeoutException:
#     print("超时没找签到按钮")
sleep(2)
driver.quit()
9512471902 | from __future__ import print_function
"""
base class for generating code appropriate to the selected backend
"""
from ctree.visitors import NodeVisitor
from ctree.util import flatten
class CodeGenVisitor(NodeVisitor):
    """
    Visitor that renders an AST back into program text for the selected backend.

    Subclasses provide per-node ``visit_*`` methods; the helpers here manage
    indentation, statement blocks, and parenthesization.
    """

    def __init__(self, indent=0):
        # Current indentation depth, in indent units.
        self._indent = indent

    # -------------------------------------------------------------------------
    # common support methods

    def _tab(self):
        """Return the whitespace prefix for the current indentation level."""
        return " " * self._indent

    def _genblock(self, forest, insert_curly_brackets=True,
                  increase_indent=True):
        """Generate a block of code from *forest*, adding semicolons as needed."""
        if increase_indent:
            self._indent += 1
        body = ""
        for tree in flatten(forest):
            if not hasattr(tree, '_requires_semicolon'):
                body += self._tab() + str(tree) + "\n"
            else:
                semicolon_opt = ";" if tree._requires_semicolon() else ""
                block = tree.codegen(self._indent)
                # BUG FIX: originally ``block is not ""`` — an identity check
                # that depends on string interning and is a SyntaxWarning on
                # modern Python. Use value comparison instead.
                if block != "":
                    body += self._tab() + block + semicolon_opt + "\n"
        if increase_indent:
            self._indent -= 1
        if insert_curly_brackets:
            return "{\n%s%s}" % (body, self._tab())
        else:
            return "\n%s" % body

    def _parenthesize(self, parent, child):
        """Format *child*, wrapping it in parentheses when required."""
        if self._requires_parentheses(parent, child) or \
                child._force_parentheses is True:
            return "(%s)" % child
        else:
            return "%s" % child

    def _requires_parentheses(self, parent, child):
        """True by default; subclasses override with precedence-aware rules."""
        return True
| mbdriscoll/ctree | ctree/codegen.py | codegen.py | py | 1,809 | python | en | code | 3 | github-code | 90 |
16687355563 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Time: O(n)
# Space: O(1)
# https://leetcode.com/problems/same-tree
class Solution:
    def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:
        """Return True iff *p* and *q* are structurally identical trees
        with equal node values.

        Iterative pairwise traversal: each stack entry holds corresponding
        nodes from the two trees; a mismatch in shape or value fails fast.
        """
        pairs = [(p, q)]
        while pairs:
            a, b = pairs.pop()
            if a is None and b is None:
                continue
            if a is None or b is None or a.val != b.val:
                return False
            pairs.append((a.left, b.left))
            pairs.append((a.right, b.right))
        return True
| jpal91/leetcode | Python/same-tree.py | same-tree.py | py | 1,068 | python | en | code | 0 | github-code | 90 |
34372456680 | #controllers/backend/categories/edit.py
import config
from bottle import template
from copy import deepcopy
from models.categorydb import editdb
def call(id):
    """Render the admin page for editing one category.

    Loads the paged category list plus the category identified by *id*
    and feeds everything into the shared admin template.
    """
    kdict = deepcopy(config.kdict)
    kdict.update(pageTitle='ទំព័រកែប្រែ', route='category', edit=True)
    categories, count, category = editdb.call(id, kdict['maxItemList'])
    kdict.update(items=categories, count=count, item=category)
    return template('backend/admin.tpl', data=kdict)
33696424710 | from typing import Any, List, Tuple
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
def convolution(
    features: int, k_size: int, strides: int = 1, bias: bool = True, name: str = None
):
    """Build a 'same'-padded 2-D convolution layer with the given settings."""
    layer_options = dict(use_bias=bias, padding="same", name=name)
    return keras.layers.Conv2D(features, k_size, strides, **layer_options)
def psnr(x1, x2):
    """Peak signal-to-noise ratio between two image batches (pixel range 0-255)."""
    return tf.image.psnr(x1, x2, max_val=255)
def ssim(x1, x2):
    """Structural similarity between two image batches (pixel range 0-255)."""
    return tf.image.ssim(x1, x2, max_val=255)
# run lr through the model and output sr
def convert(model, lr):
    """Super-resolve *lr* with *model* and return uint8 pixel values."""
    lr = tf.cast(lr, tf.float32)
    sr = model(lr)
    # Clamp and round so the float prediction maps onto valid 0-255 pixels.
    sr = tf.clip_by_value(sr, 0, 255)
    sr = tf.round(sr)
    sr = tf.cast(sr, tf.uint8)
    return sr
# evaluate data using the input model
def evaluate(
    model: "SRModel", data: List[Tuple[tf.Tensor, tf.Tensor]]
) -> Tuple[float, float]:
    """Perform evaluation on the given model and return a tuple of psnr and ssim"""
    psnr_values = []
    ssim_values = []
    for lr, hr in data:
        # Each sample arrives unbatched; add the leading batch axis of 1.
        lr, hr = add_num_images(lr), add_num_images(hr)
        sr = convert(model, lr)
        # [0] extracts the scalar metric for the single image in the batch.
        ssim_values.append(ssim(hr, sr)[0])
        psnr_values.append(psnr(hr, sr)[0])
    return tf.reduce_mean(psnr_values), tf.reduce_mean(ssim_values)
def add_num_images(image):
    """Prepend a batch axis of size 1 to *image*.

    Generalized from the original hard-coded 3-axis reshape: works for any
    rank, and is identical for the usual (H, W, C) arrays.
    """
    return image.reshape((1,) + image.shape)
class SRModel(keras.Model):
    """Base class for all of the super-resolution models."""
    def __init__(self, scale=4, name_suffix="", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scale = scale              # upscaling factor (e.g. 4 = 4x resolution)
        self.name_suffix = name_suffix  # optional tag appended to the save name
    def call(self, inputs, training=False):
        # Concrete architectures must implement the forward pass.
        raise NotImplementedError
    @property
    def save_name(self):
        """Checkpoint name: "<model>_<scale>" plus the optional suffix."""
        temp_name = f"{self.name}_{self.scale}"
        if self.name_suffix:
            temp_name = f"{temp_name}_{self.name_suffix}"
        return temp_name
class DescriminatorBlock(tf.Module):
    """Single discriminator block: conv -> optional batch-norm -> activation.

    (Class name misspelling is kept for compatibility with existing callers.)
    """
    def __init__(
        self,
        conv_f: convolution,
        features: int,
        k_size: int,
        strides: int = 1,
        bias: bool = True,
        norm: bool = True,
        activation: keras.layers.Activation = keras.layers.ReLU(),
        name: str = None,
    ):
        super().__init__(name=name)
        block_layers = []
        block_layers.append(conv_f(features, k_size, strides=strides, bias=bias))
        if norm:
            block_layers.append(keras.layers.BatchNormalization())
        block_layers.append(activation)
        self.block = keras.Sequential(layers=block_layers)
    def __call__(self, inputs) -> Any:
        return self.block(inputs)
class ResBlock(tf.Module):
    """Residual block body: conv -> [norm] -> act -> conv -> [norm], scaled.

    NOTE(review): this returns only ``block(x) * residual_scale`` — the skip
    connection (``x + ...``) is NOT added here; presumably the enclosing model
    performs the addition. Confirm before reusing standalone.
    """
    def __init__(
        self,
        conv_f: convolution,
        features: int,
        k_size: int,
        bias: bool = True,
        norm: bool = False,
        activation=keras.layers.ReLU(),
        residual_scale: int = 1,
        name: str = None,
    ):
        super().__init__(name=name)
        block_layers = []
        # Two convolutions; the activation sits only between them (i == 0).
        for i in range(2):
            block_layers.append(conv_f(features, k_size, bias=bias))
            if norm:
                block_layers.append(keras.layers.BatchNormalization())
            if i == 0:
                block_layers.append(activation)
        self.block = keras.Sequential(layers=block_layers)
        self.residual_scale = residual_scale
    def __call__(self, inputs) -> Any:
        return tf.multiply(self.block(inputs), self.residual_scale)
class MeanShift(keras.layers.Layer):
    """Add (or subtract, with sign=-1, the default) a fixed per-channel RGB bias.

    The bias is mean/std per channel; with the default sign it subtracts the
    dataset mean from the input.
    """
    def __init__(
        self,
        rgb_mean=(0.4488, 0.4371, 0.4040),
        rgb_std=(1.0, 1.0, 1.0),
        sign=-1,
        **kwargs,
    ):
        # TODO: Check this
        super().__init__(**kwargs)
        mean = tf.constant(rgb_mean, np.float32)
        std = tf.constant(rgb_std, np.float32)
        self.bias = sign * mean / std
    def __call__(self, inputs):
        # NOTE(review): overriding __call__ (rather than call) bypasses the
        # standard keras Layer call machinery — confirm this is intentional.
        return inputs + self.bias
class PixelShuffler(keras.layers.Layer):
    """Rearrange depth into space (sub-pixel upscaling by *factor*)."""
    def __init__(self, factor: int, name: str = None) -> None:
        super().__init__(name=name)
        self.pixel_shuffler = lambda x: tf.nn.depth_to_space(x, factor)
    def __call__(self, inputs) -> Any:
        return self.pixel_shuffler(inputs)
class UpSampler(keras.Sequential):
    """Upsampling head: repeated (conv -> pixel-shuffle) stages reaching *scale*.

    scale=1 is a no-op; 2 and 3 use one stage; 4 chains two 2x stages.
    """
    def __init__(
        self,
        convolution: convolution,
        scale: int,
        features: int,
        norm: bool = False,
        activation: keras.layers.Activation = None,
        bias: bool = True,
        name: str = None,
    ):
        def __upsample_base(l: List[keras.layers.Layer], factor: int, name: str = None):
            # TODO: Fix this convolution features size (Check this)
            # Conv expands channels by factor^2 so pixel-shuffle can fold the
            # extra depth into a factor-times-larger spatial grid.
            l.append(
                convolution(
                    (factor ** 2) * features, 3, bias=bias, name=f"{name}_convolution"
                )
            )
            l.append(PixelShuffler(factor=factor, name=f"{name}_pixel_shuffler"))
            if norm:
                l.append(keras.layers.BatchNormalization(name=f"{name}_normalization"))
            if activation:
                l.append(activation)
            return l

        layers = []
        if scale == 1:
            pass
        elif scale == 2:
            layers = __upsample_base(layers, factor=2, name="upsample_1_scale_2")
        elif scale == 3:
            layers = __upsample_base(layers, factor=3, name="upsample_1_scale_3")
        elif scale == 4:
            layers = __upsample_base(layers, factor=2, name="upsample_1_scale_2")
            layers = __upsample_base(layers, factor=2, name="upsample_3_scale_2")
        else:
            raise ValueError(
                f"Scale must be between 1 and 4. The set scale was: {scale}"
            )
        super().__init__(layers=layers, name=name)
| dblincoe/csds-438-super-resolution | models/common.py | common.py | py | 5,858 | python | en | code | 1 | github-code | 90 |
72289509418 | import warnings
import copy
import sys
class Node:
    """A location on the burrow map: a named graph vertex holding at most one amphipod."""

    def __init__(self, name, neighbors=(), occupant=None, occupiable=True):
        # FIX: the default was a mutable ``[]``; it was only ever copied into a
        # fresh set, so switching to ``()`` removes the anti-pattern without
        # changing behaviour.
        self.name = name
        self.neighbors = set(neighbors)
        self.occupant = occupant      # Amphipod currently on this node, or None
        self.occupiable = occupiable  # False for hallway spots directly above a room

    def connect(self, neighbor):
        """Create a bidirectional edge between this node and *neighbor*."""
        self.neighbors.add(neighbor)
        neighbor.neighbors.add(self)

    def __str__(self):
        neighbor_str = ', '.join([neighbor.name for neighbor in self.neighbors])
        occupiable = '' if self.occupiable else '*'
        if self.occupant:
            return f"Node({self.name}{occupiable} ({self.occupant.name}) -> [{neighbor_str}])"
        else:
            return f"Node({self.name}{occupiable} -> [{neighbor_str}])"

    def __repr__(self):
        return self.__str__()
class Amphipod:
    """One amphipod on the map; tracks its location, moves used, and step cost.

    Each amphipod may move at most twice: once out into the hallway and once
    into its home room. ``type`` shadows the builtin but is kept for interface
    compatibility.
    """
    # Energy cost per step, by amphipod type.
    cost_dict = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}
    def __init__(self, name, type, location, node_dict, moved=0, steps=0):
        self.name = name
        self.type = type
        self.location = node_dict[location]
        self.location.occupant = self       # claim the starting node
        self.node_dict = node_dict
        self.moved = moved                  # moves used so far (max 2)
        self.stepcost = self.cost_dict[type]
        self.steps = steps                  # total steps walked so far
        RA0 = node_dict['RA0']
        RA1 = node_dict['RA1']
        RB0 = node_dict['RB0']
        RB1 = node_dict['RB1']
        RC0 = node_dict['RC0']
        RC1 = node_dict['RC1']
        RD0 = node_dict['RD0']
        RD1 = node_dict['RD1']
        # R?0 is the outer (hallway-adjacent) room slot, R?1 the inner one.
        self.home_dict = {'A': [RA0, RA1],
                          'B': [RB0, RB1],
                          'C': [RC0, RC1],
                          'D': [RD0, RD1]}
        self.homes = self.home_dict[type]
    def __str__(self):
        return f"Amph({self.name} @ {self.location.name}, {self.moved}/2)"
    def __repr__(self):
        return self.__str__()
    def available(self):
        """Return reachable destinations as {node: step count}.

        Depth-first walk over unoccupied nodes; the burrow graph is a tree,
        so the first discovered path to a node is its only path.
        """
        locations = {}
        visited = set()
        neighbors = [(neighbor, 1) for neighbor in self.location.neighbors if not neighbor.occupant]
        while neighbors:
            neighbor, steps = neighbors.pop()
            visited.add(neighbor)
            if neighbor.occupiable:
                # First move: anywhere occupiable. Second move: home room only.
                if self.moved < 1:
                    locations[neighbor] = steps
                elif self.moved == 1 and neighbor in self.homes:
                    locations[neighbor] = steps
            new_neighbors = [(new_neighbor, steps+1) for new_neighbor in neighbor.neighbors
                             if not new_neighbor.occupant and new_neighbor not in visited]
            neighbors.extend(new_neighbors)
        if self.moved:
            # When both home slots are reachable, force the inner (deeper) one.
            if self.homes[1] in locations and self.homes[0] in locations:
                del locations[self.homes[0]]
        return locations
    def move(self, target):
        """Move to *target* (Node or node name); returns True on success.

        Illegal moves only warn and return False — they do not raise.
        """
        if not isinstance(target, Node):
            target = self.node_dict[target]
        available = self.available()
        if self.moved >= 2:
            warnings.warn(f"{self} has moved too many times already")
            return False
        if target not in available:
            warnings.warn(f"Invalid move of {self} from {self.location} to {target}")
            return False
        # Commit: update occupancy on both nodes and our bookkeeping.
        target.occupant = self
        self.location.occupant = None
        self.location = target
        self.steps += available[target]
        self.moved += 1
        return True
class State:
    """Snapshot of the puzzle: the node graph plus every amphipod on it."""

    def __init__(self, node_dict, amphipods):
        self.node_dict = node_dict
        self.amphipods = amphipods

    def __getitem__(self, key):
        return self.amphipods[key]

    def state_tuple(self):
        """Hashable summary: (name, moves used, location name) per amphipod."""
        return tuple(
            (name, pod.moved, pod.location.name)
            for name, pod in self.amphipods.items()
        )

    def is_equal(self, other):
        return hash(self.state_tuple()) == hash(other.state_tuple())

    def get_dead(self):
        """Names of amphipods that have exhausted both of their moves."""
        return [name for name, pod in self.amphipods.items() if pod.moved >= 2]

    def copy(self):
        return copy.deepcopy(self)

    def movable(self):
        return [pod for pod in self.amphipods.values() if pod.moved < 2]

    def next_moves(self, include_state=True):
        """Enumerate every legal (amphipod, destination) pair from this state."""
        moves = []
        for pod in self.movable():
            for target in pod.available():
                entry = (pod.name, target.name, self) if include_state else (pod, target)
                moves.append(entry)
        return moves

    def success(self):
        """True when every amphipod sits in one of its home-room slots."""
        return all(pod.location in pod.homes for pod in self.amphipods.values())

    def cost(self):
        """Total energy spent: per-step cost times steps, summed over amphipods."""
        return sum(pod.stepcost * pod.steps for pod in self.amphipods.values())
def make_map():
    """Build the burrow graph: an 11-spot hallway plus four 2-deep side rooms.

    Hallway spots directly above a room doorway (H2/H4/H6/H8) are not
    occupiable. Returns {node name: Node}.
    """
    hallway = [Node(f'H{i}', occupiable=i not in (2, 4, 6, 8)) for i in range(11)]
    for left, right in zip(hallway, hallway[1:]):
        left.connect(right)
    node_dict = {node.name: node for node in hallway}
    # Attach each room (outer slot R?0, inner slot R?1) under its doorway.
    for letter, doorway in zip('ABCD', (2, 4, 6, 8)):
        outer = Node(f'R{letter}0')
        inner = Node(f'R{letter}1')
        outer.connect(inner)
        hallway[doorway].connect(outer)
        node_dict[outer.name] = outer
        node_dict[inner.name] = inner
    return node_dict
def make_amphipods(node_dict, a0='RA0', a1='RB1', b0='RA1', b1='RC0', c0='RB0', c1='RD1', d0='RC1', d1='RD0'):
    """Create the eight amphipods at the given start nodes (defaults: puzzle input).

    Returns {amphipod name: Amphipod}; constructing each one also marks its
    start node as occupied.
    """
    starts = {
        'A0': ('A', a0), 'A1': ('A', a1),
        'B0': ('B', b0), 'B1': ('B', b1),
        'C0': ('C', c0), 'C1': ('C', c1),
        'D0': ('D', d0), 'D1': ('D', d1),
    }
    return {name: Amphipod(name, typ, loc, node_dict)
            for name, (typ, loc) in starts.items()}
def make_state(a0='RA0', a1='RB1', b0='RA1', b1='RC0', c0='RB0', c1='RD1', d0='RC1', d1='RD0'):
    """Convenience constructor: fresh map + amphipods wrapped in a State."""
    nodes = make_map()
    pods = make_amphipods(nodes, a0=a0, a1=a1, b0=b0, b1=b1, c0=c0, c1=c1, d0=d0, d1=d1)
    return State(nodes, pods)
def print_map(node_dict):
    """Render the burrow as ASCII art.

    Draws each occupied node's amphipod type letter onto a template map and
    marks non-occupiable cells (the hallway spots above room entrances)
    with 'o'.  Returns a ``(view_string, list_of_char_rows)`` pair.
    """
    base_map = (
        """#############
#...........#
###.#.#.#.###
  #.#.#.#.#
  #########""")
    base_list = [[c for c in line] for line in base_map.split('\n')]
    # (row, col) position of every node inside the template above.
    locs = {'H0': (1, 1),
            'H1': (1, 2),
            'H2': (1, 3),
            'H3': (1, 4),
            'H4': (1, 5),
            'H5': (1, 6),
            'H6': (1, 7),
            'H7': (1, 8),
            'H8': (1, 9),
            'H9': (1, 10),
            'H10': (1, 11),
            'RA0': (2, 3),
            'RA1': (3, 3),
            'RB0': (2, 5),
            'RB1': (3, 5),
            'RC0': (2, 7),
            'RC1': (3, 7),
            'RD0': (2, 9),
            'RD1': (3, 9)
            }
    for name, node in node_dict.items():
        if node.occupant:
            i, j = locs[name]
            base_list[i][j] = node.occupant.type
        if not node.occupiable:
            # 'o' overwrites any occupant letter; doorway cells are never
            # occupied in a legal state anyway.
            i, j = locs[name]
            base_list[i][j] = 'o'
    view = '\n'.join([''.join([c for c in line]) for line in base_list])
    return view, base_list
# Starting configuration and a hand-found solution sequence for the puzzle.
state = make_state()
# Each entry is (amphipod name, destination node name), applied in order.
moves = [('D1', 'H9'),
         ('B1', 'H3'),
         ('C1', 'H5'),
         ('D1', 'RD1'),
         ('D0', 'RD0'),
         ('C1', 'RC1'),
         ('C0', 'RC0'),
         ('A1', 'H5'),
         ('B1', 'RB1'),
         ('A0', 'H1'),
         ('B0', 'RB0'),
         ('A0', 'RA1'),
         ('A1', 'RA0')]
for amphipod, target in moves:
state[amphipod].move(target) | jlazear/advent2021 | day23/amphipods.py | amphipods.py | py | 8,096 | python | en | code | 0 | github-code | 90 |
12848491465 | from fastapi_pagination import Params, paginate
from loguru import logger
from clients.remote_component_client import remote_component_client
from core.enum.component_enum import is_state
from database.session import SessionClass
from repository.application.application_repo import application_repo
from repository.component.app_component_relation_repo import app_component_relation_repo
from repository.component.group_service_repo import service_info_repo
from repository.component.service_config_repo import mnt_repo, volume_repo
from service.app_config.volume_service import volume_service
class AppMntService(object):
    """Manages shared-storage mounts between components.

    A component may mount another component's shared volume or config file.
    This service keeps the console database (via ``mnt_repo``) and the
    region API (via ``remote_component_client``) in sync.
    """

    # Volume-type constants used when filtering mountable volumes.
    SHARE = 'share-file'
    CONFIG = 'config-file'

    def delete_service_mnt_relation(self, session, tenant, service, dep_vol_id, user_name=''):
        """Unmount dependent volume *dep_vol_id* from *service*.

        For fully created components the mount is removed in the region
        first, then the console-side relation row is deleted.  A region
        404 means the region already forgot the relation, so it is deleted
        locally anyway.  Returns ``(200, "success")``.
        """
        dep_volume = volume_repo.get_service_volume_by_pk(session, dep_vol_id)
        try:
            if service.create_status == "complete":
                data = {
                    "depend_service_id": dep_volume.service_id,
                    "volume_name": dep_volume.volume_name,
                    # NOTE(review): other methods send tenant.enterprise_id for
                    # this key; tenant_name looks suspicious — confirm against
                    # the region API contract.
                    "enterprise_id": tenant.tenant_name,
                    "operator": user_name
                }
                res, body = remote_component_client.delete_service_dep_volumes(session,
                                                                               service.service_region,
                                                                               tenant.tenant_name,
                                                                               service.service_alias, data)
                logger.debug("delete service mnt info res:{0}, body {1}".format(res, body))
            mnt_repo.delete_mnt_relation(session, service.service_id, dep_volume.service_id, dep_volume.volume_name)
        except remote_component_client.CallApiError as e:
            logger.exception(e)
            if e.status == 404:
                logger.debug('service mnt relation not in region then delete rel directly in console')
                # NOTE(review): this call omits the ``session`` argument that the
                # success path passes — likely a bug; verify mnt_repo's signature.
                mnt_repo.delete_mnt_relation(service.service_id, dep_volume.service_id, dep_volume.volume_name)
        return 200, "success"

    def get_service_mnt_details(self, session: SessionClass, tenant, service, volume_types, page=1, page_size=20):
        """Return one page of volumes *service* currently mounts.

        Returns ``(mounted_dependencies, total)`` where each entry describes
        the dependent volume, its owning component and that component's
        application group.
        """
        all_mnt_relations = mnt_repo.get_service_mnts_filter_volume_type(session, tenant.tenant_id, service.service_id,
                                                                         volume_types)
        total = len(all_mnt_relations)
        params = Params(page=page, size=page_size)
        event_paginator = paginate(all_mnt_relations, params)
        mnt_relations = event_paginator.items
        mounted_dependencies = []
        if mnt_relations:
            for mount in mnt_relations:
                dep_service = service_info_repo.get_service_by_service_id(session, mount.dep_service_id)
                if dep_service:
                    gs_rel = app_component_relation_repo.get_group_by_service_id(session, dep_service.service_id)
                    group = None
                    if gs_rel:
                        group = application_repo.get_by_primary_key(session=session, primary_key=gs_rel.group_id)
                    dep_volume = volume_repo.get_service_volume_by_name(session, dep_service.service_id, mount.mnt_name)
                    if dep_volume:
                        mounted_dependencies.append({
                            "local_vol_path": mount.mnt_dir,
                            "dep_vol_name": dep_volume.volume_name,
                            "dep_vol_path": dep_volume.volume_path,
                            "dep_vol_type": dep_volume.volume_type,
                            "dep_app_name": dep_service.service_cname,
                            "dep_app_group": group.group_name if group else '未分组',
                            "dep_vol_id": dep_volume.ID,
                            "dep_group_id": group.ID if group else -1,
                            "dep_app_alias": dep_service.service_alias
                        })
        return mounted_dependencies, total

    def get_service_unmount_volume_list(self, session: SessionClass, tenant, service, service_ids, page, page_size,
                                        is_config=False):
        """Return one page of volumes *service* could still mount.

        Steps:
        1. Collect the tenant's other components (for name decoration later).
        2. Collect every shareable volume of those components.
        3. Collect the volumes already mounted, to filter them out.
        4. Walk the remaining volumes and assemble the response items.

        Returns ``(un_mount_dependencies, total)``.
        """
        # Drop this component itself from the candidate owner list.
        # NOTE(review): mutates the list while iterating it; safe only because
        # service_ids contains at most one matching entry.
        for serviceID in service_ids:
            if serviceID == service.service_id:
                service_ids.remove(serviceID)
        services = service_info_repo.get_services_by_service_ids(session, service_ids)
        state_services = []  # stateful components
        for svc in services:
            if is_state(svc.extend_method):
                state_services.append(svc)
        state_service_ids = [svc.service_id for svc in state_services]
        current_tenant_services_id = service_ids
        # Volumes this component has already mounted.
        mounted = mnt_repo.get_service_mnts(session, tenant.tenant_id, service.service_id)
        mounted_ids = [mnt.volume_id for mnt in mounted]
        # Shared paths not mounted yet.
        service_volumes = []
        # Config files are shareable regardless of whether the owning component
        # uses shared storage, so only already-mounted ones are filtered out;
        # other volume types must additionally exclude stateful components.
        if is_config:
            service_volumes = volume_repo.get_services_volumes_by_config(session, current_tenant_services_id,
                                                                         self.CONFIG, mounted_ids)
        else:
            service_volumes = volume_repo.get_services_volumes_by_share(session, current_tenant_services_id,
                                                                        self.SHARE, mounted_ids,
                                                                        state_service_ids)
        total = len(service_volumes)
        params = Params(page=page, size=page_size)
        event_paginator = paginate(service_volumes, params)
        page_volumes = event_paginator.items
        un_mount_dependencies = []
        for volume in page_volumes:
            gs_rel = app_component_relation_repo.get_group_by_service_id(session, volume.service_id)
            group = None
            if gs_rel:
                group = application_repo.get_by_primary_key(session=session, primary_key=gs_rel.group_id)
            dep_app_name = ""
            dep_app_alias = ""
            for ser in services:
                if ser.service_id == volume.service_id:
                    dep_app_name = ser.service_cname
                    dep_app_alias = ser.service_alias
            un_mount_dependencies.append({
                "dep_app_name": dep_app_name,
                "dep_app_group": group.group_name if group else '未分组',
                "dep_vol_name": volume.volume_name,
                "dep_vol_path": volume.volume_path,
                "dep_vol_type": volume.volume_type,
                "dep_vol_id": volume.ID,
                "dep_group_id": group.ID if group else -1,
                "dep_app_alias": dep_app_alias
            })
        return un_mount_dependencies, total

    def get_volume_dependent(self, session: SessionClass, tenant, service):
        """List the components that mount *service*'s volumes.

        Returns ``None`` when nothing depends on this component, otherwise a
        list of ``{volume_name, service_name, service_alias}`` dicts (one per
        dependent component per volume).
        """
        mnts = mnt_repo.get_by_dep_service_id(session, tenant.tenant_id, service.service_id)
        if not mnts:
            return None
        service_ids = [mnt.service_id for mnt in mnts]
        services = service_info_repo.get_services_by_service_ids(session, service_ids)
        # Index components by id; a key may map to several rows.
        id_to_services = {}
        for svc in services:
            if not id_to_services.get(svc.service_id, None):
                id_to_services[svc.service_id] = [svc]
                continue
            id_to_services[svc.service_id].append(svc)
        result = []
        for mnt in mnts:
            # Resolve the mounted volume; skip stale relations.
            vol = volume_repo.get_service_volume_by_name(session, service.service_id, mnt.mnt_name)
            if not vol:
                continue
            # Components that depend on this volume.
            services_dep_vol = id_to_services[mnt.service_id]
            for svc in services_dep_vol:
                result.append({
                    "volume_name": vol.volume_name,
                    "service_name": svc.service_cname,
                    "service_alias": svc.service_alias,
                })
        return result

    def add_service_mnt_relation(self, session: SessionClass, tenant, service, source_path, dep_volume, user_name=''):
        """Mount *dep_volume* into *service* at *source_path*.

        For fully created components the mount is registered in the region
        first (config files additionally carry their file content), then a
        console-side relation row is recorded.  No-op when *dep_volume* is
        falsy.
        """
        if not dep_volume:
            return
        if service.create_status == "complete":
            if dep_volume.volume_type != "config-file":
                data = {
                    "depend_service_id": dep_volume.service_id,
                    "volume_name": dep_volume.volume_name,
                    "volume_path": source_path,
                    "enterprise_id": tenant.enterprise_id,
                    "volume_type": dep_volume.volume_type
                }
            else:
                # Config files must ship their content to the region.
                config_file = volume_repo.get_service_config_file(session, dep_volume)
                data = {
                    "depend_service_id": dep_volume.service_id,
                    "volume_name": dep_volume.volume_name,
                    "volume_path": source_path,
                    "volume_type": dep_volume.volume_type,
                    "file_content": config_file.file_content,
                    "enterprise_id": tenant.enterprise_id
                }
            data["operator"] = user_name
            res, body = remote_component_client.add_service_dep_volumes(session,
                                                                        service.service_region,
                                                                        tenant.tenant_name, service.service_alias,
                                                                        data)
            logger.debug("add service mnt info res: {0}, body:{1}".format(res, body))
        mnt_relation = mnt_repo.add_service_mnt_relation(session, tenant.tenant_id, service.service_id,
                                                         dep_volume.service_id,
                                                         dep_volume.volume_name, source_path)
        logger.debug(
            "mnt service {0} to service {1} on dir {2}".format(mnt_relation.service_id, mnt_relation.dep_service_id,
                                                               mnt_relation.mnt_dir))

    def batch_mnt_serivce_volume(self, session: SessionClass, tenant, service, dep_vol_data, user_name=''):
        """Mount several dependent volumes in one call.

        (Method name typo "serivce" kept: it is part of the public interface.)
        Validates every target path against the component's existing volume
        paths first, then performs the mounts; failures of individual mounts
        are logged and skipped.
        """
        local_path = []
        tenant_service_volumes = volume_service.get_service_volumes(session=session, tenant=tenant, service=service)
        local_path = [l_path["volume_path"] for l_path in tenant_service_volumes]
        # Validate all paths before mounting anything.
        for dep_vol in dep_vol_data:
            volume_service.check_volume_path(session=session, service=service, volume_path=dep_vol["path"],
                                             local_path=local_path)
        for dep_vol in dep_vol_data:
            dep_vol_id = dep_vol['id']
            source_path = dep_vol['path'].strip()
            dep_volume = volume_repo.get_service_volume_by_pk(session, dep_vol_id)
            try:
                self.add_service_mnt_relation(session=session, tenant=tenant, service=service, source_path=source_path,
                                              dep_volume=dep_volume, user_name=user_name)
            except Exception as e:
                # Best-effort batch: log and continue with the next volume.
                logger.exception(e)
# Module-level singleton used by the API layer.
mnt_service = AppMntService()
| wutong-paas/wutong-console | service/mnt_service.py | mnt_service.py | py | 11,761 | python | en | code | 6 | github-code | 90 |
23585174541 | from matplotlib import pyplot as plt
# Bias-variance tradeoff demo: plot variance, squared bias and their sum
# against model complexity (labels are in Russian, as in the course text).
variance = [2 ** k for k in range(9)]
bias_squared = variance[::-1]
total_error = [v + b for v, b in zip(variance, bias_squared)]
xs = list(range(len(variance)))
print(xs)
for ys, fmt, name in ((variance, 'g-', 'дисперсия'),
                      (bias_squared, 'r-.', 'смещение^2'),
                      (total_error, 'b:', 'суммарная ошибка')):
    plt.plot(xs, ys, fmt, label=name)
plt.legend(loc=9)
plt.xlabel("Сложность модели")
plt.title("Компромисс между смещением и дисперсией")
plt.show()
| 1mmo/data-science-learning | LineGraphs.py | LineGraphs.py | py | 640 | python | ru | code | 0 | github-code | 90 |
19921171426 | import os
import json
import sys
import copy
import torch
import argparse
from tqdm import tqdm
sys.path.append('../../../')
sys.path.append('../../../python_parser')
from python_parser.run_parser import get_identifiers, remove_comments_and_docstrings, get_example_batch
from utils import _tokenize
from transformers import (RobertaForMaskedLM, RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer, RobertaModel)
from model import CodeBERT, GraphCodeBERT
from run import CodeBertTextDataset, GraphCodeBertTextDataset
import numpy as np
# Maps a model key to its (config, model, tokenizer) HuggingFace classes.
MODEL_CLASSES = {
    'codebert_roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
    'graphcodebert_roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
}
def get_embeddings(code, variables, tokenizer_mlm, codebert_mlm, args):
    """Encode *code* with every identifier in *variables* replaced by '<unk>'.

    Returns the MLM encoder's last hidden states for the truncated,
    CLS/SEP-wrapped token sequence (computed on CUDA, no gradients).
    """
    masked_code = get_example_batch(copy.deepcopy(code), {name: '<unk>' for name in variables}, "java")
    _, _, tokens = get_identifiers(remove_comments_and_docstrings(masked_code, "java"), "java")
    _, sub_words, _ = _tokenize(" ".join(tokens), tokenizer_mlm)
    # Reserve two positions for the CLS and SEP specials.
    pieces = [tokenizer_mlm.cls_token] + sub_words[:args.block_size - 2] + [tokenizer_mlm.sep_token]
    input_ids = torch.tensor([tokenizer_mlm.convert_tokens_to_ids(pieces)])
    with torch.no_grad():
        return codebert_mlm.roberta(input_ids.to('cuda'))[0]
def main():
    """Build reference data for clone-detection attacks.

    Loads a fine-tuned CodeBERT/GraphCodeBERT clone-detection model, keeps
    the first 15000 correctly-classified pairs, extracts identifier lists
    and '<unk>'-masked embeddings for both sides of each pair, saves the
    embeddings as .npy files and writes the bookkeeping JSON to
    ``args.store_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--all_data_file", default=None, type=str,
                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
    parser.add_argument("--model_name", default="", type=str,
                        help="model name")
    args = parser.parse_args()
    # Fixed runtime settings (not exposed on the CLI).
    args.device = torch.device("cuda")
    args.seed = 123456
    args.eval_batch_size = 32
    args.language_type = 'java'
    args.store_path = './%s_all_subs.json' % args.model_name
    args.n_gpu = 2
    args.block_size = 512
    # Per-model hyper-parameters and checkpoint locations.
    if args.model_name == 'codebert':
        args.output_dir = '../code/saved_models'
        args.model_type = 'codebert_roberta'
        args.config_name = 'microsoft/codebert-base'
        args.model_name_or_path = 'microsoft/codebert-base'
        args.tokenizer_name = 'roberta-base'
        args.base_model = 'microsoft/codebert-base-mlm'
        args.number_labels = 2
    if args.model_name == 'graphcodebert':
        args.output_dir = '../code/saved_models'
        args.model_type = 'graphcodebert_roberta'
        args.config_name = 'microsoft/graphcodebert-base'
        args.tokenizer_name = 'microsoft/graphcodebert-base'
        args.model_name_or_path = 'microsoft/graphcodebert-base'
        args.base_model = 'microsoft/graphcodebert-base'
        args.code_length = 448
        args.data_flow_length = 64
        args.number_labels = 1
    # Instantiate config / tokenizer / classifier for the chosen model type.
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    config.num_labels = args.number_labels
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
                                                do_lower_case=False,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    if args.block_size <= 0:
        args.block_size = tokenizer.max_len_single_sentence
    args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
    if args.model_name_or_path:
        model = model_class.from_pretrained(args.model_name_or_path,
                                            from_tf=bool('.ckpt' in args.model_name_or_path),
                                            config=config,
                                            cache_dir=args.cache_dir if args.cache_dir else None)
    else:
        model = model_class(config)
    # Wrap the backbone in the project-specific classification head.
    if args.model_name == 'codebert':
        model = CodeBERT(model, config, tokenizer, args)
    elif args.model_name == 'graphcodebert':
        model = GraphCodeBERT(model, config, tokenizer, args)
    checkpoint_prefix = 'checkpoint-best-f1/%s_model.bin' % (args.model_name)
    output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
    model.load_state_dict(torch.load(output_dir))
    model.to(args.device)
    # Separate MLM encoder used only for embedding extraction.
    codebert_mlm = RobertaForMaskedLM.from_pretrained(args.base_model)
    tokenizer_mlm = RobertaTokenizer.from_pretrained(args.base_model)
    codebert_mlm.to('cuda')
    # idx -> source code, loaded from the shared corpus file.
    url_to_code={}
    all_data = []
    with open('./data.jsonl') as f:
        for line in f:
            line=line.strip()
            js=json.loads(line)
            url_to_code[js['idx']]=js['func']
    # Build (code1, code2, label) records from the tab-separated pair file.
    with open(args.all_data_file) as f:
        for i, line in enumerate(f):
            item = {}
            line=line.strip()
            url1, url2, label = line.split('\t')
            if url1 not in url_to_code or url2 not in url_to_code:
                continue
            if label=='0':
                label=0
                item["id1"] = url1
                item["id2"] = url2
                item["code1"] = url_to_code[url1]
                item["code2"] = url_to_code[url2]
                item["label"] = label
                all_data.append(item)
            else:
                label=1
                item["id1"] = url1
                item["id2"] = url2
                item["code1"] = url_to_code[url1]
                item["code2"] = url_to_code[url2]
                item["label"] = label
                all_data.append(item)
    print(len(all_data))
    if args.model_name == 'codebert':
        all_examples = CodeBertTextDataset(tokenizer, args, args.all_data_file)
    elif args.model_name == 'graphcodebert':
        all_examples = GraphCodeBertTextDataset(tokenizer, args, args.all_data_file)
    assert len(all_examples) == len(all_data)
    # label -> list of reference records; flushed to disk once at the end.
    all_labels = {}
    with open(args.store_path, "w") as wf:
        for index in tqdm(range(0, 15000)):
            item = all_data[index]
            example = all_examples[index]
            logits, preds = model.get_results([example], args.eval_batch_size)
            # The ground-truth label sits at a model-specific tuple position.
            if args.model_name == 'codebert':
                true_label = str(int(example[1].item()))
            elif args.model_name == 'graphcodebert':
                true_label = str(int(example[6].item()))
            orig_prob = np.max(logits[0])  # NOTE: computed but never used below
            orig_label = str(int(preds[0]))
            # Keep only pairs the model classifies correctly.
            if not true_label == orig_label:
                continue
            if true_label not in all_labels.keys():
                all_labels[true_label] = []
            code1 = item["code1"]
            code2 = item["code2"]
            variable_name1, function_name1, _ = get_identifiers(code1, "java")
            variable_name2, function_name2, _ = get_identifiers(code2, "java")
            variables1 = []
            variables1.extend(variable_name1)
            variables1.extend(function_name1)
            variables2 = []
            variables2.extend(variable_name2)
            variables2.extend(function_name2)
            # '<unk>'-masked embeddings for both code fragments.
            embeddings1 = get_embeddings(code1, variables1, tokenizer_mlm, codebert_mlm, args)
            embeddings2 = get_embeddings(code2, variables2, tokenizer_mlm, codebert_mlm, args)
            if not os.path.exists('./%s_all_subs' % args.model_name):
                os.makedirs('./%s_all_subs' % args.model_name)
            np.save('./%s_all_subs/%s_%s_%s' % (args.model_name, str(orig_label), str(index), '1'), embeddings1.cpu().numpy())
            np.save('./%s_all_subs/%s_%s_%s' % (args.model_name, str(orig_label), str(index), '2'), embeddings2.cpu().numpy())
            all_labels[true_label].append({'code1': code1, 'code2': code2, 'embeddings_index': index,
                                           'variable_name1': variable_name1, 'variable_name2': variable_name2,
                                           'function_name1': function_name1, 'function_name2': function_name2})
        wf.write(json.dumps(all_labels) + '\n')


if __name__ == "__main__":
    main()
| tianzhaotju/CODA | test/CloneDetection/dataset/get_reference.py | get_reference.py | py | 8,676 | python | en | code | 5 | github-code | 90 |
7593781060 | # coding = utf-8
import sys
import os
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QApplication, QMenuBar, QGridLayout, QPushButton, QDialog,
QLabel, QTableView, QHeaderView, QLineEdit, QFormLayout, QMessageBox, QFileDialog)
from PyQt5.QtGui import QPixmap, QFont, QImage
from PyQt5.QtCore import QDate, QTime, QTimer, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtSql import QSqlDatabase, QSqlQueryModel
from PyQt5.Qt import QThread, QMutex
from face_dbinit import *
import numpy as np
import cv2
import dlib
import shutil
import xlwt
import time
import datetime
style_file = './UIface.qss'
# Face recognition model: maps an aligned face to a 128-d embedding.
face_rgt = dlib.face_recognition_model_v1("./model/dlib_face_recognition_resnet_model_v1.dat")
# Frontal face detector.
detector = dlib.get_frontal_face_detector()
# 68-point facial landmark predictor.
predictor = dlib.shape_predictor('./model/shape_predictor_68_face_landmarks.dat')
# Root directory where per-person face crops are stored (one folder per id).
Path_face = "./data/face_database/"
def distance(face_1, face_2):
    """Decide whether two 128-d face descriptors belong to the same person.

    Computes the Euclidean distance between the two descriptors and compares
    it against a 0.4 threshold.

    :param face_1: first face descriptor (any sequence of floats)
    :param face_2: second face descriptor (same length as face_1)
    :return: True when the distance is <= 0.4 (same person), else False.
             Always a plain ``bool`` — callers test the result with ``is True``.
    """
    dist = np.linalg.norm(np.asarray(face_1) - np.asarray(face_2))
    # bool() keeps the historical contract of returning Python bool literals
    # (np.bool_ would break the caller's ``compare is True`` identity check).
    return bool(dist <= 0.4)
class MainUI(QtWidgets.QWidget):
    """
    Main application window of the face clock-in system.

    Holds the camera preview, the live clock, the enroll/clock-in buttons
    and the menu bar (log viewer + admin login).
    """
    def __init__(self, parent=None):
        """
        Initialise widgets, state variables and timers.
        :param parent: optional parent widget
        """
        super(MainUI, self).__init__(parent)
        # Window properties.
        # self.resize(920, 560)
        self.setFixedSize(920, 560)
        self.setWindowTitle("MaX.打卡系统--V1.0")
        # State variables.
        self.menu_bar = None  # menu bar
        self.logcat_menu = None  # clock-in log menu action
        self.admin_login = None  # administrator login menu action
        self.image = None  # camera/logo preview label
        self.image_path = r"G:\githublocal\drawable\MaXlogo.jpg"
        self.button_in = None  # "enroll face" button
        self.button_check = None  # "clock in" button
        self.widget = None  # container widget (unused)
        self.time_label = None  # clock display label
        self.name_label = None  # recognised-name display label
        self.time = None  # current time (QTime)
        self.date = None  # current date (QDate)
        self.timer = None  # clock refresh timer
        self.text = None  # formatted date/time string
        self.time_flag = "08:00:00"  # clock-in deadline (24h)
        self.pic_num = 0  # saved-crop counter; at most 15 face shots per person
        self.sign = 1  # mode flag: 1 = clock in, 2 = enroll
        self.idn = None  # staff id currently being enrolled
        self.admin = None  # logged-in administrator name
        self.im_rd = None  # latest camera frame (BGR)
        self._sign = 0
        self.check_face = [[], []]  # recognised clock-in info entries
        # Camera refresh timer.
        self.timer_camera = QTimer()
        self.cap = cv2.VideoCapture()  # camera handle
        # Layout.
        self.glayout = QGridLayout()
        self.glayout.setSpacing(10)
        self.setLayout(self.glayout)
        # Live clock refresh.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.current_time)
        self.timer.start()
        # Build the UI.
        self.set_menu()
        self.show_time_label()
        self.current_time()
        self.set_operation()
        self.set_image()
        self.show_name_label()
        self.clicked_activity()

    def clicked_activity(self):
        """
        Connect all widget signals to their handlers.
        :return:
        """
        self.logcat_menu.triggered.connect(lambda: self.on_log_dialog())
        self.admin_login.triggered.connect(lambda: self.on_admin_dialog())
        self.button_in.clicked.connect(lambda: self.on_info_dialog())
        self.button_check.clicked.connect(lambda: self.new_create_time())
        self.timer_camera.timeout.connect(lambda: self.show_camera())

    def set_menu(self):
        """
        Build the menu bar (log viewer + admin login entries).
        :return:
        """
        self.menu_bar = QMenuBar(self)  # menu bar
        self.menu_bar.setObjectName('menu_bar')
        self.logcat_menu = self.menu_bar.addAction("打卡日志")
        self.menu_bar.addSeparator()
        self.admin_login = self.menu_bar.addAction("管理员登录")
        self.glayout.addWidget(self.menu_bar, 0, 0, 1, 30)

    def set_operation(self):
        """
        Create the enroll / clock-in push buttons.
        :return:
        """
        self.button_in = QPushButton("录入人脸")
        self.button_in.setObjectName('button_in')
        self.button_check = QPushButton("开始打卡")
        self.button_check.setObjectName('button_check')
        self.glayout.addWidget(self.button_in, 10, 2, 10, 10)
        self.glayout.addWidget(self.button_check, 12, 2, 10, 10)

    def set_image(self):
        """
        Show the placeholder logo in the preview area.
        :return:
        """
        self.image = QLabel(self)
        self.image.setObjectName('image')
        self.image.setPixmap(QPixmap(self.image_path).scaled(600, 400))
        self.glayout.addWidget(self.image, 1, 15, 15, 15)

    def show_time_label(self):
        """
        Create the clock display label.
        :return:
        """
        # widget = QtWidgets.QWidget()
        self.time_label = QLabel()
        self.time_label.setObjectName('time_label')
        self.time_label.setFrameShape(QtWidgets.QFrame.Box)
        self.glayout.addWidget(self.time_label, 3, 0, 8, 15)

    def show_name_label(self):
        """
        Create the label that shows the recognised person's name.
        :return:
        """
        self.name_label = QLabel(self)
        self.name_label.setObjectName('name_label')
        self.name_label.setText("暂无打卡信息")
        self.name_label.setAlignment(Qt.AlignCenter)
        # self.name_label.setGeometry(50, 500, 20, 20)
        self.name_label.setFrameShape(QtWidgets.QFrame.Box)
        self.glayout.addWidget(self.name_label, 16, 17, 4, 10)

    def current_time(self):
        """
        Refresh the date/time label with the current timestamp.
        :return:
        """
        self.date = QDate.currentDate()
        self.time = QTime.currentTime()
        self.text = self.date.toString(Qt.DefaultLocaleLongDate) + "\n" + self.time.toString()
        self.time_label.setText(self.text)
        self.time_label.setAlignment(Qt.AlignCenter)  # centre the text

    def on_log_dialog(self):
        """Open the clock-in log dialog (modal)."""
        logcat = LogDialog()
        logcat.setStyleSheet(CommonHelper.read_qss(style_file))
        logcat.exec_()

    def on_admin_dialog(self):
        """
        Open the admin dialog: login form first, settings panel once logged in.
        :return:
        """
        if self.admin_login.text() == "管理员登录":
            admin_dialog = AdminLoginDialog()
            admin_dialog.setStyleSheet(CommonHelper.read_qss(style_file))
            admin_dialog.adname.connect(self.ad_name)
            admin_dialog.exec_()
            if self.admin:
                self.admin_login.setText(self.admin)  # show admin name in the menu
        else:
            admin_dialog = AdminDialog()
            admin_dialog.setStyleSheet(CommonHelper.read_qss(style_file))
            admin_dialog.flag_re.connect(self.path_change_fun)  # connect slot
            admin_dialog.exec_()

    def on_info_dialog(self):
        """
        Open the staff-registration dialog; on success switch to enroll mode
        and start the camera.
        :return:
        """
        info = InfoDialog()
        info.setStyleSheet(CommonHelper.read_qss(style_file))
        info.idtext.connect(self.id_num)
        info.exec_()
        if self.idn:
            self.sign = 2
            self.new_create_time()

    @pyqtSlot(str)
    def id_num(self, s):
        # Receives the newly registered staff id from InfoDialog.
        self.idn = s

    @pyqtSlot(str)
    def ad_name(self, n):
        # Receives the administrator name from AdminLoginDialog.
        self.admin = n

    @pyqtSlot(str, str, str)
    def path_change_fun(self, *args):
        # Receives updated settings from AdminDialog; first value is the logo path.
        self.image_path = args[0]

    def new_create_time(self):
        """Toggle the camera session.

        Starting: opens the camera; in clock-in mode also loads the face DB
        and switches the button to "stop".  Stopping: releases the camera,
        records the clock-in (if one was in progress) and resets the UI.
        """
        if self.timer_camera.isActive() is False:
            flag = self.cap.open(0)
            if flag is False:
                QMessageBox.warning(self, u"警告", u"请检测相机与电脑是否连接正确",
                                    buttons=QMessageBox.Ok,
                                    defaultButton=QMessageBox.Ok)
            else:
                self.timer_camera.start(30)
                if self.sign == 1:
                    # Clock-in mode: load known faces from the database.
                    self.feature = load_face()
                    self.button_check.setText("停止打卡")
        else:
            self.timer_camera.stop()
            self.sign = 1
            self.cap.release()
            if self.button_check.text() == "停止打卡":
                print(int(self.name_label.text().split(" ")[0]))
                # NOTE(review): check_face starts as [[], []] and is appended
                # strings; tuple(t) therefore yields char tuples — confirm intent.
                print(set([tuple(t) for t in self.check_face]))
                insert_logcat(int(self.name_label.text().split(" ")[0]), self.date.toString(Qt.ISODate),
                              self.time.toString(), self.time_subtraction())
                self.button_check.setText("开始打卡")
            self.name_label.setText("暂无打卡信息")
            self.image.setPixmap(QPixmap(r"G:\githublocal\drawable\MaXlogo.jpg").scaled(600, 400))

    def show_camera(self):
        """Per-frame handler: detect the largest face, draw it, then either
        save enrollment crops (sign == 2) or match against the face DB."""
        flag, self.im_rd = self.cap.read()
        # key = cv2.waitKey(10)
        # Detected faces in this frame.
        dets = detector(self.im_rd, 1)
        if len(dets) != 0:
            equal_face = dets[0]
            # Pick the face with the largest area.
            max_area = 0
            for det in dets:
                w = det.right() - det.left()
                # NOTE(review): top - bottom is negative in image coordinates,
                # so w * h is never > max_area and equal_face stays dets[0] —
                # looks like a bug; confirm intended bottom - top.
                h = det.top() - det.bottom()
                if w * h > max_area:
                    equal_face = det
                    max_area = w * h
            # Draw a rectangle around the chosen face.
            cv2.rectangle(self.im_rd, tuple([equal_face.left(), equal_face.top()]),
                          tuple([equal_face.right(), equal_face.bottom()]),
                          (255, 0, 0), 2)
            show = cv2.resize(self.im_rd, (600, 400))
            show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)  # BGR -> RGB for Qt
            show_image = QImage(show.data, show.shape[1], show.shape[0], QImage.Format_RGB888)
            self.image.setPixmap(QPixmap.fromImage(show_image))
            if self.sign == 2:
                # Enrollment: crop the face and save it to the person's folder.
                face_height = equal_face.bottom() - equal_face.top()
                face_width = equal_face.right() - equal_face.left()
                im_blank = np.zeros((face_height, face_width, 3), np.uint8)  # empty 3-channel crop buffer
                # print(im_blank)
                try:
                    for height in range(face_height):
                        for width in range(face_width):
                            im_blank[height][width] = self.im_rd[int(equal_face.top()) + height][
                                int(equal_face.left()) + width]
                    self.pic_num += 1
                    cv2.imwrite(Path_face + self.idn + "/face_img" + str(self.pic_num) + ".jpg",
                                im_blank)  # file named by id: Chinese paths cannot be saved
                    if self.pic_num >= 15:  # stop after 15 captured crops
                        into_db = ThreadIntoDB(self.idn)
                        into_db.start()
                        self.pic_num = 0
                        self.new_create_time()
                except:
                    print("异常")
            else:
                # Clock-in: compute the descriptor and match against the DB.
                try:
                    shape = predictor(self.im_rd, equal_face)  # facial landmarks
                    face_cap = face_rgt.compute_face_descriptor(self.im_rd, shape)  # 128-d descriptor
                    # Compare the live face with every stored face.
                    for i, face_data in enumerate(self.feature[1]):
                        compare = distance(face_cap, face_data)
                        if compare is True:
                            str_info = str(self.feature[0][i]) + " " + self.feature[2][i]
                            self.name_label.setText(str_info)
                            self.check_face.append(str_info)
                            break
                except:
                    print("异常")

    def time_subtraction(self):
        """Minutes of lateness relative to ``self.time_flag``.

        Returns "0" when clock-in happened at or before the deadline,
        otherwise the number of whole minutes past it (as a string).
        """
        time_string1 = self.date.toString(Qt.ISODate) + " " + self.time_flag
        time_string2 = self.date.toString(Qt.ISODate) + " " + self.time.toString()
        ta = time.strptime(time_string2, "%Y-%m-%d %H:%M:%S")
        tb = time.strptime(time_string1, "%Y-%m-%d %H:%M:%S")
        y, m, d, H, M, S = ta[0:6]
        data_timea = datetime.datetime(y, m, d, H, M, S)
        y, m, d, H, M, S = tb[0:6]
        data_timeb = datetime.datetime(y, m, d, H, M, S)
        if data_timea <= data_timeb:
            return "0"
        else:
            secondsDiff = (data_timea - data_timeb).seconds
            return str(secondsDiff // 60)
class LogDialog(QDialog):
    """
    Clock-in log dialog: shows the log table and can export it to .xls.
    """
    def __init__(self, parent=None):
        super(LogDialog, self).__init__(parent)
        self.setWindowTitle("打卡日志")
        self.setWindowModality(Qt.ApplicationModal)  # block the parent window
        self.setFixedSize(600, 480)
        self.table = None  # table view
        self.button_export = None  # export button
        self.model = None  # SQL query model backing the table
        self.file = None  # xlwt workbook used by export
        self.load_data()
        self.log_dialog()

    def log_dialog(self):
        """
        Build the log table view and the export button.
        :return:
        """
        self.table = QTableView(self)
        self.table.resize(600, 400)
        self.table.setModel(self.model)
        self.table.setEditTriggers(QTableView.NoEditTriggers)  # read-only table
        self.table.setSelectionMode(QTableView.NoSelection)  # no selection
        self.table.resizeColumnsToContents()  # fit columns to content
        self.table.resizeRowsToContents()  # fit rows to content
        self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)  # stretch to fill
        self.button_export = QPushButton("导出日志", self)
        self.button_export.clicked.connect(self.export_xls)
        self.button_export.move(220, 415)

    def load_data(self):
        """
        Query the log via Qt's built-in QSqlQueryModel.
        :return:
        """
        db = QSqlDatabase.addDatabase("QSQLITE")  # choose the DB driver
        db.setDatabaseName("./sys_db.db")
        db.open()
        self.model = QSqlQueryModel()
        self.model.setQuery(
            """select tb1.id,tb1.sname,tb2.clcokdate,tb2.clocktime,tb2.latetime from
            staff_tb as tb1 join logcat_tb as tb2 on tb1.id = tb2.id""")
        self.model.setHeaderData(0, Qt.Horizontal, "ID")
        self.model.setHeaderData(1, Qt.Horizontal, "姓名")
        self.model.setHeaderData(2, Qt.Horizontal, "打卡日期")
        self.model.setHeaderData(3, Qt.Horizontal, "打卡时间")
        self.model.setHeaderData(4, Qt.Horizontal, "迟到时长")

    def export_xls(self):
        """Export the whole log to a dated .xls file in the working dir."""
        self.file = xlwt.Workbook(encoding="utf-8")
        log = load_logcat()
        sheet = self.file.add_sheet(u"日志")
        row0 = [u"ID", u"姓名", u"打卡日期", u"打卡时间", u"迟到时长"]
        for i in range(len(row0)):
            sheet.write(0, i, row0[i])
        for i in range(len(log)):
            for j in range(len(log[i])):
                print(log[i][j])
                sheet.write(i + 1, j, log[i][j])
        cu_time = time.strftime(u'%Y-%m-%d', time.localtime(time.time()))
        self.file.save("./" + cu_time + "日志.xls")
class AdminLoginDialog(QDialog):
    """
    Administrator login dialog.
    """
    adname = pyqtSignal(str)  # emits the admin's name on successful login

    def __init__(self, parent=None):
        super(AdminLoginDialog, self).__init__(parent)
        self.setFixedSize(350, 250)
        self.setWindowTitle("管理员登录")
        self.setWindowModality(Qt.ApplicationModal)
        self.setAutoFillBackground(True)
        self.label_name = None  # "user name" label
        self.label_passwd = None  # "password" label
        self.button_login = None  # login button
        self.name_edit = None  # user-name input
        self.passwd_edit = None  # password input
        self.glayout = None  # grid layout
        self.admin_name = None  # name returned by the DB on success
        self.set_login()
        self.admin_layout()
        self.activity()

    def activity(self):
        # Connect the login button to the credential check.
        self.button_login.clicked.connect(self.contrast)

    def set_login(self):
        """Create the labels, line edits and login button."""
        self.label_name = QLabel("用户名:", self)
        self.label_name.setFont(QFont("Roman times", 15, QFont.Bold))
        self.label_name.setAlignment(Qt.AlignCenter)
        self.label_passwd = QLabel("密码:", self)
        self.label_passwd.setFont(QFont("Roman times", 15, QFont.Bold))
        self.label_passwd.setAlignment(Qt.AlignCenter)
        self.name_edit = QLineEdit(self)
        self.name_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.passwd_edit = QLineEdit(self)
        self.passwd_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.passwd_edit.setEchoMode(QLineEdit.Password)
        self.button_login = QPushButton("登录")

    def admin_layout(self):
        """Arrange the widgets on a grid."""
        self.glayout = QGridLayout(self)
        self.glayout.addWidget(self.label_name, 0, 0)
        self.glayout.addWidget(self.label_passwd, 1, 0)
        self.glayout.addWidget(self.name_edit, 0, 1, 1, 2)
        self.glayout.addWidget(self.passwd_edit, 1, 1, 1, 2)
        self.glayout.addWidget(self.button_login, 2, 1)

    def contrast(self):
        """
        Check the entered user name / password against the database.
        :return:
        """
        if self.name_edit.text() and self.passwd_edit.text():
            self.admin_name = load_admin(self.name_edit.text(), self.passwd_edit.text())
            if self.admin_name:
                # Valid credentials: hand the name back and close.
                self.adname.emit(self.admin_name)
                self.close()
            else:
                self.name_edit.clear()
                self.passwd_edit.clear()
                QMessageBox.information(self, "提示", "用户名或密码错误", QMessageBox.Yes)
class InfoDialog(QDialog):
    """
    Staff-registration dialog shown before face enrollment.
    """
    idtext = pyqtSignal(str)  # emits the new staff id once saved

    def __init__(self, parent=None):
        super(InfoDialog, self).__init__(parent)
        self.setFixedSize(350, 200)
        self.setWindowTitle("信息")
        self.setWindowModality(Qt.ApplicationModal)
        self.flayout = None  # form layout
        self.id_edit = None  # staff id input
        self.name_edit = None  # staff name input
        self.department_edit = None  # department input
        self.button_next = None  # "next" button
        self.set_info()
        self.activity()

    def activity(self):
        # Connect the "next" button to the DB insert.
        self.button_next.clicked.connect(self.insert_data)

    def set_info(self):
        """Create the form widgets (id / name / department + next button)."""
        self.flayout = QFormLayout()
        id_label = QLabel("ID:")
        id_label.setFont(QFont("Roman times", 15, QFont.Bold))
        id_label.setAlignment(Qt.AlignCenter)
        name_label = QLabel("姓名:")
        name_label.setFont(QFont("Roman times", 15, QFont.Bold))
        name_label.setAlignment(Qt.AlignCenter)
        department_label = QLabel("部门:")
        department_label.setFont(QFont("Roman times", 15, QFont.Bold))
        department_label.setAlignment(Qt.AlignCenter)
        self.id_edit = QLineEdit()
        self.id_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.name_edit = QLineEdit()
        self.name_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.department_edit = QLineEdit()
        self.department_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.button_next = QPushButton("下一步")
        self.flayout.addRow(id_label, self.id_edit)
        self.flayout.addRow(name_label, self.name_edit)
        self.flayout.addRow(department_label, self.department_edit)
        self.flayout.addWidget(self.button_next)
        self.setLayout(self.flayout)

    def insert_data(self):
        """
        Save the new staff record, create their face folder, and emit the id.
        :return:
        """
        if self.id_edit.text() and self.name_edit.text() and self.department_edit.text():
            insert_staff(self.id_edit.text(), self.name_edit.text(), self.department_edit.text())
            os.mkdir(Path_face + self.id_edit.text())
            string = self.id_edit.text()
            self.idtext.emit(string)
            self.close()
        else:
            QMessageBox.information(self, "提示", "输入内容不能为空", QMessageBox.Yes)
class AdminDialog(QDialog):
    """
    管理页面 — admin settings dialog.

    Lets an administrator set the clock-in deadline, change the banner
    image, change the excel-log output directory and delete a staff
    record.  Pressing 确定 emits ``flag_re(img_path, excel_path, time)``.
    """
    flag_re = pyqtSignal(str, str, str)  # 自定义信号: (img path, excel path, clock-in time)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setFixedSize(550, 400)
        self.setWindowTitle("设置管理")
        self.setWindowModality(Qt.ApplicationModal)
        # widget handles, created in set_ui()
        self.glayout = None  # 布局
        self.flag_time_label = None
        self.flag_time_edit = None
        self.img_path_label = None
        self.button_img_change = None
        self.excel_path = None
        self.path_edit = None
        self.path_change_button = None
        self.excel_label = None
        self.dele_staff_label = None
        self.dele_staff_edit = None
        self.button_dele = None
        self.button_y = None
        self.button_n = None
        self.path_img = None
        self.path_excel = None
        self.set_ui()
        self.admin_layout()

    def set_ui(self):
        """Create every child widget and initialise defaults/signals."""
        self.flag_time_label = QLabel("设置打卡时间(24小时制):", self)
        self.flag_time_label.setObjectName("admin_dia")
        self.flag_time_edit = QLineEdit(self)
        self.flag_time_edit.setAlignment(Qt.AlignCenter)
        self.flag_time_edit.setInputMask("00:00")  # forces HH:MM input
        self.flag_time_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.img_path_label = QLabel(self)
        self.img_path_label.setObjectName("admin_dia")
        self.button_img_change = QPushButton("修改图片", self)
        self.button_img_change.setObjectName("button_admin")
        self.excel_label = QLabel(self)
        self.excel_label.setObjectName("admin_dia")
        self.path_change_button = QPushButton("修改路径", self)
        self.path_change_button.setObjectName("button_admin")
        self.dele_staff_label = QLabel("删除员工数据:", self)
        self.dele_staff_label.setObjectName("admin_dia")
        self.dele_staff_edit = QLineEdit(self)
        self.dele_staff_edit.setPlaceholderText("请输入ID号")
        self.dele_staff_edit.setFont(QFont("Roman times", 15, QFont.Bold))
        self.button_dele = QPushButton("删除", self)
        self.button_dele.setObjectName("button_admin")
        self.button_y = QPushButton("确定", self)
        self.button_y.setObjectName("button_admin")
        self.button_n = QPushButton("取消", self)
        self.button_n.setObjectName("button_admin")
        self.set_laebl()  # NOTE(review): typo of set_label; name kept for compatibility
        self.set_activity()

    def admin_layout(self):
        """Arrange the widgets on a grid layout."""
        self.glayout = QGridLayout()
        self.glayout.addWidget(self.flag_time_label, 1, 1, 1, 10)
        self.glayout.addWidget(self.flag_time_edit, 1, 11, 1, 10)
        self.glayout.addWidget(self.img_path_label, 4, 1, 1, 22)
        self.glayout.addWidget(self.button_img_change, 4, 25, 1, 5)
        self.glayout.addWidget(self.excel_label, 7, 1, 1, 22)
        self.glayout.addWidget(self.path_change_button, 7, 25, 1, 5)
        self.glayout.addWidget(self.dele_staff_label, 10, 1, 1, 7)
        self.glayout.addWidget(self.dele_staff_edit, 10, 8, 1, 10)
        self.glayout.addWidget(self.button_dele, 10, 25, 1, 5)
        self.glayout.addWidget(self.button_y, 13, 18, 1, 5)
        self.glayout.addWidget(self.button_n, 13, 25, 1, 5)
        self.setLayout(self.glayout)

    def set_activity(self):
        """Connect each button to its handler."""
        self.button_img_change.clicked.connect(self.set_path_img)
        self.path_change_button.clicked.connect(self.set_path_ex)
        self.button_y.clicked.connect(self.clicked_yes)
        self.button_n.clicked.connect(self.close)  # 关闭
        self.button_dele.clicked.connect(self.dele_staff)

    def set_laebl(self):
        """Initialise the default image path and excel-log directory labels."""
        self.path_img = "G:\\githublocal\\drawable\\MaXlogo.jpg"
        self.img_path_label.setText("图片路径:" + self.path_img)
        self.path_excel = "C:\\Users\\ULTRAMANSE\\Desktop"
        self.excel_label.setText("日志保存路径:" + self.path_excel)

    def set_path_img(self):
        """Let the admin pick a new banner image."""
        file_name, _ = QFileDialog.getOpenFileName(self,
                                                   "选择图片",
                                                   "./",
                                                   "All Files(*);;"
                                                   "JPG Files (*.jpg);;"
                                                   "PNG Files (*.png);;"
                                                   "IMG Files (*.img)"
                                                   )  # 选择图片
        # BUG FIX: `file_name is not ""` compared object identity, not
        # equality (SyntaxWarning on modern Python); use truthiness.
        if file_name:
            self.path_img = file_name
            self.img_path_label.setText("图片路径:" + self.path_img)

    def set_path_ex(self):
        """Let the admin pick the excel-log output directory."""
        ex_dir = QFileDialog.getExistingDirectory(self,
                                                  "选择文件夹",
                                                  "./")  # 选择保存路径
        # BUG FIX: same identity-comparison defect as set_path_img
        if ex_dir:
            self.path_excel = ex_dir
            self.excel_label.setText("日志保存路径:" + self.path_excel)

    def clicked_yes(self):
        """Emit the chosen settings and close the dialog."""
        self.flag_re.emit(self.path_img, self.path_excel, self.flag_time_edit.text())
        self.close()

    def dele_staff(self):
        """Confirm, then delete the staff row whose id was typed."""
        temp = "<font size='9'>是否删除id为" + self.dele_staff_edit.text() + "的员工</font>"
        message = QMessageBox.warning(self, "警告", temp, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if message == QMessageBox.Yes:
            # TODO(review): int() raises ValueError on empty/non-numeric input — validate first
            delete_data(int(self.dele_staff_edit.text()))
        elif message == QMessageBox.No:
            self.dele_staff_edit.clear()
lock = QMutex()  # 创建进程锁 — global mutex serialising the DB insert done by ThreadIntoDB.run
class ThreadIntoDB(QThread):
    """Worker thread that turns a staff member's face photos into one
    averaged 128-d dlib descriptor and stores it in the database.

    Reads every image under ``Path_face + id``, extracts one face
    descriptor per photo, averages them component-wise, inserts the
    result via ``insert_face`` and finally removes the photo folder.
    """

    def __init__(self, idn=None, parent=None):
        super().__init__(parent)
        self.id = idn  # staff id; also the name of the photo folder

    def run(self):
        lock.lock()
        # BUG FIX: the mutex was previously never released when any step
        # below raised, deadlocking later inserts — hence try/finally.
        try:
            folder = Path_face + self.id
            feature_list = []
            for pic in os.listdir(folder):
                pic_path = folder + "/" + pic
                print("读取成功:", pic_path)
                img = cv2.imread(pic_path)  # 读入图片
                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # dlib expects RGB
                dets = detector(img_rgb, 1)
                if len(dets) != 0:  # 检测是否有人脸
                    shape = predictor(img_rgb, dets[0])  # 检测人脸特征点
                    # face descriptor derived from the landmarks
                    feature_list.append(face_rgt.compute_face_descriptor(img_rgb, shape))
                else:
                    # (dead assignment `face_descriptor = 0` removed)
                    print("未在照片中识别到人脸")
            feature_average = []
            if len(feature_list) > 0:
                # component-wise mean of all descriptors (each is 128-d)
                feature_average = [sum(col) / len(feature_list) for col in zip(*feature_list)]
            # NOTE(review): when no face was found in any photo an empty
            # list is inserted, matching the original behaviour — confirm
            # insert_face tolerates that.
            insert_face(self.id, feature_average)  # 插入数据库
            shutil.rmtree(folder)  # 递归删除文件 — photos no longer needed
        finally:
            lock.unlock()
class CommonHelper:
    """Small utility namespace for loading Qt style sheets."""

    def __init__(self):
        pass

    @staticmethod
    def read_qss(stylefile):
        """Return the full text of the QSS file at *stylefile*."""
        with open(stylefile, 'r') as handle:
            content = handle.read()
        return content
if __name__ == '__main__':
    # Bootstrap the Qt application, apply the global style sheet and
    # hand control to the Qt event loop.
    app = QApplication(sys.argv)
    qss = CommonHelper.read_qss(style_file)
    window = MainUI()
    window.setStyleSheet(qss)
    window.show()
    sys.exit(app.exec_())
| ULTRAMANSE/maxface | UIface.py | UIface.py | py | 27,958 | python | en | code | 0 | github-code | 90 |
2334871256 | import pandas as pd
import numpy as np
from model.gmf import GMFEngine
from model.mlp import MLPEngine
from model.neumf import NeuMFEngine
from data import SampleGenerator
import os
import torch
torch.cuda.is_available()  # NOTE(review): return value is discarded — this only triggers CUDA initialisation; confirm that is intended
import argparse
# procedures on training each model
def train_model(model, config):
    """Train one recommender engine and return its best hit ratio.

    Parameters
    ----------
    model : engine class (GMFEngine / MLPEngine / NeuMFEngine).
    config : dict of hyper-parameters for that engine.

    NOTE(review): relies on the free variables ``sample_generator`` and
    ``evaluate_data`` being available at module scope; they are created
    inside ``train_full_pipeline`` — confirm they are published as
    globals before this runs.
    """
    engine = model(config)
    best_hit = 0
    last_epoch = config['num_epoch'] - 1
    for epoch in range(config['num_epoch']):
        print('Epoch {} starts !'.format(epoch))
        print('-' * 70)
        train_loader = sample_generator.instance_a_train_loader(config['num_negative'], config['batch_size'])
        engine.train_an_epoch(train_loader, epoch_id=epoch)
        hit_ratio, ndcg = engine.evaluate(evaluate_data, epoch_id=epoch)
        # checkpoint every 20 epochs, plus once at the final epoch
        # (merges the original duplicated if/elif save branches)
        if epoch % 20 == 0 or epoch == last_epoch:
            engine.save(config['alias'], epoch, hit_ratio, ndcg)
        if hit_ratio > best_hit:
            best_hit = hit_ratio
            engine.save(config['alias'], epoch, hit_ratio, ndcg, backup=False)
    print('Outputing the Best model')
    engine.full_save(config['alias'])
    return best_hit
#gmf configuration
# GMF stage hyper-parameters.
gmf_config = {'alias': 'gmf_factor8neg4-implict',
              'num_epoch': 200,
              'batch_size': 4,
              # alternative optimizers kept for reference:
              # 'optimizer': 'sgd', 'sgd_lr': 1e-3, 'sgd_momentum': 0.9,
              # 'optimizer': 'rmsprop', 'rmsprop_lr': 1e-3, 'rmsprop_alpha': 0.99, 'rmsprop_momentum': 0,
              'optimizer': 'adam',
              'adam_lr': 1e-3,
              # BUG FIX: num_userid / num_itemid only exist inside
              # train_full_pipeline, so referencing them here raised
              # NameError at import time.  None is a placeholder —
              # train_full_pipeline overwrites num_users/num_items.
              'num_users': None,
              'num_items': None,
              'latent_dim': 8,
              'num_negative': 4,
              'l2_regularization': 0,  # 0.01
              'use_cuda': True,
              'device_id': 0,
              'model_dir': 'checkpoints/{}_Epoch{}_HR{:.4f}_NDCG{:.4f}.model'}
# mlp configuration
# MLP stage hyper-parameters (pretrained from the GMF stage).
mlp_config = {'alias': 'mlp_factor8neg4_bz256_166432168_pretrain_reg_0.0000001',
              'num_epoch': 200,
              'batch_size': 4,  # 1024,
              'optimizer': 'adam',
              'adam_lr': 1e-3,
              # BUG FIX: placeholders instead of the undefined num_userid /
              # num_itemid names (NameError at import); filled in by
              # train_full_pipeline.
              'num_users': None,
              'num_items': None,
              'latent_dim': 8,
              'num_negative': 4,
              'layers': [16, 64, 32, 16, 8],  # layers[0] is the concat of latent user vector & latent item vector
              'l2_regularization': 0.0000001,  # MLP model is sensitive to hyper params
              'use_cuda': True,
              'device_id': 0,
              'pretrain': True,
              'pretrain_mf': 'gmf_factor8neg4-implict_best.model',
              'model_dir': 'checkpoints/{}_Epoch{}_HR{:.4f}_NDCG{:.4f}.model'}
# neumf configuration
# NeuMF stage hyper-parameters (pretrained from both GMF and MLP stages).
neumf_config = {'alias': 'pretrain_neumf_factor8neg4',
                'num_epoch': 200,
                'batch_size': 4,
                'optimizer': 'adam',
                'adam_lr': 1e-3,
                # BUG FIX: placeholders instead of the undefined num_userid /
                # num_itemid names (NameError at import); filled in by
                # train_full_pipeline.
                'num_users': None,
                'num_items': None,
                'latent_dim_mf': 8,
                'latent_dim_mlp': 8,
                'num_negative': 4,
                'layers': [16, 64, 32, 16, 8],  # layers[0] is the concat of latent user vector & latent item vector
                'l2_regularization': 0.0000001,
                'use_cuda': True,
                'device_id': 0,
                'pretrain': True,
                'pretrain_mf': 'gmf_factor8neg4-implict_best.model',
                'pretrain_mlp': 'mlp_factor8neg4_bz256_166432168_pretrain_reg_0.0000001_best.model',
                'model_dir': 'checkpoints/{}_Epoch{}_HR{:.4f}_NDCG{:.4f}.model'
                }
# train all three models - the entire training pipeline
def train_full_pipeline(data_path):
    """Run the three-stage NCF training pipeline (GMF -> MLP -> NeuMF).

    data_path: path of the csv ratings file consumed by SampleGenerator.
    """
    # BUG FIX: train_model() reads sample_generator / evaluate_data as
    # module-level names, so they must be published as globals here.
    global sample_generator, evaluate_data
    print('Preparing Data...')
    # DataLoader for training
    sample_generator = SampleGenerator(data_path)
    ## need to add asserts in SampleGenerator to check input format is correct
    evaluate_data = sample_generator.evaluate_data
    num_itemid, num_userid = sample_generator.usr_item_unique()
    # inject the real entity counts into every stage's config
    for config in [gmf_config, mlp_config, neumf_config]:
        config['num_users'] = num_userid
        config['num_items'] = num_itemid
    print('Preparing Data... Done!')
    print('Stage 1: Training GMF... ')
    gmf_best = train_model(GMFEngine, gmf_config)
    print('Stage 1: Training GMF... Done!')
    print('Stage 2: Training MLP...')
    mlp_best = train_model(MLPEngine, mlp_config)
    print('Stage 2: Training MLP... Done!')
    print('Stage 3: Training NeuMF... ')
    neumf_best = train_model(NeuMFEngine, neumf_config)
    print('Stage 3: Training NeuMF... Done! ')
    print('All Training Completed\n')
    print('** Result Report **\n')
    print('Stage 1 - GMF Hit Rate: {:.2f}%'.format(gmf_best))
    print('Stage 2 - MLP Hit Rate: {:.2f}%'.format(mlp_best))
    print('Stage 3 - NeuMF Hit Rate: {:.2f}%'.format(neumf_best))
# workflow
if __name__ == "__main__":
    # Command-line entry point: expects the csv data directory argument.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("data_directory", help="state the directory of the csv data file", type=str)
    parsed = arg_parser.parse_args()
    train_full_pipeline(parsed.data_directory)
| sleung852/tdc-product-recommendation | train_v3.py | train_v3.py | py | 5,202 | python | en | code | 0 | github-code | 90 |
18582683919 | import sys
# Count "2017-like" numbers (odd p where both p and (p+1)//2 are prime)
# in [l, r] for Q queries, using a sieve plus inclusive prefix sums.
Q = int(input())
pair = []
MAX = 0
for _ in range(Q):
    l, r = map(int, input().split())
    MAX = max(MAX, l, r)
    pair.append((l, r))

# sieve size (dead reassignments N = 101010 / N = 25 removed)
N = MAX + 1

is_prime = [1] * N
is_prime[0] = is_prime[1] = 0
# sieve of Eratosthenes
for i in range(2, N):
    if not is_prime[i]:
        continue
    for j in range(i * 2, N, i):
        is_prime[j] = 0

# a[i] = 1 iff i is "2017-like"
a = [0] * N
for i in range(N):
    if i % 2 == 0:
        continue
    if is_prime[i] and is_prime[(i + 1) // 2]:
        a[i] = 1

# s[i] = number of 2017-like values <= i (inclusive prefix sums)
s = [0]
for i, n in enumerate(a):
    s.append(s[i] + n)
s.pop(0)

# each query: count in [l, r] is s[r] - s[l-1]
for l, r in pair:
    print(s[r] - s[l-1])
| Aasthaengg/IBMdataset | Python_codes/p03476/s437285248.py | s437285248.py | py | 818 | python | en | code | 0 | github-code | 90 |
14012231198 | import json
import os
import deepspeed
import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
from transformers.modeling_utils import no_init_weights
from elixir.kernels.attn_wrapper import wrap_attention
from elixir.utils import get_model_size
from example.common.models import get_model
def train_init(batch_size: int, model_name: str, zero_stage: int, cpu_offload: bool):
    """Initialise a DeepSpeed ZeRO training run and return its step closures.

    Returns (forward, backward, optim, numel) where numel is the model's
    total parameter count.

    NOTE(review): any zero_stage other than 2 falls back to the ZeRO-3
    config file — confirm stage 1 is never passed here.
    """
    # pick the ZeRO config json that sits next to this source file
    cur_path = os.path.abspath(os.path.dirname(__file__))
    if zero_stage == 2:
        ds_path = os.path.join(cur_path, 'zero2_config.json')
    else:
        ds_path = os.path.join(cur_path, 'zero3_config.json')
    ds_config = json.load(open(ds_path))
    if not cpu_offload:
        # strip the offload sections so DeepSpeed keeps states on GPU
        zero_optim = ds_config.get('zero_optimization')
        zero_optim.pop('offload_optimizer')
        if zero_stage == 3:
            zero_optim.pop('offload_param')
    # global batch size = per-GPU micro batch * world size
    total_bs = batch_size * int(os.environ['WORLD_SIZE'])
    ds_config['train_batch_size'] = total_bs
    ds_config['train_micro_batch_size_per_gpu'] = batch_size
    deepspeed.init_distributed()
    if zero_stage == 2:
        # ZeRO-2: build the full model locally, skipping weight init for speed
        with no_init_weights():
            model = get_model(model_name)
        numel = get_model_size(model)
    else:
        # ZeRO-3: shard parameters at construction time
        with deepspeed.zero.Init(config_dict_or_path=ds_config):
            model = get_model(model_name)
        numel = deepspeed.runtime.zero.partition_parameters.param_count
    if cpu_offload:
        optimizer = DeepSpeedCPUAdam(model.parameters(), lr=1e-3)
    else:
        optimizer = FusedAdam(model.parameters(), lr=1e-3)
    model, optimizer, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=ds_config)
    model.gradient_checkpointing_enable()
    model = wrap_attention(model)  # swap in the fused attention kernels
    model.train()

    def forward(data):
        # data: dict of model keyword arguments
        return model(**data)

    def backward(loss):
        model.backward(loss)

    def optim():
        model.step()

    return forward, backward, optim, numel
if __name__ == '__main__':
    # smoke test: 1-sample batch, OPT-1B, ZeRO stage 3, no CPU offload
    train_init(1, 'opt-1b', 3, False)
    # raise SystemExit instead of the interactive-only exit() helper
    raise SystemExit(0)
| hpcaitech/Elixir | example/common/ds.py | ds.py | py | 1,995 | python | en | code | 8 | github-code | 90 |
22436531168 | import asyncio
import logging
import os
import time
from datetime import datetime
from PyDictionary import PyDictionary
from userbot import TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import register
from userbot.utils import progress
@register(outgoing=True, pattern=r"^\.def(?: |$)(.*)")
async def _(event):
    """`.def <word>` — look the word up with PyDictionary and post its meanings."""
    query = event.pattern_match.group(1)
    meanings = PyDictionary().meaning(query)
    reply = f"**Word :** `{query}`\n\n"
    try:
        for part_of_speech, definitions in meanings.items():
            reply += f"**{part_of_speech}**:\n"
            for definition in definitions:
                reply += f">`{definition}`\n"
        await event.edit(reply)
    except Exception:
        # meaning() yields nothing usable for unknown words
        await event.edit(f"Couldn't fetch meaning of {query}")
@register(outgoing=True, pattern=r"^\.imgs(?: |$)(.*)")
async def _(event):
    """`.imgs` (reply to media) — re-upload the replied media as an inline photo."""
    if event.fwd_from:
        return
    reply_to_id = event.message.id
    if event.reply_to_msg_id:
        reply_to_id = event.reply_to_msg_id
    await event.edit("```Converting.....```")
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        # (redundant filename/file_name/to_download_directory locals removed)
        downloaded_file_name = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "stkr.jpg")
        downloaded_file_name = await bot.download_media(
            reply_message, downloaded_file_name
        )
        if os.path.exists(downloaded_file_name):
            # force_document=False so Telegram renders the image inline
            # (unused `picc =` binding removed)
            await bot.send_file(
                event.chat_id,
                downloaded_file_name,
                force_document=False,
                reply_to=reply_to_id,
            )
            os.remove(downloaded_file_name)  # clean up the temp file
        else:
            await event.edit("```Ooof i can't handel dat```")
    await event.delete()
@register(outgoing=True, pattern=r"^\.stik(?: |$)(.*)")
async def _(event):
    """`.stik` (reply to media) — re-upload the replied media as a webp sticker."""
    if event.fwd_from:
        return
    reply_to_id = event.message.id
    if event.reply_to_msg_id:
        reply_to_id = event.reply_to_msg_id
    await event.edit("```Converting.....```")
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        # (redundant filename/file_name/to_download_directory locals removed)
        downloaded_file_name = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "kek.webp")
        downloaded_file_name = await bot.download_media(
            reply_message, downloaded_file_name
        )
        if os.path.exists(downloaded_file_name):
            # .webp sent with force_document=False becomes a sticker
            # (unused `picc =` binding removed)
            await bot.send_file(
                event.chat_id,
                downloaded_file_name,
                force_document=False,
                reply_to=reply_to_id,
            )
            os.remove(downloaded_file_name)  # clean up the temp file
        else:
            await event.edit("```Ooff i can't Handel Dat```")
    await event.delete()
@register(outgoing=True, pattern=r"^\.tft(?: |$)(.*)")
async def get(event):
    """`.tft <filename>` (reply to a text message) — save the replied text
    into <filename> and upload it as a document."""
    name = event.text[5:]
    # BUG FIX: slicing never yields None, so the old `is None` check was
    # dead code; an empty name is the actual error case.
    if not name:
        await event.edit("`reply correctly u DUMB`")
        return
    m = await event.get_reply_message()
    # BUG FIX: guard against no reply (m is None raised AttributeError);
    # no reply text keeps the original silent no-op behaviour
    if m is None or not m.text:
        return
    with open(name, "w") as f:
        f.write(m.message)
    await event.delete()
    await bot.send_file(event.chat_id, name, force_document=True)
    os.remove(name)  # delete the local copy after upload
@register(outgoing=True, pattern=r"^\.nfc(?: |$)(.*)")
async def _(event):
    """`.nfc voice|mp3` (reply to a media file) — re-encode the replied
    media's audio track with ffmpeg and send it back as an opus voice
    note or an mp3 file."""
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.edit("```Reply to any media file LOL.```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("reply to media file")
        return
    input_str = event.pattern_match.group(1)
    # BUG FIX: group(1) yields '' (never None) when no argument is given,
    # so the original `is None` branch was unreachable
    if not input_str:
        await event.edit("`U DUMB DUDE`")
        return
    if input_str in ["mp3", "voice"]:
        await event.edit("`converting...`")
    else:
        await event.edit("try `.nfc voice` or`.nfc mp3`")
        return
    try:
        start = datetime.now()
        c_time = time.time()
        downloaded_file_name = await bot.download_media(
            reply_message,
            TEMP_DOWNLOAD_DIRECTORY,
            progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
                progress(d, t, event, c_time, "trying to download")
            ),
        )
    except Exception as e:  # pylint:disable=C0103,W0703
        await event.edit(str(e))
        # BUG FIX: the original fell through and used the undefined
        # downloaded_file_name after a failed download
        return
    end = datetime.now()
    ms = (end - start).seconds
    await event.edit(
        "Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms)
    )
    voice_note = False
    supports_streaming = False
    if input_str == "voice":
        new_required_file_caption = "voice_" + str(round(time.time())) + ".opus"
        new_required_file_name = (
            TEMP_DOWNLOAD_DIRECTORY + "/" + new_required_file_caption
        )
        # re-encode only the audio stream to opus (Telegram voice format)
        command_to_run = [
            "ffmpeg",
            "-i",
            downloaded_file_name,
            "-map",
            "0:a",
            "-codec:a",
            "libopus",
            "-b:a",
            "100k",
            "-vbr",
            "on",
            new_required_file_name,
        ]
        voice_note = True
        supports_streaming = True
    elif input_str == "mp3":
        new_required_file_caption = "mp3_" + str(round(time.time())) + ".mp3"
        new_required_file_name = (
            TEMP_DOWNLOAD_DIRECTORY + "/" + new_required_file_caption
        )
        # strip the video stream, keep ffmpeg's default mp3 audio encode
        command_to_run = [
            "ffmpeg",
            "-i",
            downloaded_file_name,
            "-vn",
            new_required_file_name,
        ]
        voice_note = False
        supports_streaming = True
    else:
        # defensive only: input_str was validated above
        await event.edit("not supported")
        os.remove(downloaded_file_name)
        return
    logging.info(command_to_run)
    process = await asyncio.create_subprocess_exec(
        *command_to_run,
        # stdout must a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # wait for ffmpeg to finish; its output was never inspected, so the
    # discarded stdout/stderr .decode().strip() calls were removed
    await process.communicate()
    os.remove(downloaded_file_name)  # source file no longer needed
    if os.path.exists(new_required_file_name):
        await bot.send_file(
            entity=event.chat_id,
            file=new_required_file_name,
            allow_cache=False,
            silent=True,
            force_document=False,
            voice_note=voice_note,
            supports_streaming=supports_streaming,
            progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
                progress(d, t, event, c_time, "trying to upload")
            ),
        )
        # (discarded `(end_two - end).seconds` expression removed)
        os.remove(new_required_file_name)
    await event.delete()
| niteshraj2310/RemixGeng | userbot/modules/test.py | test.py | py | 7,557 | python | en | code | 9 | github-code | 90 |
71795060137 | import scrapy
from design.items import DesignItem
import json
# Constant fields merged into every scraped item (see parse_detail).
data = {
    'channel': 'laisj',  # source-site identifier
    'evt': 3,
}
class DesignCaseSpider(scrapy.Spider):
    """Crawls laisj.com's paginated work-list API and yields one
    DesignItem per detail page."""
    name = 'laisj'
    allowed_domains = ['www.laisj.com']
    page = 1  # current page of the POSTed listing API

    def start_requests(self):
        yield scrapy.FormRequest(
            url='http://www.laisj.com/publics2/work/list',
            formdata={'page': str(self.page)},
            callback=self.parse
        )

    def parse(self, response):
        """Parse one JSON listing page: queue its detail pages, then the next page."""
        content = json.loads(response.text)
        for entry in content['data']:
            yield scrapy.Request('http://www.laisj.com' + entry['url'], callback=self.parse_detail)
        last_page = content['last_page']
        if self.page < int(last_page):
            self.page += 1
            yield scrapy.FormRequest(
                url='http://www.laisj.com/publics2/work/list',
                formdata={'page': str(self.page)},
                callback=self.parse
            )

    def parse_detail(self, response):
        """Extract one DesignItem from a work detail page."""
        item = DesignItem()
        item['url'] = response.url
        item['img_url'] = response.xpath('//div[@class="content-other"]//img/@src').extract()[0]
        item['title'] = response.xpath('//div[@class="content-table"]/div[1]/div[1]/div/text()').extract()[0]
        item['company'] = response.xpath('//div[@class="info-name"]/text()').extract()[0]
        tags = []
        # BUG FIX: the bare `except:` clauses also swallowed KeyboardInterrupt
        # and real bugs; extract()[0] on a missing node raises IndexError only.
        try:
            tags.append(response.xpath('//div[@class="content-table"]/div[1]/div[2]/div/text()').extract()[0])
        except IndexError:
            pass
        try:
            tags.append(response.xpath('//div[@class="content-label"]/a/text()').extract()[0].strip())
        except IndexError:
            pass
        item['tags'] = ','.join(tags)
        # constant channel/evt fields shared by every item
        for key, value in data.items():
            item[key] = value
        yield item
| LIMr1209/Internet-worm | design/design/spiders/laisj.py | laisj.py | py | 2,114 | python | en | code | 0 | github-code | 90 |
14013963706 | from django.shortcuts import render,redirect
from django.contrib import messages
from .forms import Productaddform
from .models import ProductForCustomer,CustomerCheckout
from Home.models import UserData
from django.contrib.auth.decorators import login_required
import razorpay
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.http import HttpResponseBadRequest
from django.contrib.auth.decorators import login_required
from datetime import datetime
# Module-level Razorpay client, authenticated from Django settings.
razorpay_client = razorpay.Client(
    auth=(settings.RAZOR_KEY_ID, settings.RAZOR_KEY_SECRET))
@login_required(login_url="SignIn")
def ProductAdd(request):
    """List the current user's products and handle the add-product form.

    GET  -> render an empty form plus the user's existing products.
    POST -> validate, attach the logged-in user as owner, save.
    """
    # BUG FIX: instantiate the form for GET requests — previously the
    # form *class* (not an instance) was placed in the template context.
    form = Productaddform()
    products = ProductForCustomer.objects.filter(user=request.user)
    if request.method == "POST":
        form = Productaddform(request.POST, request.FILES)
        if form.is_valid():
            # commit=False sets the owner before the single INSERT
            # (the original wrote the row twice: save, mutate, save)
            prod = form.save(commit=False)
            prod.user = request.user
            prod.save()
            messages.info(request, "Product added to list")
            return redirect('ProductAdd')
    context = {
        "form": form,
        "products": products
    }
    return render(request, 'farmer/myproducts.html', context)
@login_required(login_url="SignIn")
def DeleteCustomerProduct(request, pk):
    """Delete one product by id; a missing id no longer crashes with a 500."""
    try:
        ProductForCustomer.objects.get(id=pk).delete()
        messages.info(request, "Item Deleted")
    except ProductForCustomer.DoesNotExist:
        # BUG FIX: .get() on an unknown id used to raise DoesNotExist
        messages.info(request, "Item not found")
    return redirect('ProductAdd')
@login_required(login_url="SignIn")
def ProductSingleViewCustomer(request, pk):
    """Product detail page; POST stores the buyer's address and starts checkout."""
    product = ProductForCustomer.objects.filter(id=pk)
    product1 = ProductForCustomer.objects.get(id=pk)
    userdata1 = UserData.objects.filter(user=request.user)
    if request.method == "POST":
        # update_or_create replaces the original exists()/get()/create()
        # sequence (and its duplicate queries) with a single upsert
        UserData.objects.update_or_create(
            user=request.user,
            defaults={
                "name": request.POST["name"],
                "phone": request.POST["phone"],
                "city": request.POST["city"],
                "state": request.POST["state"],
                "house": request.POST["house"],
            },
        )
        userdata1 = UserData.objects.filter(user=request.user)
        # objects.create already persists the row; the extra save() was redundant
        CustomerCheckout.objects.create(product=product1, user=request.user, status="Customer Ordered")
        return redirect("CustomerPayment", pk=pk)
    context = {
        "product": product,
        "userdata1": userdata1,
        "datalen": len(userdata1)
    }
    return render(request, 'productview.html', context)
@login_required(login_url="SignIn")
def CustomerMybooking(request):
    """Show the logged-in customer's own checkout records."""
    orders = CustomerCheckout.objects.filter(user=request.user)
    return render(request, "customerorder.html", {"product": orders})
@login_required(login_url="SignIn")
def AllProducts(request):
    """Catalogue page listing every product available to customers."""
    catalogue = ProductForCustomer.objects.all()
    return render(request, "products.html", {"products": catalogue})
@login_required(login_url="SignIn")
def CancelOrderCustomer(request, pk):
    """Mark one of the user's checkouts as cancelled."""
    checkout = CustomerCheckout.objects.get(id=pk)
    checkout.status = "Cancelled By User"
    checkout.save()
    messages.info(request, "Item Cancelled")
    return redirect("CustomerMybooking")
@login_required(login_url="SignIn")
def DeleteOrderCustomer(request,pk):
    # NOTE(review): this definition is shadowed by a second
    # DeleteOrderCustomer later in this module (defined without
    # @login_required); Python keeps only the last one — confirm which
    # version is intended and remove the other.
    CustomerCheckout.objects.get(id = pk).delete()
    messages.info(request,"Item Deleted")
    return redirect("CustomerMybooking")
def CustomerOrderFarmerview(request):
    """Farmer-side overview of every customer order."""
    all_orders = CustomerCheckout.objects.all()
    return render(request, "farmer/customerordersfarmerview.html", {"orders": all_orders})
def _set_customer_order_status(pk, status):
    """Helper: set the status field of one CustomerCheckout row.

    Deduplicates the identical bodies of the accept/despatch/reject views.
    """
    order = CustomerCheckout.objects.get(id=pk)
    order.status = status
    order.save()


def AcceptOrderCustomer(request, pk):
    """Farmer accepts a customer order."""
    _set_customer_order_status(pk, "Order Accepted")
    return redirect("CustomerOrderFarmerview")


def DespachOrderCustomer(request, pk):
    """Farmer marks a customer order as despatched."""
    _set_customer_order_status(pk, "Order Despached")
    return redirect("CustomerOrderFarmerview")


def RejectOrderCustomer(request, pk):
    """Farmer rejects a customer order."""
    _set_customer_order_status(pk, "Order Rejected")
    return redirect("CustomerOrderFarmerview")


@login_required(login_url="SignIn")
def DeleteOrderCustomer(request, pk):
    """Delete a checkout record.

    NOTE(review): this duplicates an earlier DeleteOrderCustomer and is
    the definition Python actually keeps; @login_required is added here
    to restore the protection the shadowed copy had — confirm and remove
    the duplicate.
    """
    CustomerCheckout.objects.get(id=pk).delete()
    messages.info(request, "item deleted")
    return redirect("CustomerMybooking")
def CustomerPayment(request,pk):
    """Create a Razorpay order for product *pk* and render the payment page."""
    product1 = ProductForCustomer.objects.get(id = pk)
    currency = 'INR'
    # price in paise — Razorpay expects the smallest currency unit
    amount = product1.Product_price * 100
    # Create a Razorpay Order Pyament Integration.....
    razorpay_order = razorpay_client.order.create(dict(amount=amount,
                                                       currency=currency,
                                                       payment_capture='0'))
    # order id of newly created order.
    razorpay_order_id = razorpay_order["id"]
    callback_url = 'paymenthandlercus'  # NOTE(review): relative URL — confirm the payment template resolves it correctly
    # we need to pass these details to frontend.
    context = {}
    context['razorpay_order_id'] = razorpay_order_id
    context['razorpay_merchant_key'] = settings.RAZOR_KEY_ID
    context['razorpay_amount'] = amount
    context['currency'] = currency
    context['callback_url'] = callback_url
    context['slotid'] = "1"  # NOTE(review): hard-coded slot id — confirm intent
    return render(request,'makepayment.html',context)
@csrf_exempt
def paymenthandlercus(request):
    """Razorpay server-to-server callback for customer payments.

    Verifies the payment signature, captures the payment and redirects
    to the success page; any failure yields the fail page or HTTP 400.
    """
    if request.method == "POST":
        try:
            payment_id = request.POST.get('razorpay_payment_id', '')
            razorpay_order_id = request.POST.get('razorpay_order_id', '')
            signature = request.POST.get('razorpay_signature', '')
            params_dict = {
                'razorpay_order_id': razorpay_order_id,
                'razorpay_payment_id': payment_id,
                'razorpay_signature': signature
            }
            # verify the payment signature.
            result = razorpay_client.utility.verify_payment_signature(params_dict)
            if result is not None:
                # TODO(review): amount is hard coded (800 INR in paise) and
                # does not match the order created in CustomerPayment —
                # confirm the correct amount to capture.
                amount = 800 * 100
                try:
                    razorpay_client.payment.capture(payment_id, amount)
                    return redirect('Success1')
                # BUG FIX: narrowed the bare except (it also caught
                # SystemExit/KeyboardInterrupt); debug prints removed
                except Exception:
                    # capture failed despite a valid signature; keep the
                    # original behaviour of still redirecting to success
                    return redirect('Success1')
            else:
                return render(request, 'paymentfail.html')
        # if signature verification fails or POST data is malformed
        except Exception:
            return HttpResponseBadRequest()
    else:
        # if other than POST request is made.
        return HttpResponseBadRequest()
def Success1(request):
    """Render the payment-success confirmation page."""
    return render(request,'Paymentconfirm.html')
| pramodthundathil/Smartfarm | Products/views.py | views.py | py | 7,268 | python | en | code | 0 | github-code | 90 |
40239596698 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 3 15:34:06 2021
@author: SethHarden
"""
import sys
class Solution(object):
    # Scratch practice code for a "read N characters into a buffer" exercise.
    fp = 0  # class-level "file pointer"; not used by the methods below

    def read(buf, n):
        # NOTE(review): missing `self` — when called as a method, `buf`
        # receives the instance; the module-level calls below bypass the
        # class entirely.  Confirm the intended calling convention.
        """
        :type buf: Destination buffer (List[str])
        :type n: Number of characters to read (int)
        :rtype: The number of actual characters read (int)
        """
        buffRead = buf[0:n]  # NOTE(review): assigned but never used
        print("buffer: ", buf[0:n])
        fp = buf[n:n*2]
        print("buffer is now at:", fp)
        # NOTE(review): saveBuff is a method, not a module-level name —
        # this bare call raises NameError when executed.
        return saveBuff(fp, buf[0:n])

    def saveBuff(fp, reader):
        # NOTE(review): body is incomplete — `arr` is built but never
        # populated or returned.
        arr = []
# NOTE(review): ad-hoc smoke test.  `read` is a Solution method, so these
# bare calls raise NameError at import time — confirm whether this should
# call Solution().read(...) or be removed.
buf = "sethharden"
n = 4
read(buf,n)
read(buf,n)
read(buf,n)
read(buf,n)
10544729886 | import config
from other.colors import bcolors as bcolors
def enter(BuySell):
    """Open a position in config.currency.

    BuySell: "B" for long, "S" for short.  In live mode the order goes
    to the broker via fxcmpy; otherwise a simulated position is stored
    in config.MyPosition at the latest close price.
    """
    if config.mode == 'live':
        direction = BuySell != "S"  # True = buy/long, False = sell/short
        try:
            config.con.open_trade(symbol=config.currency, is_buy=direction, amount=config.amount,
                                  time_in_force='GTC', order_type='AtMarket', is_in_pips=True,
                                  limit=config.limit, stop=config.stop, trailing_step=10)
        # BUG FIX: narrowed the bare except (it also swallowed
        # KeyboardInterrupt/SystemExit)
        except Exception:
            print(" Error Opening Trade.")
        else:
            print(" Trade Opened Successfully.")
    else:
        # simulated fill; is_buy stores the "B"/"S" letter, matched by exit()
        # (dead `direction` computation in this branch removed)
        config.MyPosition = {"symbol": config.currency, "is_buy": BuySell,
                             "price": config.pricedata['bidclose'][len(config.pricedata['bidclose']) - 1]}
        print(bcolors.OKGREEN + "Trade Opened Successfully." + bcolors.ENDC)
        print("\t", end='')
        print(bcolors.OKGREEN + str(config.MyPosition) + bcolors.ENDC)
def exit(BuySell=None):
    """Close open position(s) in config.currency.

    BuySell: "B"/"S" to close only that side, or None to close both
    (live mode only).  In simulated mode the single tracked position is
    closed and pip-profit / win-loss counters are updated.

    NOTE(review): this function shadows the builtin ``exit`` at module
    scope — consider renaming (e.g. close_trades) in a later refactor.
    """
    if config.mode == 'live':
        openpositions = config.con.get_open_positions(kind='list')
        isbuy = True
        if BuySell == "S":
            isbuy = False
        for position in openpositions:
            if position['currency'] == config.currency:
                if BuySell is None or position['isBuy'] == isbuy:
                    print(" Closing tradeID: " + position['tradeId'])
                    try:
                        closetrade = config.con.close_trade(trade_id=position['tradeId'], amount=position['amountK'])
                    except:
                        # NOTE(review): bare except — narrow to Exception when touched next
                        print(" Error Closing Trade.")
                    else:
                        print(" Trade Closed Successfully.")
    else:
        # simulated mode: MyPosition['is_buy'] stores the "B"/"S" letter
        if config.MyPosition['is_buy'] == BuySell:
            print(bcolors.OKGREEN + "Trade Closed Successfully.")
            # exit at the latest close price
            price = config.pricedata['bidclose'][len(config.pricedata['bidclose']) - 1]
            if BuySell == "S":
                # short: profit when price fell below entry
                config.PipsProfit += (config.MyPosition['price'] - price)
                print(bcolors.OKGREEN + "\tProfit: " + str(
                    int((config.MyPosition['price'] - price) * 100000)) + ' Pips' + bcolors.ENDC)
                if config.MyPosition['price'] - price >= 0:
                    config.TradeWin += 1
                else:
                    config.TradeLoss += 1
            else:
                # long: profit when price rose above entry
                config.PipsProfit += (price - config.MyPosition['price'])
                print(bcolors.OKGREEN + "\tProfit: " + str(
                    int((price - config.MyPosition['price']) * 100000)) + ' Pips' + bcolors.ENDC)
                if price - config.MyPosition['price'] >= 0:
                    config.TradeWin += 1
                else:
                    config.TradeLoss += 1
            print(bcolors.OKBLUE + '\nTotal Pips profit : ' + str(
                int(round(config.PipsProfit, 5) * 100000)) + bcolors.ENDC)
            print(bcolors.OKBLUE + 'Win trades : ' + str(config.TradeWin) + bcolors.ENDC)
            print(bcolors.OKBLUE + 'trades Loss : ' + str(config.TradeLoss) + '\n' + bcolors.ENDC)
            config.MyPosition = None
def countOpenTrades(BuySell=None):
    """Count open positions in config.currency, optionally filtered by side.

    BuySell: "B"/"S" to count only buys/sells, or None for both.
    """
    if config.mode == 'live':
        isbuy = BuySell != "S"
        counter = 0
        for position in config.con.get_open_positions(kind='list'):
            if position['currency'] == config.currency:
                if BuySell is None or position['isBuy'] == isbuy:
                    counter += 1
        return counter
    # simulated mode tracks at most one position
    # IDIOM FIX: `is None` instead of `== None`
    if config.MyPosition is None:
        return 0
    return 1
| SillyDEV/Market-Finance-Introduction-Forex | update/buyAndSell.py | buyAndSell.py | py | 3,769 | python | en | code | 0 | github-code | 90 |
32452926319 |
from sys import stdin
import sys
sys.setrecursionlimit(10000)
stdin = open("input.txt", "r")

row_max, col_max = map(int, stdin.readline().split())
height_ary = [list(map(int, stdin.readline().split())) for _ in range(row_max)]

# check[r][c]: memoised number of strictly-descending paths from (r, c)
# to the bottom-right goal cell (-1 = not yet known)
check = [[-1] * col_max for _ in range(row_max)]
check[row_max - 1][col_max - 1] = 1
# visit[r][c]: 1 while (r, c) is on the current DFS path
visit = [[0] * col_max for _ in range(row_max)]

visit[0][0] = 1


def dfs(row: int, col: int):
    """Count strictly-descending paths from (row, col) to the goal cell."""
    if check[row][col] != -1:
        return check[row][col]
    rst = 0
    # the four duplicated neighbour blocks are folded into one loop;
    # order matches the original: down, up, right, left
    for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nr, nc = row + dr, col + dc
        if 0 <= nr < row_max and 0 <= nc < col_max and visit[nr][nc] == 0:
            if height_ary[nr][nc] < height_ary[row][col]:
                visit[nr][nc] = 1
                tmp = dfs(nr, nc)
                visit[nr][nc] = 0
                if tmp != 0:
                    # NOTE(review): zero results are deliberately not
                    # cached, mirroring the original code — dead ends get
                    # re-explored; confirm this is intended.
                    check[nr][nc] = tmp
                rst += tmp
    return rst


print(dfs(0, 0))
| choekko/algorithm | Python/inJungle/4주차/시험2tmp.py | 시험2tmp.py | py | 1,972 | python | ko | code | 0 | github-code | 90 |
44996270049 | #Module_Name: initProject
#Author: Dahir Muhammad Dahir
#Date: 27-February-2018
#About: this module initialize the projects, allows the user
# to choose what the want to do.
from startNewProject import startNewProject
from continueExistingProject import continueExistingProject
from updateExistingProject import updateExistingProject
from addNewExtension import addNewExtension
from showCompletedProject import showCompletedProject
from showUncompletedProject import showUncompletedProject
def start():
    """Print the DMCRAWL banner, read a menu choice and dispatch it."""
    print("""
    {#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}
    {#}                                                         {#}
    {#}               <======:: DMCRAWL ::=======>              {#}
    {#}                                                         {#}
    {#}    <=======:: Author: Dahir Muhammad Dahir ::=======>   {#}
    {#}          <=======:: 27th-February-2018::========>       {#}
    {#}  <===:: spider:crawl:download || whatever u want ::===> {#}
    {#}                                                         {#}
    {#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}{#}
    """
          )
    task = raw_input(
        """
        {1} ==> Start a new Project
        {2} ==> Continue Existing Project [Unfinished]
        {3} ==> Updating Existing project
        {4} ==> Add new file extension
        {5} ==> Show completed projects
        {6} ==> Show Uncompleted projects
        {7} ==> Exit
        What do you want to do? Choose [1 - 7]\n""")
    # dispatch table replaces the original if/elif ladder
    actions = {
        "1": startNewProject,
        "2": continueExistingProject,
        "3": updateExistingProject,
        "4": addNewExtension,
        "5": lambda: showCompletedProject(listOnly=True),
        "6": lambda: showUncompletedProject(listOnly=True),
        "7": exit,
    }
    if task in actions:
        actions[task]()
    else:
        print("Invalid Entry, please choose from option [1 - 7]")
if __name__=="__main__":
    start()  # script entry point: show the menu and dispatch the choice
| Ethic41/codes | python/dmcrawl/modules/initProject.py | initProject.py | py | 2,058 | python | en | code | 1 | github-code | 90 |
86592238498 | import decimal
import re
from enum import Enum
from typing import Optional, Union
from boto3.dynamodb.conditions import Key
from boto3.dynamodb.types import TypeSerializer, TypeDeserializer
from botocore.exceptions import ClientError
from typhoon.aws.boto3_helper import boto3_session
from typhoon.aws.exceptions import TyphoonResourceNotFoundError
"""Module containing low-level functions to interact with DynamoDB
In general all functions take a dynamodb client or resource.
We do not worry about creating those resources/clients in this layer.
"""
class DynamoDBConnectionType(Enum):
    """Kind of boto3 handle to create: high-level 'resource' or low-level 'client'."""
    RESOURCE = 'resource'
    CLIENT = 'client'
def dynamodb_connection(
        aws_profile: Optional[str] = None,
        conn_type: Union[str, DynamoDBConnectionType] = 'resource',
        aws_region: Optional[str] = None,
        endpoint_url: Optional[str] = None,
):
    """Create a boto3 DynamoDB client or resource.

    :param aws_profile: named AWS profile passed to boto3_session, or None.
    :param conn_type: 'client'/'resource' (string or DynamoDBConnectionType).
    :param aws_region: region override; falls back to the session's region.
    :param endpoint_url: custom endpoint (e.g. DynamoDB Local); real
        *.amazonaws.com endpoints are ignored and left to boto3.
    :raises ValueError: if conn_type is neither client nor resource.
    """
    session = boto3_session(aws_profile)
    aws_region = aws_region or getattr(session, 'region_name', None)
    extra_params = {'region_name': aws_region} if aws_region else {}
    # Bug fix: re.match(pattern, None) raises TypeError, so the original crashed
    # whenever endpoint_url was left at its default of None.
    if endpoint_url and re.match(r'dynamodb\.[\w-]+\.amazonaws\.com', endpoint_url):
        endpoint_url = None
    if endpoint_url:
        # Local/custom endpoints (e.g. DynamoDB Local) accept any credentials.
        extra_params = {
            'aws_access_key_id': 'dummy',
            'aws_secret_access_key': 'dummy',
            'endpoint_url': endpoint_url,
            **extra_params,
        }
    if conn_type is DynamoDBConnectionType.CLIENT or conn_type == 'client':
        ddb = session.client('dynamodb', **extra_params)
    elif conn_type is DynamoDBConnectionType.RESOURCE or conn_type == 'resource':
        ddb = session.resource('dynamodb', **extra_params)
    else:
        raise ValueError(f'Expected conn_type as client or resource, found: {conn_type}')
    return ddb
def scan_dynamodb_table(ddb_resource, table_name: str):
    """Return every item in *table_name*, following scan pagination to the end."""
    table = ddb_resource.Table(table_name)
    page = table.scan()
    items = page['Items']
    # DynamoDB scans are paginated: keep going while a continuation key is present.
    while 'LastEvaluatedKey' in page:
        page = table.scan(ExclusiveStartKey=page['LastEvaluatedKey'])
        items.extend(page['Items'])
    return items
def dynamodb_table_exists(ddb_client, table_name: str):
    """Return True if *table_name* is among the tables visible to *ddb_client*."""
    return table_name in ddb_client.list_tables()['TableNames']
def create_dynamodb_table(
        ddb_client,
        table_name: str,
        primary_key: str,
        range_key: Union[str, None] = None,  # May have other types in the future
        read_capacity_units: int = 1,
        write_capacity_units: int = 1,
):
    """Create a DynamoDB table with a string HASH key and an optional string RANGE key.

    :raises ValueError: if *range_key* is given but is not a string.
    :return: the boto3 create_table response.
    """
    key_schema = [{'AttributeName': primary_key, 'KeyType': 'HASH'}]
    attribute_definitions = [{'AttributeName': primary_key, 'AttributeType': 'S'}]
    if range_key:
        # Only string range keys are supported for now.
        if not isinstance(range_key, str):
            raise ValueError(f'Expected range key to be in [str]. Found: {type(range_key)}')
        key_schema.append({'AttributeName': range_key, 'KeyType': 'RANGE'})
        attribute_definitions.append({'AttributeName': range_key, 'AttributeType': 'S'})
    return ddb_client.create_table(
        TableName=table_name,
        KeySchema=key_schema,
        AttributeDefinitions=attribute_definitions,
        ProvisionedThroughput={
            'ReadCapacityUnits': read_capacity_units,
            'WriteCapacityUnits': write_capacity_units
        }
    )
def dynamodb_put_item(ddb_client, table_name: str, item: dict):
    """Serialize *item* to the DynamoDB wire format and write it to *table_name*.

    :raises TyphoonResourceNotFoundError: if the table does not exist.
    """
    wire_item = TypeSerializer().serialize(item)['M']
    try:
        ddb_client.put_item(TableName=table_name, Item=wire_item)
    except ddb_client.exceptions.ResourceNotFoundException:
        raise TyphoonResourceNotFoundError(f'Table {table_name} does not exist in DynamoDB')
def dynamodb_get_item(ddb_client, table_name: str, key_name: str, key_value: str):
    """Fetch one item by its string key and return it as plain Python values.

    :raises TyphoonResourceNotFoundError: if the table or the item is missing.
    """
    try:
        response = ddb_client.get_item(
            TableName=table_name,
            Key={key_name: {'S': key_value}}
        )
    except ddb_client.exceptions.ResourceNotFoundException:
        raise TyphoonResourceNotFoundError(f'Table "{table_name}" does not exist in DynamoDB')
    if 'Item' not in response:
        raise TyphoonResourceNotFoundError(
            f'Item {key_name}="{key_value}" does not exist in DynamoDB table {table_name}')
    deserialize = TypeDeserializer().deserialize
    return {attr: deserialize(value) for attr, value in response['Item'].items()}
def dynamodb_query_item(
        ddb_resource,
        table_name: str,
        partition_key_name: str,
        partition_key_value: str,
):
    """Return the first item whose partition key equals *partition_key_value*.

    :raises TyphoonResourceNotFoundError: if the table or the item is missing.
    """
    try:
        table = ddb_resource.Table(table_name)
        response = table.query(KeyConditionExpression=Key(partition_key_name).eq(partition_key_value))
    except ClientError:
        raise TyphoonResourceNotFoundError(f'Table "{table_name}" does not exist in DynamoDB')
    items = response.get('Items')
    if not items:
        raise TyphoonResourceNotFoundError(
            f'Item {partition_key_name}="{partition_key_value}" does not exist in DynamoDB table {table_name}')
    deserialize = TypeDeserializer().deserialize
    return {attr: deserialize(value) for attr, value in items[0].items()}
def dynamodb_delete_item(ddb_client, table_name, key_name: str, key_value: str):
    """Delete the item whose string key *key_name* equals *key_value* (no error if absent)."""
    key = {key_name: {'S': key_value}}
    ddb_client.delete_item(TableName=table_name, Key=key)
def replace_decimals(obj):
    """Recursively convert decimal.Decimal values to int (when whole) or float.

    Lists and dicts are rewritten in place and returned; sets are rebuilt
    (their members are immutable); every other object passes through untouched.
    """
    if isinstance(obj, decimal.Decimal):
        return int(obj) if obj % 1 == 0 else float(obj)
    if isinstance(obj, list):
        for idx, member in enumerate(obj):
            obj[idx] = replace_decimals(member)
        return obj
    if isinstance(obj, dict):
        for key in obj:
            obj[key] = replace_decimals(obj[key])
        return obj
    if isinstance(obj, set):
        return set(replace_decimals(member) for member in obj)
    return obj
| typhoon-data-org/typhoon-orchestrator | typhoon/aws/dynamodb_helper.py | dynamodb_helper.py | py | 6,091 | python | en | code | 29 | github-code | 90 |
18895315715 |
def one_binary_function(key, string):
    """XOR every character of *string* with the integer *key* and return the result.

    The transform is its own inverse: applying it twice with the same key
    yields the original string.  Replaces the original's redundant
    ord -> bin -> int round-trips, manual counter and four intermediate
    lists with a direct XOR over the characters.
    """
    return "".join(chr(ord(character) ^ key) for character in string)
| Patchyst/XOR-Encryption-GUI | encryption1function.py | encryption1function.py | py | 581 | python | en | code | 1 | github-code | 90 |
18386900339 | #https://atcoder.jp/contests/diverta2019-2/submissions/11229318
n = int(input())
points = [tuple(map(int, input().split())) for _ in range(n)]
point_set = set(points)
best_overlap = 0
# For every unordered pair, take the translation vector between the two points
# and count how many points have their translated partner also in the set;
# the answer is n minus the best such overlap.
for i in range(n - 1):
    for j in range(i + 1, n):
        dx = points[i][0] - points[j][0]
        dy = points[i][1] - points[j][1]
        matched = sum((px - dx, py - dy) in point_set for px, py in points)
        best_overlap = max(best_overlap, matched)
print(n - best_overlap)
| Aasthaengg/IBMdataset | Python_codes/p03006/s835988517.py | s835988517.py | py | 333 | python | en | code | 0 | github-code | 90 |
13266318402 | # !/user/bin/env python
# -*- coding:utf-8 -*-
# author:Zfy date:2021/9/4 17:46
import math
def func(m, n):
    """Sum the series m + sqrt(m) + sqrt(sqrt(m)) + ... for n terms, rounded to 2 dp."""
    total = 0
    current = m
    for _ in range(n):
        total += current
        current = math.sqrt(current)
    return round(total, 2)


print(func(2, 2))
| feiyu7348/python-Learning | 普通练习/数列和.py | 数列和.py | py | 235 | python | en | code | 0 | github-code | 90 |
20432301020 | import functools
arr = ['fab', 'fed', 'f', 'ed', 'e']
ab = {'f': 10, 'e': 11, 'd': 12, 'c': 13, 'b': 14, 'a': 15}


def comparator(a, b):
    """Order two words by the concatenation of their per-letter codes, compared as strings."""
    key_a = "".join(str(ab[letter]) for letter in a)
    key_b = "".join(str(ab[letter]) for letter in b)
    print(key_a, key_b)  # debug trace of each comparison, as in the original
    # Never returns 0: equal keys are treated as "less", which sort tolerates.
    return 1 if key_a > key_b else -1


print(arr)
arr.sort(key=functools.cmp_to_key(comparator))
print(arr) | NyeongB/python_2 | test1.py | test1.py | py | 401 | python | en | code | 0 | github-code | 90 |
18067766809 | N = int(input())
l = list(map(int, input().split()))
# For each candidate meeting value x in [-100, 100] (the input value range),
# the cost is sum((x - a_j)^2) over the N inputs; print the minimum cost.
# Fixes the original's shadowing of the builtin `sum` and its 201 dummy lists.
print(min(sum((x - l[j]) ** 2 for j in range(N)) for x in range(-100, 101)))
42986062636 | from math import *
# tính tổng hai class phân số trong python
class P:
    """A rational number with numerator *tu* and denominator *mau*."""

    def __init__(self, tu=None, mau=None):
        self.tu = tu
        self.mau = mau

    def __str__(self):
        # Render as "numerator/denominator".
        return f'{self.tu}/{self.mau}'

    def __add__(self, other):
        # Cross-multiply, then reduce the result to lowest terms.
        total = P()
        total.mau = self.mau * other.mau
        total.tu = self.tu * other.mau + self.mau * other.tu
        total.rg()
        return total

    def rg(self):
        # Reduce this fraction in place by its greatest common divisor.
        divisor = gcd(self.tu, self.mau)
        self.tu //= divisor
        self.mau //= divisor
# Read four integers "a b c d" on one line: the fractions a/b and c/d.
# Renamed from `list`, which shadowed the builtin.
parts = [int(token) for token in input().split()]
a = P(parts[0], parts[1])
b = P(parts[2], parts[3])
# Print their sum, reduced to lowest terms (via P.__str__).
print(a + b)
| nguyenkien0703/python_ptit | PY04004.py | PY04004.py | py | 759 | python | vi | code | 0 | github-code | 90 |
19644717354 | from AlorPy import AlorPy # Работа с Alor OpenAPI V2
from Config import Config # Файл конфигурации
if __name__ == '__main__':  # Entry point when this script is run directly
    apProvider = AlorPy(Config.UserName, Config.RefreshToken)  # Connect to the live trading account; login and refresh token come from Config.py
    # apProvider = AlorPy(Config.DemoUserName, Config.DemoRefreshToken, True)  # Connect to the demo account instead

    print('Кол-во тикеров на бирже:')
    for exchange in apProvider.exchanges:  # For every exchange
        securities = apProvider.GetSecuritiesExchange(exchange)  # All tickers listed on this exchange
        print(securities[0])
        print(f'- {exchange} {len(securities)}')
        boards = tuple(set(security['primary_board'] for security in securities))  # Distinct instrument boards (classes)
        for board in boards:  # For every board
            boardSymbols = [security for security in securities if security['primary_board'] == board]
            print(f'  - {board} {len(boardSymbols)}')

    portfolios = apProvider.GetPortfolios()  # Portfolios: stock market / futures & options / FX market
    for p in portfolios:  # For every portfolio
        portfolioName = portfolios[p][0]['portfolio']  # Portfolio name
        account = portfolios[p][0]['tks']  # Account id
        print(f'{p}: Портфель {portfolioName}, Счет {account}')
        tradeServersInfo = portfolios[p][0]['tradeServersInfo']  # Trade servers for this portfolio
        print('- Торговые серверы')
        for tradeServerInfo in tradeServersInfo:  # For every trade server
            print(f'  - {tradeServerInfo["tradeServerCode"]} для контрактов {tradeServerInfo["contracts"]}')
        for exchange in apProvider.exchanges:  # For every exchange
            print(f'- Биржа {exchange}')
            positions = apProvider.GetPositions(portfolioName, exchange, True)  # Positions, excluding the cash position
            for position in positions:  # For every open position
                symbol = position['symbol']  # Ticker
                symbolInfo = apProvider.GetSymbol(exchange, symbol)  # Ticker details (lot size, multiplier)
                size = position['qty'] * symbolInfo['lotsize']  # Quantity in units
                entryPrice = round(position['volume'] / size, 2)  # Entry price
                pl = position['unrealisedPl'] * symbolInfo['priceMultiplier']  # Unrealized (paper) profit/loss
                lastPrice = round((position['volume'] + pl) / size, 2)  # Last price
                print(f'  - Позиция {position["shortName"]} ({symbol}) {size} @ {entryPrice} / {lastPrice}')
            money = apProvider.GetMoney(portfolioName, exchange)  # Cash position
            print(f'  - Баланс {round(money["portfolio"] - money["cash"], 2)} / {money["cash"]}')
            orders = apProvider.GetOrders(portfolioName, exchange)  # Active orders
            for order in orders:  # For every active order
                print(f'  - Заявка номер {order["id"]} {"Покупка" if order["side"] == "buy" else "Продажа"} {order["exchange"]}.{order["symbol"]} {order["qty"]} @ {order["price"]}')
            stopOrders = apProvider.GetStopOrders(portfolioName, exchange)  # Active stop orders
            for stopOrder in stopOrders:  # For every active stop order
                print(f'  - Стоп заявка номер {stopOrder["id"]} {"Покупка" if stopOrder["side"] == "buy" else "Продажа"} {stopOrder["exchange"]}.{stopOrder["symbol"]} {stopOrder["qty"]} @ {stopOrder["price"]}')
| KlimShaman/trading | Examples/02 - Accounts.py | 02 - Accounts.py | py | 4,351 | python | ru | code | 0 | github-code | 90 |
27144426678 | def calculate_tax(yearly_salary):
tax_brackets = [
(0, 22000, 0.1),
(22001, 89450, 0.12),
(89451, 190750, 0.22),
(190751, 364200, 0.24),
(364201, 462500, 0.32),
(462501, 693750, 0.35),
(693750, float('inf'), 0.37)
]
tax_owed = 0
salary_remaining = yearly_salary
for bracket in tax_brackets:
bracket_min, bracket_max, tax_rate = bracket
if salary_remaining <= 0:
break
taxable_income = min(salary_remaining, bracket_max - bracket_min + 1)
tax_owed += taxable_income * tax_rate
salary_remaining -= taxable_income
return tax_owed
# Prompt for household income (whole dollars) and report the tax due.
salary = int(input("How much does your household bring in a year?: "))
tax_owed = calculate_tax(salary)
# sep="" glues the pieces together, e.g. "Total tax owed:$2200.0".
print("Total tax owed:", "$",tax_owed, sep="")
| Zrebric/Python | main.py | main.py | py | 818 | python | en | code | 0 | github-code | 90 |
2805535329 | from django.urls import path
from .import views

# URL namespace: reverse these routes as "foncier:<name>".
app_name = 'foncier'
urlpatterns = [
    path('DimFoncier/', views.DimFon, name='DimFoncier'),
    path('DimFoncierGouvernanc/', views.DFG, name='DimFoncierGouvernanc'),
    # path('DimGeographie/', views.DG, name='DimGeographie'),
    # path('FactFoncier/', views.FF, name='FactFoncier'),
    # Delete / update routes carrying the record id in the path.
    path('de_fon<int:id>/', views.delete_fon, name='de_fon'),
    path('up_fon<int:id>/', views.update_fon, name='up_fon'),
    path('de_Gouve<int:id>/', views.delete_up_Gouve, name='de_Gouve'),
    path('up_Gouve<int:id>/', views.update_up_Gouve, name='up_Gouve'),
    # path('up_geo<int:id>/', views.update_geo, name='up_geo'),
    #path('de_geo<int:id>/', views.delete_geo, name='de_geo'),
]
| ndire92/daroukhoudosse | foncier/urls.py | urls.py | py | 739 | python | fr | code | 0 | github-code | 90 |
1911866221 | class Portfolio:
    def __init__(self, asset, fiat, interest_asset = 0, interest_fiat = 0):
        """Hold *asset* units and *fiat* cash; negative amounts represent
        borrowed quantities, with accrued interest tracked separately in
        *interest_asset* / *interest_fiat*."""
        self.asset =asset
        self.fiat =fiat
        self.interest_asset = interest_asset
        self.interest_fiat = interest_fiat
def valorisation(self, price):
return sum([
self.asset * price,
self.fiat,
- self.interest_asset * price,
- self.interest_fiat
])
def real_position(self, price):
return (self.asset - self.interest_asset)* price / self.valorisation(price)
def position(self, price):
return self.asset * price / self.valorisation(price)
    def trade_to_position(self, position, price, trading_fees):
        """Rebalance toward target asset *position* (fraction of value) at *price*,
        paying proportional *trading_fees*; when the target reduces leverage,
        part of the borrowed amounts and their interest is repaid first."""
        # Repay interest
        current_position = self.position(price)
        interest_reduction_ratio = 1
        # Moving a short (<0) toward 0, or a leveraged long (>1) toward 1,
        # allows a proportional pay-down of the borrowed side.
        if (position <= 0 and current_position < 0):
            interest_reduction_ratio = min(1, position/current_position)
        elif (position >= 1 and current_position > 1):
            interest_reduction_ratio = min(1, (position-1)/(current_position-1))
        if interest_reduction_ratio < 1:
            # Settle the repaid share of interest out of holdings, keep the rest owed.
            self.asset = self.asset - (1-interest_reduction_ratio) * self.interest_asset
            self.fiat = self.fiat - (1-interest_reduction_ratio) * self.interest_fiat
            self.interest_asset = interest_reduction_ratio * self.interest_asset
            self.interest_fiat = interest_reduction_ratio * self.interest_fiat

        # Proceed to trade
        asset_trade = (position * self.valorisation(price) / price - self.asset)
        if asset_trade > 0:
            # Buying: the denominator appears to compensate for the fee being
            # deducted from the purchased asset — NOTE(review): confirm derivation.
            asset_trade = asset_trade / (1 - trading_fees + trading_fees * position)
            asset_fiat = - asset_trade * price
            self.asset = self.asset + asset_trade * (1 - trading_fees)
            self.fiat = self.fiat + asset_fiat
        else:
            # Selling: the fee is deducted from the fiat proceeds instead.
            asset_trade = asset_trade / (1 - trading_fees * position)
            asset_fiat = - asset_trade * price
            self.asset = self.asset + asset_trade
            self.fiat = self.fiat + asset_fiat * (1 - trading_fees)
def update_interest(self, borrow_interest_rate):
self.interest_asset = max(0, - self.asset)*borrow_interest_rate
self.interest_fiat = max(0, - self.fiat)*borrow_interest_rate
def __str__(self): return f"{self.__class__.__name__}({self.__dict__})"
def describe(self, price): print("Value : ", self.valorisation(price), "Position : ", self.position(price))
def get_portfolio_distribution(self):
return {
"asset":max(0, self.asset),
"fiat":max(0, self.fiat),
"borrowed_asset":max(0, -self.asset),
"borrowed_fiat":max(0, -self.fiat),
"interest_asset":self.interest_asset,
"interest_fiat":self.interest_fiat,
}
class TargetPortfolio(Portfolio):
    """Portfolio initialised so that a fraction *position* of *value* is held
    as the asset (bought at *price*) and the remainder as fiat, debt-free."""

    def __init__(self, position, value, price):
        asset_quantity = position * value / price
        fiat_amount = (1 - position) * value
        super().__init__(
            asset=asset_quantity,
            fiat=fiat_amount,
            interest_asset=0,
            interest_fiat=0
        )
| ClementPerroud/Gym-Trading-Env | src/gym_trading_env/utils/portfolio.py | portfolio.py | py | 3,092 | python | en | code | 141 | github-code | 90 |
20667958391 | import pytest
from lxml.builder import ElementMaker
from podcast_dl import rss_parsers as rspa
def _make_item(url, title, episode=None, link=None):
    """Build a minimal RSS <item> (lxml) with an enclosure, a title and,
    optionally, an itunes:episode number and a <link> element."""
    episode_ns = "http://www.itunes.com/dtds/podcast-1.0.dtd"
    E = ElementMaker(nsmap={"itunes": episode_ns})
    children = [
        E.enclosure(url=url, length="1234", type="audio/mpeg"),
        E.title(title),
    ]
    item = E.item(*children)
    if episode is not None:
        item.append(E("{" + episode_ns + "}episode", str(episode)))
    if link is not None:
        item.append(E("link", link))
    return item
# Each case: (enclosure URL, feed title, expected generated filename).
@pytest.mark.parametrize(
    "url, title, expected_filename",
    (
        (
            "https://talkpython.fm/episodes/download/0/introducing-the-show.mp3",
            "#0 Introducing the show!",
            "0000-Introducing-the-show.mp3",
        ),
        (
            "https://talkpython.fm/episodes/download/180/what-s-new-in-python-3.7-and-beyond.mp3",
            "#180 What's new in Python 3.7 and beyond",
            "0180-What-s-new-in-Python-3-7-and-beyond.mp3",
        ),
        (
            "https://pythonbytes.fm/episodes/download/95/unleash-the-py-spy.mp3",
            "#95 Unleash the py-spy!",
            "0095-Unleash-the-py-spy.mp3",
        ),
        (
            "https://pythonbytes.fm/episodes/download/3/python-3.6-is-coming-and-it-s-awesome-plus-superior-text-processing-with-pynini.mp3",
            "#3 Python 3.6 is coming, and it's awesome plus superior text processing with Pynini",
            "0003-Python-3-6-is-coming-and-it-s-awesome-plus-superior-text-processing-with-Pynini.mp3",
        ),
    ),
    ids=["ep0", "3-digits", "2-digits", "1-digit"],
)
def test_talkpython(url, title, expected_filename):
    """TalkPython/PythonBytes items: the episode number from the title is
    zero-padded to four digits and the title is slugified."""
    item = _make_item(url, title)
    assert rspa.TalkPythonItem(item).filename == expected_filename
# Each case: (enclosure URL, feed title, itunes:episode number, expected filename).
@pytest.mark.parametrize(
    "url, title, episode, expected_filename",
    (
        (
            "https://www.podcastinit.com/podlove/file/79/s/feed/c/mp3/introductory_episode.mp3",
            "Podcast.__init__ - Introduction",
            0,
            "0000-Podcast-init-Introduction.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/78/s/feed/c/mp3/Episode_1_-_Thomas_Hatch.mp3",
            "Thomas Hatch",
            1,
            "0001-Thomas-Hatch.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/69/s/feed/c/mp3/Episode_10_-_Brian_Granger_and_Fernando_Perez_of_the_IPython_Project.mp3",
            "Brian Granger and Fernando Perez of the IPython Project",
            10,
            "0010-Brian-Granger-and-Fernando-Perez-of-the-IPython-Project.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/51/s/feed/c/mp3/Episode_28_-_Kay_Hayen_-_Nuitka.mp3",
            "Kay Hayen on Nuitka",
            28,
            "0028-Kay-Hayen-on-Nuitka.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/50/s/feed/c/mp3/Episode_29__-_Anthony_Scopatz_on_Xonsh.mp3",
            "Anthony Scopatz on Xonsh",
            29,
            "0029-Anthony-Scopatz-on-Xonsh.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/84/s/feed/c/mp3/Episode-80-Sean-Gillies.mp3",
            "Python for GIS with Sean Gillies",
            80,
            "0080-Python-for-GIS-with-Sean-Gillies.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/454/s/feed/c/mp3/Episode-114-Factory-Automation-with-Jonas-Neuberg.mp3",
            "Industrial Automation with Jonas Neuberg",
            114,
            "0114-Industrial-Automation-with-Jonas-Neuberg.mp3",
        ),
        (
            "https://www.podcastinit.com/podlove/file/582/s/feed/c/mp3/Episode-135-Surprise.mp3",
            "Surprise! Recommendation Algorithms with Nicolas Hug",
            135,
            "0135-Surprise-Recommendation-Algorithms-with-Nicolas-Hug.mp3",
        ),
    ),
    ids=[
        "0",
        "first",
        "10",
        "28-underscore-multiple",
        "29-double-underscore-",
        "80-simple",
        "114-dash-only",
        "135-exclamation",
    ],
)
def test_podcastinit(url, title, episode, expected_filename):
    """Podcast.__init__ items (BaseItem): the <itunes:episode> number is
    zero-padded to four digits and combined with the slugified title."""
    item = _make_item(url, title, episode)
    assert rspa.BaseItem(item).filename == expected_filename
# Each case: (enclosure URL, feed title, episode as a STRING, expected filename).
@pytest.mark.parametrize(
    "url, title, episode, expected_filename",
    (
        (
            "https://cdn.changelog.com/uploads/podcast/1/the-changelog-1.mp3",
            "Haml, Sass, Compass",
            "1",
            "0001-Haml-Sass-Compass.mp3",
        ),
        (
            "https://cdn.changelog.com/uploads/podcast/42/the-changelog-42.mp3",
            "Rails 3.1 and SproutCore",
            "42",
            "0042-Rails-3-1-and-SproutCore.mp3",
        ),
        (
            "https://cdn.changelog.com/uploads/podcast/192/the-changelog-192.mp3",
            "Crystal: Fast as C, Slick as Ruby",
            "192",
            "0192-Crystal-Fast-as-C-Slick-as-Ruby.mp3",
        ),
        (
            "https://cdn.changelog.com/uploads/podcast/317/the-changelog-317.mp3",
            "#Hacktoberfest isn’t just about a free shirt",
            "317",
            "0317-Hacktoberfest-isnt-just-about-a-free-shirt.mp3",
        ),
    ),
    ids=["1-digit", "2-digits", "two-colons", "3-digits"],
)
def test_changelog(url, title, episode, expected_filename):
    """Changelog items: episode arrives as a string; punctuation (including the
    Unicode apostrophe) is stripped or dashed during slugification."""
    item = _make_item(url, title, episode)
    assert rspa.ChangelogItem(item).filename == expected_filename
def test_changelog_no_episode():
    """Items lacking <itunes:episode> fall back to the slug taken from <link>."""
    feed_item = _make_item(
        url="https://cdn.changelog.com/uploads/podcast/afk-jeff-bonus/the-changelog-afk-jeff-bonus.mp3",
        title="Jeff Robbins is an actual rockstar",
        episode=None,
        link="https://changelog.com/podcast/afk-jeff-bonus",
    )
    expected = "afk-jeff-bonus-Jeff-Robbins-is-an-actual-rockstar.mp3"
    assert rspa.ChangelogItem(feed_item).filename == expected
| kissgyorgy/simple-podcast-dl | tests/test_filename_parsers.py | test_filename_parsers.py | py | 5,936 | python | en | code | 51 | github-code | 90 |
30287519438 | def print_file():
with open("class.txt", mode="r", encoding="utf-8") as f:
num = 0
summ = 0
for line in f:
l = line.split(" ")
grade = int(l[2])
num += 1
summ += grade
if grade < 3:
print(line)
print("Средний балл:", summ // num)
# Run the report immediately when the module is executed.
print_file()
| nikita26078/Python-exercises | ex10/4.py | 4.py | py | 368 | python | en | code | 0 | github-code | 90 |
23943886195 | # fungsi type() untuk mengetahui type-type data
a = 10      # int literal
b = "bejo"  # str literal
c = 17.5    # float literal
d = True    # bool literal

print("Nilai data a adalah",type(a))
print("NIlai data b adalah",type(b))
print("Nilai data c adalah",type(c))
print("Nilai data d adalah", type(d))

## Python-specific numeric types
# complex number, e.g. 5 + 6i
data_complex = complex(5,6)

# C-compatible double via ctypes.
# Bug fix: ctypes simple types take a single initializer, so c_double(10,5)
# raised TypeError — "10,5" was meant as the decimal value 10.5.
from ctypes import c_double
data_c_double = c_double(10.5)
| Mfadlyp/Python_Basic | 2. Tipe data/Main.py | Main.py | py | 497 | python | id | code | 0 | github-code | 90 |
18804705682 | import torch
import torch.nn as nn
import torch.nn.functional as F
from model.model_utils import _get_padding_mask, _get_visibility_mask
from cadlib.macro import CMD_ARGS_MASK
class CADLoss(nn.Module):
    """Cross-entropy loss over predicted CAD command sequences: one term for the
    command type and one for its masked arguments, each scaled by cfg.loss_weights."""
    def __init__(self, cfg):
        super().__init__()

        self.n_commands = cfg.n_commands
        # +1 extends the class range so the -1 PAD value can be shifted to 0.
        self.args_dim = cfg.args_dim + 1
        self.weights = cfg.loss_weights

        # Per-command mask of which argument slots are meaningful (from cadlib).
        self.register_buffer("cmd_args_mask", torch.tensor(CMD_ARGS_MASK))

    def forward(self, output):
        """Return {"loss_cmd", "loss_args"} computed from a dict carrying targets
        (tgt_commands, tgt_args) and predictions (command_logits, args_logits).
        NOTE(review): tensors are assumed (batch, seq, ...)-shaped with the
        sequence on the masked dim — confirm against the model."""
        # Target & predictions
        tgt_commands, tgt_args = output["tgt_commands"], output["tgt_args"]

        # Restrict the loss to visible, non-padding sequence positions.
        visibility_mask = _get_visibility_mask(tgt_commands, seq_dim=-1)
        padding_mask = _get_padding_mask(tgt_commands, seq_dim=-1, extended=True) * visibility_mask.unsqueeze(-1)

        command_logits, args_logits = output["command_logits"], output["args_logits"]

        # Only argument slots that are meaningful for each target command count.
        mask = self.cmd_args_mask[tgt_commands.long()]

        loss_cmd = F.cross_entropy(command_logits[padding_mask.bool()].reshape(-1, self.n_commands), tgt_commands[padding_mask.bool()].reshape(-1).long())
        loss_args = F.cross_entropy(args_logits[mask.bool()].reshape(-1, self.args_dim), tgt_args[mask.bool()].reshape(-1).long() + 1) # shift due to -1 PAD_VAL

        loss_cmd = self.weights["loss_cmd_weight"] * loss_cmd
        loss_args = self.weights["loss_args_weight"] * loss_args

        res = {"loss_cmd": loss_cmd, "loss_args": loss_args}
        return res
| ChrisWu1997/DeepCAD | trainer/loss.py | loss.py | py | 1,456 | python | en | code | 167 | github-code | 90 |
35351353057 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import json
import requests
from datetime import datetime,date,timedelta
import time
from time import sleep
import winsound
import pyttsx3
engine=pyttsx3.init()
engine.setProperty('rate',250)
engine.setProperty('volume',1.0)
def check_in_file(di):
    """Identity placeholder: returns *di* unchanged.
    NOTE(review): presumably meant to filter slots already logged in
    alreadysent.csv — confirm; no filtering happens yet."""
    return di
def send_whatsapp_message(event):
    """Open WhatsApp Web in Chrome via Selenium and send every slot dict in
    *event* (plus the CoWIN registration link) to each chat named in x.
    Always logs what was sent to alreadysent.csv and quits the browser."""
    x=["me"]
    # Reuse a persistent Chrome profile so the WhatsApp Web session stays logged in.
    PATH="C:\Program Files (x86)\chromedriver.exe"
    options = webdriver.ChromeOptions()
    options.add_argument('--user-data-dir=C:/Users/sreyans/yo/User_Data')
    print(event)
    driver = webdriver.Chrome(executable_path=PATH,options=options)
    driver.get('https://web.whatsapp.com/')
    try:
        #wait for max 200s
        #whatsapp loads but the search button does not appear and hence WebDriverWait to wait until search loads
        initi = WebDriverWait(driver, 200).until(
            EC.presence_of_element_located((By.XPATH, "/html/body/div[1]/div/div/div[3]/div/div[1]/div/label/div/div[2]")))
        #initi = WebDriverWait(driver, 200).until(EC.presence_of_element_located((By.CLASS_NAME, "C28xL")))
        for target in x:
            #print("Wishing",target,"on their",event)
            # Find the chat via the search box and open it with ENTER.
            input_box_search=driver.find_element_by_xpath('/html/body/div[1]/div/div/div[3]/div/div[1]/div/label/div/div[2]')
            input_box_search.click()
            input_box_search.send_keys(target,Keys.ENTER)
            print("Target Successfully Selected")
            sleep(1)
            inp_xpath = "/html/body/div[1]/div/div/div[4]/div/footer/div[1]/div[2]/div/div[2]"
            input_box = WebDriverWait(driver,20).until(EC.presence_of_element_located((
                By.XPATH, inp_xpath)))
            sleep(0.1)
            # One stringified slot dict per line; SHIFT+ENTER inserts a newline
            # without sending the message.
            for string in event:
                input_box.send_keys(str(string))
                sleep(0.01)
                input_box.send_keys(Keys.SHIFT+Keys.ENTER)
            input_box.send_keys("https://selfregistration.cowin.gov.in")
            input_box.send_keys(Keys.ENTER)
            sleep(1)
            print("Successfully Send Message to : "+ target + '\n')
        print("DONE")
    except Exception as E:
        print(E)
    finally:
        # Record every slot we attempted to send, even on failure.
        print("DONE all")
        f=open("alreadysent.csv","a")
        fin=[]
        for i in event:
            f.write(i["D"]+","+i["N"]+","+str(i["Cap"])+","+i["V"]+"\n")
        f.close()
        # When a QR-code re-scan is needed, quit the browser manually
        # instead of relying on driver.quit().
        driver.quit()
# Poll the public CoWIN API forever: check two consecutive days for open
# COVAXIN 18+ dose-1 slots in district 294, announce hits aloud and on WhatsApp.
while(True):
    t=date.today()#+timedelta(days=1)
    k=time.localtime()
    # After 17:00 local time, start from tomorrow's sessions instead of today's.
    if k.tm_hour>=17:
        t=t+timedelta(days=1)
    #print(t)
    #winsound.Beep(750,800)
    #params1={"district_id":294,"date":t}
    #params2={"district_id":265,"date":t}
    keyval=0
    # Browser-like User-Agent: the API rejects the default requests header.
    headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36 Edg/90.0.818.51"}
    flag=2
    di=[]
    while(flag):
        f=t.strftime("%d-%m-%Y")
        print(f)
        params2={"district_id":294,"date":f}
        response=requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByDistrict",headers=headers,params=params2)
        print(response)
        final=response.json()
        #print(final)
        try:
            if final["sessions"]:
                for i in final["sessions"]:
                    if i["vaccine"]=="COVAXIN" and i['min_age_limit']==18 and i['available_capacity_dose1'] :
                        print(i["name"],i["pincode"])
                        # Speak the centre name, last three pincode digits and capacity.
                        # NOTE(review): i["pincode"][-3] assumes the pincode is a
                        # string — an int would raise TypeError; confirm API type.
                        engine.say((i["name"],i["pincode"][-3],i["pincode"][-2],i["pincode"][-1],i['available_capacity_dose1']))
                        engine.runAndWait()
                        di.append(({"P":i["pincode"],"D":f,"N":i['name'],"Cap":i['available_capacity_dose1'],"Add":i['address'],"F":i['fee'],"V":i["vaccine"],"S":i["session_id"],"sl":i["slots"]}))
                        #print(di)
        except:
            # Beep on any error (missing keys, malformed response) and move on.
            winsound.Beep(600,800)
        t=t+timedelta(days=1)
        flag-=1
    if di:
        send_whatsapp_message(di)
    sleep(15)
    winsound.Beep(600,800)
| sreyansb/Vaccine_notifier | api_in_python.py | api_in_python.py | py | 4,427 | python | en | code | 6 | github-code | 90 |
38107239073 | import pandas as pd
from magicPlanAPI import MagicPlanAPI
class DataCentre:
    def __init__(self):
        """
        Simulates a cache: loads stored values from JSON files into memory at
        start-up and connects to the magicplan API.
        """
        self.paths = {
            'plans': 'resources/data/plans.json',
            'credentials': 'resources/data/credentials.json',
            'users': 'resources/data/users.json'
        }
        self.data = dict()
        self.load_data()
        self.connect_to_api()

    def get_search_plans(self, search):
        """
        Returns all plans whose name contains the search term (case-sensitive).
        If no search term is provided all the plans get returned.
        :param search: search term or None
        :return: dict mapping plan id -> plan name
        """
        if search is None:
            return self.get_plans()
        # NOTE(review): Series.str.contains treats the term as a regex by
        # default, so special characters in *search* are interpreted — confirm.
        matching = self.data['plans'][self.data['plans']['name'].str.contains(search)]
        return {plan['id']: plan['name'] for _, plan in matching.iterrows()}

    def load_data(self):
        """
        Loads every file in self.paths into self.data; missing files are
        reported and skipped.
        """
        for datasource in self.paths:
            try:
                self.data[datasource] = pd.read_json(self.paths[datasource])
            except FileNotFoundError:
                print("Die Datei konnte nicht geöffnet werden.")

    def get_plans(self):
        """
        Returns all plans.
        :return: dict mapping plan id -> plan name
        """
        return {plan['id']: plan['name'] for _, plan in self.data['plans'].iterrows()}

    def reload_plans(self):
        """
        Reloads plans by pulling them from the magicplan API.
        :return: dict mapping plan id -> plan name
        """
        self.data['plans'] = pd.DataFrame(self.magic_api.get_projects(as_json=True))
        return self.get_plans()

    def save_data(self):
        """
        Writes the in-memory data back to the JSON files.
        """
        for datasource in self.paths:
            with open(self.paths[datasource], 'w') as f:
                f.write(self.data[datasource].to_json(indent=2))
                print("{}\t -> \t{}".format(datasource, self.paths[datasource]))

    def connect_to_api(self):
        """
        Initiates a MagicPlanAPI instance, with stored credentials when available.
        Bug fix: the data key was misspelled 'credenitals', so saved credentials
        were silently ignored and the anonymous fallback always ran.
        """
        try:
            customer_id = self.data['credentials']['customerID'][0]
            private_key = self.data['credentials']['private_key'][0]
            user_email = self.data['credentials']['user_email'][0]
            self.magic_api = MagicPlanAPI(customerID=customer_id, private_key=private_key, user_email=user_email)
        except Exception:
            # No usable credentials on disk: fall back to an unauthenticated handle.
            self.magic_api = MagicPlanAPI()
| jkleinau/aufmassConverterPy | dataCentre.py | dataCentre.py | py | 3,146 | python | en | code | 0 | github-code | 90 |
31381940634 | from socket import *
from time import *
import os
host = ''  # bind on all interfaces
port = 520
bufsize = 1024  # receive buffer size in bytes
addr = (host, port)
sersock = socket(AF_INET, SOCK_STREAM)  # TCP server socket
sersock.bind(addr)
sersock.listen(5)  # allow up to 5 pending connections
def getstatus(cmd):
    """Run *cmd* through a shell and return (captured_output, exit_status);
    the status is None when the command succeeded."""
    pipe = os.popen(cmd)
    output = pipe.read()
    status = pipe.close()
    return output, status
# Accept one client at a time; run each received line as a shell command and
# send back its output (or an error marker) until the client disconnects.
while True:
    print("waiting for connection...")
    clisock, addr = sersock.accept()
    print("connected from :", addr)
    while True:
        data = clisock.recv(bufsize)
        if not data:
            break
        # NOTE(review): this appears to target Python 2 — on Python 3 recv()
        # returns bytes (os.popen needs str) and send() requires bytes, so
        # both calls below would need explicit encoding/decoding.
        text, status = getstatus(data.strip())
        if not status:
            clisock.send(text)
        else:
            clisock.send('Eroor eyna')
    clisock.close()
# Unreachable: the outer loop never ends, so the listener is never closed.
sersock.close()
| xahiddin/MyPython | sockets/server.py | server.py | py | 733 | python | en | code | 0 | github-code | 90 |
72555633897 | # Chapter 9 Case study: Word play > Think python
import csv
import textwrap
# Demonstrate readline(): the first call consumes line 1 of the word list.
fin = open('words.txt')
fin.readline()
# Leftover from an interactive session: a bare string literal with no effect.
'a\ar\n'
line = fin.readline()
word = line.strip()  # drop the trailing newline
print(word)
'''
fin = open('words.txt')
for line in fin:
word = line.strip()
print(word)
'''
# Inner exercises
# 9-1
'''
fin = open('words.txt')
for line in fin:
word = line.strip()
if len(word) > 20:
print (word)
'''
# 9-2
'''
fin = open('words.txt')
def has_no_e(word):
for char in word:
if char in 'Ee':
return False
return True
count = 0
for line in fin:
word = line.strip()
if has_no_e(word):
count += 1
print (word)
percent = (count / 113809.0) * 100
print (str(percent)) + "% of the words don't have an 'e'."
'''
# 9-3
'''
fin = open('words.txt')
def avoids(word,letter):
for char in word:
if char in letter:
return False
return True
letter = raw_input('What letters to exclude? ')
count = 0
for line in fin:
word = line.strip()
if avoids(word, letter):
count += 1
print word
percent = (count / 113809.0) * 100
print (str(percent) + "% of the words don't have " + letter + '.')
'''
# 9-4
'''
def uses_only(word, letters):
"""returns true if word is made only out of letters else flase"""
for letter in word:
if letter not in letters:
return False
return True
'''
# Search
'''
def has_no_e(word):
for letter in word:
if letter == 'e':
return False
return True
def avoids(word, forbidden):
for letter in word:
if letter in forbidden:
return False
return True
def uses_only(word, available):
for letter in word:
if letter not in available:
return False
return True
def uses_all(word, required):
for letter in required:
if letter not in word:
return False
return True
'''
# Program development plan called reduction to a previously
# solved problem
'''
def uses_all(word, required):
return uses_only(required, word)
'''
# Looping with Indeces
'''
def is_abecedarian(word):
previous = word[0]
for c in word:
if c < previous:
return False
previous = c
return True
'''
# An alternative recursion would be:
'''
def is_abecedarian(word):
if len(word) <= 1:
return True
if word[0] > word[1]:
return False
return is_abecedarian(word[1:])
'''
# Using a while loop
'''
def is_abecedarian(word):
i = 0
while i < len(word)-1:
if word[i+1] < word[i]:
return False
i = i+1
return True
print(is_abecedarian(word))
'''
# Palindrome
'''
def is_palindrome(word):
i = 0
j = len(word)-1
while i<j:
if word[i] != word[j]:
return False
i = i+1
j = j-1
return True
# Or reduced by
def is_palindrome(word):
return is_reverse(word, word)
'''
# Exercise 9-7
def is_triple_double(word):
    """Return True if word contains three consecutive double letters.

    word: string
    returns: bool
    """
    # Three consecutive double letters occupy exactly six characters:
    # pairs at offsets 0, 2 and 4 of some window.  Sliding a 6-wide
    # window over the word checks every possible starting position.
    for start in range(len(word) - 5):
        window = word[start:start + 6]
        if all(window[k] == window[k + 1] for k in (0, 2, 4)):
            return True
    return False
def find_triple_double():
    """Read the word list and print every word that has three
    consecutive double letters.
    """
    # BUG FIX: the original opened 'words.txt' and never closed it;
    # 'with' guarantees the handle is released even if printing fails.
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip()
            if is_triple_double(word):
                print(word)
# Exercise 9-7 driver: announce and run the triple-double word search.
print('Here are all the words in the list that have')
print('three consecutive double letters.')
find_triple_double()
print('')
# Exercise 9-8
def has_palindrome(i, start, length):
    """Return True if the chosen slice of str(i) is a palindrome.

    i: integer whose decimal digits are examined
    start: index of the first digit of the slice
    length: number of digits in the slice
    """
    digits = str(i)
    segment = digits[start:start + length]
    return segment == ''.join(reversed(segment))
def check(i):
    """Return True if odometer reading i satisfies all four clues.

    i: int six-digit odometer value
    """
    # Each clue: (offset added to i, slice start, palindrome length).
    clues = ((0, 2, 4), (1, 1, 5), (2, 1, 4), (3, 0, 6))
    return all(has_palindrome(i + step, start, length)
               for step, start, length in clues)
def check_all():
    """Enumerate the six-digit numbers and print any winners."""
    # 999996 is the last reading that leaves room for i+3 clues,
    # hence the exclusive bound 999997.
    for reading in range(100000, 999997):
        if check(reading):
            print(reading)
# Exercise 9-8 driver: search the odometer puzzle's full range.
print('The following are the possible odometer readings:')
check_all()
print()
# Exercise 9-9
def str_fill(i, n):
    """Return i as a string left-padded with zeros to at least n digits.

    i: int
    n: int minimum width
    returns: string
    """
    # zfill keeps a leading minus sign ahead of the padding zeros.
    text = str(i)
    return text.zfill(n)
def are_reversed(i, j):
    """Return True if the two-digit forms of i and j are mirror images.

    i: int
    j: int
    returns: bool
    """
    # Zero-padding to two digits makes e.g. 1 and 10 count as
    # reverses ('01' vs '10').
    forward = str(i).zfill(2)
    backward = str(j).zfill(2)[::-1]
    return forward == backward
def num_instances(diff, flag=False):
    """Count the number of palindromic ages.

    Returns the number of times the mother and daughter have
    palindromic ages in their lives, given the difference in age.

    diff: int difference in ages
    flag: bool, if True, prints each qualifying (daughter, mother) pair
    """
    # The scan stops after processing the first daughter age whose
    # mother exceeds 120, so the last age examined is max(0, 121 - diff).
    last_daughter = max(0, 121 - diff)
    count = 0
    for daughter in range(last_daughter + 1):
        mother = daughter + diff
        # Assuming mother and daughter don't share a birthday, each
        # year offers two chances: mother at `mother` or `mother + 1`.
        if are_reversed(daughter, mother) or are_reversed(daughter, mother + 1):
            count += 1
            if flag:
                print(daughter, mother)
    return count
def check_diffs():
    """Find age differences that satisfy the problem.

    Enumerates the possible differences in age between mother and
    daughter and, for each difference with at least one palindromic-age
    moment, prints the difference and the count.
    """
    for diff in range(10, 70):
        instances = num_instances(diff)
        if instances > 0:
            print(diff, instances)
# Exercise 9-9 driver: tabulate all qualifying age differences, then
# print the detailed pairs for a difference of 18.
print('diff #instances')
check_diffs()
print()
print('daughter mother')
num_instances(18, True)
| joakor89/Think-Python | chapter_9.py | chapter_9.py | py | 6,283 | python | en | code | 0 | github-code | 90 |
18543226999 | a,b,c,x,y = map(int,input().split())
# Greedy cost minimisation: an A-item costs `a`, a B-item costs `b`,
# and a pair of `c`-items (price 2*c) substitutes for one A plus one B.
# Demand is x A-items and y B-items.
ans = 0
# Cheapest way to obtain one A together with one B.
ab = min(a+b,c*2)
# Cover the overlapping demand min(x, y) with the cheaper pairing.
temp = min(x,y)
ans += ab*temp
x -= temp
y -= temp
# Leftover demand is all A or all B; a 2*c pair can still stand in
# for a single item when that beats the direct price.
ans += min(a,c*2)*x
ans += min(b,c*2)*y
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03371/s621425941.py | s621425941.py | py | 165 | python | en | code | 0 | github-code | 90 |
34371232724 | import os
from flask import Flask, render_template, request
import memcache
# Connect to a local memcached instance used as a tiny inverted index.
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
# Papers: key = title, value = "author,year,source/id".
mc.set("Issledovanie_matematiki", "Ivanov,2000,publons/1")
mc.set("Issledovanie_fiziki", "Petrov,2001,publons/2")
mc.set("Issledovanie_himii", "Ivanov,2002,pubmed/1")
mc.set("Issledovanie_literatury", "Petrov,2001,elibrary/1")
mc.set("Issledovanie_BZD", "Sidorov,2001,elibrary/1")
# Author index: key = surname, value = comma-separated paper titles.
mc.set("Ivanov", "Issledovanie_matematiki,Issledovanie_himii")
mc.set("Petrov", "Issledovanie_fiziki,Issledovanie_literatury")
mc.set("Sidorov", "Issledovanie_BZD")
# Year index: key = publication year, value = comma-separated titles.
mc.set("2000", "Issledovanie_matematiki")
mc.set("2001", "Issledovanie_fiziki,Issledovanie_literatury,Issledovanie_BZD")
mc.set("2002", "Issledovanie_himii")
# Source index: key = publication source, value = comma-separated titles.
mc.set("publons", "Issledovanie_matematiki,Issledovanie_fiziki")
mc.set("pubmed", "Issledovanie_himii")
mc.set("elibrary", "Issledovanie_literatury,Issledovanie_BZD")
app = Flask(__name__)
@app.route('/', methods=["GET", "POST"])
def main():
    """Serve the search form; on POST, intersect the index sets for the
    filled-in criteria and render the matching papers' source links.
    """
    if request.method == "POST":
        # Gather the search terms in a fixed order; empty fields are
        # simply ignored below.
        terms = [str(request.form[field]) for field in ("SOURCE", "FIO", "YEAR")]
        res_set = None
        for term in terms:
            if term == '':
                continue
            # A missing key makes mc.get return None -> {"None"},
            # matching the original behaviour for unknown terms.
            matches = set(str(mc.get(term)).split(','))
            # Intersect across every provided criterion.
            res_set = matches if res_set is None else res_set & matches
        # BUG FIX: the original always read source_res_set, so leaving
        # the SOURCE field empty raised a NameError (HTTP 500).  Now
        # any subset of criteria works; with none given we return an
        # empty result list instead of crashing.
        results = []
        if res_set is not None:
            for key in res_set:
                # Stored value is "author,year,source/id"; show the
                # trailing source/id segment as the result link.
                results.append(str(mc.get(key)).split(',')[-1])
        return render_template("results.html", result=results)
    else:
        return render_template("main.html")
# Start the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(host="localhost")
| moevm/nosql2h21-papers-memcached | main.py | main.py | py | 2,140 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.