seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
70226513299 | from django.conf.urls import url
from django.views.generic.base import RedirectView, TemplateView
from .views import *
# Route table for the "roo" app.
# NOTE(review): most patterns below are unanchored (no leading '^'), so they
# match anywhere inside the requested path — confirm this is intentional.
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='/roo/courses/', permanent=False), name='index'),
    # NOTE(review): no trailing '/$', and the name "detail" is shared with the
    # expertise route below; reverse('detail') resolves to only one of them —
    # verify which is intended.
    url(r'^(?P<pk>\d+)', CourseUpdate.as_view(), name="detail"),
    url(r'^expertise/(?P<pk>\d+)/$', ExpertiseUpdate.as_view(), name="detail"),
    url(r'course_json/(?P<course_id>\d+)/$', course_json, name='course_json'),
    url(r'send_course/(?P<course_id>\d+)/$', send_course, name='send_course'),
    url(r'description/(?P<course_id>\d+)/$', show_description, name='show_description'),
    url(r'^merge/(?P<pk_1>\d+)/(?P<pk_2>\d+)/$', merge, name="merge"),
    url(r'data/$', data, name='data'),
    url(r'some_view/', some_view, name='some_view'),
    url(r'visible_columns_courses/', visible_columns_courses, name='visible_columns_courses'),
    url(r'visible_columns_expertises/', visible_columns_expertises, name='visible_columns_expertises'),
    url(r'courses/', courses_list, name='courses_list'),
    # url(r'expertises/', expertises, name='expertises'),
    # NOTE(review): the next two routes map different paths to the same view
    # under the same name, so reverse('expertises_list') always builds the
    # first one — confirm 'expertises_new' should not have its own name.
    url(r'expertises/', expertises_list, name='expertises_list'),
    url(r'expertises_new/', expertises_list, name='expertises_list'),
    url(r'expertises_edit/', expertises_edit, name='expertises_edit'),
    url(r'data/get_active_tasks/$', get_active_tasks, name='get_active_tasks'),
    url(r'upload_json/', upload_comments, name='upload_comments'),
    url(r'courses_edit/', courses_edit, name='courses_edit'),
    url(r'courses_list/', courses_list, name='courses_list'),
    url(r'create_expertise/(?P<course_id>\d+)/$', new_expertise, name='create_expertise'),
    url(r'create_teacher/', TeacherCreate.as_view(), name='create_teacher'),
    url(r'create_course/', CourseCreate.as_view(), name='create_course'),
    url(r'update_course/', TableCourseUpdate, name='update_course'),
    url(r'exs/', expertises_json, name='expertises_json'),
    url(r'update_expertise/', TableExpertiseUpdate, name='update_expertise'),
    url(r'^close/$', TemplateView.as_view(template_name='roo/close.html')),
]
| ITOO-UrFU/openedu | apps/roo/urls.py | urls.py | py | 2,103 | python | en | code | 0 | github-code | 13 |
33690512296 | import numpy as np
class PSO:
    """Particle Swarm Optimization over 2-D positions.

    Expects a ``params`` dict with:
      n             -- number of particles
      omega         -- inertia weight
      a1, a2        -- cognitive and social acceleration coefficients
      X, V          -- positions and velocities, numpy arrays of shape (n, 2)
      X_hat         -- per-particle best positions, shape (n, 2)
      g_hat         -- swarm-best position (same row shape as one particle)
      objective_fn  -- callable mapping a position array to a scalar score
      maximize      -- True to maximize objective_fn, False to minimize
      range         -- optional ((x_min, x_max), (y_min, y_max)) clipping box,
                       or a falsy value to disable clipping
    """

    def __init__(self, params):
        """Store hyper-parameters and initial swarm state from ``params``."""
        self.n = params['n']
        self.omega = params['omega']
        self.a1 = params['a1']
        self.a2 = params['a2']
        self.X = params['X']
        self.V = params['V']
        self.X_hat = params['X_hat']
        self.g_hat = params['g_hat']
        self.objective_fn = params['objective_fn']
        self.maximize = params['maximize']
        # flip turns minimization into maximization so step() can always
        # compare with ">".
        self.flip = 1 if self.maximize else -1
        if params['range']:
            self.clip = True
            self.x_lim = params['range'][0]
            # BUG FIX: the y limits were read from params['range'][0], which
            # silently clipped the y axis with the x-axis bounds.
            self.y_lim = params['range'][1]
        else:
            self.clip = False

    def step(self):
        """Advance the swarm one iteration.

        Updates velocities and positions, optionally clips positions to the
        configured box, then refreshes the per-particle and global bests.
        """
        for i in range(self.n):
            # NOTE(review): r1/r2 are fixed at 0.5 instead of being drawn
            # uniformly at random, making the update deterministic — confirm
            # this is intended (kept as-is to preserve behavior).
            r1, r2 = 0.5, 0.5
            self.V[i] = self.omega*self.V[i]+self.a1*r1 * \
                (self.X_hat[i] - self.X[i]) + self.a2*r2*(self.g_hat-self.X[i])
            self.X[i] = self.X[i] + self.V[i]
        if self.clip:
            self.X[:, 0] = np.clip(self.X[:, 0], *self.x_lim)
            self.X[:, 1] = np.clip(self.X[:, 1], *self.y_lim)
        for i in range(self.n):
            cur_obj = self.flip*self.objective_fn(self.X[None, i])
            if cur_obj > self.flip*self.objective_fn(self.X_hat[None, i]):
                self.X_hat[i] = self.X[i]
            if cur_obj > self.flip*self.objective_fn(self.g_hat):
                self.g_hat = self.X[None, i]
| DavidLeeftink/NaturalComputing2021 | Assignment_2/Code/pso.py | pso.py | py | 1,397 | python | en | code | 0 | github-code | 13 |
13030508085 | N, M, Q = map(int, input().split())
# Board state: N rows, each with M integer slots.
dart = [list(map(int, input().split())) for _ in range(N)]
# Q rotation queries, each (x, d, k): row selector, direction, distance.
spin = [list(map(int, input().split())) for _ in range(Q)]
def spin_dart(x, d, k):
    """Rotate every x-th row (1-based) of the global dart board by k slots.

    d == 0 shifts values k positions to the right (indices wrap modulo M),
    any other d shifts them k positions to the left. Mutates ``dart`` in place.
    """
    for row in range(N):
        if (row + 1) % x != 0:
            continue
        offset = k if d == 0 else -k
        rotated = [0] * M
        for col in range(M):
            rotated[(col + offset) % M] = dart[row][col]
        dart[row] = rotated
def remove_number():
    """Erase every number that touches an equal neighbor; otherwise normalize.

    Neighbors are the two cells on the same row (columns wrap modulo M) and
    the vertically adjacent cells in rows above/below (no wrap). Matched
    cells are set to -1 (empty). If nothing matched, normalize() is called
    instead, per the game rules.
    """
    flag = 0
    check = [[0]*M for _ in range(N)]
    for i in range(N):
        for j in range(M):
            if dart[i][j] == -1:
                # Empty slot: never participates in matching.
                continue
            if dart[i][j] == dart[i][(j-1) % M]:
                check[i][j] = check[i][(j-1) % M] = flag = 1
            if dart[i][j] == dart[i][(j+1) % M]:
                check[i][j] = check[i][(j+1) % M] = flag = 1
            if i == 0:
                if dart[0][j] == dart[1][j]:
                    check[0][j] = check[1][j] = flag = 1
            elif i == N-1:
                if dart[N-1][j] == dart[N-2][j]:
                    # BUG FIX: the original wrote check[N-1][j] twice and
                    # never marked the matching cell in row N-2 here (it was
                    # only covered incidentally by the i == N-2 iteration).
                    check[N-1][j] = check[N-2][j] = flag = 1
            else:
                if dart[i][j] == dart[i-1][j]:
                    check[i][j] = check[i-1][j] = flag = 1
                if dart[i][j] == dart[i+1][j]:
                    check[i][j] = check[i+1][j] = flag = 1
    if flag:
        # At least one match: clear every marked cell.
        for i in range(N):
            for j in range(M):
                if check[i][j]:
                    dart[i][j] = -1
    else:
        normalize()
def normalize():
    """Pull every surviving number one step toward the board's floor-average.

    Computes mean = total // count over all non-empty cells, then decrements
    values above the mean and increments values below it, in place.
    """
    total = 0
    count = 0
    for row in dart:
        for value in row:
            if value != -1:
                total += value
                count += 1
    mean = total // count
    for i in range(N):
        for j in range(M):
            value = dart[i][j]
            if value == -1:
                continue
            if value > mean:
                dart[i][j] = value - 1
            elif value < mean:
                dart[i][j] = value + 1
# Run every query: rotate the selected rows, then resolve matches (or
# normalize when nothing matched).
for x,d,k in spin:
    spin_dart(x,d,k)
    remove_number()
# The answer is the sum of all numbers remaining on the board.
answer=0
for i in range(N):
    for j in range(M):
        if dart[i][j]!=-1:
            answer+=dart[i][j]
print(answer) | chaeheejo/algorithm | samsung_previous/weird_dart_game.py | weird_dart_game.py | py | 2,118 | python | en | code | 0 | github-code | 13 |
71595941779 | # Type help("robolink") or help("robodk") for more information
# Documentation: https://robodk.com/doc/en/RoboDK-API.html
# Reference: https://robodk.com/doc/en/PythonAPI/index.html
# Note: It is not required to keep a copy of this file, your python script is saved with the station
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
import time # Standard time library
import re # Imports the regex expressions
######################################################### FUNCTIONS #########################################################
def extruder_offline(robot, arg):
    """Toggle the extruder digital output on the robot controller.

    arg must be 'START' (output high) or 'STOP' (output low); any other
    value only prints a usage hint and leaves the output untouched.
    """
    if arg not in ('START', 'STOP'):
        print('The input of extruder() has to be either START or STOP')
        return
    robot.setDO('$OUT[16]', 'True' if arg == 'START' else 'False')
def parser(filename):
    """Parse a gcode file into a list of [X, Y, layer, printing] waypoints.

    A line containing "LAYER:" bumps the layer counter; a "G1" move line is
    kept only when both an X and a Y coordinate (with decimals) are present.
    The trailing True marks the waypoint as an extruding move.
    """
    waypoints = list()
    layer = 0
    with open(filename) as gcode:
        for raw_line in gcode:
            text = raw_line.strip()
            if re.findall("LAYER:", text):
                # New layer marker: raise the print height and move on.
                layer += 1
                continue
            if not re.findall("G1", text):
                continue
            coord = re.findall(r'[XY].?\d+.\d+', text)
            if len(coord) != 2:
                continue
            x_value = re.findall('\d*\.?\d+', coord[0])[0]
            y_value = re.findall('\d*\.?\d+', coord[1])[0]
            waypoints.append([float(x_value), float(y_value), layer, True])
    return waypoints
########################################################### MAIN ###########################################################
# Offline 3D-printing driver: connects to RoboDK, traces the task frame,
# purges the extruder, then replays the parsed gcode path move by move.
# ROBODK INITIALIZATION
RDK = Robolink() # sys.path.insert(0, "C:\ProgramFiles\RoboDK\Python")
robot = RDK.Item('KUKA KR 6 R700 sixx') # Create the robot instance
RDK.setSimulationSpeed(1) # ADJUSTABLE: Simulation speed (1=default)
reference = robot.Parent() # Retrieve the robot reference frame
robot.setPoseFrame(reference) # Use the robot base frame as active reference
home = [0, -90, 90, 0, 0, 0] # Setup the home joint position
robot.MoveJ(home) # Move to home in RoboDK
robot.setSpeed(speed_linear=100) # ADJUSTABLE: Change the printing speed here
layer_height = 0.55 # ADJUSTABLE: edit the layer height
path = parser('desired/path/to/gcode') # ADJUSTABLE: Parse the gcode
item_frame = RDK.Item('task_frame') # ADJUSTABLE Load the task_frame from RoboDK
home_joints = [-5.520000, -107.550000, 115.910000, 1.620000,
               37.390000, -0.2400006] # ADJUSTABLE Orient robot with joints to P1
robot.setFrame(item_frame) # Set the robot frame
robot.MoveJ(home_joints) # Move to home position (First move must be in joints)
# TOOL ORIENTATION + INVERSE KINEMATICS
orient_frame2tool = invH(item_frame.Pose())*robot.SolveFK(home_joints) # Homogeneous matrix * joint orientation
orient_frame2tool[0:3, 3] = Mat([0, 0, 0]) # Zero the translation part; keep only the rotation
# TASK_FRAME CHECK
# Trace the 70x70 mm print-bed square so the operator can verify the frame.
robot.MoveL(transl([0, 0, 0])*orient_frame2tool) # Move to the first corner
robot.MoveL(transl([0, 70, 0])*orient_frame2tool) # Move to the second corner
robot.MoveL(transl([70, 70, 0])*orient_frame2tool) # Move to the third corner
robot.MoveL(transl([70, 0, 0])*orient_frame2tool) # Move to the forth corner
robot.MoveL(transl([0, 0, 0])*orient_frame2tool) # Move back to the start
# GET RID OF EXTRA FILAMENT (CAN BE REMOVED)
robot.MoveL(transl([0, 0, 0])*orient_frame2tool) # Printing blob
time.sleep(2) # Used to leave the blob
robot.MoveL(transl([0, 0, 3])*orient_frame2tool) # Move up to get rid of the filament
# PRINTING
RDK.setSimulationSpeed(1) # ADJUSTABLE, set to default speed
printing_status = False # Default value for printing status
for item in path: # Go through each coordinate from the parsed GCode file
    if item[3] == False and printing_status == True: # If we want to stop extrusion which is running right now
        extruder_offline(robot, "STOP") # Function to stop extrusion is called
        printing_status = False # Set the printing status to false
    elif item[3] == True and printing_status == False: # If we want to start printing and we are not
        extruder_offline(robot, "START") # Call the function to start extruding
        printing_status = True # Set the printing status to true
    target_point = [item[0], item[1], layer_height*item[2]+0.15] # ADJSUTABLE, call the coord to go to, 0.55 is the layer heigh
    target0 = transl(target_point)*orient_frame2tool # Set the target point with correct rotation of the extruder
    robot.MoveL(target0, blocking=False) # Move the robot to target
robot.MoveJ(home) # Move back home at the end of program
30754654555 | import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
import tensorflow as tf
# Transfer-learning setup: a frozen MobileNetV2 backbone with a small dense
# head, trained on images under `path` (one sub-directory per class).
path = '/home/christian/Documents/ARTIFICIAL/TRAIN/'
img_size = 224
channels = 3
batch_size = 64
# Rescale pixels to [0, 1]; 20% of the data is held out for validation.
datagen = ImageDataGenerator(
    rescale=1./255,
    validation_split = 0.2
)
train_generator = datagen.flow_from_directory(path,
    target_size=(img_size,img_size),
    color_mode="rgb",
    batch_size=batch_size,
    class_mode='categorical',
    subset = 'training',
    shuffle=True
)
validation_generator = datagen.flow_from_directory(path,
    target_size=(img_size,img_size),
    color_mode="rgb",
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation',
    shuffle=False
)
# Number of emotion classes expected in the directory structure.
numclasses = 7
# Initialising the CNN
# NOTE(review): these two imports duplicate the tensorflow.keras imports at
# the top of the file and appear unused — candidates for removal.
import keras
from keras.models import Sequential
from tensorflow.keras import layers
# ImageNet-pretrained backbone, frozen so only the new head is trained.
conv_base = tf.keras.applications.MobileNetV2(weights='imagenet', include_top=False, input_shape=(img_size,img_size,channels))
conv_base.trainable = False
model = tf.keras.Sequential([
    conv_base,
    layers.Flatten(),
    layers.Dense(256),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dropout(0.25),
    layers.Dense(512),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dropout(0.25),
    layers.Dense(numclasses),
    layers.Activation('softmax')
])
opt = Adam(learning_rate=0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 50
steps_per_epoch = train_generator.n//train_generator.batch_size
validation_steps = validation_generator.n//validation_generator.batch_size
# Halve the LR after 3 stagnant epochs; keep only the best checkpoint.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=0.00001, mode='auto')
checkpoint = ModelCheckpoint("best_model_artificial.h5", monitor='val_accuracy', save_best_only=True, save_weights_only=False, mode='max', verbose=1)
callbacks = [checkpoint, reduce_lr]
history = model.fit(
    x=train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    validation_data = validation_generator,
    validation_steps = validation_steps,
    callbacks=callbacks
)
# Plot accuracy and loss learning curves side by side and save to EPS.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([0,1])
plt.title('Training and Validation Accuracy')
plt.xlabel('epoch')
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Loss')
plt.ylim([0,5])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.savefig("LearningCurves.eps")
| cimejia/novel-FER-datasets | Training/emotiondetectionCNN-ARTIFICIAL-training.py | emotiondetectionCNN-ARTIFICIAL-training.py | py | 4,008 | python | en | code | 1 | github-code | 13 |
1020758301 | from datetime import datetime, date
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages # for message
from django.urls import reverse
from django.views import generic
from django.utils.safestring import mark_safe
from datetime import timedelta
import calendar
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from agenda.models import Note
from agenda.forms import NoteForm
# Create your views here.
@login_required
def agenda_view(request):
    """Render the (currently static) agenda overview page."""
    return render(request, 'pages/agendas/agenda_view.html', {})
@login_required
def agenda_detail(request, agenda_id):
    """Render the agenda detail page.

    NOTE(review): agenda_id is accepted but never used — confirm whether the
    template is meant to receive the agenda object.
    """
    return render(request, 'pages/agendas/agenda_detail.html', {})
@login_required
def note_view(request):
    """List the current user's notes and handle creation of new ones.

    GET renders an unbound form; POST validates and saves a new note owned
    by the requesting user, then redirects (PRG pattern). Invalid POSTs fall
    through and re-render the bound form with its errors.
    """
    if request.method == 'POST':
        # BUG FIX: the form was previously bound to request.POST even on GET
        # requests (and then discarded); bind only when data was submitted.
        form = NoteForm(request.POST)
        if form.is_valid():
            note = form.save(commit=False)
            note.user = request.user
            note.save()
            messages.success(request, "Note enregistrée!")
            # redirect to a new URL:
            return HttpResponseRedirect(reverse('agenda:note_view'))
    else:
        form = NoteForm()
    note_list = Note.objects.filter(user=request.user).order_by('-created_date')
    context = {
        'form': form,
        'note_list': note_list,
    }
    return render(request, 'pages/agendas/note_view.html', context)
@login_required
def note_detail(request, id):
    """Show a single note.

    BUG FIX: uses get_object_or_404 (already imported in this module) so an
    unknown id yields a 404 instead of an unhandled Note.DoesNotExist (500).
    NOTE(review): any logged-in user can view any note — confirm whether the
    query should also filter on user=request.user.
    """
    note = get_object_or_404(Note, id=id)
    context = {
        'note': note
    }
    return render(request, 'pages/agendas/note_detail.html', context)
class NoteDeleteView(LoginRequiredMixin, generic.DeleteView):
    """Confirm-and-delete view for notes.

    BUG FIX: ``login_url`` only has an effect together with
    LoginRequiredMixin (imported above but previously unused), so the
    original class was reachable by anonymous users.
    """
    model = Note
    login_url = '/accounts/login/'
    template_name = 'pages/agendas/remove_note.html'
    success_url = reverse_lazy('agenda:note_view')
| Kgermando/es-script | agenda/views.py | views.py | py | 2,154 | python | en | code | 0 | github-code | 13 |
10902239166 | #===============================================================================
# Default tasks.
# Can be overwritten by product configuration.
#===============================================================================
import os
import dragon
import shutil
import argparse
#===============================================================================
# Hooks
#===============================================================================
def hook_post_clean(task, args):
    """Remove build artifacts left outside of Alchemy's own output tree."""
    for stale_path in (dragon.POLICE_OUT_DIR,
                       dragon.IMAGES_DIR,
                       os.path.join(dragon.OUT_DIR, "release-*"),
                       os.path.join(dragon.OUT_DIR, "pinstrc")):
        dragon.exec_cmd("rm -rf %s" % stale_path)
def hook_geneclipse(task, args):
    """Generate Eclipse CDT project files for the given modules.

    A leading "-f"/"--full" switches the generator to full mode; "-h"/"--help"
    prints usage and returns without generating anything.
    """
    if not args:
        raise dragon.TaskError("module argument missing")
    if args[0] in ("--help", "-h"):
        dragon.LOGI("usage: ./build.sh -t %s [-f] <module1> <module2> ...", task.name)
        return
    if args[0] in ("--full", "-f"):
        build_option = "-f"
        if len(args) == 1:
            raise dragon.TaskError("module argument missing")
        projects = args[1:]
    else:
        build_option = "-d"
        projects = args[0:]
    # dump alchemy database in xml
    alchemy_xml = os.path.join(dragon.OUT_DIR, "alchemy-database.xml")
    dragon.exec_dir_cmd(dirpath=dragon.WORKSPACE_DIR,
            cmd="./build.sh -p %s-%s -A dump-xml" %
            (dragon.PRODUCT, dragon.VARIANT))
    # invoke alchemy eclipseproject python script
    build_cmd = r"-p \${TARGET_PRODUCT}-\${TARGET_PRODUCT_VARIANT} -A"
    dragon.exec_dir_cmd(dirpath=dragon.WORKSPACE_DIR,
            cmd="%s/scripts/eclipseproject.py %s -b \"%s\" %s %s" %
            (dragon.ALCHEMY_HOME, build_option, build_cmd, alchemy_xml,
            " ".join(projects)))
def hook_genqtcreator(task, args):
    """Generate QtCreator project files for the given modules/directories."""
    if not args:
        raise dragon.TaskError("module or atom.mk directory argument missing")
    if args[0] in ("--help", "-h"):
        dragon.LOGI("usage: ./build.sh -t %s [-f] <module1|dir1> <module2|dir2> ...", task.name)
        return
    projects = args[0:]
    # dump alchemy database in xml
    alchemy_xml = os.path.join(dragon.OUT_DIR, "alchemy-database.xml")
    dragon.exec_dir_cmd(dirpath=dragon.WORKSPACE_DIR,
            cmd="./build.sh -p %s-%s -A dump-xml" %
            (dragon.PRODUCT, dragon.VARIANT))
    # invoke alchemy qtcreatorproject python script
    build_cmd = "-p %s-%s -A" % (dragon.PRODUCT, dragon.VARIANT)
    dragon.exec_dir_cmd(dirpath=dragon.WORKSPACE_DIR,
            cmd="%s/scripts/qtcreatorproject.py %s -b '%s' %s" %
            (dragon.ALCHEMY_HOME, alchemy_xml, build_cmd, " ".join(projects)))
#===============================================================================
# Tasks
#===============================================================================
# Default task registrations. "weak" tasks can be overridden by a product's
# own configuration; hooks run after the listed subtasks complete.
dragon.add_meta_task(
    name = "build",
    desc = "Build everything and generate final directory",
    subtasks=["alchemy all final"],
    weak = True,
)
dragon.add_meta_task(
    name = "clean",
    desc = "Clean everything",
    subtasks=["alchemy clobber"],
    posthook = hook_post_clean,
    weak = True,
)
dragon.add_meta_task(
    name="all",
    desc="Build and generate images for product",
    subtasks=["build", "images"],
    weak=True
)
dragon.add_alchemy_task(
    name = "alchemy",
    desc = "Directly pass commands to alchemy",
    product = dragon.PRODUCT,
    variant = dragon.VARIANT,
    weak = False,
)
# Use generic configuration tasks
dragon.add_meta_task(
    name="xconfig",
    desc="Modules configuration with graphical interface.",
    subtasks=["alchemy xconfig"],
    weak = True,
)
dragon.add_meta_task(
    name="menuconfig",
    desc="Modules configuration with ncurses interface.",
    subtasks=["alchemy menuconfig"],
    weak = True,
)
# Kernel config
dragon.add_meta_task(
    name="linux-xconfig",
    desc="Kernel configuration with graphical interface.",
    subtasks=["alchemy linux-xconfig"],
    weak = True,
)
dragon.add_meta_task(
    name="linux-menuconfig",
    desc="Kernel configuration with ncurses interface.",
    subtasks=["alchemy linux-menuconfig"],
    weak = True,
)
# IDE project generators: all work happens in the posthook.
dragon.add_meta_task(
    name = "geneclipse",
    desc = "Generate Eclipse CDT project",
    posthook = hook_geneclipse
)
dragon.add_meta_task(
    name = "genqtcreator",
    desc = "Generate QtCreator project",
    posthook = hook_genqtcreator
)
| HPCL-micros/bebop_codes | parrot_arsdk/sdk/arsdk_3_11_0_p0_stripped/build/dragon_build/deftasks.py | deftasks.py | py | 4,527 | python | en | code | 0 | github-code | 13 |
27720797233 | import os
from easybuild.easyblocks.generic.rpm import Rpm
class EB_QLogicMPI(Rpm):
    """EasyBuild easyblock for QLogicMPI, installed from RPMs via the generic Rpm block."""

    def make_module_extra(self):
        """Add MPICH_ROOT to module file."""
        extra = super(EB_QLogicMPI, self).make_module_extra()
        extra += self.module_generator.set_environment('MPICH_ROOT', self.installdir)
        return extra

    def sanity_check_step(self):
        """Custom sanity check for QLogicMPI: compiler wrappers, mpirun and mpi.h."""
        wrappers = ['mpirun', 'mpicc', 'mpicxx', 'mpif77', 'mpif90']
        custom_paths = {
            'files': [os.path.join('bin', name) for name in wrappers] + [os.path.join('include', 'mpi.h')],
            'dirs': [],
        }
        super(EB_QLogicMPI, self).sanity_check_step(custom_paths=custom_paths)
| ULHPC/modules | easybuild/easybuild-easyblocks/easybuild/easyblocks/q/qlogicmpi.py | qlogicmpi.py | py | 778 | python | en | code | 2 | github-code | 13 |
9070664080 | from sys import stdin
# Sieve of Eratosthenes over [0, 1000000]: marking multiples of the primes
# up to 1000 (= sqrt(10**6)) is enough to classify every number in range.
array = [True for i in range(1000001)]
for i in range(2, 1001):
    if array[i]:
        for k in range(i + i, 1000001, i):
            array[k] = False
# Read even numbers until a terminating 0. For each n, print the Goldbach
# partition n = a + b where a is the smallest odd prime (i starts at 3, and
# even i are skipped automatically since array[even] is False).
while True:
    n = int(stdin.readline())
    if n == 0: break
    for i in range(3, len(array)):
        if array[i] and array[n-i]:
            print(n, "=", i, "+", n-i)
            break
# Author's note (Korean, kept verbatim): "finding the primes was easy, but
# the b - a pairing step was hard; even reading the solution I don't fully
# see why array[i] and array[n-i] gives n."
'''소수를 구하는 과정을 도출하는건 정말 쉬웠는데 그다음 b-a가 어렵고 풀이를 봐도 잘 모르겠다. 어쨰서 if array[i] and array[n-i]: 이부분이 n이 되는지를 모르겠다;;
'''
| mins1031/coding-test | baekjoon/GoldbachConjecture_6588.py | GoldbachConjecture_6588.py | py | 595 | python | ko | code | 0 | github-code | 13 |
28003863160 | import os
import sacrebleu
import argparse
# CLI: --model names the result/<model> directory to score; --clean switches
# to the post-processed (".clean.prediction") output file.
parser = argparse.ArgumentParser()
parser.add_argument('--model', required=True, type=str)
parser.add_argument('--clean', action="store_true")
args = parser.parse_args()
def get_sentences(path, type):
    """Read the file ``test<type>`` under ``path`` and return its stripped lines."""
    full_path = os.path.join(path, "test" + type)
    with open(full_path, "r", encoding="UTF-8") as handle:
        return [raw.strip() for raw in handle.readlines()]
def get_vocab(path):
    """Parse the terminology file into per-sentence target-term lists.

    Each line holds tab-separated "src ||| tgt" pairs; only the target side
    of every pair is kept.
    """
    with open(path, "r", encoding="UTF-8") as handle:
        lines = [raw.strip() for raw in handle.readlines()]
    vocab = list()
    for line in lines:
        pairs = line.split('\t')
        vocab.append([pair.split(" ||| ")[1] for pair in pairs])
    return vocab
def cal_tur(predict_sentence, golden_sentence, vocab):
    """Count required terms used in the predictions (term use rate).

    A term counts as used when it appears as a substring of the corresponding
    prediction. Returns (terms_found, terms_total).
    """
    assert len(predict_sentence) == len(golden_sentence)
    assert len(predict_sentence) == len(vocab)
    true_num = 0
    total_num = 0
    for predict, _golden, words in zip(predict_sentence, golden_sentence, vocab):
        total_num += len(words)
        true_num += sum(1 for word in words if word in predict)
    return true_num, total_num
if __name__ == "__main__":
data_path = os.path.join("result", args.model)
vocab_path = "data/test.term_lines"
vocab = get_vocab(vocab_path)
if args.clean:
prediction_sentences = get_sentences(data_path, ".clean.prediction")
else:
prediction_sentences = get_sentences(data_path, ".prediction")
golden_sentences = get_sentences(data_path, ".tgt")
bleu = sacrebleu.corpus_bleu(prediction_sentences, [golden_sentences], force=True)
true_num, total_num = cal_tur(prediction_sentences, golden_sentences, vocab)
print("bleu = {:.2f} | term_use_rate = {:.2f}% ({:4d} / {:4d})".format \
(bleu.score, 100.0*true_num/total_num, true_num, total_num)) | Coda-s/NMT | nmt/evaluate.py | evaluate.py | py | 1,889 | python | en | code | 0 | github-code | 13 |
43008758472 | """
File: Draw lines
Name: Elven Liu
-----------------------
Users can click anywhere in the window first and that place will have a ball. And users click another place in the window,
then this place and the circle will connect to be a line.
"""
from campy.graphics.gobjects import GOval, GLine
from campy.graphics.gwindow import GWindow
from campy.gui.events.mouse import onmouseclicked
# Diameter of the click-marker dot, in pixels.
SIZE = 10

window = GWindow()
# Click-state machine: 0 = waiting for the first click, 1 = waiting for the second.
number = 0
# Stored start point of the pending line (top-left corner of the marker).
m_x = 0
m_y = 0
# Marker for the most recent first click; replaced on every new first click.
circle = GOval(SIZE, SIZE)
def main():
    """
    This program creates lines on an instance of GWindow class.
    There is a circle indicating the user’s first click. A line appears
    at the condition where the circle disappears as the user clicks
    on the canvas for the second time.
    """
    # All drawing logic lives in the mouse-click callback.
    onmouseclicked(line)
def line(m):
    """Mouse-click handler: the first click drops a marker dot; the second
    replaces it with a line from the stored point to the new click."""
    global number, m_x, m_y, circle
    if number == 0:
        # First click: show a marker centered on the cursor and remember its
        # top-left corner as the start point of the future line.
        circle = GOval(SIZE, SIZE)
        circle.color = 'black'
        window.add(circle, m.x - SIZE/2, m.y - SIZE/2)
        m_x = m.x - SIZE/2
        m_y = m.y - SIZE/2
        number += 1
    else:
        # Second click: draw the segment, remove the marker, and reset the
        # state machine so the next click starts a new line.
        segment = GLine(m_x, m_y, m.x, m.y)
        segment.color = 'black'
        window.add(segment)
        window.remove(circle)
        number -= 1
if __name__ == "__main__":
main()
| elven-liu/stanCode-projects | SC101/SC101_Assignment1/draw_line.py | draw_line.py | py | 1,544 | python | en | code | 0 | github-code | 13 |
29864466110 | import requests
import json
from flask import Flask, redirect, url_for, Blueprint, request, render_template, session
import datetime
import time
from satori import satori
from satori import satori_common
from satori import satori_bearer_token
from satori import satori_errors as error
from satori import satori_taxonomy as taxonomy
from satori import satori_datastore as datastore
# Blueprint grouping all Satori taxonomy endpoints; registered by the main app.
satori_taxonomy = Blueprint('satoritaxonomy', __name__)
@satori_taxonomy.route('/satori/taxonomy/find_by_<action>', methods=['GET'])
def satori_get_taxonomy(action):
    """Look up a single taxonomy entry by name, id or tag (``search`` query param)."""
    if satori.apikey not in (request.args.get('apikey'), request.headers.get('apikey')):
        return error.INVALID_APIKEY
    if action not in ('name', 'id', 'tag'):
        return error.BAD_PATH
    # Authenticate to Satori for a bearer token every hour, else use cache
    headers = satori_bearer_token.check_token()
    search = request.args.get('search')
    if search is None:
        return error.USER_PARAMS_MISSING
    if satori.logging:
        print("attempting to find taxonomy using " + action)
    return taxonomy.get_one_taxonomy(headers, action, search)
@satori_taxonomy.route('/satori/taxonomy/create', methods=['POST'])
def satori_create_taxonomy():
    """Create a taxonomy entry from the posted JSON body."""
    if satori.apikey not in (request.args.get('apikey'), request.headers.get('apikey')):
        return error.INVALID_APIKEY
    # Authenticate to Satori for a bearer token every hour, else use cache
    headers = satori_bearer_token.check_token()
    if satori.logging:
        print("attempting to create taxonomy")
    return taxonomy.create_taxonomy(headers, request.json)
@satori_taxonomy.route('/satori/copy/taxonomy', methods=['GET'])
def satori_copy_taxonomy():
    """Copy location tags from a source datastore onto a target datastore.

    Locations are matched by (schema, table, column); every tag of a matched
    source location is applied to the corresponding target location. Returns
    a summary dict keyed by target location id.
    """
    if satori.apikey not in (request.args.get('apikey'), request.headers.get('apikey')):
        return error.INVALID_APIKEY
    source_datastore_id = request.args.get('source_datastore_id')
    target_datastore_id = request.args.get('target_datastore_id')
    if source_datastore_id is None or target_datastore_id is None:
        return error.USER_PARAMS_MISSING
    # Authenticate to Satori for a bearer token every hour, else use cache
    headers = satori_bearer_token.check_token()
    print("attempting to copy taxonomy") if satori.logging else None
    source_name = datastore.get_one_datastore(headers, source_datastore_id).json()['name']
    target_name = datastore.get_one_datastore(headers, target_datastore_id).json()['name']
    source_locations = datastore.get_datastore_locations(headers, source_datastore_id)
    target_locations = datastore.get_datastore_locations(headers, target_datastore_id)
    tag_response = {"source" : source_name, "target" : target_name}
    target_records = target_locations.json()['records']
    for source_item in source_locations.json()['records']:
        schema = source_item['location']['schema']
        table = source_item['location']['table']
        column = source_item['location']['column']
        tags = source_item['tags']
        if not tags:
            # Nothing to copy for this location.
            continue
        for target_item in target_records:
            if schema == target_item['location']['schema'] and \
                    table == target_item['location']['table'] and \
                    column == target_item['location']['column']:
                # BUG FIX: tags are applied only when a matching target
                # location exists; previously target_location_id could be
                # unbound (or stale from a previous iteration) when a tagged
                # source location had no counterpart in the target datastore.
                target_location_id = target_item['id']
                for tagitem in tags:
                    print("updating " + schema + ":" + table + ":" + column + " with tag: " + tagitem['name'])
                    datastore.update_datastore_locations(headers, target_location_id, tagitem['name'])
                    tag_response[target_location_id] = "updated " + schema + ":" + table + ":" + column + " with tag: " + tagitem['name']
    return tag_response
| northwestcoder/satori-api-server | routes/route_taxonomy.py | route_taxonomy.py | py | 3,635 | python | en | code | 0 | github-code | 13 |
7439620245 | import datetime
import time
from CTkMessagebox import CTkMessagebox
from ChatMate.client_side.meeting_page.meeting_client import MeetingClient
from ChatMate.client_side.meeting_page import meeting_page_functionality
from ChatMate.client_side import client_utils
from ChatMate.client_side.meeting_page import Sender
from tkinter import *
import customtkinter as ctk
import threading
class MeetingPage:
    def __init__(self, meeting_tcp_address, meeting_udp_address, meeting_key):
        """
        The constructor method of the MeetingPage class.
        It initializes the meeting page GUI and sets up various attributes and widgets.
        """
        self.root = ctk.CTk()
        self.name = self.get_name_from_client()
        # get_name_from_client returns False when the user chose to quit
        # instead of entering a name; skip all setup in that case.
        if self.name is not False:
            # Maps participant name -> Sender widget/stream wrapper.
            self.senders = {}
            self.last_refresh_senders = list(self.senders.values())
            self.meeting_client = MeetingClient(self, meeting_tcp_address, meeting_udp_address, meeting_key)
            # NOTE(review): the client exposes its own ``name`` which is
            # re-read here — presumably it may differ from the entered one
            # (e.g. on collision); confirm against MeetingClient.
            self.name = self.meeting_client.name
            self.root.geometry(f"800x450")
            # Make the window resizable
            self.root.resizable(True, True)
            # Create a frame for all the widgets
            self.main_frame = ctk.CTkFrame(self.root, fg_color=("gray95", "gray10"))
            self.main_frame.pack(fill="both", expand=True)
            # Re-layout the video tiles in a background thread on every resize.
            self.main_frame.bind("<Configure>", lambda event: threading.Thread(target=self.refresh_videos).start())
            self.buttons_frame = ctk.CTkFrame(self.main_frame, height=40)
            self.buttons_frame.pack(side=TOP, pady=5)
            self.frame_for_pictures = ctk.CTkFrame(self.main_frame, fg_color=("gray95", "gray10"))
            self.frame_for_pictures.pack()
            self.frame_for_pictures.place(relx=0.5, rely=0.52, anchor='center')
            self.screen_frame = ctk.CTkFrame(self.main_frame, fg_color=("gray95", "gray10"))
            self.screen_canvas = Canvas(self.screen_frame, highlightbackground="black")
            self.screen_canvas.pack()
            self.user_data_frame = ctk.CTkFrame(self.main_frame)
            self.chat_frame = ctk.CTkFrame(self.main_frame)
            self.messages_frame = ctk.CTkScrollableFrame(self.chat_frame, width=200, height=300)
            self.messages_frame.columnconfigure(0, weight=2)
            self.messages_frame.columnconfigure(1, weight=1)
            self.messages_frame.pack(expand=True, fill="both", padx=5, pady=5)
            # Tk string variables backing the labels of the toggle buttons.
            self.on_off_camera = StringVar()
            self.on_off_screen = StringVar()
            self.on_off_microphone = StringVar()
            self.screen_or_camera = StringVar()
            self.create_widgets()
            # Register the local participant as a sender on a loopback address.
            self.create_sender(("127.0.0.1", 22222), self.name)
            self.senders[self.name].lock_my_widgets()
            self.thread_counter = 0
            # Background thread handling TCP control traffic from the server.
            tcp_thread = threading.Thread(target=self.meeting_client.tcp_listen)
            tcp_thread.start()
            self.thread_counter += 1
            self.root.protocol("WM_DELETE_WINDOW", self.meeting_client.on_closing)
            self.root.mainloop()
def get_name_from_client(self):
"""
A method that prompts the user to enter their name for the meeting.
It validates the input and returns the name entered by the user.
"""
name = ""
# get the name from the client
while True:
name_dialog = ctk.CTkInputDialog(text="Enter what you want to be "
"called in the meeting:", title="Meeting Name")
name = name_dialog.get_input()
if name is None:
msg = CTkMessagebox(title="Exit?", message="Do you want to close the program?",
icon="question", option_1="Cancel", option_2="Yes")
if msg.get() == "Yes":
self.root.destroy()
return False
else:
if not client_utils.is_alphanumeric(name):
CTkMessagebox(title="invalid name", message="name can contain only english letters and numbers",
icon="cancel").get()
else:
return name
def create_widgets(self):
"""
A method that creates and configures the widgets (buttons, labels, entry fields) on the meeting page GUI.
"""
view_or_hide = StringVar()
view_or_hide.set("view users")
btn_users = ctk.CTkButton(self.buttons_frame,
textvariable=view_or_hide,
text_color='#202020',
width=0,
command=lambda: meeting_page_functionality.view_hide_users(self.user_data_frame,
view_or_hide))
btn_users.pack(side=LEFT, padx=5)
# Create all the buttons on the top
# Create a StringVar to track the state of the button
self.on_off_camera.set("start camera")
btn_camera = ctk.CTkButton(self.buttons_frame,
textvariable=self.on_off_camera,
text_color='#202020',
width=0,
command=lambda: meeting_page_functionality.start_stop_camera(self.meeting_client,
self.on_off_camera))
btn_camera.pack(side=LEFT, padx=5)
self.on_off_microphone.set("start microphone")
btn_microphone = ctk.CTkButton(self.buttons_frame,
textvariable=self.on_off_microphone,
text_color='#202020',
width=0,
command=lambda: meeting_page_functionality.start_stop_microphone(
self.meeting_client,
self.on_off_microphone))
btn_microphone.pack(side=LEFT, padx=5)
self.on_off_screen.set("start screen sharing")
btn_screen = ctk.CTkButton(self.buttons_frame,
textvariable=self.on_off_screen,
text_color='#202020',
width=0,
command=lambda: meeting_page_functionality.start_stop_screen(self.meeting_client,
self.on_off_screen))
btn_screen.pack(side=LEFT, padx=5)
self.screen_or_camera.set("press to watch share screen")
self.screen_camera_button = ctk.CTkButton(self.buttons_frame,
textvariable=self.screen_or_camera,
text_color='#202020',
width=0,
command=lambda: meeting_page_functionality.screen_camera_switch(self))
open_or_close = StringVar()
open_or_close.set("open chat")
btn_chat = ctk.CTkButton(self.buttons_frame,
textvariable=open_or_close,
text_color='#202020',
width=0,
command=lambda: meeting_page_functionality.open_close_chat(self.chat_frame,
open_or_close))
btn_chat.pack(side=RIGHT, padx=5)
self.chat_entry = ctk.CTkEntry(self.chat_frame, width=170, placeholder_text="Type message here")
self.chat_entry.pack(side=LEFT, padx=5, pady=5)
chat_send_btn = ctk.CTkButton(self.chat_frame, text="send",
text_color='#202020', width=30,
command=lambda: meeting_page_functionality.send_message(self.chat_entry,
self.meeting_client))
chat_send_btn.pack(side=RIGHT, padx=5, pady=5)
video_delay_label = ctk.CTkLabel(self.user_data_frame, text="Video delay")
video_delay_label.grid(row=0, column=0, padx=5, columnspan=2)
self.video_delay_menu = ctk.CTkOptionMenu(self.user_data_frame,
values=["0.0", "0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8",
"0.9", "1.0", "1.1", "1.2", "1.3", "1.4", "1.5", "1.6", "1.7",
"1.8", "1.9", "2.0"],
command=lambda choice: meeting_page_functionality.pause_video
(self.root, self.meeting_client, 2000))
self.video_delay_menu.grid(row=0, column=2, padx=5, columnspan=3)
audio_delay_label = ctk.CTkLabel(self.user_data_frame, text="Audio delay")
audio_delay_label.grid(row=1, column=0, padx=5, columnspan=2)
self.audio_delay_menu = ctk.CTkOptionMenu(self.user_data_frame,
values=["0.0", "0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8",
"0.9", "1.0", "1.1", "1.2", "1.3", "1.4", "1.5", "1.6",
"1.7", "1.8", "1.9", "2.0"],
command=lambda choice: meeting_page_functionality.pause_audio
(self.root, self.meeting_client, 2000))
self.audio_delay_menu.grid(row=1, column=2, padx=5, columnspan=3)
# Create the user data frame
# Create a label widget for the "Name" column
name_label = ctk.CTkLabel(self.user_data_frame, text="Name")
name_label.grid(row=2, column=0, padx=5)
# Create a label widget for the "Manager" column
manager_label = ctk.CTkLabel(self.user_data_frame, text="Manager")
manager_label.grid(row=2, column=1, padx=5)
# Create a label widget for the "Video" column
camera_label = ctk.CTkLabel(self.user_data_frame, text="Camera")
camera_label.grid(row=2, column=2, padx=5)
# Create a label widget for the "Microphone" column
microphone_label = ctk.CTkLabel(self.user_data_frame, text="Microphone")
microphone_label.grid(row=2, column=3, padx=5)
# Create a label widget for the "Share Screen" column
share_screen_label = ctk.CTkLabel(self.user_data_frame, text="Share Screen")
share_screen_label.grid(row=2, column=4, padx=5)
    def create_sender(self, udp_addr, name):
        """Create a new sender (participant) in the meeting.

        Builds the participant's row in the user-data table (name, manager
        switch, camera/microphone/screen option menus, kick button) and a
        canvas frame for their video, then registers a Sender object under
        `name` and kicks off a video-grid refresh in a background thread.
        """
        # grid_size() returns (columns, rows); the new row goes at index `rows`.
        columns, rows = self.user_data_frame.grid_size()
        name_label = ctk.CTkLabel(self.user_data_frame, text=name)
        name_label.grid(row=rows, column=0, padx=5, pady=5)
        is_manager_var = ctk.Variable(value=False)
        is_manager_switch = ctk.CTkSwitch(self.user_data_frame, variable=is_manager_var, text="", width=0,
                                          command=lambda: meeting_page_functionality.promote_or_degrade(
                                              self.meeting_client, is_manager_var, name))
        is_manager_switch.grid(row=rows, column=1, padx=5, pady=5)
        kick_btn = ctk.CTkButton(self.user_data_frame, text="kick", width=10,
                                 command=lambda: self.meeting_client.tcp_send(("kick " + name).encode()))
        # The first sender ever created is the local user (see __init__),
        # so an empty dict means this row is our own: no kick/block options.
        # Otherwise the available menu options depend on whether WE are a
        # manager: managers may also "block" and see the kick button.
        if self.senders != {}:
            if self.senders[self.name].get_is_manager():
                values = ["hide", "block"]
                screen_values = ["block"]
                kick_btn.grid(row=rows, column=5, padx=5, pady=5)
            else:
                is_manager_switch.configure(state='disabled')
                values = ["hide"]
                screen_values = []
        else:
            values = ["hide"]
            screen_values = []
        # Camera/microphone/screen state menus.  Each command lambda binds the
        # menu it belongs to (late binding is fine: the names exist by call time).
        camera_var = ctk.StringVar(value="closed")
        camera_option_menu = ctk.CTkOptionMenu(self.user_data_frame,
                                               values=values,
                                               variable=camera_var,
                                               command=lambda event: meeting_page_functionality.camera_commands
                                               (self.meeting_client, camera_option_menu, camera_var, name),
                                               width=10)
        camera_option_menu.grid(row=rows, column=2, padx=5, pady=5)
        microphone_var = ctk.StringVar(value="closed")
        microphone_option_menu = ctk.CTkOptionMenu(self.user_data_frame,
                                                   values=values,
                                                   variable=microphone_var,
                                                   command=lambda event: meeting_page_functionality.microphone_commands
                                                   (self.meeting_client, microphone_option_menu, microphone_var, name),
                                                   width=10)
        microphone_option_menu.grid(row=rows, column=3, padx=5, pady=5)
        screen_var = ctk.StringVar(value="closed")
        screen_option_menu = ctk.CTkOptionMenu(self.user_data_frame,
                                               values=screen_values,
                                               variable=screen_var,
                                               command=lambda event: meeting_page_functionality.screen_commands
                                               (self.meeting_client, screen_option_menu, screen_var, name),
                                               width=10)
        screen_option_menu.grid(row=rows, column=4, padx=5, pady=5)
        # Video tile: a frame holding the canvas plus a name overlay.
        frame_for_canvas = ctk.CTkFrame(self.frame_for_pictures)
        canvas = ctk.CTkCanvas(frame_for_canvas, highlightbackground="black")
        canvas.pack()
        name_label2 = ctk.CTkLabel(frame_for_canvas, text=name)
        client_utils.canvas_close_camera(canvas)
        canvas.create_window(0, 0, window=name_label2, anchor=NW)
        new_sender = Sender.Sender(udp_addr, name, name_label, is_manager_switch, is_manager_var, camera_option_menu,
                                   microphone_option_menu, screen_option_menu, kick_btn, frame_for_canvas, canvas)
        self.senders[name] = new_sender
        # Re-flow the video grid off the caller's thread.
        threading.Thread(target=self.refresh_videos).start()
# this function is always refresh the layout of the videos on the screen to fit inside the window
def refresh_videos(self):
# I am using a copy of the senders dict because if a sender left mid-iteration of the loop it causes an error
current_senders = list(self.senders.values())
self.main_frame.update()
num_of_senders = current_senders.__len__()
# determining the height of the share screen canvas
share_height = min(self.main_frame.winfo_width(),
int(self.main_frame.winfo_height()
* 16 / 9)) / 16 * 9 - self.buttons_frame.winfo_height() * 2
# updating the share screen canvas
self.screen_canvas.configure(width=share_height * 16 / 9, height=share_height)
# this function returns the size of each video and how to arrange the video on a table optimally
width, columns, rows = client_utils.optimal_layout(self.main_frame.winfo_width(),
self.main_frame.winfo_height(), num_of_senders,
self.buttons_frame.winfo_height())
# number of empty spaces in the grid
empty_spaces = rows * columns - num_of_senders
# counter to check we are not passing the number of users in the loop
counter = 0
# the loop grid the users on the correct spots on the grid
for row_num in range(rows):
# if we are on the last row and there are empty spaces we need to grid the videos in the middle
if row_num == (rows - 1):
for column_num in range(columns):
if counter < num_of_senders:
if empty_spaces % 2 == 0:
current_senders[counter].get_frame().grid(row=row_num + 1,
column=column_num + (empty_spaces // 2),
columnspan=1,
padx=5, pady=5)
current_senders[counter].get_canvas().configure(width=(width - 8),
height=((width - 8) * 9 / 16) - 4)
else:
current_senders[counter].get_frame().grid(row=row_num + 1,
column=column_num + (empty_spaces // 2),
columnspan=2,
padx=5, pady=5)
current_senders[counter].get_canvas().configure(width=(width - 8),
height=((width - 8) * 9 / 16) - 4)
counter += 1
# if im not on the last row
else:
for column_num in range(columns):
if counter < num_of_senders:
current_senders[counter].get_frame().grid(row=row_num + 1, column=column_num, padx=5, pady=5)
current_senders[counter].get_canvas().configure(width=(width - 8),
height=((width - 8) * 9 / 16) - 4)
counter += 1
for last_refresh_sender in self.last_refresh_senders:
if last_refresh_sender not in current_senders:
last_refresh_sender.get_frame().destroy()
self.last_refresh_senders = current_senders
def got_camera_blocked(self):
if self.senders[self.name].get_camera():
self.on_off_camera.set("start camera")
self.senders[self.name].set_camera(False)
client_utils.canvas_close_camera(self.senders[self.name].get_canvas())
self.senders[self.name].get_camera_option_menu().set("blocked")
def got_screen_blocked(self):
if self.senders[self.name].get_screen():
self.on_off_screen.set("start screen sharing")
self.senders[self.name].set_screen(False)
self.screen_camera_button.pack_forget()
if self.screen_frame.winfo_ismapped():
meeting_page_functionality.screen_camera_switch(self)
self.senders[self.name].get_screen_option_menu().set("blocked")
def got_microphone_blocked(self):
if self.senders[self.name].get_microphone():
self.on_off_microphone.set("start microphone")
self.senders[self.name].set_microphone(False)
self.senders[self.name].get_microphone_option_menu().set("blocked")
def remove_sender_by_name(self, name):
self.senders[name].remove_from_commands()
time.sleep(0.2)
self.senders.pop(name)
self.refresh_videos()
def show_message(self, message):
_, rows = self.messages_frame.grid_size()
message_label = ctk.CTkLabel(self.messages_frame, text=message)
message_label.grid(row=rows, column=0, sticky='w')
current_time = datetime.datetime.now()
time_label = ctk.CTkLabel(self.messages_frame, text=current_time.strftime("%H:%M"))
time_label.grid(row=rows, column=1, sticky='e')
| NONAME4322/ChatMate | ChatMate/client_side/meeting_page/meeting_page_gui.py | meeting_page_gui.py | py | 20,844 | python | en | code | 0 | github-code | 13 |
8734182556 | import tensorflow as tf
# Build a stock ImageNet-pretrained MobileNetV2 classifier (default 224x224
# input, 1000 classes, softmax head) and save it as a base model for reuse.
Model = tf.keras.applications.mobilenet_v2.MobileNetV2(
    input_shape=None,
    alpha=1.0,
    include_top=True,
    weights='imagenet',
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation='softmax',
)
# save_format=None lets Keras pick the format from the path/default.
Model.save('./base.model', save_format=None)
# Debug output: available attributes and the model object itself.
print(dir(Model))
print(Model)
print('\n\n')
#print(Model.get_config())
print('\n\n')
# Dump the name of every layer in the network.
layers = Model.layers
names = [var.name for var in layers ]
print(names)
| OsinValery/camera_effects | python/base_model_saver.py | base_model_saver.py | py | 468 | python | en | code | 0 | github-code | 13 |
39155274195 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
def sigmoid(x):
    """Logistic activation: maps any real input (scalar or array) into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# ---------------------------------------------------------------------------
# Single-neuron (logistic regression) gradient-descent trainer for the
# graduate-admissions data set, followed by a held-out accuracy check.
# ---------------------------------------------------------------------------
raw_data = pd.read_csv("binary.csv")
target = raw_data['admit']

# Loss from the previous reporting step; used to warn when training diverges.
last_loss = None

## Preprocessing Data
# One-Hot encoding of catagorical data
data = raw_data.drop('admit', axis=1)
data = pd.get_dummies(data, columns=['rank'])

# Standardize GRE and GPA columns to zero mean and unit variance.
for field in ['gre', 'gpa']:
    mean, std = np.mean(data[field]), np.std(data[field])
    data.loc[:, field] = (data[field] - mean) / std

## Drawing weights from normal distribution (scaled by 1/sqrt(n_features))
n_records, n_features = data.shape
np.random.seed(42)
weights = np.random.normal(scale=1 / n_features ** .5, size=n_features)

## Spliting data for testing and training
train_features, test_features, train_labels, test_labels = train_test_split(
    data, target, test_size=0.10, random_state=42)

## Neural Network Hyperparamters
epochs = 1000
learning_rate = 0.5

for e in range(epochs):
    del_w = np.zeros(weights.shape)
    for x, y in zip(np.array(train_features), np.array(train_labels)):
        output = np.dot(x, weights)
        activation = sigmoid(output)
        error = y - activation
        # d(sigmoid)/d(output) = activation * (1 - activation)
        sigmoid_grad = activation * (1 - activation)
        error_term = error * sigmoid_grad
        del_w += learning_rate * error_term * x
    # Batch update: apply the accumulated gradient once per epoch.
    weights += del_w
    if e % (epochs / 10) == 0:
        sig_out = sigmoid(np.dot(np.array(train_features), weights))
        loss = np.mean((np.array(train_labels) - sig_out) ** 2)
        if last_loss and last_loss < loss:
            print("Training loss: {:}, WARNING - Loss increaseing" .format(loss))
        else:
            print("Training loss: {:}" .format(loss))
        # BUG FIX: remember this loss; the original never updated last_loss,
        # so the divergence warning above could never fire.
        last_loss = loss

## Tesing on test data ##
tes_out = sigmoid(np.dot(np.array(test_features), weights))
predictions = tes_out > 0.5
accuracy = np.mean((np.array(test_labels) == predictions))
print("Prediction Accuracy: {}" .format(accuracy))
| SidGATOR/MachineLearning | perceptron/school_admit.py | school_admit.py | py | 2,231 | python | en | code | 0 | github-code | 13 |
7752615106 | """ get Covid19 rates and plot them
Initial date: 22 Oct 2020
Author: Margot Clyne
File get_rates.py
"""
from my_utils import get_column
from my_utils import binary_search
from my_utils import plot_lines
import sys
import argparse
from operator import itemgetter
from datetime import datetime
import matplotlib
import matplotlib.pylab as plt
matplotlib.use('Agg')
def main():
    """
    Read Covid19 case data and census data from two different CSV files,
    convert the county's case counts to per-capita rates and plot them.

    Command-line arguments select the two files, the state/county names,
    and the relevant column indices in each file.

    Returns:
    ---------
    The object returned by my_utils.plot_lines for the generated plot file.
    """
    # parse command line arguments
    parser = argparse.ArgumentParser(description='process args for \
                                     reading covid data CSV file')
    parser.add_argument('--covid_file_name',
                        type=str,
                        help='Name of the input covid cases data file',
                        required=True)
    parser.add_argument('--census_file_name',
                        type=str,
                        help='Name of the input census data file',
                        required=True)
    parser.add_argument('--plot_file_name',
                        type=str,
                        help='output plot file generated',
                        required=True)
    parser.add_argument('--state',
                        type=str,
                        help='Name of the State',
                        required=True)
    parser.add_argument('--coviddata_county',
                        type=str,
                        help='Name of the county in covid CSV file',
                        required=True)
    parser.add_argument('--census_county',
                        type=str,
                        help='Name of the county in census CSV file',
                        required=True)
    parser.add_argument('--coviddata_county_column',
                        type=int,
                        help='column ind for county names in covid CSVfile')
    parser.add_argument('--cases_column',
                        type=int,
                        help='column ind for number of cases in covid CSVfile')
    parser.add_argument('--date_column',
                        type=int,
                        default=0,
                        help='column ind for date in covid CSV file')
    parser.add_argument('--census_state_column',
                        type=int,
                        help='column ind for state names in census CSV file')
    parser.add_argument('--census_county_column',
                        type=int,
                        help='column ind for county names in census CSV file')
    parser.add_argument('--pop_column',
                        type=int,
                        help='column ind for populaiton in census CSV file')
    # NOTE(review): argparse `type=bool` treats any non-empty string as True
    # (bool('False') is True); `action='store_true'` would be safer but
    # changes the CLI, so the original behavior is kept.
    parser.add_argument('--daily_new',
                        type=bool,
                        default=False,
                        help='daily newcases. default is cumulativ dailycases')
    parser.add_argument('--running_avg',
                        type=bool,
                        default=False,
                        help='running average of cases.\
                              default is False, window size is required')
    parser.add_argument('--window',
                        type=int,
                        default=5,
                        help='Window size of running average')
    # parse arguments and store them in args
    args = parser.parse_args()
    # assign arguments
    coviddata_file_name = args.covid_file_name
    coviddata_county_column = args.coviddata_county_column
    plot_file_name = args.plot_file_name
    coviddata_county_name = args.coviddata_county
    cases_column = args.cases_column
    date_column = args.date_column
    daily_new = args.daily_new
    running_avg = args.running_avg
    window = args.window
    census_file_name = args.census_file_name
    census_state_column = args.census_state_column
    state = args.state
    census_county_name = args.census_county
    census_county_column = args.census_county_column
    pop_column = args.pop_column
    # run get_column() on covid data and census data
    cases_data_cumulative = get_column(coviddata_file_name,
                                       coviddata_county_column,
                                       coviddata_county_name,
                                       result_columns=[cases_column],
                                       date_column=date_column,
                                       return_dates=True)
    census_state_data = get_column(census_file_name, census_state_column,
                                   state,
                                   result_columns=[census_county_column,
                                                   pop_column],
                                   date_column=None)
    # convert cases from type str to int
    cases_data_cumulative[0] = list(map(int, cases_data_cumulative[0]))
    # dates are stored in the last index of the list, in datetime format
    dates = cases_data_cumulative[-1]
    # daily cases option (default is cumulative daily cases)
    if daily_new is True:
        from my_utils import get_daily_count
        cases = get_daily_count(cases_data_cumulative[0])  # not dates column
    else:
        cases = cases_data_cumulative[0]
    # running-average-of-cases option
    if running_avg is True:
        from my_utils import running_average
        cases = running_average(cases, window)
    # census_state_data is a list [[county_names], [census2010pops]];
    # sort both columns together by county name so binary_search works.
    sorted_pairs = sorted(zip(census_state_data[0], census_state_data[1]))
    county_names, county_pops = (list(column) for column in zip(*sorted_pairs))
    census_state_data_sorted = [county_names, county_pops]
    # use binary search to get county pop census data out of state data
    county_pop = binary_search(census_county_name, census_state_data_sorted)
    if county_pop is None:
        # BUG FIX: the original evaluated the bare expression `ValueError`
        # (a no-op); report the problem and exit with a failure code instead.
        print('county census not found')
        sys.exit(1)
    county_pop = int(county_pop)
    # BUG FIX: `np` was never imported, so the original numpy-based division
    # raised NameError; plain Python division per element works fine here.
    per_capita_rates = [case_count / county_pop for case_count in cases]
    # plot using plot_lines
    plot_points = [[]]
    for point in range(0, len(per_capita_rates)):
        plot_points[0].append([dates[point], per_capita_rates[point]])
    plot_labels = ['dates', 'per_capita_rates']
    plot = plot_lines(plot_points, plot_labels, plot_file_name)
    return plot
if __name__ == '__main__':
main()
| cu-swe4s-fall-2020/python-refresher-maclyne | get_rates_saveHW5.py | get_rates_saveHW5.py | py | 6,969 | python | en | code | 1 | github-code | 13 |
29572945619 | n=int(input())
l1=[int(input()) for x in range(n)]
class solution():
    """Holds the insertion-sort routine used by the input-reading script."""

    def insertion_sort(self, l1):
        """Sort *l1* in place in ascending order, then print the result."""
        for right in range(1, len(l1)):
            current = l1[right]
            slot = right
            # Shift larger elements one position up to open a slot.
            while slot > 0 and l1[slot - 1] > current:
                l1[slot] = l1[slot - 1]
                slot -= 1
            l1[slot] = current
        print(l1)
s1=solution()
s1.insertion_sort(l1)
| ShabbeirShaik/python | sorting_algorithms/insertionSort.py | insertionSort.py | py | 358 | python | en | code | 0 | github-code | 13 |
25110165599 | from flask import Flask, render_template, request, jsonify
from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from prometheus_flask_exporter import PrometheusMetrics
import pymongo
import logging, os, random, opentracing
from flask_pymongo import PyMongo
from flask_opentracing import FlaskTracing
from jaeger_client import Config
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
ConsoleSpanExporter,
SimpleExportSpanProcessor,
)
# Tag every span from this app with the service name "backend-service".
trace.set_tracer_provider(TracerProvider(resource=Resource.create({SERVICE_NAME: "backend-service"})))
# Create the exporter for jaeger
# NOTE(review): JaegerExporter is never imported in this module, so the next
# line raises NameError at import time - add the OpenTelemetry Jaeger
# exporter import (or drop this exporter block). Confirm before shipping.
jaeger_exporter = JaegerExporter()
# Add the exporter to the spanner
spanner = BatchSpanProcessor(jaeger_exporter)
# Make sure it is added here
trace.get_tracer_provider().add_span_processor(spanner)
# make sure to define the tracer here
tracer = trace.get_tracer(__name__)
# Define the app, the Prometheus Metrics and the instrumentor for Flask, also determine the server used
app = Flask(__name__)
FlaskInstrumentor().instrument_app(app)
# Gunicorn advertises itself via the SERVER_SOFTWARE env var; use the
# multiprocess-aware Prometheus registry when running under it.
gunicorn_app = "gunicorn" in os.environ.get("SERVER_SOFTWARE", "")
if gunicorn_app:
    metrics = GunicornInternalPrometheusMetrics(app)
else:
    metrics = PrometheusMetrics(app)
#trace.set_tracer_provider(TracerProvider())
#trace.get_tracer_provider().add_span_processor(
#    SimpleExportSpanProcessor(ConsoleSpanExporter())
#)
#app = Flask(__name__)
#metrics = GunicornInternalPrometheusMetrics(app)
#FlaskInstrumentor().instrument_app(app)
#RequestsInstrumentor().instrument()
# Mongo connection for the /star endpoint (cluster-local service DNS name).
app.config['MONGO_DBNAME'] = 'example-mongodb'
app.config['MONGO_URI'] = 'mongodb://example-mongodb-svc.default.svc.cluster.local:27017/example-mongodb'
mongo = PyMongo(app)
"""
def init_tracer(service):
logging.getLogger('').handlers = []
logging.basicConfig(format='%pat(message)s', level=logging.DEBUG)
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True
},
service_name=service,
)
# this call also sets opentracing.tracer
return config.initialize_tracer()
tracer = init_tracer('backend-service')
tracing = FlaskTracing(tracer,True,app)
flask_head_tracing = tracing.get_span() """
@app.route('/')
def homepage():
    """Root endpoint; returns a plain greeting inside a trace span."""
    # BUG FIX: `flask_head_tracing` only exists inside the commented-out
    # opentracing setup above, so this handler raised NameError.  Use the
    # module-level OpenTelemetry tracer instead.
    with tracer.start_as_current_span("base-endpoint"):
        return "Hello World"
@app.route('/api')
def my_api():
    """Demo endpoint; records the answer on the span and returns it as JSON."""
    # BUG FIX: replace the undefined `flask_head_tracing` opentracing span
    # with the module-level OpenTelemetry tracer; OTel spans use
    # set_attribute() rather than opentracing's set_tag().
    with tracer.start_as_current_span("api-endpoint") as span:
        answer = "something"
        span.set_attribute("answer", answer)
        # NOTE(review): response key 'repsonse' looks like a typo for
        # 'response'; kept as-is since clients may depend on it.
        return jsonify(repsonse=answer)
@app.route('/star', methods=['POST'])
def add_star():
    """Insert a star document {name, distance} into Mongo and echo it back."""
    # BUG FIX: `flask_head_tracing` was never defined (it lives in the
    # commented-out opentracing setup); use the module-level OTel tracer.
    with tracer.start_as_current_span("star-endpoint"):
        star = mongo.db.stars
        name = request.json['name']
        distance = request.json['distance']
        # NOTE(review): Collection.insert was removed in PyMongo 4 - confirm
        # the installed driver version or migrate to insert_one().
        star_id = star.insert({'name': name, 'distance': distance})
        new_star = star.find_one({'_id': star_id})
        output = {'name': new_star['name'], 'distance': new_star['distance']}
        return jsonify({'result': output})
@app.route('/errors')
def error_message():
    """Return a JSON error body with a randomly chosen 400 or 500 status."""
    status = random.choice([400, 500])
    return jsonify({"error": "Error endpoint"}), status
# Count every request, labelled by its URL path.
metrics.register_default(
    metrics.counter(
        'by_path_counter', 'Request count by request paths',
        labels={'path': lambda: request.path}
    )
)
if __name__ == "__main__":
app.run()
| Patralekha/Metrics-Dashboard | backend/app.py | app.py | py | 3,932 | python | en | code | 0 | github-code | 13 |
16100486811 | from rest_framework.routers import SimpleRouter
from django.urls import include, path
from users.views import (
UsersViewSet,
get_token,
sign_up
)
# Router exposing the Users viewset CRUD endpoints under v1/users/.
router_v1 = SimpleRouter()
router_v1.register(r'users', UsersViewSet)
# v1 API: router-backed user endpoints plus the two auth endpoints
# (self sign-up and token retrieval).
urlpatterns = [
    path('v1/', include(router_v1.urls)),
    path('v1/auth/signup/', sign_up, name='sign_up'),
    path('v1/auth/token/', get_token, name='get_token'),
]
| ShelepovNikita/api_yamdb | api_yamdb/users/urls.py | urls.py | py | 403 | python | en | code | 0 | github-code | 13 |
21880677921 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" utils.py - utility functions """
import sys
import os
import pathlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
from torchvision import datasets, transforms
import torch_training_toolkit as t3
MEANS, STDS = 0.5, 0.5
def get_data(data_dir, debug = False):
    """Load the Fashion-MNIST train/val/test datasets (downloading if needed).

    Images become tensors normalized with the module-level MEANS/STDS.
    The official test split is further divided into validation and test
    sets via t3.split_dataset(split_perc=0.20) — confirm against
    torch_training_toolkit which side receives the 20%.

    Returns (train_dataset, val_dataset, test_dataset, class_names).
    """
    # Same transform pipeline for both splits: tensor conversion + normalize.
    xforms = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(MEANS, STDS)
        ]
    )
    # data_dir = os.path.join(os.getcwd(), "data")
    train_dataset = datasets.FashionMNIST(
        root = data_dir, train = True, download = True, transform = xforms
    )
    test_dataset = datasets.FashionMNIST(
        root = data_dir, train = False, download = True, transform = xforms
    )
    # split the test dataset into test/cross-val sets
    val_dataset, test_dataset = t3.split_dataset(test_dataset, split_perc = 0.20)
    if debug:
        print(
            f"train_dataset: {len(train_dataset)} recs - val_dataset: {len(val_dataset)} recs - "
            f"test_dataset: {len(test_dataset)} recs"
        )
        print(f"classes: {train_dataset.classes}")
    return train_dataset, val_dataset, test_dataset, train_dataset.classes
def get_data_cifar10(data_dir, debug = False):
    """Load the CIFAR-10 train/val/test datasets (downloading if needed).

    Near-duplicate of get_data() with CIFAR10 instead of FashionMNIST;
    kept separate to preserve both public entry points.

    Returns (train_dataset, val_dataset, test_dataset, class_names).
    """
    # Same transform pipeline for both splits: tensor conversion + normalize.
    xforms = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(MEANS, STDS)
        ]
    )
    # data_dir = os.path.join(os.getcwd(), "data")
    train_dataset = datasets.CIFAR10(
        root = data_dir, train = True, download = True, transform = xforms
    )
    test_dataset = datasets.CIFAR10(
        root = data_dir, train = False, download = True, transform = xforms
    )
    # split the test dataset into test/cross-val sets
    val_dataset, test_dataset = t3.split_dataset(test_dataset, split_perc = 0.20)
    if debug:
        print(
            f"train_dataset: {len(train_dataset)} recs - val_dataset: {len(val_dataset)} recs - "
            f"test_dataset: {len(test_dataset)} recs"
        )
        print(f"classes: {train_dataset.classes}")
    return train_dataset, val_dataset, test_dataset, train_dataset.classes
def display_sample(dataset, class_names, title = None, fig_size = (8, 8)):
    """Show an 8x8 grid of random samples from *dataset*, titled by class.

    Draws one shuffled batch of 64 images and plots each one de-normalized.
    NOTE(review): assumes the drawn batch actually contains 64 samples -
    smaller datasets would make the inner indexing raise IndexError; confirm
    callers always pass >= 64 samples.
    """
    dataloader = torch.utils.data.DataLoader(dataset, batch_size = 64, shuffle = True)
    images, labels = next(iter(dataloader))
    # Move to host memory and convert to numpy for matplotlib.
    images, labels = images.cpu().numpy(), labels.cpu().numpy()
    num_rows, num_cols = 8, 8
    with sns.axes_style("whitegrid"):
        # NOTE(review): the "seaborn" style name is deprecated in recent
        # matplotlib releases - confirm the pinned matplotlib version.
        plt.style.use("seaborn")
        sns.set(context = "notebook", font_scale = 0.95)
        sns.set_style(
            {"font.sans-serif": ["SF UI Text", "Calibri", "Arial", "DejaVu Sans", "sans"]}
        )
        f, ax = plt.subplots(num_rows, num_cols, figsize = fig_size)
        f.tight_layout()
        f.subplots_adjust(top = 0.90)
        for r in range(num_rows):
            for c in range(num_cols):
                index = r * num_cols + c
                ax[r, c].axis("off")
                sample_image = images[index]
                # CHW -> HWC for imshow, then undo the Normalize transform.
                sample_image = sample_image.transpose((1, 2, 0))
                sample_image = (sample_image * STDS) + MEANS
                ax[r, c].imshow(sample_image, cmap = "Greys", interpolation = "nearest")
                ax[r, c].set_title(class_names[labels[index]])
        if title is not None:
            plt.suptitle(title)
        plt.show()
if __name__ == "__main__":
print(f"Oops! It looks like you are running a utility functions module: {__file__}")
| mjbhobe/dl-pytorch | modern_cv_with_pytorch/utils.py | utils.py | py | 3,560 | python | en | code | 8 | github-code | 13 |
72371736979 | from django.http import HttpResponse
from datetime import datetime
from django.template import Context, Template, loader
import random
from home.models import Persona, Familiar
def hola(request):
    """Simple greeting view for the course demo."""
    saludo = '<h1>Buenas clase 41765!</h1>'
    return HttpResponse(saludo)
def fecha(request):
    """Return the current date and time as plain text."""
    ahora = datetime.now()
    return HttpResponse(f'La fecha y hora actual es {ahora}')
def calcular_fecha_nac(request, edad):
    """Estimate the birth year for someone who is *edad* years old."""
    anio = datetime.now().year - edad
    return HttpResponse(
        f'Tu fecha de nacimiento aproximada para tus {edad} años sería {anio}')
def mi_template(request):
    """Render a template loaded manually from disk (template-API demo)."""
    # NOTE(review): hard-coded absolute Windows path; consider deriving it
    # from settings.BASE_DIR instead.
    ruta = r'D:\Trabajo_con_python\Proyectos_Curso\proyecto_clases\templates\mi_template.html'
    # BUG FIX: use a context manager so the file handle is released even if
    # reading raises (the original leaked the handle on error).
    with open(ruta, 'r') as archivo:
        template = Template(archivo.read())
    contexto = Context()
    template_renderizado = template.render(contexto)
    return HttpResponse(template_renderizado)
def tu_template(request, nombre):
    """Render tu_template.html with *nombre* bound as 'persona'."""
    html = loader.get_template('tu_template.html').render({'persona': nombre})
    return HttpResponse(html)
def prueba_template(request):
    """Render prueba_template.html with a 1-10 range and a random pick."""
    contexto = {
        'rango': list(range(1, 11)),
        'valor_aleatorio': random.randrange(1, 11),
    }
    html = loader.get_template('prueba_template.html').render(contexto)
    return HttpResponse(html)
def crear_persona(request, nombre, apellido):
    """Create and save a Persona with a random age, then render it."""
    nueva = Persona(nombre=nombre, apellido=apellido,
                    edad=random.randrange(1, 99),
                    fecha_nacimiento=datetime.now())
    nueva.save()
    html = loader.get_template('crear_persona.html').render({'persona': nueva})
    return HttpResponse(html)
def ver_personas(request):
    """List every Persona record via the ver_personas.html template."""
    contexto = {'personas': Persona.objects.all()}
    html = loader.get_template('ver_personas.html').render(contexto)
    return HttpResponse(html)
def crear_familiar(request):
    """Create three fixed Familiar records and render a confirmation page."""
    datos = (
        ('Ricardo', 'Padre', 78, datetime(1934, 7, 11)),
        ('Gerardo', 'Pareja', 62, datetime(1960, 1, 1)),
        ('Mauro', 'Hijo', 27, datetime(1995, 7, 29)),
    )
    # Save in the same order the original created them.
    for nombre, parentezco, edad, nacimiento in datos:
        Familiar(nombre=nombre, parentezco=parentezco, edad=edad,
                 fecha_nacimiento=nacimiento).save()
    html = loader.get_template('crear_familiar.html').render({})
    return HttpResponse(html)
def ver_familiares(request):
    """List every Familiar record via the ver_familiares.html template."""
    contexto = {'familiares': Familiar.objects.all()}
    html = loader.get_template('ver_familiares.html').render(contexto)
    return HttpResponse(html)
37964037278 | from TrigMonitorBase.TrigGenericMonitoringToolConfig import defineHistogram, TrigGenericMonitoringToolConfig
class InDetTrigExtensProcessorMonitorBase(TrigGenericMonitoringToolConfig):
    """Base monitoring config: books the four track-count histograms."""

    def __init__(self, name="InDetTrigExtensProcessorMonitorBase", type="electron"):
        super (InDetTrigExtensProcessorMonitorBase, self).__init__(name)
        # One 50-bin counter histogram per monitored track quantity,
        # appended in the same order as before.
        track_histograms = (
            ('numTracksOut', "Number of Tracks at End"),
            ('numTracksIn', "Number of Tracks on Input"),
            ('numExtenTracks', "Number of Extended Tracks saved"),
            ('numOrigTracks', "Number of Original Tracks saved"),
        )
        for variable, histo_title in track_histograms:
            self.Histograms += [ defineHistogram(variable,
                                                 type='TH1F',
                                                 title=histo_title,
                                                 xbins = 50, xmin=0., xmax=50.)]
class InDetTrigExtensProcessorOnlineMonitor(InDetTrigExtensProcessorMonitorBase):
    """Base histogram set published to the "Online" monitoring target."""

    def __init__(self, name="InDetTrigExtensProcessorOnlineMonitor", type="electron"):
        super (InDetTrigExtensProcessorOnlineMonitor, self).__init__(name,type)
        self.defineTarget("Online")
class InDetTrigExtensProcessorValidationMonitor(InDetTrigExtensProcessorMonitorBase):
    """Base histogram set published to the "Validation" monitoring target."""

    def __init__(self, name="InDetTrigExtensProcessorValidationMonitor", type="electron"):
        super (InDetTrigExtensProcessorValidationMonitor, self).__init__(name,type)
        self.defineTarget("Validation")
| rushioda/PIXELVALID_athena | athena/InnerDetector/InDetTrigRecAlgs/InDetTrigExtensProcessor/python/InDetTrigExtensProcessorMonitoring.py | InDetTrigExtensProcessorMonitoring.py | py | 1,989 | python | en | code | 1 | github-code | 13 |
73042584019 | # Módulo para obter a ordem das letras
import name_utility
# Biblioteca para eu não precisar programar a estrutura de árvore binária
from binarytree import Node
# Função que será executada caso o programa seja executado, em vez de usado como módulo
def main():
    """Read names from a file, derive a letter order, and build an AVL tree
    from it step by step, printing the tree after every insertion."""
    # File the names are read from.
    FILENAME = "names.txt"
    # List of names.
    name_list = name_utility.get_names(FILENAME)
    # Order of the letters, i.e. the values that become tree nodes.
    nodes = name_utility.get_order(name_list)
    print(f"\nLetras ordenadas:\n{name_utility.generate_string(nodes)}\n")
    # The letters must be converted to numbers because the binarytree
    # library only accepts numeric node values.  Each letter becomes its
    # code point minus 64, i.e. its 1-based alphabet position
    # (example: 'A' is 65, and 65 - 64 = 1).
    for i in range(len(nodes)):
        nodes[i] = ord(nodes[i]) - 64
    print("1)------------------------------------------------------")
    print(f'\n"{convert_to_letter(nodes[0])}" será inserido na árvore')
    # Create the tree root from the first value and drop it from the list.
    root = Node(nodes.pop(0))
    print_tree(root)
    # Insert every remaining value into the tree.
    for node in nodes:
        # NOTE(review): nodes.index() assumes all values are distinct,
        # which holds because they come from distinct letters.
        print(f"{nodes.index(node)+2})------------------------------------------------------")
        print(f'\n"{convert_to_letter(node)}" será inserido na árvore')
        root = insert(node, root)
        print_tree(root)
# Inserção básica para uma árvore binária de busca
# Se um valor for maior que o nó que ele está sendo inserido,
# será colocado a direita, se for menor, a esquerda.
# Isso é recursivo, pois você deve procurar uma folha na árvore.
def insert(e, node):
    """Insert value *e* into the subtree rooted at *node* and return the
    (possibly new) subtree root.

    Standard recursive BST insert: smaller values go left, larger values go
    right (duplicates fall through unchanged).  On the way back up, any node
    that became unbalanced is rebalanced via fix_balance().
    """
    if not node:
        node = Node(e)
    elif e < node.value:
        node.left = insert(e, node.left)
    elif e > node.value:
        node.right = insert(e, node.right)
    # is_balanced is provided by the binarytree Node class.
    if not node.is_balanced:
        print_tree(node)
        print(f"Nó a ser balanceado: {convert_to_letter(node.value)}")
        node = fix_balance(node)
    return node
# Essa função deve corrigir qualquer erro de balanço na árvore
def fix_balance(node):
    """Restore the AVL balance of *node* using a single or double rotation,
    and return the new subtree root."""
    # Heights of the node's two subtrees.
    h_left, h_right = get_heights(node)
    # AVL balance factor (left height minus right height).
    balance = h_left-h_right
    # Balance factor of the node about to be rotated.
    print(f"Balanço: {balance}")
    # Left-heavy case (balance > 1).
    if h_left > h_right:
        # Same measurements, but for the left child.
        aux = node.left
        ah_left, ah_right = get_heights(aux)
        # Single right rotation: the left child is itself left-heavy.
        if ah_left > ah_right:
            print("Rotação: Direita")
            node = right_rotate(node)
        # Double right rotation: left-rotate the left child, then
        # right-rotate the root.
        elif ah_left < ah_right:
            print("Rotação: Dupla direita")
            node.left = left_rotate(node.left)
            node = right_rotate(node)
    # Right-heavy case (balance < -1).
    elif h_left < h_right:
        # Same measurements, but for the right child.
        aux = node.right
        ah_left, ah_right = get_heights(aux)
        # Double left rotation: right-rotate the right child, then
        # left-rotate the root.
        if ah_left > ah_right:
            print("Rotação: Dupla esquerda")
            node.right = right_rotate(node.right)
            node = left_rotate(node)
        # Single left rotation.
        elif ah_left < ah_right:
            print("Rotação: Esquerda")
            node = left_rotate(node)
    return node
# Função para realizar a rotação para a esquerda
# De acordo com o que o professor fez no texto de apoio
def left_rotate(node):
    """Left rotation: the right child becomes the new subtree root,
    its old left subtree is re-attached as *node*'s right subtree."""
    pivot = node.right
    node.right, pivot.left = pivot.left, node
    return pivot
# Mesma coisa, só que pra uma rotação para a direita
def right_rotate(node):
    """Right rotation: the left child becomes the new subtree root,
    its old right subtree is re-attached as *node*'s left subtree."""
    pivot = node.left
    node.left, pivot.right = pivot.right, node
    return pivot
# Essa função calcula a altura do lado direito e esquerdo de um nó
def get_heights(node):
    """Return (left_height, right_height) for *node*'s subtrees.

    A missing child contributes 0; an existing child contributes its own
    ``height`` attribute plus one.
    """
    left = node.left.height + 1 if node.left else 0
    right = node.right.height + 1 if node.right else 0
    return left, right
# Função pra converter um valor para caractere, não sei porque fiz isso.
def convert_to_letter(value):
    """Map an alphabet position (1..26) back to its uppercase letter."""
    return chr(64 + value)
# Essa função converte os valores numéricos da árvore para letras
# (A biblioteca que usei para as árvores binárias não aceita letras, apenas números)
def print_tree(root):
    """Print the tree with its numeric node values rendered as letters.

    The binarytree library renders nodes as numbers; this scans the rendered
    string and substitutes each number with its corresponding letter.
    """
    # String rendering of the tree (with numeric values).
    text = root.__str__()
    # Buffer accumulating the digits of one number, since node values may
    # have more than one digit.
    buffer = ''
    # For every character in the rendering...
    for c in text:
        # ...digits are accumulated into the buffer...
        if c.isdigit():
            buffer += c
        # ...and on the first non-digit the buffered number is converted.
        if not c.isdigit() and buffer != '':
            # The number's width matters for keeping the ASCII art aligned:
            # "10" occupies two columns but its letter 'J' only one, so the
            # difference is padded with spaces ("10" becomes "J ").
            size = len(buffer) - 1
            letter = chr(int(buffer) + 64) + " " * size
            # Substitute the number with the letter in the rendering.
            # NOTE(review): replace(..., 1) substitutes the leftmost
            # occurrence in the whole string; earlier numbers have already
            # been replaced, but verify alignment on larger trees.
            text = text.replace(buffer, letter, 1)
            # Reset the buffer.
            buffer = ''
    # Print the tree after the substitutions.
    print(text)
if __name__ == "__main__":
main()
| deiveria/av2-teoria-em-grafos | avl_tree.py | avl_tree.py | py | 6,837 | python | pt | code | 0 | github-code | 13 |
27206342719 | """
This module converts a message into numbers to prepare it for encryption
Trying out unit testing.
"""
import unittest
conversion_key = {
' ': '55',
'1': '91',
'0': '88',
'3': '93',
'2': '92',
'5': '95',
'4': '94',
'7': '97',
'6': '96',
'9': '99',
'8': '98',
'a': '11',
'c': '13',
'b': '12',
'e': '15',
'd': '14',
'g': '17',
'f': '16',
'i': '19',
'h': '18',
'k': '22',
'j': '21',
'm': '24',
'l': '23',
'o': '26',
'n': '25',
'q': '28',
'p': '27',
's': '31',
'r': '29',
'u': '33',
't': '32',
'w': '35',
'v': '34',
'y': '37',
'x': '36',
'z': '38'
}
message = "word1 word2 word3"
class MyTest(unittest.TestCase):
    """Unit tests for the conversion helpers in this module."""

    def test_split_by_n(self):
        # Chunking into equal, whole and single-character pieces.
        self.assertEqual(split_by_n("123456789", 3), ["123", "456", "789"])
        self.assertEqual(split_by_n("123456789", 9), ["123456789"])
        self.assertEqual(split_by_n("123", 1), ["1", "2", "3"])

    def test_convert_to_numerals(self):
        # Digits, letters, mixed, and mixed with a space.
        self.assertEqual(convert_to_numerals('123'), "919293")
        self.assertEqual(convert_to_numerals('abc'), "111213")
        self.assertEqual(convert_to_numerals('123abc'), "919293111213")
        self.assertEqual(convert_to_numerals('123 abc'), "91929355111213")

    def test_convert_to_alpha(self):
        # Round-trip of the numeral encodings above.
        self.assertEqual(convert_to_alpha('919293'), "123")
        self.assertEqual(convert_to_alpha('111213'), "abc")
        self.assertEqual(convert_to_alpha('919293111213'), "123abc")
        self.assertEqual(convert_to_alpha('91929355111213'), "123 abc")

    def test_reverse_dict_keys_values(self):
        self.assertEqual(next(reverse_dict_keys_values(
            {'a': '1', 'b': '2'})), {'1': 'a', '2': 'b'})
def reverse_dict_keys_values(dict_input):
    """Yield a single dict with *dict_input*'s keys and values swapped.

    Kept as a generator for backward compatibility: callers retrieve the
    flipped dict with ``next(...)``.  Duplicate values in the input collapse
    to a single key in the output.
    """
    # BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; items() works on both the modern interpreter and 2.7.
    yield {v: k for k, v in dict_input.items()}
def convert_long_list_to_string_list(long_list):
    """Return a list with every element of *long_list* rendered via str()."""
    return list(map(str, long_list))
def split_by_n_generator(sequence, n):
    """Yield successive chunks of length *n* from *sequence*
    (the final chunk may be shorter)."""
    remaining = sequence
    while remaining:
        chunk, remaining = remaining[:n], remaining[n:]
        yield chunk
def split_by_n(sequence, n):
    """Return the chunks produced by split_by_n_generator as a full list
    instead of a generator."""
    return list(split_by_n_generator(sequence, n))
def convert_to_numerals(str_input):
    """Encode *str_input* as a digit string using the module-level
    conversion_key (one two-digit code per character).

    Raises KeyError for characters missing from conversion_key.
    """
    # Split into single characters, then replace each in place.
    split_input = split_by_n(str_input, 1)
    for i in range(len(split_input)):
        split_input[i] = conversion_key[split_input[i]]
    return ''.join(split_input)
def convert_to_alpha(int_input):
    """Decode a numeral string produced by convert_to_numerals back to
    characters, using the inverted conversion_key."""
    # Flip conversion_key so two-digit codes map back to characters.
    reverse_conversion_key = next(reverse_dict_keys_values(conversion_key))
    # Codes are fixed-width: two digits per character.
    split_input = split_by_n(int_input, 2)
    for i in range(len(split_input)):
        split_input[i] = reverse_conversion_key[split_input[i]]
    return ''.join(split_input)
if __name__ == '__main__':
unittest.main()
| Kyle-Koivukangas/kCrypt | textConvert.py | textConvert.py | py | 3,038 | python | en | code | 0 | github-code | 13 |
34692311436 | import pandas as pd
import numpy as np
import collections
print(pd.__version__)
print(np.__version__)
# def my_write_answer(answer, part, number):
# name = 'answer' + str(part) + str(number) + '.txt'
# with open(name, 'w') as file:
# file.write(str(answer))
# def my_precision(d, k):
# sum = 0
# for i in d[:k]:
# sum += i
# return sum/k
# def my_AP(d):
# k = len(d)+1
# sum_1 = 0
# sum_2 = 0
# for n,i in enumerate(d):
# sum_1 += i * my_precision(d,n+1)
# sum_2 += i
# return sum_1/sum_2
#
# vec_1 = [1, 0, 0, 1]
# vec_2 = [1, 1, 0, 0]
#
# print(my_AP(vec_1))
# print(my_AP(vec_2))
# answer11 = my_AP(vec_2) - my_AP(vec_1)
# print('answer 1.1 = ', answer11)
#
# mass = ['A','B','C','D','E', 'F', 'G', 'H']
# new_mass = mass
# l = len(mass) - 1
# for i, egg in enumerate(reversed(mass)):
# print('{}: mass[{}] = {}'.format(i, l-i, egg))
# if (egg == 'A') or (egg == 'C') or (egg == 'H'):
# new_mass.pop(l-i)
#
# print(new_mass)
#
# mama = [0,1,2,3,4,5]
# # mama.pop((3,5))
# print(mama)
# Scratch-pad: count word frequencies, then prepare a per-session array.
# NOTE(review): this literal is dead — my_dict is rebound to a Counter
# two lines below.
my_dict = {'23': 10, '10': 5, '8': 3, '99': 1, '7': 1}
my_test = ['8', '99', '8', '10', '7', '23']
my_dict = collections.Counter()
for word in my_test:
    my_dict[word] += 1
print(my_dict)
# array = [[151, 5],
#          [132, 3],
#          [113, 1],
#          [144, 4],
#          [125, 2]]
# def my_sort(session):
# Unique session ids and a zeroed (n, 2) working array.
uni_session = np.unique(my_test)
array = np.zeros((len(uni_session), 2))
print(uni_session)
| RBVV23/Coursera | Прикладные задачи анализа данных/Week_4/sandbox_4.py | sandbox_4.py | py | 1,493 | python | en | code | 0 | github-code | 13 |
34469372394 | # this script is used to remove the extra seqs that do not correspond to the target gene.
# the target gene names are given in the keywords list below.
import os
import re
from Bio import SeqIO
input_directory = "/Users/hanli/Desktop/FYP/PAML/new_fna_faa"
output_directory = "/Users/hanli/Desktop/FYP/PAML/new_fna_faa/cleaned_seq"
# KO identifier -> regex fragment matched against FASTA record descriptions.
# BUGFIX: the original literal listed "K00548" and "K15554" twice; Python
# silently keeps only the last value of a duplicated key, so the duplicates
# are dropped here — the effective mapping is unchanged.
keywords = {
    "K00548": "synthase",
    "K00651": "O-acetyltransferase",
    "K00799": "S-transferase",
    "K01505": "deaminase",
    "K07160": "5-oxoprolinase",
    "K15554": "permease",
    "K22955": "lactone",
    "K00003": "dehydrogenase",
    "K00032": "reductase",
    "K00036": "dehydrogenase",
    "K00383": "reductase",
    "K02439": "sulfurtransferase",
    "K00387": "sulfoxide",
    "K00432": "gpx|btuE|bsaA",
    "K00640": "acetyltransferase",
    "K05301": "reductase",
    "K06048": "ligase",
    "K17228": "monooxygenase",
}
# Ensure the output directory exists before writing filtered files.
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
for file_name in os.listdir(input_directory):
    # Only process FASTA files whose name starts with a known KO id.
    if file_name.startswith(tuple(keywords.keys())) and file_name.endswith(".fasta"):
        input_file_path = os.path.join(input_directory, file_name)
        output_file_path = os.path.join(output_directory, file_name)
        with open(input_file_path, "r") as input_file, open(output_file_path, "w") as output_file:
            fasta_records = list(SeqIO.parse(input_file, "fasta"))
            for record in fasta_records:
                # The KO id is the file-name prefix before the first "_".
                key = file_name.split("_")[0]
                # Keep only records whose description matches the
                # target-gene pattern for this KO id (case-insensitive).
                if re.search(keywords[key], record.description, re.IGNORECASE):
                    SeqIO.write(record, output_file, "fasta")
| lihanlilyy/Sulfur-Phytobiomes | PAML/clean.py | clean.py | py | 1,685 | python | en | code | 0 | github-code | 13 |
1955632721 | from tkinter import *
from tkinter import messagebox
# creamos una clase
class App():
    """Simple Tkinter window with a labelled text entry and a button that
    shows a welcome message box."""

    def __init__(self):
        # Create the main window.
        ventana = Tk()
        ventana.title('ventana principal')
        ventana.geometry('400x400')
        # ventana.configure(bg='green')
        # Widgets.
        self.label1 = Label(ventana, text='nombre')
        # BUGFIX: the original wrote "self.Label1,Place(x=20,y=30)" — a tuple
        # expression referencing the undefined name "Place" and a never-set
        # attribute — raising NameError at runtime.  The intent was to call
        # the widget's place() geometry manager; same fix applied to the
        # Entry and Button below.
        self.label1.place(x=20, y=30)
        self.text1 = Entry(ventana, bg='red')
        self.text1.place(x=100, y=30)
        # Buttons.
        self.bt1 = Button(ventana, text='Aceptar', command=self.mensaje)
        self.bt1.place(x=60, y=80)
        ventana.mainloop()

    def mensaje(self):
        """Show the welcome message box."""
        messagebox.showinfo(message='BIENVENIDO AL SISTEMA', title='Ejemplo de mensaje')
# programa principal
objecto_ventana = App() | CRISTIANS02/CLASES_JMA | TKINTER/TKINTER_1.PY | TKINTER_1.PY | py | 937 | python | es | code | 6 | github-code | 13 |
39422891639 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
BSNIP SIMPLE UI
Simple UI for BSnip class
versions:
V1.0.0 [25.02.2019]
- first wrking version
V1.1.0 [06.04.2019]
- added option to save snippets on jsonstorage.net
'''
__author__ = "Bojan"
__license__ = "GPL"
__version__ = "1.1.0"
__maintainer__ = "Bojan"
__status__ = "Development"
import sys
class UI:
    '''Command-line user interface: parses sys.argv and dispatches to the
    matching BSnip operation.'''

    # Command name -> help text printed by --help.
    COMMANDS = {
        'add': 'Add new snippet',
        'delete': 'Delete snippet <bsnip delete 23>',
        'update': 'Update snippet <bsnip update 23>',
        'list': 'List snippets',
        'search': 'Search snippets <bsnip search test>',
        'run': 'Run snippet <bsnip run 23>',
        'save-cloud': 'Save snippets in cloud (using jsonstorage.net)',
        'load-cloud': 'Load snippets from cloud (using jsonstorage.net)',
        'get-cloud-id': 'Get local cloud ID',
        'set-cloud-id': 'Set local cloud ID',
        'list-cloud-for-id': 'List snippets online for cloud ID',
        '--version': 'App version',
        '--help': 'This help'
    }

    def __init__(self, bs):
        '''init
        :param bs: BSnip backend instance used to store/run snippets
        '''
        # Raw command-line arguments; params[0] is the program name.
        self.params = sys.argv
        self.bsnip = bs

    def _is_int(self, number):
        '''check if it is integer
        :param number: To check
        :return int: Return integer value if converted or None on issue
        '''
        # NOTE(review): "0" converts to 0, which callers treat as a
        # falsy/invalid snippet id.
        try:
            return int(number)
        except:
            return None

    def _is_params_exist(self, command):
        '''check if params exist in command
        :return: True when the parsed command has a non-empty params list,
                 None (after printing a message) otherwise
        '''
        if 'params' in command:
            if not command['params']:
                print('Please enter param(s)!')
                return None
            return True
        print('Please enter param(s)!')
        return None

    def _parse_command(self):
        '''parse command
        :return dict: Return parsed command as dict or None on issue
        '''
        # Need at least one argument after the program name.
        if len(self.params) < 2:
            print('Please enter command!')
            print('For list of commands try < bsnip --help >')
            return None
        # Reject anything not in the command table.
        if self.params[1].lower() not in self.COMMANDS.keys():
            print('Please enter valid command!')
            return None
        command = {'comm': self.params[1].lower()}
        # Everything after the command name becomes its params.
        if len(self.params) > 2:
            command['params'] = self.params[2:]
        return command

    def _command_add(self):
        '''add command: prompt for the snippet command and description'''
        comm = input('Command:')
        desc = input('Description:')
        snip = {
            'desc': desc,
            'comm': comm,
        }
        return self.bsnip.add(snip)

    def _command_delete(self, command):
        '''delete command: remove the snippet with the given id'''
        if 'params' in command:
            if not command['params']:
                print('Please enter ID for snippet to delete!')
                return None
            snippet_id = self._is_int(command['params'][0])
            if snippet_id:
                return self.bsnip.delete(snippet_id)
            print('ID need to be integer!')
            return None
        print('Please enter ID for snippet to run!')
        return None

    def _command_update(self, command):
        '''update command: show old values, prompt for new ones'''
        if self._is_params_exist(command):
            snippet_id = self._is_int(command['params'][0])
            if snippet_id:
                snip = self.bsnip.get_by_id(snippet_id)
                if snip:
                    print('Old Command:', snip['comm'])
                    comm = input('Command:')
                    print('Old Description:', snip['desc'])
                    desc = input('Description:')
                    snip = {
                        'id': snip['id'],
                        'desc': desc,
                        'comm': comm,
                    }
                    return self.bsnip.update(snip)
                print('No snippet ID: {}!'.format(command['params'][0]))
                return None
            print('ID need to be integer!')
            return None
        return None

    def _command_run(self, command):
        '''run command: execute the snippet with the given id'''
        if self._is_params_exist(command):
            snippet_id = self._is_int(command['params'][0])
            if snippet_id:
                return self.bsnip.run(snippet_id)
            print('Command ID: {} not found!'.format(command['comm'][0]))
            return None
        return None

    def _command_search(self, command):
        '''search command: list snippets matching the joined params'''
        if self._is_params_exist(command):
            search_term = ' '.join(command['params'])
            snips = self.bsnip.search(search_term)
            if snips:
                self._command_list(snips)
                return True
            print('Can\'t find snippets for search: [{}]!'.format(search_term))
            return None
        return None

    def _command_list(self, snips):
        '''list command: pretty-print the given snippets'''
        if not snips:
            print('No data to display!')
            return None
        print('Snippets list:')
        if self.bsnip.snips['snips-online-id']:
            print('CLOUD ID:', self.bsnip.snips['snips-online-id'])
        else:
            print('CLOUD ID: not set')
        for snip in snips:
            print('#{:5} {}'.format(snip['id'], snip['desc']))
            print('>', snip['comm'])
            print('----------')
        return True

    def _command_save_cloud(self):
        '''save cloud command: push the snippet db to jsonstorage.net'''
        if self.bsnip.write_db_cloud():
            print('Snippets saved to cloud!')

    def _command_load_cloud(self):
        '''load cloud command: fetch the snippet db from jsonstorage.net'''
        if not self.bsnip.get_snips_online_id():
            print('Cloud ID need to be set!')
            return None
        if self.bsnip.load_db_cloud():
            print('Snippets loaded from cloud!')
        else:
            # NOTE(review): typo 'clud' in the message kept as-is
            # (runtime string).
            print('Issues loading snippets from clud!')
        return None

    def _command_get_cloud_id(self):
        '''get cloud id command: print the locally stored cloud ID'''
        cloud_id = self.bsnip.get_snips_online_id()
        if cloud_id:
            print('Cloud ID:', cloud_id)
        else:
            print('Cloud ID not set!')

    def _command_set_cloud_id(self):
        '''set cloud id command: prompt for, confirm and store a cloud ID'''
        cloud_id = input('Enter cloud ID:')
        # Clearing the id requires explicit confirmation.
        if not cloud_id.strip():
            if input('Confirm empty cloud ID [Y/n]:') != 'Y':
                print('Cloud ID not changed!')
                return None
        self.bsnip.udate_snips_online_id(cloud_id.strip())
        self._command_get_cloud_id()
        return None

    def _command_list_cloud_snips_for_id(self):
        '''list command from cloud for selected ID
        :return bool: Return True if OK or none on issue
        '''
        cloud_id = input('Enter cloud ID to search for:')
        snips = self.bsnip.get_snips_online_for_id(cloud_id)
        if not snips:
            print('Not valid snips ID!')
            return None
        print('Snippets list:')
        if snips['snips-online-id']:
            print('Cloud ID:', snips['snips-online-id'])
        else:
            print('Cloud ID: not set')
        for snip in snips['snips']:
            print('#{:5} {}'.format(snip['id'], snip['desc']))
            print('>', snip['comm'])
            print('----------')
        return True

    def run_command(self):
        '''run command: parse argv and dispatch to the matching handler
        :return bool: Return True if all is OK
        '''
        command = self._parse_command()
        if not command:
            return None
        if command['comm'] == 'add':
            return self._command_add()
        if command['comm'] == 'delete':
            return self._command_delete(command)
        if command['comm'] == 'update':
            return self._command_update(command)
        if command['comm'] == 'list':
            return self._command_list(self.bsnip.snips['snips'])
        if command['comm'] == 'search':
            return self._command_search(command)
        if command['comm'] == 'run':
            return self._command_run(command)
        if command['comm'] == 'save-cloud':
            return self._command_save_cloud()
        if command['comm'] == 'load-cloud':
            return self._command_load_cloud()
        if command['comm'] == 'get-cloud-id':
            return self._command_get_cloud_id()
        if command['comm'] == 'set-cloud-id':
            return self._command_set_cloud_id()
        if command['comm'] == 'list-cloud-for-id':
            return self._command_list_cloud_snips_for_id()
        if command['comm'] == '--help':
            print('bSnip, simple snippet manager')
            print('COMMANDS:')
            for comm in self.COMMANDS:
                print('{:20} {}'.format(comm, self.COMMANDS[comm]))
        elif command['comm'] == '--version':
            print(__version__)
        return None
| abrihter/bsnip | ui.py | ui.py | py | 8,784 | python | en | code | 0 | github-code | 13 |
36533129632 |
"""Report To Kill Google Spread module.
This module allows RTK scripts to communicate with google sheets to save report records.
"""
import os
from ast import literal_eval
from google.oauth2.service_account import Credentials
import gspread
GSPREAD_CLIENT_SECRET = os.getenv("GSPREAD_CLIENT_SECRET")
WORKSHEET_NAME = "RTK"
SHEET_NAMES_ROW_NUM = 1
SHEET_REPORTS_ROW_NUM = 2
scope = ['https://spreadsheets.google.com/feeds',
# 'https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive']
creds = Credentials.from_service_account_info(
literal_eval(GSPREAD_CLIENT_SECRET), scopes=scope)
client = gspread.authorize(creds)
worksheet = client.open(WORKSHEET_NAME).sheet1
def getReportsDic():
    """
    Returns a dictionary containing the reports counts of the users.

    Parameters:
        None

    Returns:
        Dictionary: key: username.
                    value: report count.

    Raises:
        None
    """
    # Row 1 holds the user names (headers), row 2 the counts, so the
    # first record returned by get_all_records() is the counts row.
    return worksheet.get_all_records()[0]
def addUser(name, value):
    """
    Adds the given name as a user with the given value as his/her report count.

    Parameters:
        name (str): the username to add.
        value (int): the value to set for the given username.

    Returns:
        None

    Raises:
        None
    """
    # The new user occupies the column right after the existing ones.
    NewUserCol = len(getReportsDic()) + 1
    worksheet.add_cols(1)
    # NOTE(review): pokes gspread's private metadata so the client-side
    # column count matches the newly added column — relies on internals.
    worksheet._properties['gridProperties']['columnCount'] += 1
    worksheet.update_cell(SHEET_NAMES_ROW_NUM, NewUserCol, name)
    worksheet.update_cell(SHEET_REPORTS_ROW_NUM, NewUserCol, value)
def updateReport(name, newValue):
    """
    Updates the report count of the given name.

    Parameters:
        name (str): the username to update.
        newValue (int): the updated value to set for the given username.

    Returns:
        None

    Raises:
        None
    """
    # Locate the user's column by searching for the name cell, then write
    # the new count into the reports row of that column.
    worksheet.update_cell(SHEET_REPORTS_ROW_NUM,
                          worksheet.find(name).col, newValue)
client.login()
| MohamedSaidSallam/RTK | RTK/util/gspread.py | gspread.py | py | 2,001 | python | en | code | 0 | github-code | 13 |
655598766 | """Search in a table"""
import re
import pandas as pd
def search(table, pattern, columns=None):
    """Return the rows of *table* for which at least one column matches
    *pattern*.

    Parameters
    ----------
    table : pd.DataFrame
        Table to search.
    pattern : str or compiled regex
        A plain string is wrapped as ``.*(<pattern>)`` (match anywhere)
        and compiled case-insensitively; a pre-compiled regex is used as-is.
    columns : list of str, optional
        Columns to search; defaults to all object (string) columns.

    Returns
    -------
    pd.DataFrame
        The matching rows of *table*.
    """
    assert isinstance(table, pd.DataFrame), "'table' must be a Pandas DataFrame"
    if columns is None:
        # Default to object-dtype columns, i.e. the ones holding strings.
        columns = [col for col in table.columns if table[col].dtype.kind == 'O']
    if not columns:
        # BUGFIX: corrected the garbled wording of the original message.
        raise ValueError('Please specify a non-empty columns argument, and run the search '
                         'on a table that has string columns')
    if isinstance(pattern, str):
        if not pattern.startswith('.*'):
            # re.match anchors at the start, so allow any prefix.
            pattern = '.*(' + pattern + ')'
        pattern = re.compile(pattern, re.IGNORECASE)
    found = pd.Series(0, index=table.index)
    for col in columns:
        # ROBUSTNESS: non-string cells (e.g. NaN in an object column)
        # previously raised TypeError inside re.match; they now simply
        # count as non-matches.
        found += table[col].apply(
            lambda x: 1 if isinstance(x, str) and pattern.match(x) else 0)
    return table.loc[found > 0]
| mwouts/world_bank_data | world_bank_data/search.py | search.py | py | 889 | python | en | code | 113 | github-code | 13 |
34739690299 | # coding: utf-8
from my_linear_algebra import *
from test_statistics import *
from test_gradient_descent import *
from my_multiple_regression import *
from test_adjusted_data import *
import math
import random
from collections import defaultdict
# 感知器(perception)可能是最简单的神经网络
def step_function(x):
    """Heaviside step: 1 for non-negative input, 0 otherwise."""
    if x >= 0:
        return 1
    return 0
def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron 'fires', 0 if not"""
    # Weighted sum of the inputs plus the bias, pushed through the step
    # function (dot comes from my_linear_algebra).
    calculation = dot(weights, x) + bias
    return step_function(calculation)
# sigmoid 函数。
def sigmoid(t):
    """Logistic sigmoid, squashing t smoothly into (0, 1)."""
    return 1.0 / (1.0 + math.exp(-t))
# 这样,我们就能计算其输出了,代码如下所示:
def neuron_output(weights, inputs):
    """Activation of one neuron: sigmoid of the weighted input sum."""
    return sigmoid(dot(weights, inputs))
# 有了这种表示方法,神经网络用起来就会非常简便:
def feed_forward(neural_network, input_vector):
    """takes in a neural network
    (represented as a list of lists of lists of weights)
    and returns the output from forward-propagating the input"""
    outputs = []
    # Process one layer at a time.
    for layer in neural_network:
        input_with_bias = input_vector + [1]              # append a bias input
        output = [neuron_output(neuron, input_with_bias)  # compute the output
                  for neuron in layer]                    # for each neuron
        outputs.append(output)                            # and remember it
        # The input of the next layer is the output of this one.
        input_vector = output
    return outputs
# 所以,我们只需要调整权重,就能使得 neuron_outputs 非常接近 1 或 0 了:
# With these hand-picked weights the 2-layer network computes XOR: the
# neuron outputs land very close to 0 or 1.
xor_network = [# hidden layer
               [[20, 20, -30],   # 'and' neuron
                [20, 20, -10]],  # 'or' neuron
               # output layer
               [[-60, 60, -30]]] # 'or but not and' neuron

for x in [0, 1]:
    for y in [0, 1]:
        # feed_forward produces the outputs of every neuron;
        # feed_forward(...)[-1] is the output of the output layer.
        print(x, y, feed_forward(xor_network,[x, y])[-1])
# 0 0 [9.38314668300676e-14]
# 0 1 [0.9999999999999059]
# 1 0 [0.9999999999999059]
# 1 1 [9.383146683006828e-14]
# 反向传播,这个算法需要在整个训练集上多次迭代,直到网络收敛为止:
def backpropagate(network, input_vector, targets):
    """One stochastic-gradient step of backpropagation on *network*.

    *network* is [hidden_layer, output_layer] (lists of per-neuron weight
    lists); the weights are adjusted in place to move the network's output
    for *input_vector* towards *targets*.
    """
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # output * (1 - output) is from the derivative of the sigmoid.
    output_deltas = [output * (1 - output) * (output - target)
                     for output, target in zip(outputs, targets)]

    # Adjust the weights of the output layer, one neuron at a time.
    for i, output_neuron in enumerate(network[-1]):
        # Focus on the ith output-layer neuron.
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            # Adjust the jth weight based on both this neuron's delta
            # and its jth input.
            output_neuron[j] -= output_deltas[i] * hidden_output

    # Back-propagate the errors to the hidden layer.
    # BUGFIX: the original read the module-level global `output_layer`
    # here, which only worked because that global aliased network[-1];
    # the function now uses the network it was given.
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                     dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # Adjust the weights of the hidden layer, one neuron at a time.
    for i, hidden_neuron in enumerate(network[0]):
        for j, input in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input
# 现在可以建立我们的神经网络了:
# Build the digit-recognition network with reproducible random weights.
random.seed(0)   # to get repeatable results
input_size = 25  # each input is a vector of length 25 (a 5x5 bitmap)
num_hidden = 5   # the hidden layer has 5 neurons
output_size = 10 # we need 10 outputs per input (one per digit)

# Each hidden neuron has one weight per input, plus a bias weight.
hidden_layer = [[random.random() for __ in range(input_size + 1)]
                for __ in range(num_hidden)]

# Each output neuron has one weight per hidden neuron, plus a bias weight.
output_layer = [[random.random() for __ in range(num_hidden + 1)]
                for __ in range(output_size)]

# The network starts out with random weights.
network = [hidden_layer, output_layer]
raw_digits = [
"""11111
1...1
1...1
1...1
11111""",
"""..1..
..1..
..1..
..1..
..1..""",
"""11111
....1
11111
1....
11111""",
"""11111
....1
11111
....1
11111""",
"""1...1
1...1
11111
....1
....1""",
"""11111
1....
11111
....1
11111""",
"""11111
1....
11111
1...1
11111""",
"""11111
....1
....1
....1
....1""",
"""11111
1...1
11111
1...1
11111""",
"""11111
1...1
11111
....1
11111"""]
def make_digit(raw_digit):
    """Flatten an ASCII digit drawing into a list of 0/1 pixels:
    '1' becomes 1, anything else becomes 0 (rows are stripped first)."""
    pixels = []
    for row in raw_digit.split("\n"):
        for ch in row.strip():
            pixels.append(1 if ch == '1' else 0)
    return pixels
inputs = list(map(make_digit, raw_digits))

# One-hot target vectors: digit j maps to a 1 in position j.
targets = [[1 if i == j else 0 for i in range(10)]
           for j in range(10)]

# Train the model with backpropagation:
# 10,000 epochs look sufficient for convergence.
for __ in range(10000):
    for input_vector, target_vector in zip(inputs, targets):
        backpropagate(network, input_vector, target_vector)
# 它在训练集上效果很好:
def predict(input):
    """Return the output-layer activations of the module-level network
    for the given input vector."""
    return feed_forward(network, input)[-1]
predict(inputs[7])
# [0.026, 0.0, 0.0, 0.018, 0.001, 0.0, 0.0, 0.967, 0.0, 0.0]
# 我们需要用到函数 pyplot.imshow
import matplotlib
weights = network[0][0] # first neuron of the hidden layer
# abs_weights = map(abs, weights) # shading depends only on magnitude
abs_weights = [abs(weight) for weight in weights]

grid = [abs_weights[row:(row+5)] # reshape the weights into a 5x5 grid
        for row in range(0,25,5)] # [weights[0:5], ..., weights[20:25]]

ax = plt.gca() # we need the axes object to draw hatches

ax.imshow(grid, # same arguments as plt.imshow
          cmap=matplotlib.cm.binary, # use the white-to-black colormap
          interpolation='none') # no interpolation between pixels
def patch(x, y, hatch, color):
    """return a matplotlib 'patch' object with the specified
    location, crosshatch pattern, and color"""
    # The rectangle is centred on cell (x, y): imshow places the cell
    # centre at integer coordinates, hence the -0.5 offsets.
    return matplotlib.patches.Rectangle((x - 0.5, y - 0.5), 1, 1,
                                        hatch=hatch, fill=False, color=color)
# Cross-hatch the cells holding negative weights.
for i in range(5):     # rows
    for j in range(5): # columns
        if weights[5*i + j] < 0: # row i, column j = weights[5*i + j]
            # Add black and white hatches so they are visible on both
            # light and dark cells.
            ax.add_patch(patch(j, i, '/', "white"))
            ax.add_patch(patch(j, i, '\\', "black"))
plt.show()
# For these probe inputs the first hidden neuron responds as expected:
left_column_only = [1, 0, 0, 0, 0] * 5
print(feed_forward(network, left_column_only)[0][0]) # 1.0

center_middle_row = [0, 0, 0, 0, 0] * 2 + [0, 1, 1, 1, 0] + [0, 0, 0, 0, 0] * 2
print(feed_forward(network, center_middle_row)[0][0]) # 0.95

right_column_only = [0, 0, 0, 0, 1] * 5
print(feed_forward(network, right_column_only)[0][0]) # 0.0
| lucelujiaming/dataScienceFromSCratch | my_neural_network.py | my_neural_network.py | py | 6,871 | python | en | code | 0 | github-code | 13 |
73864669138 | '''Demo-db initialization and data population'''
# Peewee has very poor type hinting support:
# pyright: reportUnknownMemberType=false
import random
from ..config import Hosts
from ..models.sql import pizza as schema
from ..models.payloads.v1 import pizza as payloads
from ..crud import pizza as db
def create_tables():
    """Create every pizza-schema table on the demo SQLite database
    (no-op for tables that already exist, per peewee's create_tables)."""
    Hosts.pizza_sqlite.instance().create_tables([
        schema.Topping,
        schema.Tag,
        schema.Pizza,
        schema.PizzaRating,
        schema.PizzaTopping,
        schema.PizzaTag
    ])
def populate_demo_data():
    """Insert a small set of demo pizzas and give each one a random
    anonymous rating between 3 and 5 stars."""
    pizzas = [
        payloads.Pizza(
            name='Margherita',
            toppings=['Mozarella', 'Basil'],
            tags=['classic', 'simple']
        ),
        payloads.Pizza(
            name='Pepperoni',
            toppings=['Mozarella', 'Pepperoni'],
            tags=['family favorite']
        ),
        payloads.Pizza(
            name='Veggie Supreme',
            toppings=['Mozarella', 'Onions', 'Mushrooms', 'Olives', 'Basil']
        )
    ]

    for pizza in pizzas:
        entity = db.create_complete_pizza(
            name=pizza.name,
            toppings=pizza.toppings,
            tags=pizza.tags
        )
        # Seed each pizza with one random 3-5 star rating.
        db.rate(entity, 'anonymous', random.randint(3, 5))
| danielskovli/python-rest-api | simple_rest_api/utils/init_db.py | init_db.py | py | 1,306 | python | en | code | 1 | github-code | 13 |
34278842191 | from django.core.management import setup_environ
import settings
setup_environ(settings)
from scheduler.models import *
from datetime import datetime
from TypeObservingReport import TypeObservingReport
class ScienceObservingReport(TypeObservingReport):
    "Quick report for Jay Lockman"

    def getTypes(self):
        """Return the names of the science observing types plus a
        'total' bucket used for the per-year sum."""
        # get the scientific observing types by excluding the
        # non-science ones
        nonScience = ['calibration'
                    , 'commissioning'
                    , 'testing'
                    , 'maintenance'
                     ]
        types = [t.type for t in Observing_Type.objects.all() if t.type not in nonScience]
        types.append('total')
        return types

    def computeForYear(self, year):
        """Accumulate observed time per science observing type (and the
        overall total) for the given year."""
        ps = self.getPeriodsForYear(year)
        for p in ps:
            obsTime, type = self.getObsTime(p)
            # non-science types will be returned as None and skipped
            if type is not None:
                self.data[year][type] += obsTime
                self.data[year]['total'] += obsTime

    def getObsTime(self, period):
        """Return (observed_time, observing_type) for a science period,
        or (None, None) when the period's project is not science."""
        if not period.session.project.is_science():
            return (None, None)
        return (period.accounting.observed(), period.session.observing_type.type)
if __name__ == '__main__':
r = ScienceObservingReport()
r.setYears([2010, 2011, 2012])
r.report()
| nrao/nell | tools/reports/ScienceObservingReport.py | ScienceObservingReport.py | py | 1,329 | python | en | code | 0 | github-code | 13 |
38231436836 | # coding=utf-8
import statistics
import time
import color
from red import red
from tzdatastruct import *
# 装饰器,用于process_x
def nocode(fn):
    """Decorator: mark *fn* as a no-op processing hook by setting
    fn.nocode = True, returning the function unchanged."""
    setattr(fn, 'nocode', True)
    return fn
class BaseProcessor():
'''处理器 基类'''
# 注册的处理器
registered = list()
# 处理用的正则式list
# 三个元素分别为:匹配正则,flags,替换
re_list = (
# 示例
[
r'',
0,
r''
],
)
@staticmethod
def should_me(local_processor):
    """Return True when this (base) processor handles the given local
    snippet format; only the 'null' format is accepted here."""
    return local_processor == 'null'
@staticmethod
def get_processor(local_processor):
    """Return an instance of the first registered processor that accepts
    *local_processor*, or None (after printing a message) when none does."""
    for i in BaseProcessor.registered:
        if i.should_me(local_processor):
            # print('processor found', i)
            return i()
    else:
        # for/else: runs only when the loop finished without a match
        # (a matching processor returns from the function directly).
        print('无法找到本地格式为{0}的处理器'.format(local_processor))
        return None
def __init__(self):
    # Reply list to process; populated later via set_rlist().
    self.rlist = None
def set_rlist(self, rlist):
    """Attach the list of replies this processor will work on."""
    self.rlist = rlist
def has_unhandled_quote(self, reply):
    """Whether the reply contains a quote this processor cannot handle;
    replies returning True will not be selected.  The base implementation
    never excludes anything — subclasses override."""
    return False
@staticmethod
def has_quote(reply):
    """Return True when the reply text contains a quote block,
    delimited by the 【引用开始】/【引用结束】 (quote start/end) markers."""
    p = red.re_dict(r'^.*?【引用开始】.*?【引用结束】')
    if p.search(reply.text):
        return True
    else:
        return False
@staticmethod
def reply_len_quote(reply):
    """Measure the character counts of one reply.

    Returns (quote_length, reply_length, sum of both, excluding markers)
    when a well-formed quote block is present, or (-1, -1, total_length)
    when there is no quote or the markers are unbalanced.
    """
    all_len = len(reply.text)
    tag_len = len('【引用开始】')
    p1 = reply.text.find('【引用开始】')
    p2 = reply.text.find('【引用结束】')
    if p1 == -1 and p2 == -1:
        # No quote at all: only the total length is meaningful.
        return -1, -1, all_len
    elif p1 != -1 and p2 != -1 and p2 > p1:
        # Both markers present and in order; lengths exclude the markers.
        return p2-p1-tag_len, \
               all_len-p2-tag_len, \
               all_len-p1-2*tag_len
    else:
        # Unbalanced markers: report and fall back to the total length.
        print('【引用开始】和【引用结束】不配对')
        return -1, -1, all_len
@staticmethod
def append_note(text, note):
    """Append a processing note to *text* on a new line, unless the note
    is already the suffix of the text; return the result."""
    if text.endswith(note):
        return text
    return text + '\n' + note
def do_re_list(self):
    """Apply every substitution rule in self.re_list to every reply,
    counting how many rules fired."""
    print('>用正则式列表替换')
    # Compile each rule's pattern and append the compiled object as a
    # 4th element of the rule.
    # NOTE(review): this appends to the (class-level) re_list entries on
    # every call — calling twice would add duplicate compiled patterns.
    for i in self.re_list:
        i.append(red.re_dict(''.join(i[0]), i[1]))
    process_count = 0
    for rpl in self.rlist:
        # i = 0
        for r in self.re_list:
            # r = [pattern, flags, replacement, compiled]
            rpl.text, n = r[3].subn(r[2], rpl.text)
            process_count += 1 if n > 0 else 0
            # debug dump of intermediate substitutions:
            # if '某些文字' in rpl.text:
            #     print(rpl.text, '\n', i, '>>>>>>>')
            #     i += 1
    print('...做了{0}次替换'.format(process_count))
def mark_empty(self):
    """Flag whitespace-only replies as not suggested."""
    print('>开始标记 空白回复')
    p = red.re_dict(r'^\s*$')
    blank_count = 0
    for rpl in self.rlist:
        if p.match(rpl.text):
            rpl.suggest = False
            blank_count += 1
    # Print the count in red when blanks were found, green otherwise.
    if blank_count:
        color_p = color.fore_color(blank_count, color.Fore.RED)
    else:
        color_p = color.fore_color(blank_count, color.Fore.GREEN)
    print('...有{0}个空白回复'.format(color_p))
def mark_reduplicate(self):
'''标记相邻重复'''
print('>开始标记 相邻重复的回复')
last_reply = None
reduplicate_list = []
r = red.re_dict(r'^\s*$')
# 查找重复
for rpl in self.rlist:
if last_reply and last_reply.text == rpl.text and \
not r.match(rpl.text):
reduplicate_list.append(rpl)
last_reply = rpl
# 处理重复
for i in reduplicate_list:
i.text = self.append_note(i.text, '【与上一条回复重复】')
i.suggest = False
reduplicate_count = len(reduplicate_list)
if reduplicate_count:
color_p = color.fore_color(reduplicate_count, color.Fore.RED)
else:
color_p = color.fore_color(reduplicate_count, color.Fore.GREEN)
print('...有{0}个相邻重复的回复'.format(color_p))
    def mark_multireply(self):
        '''标记连续重复引用 — merge consecutive replies that quote the same text
        into the first one, and un-suggest the merged-away replies.'''
        print('>开始标记 连续重复引用的回复')
        # group(1): everything up to and including the quote body;
        # group(2): the reply text after the closing tag
        r = red.re_dict(r'^(.*?【引用开始】.*?)【引用结束】\n?(.*)$', red.S)
        last_reply = None  # previous reply that contained a quote
        last_quote = None  # the quote part of that reply
        count = 0
        for rpl in self.rlist:
            if not rpl.suggest:
                continue
            m = r.match(rpl.text)
            if m == None:
                # a reply without a quote breaks the run
                last_reply = None
                last_quote = None
                continue
            temp = m.group(1)
            if last_quote == temp:
                # same quote as the previous reply: fold this one into it
                last_reply.text += '\n\n【补充回复】\n' + m.group(2)
                rpl.text = ''
                rpl.suggest = False
                count += 1
            else:
                last_reply = rpl
                last_quote = temp
        if count:
            color_p = color.fore_color(count, color.Fore.RED)
        else:
            color_p = color.fore_color(count, color.Fore.GREEN)
        print('...有{0}个连续重复引用的回复'.format(color_p))
def mark_cantdeal(self):
'''标记无法处理'''
print('>开始标记 无法处理的引用')
quote_count = 0
for rpl in self.rlist:
if self.has_unhandled_quote(rpl):
rpl.text = self.append_note(rpl.text, '【无法处理的回复】')
rpl.suggest = False
quote_count += 1
if quote_count:
color_p = color.fore_color(quote_count, color.Fore.RED)
else:
color_p = color.fore_color(quote_count, color.Fore.GREEN)
print('...有{0}个无法处理的引用'.format(color_p))
    @nocode
    def process_1(self):
        '''自定义处理1 — custom step 1 (@nocode marks it as not implemented,
        so process() skips it)'''
        pass

    @nocode
    def process_2(self):
        '''自定义处理2 — custom step 2 (skipped while decorated with @nocode)'''
        pass

    @nocode
    def process_3(self):
        '''自定义处理3 — custom step 3 (skipped while decorated with @nocode)'''
        pass
def process(self):
'''处理流程'''
if self.rlist == None:
print('rlist为None,不能处理')
return
# 预处理
if self.re_list != BaseProcessor.re_list:
print('预处理开始:')
t1 = time.perf_counter()
self.do_re_list()
t2 = time.perf_counter()
print('预处理结束,运行了%.5f秒\n' % (t2-t1))
if not hasattr(self.process_1, 'nocode'):
print('Process 1开始:')
t1 = time.perf_counter()
self.process_1()
t2 = time.perf_counter()
print('Process 1结束,运行了%.5f秒\n' % (t2-t1))
if not hasattr(self.process_2, 'nocode'):
print('Process 2开始:')
t1 = time.perf_counter()
self.process_2()
t2 = time.perf_counter()
print('Process 2结束,运行了%.5f秒\n' % (t2-t1))
if not hasattr(self.process_3, 'nocode'):
print('Process 3开始:')
t1 = time.perf_counter()
self.process_3()
t2 = time.perf_counter()
print('Process 3结束,运行了%.5f秒\n' % (t2-t1))
# custom.py的process(p)函数
try:
from custom import process as custom_process
except:
print('无法import custom.py里的process(p)函数')
else:
if not hasattr(custom_process, 'nocode'):
print('custom.py的process(p)函数开始:')
t1 = time.perf_counter()
custom_process(self)
t2 = time.perf_counter()
print('custom.py的process(p)函数结束,运行了%.5f秒\n' % (t2-t1))
# 后处理
print('后处理开始:')
t1 = time.perf_counter()
self.mark_empty()
self.mark_reduplicate()
self.mark_multireply()
self.mark_cantdeal()
t2 = time.perf_counter()
print('后处理结束,运行了%.5f秒\n' % (t2-t1))
    def statistic(self):
        '''统计 — print counts and character-length statistics of the replies.'''
        # Total replies --------------------------
        print('回复总数:', len(self.rlist))
        # Number of selected replies
        selected_count = sum(1 for r in self.rlist if r.select)
        print('选择的回复数:', selected_count)
        print()
        # Character-count statistics --------------------------
        print('以下的统计不包括空白、重复和无法处理的回复:\n')
        # Exclude replies that should not take part in the statistics
        p_space = red.re_dict(r'^\s*$')
        def should_pick(reply):
            # skip blank, duplicate-marked and unhandled-marked replies
            if p_space.match(reply.text):
                return False
            if reply.text.endswith('【与上一条回复重复】'):
                return False
            if reply.text.endswith('【无法处理的回复】'):
                return False
            return True
        lenlist = [self.reply_len_quote(r)
                   for r in self.rlist if should_pick(r)]
        # quoted replies: length of the quoted part
        qlenlist = [x[0] for x in lenlist if x[0] != -1]
        # quoted replies: length of the reply part
        rlenlist = [x[1] for x in lenlist if x[0] != -1]
        # unquoted replies: total length
        noqlenlist = [x[2] for x in lenlist if x[0] == -1]
        del lenlist
        def num(lst, func):
            # apply func, but return 0 for an empty list (mean/median would raise)
            if not lst:
                return 0
            else:
                return func(lst)
        print(' (引用部分 回复部分) 无引用回复')
        print(' 总 数 : {0:<8} + {1:<8} = {2}'.format(
            len(qlenlist),
            len(noqlenlist),
            len(qlenlist) + len(noqlenlist)
            )
        )
        print('最长的字数: {0:<8} {1:<8} {2:<8}'.format(
            num(qlenlist, max),
            num(rlenlist, max),
            num(noqlenlist, max)
            )
        )
        print('字数平均数: {0:<8.2f} {1:<8.2f} {2:<8.2f}'.format(
            num(qlenlist, statistics.mean),
            num(rlenlist, statistics.mean),
            num(noqlenlist, statistics.mean)
            )
        )
        print('字数中位数: {0:<8.0f} {1:<8.0f} {2:<8.0f}'.format(
            num(qlenlist, statistics.median),
            num(rlenlist, statistics.median),
            num(noqlenlist, statistics.median)
            )
        )
        print('总体标准差: {0:<8.2f} {1:<8.2f} {2:<8.2f}'.format(
            num(qlenlist, statistics.pstdev),
            num(rlenlist, statistics.pstdev),
            num(noqlenlist, statistics.pstdev)
            )
        )
        # Length distribution ------------------------------
        # e_table generated from y=e**x for x: 0.5, 1.0, 1.5, 2.0, 2.5, 3.0...
        e_table = [0, 7, 12, 20, 33, 55, 90, 148, 245, 403, \
                   665, 1097, 1808, 2981, 4915, 8103, 13360]
        # length-distribution helper
        def get_len_distribution(lenlist):
            '''字数分布 — bucket lengths into the e_table bins (last = overflow)'''
            table_len = len(e_table)
            count_table = [0 for i in range(table_len+1)]
            for length in lenlist:
                for i in range(table_len):
                    if length < e_table[i]:
                        count_table[i] += 1
                        break
                else:
                    count_table[-1] += 1
            return count_table
        # compute the distributions
        qdis = get_len_distribution(qlenlist)
        rdis = get_len_distribution(rlenlist)
        ndis = get_len_distribution(noqlenlist)
        # print the distributions
        print('\n字数分布')
        print(' '*16, '(引用部分 回复部分) 无引用回复')
        for i in range(1, len(e_table)):
            print('{0:>6}<= x <{1:<5} : {2:<8} {3:<8} {4:<8}'.format(
                e_table[i-1],
                e_table[i],
                qdis[i],
                rdis[i],
                ndis[i]
                )
            )
        print('{0:>6}<= x : {1:<8} {2:<8} {3:<8}'.format(
            e_table[-1],
            qdis[-1],
            rdis[-1],
            ndis[-1]
            )
        )
        print(' '*8,'='*35)
        print(' '*12, '总数 : {0:<8} {1:<8} {2:<8}'.format(
            len(qlenlist),
            len(rlenlist),
            len(noqlenlist)
            )
        )
# processor decorator
def processor(cls):
    '''Class decorator: register cls in BaseProcessor.registered.

    Non-subclasses and already-registered classes are reported but the
    class is always returned unchanged.
    '''
    if not issubclass(cls, BaseProcessor):
        print('注册自动处理器时出错,{0}不是BaseProcessor的子类'.format(cls))
    elif cls in BaseProcessor.registered:
        print('%s already exist in processors' % cls)
    else:
        BaseProcessor.registered.append(cls)
    return cls
@processor  # register NullProcessor as the handler for the 'null' format
class NullProcessor(BaseProcessor):
    # Inherits all default behaviour; exists only so 'null' input resolves.
    pass
| animalize/tz2txt | tz2txt/BaseProcessor.py | BaseProcessor.py | py | 14,372 | python | en | code | 48 | github-code | 13 |
# Configuration for the `punch` version-bump tool (inferred from the file
# name punch_config.py -- confirm against the punch documentation).
__config_version__ = 1

# How the version is rendered: major.minor.patch.
GLOBALS = {
    'serializer': '{{major}}.{{minor}}.{{patch}}',
}

# Files in which the version string gets rewritten on a bump.
FILES = [
    "setup.py",
    "rfc5424logging/__init__.py",
    "docs/conf.py",
]

# Version parts, most significant first.
VERSION = ['major', 'minor', 'patch']

# Commit each bump to git with this message template.
VCS = {
    'name': 'git',
    'commit_message': "Version updated from {{ current_version }} to {{ new_version }}",
}
| jobec/rfc5424-logging-handler | punch_config.py | punch_config.py | py | 330 | python | en | code | 47 | github-code | 13 |
15214295902 | # -*- coding: utf-8 -*-
# greburs by InteGreat
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.tools import float_round
class PurchaseOrder(models.Model):
    """Purchase order extended with links to the sale orders and
    manufacturing orders that sourced it (via the procurement group or
    via purchase-request lines)."""
    _inherit = 'purchase.order'

    # Sale orders that originated this purchase.
    sale_order_ids = fields.Many2many(comodel_name="sale.order", string="OV",
                                      compute="_compute_from_group_id", store=True)
    # Manufacturing orders that originated this purchase.
    production_ids = fields.Many2many(comodel_name="mrp.production", string="OP",
                                      compute="_compute_from_group_id", store=True)

    @api.depends('group_id.sale_order_ids', 'group_id.mrp_production_ids', 'group_id.production_ids')
    def _compute_from_group_id(self):
        """Collect source SOs/MOs from the procurement group plus the
        purchase-request lines attached to the order lines."""
        for purchase in self:
            purchase.sale_order_ids = [(6, 0, purchase.group_id.sale_order_ids.ids +
                purchase.order_line.purchase_request_lines.mapped('sale_line_id.order_id.id'))]
            purchase.production_ids = [(6, 0, purchase.group_id.mrp_production_ids.ids +
                purchase.order_line.purchase_request_lines.mapped('request_id.group_id.mrp_production_ids').ids)]

    @api.depends('order_line.move_dest_ids.group_id.mrp_production_ids', 'production_ids')
    def _compute_mrp_production_count(self):
        # super(PurchaseOrder, self)._compute_mrp_production_count()
        # NOTE(review): the super() call above is commented out, so this
        # increments mrp_production_count on top of whatever value the field
        # already holds -- confirm the standard compute runs first, otherwise
        # the count starts from a stale/False value.
        for purchase in self:
            purchase._compute_from_group_id()
            purchase.mrp_production_count += len(purchase.production_ids)

    # OVERRIDE
    def _get_sale_orders(self):
        """Also include sale orders reached through purchase-request lines."""
        so = self.order_line.sale_order_id
        so += self.order_line.purchase_request_lines.sale_line_id.order_id
        return so

    # OVERRIDE
    def action_view_mrp_productions(self):
        """Open the manufacturing order(s) sourcing this purchase order."""
        self.ensure_one()
        mrp_production_ids = (
            self.order_line.move_dest_ids.group_id.mrp_production_ids |
            self.order_line.move_ids.move_dest_ids.group_id.mrp_production_ids |
            self.production_ids
        ).ids
        action = {
            'res_model': 'mrp.production',
            'type': 'ir.actions.act_window',
        }
        if len(mrp_production_ids) == 1:
            # a single MO: open it directly in form view
            action.update({
                'view_mode': 'form',
                'res_id': mrp_production_ids[0],
            })
        else:
            action.update({
                'name': _("Manufacturing Source of %s", self.name),
                'domain': [('id', 'in', mrp_production_ids)],
                'view_mode': 'tree,form',
            })
        return action

    def _add_supplier_to_product(self):
        # OVERRIDE: we do not want prices to be added to supplierinfo!
        return
class PurchaseOrderLine(models.Model):
    """Purchase order line with a flag telling whether the unit price was
    entered manually (differs from the supplier pricelist price)."""
    _inherit = 'purchase.order.line'

    # True when price_unit deviates from the seller's pricelist price,
    # or when no product / no price could be determined.
    manual_price = fields.Boolean(compute='_compute_is_manual_price')

    @api.onchange('price_unit')
    def _compute_is_manual_price(self):
        """Recompute manual_price from the seller pricelist.

        Bug fix: the original mixed ``self`` and ``line`` inside the
        ``for line in self`` loop (``self.order_id``, ``self.product_uom``),
        which raises an "expected singleton" error whenever the recordset
        holds more than one line; ``line`` is now used consistently.
        """
        for line in self:
            line.manual_price = False
            if line.product_id and line.price_unit != 0.0:
                params = {'order_id': line.order_id}
                seller = line.product_id._select_seller(
                    partner_id=line.partner_id,
                    quantity=line.product_qty,
                    date=line.order_id.date_order and line.order_id.date_order.date(),
                    uom_id=line.product_uom,
                    params=params)
                # pricelist price, with supplier taxes mapped to line taxes
                price_unit = self.env['account.tax']._fix_tax_included_price_company(
                    seller.price, line.product_id.supplier_taxes_id,
                    line.taxes_id, line.company_id) if seller else 0.0
                if price_unit and seller and line.order_id.currency_id and seller.currency_id != line.order_id.currency_id:
                    price_unit = seller.currency_id._convert(
                        price_unit, line.order_id.currency_id,
                        line.order_id.company_id, line.date_order or fields.Date.today())
                if seller and line.product_uom and seller.product_uom != line.product_uom:
                    price_unit = seller.product_uom._compute_price(price_unit, line.product_uom)
                if price_unit != round(line.price_unit, ndigits=6):
                    line.manual_price = True
            else:
                line.manual_price = True
| sgrebur/e3a | integreat_sale_mrp_mtso/models/purchase.py | purchase.py | py | 4,158 | python | en | code | 0 | github-code | 13 |
29835710015 | import numpy as np
from sklearn.model_selection import train_test_split
import cv2
import os
import Recognize
import re
import argparse
from itertools import product
def cross_validate(file_path, hyper_args):
    """Load the sample recognition dataset from *file_path* and run the
    hyper-parameter search on it."""
    plates, names = [], []
    for fname in os.listdir(file_path):
        # ground-truth plate text = file name up to the first '_' or '.'
        names.append(re.split("_|\.", fname)[0])
        plates.append(cv2.imread(file_path + "/" + fname))
    train_and_test_model_recognition(plates, names, hyper_args)
def train_and_test_model_recognition(x, y, hyper_args):
    """Grid-search hyper-parameter combinations on a train split and
    evaluate the best one on the held-out test split.

    x: list of plate images; y: list of ground-truth plate strings;
    hyper_args: dict mapping hyper-parameter name -> list of candidate values.
    Returns the best argparse.Namespace of hyper-parameters.
    """
    best_hyper_arg = None
    best_train = 0
    best_output = None
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42, shuffle=True)
    runs = 0
    # Cartesian product over all hyper-parameter value combinations
    for v in product(*hyper_args.values()):
        print(runs)
        runs += 1
        hyper_arg_dict = dict(zip(hyper_args, v))
        # Re-pack the combination into an argparse.Namespace, which is the
        # shape Recognize.segment_and_recognize expects.
        # NOTE(review): parse_args() reads sys.argv -- this works only when
        # the script is launched without extra CLI arguments; confirm.
        parser = argparse.ArgumentParser()
        for k, v in hyper_arg_dict.items():
            parser.add_argument('--' + str(k), type=type(v), default=v)
        hyper_arg = parser.parse_args()
        matches_plates, _, _ = evaluate_plates(x_train, y_train, hyper_arg)
        if matches_plates > best_train or best_train == 0:  # natural selection of results that improve
            best_train = matches_plates
            best_hyper_arg = hyper_arg
    # Final evaluation of the winning combination on the test split.
    best_matches_plates, best_matches_chars, best_output = evaluate_plates(x_test, y_test, best_hyper_arg)
    print("Best Percentage of License Plates:", best_matches_plates)
    print("Best Percentage of Characters:", best_matches_chars)
    #print("\nBest match: ")
    #print("Train set: " + str(best_output))
    #print("Test set: " + str(test_Y))
    print("Best hyper-parameters: " + str(best_hyper_arg))
    return best_hyper_arg
def evaluate_plates(images, ground_truth, hyper_args):
    """Run recognition on all images and score against the ground truth.

    Returns (fraction of fully matched plates, mean fraction of matched
    characters, list of recognized plate strings).
    """
    recognized_plates = 0
    percentage = 0
    plates = Recognize.segment_and_recognize(images, hyper_args)
    for i, plate in enumerate(plates):
        res = evaluate_single_plate(plate, ground_truth[i])
        recognized_plates += res[0]
        percentage += res[1]
    # normalise both accumulators to fractions over the whole set
    recognized_plates /= len(images)
    percentage /= len(images)
    #print("Percentage of Recognized Plates:", recognized_plates * 100, "%")
    #print("Percentage of Recognized Characters:", percentage * 100, "%")
    return recognized_plates, percentage, plates
# Evaluates a single plate by comparing it with its label
def evaluate_single_plate(plate, label):
    """Compare one recognized plate string to its ground-truth label.

    Returns (success, char_ratio): success is 1 only on an exact match;
    char_ratio is the fraction of characters recognized, derived from the
    Levenshtein distance over the longer of the two strings.
    """
    dist = string_dist(plate, label)
    longest = max(len(plate), len(label))
    char_ratio = (longest - dist) / longest
    #if dist != 0: print(plate, label, "\n")
    return (1 if dist == 0 else 0), char_ratio
# Levenshtein distance between two strings, for plate character accuracy
def string_dist(str1, str2):
    """Return the Levenshtein edit distance between *str1* and *str2*
    (classic dynamic-programming table, unit costs)."""
    m, n = len(str1), len(str2)
    dp = np.zeros((m + 1, n + 1))
    # distance from/to the empty prefix
    dp[1:, 0] = np.arange(1, m + 1)
    dp[0, 1:] = np.arange(1, n + 1)
    for j in range(n):
        for i in range(m):
            cost = 0 if str1[i] == str2[j] else 1
            dp[i + 1, j + 1] = min(dp[i, j + 1] + 1,      # deletion
                                   dp[i + 1, j] + 1,      # insertion
                                   dp[i, j] + cost)       # substitution
    return dp[m, n]
| vdakov/license-plate-recognition-pipeline | cross_validation_recognition.py | cross_validation_recognition.py | py | 3,458 | python | en | code | 0 | github-code | 13 |
41643279595 | # Method: [Kadane's Algorithm] Calculate curr_max by adding new num, and update max_sum if curr_max is greater than it.
# TC: O(n), since traversing the list only once
# SC: O(1), since no extra space is used
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Kadane's algorithm: O(n) time, O(1) extra space.

        Track the best subarray sum ending at the current element and the
        best sum seen overall. Assumes nums is non-empty.
        """
        best = current = nums[0]
        for value in nums[1:]:
            # either extend the running subarray or restart at `value`
            current = max(value, current + value)
            best = max(best, current)
        return best
12344609095 | # Program for printing top k frequent elements from the given input array
# Naive approach is to keep a count of all numbers in the array in a extra array in a tupled way
# (element, count) and then sort it on the basis of their frequencies and return the elements for
# top k frequencies and this would take atleast 0(N*lg(N)).
# Can we do better than this ?
# So, we can observe since we need count, so we will use collections.Counter for better performance
# and also we can use Counter.most_common() which inbuilt uses heap when given some "k" as input.
# So, overall we are building the array to sort it and return it k times by popping from root.
# So, either we can build a heap and pop it k times or we can use directly above said functions.
# In both case, max bound will be 0(k * (lg(N))) and space : 0(N) for building the heap.
# Can we do more better than this ?
# [0(N) + 0(K) + N*lg(N)] = bounded by N * lg(N) since K < N solution :
from typing import List
from collections import Counter
def topKFrequent(nums: List[int], k: int) -> List[int]:
    """Return the k most frequent elements of *nums*.

    Counter.most_common(k) uses a heap internally, so this runs in
    O(N log k) beyond the O(N) counting pass.
    """
    return [value for value, _ in Counter(nums).most_common(k)]
if __name__ == '__main__':
    # Demo: the two most frequent values of the sample array -> [1, 2]
    arr, k = [1, 1, 1, 2, 2, 3], 2
    print(topKFrequent(arr, k))
1376607641 | """
Functions to project renewals of contracts based on most recent inception date
Can be daily or monthly accuracy
# Sample products: coverage_period, lapse_rate, loss_ratio, comm_ratio, gwp, contracts
products = [[1, 0.1, 0.7, 0.2, 10, 10],
[3, 0.1, 1.2, 0.2, 10, 10],
[12, 0.1, 0.7, 0.4, 10, 10],
[36, 0.1, 0.7, 0.2, 10, 10]]
# Monthly
import pandas as pd
ref_date = pd.Period('2022-01')
projection_horizon = 84
# Daily
from datetime import date
ref_date = date(2022,1,2)
ref_date = pd.to_datetime(ref_date)
df = pd.DataFrame([[ref_date, *prod] for prod in products], columns=[ 'gwp_from', 'coverage_period', 'lapse_rate', 'loss_ratio', 'comm_ratio', 'gwp', 'contracts'])
import insurance_gi as gi
df = gi.renewals(df, projection_horizon)
df = gi.lapses(df)
df = gi.financials(df)
"""
import pandas as pd
import numpy as np
def add_number_of_renewals(df: pd.DataFrame, projection_horizon: int) -> pd.DataFrame:
    """
    How many times does the contract renew within the projection horizon.
    coverage_period is defined in months.
    Adds a renewal index plus a month offset since the initial date:
    - m_offset is used to build future coverage periods
    - ren_idx drives the application of lapse rates
    """
    # one (renewal index, month offset) pair per renewal that fits in the horizon
    renewal_pairs = df.coverage_period.apply(
        lambda months: [(idx, months * idx)
                        for idx in range(int(projection_horizon / months))])
    df['m_offset'] = renewal_pairs
    df = df.explode('m_offset')
    df[['ren_idx', 'm_offset']] = pd.DataFrame(df.m_offset.to_list(), index=df.index)
    return df
def add_future_coverage_periods(df):
    """Shift gwp_from by m_offset months and derive gwp_until
    (= gwp_from + coverage_period). Accepts daily datetimes or monthly periods."""
    if df.gwp_from.dtype == 'datetime64[ns]':
        # daily accuracy: shift by calendar-month offsets
        to_offset = lambda months: pd.DateOffset(months=months)
    elif df.gwp_from.dtype == 'period[M]':
        # monthly periods: plain integer month arithmetic
        to_offset = lambda months: months
    else:
        return df
    for offset in set(df['m_offset'].unique()):
        df.loc[df['m_offset'] == offset, 'gwp_from'] += to_offset(offset)
    # coverage end = coverage start + coverage period
    df['gwp_until'] = df['gwp_from']
    for months in set(df['coverage_period'].unique()):
        df.loc[df['coverage_period'] == months, 'gwp_until'] += to_offset(months)
    return df
def adjust_gwp_from_monthends(df):
    """Snap every gwp_from date to its month end so premiums group by
    reporting month; dates already at a month end are left untouched
    (pandas MonthEnd would otherwise jump them into the next month)."""
    not_month_end = ~df['gwp_from'].dt.is_month_end
    df.loc[not_month_end, 'gwp_from'] += pd.offsets.MonthEnd(0)
    return df
def premium_dates_to_period(df):
    """Convert the premium date columns (when present) to monthly periods."""
    for column in ('gwp_from', 'acc_month'):
        if column in df.columns:
            df[column] = df[column].dt.to_period('M')
    return df
def generate_earnings_pattern(df):
    """
    Build an accident-month series with, per month, the balance of remaining
    earnings at the end of the accident month (earnings_remaining) and the
    amount earned within the month (earnings_current).
    """
    if df.gwp_from.dtype == 'datetime64[ns]':
        # Daily accuracy: UPR runs linearly from 1 down to 0 across the
        # coverage period and is resampled at month ends.
        df['earnings_bop'] = - 1.
        df['earnings_eop'] = 0.
        # Index of the 1st of each month following the gwp date, needed for
        # correct interpolation: every month after gwp_from until the UPR
        # has completely run off.
        df['upr_idx_bom'] = df.apply(
            lambda x: pd.date_range(x.gwp_from, x.gwp_until + pd.DateOffset(months=1), freq='MS', inclusive='right'),
            axis=1)
        # Matching end-of-month index consistent with the reporting date
        # (end of the last day of a month == start of the 1st of the next).
        df['upr_idx_eom'] = df.apply(
            lambda x: pd.date_range(x.gwp_from,
                                    x.gwp_until + pd.DateOffset(months=1), freq='M', inclusive='left'), axis=1)
        # Heavy step: join the month-start index to the gwp from/until dates,
        # interpolate the UPR linearly from the initial value to zero along
        # the index, then resample only the month-end figures.
        df['upr_s'] = df.apply(
            lambda x:
            pd.Series([x.earnings_bop, x.earnings_eop], index=[x.gwp_from, x.gwp_until])
            .reindex(np.unique([x.gwp_from, x.gwp_until] + x.upr_idx_bom.tolist()))
            .interpolate('index')
            .reindex(x.upr_idx_bom).to_list(),
            axis=1)
        # running earning-period counter per contract row
        df['earning_series'] = df.upr_s.apply(lambda x: list(range(len(x))))
        df['upr_x'] = df.apply(lambda x: list(zip(x.earning_series, x.upr_idx_eom, x.upr_s)), axis=1)
        df = df.drop(columns=['gwp_until', 'earnings_bop', 'earnings_eop', 'upr_idx_bom', 'upr_idx_eom', 'upr_s', 'earning_series'])
        # explode to one row per (contract renewal, accident month)
        df = df.explode('upr_x')
        df[['earning_period', 'acc_month', 'earnings_remaining']] = pd.DataFrame(df.upr_x.to_list(), index=df.index)
        df = df.drop(columns=['upr_x'])
        # earnings in the month = change in remaining earnings; the first
        # month (NaN diff) carries the full remaining balance
        df['earnings_current'] = df.groupby(by=df.index)['earnings_remaining'].transform(
            lambda x: x.diff())
        df.earnings_current = df.earnings_current.fillna(df.earnings_remaining)
    elif df.gwp_from.dtype == 'period[M]':
        # Monthly accuracy: straight 1/n earning per month of coverage.
        # f(x) builds, for an x-month coverage, one tuple per month:
        # (month index, remaining fraction at end of month, earned fraction
        #  in the month, is-initial-recognition flag).
        # NOTE(review): g() yields 1/y - 1 in month 0 (initial recognition
        # offset) -- confirm against the intended accounting treatment.
        g = lambda x, y: 1 / y - 1 if x == 0 else 1 / y
        h = lambda x: x == 0
        f = lambda x: list([(i, 1 - (i + 1) / x, g(i, x), h(i)) for i in range(0, x)])
        df['temp_series'] = df.coverage_period.apply(f)
        df = df.explode('temp_series')
        df[['earning_period', 'earnings_remaining', 'earnings_current', 'initial_recognition']] = pd.DataFrame(df.temp_series.to_list(), index=df.index)
        # accident month = coverage start shifted by the earning period
        df['acc_month'] = df.gwp_from
        for mth in set(df['earning_period'].unique()):
            df.loc[df.earning_period == mth, 'acc_month'] += mth
        df = df.drop(columns=['temp_series', 'coverage_period'])
    return df
def renewals(df: pd.DataFrame, projection_horizon: int) -> pd.DataFrame:
    """
    Project renewals and earnings patterns for each contract row.

    :param df: columns: gwp_from - day or monthly period, coverage_period -> number of months
    :param projection_horizon: number of months to project into the future
    :return: frame exploded to one row per renewal x accident month
    """
    df = add_number_of_renewals(df, projection_horizon)
    df = add_future_coverage_periods(df)
    df = generate_earnings_pattern(df)
    return df
| pdavidsonFIA/insurance_gi | insurance_gi/renewals.py | renewals.py | py | 8,067 | python | en | code | 0 | github-code | 13 |
71312339858 | import torch
import torch.nn as nn
import copy
class TransformerEncoderLayer(nn.Module):
    """One spatial (within-frame) encoder layer: multi-head self-attention
    followed by a position-wise feed-forward block, each with residual
    connection, dropout and post-LayerNorm."""

    def __init__(self, embed_dim=1936, nhead=4, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout)
        self.linear1 = nn.Linear(embed_dim, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, embed_dim)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, src, input_key_padding_mask):
        # local attention: src is (seq, batch, embed_dim); padded positions
        # are excluded via the key padding mask
        src2, local_attention_weights = self.self_attn(src, src, src, key_padding_mask=input_key_padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # feed-forward block with residual
        src2 = self.linear2(self.dropout(nn.functional.relu(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src, local_attention_weights
class TransformerDecoderLayer(nn.Module):
    """One temporal decoder layer: multi-head self-attention over the frame
    window (position embedding added to queries/keys only, not values),
    then a feed-forward block, each with residual + post-LayerNorm."""

    def __init__(self, embed_dim=1936, nhead=4, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        self.multihead2 = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout)
        self.linear1 = nn.Linear(embed_dim, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, embed_dim)
        self.norm3 = nn.LayerNorm(embed_dim)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

    def forward(self, global_input, input_key_padding_mask, position_embed):
        # frame encoding is added to query and key; values stay unmodified
        tgt2, global_attention_weights = self.multihead2(query=global_input+position_embed, key=global_input+position_embed,
                                                         value=global_input, key_padding_mask=input_key_padding_mask)
        tgt = global_input + self.dropout2(tgt2)
        tgt = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(nn.functional.relu(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, global_attention_weights
class TransformerEncoder(nn.Module):
    """Stack of spatial encoder layers; also collects the per-layer
    attention weight maps."""

    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers

    def forward(self, input, input_key_padding_mask):
        output = input
        # weights[i] holds layer i's attention map
        weights = torch.zeros([self.num_layers, output.shape[1], output.shape[0], output.shape[0]]).to(output.device)
        for i, layer in enumerate(self.layers):
            output, local_attention_weights = layer(output, input_key_padding_mask)
            weights[i] = local_attention_weights
        if self.num_layers > 0:
            return output, weights
        else:
            # zero layers: input passes through unchanged, no weights
            return output, None
class TransformerDecoder(nn.Module):
    """Stack of temporal decoder layers; also collects per-layer attention
    weight maps. (embed_dim parameter is accepted but unused.)"""

    def __init__(self, decoder_layer, num_layers, embed_dim):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers

    def forward(self, global_input, input_key_padding_mask, position_embed):
        output = global_input
        # weights[i] holds layer i's attention map
        weights = torch.zeros([self.num_layers, output.shape[1], output.shape[0], output.shape[0]]).to(output.device)
        for i, layer in enumerate(self.layers):
            output, global_attention_weights = layer(output, input_key_padding_mask, position_embed)
            weights[i] = global_attention_weights
        if self.num_layers>0:
            return output, weights
        else:
            return output, None
class transformer(nn.Module):
    ''' Spatial Temporal Transformer
    local_attention: spatial encoder
    global_attention: temporal decoder
    position_embedding: frame encoding (window_size*dim)
    mode: both--use the features from both frames in the window
          latter--use the features from the latter frame in the window
    '''
    def __init__(self, enc_layer_num=1, dec_layer_num=3, embed_dim=1936, nhead=8, dim_feedforward=2048,
                 dropout=0.1, mode=None):
        super(transformer, self).__init__()
        self.mode = mode

        encoder_layer = TransformerEncoderLayer(embed_dim=embed_dim, nhead=nhead, dim_feedforward=dim_feedforward,
                                                dropout=dropout)
        self.local_attention = TransformerEncoder(encoder_layer, enc_layer_num)

        decoder_layer = TransformerDecoderLayer(embed_dim=embed_dim, nhead=nhead, dim_feedforward=dim_feedforward,
                                                dropout=dropout)
        self.global_attention = TransformerDecoder(decoder_layer, dec_layer_num, embed_dim)

        self.position_embedding = nn.Embedding(2, embed_dim)  # present and next frame
        nn.init.uniform_(self.position_embedding.weight)

    def forward(self, features, im_idx):
        """features: (num_relations, dim) relation features;
        im_idx: frame index per relation (assumed sorted by frame)."""
        rel_idx = torch.arange(im_idx.shape[0])

        l = torch.sum(im_idx == torch.mode(im_idx)[0])  # the highest box number in the single frame
        b = int(im_idx[-1] + 1)
        # pad per-frame relation features into (l, b, dim) for the encoder
        rel_input = torch.zeros([l, b, features.shape[1]]).to(features.device)
        masks = torch.zeros([b, l], dtype=torch.uint8).to(features.device)
        # TODO Padding/Mask maybe don't need for-loop
        for i in range(b):
            rel_input[:torch.sum(im_idx == i), i, :] = features[im_idx == i]
            masks[i, torch.sum(im_idx == i):] = 1

        # spatial encoder
        local_output, local_attention_weights = self.local_attention(rel_input, masks)
        # flatten back to (num_relations, dim), dropping padded slots
        local_output = (local_output.permute(1, 0, 2)).contiguous().view(-1, features.shape[1])[masks.view(-1) == 0]

        global_input = torch.zeros([l * 2, b - 1, features.shape[1]]).to(features.device)
        position_embed = torch.zeros([l * 2, b - 1, features.shape[1]]).to(features.device)
        idx = -torch.ones([l * 2, b - 1]).to(features.device)
        idx_plus = -torch.ones([l * 2, b - 1], dtype=torch.long).to(features.device)  # TODO

        # sliding window size = 2: window j holds frames j and j+1
        for j in range(b - 1):
            global_input[:torch.sum((im_idx == j) + (im_idx == j + 1)), j, :] = local_output[(im_idx == j) + (im_idx == j + 1)]
            idx[:torch.sum((im_idx == j) + (im_idx == j + 1)), j] = im_idx[(im_idx == j) + (im_idx == j + 1)]
            idx_plus[:torch.sum((im_idx == j) + (im_idx == j + 1)), j] = rel_idx[(im_idx == j) + (im_idx == j + 1)]  # TODO
            # frame encoding: 0 for the earlier frame, 1 for the latter
            position_embed[:torch.sum(im_idx == j), j, :] = self.position_embedding.weight[0]
            position_embed[torch.sum(im_idx == j):torch.sum(im_idx == j)+torch.sum(im_idx == j+1), j, :] = self.position_embedding.weight[1]

        # all-zero rows are padding -> mask them out in the decoder
        global_masks = (torch.sum(global_input.view(-1, features.shape[1]),dim=1) == 0).view(l * 2, b - 1).permute(1, 0)
        # temporal decoder
        global_output, global_attention_weights = self.global_attention(global_input, global_masks, position_embed)

        # NOTE(review): for mode not in {'both', 'latter'} the output stays
        # all zeros -- confirm callers always pass a valid mode.
        output = torch.zeros_like(features)

        if self.mode == 'both':
            # average each frame's features over the two windows containing it
            for j in range(b - 1):
                if j == 0:
                    output[im_idx == j] = global_output[:, j][idx[:, j] == j]

                if j == b - 2:
                    output[im_idx == j+1] = global_output[:, j][idx[:, j] == j+1]
                else:
                    output[im_idx == j + 1] = (global_output[:, j][idx[:, j] == j + 1] +
                                               global_output[:, j + 1][idx[:, j + 1] == j + 1]) / 2

        elif self.mode == 'latter':
            # take each frame's features from the window where it is the latter frame
            for j in range(b - 1):
                if j == 0:
                    output[im_idx == j] = global_output[:, j][idx[:, j] == j]

                output[im_idx == j + 1] = global_output[:, j][idx[:, j] == j + 1]

        return output, global_attention_weights, local_attention_weights
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
| yrcong/STTran | lib/transformer.py | transformer.py | py | 8,023 | python | en | code | 155 | github-code | 13 |
21880766691 | """
metrics_logger.py - implements a custom logger that logs metrics across epochs,
which we can then used to plot metrics. This is useful when you don't want to
use Tensorboard for viewing epoch-wise progress of training
Thanks due to Marine Galantin
(@see: https://stackoverflow.com/questions/69276961/how-to-extract-loss-and-accuracy-from-logger-by-each-epoch-in-pytorch-lightning)
"""
import collections
from pytorch_lightning.loggers import Logger
from pytorch_lightning.loggers.logger import rank_zero_experiment
from pytorch_lightning.utilities import rank_zero_only
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class MetricsLogger(Logger):
    def __init__(self):
        super().__init__()
        # metric name -> list of values logged across steps/epochs
        self.history = collections.defaultdict(list)  # copy not necessary here
        # The defaultdict in contrast will simply create any items that you try to access
    @property
    def name(self):
        """Display name of this logger."""
        return "Metrics History Logger for Pytorch"

    @property
    def version(self):
        """Version string of this logger."""
        return "1.0"

    @property
    @rank_zero_experiment
    def experiment(self):
        # Return the experiment object associated with this logger.
        # There is none: this logger only keeps an in-memory history.
        pass
@rank_zero_only
def log_metrics(self, metrics, step):
# metrics is a dictionary of metric names and values
# your code to record metrics goes here
for metric_name, metric_value in metrics.items():
if metric_name != "epoch":
self.history[metric_name].append(metric_value)
else: # case epoch. We want to avoid adding multiple times the same. It happens for multiple losses.
if (
not len(self.history["epoch"]) or not self.history["epoch"][-1] == metric_value # len == 0:
): # the last values of epochs is not the one we are currently trying to add.
self.history["epoch"].append(metric_value)
else:
pass
return
    def log_hyperparams(self, params):
        # Hyper-parameters are intentionally not recorded by this logger.
        pass
def plot_metrics(self, title=None, fig_size=None, col_count=3):
# let's extract the relevant metrics data from self.history
# We are interested only in epoch-level metrics
useful_keys = [k for k in self.history.keys() if not k.endswith("_step")]
# build our dict of useful keys & values
data = dict((k, self.history[k]) for k in useful_keys)
epoch_keys = [k for k in data.keys() if k.endswith("_epoch")]
for key in epoch_keys:
k2 = key[: -len("_epoch")]
data[k2] = data[key]
del data[key]
data_df = pd.DataFrame(data)
data_df = data_df.set_index("epoch")
# data_cols = ['val_loss', 'val_metric1',...'val_metricN', 'train_loss', 'train_metric1',...'train_metricN']
# of which the 'val_xxx' entries are all optional!
data_cols = list(data_df.columns)
# metrics_tracked = ['loss', 'metric1', 'metric2', ..., 'metricN']
metrics_tracked = [m[m.find("_") + 1 :] for m in data_cols if m.startswith("train")]
# now plot the metrics, col_count pairs per row
with sns.axes_style("darkgrid"):
sns.set(context="notebook")
sns.set_style(
{
"font.sans-serif": [
"Segoe UI",
"Calibri",
"SF Pro Display",
"Arial",
"DejaVu Sans",
"Sans",
],
}
)
fig_size = (16, 5) if fig_size is None else fig_size
if len(metrics_tracked) == 1:
# only loss is being tracked - that is mandatory!
plt.figure(figsize=fig_size)
plt.plot(
data_df.index,
data_df["train_loss"],
lw=2,
markersize=7,
color="steelblue",
marker="o",
label="train_loss",
)
ax_title = "Training loss vs epochs"
if "val_loss" in data_cols:
plt.plot(
data_df.index,
data_df["val_loss"],
lw=2,
markersize=7,
color="firebrick",
marker="o",
label="val_loss",
)
ax_title = "Training & cross-val loss vs epochs"
plt.title(ax_title)
plt.legend(loc="best")
else:
# loss & additional metrics tracked
# fmt:off
MAX_COL_COUNT = col_count
col_count = MAX_COL_COUNT if len(metrics_tracked) > MAX_COL_COUNT \
else len(metrics_tracked)
row_count = len(metrics_tracked) // MAX_COL_COUNT
row_count += 1 if len(metrics_tracked) % MAX_COL_COUNT != 0 else 0
# fmt:on
f, ax = plt.subplots(row_count, col_count, figsize=fig_size)
for r in range(row_count):
for c in range(col_count):
index = r * (col_count - 1) + c
if index < len(metrics_tracked):
metric_name = metrics_tracked[index]
if row_count == 1:
ax[c].plot(
data_df.index,
data_df[f"train_{metric_name}"],
lw=2,
markersize=7,
color="steelblue",
marker="o",
label=f"train_{metric_name}",
)
ax_title = f"Training {metric_name} vs epochs"
if f"val_{metric_name}" in data_cols:
ax[c].plot(
data_df.index,
data_df[f"val_{metric_name}"],
lw=2,
markersize=7,
color="firebrick",
marker="o",
label=f"val_{metric_name}",
)
ax_title = f"Training & cross-val {metric_name} vs epochs"
ax[c].set_title(ax_title)
ax[c].legend(loc="best")
else:
# more than 1 row
ax[r, c].plot(
data_df.index,
data_df[f"train_{metric_name}"],
lw=2,
markersize=7,
color="steelblue",
marker="o",
label=f"train_{metric_name}",
)
ax_title = f"Training {metric_name} vs epochs"
if f"val_{metric_name}" in data_cols:
ax[r, c].plot(
data_df.index,
data_df[f"val_{metric_name}"],
lw=2,
markersize=7,
color="firebrick",
marker="o",
label=f"val_{metric_name}",
)
ax_title = f"Training & cross-val {metric_name} vs epochs"
ax[r, c].set_title(ax_title)
ax[r, c].legend(loc="best")
if title is not None:
plt.suptitle(title)
plt.show()
plt.close()
# This module only provides MetricsLogger; it is not a runnable script.
if __name__ == "__main__":
    raise RuntimeError("FATAL ERROR: this is a re-useable functions module. Cannot run it independently.")
| mjbhobe/dl-pytorch | pyt_lightning/metrics_logger.py | metrics_logger.py | py | 8,470 | python | en | code | 8 | github-code | 13 |
23007064499 | from frosch2010_Tabu_settings import tabu_settings
from random import shuffle
from random import randrange
import asyncio
import copy
import discord
import frosch2010_Tabu_variables as fTV
import frosch2010_Console_Utils as fCU
import frosch2010_Discord_Utils as fDU
import frosch2010_Tabu_other_funtions as fTOF
import frosch2010_Class_Utils as fCLU
import frosch2010_Tabu_manage_timer as fMT
#-----------------------------------------------------
async def on_Start_Game(isRevengeStart, msg, tabuVars, tabuSettings, tabuLanguage, client):
    """Set up and start a round of the Taboo game.

    isRevengeStart -- True when restarting with the previous round's settings.
    msg            -- the Discord message that triggered the start command.
    tabuVars/tabuSettings/tabuLanguage -- shared game state, configuration
                      and localized strings.
    client         -- the Discord client instance.
    """
    tabuVars.tabu_is_running = True
    # Announce the start (skipped on a revenge restart).
    fCU.log_In_Console("{} started game...".format(msg.author.name), "ON-START", "inf")
    if not isRevengeStart:
        await fDU.send_Message_To_Channel(tabuLanguage.tabu_user_started_game.replace("[USER_NAME]", str(msg.author.name)), [msg.channel])
    # Points needed to win: reuse last round's value on revenge, else default.
    if isRevengeStart:
        tabuVars.tabu_points_to_win = tabuVars.tabu_last_points_to_win
    else:
        tabuVars.tabu_points_to_win = tabu_settings.tabu_default_points_to_win
    # An optional third command argument overrides the points-to-win value.
    args = msg.content.split(" ")
    if len(args) >= 3:
        try:
            tabuVars.tabu_points_to_win = int(args[2])
            fCU.log_In_Console("{} set points to win to: {}".format(msg.author.name, str(tabuVars.tabu_points_to_win)), "ON-START", "inf")
        except ValueError:  # narrowed from bare except: int() only raises ValueError here
            fCU.log_In_Console("{} cant set points to win. Cant parse point-count from arguments.".format(msg.author.name), "ON-START", "err")
    tabuVars.tabu_last_points_to_win = tabuVars.tabu_points_to_win
    # Clear the messages in both team channels.
    fCU.log_In_Console("Delete messages for team 1 and 2...", "ON-START", "inf")
    await fDU.delete_Messages_From_Channel([client.get_channel(tabuSettings.tabu_channelID_team_1), client.get_channel(tabuSettings.tabu_channelID_team_2)])
    # Clear the messages in every player's private DM channel with the bot.
    fCU.log_In_Console("Delete messages for all players...", "ON-START", "inf")
    channels = []
    for player in tabuVars.tabu_player_list_all:
        await player.create_dm()
        channels.append(client.get_channel(player.dm_channel.id))
    await fDU.delete_Messages_From_Channel(channels, 4)
    # Shuffle the players and split them into two (near-)equal teams.
    fCU.log_In_Console("Shuffel playerlist and split them in teams...", "ON-START", "inf")
    shuffle(tabuVars.tabu_player_list_all)
    team_size = (len(tabuVars.tabu_player_list_all) / 2)
    tabuVars.tabu_player_list_team_1 = tabuVars.tabu_player_list_all[:int(team_size)]
    tabuVars.tabu_player_list_team_2 = tabuVars.tabu_player_list_all[int(team_size):]
    if not isRevengeStart:
        await fTOF.print_Who_Which_Team(tabuVars, msg.channel)
    # Pick the starting team at random.
    # BUG FIX: randrange(0, 1) always returned 0 (the stop bound is
    # exclusive), so team 0 always started; randrange(0, 2) picks 0 or 1.
    fCU.log_In_Console("Set start team...", "ON-START", "inf")
    tabuVars.tabu_guessing_team_num = randrange(0, 2)
    tabuVars.tabu_start_team_num = tabuVars.tabu_guessing_team_num
    # Set the round time and start the one-second tick timer.
    fCU.log_In_Console("Starting timer...", "ON-START", "inf")
    tabuVars.tabu_current_time = copy.deepcopy(tabuSettings.tabu_round_lenght)
    fCLU.Timer(1, fMT.manage_timer, [tabuVars, tabuSettings, tabuLanguage, client])
    # Send the countdown messages to all participants.
    fCU.log_In_Console("Sending countdown-messages...", "ON-START", "inf")
    await fTOF.send_Team_Countdowns(tabuVars, tabuSettings, tabuLanguage, client)
    await fTOF.send_Explainer_Countdown(tabuVars, tabuLanguage)
    # Done - deal the first word card.
    fCU.log_In_Console("Started successfully...", "ON-START", "inf")
    await fTOF.send_New_Word_Card(tabuVars, tabuSettings, tabuLanguage, client)
72161184339 | from collections import deque
def bfs(graph, start, visited=None):
    """Breadth-first search over an adjacency-list graph.

    graph   -- list where graph[v] is the list of neighbours of vertex v.
    start   -- vertex to start from.
    visited -- optional list collecting the visit order in place.

    Returns the list of vertices in the order they were visited.

    BUG FIX: the default was the mutable `visited=[]`, which is shared
    between calls, so every call after the first returned stale results.
    """
    if visited is None:
        visited = []
    qu = deque([start])
    visited.append(start)
    while qu:
        v = qu.popleft()
        for neighbour in graph[v]:
            if neighbour not in visited:
                visited.append(neighbour)
                qu.append(neighbour)
    return visited
# Adjacency list of the sample graph; index 0 is unused so vertices are 1-based.
graph = [
    [], [2, 3, 8], [1, 7], [1, 4, 5], [3, 5], [3, 4], [7], [2, 6, 8], [1, 7]
]

# Run a breadth-first traversal from vertex 1 and show the visit order.
visited = []
bfs(graph, 1, visited)
print(visited)
| hyunlae/Algorithm-Note | search/bfs/bfs_04.py | bfs_04.py | py | 474 | python | en | code | 0 | github-code | 13 |
8731021904 | import numpy as np
from numpy import ma
import gc
import veldstatistiek
def afstandmatrix(X, Y):
    """Pairwise Euclidean distance matrix of the grid points (X, Y).

    X, Y are 2-D (masked) coordinate fields; they are flattened and the
    full point-to-point distance matrix is built. Entries smaller than 1
    (notably the zero diagonal) are masked out. On a MemoryError a message
    is printed and None is returned implicitly.
    """
    assert X.size<15000, "Te veel punten tegelijkertijd ingevoerd.\nVoer een kleiner aantal punten in"
    from scipy.spatial.distance import pdist, squareform
    xs = X.flatten()
    ys = Y.flatten()
    punten = np.array([xs, ys]).T
    try:
        afstanden = pdist(punten, 'euclidean')
    except MemoryError:
        print("Te veel punten tegelijkertijd ingevoerd.\nStart een nieuw terminal scherm of voer een kleiner aantal punten in" )
    else:
        return ma.masked_less(squareform(afstanden), 1)
def flatxy(X, Y):
    """Flatten the 2-D coordinate fields to 1-D arrays, returned as (X, Y)."""
    return X.flatten(), Y.flatten()
class roostereigenschappen:
    """Grid ("rooster") properties of a curvilinear model grid.

    Wraps masked X/Y coordinate fields and derives grid spacing,
    resolution, orthogonality and Courant numbers from them.
    """
    def __init__(self,X,Y):
        # Both coordinate fields must be masked arrays of identical shape.
        assert type(X)==type(np.ma.array([])),"X-veld moet masked array zijn"
        assert type(Y)==type(np.ma.array([])),"Y-veld moet masked array zijn"
        self.X= X
        self.Y=Y
        self.Mmax,self.Nmax=X.shape
    def delta_m(self):
        """Grid spacing in the m-direction; a fully masked row is appended so the result keeps the input shape."""
        delta_m=np.sqrt(np.diff(self.X,axis=0)**2+np.diff(self.Y,axis=0)**2)
        stacktrue=np.ma.array(delta_m.shape[1]*[True],mask=True)
        delta_m=np.ma.vstack([delta_m,stacktrue])
        return delta_m
    def delta_n(self):
        """Grid spacing in the n-direction; a fully masked column is appended so the result keeps the input shape."""
        delta_n=np.sqrt(np.diff(self.X,axis=1)**2+np.diff(self.Y,axis=1)**2)
        stacktrue=np.ma.array(delta_n.shape[0]*[True],mask=True)
        stacktrue=stacktrue.reshape(stacktrue.shape+(1,))
        delta_n=np.ma.hstack([delta_n,stacktrue])
        return delta_n
    def resolution(self):
        """Grid resolution: geometric mean of the m- and n-spacing."""
        #dm=self.delta_m()[:,0:-1]
        #dn=self.delta_n()[0:-1,:]
        dm=self.delta_m()
        dn=self.delta_n()
        return np.sqrt(dm*dn)
    def orto(self):
        """Orthogonality: |cos| of the angle between the grid lines (0 = perfectly orthogonal)."""
        mxd= np.diff(self.X,axis=0)[:,0:-1]
        myd= np.diff(self.Y,axis=0)[:,0:-1]
        nxd= np.diff(self.X,axis=1)[0:-1,:]
        nyd= np.diff(self.Y,axis=1)[0:-1,:]
        alfa=np.arctan(nyd/nxd);beta=np.arctan(mxd/myd);
        return np.abs(np.cos(0.5*np.pi+alfa+beta))
    def courant(self,dt,dep):
        """Courant number per grid cell.

        dt  -- time step in seconds (scalar)
        dep -- depth in metres ((numpy) array)
        """
        g=9.81
        res=self.resolution()
        print('dt invoer in secondes ('+str(dt)+')')
        #dm=self.delta_m()[:,0:-1]
        #dn=self.delta_n()[0:-1,:]
        return dt*np.sqrt(2*g*dep)/res
    def plot_re(self,parameter,*arg):
        """Plot the grid property named `parameter` (e.g. 'orto'); extra args are forwarded to that method."""
        from pylab import pcolormesh
        from mask_staggerdveld import mask_zeta_masker
        attr=dir(self)
        assert(attr.count(parameter)), 'Verkeerde naam voor roostereigenschap opgegeven'
        # Build an "arg[0],arg[1],..." string and eval the method call.
        # NOTE(review): eval-based dispatch is clunky and unsafe for untrusted
        # input; getattr(self, parameter)(*arg) would do the same without eval.
        input=''
        for a in range(len(arg)):
            input=input +'arg['+str(a)+'],'
        Veldroostereigenschap=eval('self.'+parameter+'('+input+')')
        print(Veldroostereigenschap.shape)
        print(self.X.shape)
        print(self.Y.shape)
        #Plotroostereigenschap=mask_zeta_masker(Veldroostereigenschap,self.X[1:,1:])
        Plotroostereigenschap=mask_zeta_masker(Veldroostereigenschap,self.X)
        pcolormesh(self.X,self.Y,Plotroostereigenschap)
| MarcRotsaert/Qgis-resource | hydromodel_staggerd/info_waquaveld.py | info_waquaveld.py | py | 3,709 | python | nl | code | 0 | github-code | 13 |
74166693457 | #!/usr/bin/env python
# -*- coding:utf8 -*-
# Wrap the scrape in an outer loop that advances to the next listing page
import requests
import re
# Target audience: university students
# Pipeline: crawl/collect the data
# store the data
# process/filter/select the data
# analyse and present the data
# 1. Know where to crawl the desired data from
# 2. Analyse/parse the structure of that address
# Example image path: /d/file/20171202/3226be099ad8d610e92bbab5218047d1.jpg
#
# Add a loop that iterates over pages by some criterion
# and write each image out as a .jpg file
import requests
import re
url = 'http://www.xiaohuar.com/list-1-%s.html'

for page in range(4):
    page_url = url % page
    print(page_url)
    # Fetch the listing page source.
    response = requests.get(page_url)
    html = response.text
    # Image paths embedded in the page look like /d/file/<date>/<hex>.jpg
    img_urls = re.findall(r"/d/file/\d+/\w+\.jpg", html)
    # Download every matched image.
    for img_url in img_urls:
        img_response = requests.get("http://www.xiaohuar.com%s" % img_url)
        print(img_url)
        img_data = img_response.content  # raw image bytes
        filename = img_url.split('/')[-1]  # last path segment becomes the file name
        with open(filename, 'wb') as f:
            f.write(img_data)
33979862335 | import tweepy
import json
# Authentication details. To obtain these visit dev.twitter.com
# SECURITY NOTE(review): real-looking API credentials are hard-coded below.
# They should be revoked and loaded from environment variables or a config
# file kept out of version control instead.
consumer_key = 'sk5h3aKTyje7Ffk8CwZa6vMtT'
consumer_secret = '5tHSJqEKxYc08bbWYfalFcBgoJ1E7y4lL3aLlH698Nx3wce5e6'
access_token = '287175928-IL2DnvCppJBM68iJHUgMl5svVzBj5RKM0qZrZUcv'
access_token_secret = 'TADqHOjlxhrQupHxaCxiyfxO6nAk29hXz03Xa00Jr8SXj'
# This is the listener, responsible for receiving data
class StdOutListener(tweepy.StreamListener):
    """Stream listener that prints each incoming tweet as '@user: text'."""
    def on_data(self, data):
        # Twitter returns data in JSON format - we need to decode it first
        decoded = json.loads(data)
        # NOTE(review): assumes every payload carries 'user' and 'text' keys;
        # stream housekeeping messages (delete/limit notices) do not - confirm
        # before relying on this for long-running streams.
        print('@%s: %s' % (decoded['user']['screen_name'], decoded['text']))
        print('')
        # Returning True keeps the stream connection open.
        return True
    def on_error(self, status):
        # Print the HTTP error status reported by the streaming API.
        print (status)
if __name__ == '__main__':
    l = StdOutListener()
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    # NOTE(review): the banner mentions #programming, but the filter below
    # actually tracks common Bulgarian words - one of the two is stale.
    print ("Showing all new tweets for #programming:")
    # There are different kinds of streams: public stream, user stream, multi-user streams
    # For more details refer to https://dev.twitter.com/docs/streaming-apis
    stream = tweepy.Stream(auth, l)
    # Track Bulgarian-language tweets containing frequent Bulgarian words
    # (the streaming API allows up to 400 tracked terms).
    stream.filter(languages=["bg"], track=["за", "най", "по", "как", "искам"]) # Up tp 400 words
class Node:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val, l_c=None, r_c=None):
        self.value = val  # payload stored at this node
        self.l_c = l_c    # left child (Node or None)
        self.r_c = r_c    # right child (Node or None)
class BinaryTree:
    """Minimal binary-tree wrapper that only tracks its root node."""

    def __init__(self, root):
        self.root = root  # the tree's root Node
# Build a tiny menu tree: Drinks -> {Cold drinks, Hot drinks}.
cold_drinks = Node('Cold drinks')
hot_drinks = Node('Hot drinks')
drinks = Node('Drinks', cold_drinks, hot_drinks)
# BUG FIX: BinaryTree was assigned as a bare class (binary_tree = BinaryTree)
# instead of being instantiated, and the children were never attached.
binary_tree = BinaryTree(drinks)
| farzan-dehbashi/toolkit | DS/tree/binary_3_LL.py | binary_3_LL.py | py | 566 | python | en | code | 5 | github-code | 13 |
32534272281 | from pico2d import *
import game_world
import random
# Source tile size in the tileset image (pixels).
IMAGE_WIDTH, IMAGE_HEIGHT = 32, 32
# Pixel offsets of the tile variants inside the tileset sheet.
positionX = [0]
positionY = [0, 48, 96]
def intersected_rectangle(collided_Rect, rect1_left, rect1_top, rect1_right, rect1_bottom,
                          rect2_left, rect2_top, rect2_right, rect2_bottom):
    """Axis-aligned rectangle overlap test (y axis points up, so top >= bottom).

    For each axis that overlaps, the intersection span is written into
    `collided_Rect` ([left, top, right, bottom]) - even when the other axis
    does not overlap. Returns True only when both axes overlap.
    """
    overlaps_x = rect1_left <= rect2_right and rect1_right >= rect2_left
    if overlaps_x:
        collided_Rect[0] = max(rect1_left, rect2_left)
        collided_Rect[2] = min(rect1_right, rect2_right)
    overlaps_y = rect1_top >= rect2_bottom and rect1_bottom <= rect2_top
    if overlaps_y:
        collided_Rect[1] = min(rect1_top, rect2_top)
        collided_Rect[3] = max(rect1_bottom, rect2_bottom)
    return overlaps_x and overlaps_y
# Source sprite size (pixels in the tileset image).
image_sizeW = 48
image_sizeH = 48
# On-screen size the tile is drawn at.
object_sizeW = 64
object_sizeH = 64
# Half-extents of the collision box.
colW = 32
colH = 32
# Entity state ids shared with the player/enemy objects.
RUN, IDLE, STUN = range(3)
class CrushBlock:
    """Crushable tile: a solid block the player can smash from either side.

    Keeps the block's own AABB plus three sensor rectangles above it
    (check1/2/3 = left / centre / right). The centre sensor blocks crushing
    while something solid sits on top; the side sensors detect the player
    punching the block from the left or right.

    BUG FIX: all `state is 2` / `state is not 2` comparisons used identity
    on int literals (a SyntaxWarning that only works because CPython caches
    small ints); they are now value comparisons (== / !=).
    """

    def __init__(self, pos):
        self.image = load_image('chip\\tileset\\newTile5.png')
        self.x = pos[0]
        self.y = pos[1]
        # Frames until a crushed block reappears.
        # NOTE(review): initialised to 2000 here but reset to 1000 after each
        # respawn in update() - confirm which duration is intended.
        self.restore = 2000
        # Solid collision box of the block itself.
        self.left = self.x - colW
        self.top = self.y + colH
        self.right = self.x + colW
        self.bottom = self.y - colH
        # Centre sensor directly above the block.
        self.check2_left = self.left + 16
        self.check2_top = self.top + colH + 16
        self.check2_right = self.check2_left + 32
        self.check2_bottom = self.top + 16
        # Sensor one tile to the left of the centre sensor.
        self.check1_left = self.check2_left - colW - colW
        self.check1_top = self.check2_top
        self.check1_right = self.check2_left - colW
        self.check1_bottom = self.check2_bottom
        # Sensor one tile to the right of the centre sensor.
        self.check3_left = self.check2_right + colW
        self.check3_top = self.check2_top
        self.check3_right = self.check3_left + colW
        self.check3_bottom = self.check2_bottom
        # Scratch storage for intersection results.
        self.collided_Rect_Height = 0
        self.collided_Rect_Width = 0
        self.collided_Rect = [0, 0, 0, 0]
        self.collided_Rect2 = [0, 0, 0, 0]
        self.collided_Rect3 = [0, 0, 0, 0]
        self.collided_Rect4 = [0, 0, 0, 0]
        self.fill = True      # True while the block is solid (not crushed)
        self.canCrush = True  # recomputed every update

    def get_bb(self):
        # Solid bounding box.
        return self.left, self.top, self.right, self.bottom

    def get_bb2(self):
        # Left sensor bounding box.
        return self.check1_left, self.check1_top, self.check1_right, self.check1_bottom

    def get_bb3(self):
        # Centre sensor bounding box.
        return self.check2_left, self.check2_top, self.check2_right, self.check2_bottom

    def get_bb4(self):
        # Right sensor bounding box.
        return self.check3_left, self.check3_top, self.check3_right, self.check3_bottom

    def update(self):
        player = game_world.bring_object(6, 0)
        crush = game_world.bring_objects(4)
        enemy = game_world.bring_objects(2)
        # Crushing is blocked while a filled block or a stunned (state 2)
        # enemy occupies the space above this block.
        self.canCrush = True
        for i in crush:
            if i.fill:
                if intersected_rectangle(self.collided_Rect3, self.check2_left, self.check2_top, self.check2_right,
                                         self.check2_bottom,
                                         i.left, i.top, i.right, i.bottom):
                    self.canCrush = False
        for i in enemy:
            if i.state == 2:
                if intersected_rectangle(self.collided_Rect3, self.check2_left, self.check2_top, self.check2_right,
                                         self.check2_bottom,
                                         i.left, i.top, i.right, i.bottom):
                    self.canCrush = False
        # A player fully inside a filled block gets put into state 3.
        if intersected_rectangle(self.collided_Rect2, self.left, self.top, self.right,
                                 self.bottom, player.left, player.top, player.right,
                                 player.bottom) and self.fill:
            if self.left <= player.x <= self.right and self.bottom <= player.y <= self.top and player.state != 3:
                player.frame = 0
                player.state = 3
        # Punching from the left sensor (moving right) or the right sensor
        # (moving left) crushes the block when allowed.
        if intersected_rectangle(self.collided_Rect2, self.check1_left, self.check1_top, self.check1_right,
                                 self.check1_bottom, player.left, player.top, player.right,
                                 player.bottom) and player.do_right_action and self.canCrush:
            self.fill = False
        elif intersected_rectangle(self.collided_Rect3, self.check3_left, self.check3_top, self.check3_right,
                                   self.check3_bottom, player.left, player.top, player.right,
                                   player.bottom) and player.do_left_action and self.canCrush:
            self.fill = False
        # Count down until a crushed block restores itself.
        if not self.fill:
            self.restore -= 1
            if self.restore == 0:
                self.fill = True
                self.restore = 1000

    def late_update(self):
        player = game_world.bring_object(6, 0)
        enemy = game_world.bring_objects(2)
        if self.fill:
            # Push the player out of the solid block along the axis with the
            # smaller penetration.
            if intersected_rectangle(self.collided_Rect, self.left, self.top, self.right, self.bottom,
                                     player.left, player.top, player.right, player.bottom):
                self.collided_Rect_Height = self.collided_Rect[1] - self.collided_Rect[3]
                self.collided_Rect_Width = self.collided_Rect[2] - self.collided_Rect[0]
                if self.collided_Rect_Width > self.collided_Rect_Height:
                    if self.collided_Rect[1] == self.y + colH:
                        # Standing on top of the block.
                        player.falling = False
                        player.y += self.collided_Rect_Height
                    elif self.collided_Rect[3] == self.y - colH:
                        # Bumping the block from below.
                        player.y -= self.collided_Rect_Height
                        player.y -= 1
                else:
                    if self.collided_Rect[0] == self.x - colW:
                        player.x -= self.collided_Rect_Width
                    elif self.collided_Rect[2] == self.x + colW:
                        player.x += self.collided_Rect_Width
            # Same separation for enemies, except stunned (state 2) enemies
            # are held in place instead of being pushed.
            for i in enemy:
                if intersected_rectangle(self.collided_Rect, self.left, self.top, self.right, self.bottom,
                                         i.left2, i.top2, i.right2, i.bottom2):
                    self.collided_Rect_Height = self.collided_Rect[1] - self.collided_Rect[3]
                    self.collided_Rect_Width = self.collided_Rect[2] - self.collided_Rect[0]
                    if i.state == 2:
                        i.falling = False
                    if self.collided_Rect_Width > self.collided_Rect_Height:
                        if self.collided_Rect[1] == self.y + colH:
                            i.falling = False
                            if i.state != 2:
                                i.y += self.collided_Rect_Height
                        elif self.collided_Rect[3] == self.y - colH:
                            if i.state != 2:
                                i.y -= self.collided_Rect_Height
                                i.y -= 1
                    else:
                        if self.collided_Rect[0] == self.x - colW:
                            if i.state != 2:
                                i.x -= self.collided_Rect_Width
                        elif self.collided_Rect[2] == self.x + colW:
                            if i.state != 2:
                                i.x += self.collided_Rect_Width
        else:
            # Crushed block: an enemy falling into the gap gets trapped and
            # stunned (state 2), centred on the block.
            for i in enemy:
                if intersected_rectangle(self.collided_Rect, self.left, self.top, self.right, self.bottom,
                                         i.left2, i.top2, i.right2, i.bottom2):
                    if i.y < self.y:
                        i.x = self.x
                        i.falling = False
                        i.state = 2

    def draw(self):
        # Only a filled block is visible.
        if self.fill:
            self.image.clip_draw(positionX[0], positionY[0], image_sizeW, image_sizeH, self.x, self.y, object_sizeW,
                                 object_sizeH)
        # Debug outlines (enable while tuning the sensors):
        # draw_rectangle(*self.get_bb())
        # draw_rectangle(*self.get_bb2())
        # draw_rectangle(*self.get_bb3())
        # draw_rectangle(*self.get_bb4())
| seungdam/2018182019-2DGP-project | 20181820192DGP/block3.py | block3.py | py | 8,069 | python | en | code | 0 | github-code | 13 |
30139454242 | """empty message
Revision ID: 3fee3bd10f9d
Revises: 45c2de366e66
Create Date: 2018-10-23 16:46:02.990772
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = "3fee3bd10f9d"        # this migration's id
down_revision = "45c2de366e66"   # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable JSONB `data` column to output_file and working_file."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "output_file",
        sa.Column(
            "data", postgresql.JSONB(astext_type=sa.Text()), nullable=True
        ),
    )
    op.add_column(
        "working_file",
        sa.Column(
            "data", postgresql.JSONB(astext_type=sa.Text()), nullable=True
        ),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the JSONB `data` column added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("working_file", "data")
    op.drop_column("output_file", "data")
    # ### end Alembic commands ###
| cgwire/zou | zou/migrations/versions/3fee3bd10f9d_.py | 3fee3bd10f9d_.py | py | 969 | python | en | code | 152 | github-code | 13 |
28199617046 | import gym
from stable_baselines3 import PPO
import random
import argparse
import yaml
import csv
import numpy as np
import os
import matplotlib.pyplot as plt
def parse_args():
    """Parse command-line arguments; --config_path (the YAML config) is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, required=True, help='Path to the config yaml')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # load config
    # NOTE(review): open() here leaks the file handle; a `with` block would close it.
    config = yaml.full_load(open(args.config_path, 'r'))
    n_cpu = 6
    env = gym.make(config['env_name'])
    # Multi-agent highway-env setup with a random number of controlled and
    # IDM-driven vehicles per run.
    env.configure({
        "controlled_vehicles": random.choice(range(1,39))+1,
        "vehicles_count": random.choice(range(10))+1, # add more IDM vehicles if needs
        "observation": {
            "type": "MultiAgentObservation",
            "observation_config": {
                "type": "Kinematics",
            }
        },
        "action": {
            "type": "MultiAgentAction",
            "action_config": {
                "type": "DiscreteMetaAction",
            },
        },
        "create_vehicles_ego_idm": True, # True to create ego vehicle as IDM
        "ego_collision_weight": config['ego_collision_weight'],
    })
    env.reset()
    # PPO training (2e4 steps), logging to tensorboard.
    model = PPO('MultiInputPolicy', env,
                policy_kwargs=dict(net_arch=[dict(pi=[256, 256], vf=[256, 256])]),
                learning_rate=5e-4,
                batch_size=32,
                gamma=0.8,
                n_steps=32 * 12 // n_cpu,
                verbose=2,
                n_epochs=100,
                tensorboard_log="highway_multiagent/")
    env.set_dqn_model(model)
    model.learn(int(2e4))
    model.save(config['model_path'])
    print('start inference')
    # inference
    # NOTE(review): ttc_threshold, patience and age are read/assigned but
    # never used below - confirm whether they are leftovers.
    ttc_threshold = config['ttc_threshold']
    save_result_csv = config['save_result_csv']
    # number of episodes
    T = config['num_episodes']
    num_steps = config['num_steps']
    eps = 0
    patience = 3
    episode_reward = 0
    rewards = []
    while eps < T:
        age = 0
        done = False
        obs = env.reset()
        previous_info = False
        prev_info = None
        for i in range(num_steps):# done:
            action = model.predict(obs)
            # convert action to tuple
            action = tuple(action[0])
            next_obs, reward, done, info = env.step(action)
            episode_reward += reward
            rewards.append(episode_reward)
            current_crashed = info['crashed']
            # Log each crash only once (on its first frame).
            if current_crashed and not previous_info:
                if prev_info is None:
                    prev_info = info
                ego_speed = info['speed']
                ego_acceleration = info['ego_action']['acceleration']
                ego_ast_action = info['ego_ast_action']
                lane_index_ = info['current_lane_index'][2]
                # get crashed vehicle info
                crashed_veh_info = info['crashed_veh_info']
                crashed_speed = crashed_veh_info['crashed_veh_speed']
                crashed_lane_index = crashed_veh_info['crashed_veh_lane_index'][2]
                crashed_front = crashed_veh_info['front']
                crashed_distance = crashed_veh_info['crashed_distance']
                crashed_acceleration = crashed_veh_info['action']['acceleration']
                crashed_ast_action = crashed_veh_info['ast_action']
                # Append one CSV row describing the ego/crashed-vehicle state.
                with open(save_result_csv, 'a', newline='') as file:
                    writer = csv.writer(file)
                    # append to csv file the done, reward
                    writer.writerow([
                        eps, i, ego_speed, ego_acceleration, ego_ast_action, lane_index_,
                        crashed_speed, crashed_acceleration, crashed_lane_index, crashed_ast_action, crashed_front, crashed_distance,
                    ])
                # save picture
                os.makedirs(config['save_pic'], exist_ok=True)
                img_file = os.path.join(config['save_pic'], 'episode_{}_step_{}.png'.format(eps, i))
                img = env.render(mode='rgb_array')
                plt.imsave(img_file, img)
                previous_info = True
            if not current_crashed:
                previous_info = False
            # Keep the previous transition around (currently only prev_info is consulted).
            prev_obs = next_obs
            prev_reward = reward
            prev_info = info
            prev_done = done
            obs = next_obs
            env.render()
            if done:
                episode_reward = 0.0
                obs = env.reset()
        eps = eps + 1
    # save rewards
    with open(config['reward_file'], 'wb') as f:
        np.save(f, rewards)
73659092816 | import sys, json
from collections import defaultdict
from urllib.parse import urljoin
# Use owlready2's parsing error when available; otherwise define a local stub.
# NOTE(review): the fallback subclasses OwlReadyError, which is undefined in
# this module, so the except branch would itself raise NameError - confirm
# the intended base class (likely Exception or an imported OwlReadyError).
try:
    from owlready2.base import OwlReadyOntologyParsingError
except:
    class OwlReadyOntologyParsingError(OwlReadyError): pass
"""
X ID
X LANGUAGE
X LIST
SET
X TYPE
X VALUE
INDEX
BASE
X REVERSE
CONTEXT
VOCAB
GRAPH
"""
class Alias(object):
    """Resolved JSON-LD term definition: a short alias plus its expansion."""

    def __init__(self, alias, name, type, container, reverse = False):
        self.alias = alias          # the term as it appears in documents
        self.name = name            # expanded IRI / keyword the term maps to
        self.type = type            # coerced @type, if any
        self.reverse = reverse      # True when defined via @reverse
        self.container = container  # Container describing @container, or None
class Container(object):
    """@container information for a term: container type plus language/index flags."""

    def __init__(self, type, has_lang = False, has_index = False):
        self.type = type            # container type (e.g. "@list"), or nested info
        self.has_lang = has_lang    # True when @language appeared in @container
        self.has_index = has_index  # True when @index appeared in @container
class Context(object):
    """Active JSON-LD term-definition context.

    Maps term aliases to expanded names (see Alias/Container) and tracks the
    current @base IRI. A child context copies its parent's aliases so local
    @context blocks cannot leak upward. When a keyword itself gets an alias,
    the instance is morphed into AliasedKeywordContext so lookups consider
    every alias of that keyword.
    """
    def __init__(self, parent = None):
        self.parent = parent
        if parent:
            self.alias = dict(parent.alias)
            self.base  = parent.base
        else:
            self.alias = {}
            self.base  = ""

    def register(self, alias, name, type, containers, reverse = False):
        """Define term `alias` -> `name` with optional @type coercion,
        @container information and @reverse flag."""
        if containers:
            container = Container(None)
            if isinstance(containers, str): containers = [containers]
            for c in containers:
                if   self.is_keyword(c, "@language"): container.has_lang  = True
                elif self.is_keyword(c, "@index"):    container.has_index = True
                else:
                    if c in self.alias: c = self.alias[c].name
                    container.type = c
            container = Container(container)
        else:
            container = None
        self.alias[alias] = Alias(alias, name, type, container, reverse)

        if name.startswith("@"):
            # A keyword was aliased: morph into the subclass whose lookups
            # check every alias of a keyword.
            if self.__class__ is Context:
                self.__class__ = AliasedKeywordContext
                self.keywords = {}
            # BUG FIX: was `keywords[name] = ...`, a NameError - the mapping
            # lives on self.
            if not name in self.keywords: self.keywords[name] = [name, alias]
            else:                         self.keywords[name].append(alias)

    def get_keyword(self, d, k): return d.get(k)

    def is_keyword(self, k, kref): return k == kref
class AliasedKeywordContext(Context):
    """Context specialisation used once a JSON-LD keyword has an alias.

    Keyword lookups must then consider every registered alias of a keyword,
    not just the canonical "@..." spelling.
    """
    def __init__(self, parent = None):
        Context.__init__(self, parent)
        # BUG FIX: was `parent.copy()` - Context has no copy(); what we need
        # is a copy of the parent's keyword->aliases table.
        self.keywords = parent.keywords.copy()

    def get_keyword(self, d, k):
        # Return the value of the first alias of keyword k present in d.
        for k2 in self.keywords[k]:
            r = d.get(k2)
            if not r is None: return r

    def is_keyword(self, k, kref):
        return k in self.keywords[kref]
def parse(f, on_triple = None, on_prepare_triple = None, new_blank = None, new_literal = None, default_base = ""):
    """Parse a JSON-LD document and emit RDF triples.

    f                 -- file name (str) or file-like object with the JSON-LD.
    on_triple         -- callback(s, p, o) for fully-formatted triples
                         (defaults to printing N-Triples).
    on_prepare_triple -- callback(s, p, o) before formatting; the default
                         wraps IRIs in <> and forwards to on_triple.
    new_blank         -- factory for blank-node ids (default: "_:1", "_:2"...).
    new_literal       -- factory for literal serialisation.
    default_base      -- base IRI used to resolve relative @id values.

    Returns the number of triples emitted (only counted by the default
    on_prepare_triple).
    """
    # NOTE(review): prefixes/prefixess/tag_is_predicate/bns/node_2_blanks/
    # known_nodes are assigned but never used below - likely leftovers from
    # the RDF/XML parser this module mirrors.
    prefixes = {}
    prefixess = [prefixes]
    tag_is_predicate = False
    current_blank = 0
    nb_triple = 0
    bns = defaultdict(set)
    if default_base:
        xml_base = default_base
        if xml_base.endswith("#") or xml_base.endswith("/"): xml_base = xml_base[:-1]
        xml_dir = xml_base.rsplit("/", 1)[0] + "/"
    else:
        xml_base = ""
        xml_dir = ""
    if not on_triple:
        def on_triple(s,p,o):
            # Default sink: print one N-Triples line.
            print("%s %s %s ." % (s,p,o))
    if not on_prepare_triple:
        def on_prepare_triple(s,p,o):
            # Wrap non-blank, non-literal terms in <> and count the triple.
            nonlocal nb_triple
            nb_triple += 1
            if not s.startswith("_"): s = "<%s>" % s
            if not (o.startswith("_") or o.startswith('"')): o = "<%s>" % o
            on_triple(s,"<%s>" % p,o)
    if not new_blank:
        def new_blank():
            # Sequential blank-node ids: _:1, _:2, ...
            nonlocal current_blank
            current_blank += 1
            return "_:%s" % current_blank
    node_2_blanks = defaultdict(new_blank)
    known_nodes = set()
    if not new_literal:
        def new_literal(value, datatype = "", lang = ""):
            # Escape and serialise a literal with optional datatype/language.
            value = value.replace('"', '\\"').replace("\n", "\\n")
            if lang: return '"%s"@%s' % (value, lang)
            if datatype: return '"%s"^^<%s>' % (value, datatype)
            return '"%s"' % (value)
    if isinstance(f, str): ds = json.load(open(f))
    else: ds = json.load(f)
    def parse_node(d, context, in_context = False, no_literal = False):
        """Recursively expand node `d` under `context`; returns the node's
        IRI/blank id, a literal, or a list thereof, emitting triples as a
        side effect."""
        if isinstance(d, list):
            return [parse_node(i, context, in_context, no_literal) for i in d]
        if isinstance(d, str):
            # Bare string: IRI (when @type-coerced to @id) or a literal.
            if no_literal: return d
            if context.default_type:
                if context.default_type == "@id": return urljoin(context.base, d)
                return new_literal(d, context.default_type)
            else:
                return new_literal(d)
        # @value object -> typed / language-tagged literal.
        w = context.get_keyword(d, "@value")
        if w:
            if context.default_type:
                # NOTE(review): this returns the whole dict `d`, unlike the
                # string case above which resolves against @base - confirm
                # whether `w` was intended here.
                if context.default_type == "@id": return d
                return new_literal(w, context.default_type, context.get_keyword(d, "@language"))
            else:
                return new_literal(w, context.get_keyword(d, "@type"), context.get_keyword(d, "@language"))
        # NOTE(review): the second operand repeats "@list"; it was probably
        # meant to be "@set".
        w = context.get_keyword(d, "@list") or context.get_keyword(d, "@list")
        if w:
            return [parse_node(i, context, in_context, no_literal) for i in w]
        # @graph: parse the contained nodes, then fall through and treat the
        # enclosing object as a node too.
        w = context.get_keyword(d, "@graph")
        if w:
            for d2 in w:
                parse_node(d2, context, in_context, no_literal)
        # Local @context: derive a child context and register its terms.
        if "@context" in d:
            context = context.__class__(context)
            for k2, v2 in d["@context"].items():
                if context.is_keyword(k2, "@base"):
                    context.base = v2
                    continue
                if isinstance(v2, str):
                    context.register(k2, v2, None, None)
                else:
                    container = context.get_keyword(v2, "@container")
                    w = context.get_keyword(v2, "@reverse")
                    if w:
                        context.register(k2, w, context.get_keyword(v2, "@type"), context.get_keyword(v2, "@container"), True)
                    else:
                        w = context.get_keyword(v2, "@id")
                        if w:
                            context.register(k2, w, context.get_keyword(v2, "@type"), context.get_keyword(v2, "@container"))
        # Subject: explicit @id resolved against @base, else a fresh blank node.
        name = context.get_keyword(d, "@id")
        if name:
            name = urljoin(context.base, name)
        else:
            name = new_blank()
        for k, v in d.items():
            # Expand the predicate through the context's term definitions.
            if k in context.alias:
                alias = context.alias[k]
                context.default_type = alias.type
                k = alias.name
                reverse = alias.reverse
            else:
                context.default_type = None
                reverse = False
            if context.is_keyword(k, "@reverse"):
                if in_context:
                    pass
                else:
                    # @reverse map: emit (object, predicate, this-node).
                    for k2, v2 in v.items():
                        v2 = parse_node(v2, context, in_context, no_literal)
                        if not isinstance(v2, list): v2 = [v2]
                        for v3 in v2:
                            on_prepare_triple(v3, k2, name)
                continue
            elif context.is_keyword(k, "@type"):
                # @type becomes an rdf:type triple with an IRI object.
                k = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
                v = parse_node(v, context, in_context, no_literal = True)
            elif k.startswith("@"):
                continue
            else:
                v = parse_node(v, context, in_context, no_literal)
            #print()
            #print(k, v)
            #print()
            # Emit triples, flipping subject/object for @reverse terms.
            if reverse:
                if isinstance(v, list):
                    for v2 in v:
                        on_prepare_triple(v2, k, name)
                else:
                    on_prepare_triple(v, k, name)
            else:
                if isinstance(v, list):
                    for v2 in v:
                        on_prepare_triple(name, k, v2)
                else:
                    on_prepare_triple(name, k, v)
        return name
    context = Context()
    if isinstance(ds, list):
        for d in ds:
            parse_node(d, context)
    else:
        parse_node(ds, context)
    return nb_triple
if __name__ == "__main__":
    # CLI: jsonld_2_ntriples.py <file.jsonld> - prints N-Triples on stdout
    # and a timing summary on stderr.
    filename = sys.argv[-1]
    import time
    t = time.time()
    nb_triple = parse(filename)
    t = time.time() - t
    print("# %s triples read in %ss" % (nb_triple, t), file = sys.stderr)
| haicheviet/ontology_food | lib_for_project/owlready2/jsonld_2_ntriples.py | jsonld_2_ntriples.py | py | 7,418 | python | en | code | 3 | github-code | 13 |
70309393937 | from tkinter import * #The game is interfaced on tkinter
import tkinter as tk
import random #Random library for adding new tiles
import colors as c #User-defined package for setting bg and colors
def whichbutton(button_press):
    """Start-dialog button callback: store the chosen grid size (3/4/5) in the module-global `ch`."""
    global ch
    ch = button_press
global ch
class Start:
    """Start-up dialog for choosing the 2048 grid size.

    NOTE(review): the whole dialog runs in the class *body* - Tk() and
    mainloop() execute as a side effect of defining this class, and the
    chosen size ends up in the module-level global `ch` via whichbutton().
    """
    # Create the dialog window.
    root = Tk()
    root.title('Play 2048')
    root.anchor('center')
    # Initialize tkinter window with dimensions 600x400
    root.geometry('600x400')
    title=Label(root,padx=5,pady=5,text="Let's play 2048",bg='purple',fg='white',font=('times new roman', 25 ,'bold'),relief=GROOVE,justify=CENTER)
    title.pack(fill=X)
    # NOTE(review): playDisplay is created but never packed, so it is invisible.
    playDisplay = Label(root,padx=5,pady=5,text="Select and Continue", fg='black',font=('times new roman', 25 ,'bold'),relief=GROOVE,justify=CENTER)
    # One button per grid size; each records its size in the global `ch`.
    btn3 = Button(text='Play grid 3*3', font='arial 15 bold',padx=5,pady=10,command=lambda chc=3:whichbutton(chc),bg='yellow',fg='black',width=15)
    btn4 = Button(text='Play grid 4*4', font='arial 15 bold',padx=5,pady=10,command=lambda chc=4:whichbutton(chc),bg='yellow',fg='black',width=15)
    btn5 = Button(text='Play grid 5*5', font='arial 15 bold',padx=5,pady=10,command=lambda chc=5:whichbutton(chc),bg='yellow',fg='black',width=15)
    # The confirm button closes the dialog.
    btn = Button(text='Select and continue', font='arial 15 bold',padx=1,pady=10,command=root.destroy,bg='green',fg='white',width=25)
    # Stack the buttons from the bottom of the window up.
    btn.pack(side = 'bottom',pady=10)
    btn5.pack(side = 'bottom', pady=10)
    btn4.pack(side = 'bottom', pady=10)
    btn3.pack(side = 'bottom', pady=10)
    root.mainloop()
class Game(tk.Frame):
    """Tkinter window and game logic for a ch x ch 2048 board.

    Reads the module-global ``ch`` (grid size chosen in the Start dialog)
    and the color/font constants from the ``colors`` module (imported as c).
    Constructing a Game builds the UI, seeds the board and blocks in
    mainloop().

    Fix over the original: add_new_tile() retried random cells forever when
    the board was full, freezing the UI on any keypress once no empty cell
    remained; it now returns early in that case.
    """

    def __init__(self):
        tk.Frame.__init__(self)
        self.grid()
        self.master.title('2048')
        self.main_grid = tk.Frame(
            self, bg=c.GRID_COLOR, bd=3, width=400, height=400)
        self.main_grid.grid(pady=(100, 0))
        self.make_GUI()
        self.start_game()
        # Arrow keys drive the four move directions.
        self.master.bind("<Left>", self.left)
        self.master.bind("<Right>", self.right)
        self.master.bind("<Up>", self.up)
        self.master.bind("<Down>", self.down)
        self.mainloop()

    def make_GUI(self):
        """Build the ch x ch grid of cell frames/labels and the score header."""
        self.cells = []
        for i in range(ch):
            row = []
            for j in range(ch):
                cell_frame = tk.Frame(
                    self.main_grid,
                    bg=c.EMPTY_CELL_COLOR,
                    width=100,
                    height=100)
                cell_frame.grid(row=i, column=j, padx=5, pady=5)
                cell_number = tk.Label(self.main_grid, bg=c.EMPTY_CELL_COLOR)
                cell_number.grid(row=i, column=j)
                row.append({"frame": cell_frame, "number": cell_number})
            self.cells.append(row)
        # Score header above the board.
        score_frame = tk.Frame(self)
        score_frame.place(relx=0.5, y=40, anchor="center")
        tk.Label(
            score_frame,
            text="Score",
            font=c.SCORE_LABEL_FONT).grid(
            row=0)
        self.score_label = tk.Label(score_frame, text="0", font=c.SCORE_FONT)
        self.score_label.grid(row=1)

    def _paint_two(self, row, col):
        """Draw a freshly placed "2" tile at (row, col)."""
        self.cells[row][col]["frame"].configure(bg=c.CELL_COLORS[2])
        self.cells[row][col]["number"].configure(
            bg=c.CELL_COLORS[2],
            fg=c.CELL_NUMBER_COLORS[2],
            font=c.CELL_NUMBER_FONTS[2],
            text="2")

    def start_game(self):
        """Reset the matrix, drop the two initial "2" tiles and zero the score."""
        self.matrix = [[0] * ch for _ in range(ch)]
        # First tile: any cell works, the board is empty.
        row = random.randint(0, ch - 1)
        col = random.randint(0, ch - 1)
        self.matrix[row][col] = 2
        self._paint_two(row, col)
        # Second tile: retry until we hit a cell that is still empty.
        while self.matrix[row][col] != 0:
            row = random.randint(0, ch - 1)
            col = random.randint(0, ch - 1)
        self.matrix[row][col] = 2
        self._paint_two(row, col)
        self.score = 0

    # ----- Matrix Manipulation Functions -----

    def stack(self):
        """Slide all non-zero tiles of every row to the left (no merging)."""
        new_matrix = [[0] * ch for _ in range(ch)]
        for i in range(ch):
            fill_position = 0
            for j in range(ch):
                if self.matrix[i][j] != 0:
                    new_matrix[i][fill_position] = self.matrix[i][j]
                    fill_position += 1
        self.matrix = new_matrix

    def combine(self):
        """Merge equal horizontal neighbours (left tile wins) and add to score."""
        for i in range(ch):
            for j in range(ch - 1):
                if self.matrix[i][j] != 0 and self.matrix[i][j] == self.matrix[i][j + 1]:
                    self.matrix[i][j] *= 2
                    self.matrix[i][j + 1] = 0
                    self.score += self.matrix[i][j]

    def reverse(self):
        """Mirror every row left <-> right."""
        self.matrix = [list(reversed(row)) for row in self.matrix]

    def transpose(self):
        """Transpose the matrix (rows become columns)."""
        self.matrix = [[self.matrix[j][i] for j in range(ch)] for i in range(ch)]

    def add_new_tile(self):
        """Place a 2 or 4 in a random empty cell.

        Returns without doing anything when the board is full -- the
        original version looped forever retrying random cells here.
        """
        if not any(0 in row for row in self.matrix):
            return
        row = random.randint(0, ch - 1)
        col = random.randint(0, ch - 1)
        while self.matrix[row][col] != 0:
            row = random.randint(0, ch - 1)
            col = random.randint(0, ch - 1)
        self.matrix[row][col] = random.choice([2, 4])

    def update_GUI(self):
        """Redraw every cell from self.matrix and refresh the score label."""
        for i in range(ch):
            for j in range(ch):
                cell_value = self.matrix[i][j]
                if cell_value == 0:
                    self.cells[i][j]["frame"].configure(bg=c.EMPTY_CELL_COLOR)
                    self.cells[i][j]["number"].configure(
                        bg=c.EMPTY_CELL_COLOR, text="")
                else:
                    self.cells[i][j]["frame"].configure(bg=c.CELL_COLORS[cell_value])
                    self.cells[i][j]["number"].configure(
                        bg=c.CELL_COLORS[cell_value],
                        fg=c.CELL_NUMBER_COLORS[cell_value],
                        font=c.CELL_NUMBER_FONTS[cell_value],
                        text=str(cell_value))
        self.score_label.configure(text=self.score)
        self.update_idletasks()

    # ----- Arrow-Press Functions -----
    # Every direction is expressed as "transform the board so the move is a
    # left move, do stack/combine/stack, transform back".

    def left(self, event):
        """Handle <Left>: slide, merge, slide again, then spawn and redraw."""
        self.stack()
        self.combine()
        self.stack()
        self.add_new_tile()
        self.update_GUI()
        self.game_over()

    def right(self, event):
        """Handle <Right>: mirror, do a left move, mirror back."""
        self.reverse()
        self.stack()
        self.combine()
        self.stack()
        self.reverse()
        self.add_new_tile()
        self.update_GUI()
        self.game_over()

    def up(self, event):
        """Handle <Up>: transpose, do a left move, transpose back."""
        self.transpose()
        self.stack()
        self.combine()
        self.stack()
        self.transpose()
        self.add_new_tile()
        self.update_GUI()
        self.game_over()

    def down(self, event):
        """Handle <Down>: transpose + mirror, do a left move, undo both."""
        self.transpose()
        self.reverse()
        self.stack()
        self.combine()
        self.stack()
        self.reverse()
        self.transpose()
        self.add_new_tile()
        self.update_GUI()
        self.game_over()

    # ----- Check if any moves are possible -----

    def horizontal_move_exists(self):
        """True if some pair of horizontal neighbours could merge."""
        for i in range(ch):
            for j in range(ch - 1):
                if self.matrix[i][j] == self.matrix[i][j + 1]:
                    return True
        return False

    def vertical_move_exists(self):
        """True if some pair of vertical neighbours could merge."""
        for i in range(ch - 1):
            for j in range(ch):
                if self.matrix[i][j] == self.matrix[i + 1][j]:
                    return True
        return False

    def game_over(self):
        """Show the win banner on a 2048 tile, or the lose banner when stuck."""
        if any(2048 in row for row in self.matrix):
            game_over_frame = tk.Frame(self.main_grid, borderwidth=2)
            game_over_frame.place(relx=0.5, rely=0.5, anchor="center")
            tk.Label(
                game_over_frame,
                text="You won!",
                bg=c.WINNER_BG,
                fg=c.GAME_OVER_FONT_COLOR,
                font=c.GAME_OVER_FONT).pack()
        elif not any(0 in row for row in self.matrix) and not self.horizontal_move_exists() and not self.vertical_move_exists():
            game_over_frame = tk.Frame(self.main_grid, borderwidth=2)
            game_over_frame.place(relx=0.5, rely=0.5, anchor="center")
            tk.Label(
                game_over_frame,
                text="Game over!",
                bg=c.LOSER_BG,
                fg=c.GAME_OVER_FONT_COLOR,
                font=c.GAME_OVER_FONT).pack()
def main():
    """Show the size-selection dialog, then launch the game itself."""
    Start()  # defining/instantiating Start blocks until the chooser window closes (sets global `ch`)
    Game()   # builds the board for the chosen size and enters the tk mainloop


if __name__ == "__main__":
    main()
29725869349 | #
#
#Morgans great example code:
#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
#
# GitHub utility for freezing graphs:
#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py
#
#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants
import tensorflow as tf
import numpy as np
from PIL import TiffImagePlugin, ImageOps
from PIL import Image
import pickle
g_graph=None  # dict of TF tensors/ops; filled in by constructSTModel()
#k_freqbins=257
#k_width=856
VERBOSE=0
#------------------------------------------------------------
#global
# gleaned from the parameters in the pickle file; used to load images
height=0  # set by load() from parameters['k_height']
width=0   # set by load() from parameters['k_numFrames']
depth=0   # set by load() from parameters['k_inputChannels']
#-------------------------------------------------------------
def getShape(g, name):
    """Return the static shape of the ":0" output of tensor `name` in graph `g`."""
    tensor = g.get_tensor_by_name(name + ":0")
    return tensor.get_shape()
def loadImage(fname) :
    """Load an image file and return it as a 4-D batch of size 1.

    When the module-global height == 1, the data is returned shaped
    [1, 1, width, depth] with the frequency bins moved into the channel
    axis; otherwise it is returned as [1, height, width, 1].
    NOTE(review): .point(lambda i: i*255) suggests pixel values in [0, 1]
    are expected -- confirm against the files written by save_image().
    """
    #transform into 1D width with frequbins in channel dimension (we do this in the graph in the training net, but not with this reconstructed net)
    if (height==1) :
        return np.transpose(np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,depth,width,1]), [0,3,2,1])
    else :
        return np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,height,width,1])
def generate_noise_image(content_image, noise_ratio=0.6):
    """Blend uniform noise into *content_image*.

    Returns noise * noise_ratio + content * (1 - noise_ratio), where the
    noise tensor is uniform in [-1, 1) with shape taken from the module
    globals (1, height, width, depth).
    """
    print('generate_noise_image with height=' + str(height) + ', width =' + str(width) + ', and depth =' + str(depth))
    noise = np.random.uniform(-1, 1, (1, height, width, depth)).astype(np.float32)
    print('noise_image shape is ' + str(noise.shape))
    blended = noise * noise_ratio + content_image * (1. - noise_ratio)
    return blended
# Assumes caller puts image into the correct orientation
def save_image(image, fname, scaleinfo=None):
    """Write `image` (an H x W x 1 array) to `fname` as a float TIFF.

    The data is shifted so its maximum maps to 255 and clipped at 0
    (keeping the top 80 dB of range), then rescaled to [0, 1] floats.
    TIFF tag 270 (ImageDescription) records the dB scale string, which is
    also returned.  When the module-global height == 1 the array is first
    transposed back from channel orientation.
    """
    print('save_image: shape is ' + str(image.shape))
    if (height==1) : # orientation is freq bins in channels
        print('saving image in channel orientation')
        image = np.transpose(image, [2,1,0])[:,:,0]
    else :
        print('saving image in image orientation')
        image = image[:,:,0]
    print('AFTER reshaping, save_image: shape is ' + str(image.shape))
    print('image max is ' + str(np.amax(image) ))
    print('image min is ' + str(np.amin(image) ))
    # Output should add back the mean pixels we subtracted at the beginning
    # [0,80db] -> [0, 255]
    # after style transfer, images range outside of [0,255].
    # To preserve scale, and mask low values, we shift by (255-max), then clip at 0 and then have all bins in the top 80dB.
    image = np.clip(image-np.amax(image)+255, 0, 255).astype('uint8')
    info = TiffImagePlugin.ImageFileDirectory()
    if (scaleinfo == None) :
        info[270] = '80, 0'  # default scale annotation: "80, 0" dB range
    else :
        info[270] = scaleinfo
    #scipy.misc.imsave(path, image)
    bwarray=np.asarray(image)/255.
    savimg = Image.fromarray(np.float64(bwarray)) #==============================
    savimg.save(fname, tiffinfo=info)
    #print('RGB2TiffGray : tiffinfo is ' + str(info))
    return info[270] # just in case you want it for some reason
##===========================================================================================================
def conv2d(x, W, b, strides=1):
    """Conv2D wrapper: SAME-padded convolution, bias add, then ReLU."""
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    biased = tf.nn.bias_add(conv, b)
    return tf.nn.relu(biased)
def maxpool2d(x, k_h=2, k_w=2):
    """MaxPool2D wrapper with matching kernel and stride (SAME padding).

    The ksize/strides layout is [batch, height, width, channels].
    """
    window = [1, k_h, k_w, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# This can produce any model that model1 and model3 can produce (1D or 2D, 1 layer or 2 layer)
# Rather than keeping weights, biases, and other model ops separate, and producing the logits layer as a return value,
# it returns all variables and ops as a graph object
def constructSTModel(weights, biases, params) : #nlaysers is either 1 or 3
    """Rebuild the trained classifier as a TF graph of constants.

    weights/biases : dicts of numpy arrays (as saved by saveState / read by load)
    params         : architecture hyper-parameters from the pickle file

    Only "X" (the input image) is a tf.Variable -- it is the tensor that
    style transfer optimizes.  All tensors/ops are stored in the
    module-global g_graph dict, which is also returned.
    """
    print("now construct graph ")
    global g_graph
    g_graph = {}
    #k_height = params['k_height']
    #k_inputChannels = params['k_inputChannels']
    #k_ConvRows = params['K_ConvRows'] #conv kernel height
    #k_ConvCols = params['K_ConvCols'] #conv kernel width
    #k_poolRows = params['k_poolRows']
    #k_downsampledHeight = params['k_downsampledHeight']
    #k_downsampledWidth = params['k_downsampledHeight']
    # model params common to both 1D and 2D
    #K_NUMCONVLAYERS = params['K_NUMCONVLAYERS']
    #k_ConvStrideRows = params['k_ConvStrideRows'] #kernel horizontal stride
    #k_ConvStrideCols = params['k_ConvStrideCols'] #kernel vertical stride
    #k_poolStrideRows = params['k_poolStrideRows']
    #Huz - is this right??
    # NOTE(review): pool width depends on depth -- 2 for the 3-layer net, 4 for the 1-layer net; confirm against training code.
    if params['K_NUMCONVLAYERS'] == 3 :
        k_poolCols=2
    else :
        k_poolCols=4
    g_graph["X"] = tf.Variable(np.zeros([1,params['k_height'], params['k_numFrames'], params['k_inputChannels']]), dtype=tf.float32, name="s_X")
    g_graph["w1"]=tf.constant(weights["wc1"], name="s_w1")
    g_graph["b1"]=tf.constant(biases["bc1"], name="s_b1")
    g_graph["h1"]=conv2d(g_graph["X"], g_graph["w1"], g_graph["b1"])
    g_graph["h1pooled"] = maxpool2d(g_graph["h1"], k_h=params['k_poolRows'], k_w=k_poolCols)
    g_graph["W_fc1"] = tf.constant(weights['wd1'], name="s_W_fc1")
    g_graph["b_fc1"] = tf.constant(biases["bd1"], name="s_b_fc1")
    # 3-layer variant adds two more conv/pool stages before the fully-connected layers.
    if params['K_NUMCONVLAYERS']== 3:
        g_graph["w2"]=tf.constant(weights["wc2"], name="s_w2")
        g_graph["b2"]=tf.constant(biases["bc2"], name="s_b2")
        g_graph["h2"]=conv2d(g_graph["h1pooled"], g_graph["w2"], g_graph["b2"])
        g_graph["h2pooled"] = maxpool2d(g_graph["h2"], k_h=params['k_poolRows'], k_w=k_poolCols)
        g_graph["w3"]=tf.constant(weights["wc3"], name="s_w3")
        g_graph["b3"]=tf.constant(biases["bc3"], name="s_b3")
        g_graph["h3"]=conv2d(g_graph["h2pooled"], g_graph["w3"], g_graph["b3"])
        g_graph["fc1"] = tf.reshape(g_graph["h3"], [-1, g_graph["W_fc1"].get_shape().as_list()[0]]) #convlayers_output
    else :
        g_graph["fc1"] = tf.reshape(g_graph["h1pooled"], [-1, g_graph["W_fc1"].get_shape().as_list()[0]]) #convlayers_output
    g_graph["h_fc1"] = tf.nn.relu(tf.matmul(g_graph["fc1"], g_graph["W_fc1"]) + g_graph["b_fc1"], name="s_h_fc1")
    g_graph["W_fc2"] = tf.constant(weights['wout'], name="s_W_fc2")
    g_graph["b_fc2"] = tf.constant(biases['bout'], name="s_b_fc2")
    g_graph["logits"] = tf.add(tf.matmul(g_graph["h_fc1"], g_graph["W_fc2"]) , g_graph["b_fc2"] , name="s_logits") #"out"
    g_graph["softmax_preds"] = tf.nn.softmax(logits=g_graph["logits"], name="s_softmax_preds")
    print("graph built - returning ")
    return g_graph
#=============================================================================================================
def constructSTModel_old(weights, biases, params) :
    """Legacy 2-conv-layer graph builder, superseded by constructSTModel().

    NOTE(review): the body reads a name ``state`` (e.g. state["w1:0"]) that
    is never defined in this module, so calling this function would raise
    NameError; the weights/biases arguments are unused.  This looks like
    dead code kept for reference -- confirm before deleting or fixing.
    """
    global g_graph
    g_graph = {}
    #This is the variable that we will "train" to match style and content images.
    ##g_graph["X"] = tf.Variable(np.zeros([1,k_width*k_freqbins]), dtype=tf.float32, name="s_x_image")
    ##g_graph["x_image"] = tf.reshape(g_graph["X"], [1,k_height,k_width,k_inputChannels])
    g_graph["X"] = tf.Variable(np.zeros([1,params['k_height'], params['k_width'], params['k_inputChannels']]), dtype=tf.float32, name="s_X")
    g_graph["w1"]=tf.constant(state["w1:0"], name="s_w1")
    g_graph["b1"]=tf.constant(state["b1:0"], name="s_b1")
    #g_graph["w1"]=tf.Variable(tf.truncated_normal(getShape( tg, "w1"), stddev=0.1), name="w1")
    #g_graph["b1"]=tf.Variable(tf.constant(0.1, shape=getShape( tg, "b1")), name="b1")
    # tf.nn.relu(tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b1, name="h1")
    g_graph["h1"]=tf.nn.relu(tf.nn.conv2d(g_graph["X"], g_graph["w1"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph["b1"], name="s_h1")
    # 2x2 max pooling
    g_graph["h1pooled"] = tf.nn.max_pool(g_graph["h1"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name="s_h1_pooled")
    g_graph["w2"]=tf.constant(state["w2:0"], name="s_w2")
    g_graph["b2"]=tf.constant(state["b2:0"], name="s_b2")
    #g_graph["w2"]=tf.Variable(tf.truncated_normal(getShape( tg, "w2"), stddev=0.1), name="w2")
    #g_graph["b2"]=tf.Variable(tf.constant(0.1, shape=getShape( tg, "b2")), name="b2")
    g_graph["h2"]=tf.nn.relu(tf.nn.conv2d(g_graph["h1pooled"], g_graph["w2"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph["b2"], name="s_h2")
    g_graph["h2pooled"] = tf.nn.max_pool(g_graph["h2"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name='s_h2_pooled')
    g_graph["convlayers_output"] = tf.reshape(g_graph["h2pooled"], [-1, params['k_downsampledWidth'] * params['k_downsampledHeight']*params['L2_CHANNELS']]) # to prepare it for multiplication by W_fc1
    #
    g_graph["W_fc1"] = tf.constant(state["W_fc1:0"], name="s_W_fc1")
    g_graph["b_fc1"] = tf.constant(state["b_fc1:0"], name="s_b_fc1")
    #g_graph["keepProb"]=tf.placeholder(tf.float32, (), name= "keepProb")
    #g_graph["h_fc1"] = tf.nn.relu(tf.matmul(tf.nn.dropout(g_graph["convlayers_output"], g_graph["keepProb"]), g_graph["W_fc1"]) + g_graph["b_fc1"], name="h_fc1")
    g_graph["h_fc1"] = tf.nn.relu(tf.matmul(g_graph["convlayers_output"], g_graph["W_fc1"]) + g_graph["b_fc1"], name="s_h_fc1")
    #Read out layer
    g_graph["W_fc2"] = tf.constant(state["W_fc2:0"], name="s_W_fc2")
    g_graph["b_fc2"] = tf.constant(state["b_fc2:0"], name="s_b_fc2")
    g_graph["logits_"] = tf.matmul(g_graph["h_fc1"], g_graph["W_fc2"])
    g_graph["logits"] = tf.add(g_graph["logits_"] , g_graph["b_fc2"] , name="s_logits")
    g_graph["softmax_preds"] = tf.nn.softmax(logits=g_graph["logits"], name="s_softmax_preds")
    return g_graph
# Create and save the pickle file of parameters
def saveState(sess, weight_dic, bias_dic, parameters, fname):
    """Evaluate all weight/bias tensors and pickle them together with params.

    Args:
        sess: TF session used to evaluate each tensor.
        weight_dic, bias_dic: dicts of tensors; MUTATED IN PLACE -- each
            entry is replaced by its evaluated numpy array.
        parameters: plain dict of hyper-parameters, stored as-is.
        fname: path of the pickle file to write.

    Fixes over the original: removed the dead, unused ``state = {}`` local
    and closed the output file deterministically (the original passed an
    anonymous ``open(fname, "wb")`` to pickle.dump and leaked the handle).
    """
    # Convert tensors to python (numpy) arrays.
    for n in weight_dic:
        weight_dic[n] = sess.run(weight_dic[n])
    for b in bias_dic:
        bias_dic[b] = sess.run(bias_dic[b])
    # Combine everything into a single object for serialization.
    netObject = {
        'weight_dic': weight_dic,
        'bias_dic': bias_dic,
        'parameters': parameters
    }
    with open(fname, "wb") as fh:
        pickle.dump(netObject, fh)
# Load the pickle file of parameters
def load(pickleFile, randomize=0) :
    """Unpickle weights/biases/params and rebuild the inference graph.

    pickleFile : path written by saveState().
    randomize  : when 1, replace every weight and bias with uniform noise
                 in [-0.1, 0.1) -- useful as a random-network baseline.

    Side effects: sets the module globals height/width/depth (used by
    loadImage / generate_noise_image / save_image) and g_graph (via
    constructSTModel).  Returns the graph dict.
    """
    print(' will read state from ' + pickleFile)
    netObject=pickle.load( open( pickleFile, "rb" ) )
    #state = netObject['state']
    weight_dic = netObject['weight_dic']
    bias_dic = netObject['bias_dic']
    parameters = netObject['parameters']
    if randomize ==1 :
        print('randomizing weights')
        for n in weight_dic.keys():
            print('shape of weights[' + n + '] is ' + str(weight_dic[n].shape))
            weight_dic[n] = .2* np.random.random_sample(weight_dic[n].shape).astype(np.float32) -.1
        print('randomizing biases')
        for n in bias_dic.keys():
            print('shape of biases[' + n + '] is ' + str(bias_dic[n].shape))
            bias_dic[n] = .2* np.random.random_sample(bias_dic[n].shape).astype(np.float32) -.1
    print("weight keys are " + str(weight_dic.keys()))
    print("bias keys are " + str(bias_dic.keys()))
    for p in parameters.keys() :
        print('param[' + p + '] = ' + str(parameters[p]))
    # Publish the image geometry for the module-level helpers above.
    global height
    height = parameters['k_height']
    global width
    #width = parameters['k_width']
    width = parameters['k_numFrames']
    global depth
    depth = parameters['k_inputChannels']
    return constructSTModel(weight_dic, bias_dic, parameters)
| muhdhuz/compareTF | Training/utils/pickledModel.py | pickledModel.py | py | 11,477 | python | en | code | 2 | github-code | 13 |
6396249945 | # WAP to fill a square matrix with value zero on the diagonals, 1 on the upper right triangle, and -1 on the lower left triangle.
# Fill a matrix with 0 on the diagonal, 1 in the upper-right triangle
# and -1 in the lower-left triangle.
import numpy as np

# Read the two dimensions from one whitespace-separated line, e.g. "3 4".
row, col = (int(i) for i in input("Enter no. of Rows and Columns of the array: ").split())

# sign(j - i) is exactly the required pattern: 0 where j == i (diagonal),
# +1 where j > i (upper triangle) and -1 where j < i (lower triangle) --
# a single vectorized expression instead of the original nested loops.
a = np.sign(np.arange(col) - np.arange(row)[:, np.newaxis])
print(a)
36674725659 | """
Programmer: Troy Bechtold
Class: CptS 322-01, Spring 2022
Programming Assignment #3
Description: plot utils for jupternotebooks
"""
import matplotlib.pyplot as plt
import utils
plt.style.use('seaborn-dark')
def bar_chart(values, columns, title, x_axis_name, y_axis_name, save=False, file_name=""):
    '''
    Plot a bar chart: bar heights `columns` at categories `values`.
    When save is True the figure is also written to graphs/<file_name>.png.
    '''
    plt.figure(figsize=(30,10))
    plt.title(title, size = 30)
    plt.xlabel(x_axis_name, size = 30)
    plt.ylabel(y_axis_name, size = 30)
    plt.bar(values, columns, align="edge", width=.4)
    plt.xticks(size = 20, rotation=45)
    plt.yticks(size = 20)
    fig = plt.gcf()  # grab the figure before show() so it can still be saved after display
    plt.show()
    if save is True:
        fig.savefig("graphs/" + file_name + ".png")
def pie_chart(nums, the_labels, title):
    '''
    Plot a pie chart of `nums` labelled with `the_labels`; slices are
    annotated with percentages and are NOT renormalized to sum to 1.
    '''
    plt.figure()
    plt.title(title, size = 30)
    plt.pie(nums, labels=the_labels, autopct="%1.1f%%", normalize=False)
    plt.show()
def hist_chart_frequency(values, columns, title, x_axis_name, y_axis_name):
    '''
    Plot a histogram from precomputed bins: `values` holds the bin edges
    (one more than len(columns)) and `columns` the per-bin frequencies.
    '''
    plt.figure(figsize=(30,10))
    plt.title(title, size = 30)
    plt.xlabel(x_axis_name, size = 30)
    plt.ylabel(y_axis_name, size = 30)
    plt.xticks(size = 20)
    plt.yticks(size = 20)
    # bar width equals the bin width so adjacent bars touch, histogram-style
    plt.bar(values[:-1], columns, width=(values[1] - values[0]), edgecolor="black", align="edge")
    plt.show()
def hist_chart(column_data, title, x_axis_name, y_axis_name):
    '''
    Plot a 10-bin histogram computed directly from the raw `column_data`.
    '''
    plt.figure(figsize=(30,10))
    plt.title(title, size = 30)
    plt.xlabel(x_axis_name, size = 30)
    plt.ylabel(y_axis_name, size = 30)
    plt.xticks(size = 20)
    plt.yticks(size = 20)
    plt.hist(column_data, bins=10)
    plt.xticks(rotation=90)
    plt.show()
def scatter_plot(column_x_data, column_y_data, title, x_axis_name, y_axis_name):
    '''
    Plot a plain scatter chart of `column_y_data` against `column_x_data`.
    '''
    plt.figure()
    plt.title(title, size = 30)
    plt.xlabel(x_axis_name, size = 30)
    plt.ylabel(y_axis_name, size = 30)
    plt.xticks(size = 20)
    plt.yticks(size = 20)
    plt.scatter(column_x_data, column_y_data)
    plt.show()
def scatter_with_linear_regression(column_x_data, column_y_data, title, x_axis_name, y_axis_name):
    '''
    Scatter plot plus a least-squares regression line.  The correlation
    coefficient and covariance (computed by the project's utils module)
    are annotated in the upper-right of the axes.
    '''
    plt.figure()
    plt.title(title, size = 30)
    plt.xlabel(x_axis_name, size = 30)
    plt.ylabel(y_axis_name, size = 30)
    plt.xticks(size = 20)
    plt.yticks(size = 20)
    plt.scatter(column_x_data, column_y_data)
    coefficient, covariance = utils.compute_coefficient_covariance(column_x_data, column_y_data)
    slope_of_the_line, intercept = utils.compute_slope_intercept(column_x_data, column_y_data)
    plt.annotate("coefficient =" + str(coefficient), xy=(0.7,0.9),
                 xycoords="axes fraction", horizontalalignment="center")
    plt.annotate("covarient =" + str(covariance), xy=(0.7,0.8),
                 xycoords="axes fraction", horizontalalignment="center")
    # regression line drawn through the two extreme x values
    plt.plot([min(column_x_data), max(column_x_data)],
             [slope_of_the_line * min(column_x_data) + intercept,
              slope_of_the_line * max(column_x_data) + intercept], c="r")
def box_plot(data, names, title, y_axis_name):
    '''
    Plot side-by-side box plots, one per entry of `data`, with x-axis
    labels taken from `names`.
    '''
    plt.figure(figsize=(30,10))
    plt.title(title, size = 30)
    plt.boxplot(data)
    plt.ylabel(y_axis_name, size = 30)
    plt.xticks(list(range(1, len(names) + 1)), names)
    plt.xticks(size = 20, rotation=45)
    # NOTE(review): this hard-coded "mu=100" annotation looks like leftover
    # example code -- confirm it is intentional.
    plt.annotate(r"$\mu=100$", xy=(0.5, 0.5), xycoords="axes fraction",
                 horizontalalignment="center", color="blue")
    plt.show()
| tbech12/CPSC322-Final-Project | plot_utils.py | plot_utils.py | py | 3,708 | python | en | code | 0 | github-code | 13 |
16808782554 | import datetime
import subprocess
import sys
from unittest import TestCase
import pytest
from hypothesis import example, given, strategies as st
from hypothesis._settings import (
HealthCheck,
Phase,
Verbosity,
default_variable,
local_settings,
note_deprecation,
settings,
)
from hypothesis.database import ExampleDatabase
from hypothesis.errors import (
HypothesisDeprecationWarning,
InvalidArgument,
InvalidState,
)
from hypothesis.stateful import RuleBasedStateMachine, rule
from hypothesis.utils.conventions import not_set
from tests.common.utils import (
checks_deprecated_behaviour,
counts_calls,
fails_with,
validate_deprecation,
)
def test_has_docstrings():
assert settings.verbosity.__doc__
original_default = settings.get_profile("default").max_examples
def setup_function(fn):
    # Pytest per-test hook: start every test from a freshly registered,
    # all-defaults profile so tests cannot leak settings into one another.
    settings.load_profile("default")
    settings.register_profile("test_settings", settings())
    settings.load_profile("test_settings")
def test_cannot_set_non_settings():
s = settings()
with pytest.raises(AttributeError):
s.databas_file = "some_file"
def test_settings_uses_defaults():
s = settings()
assert s.max_examples == settings.default.max_examples
def test_raises_attribute_error():
with pytest.raises(AttributeError):
settings().kittens
def test_respects_none_database():
assert settings(database=None).database is None
def test_can_repeatedly_push_the_same_thing():
s = settings(max_examples=12)
t = settings(max_examples=17)
assert settings().max_examples == original_default
with local_settings(s):
assert settings().max_examples == 12
with local_settings(t):
assert settings().max_examples == 17
with local_settings(s):
assert settings().max_examples == 12
with local_settings(t):
assert settings().max_examples == 17
assert settings().max_examples == 12
assert settings().max_examples == 17
assert settings().max_examples == 12
assert settings().max_examples == original_default
def test_can_set_verbosity():
settings(verbosity=Verbosity.quiet)
settings(verbosity=Verbosity.normal)
settings(verbosity=Verbosity.verbose)
settings(verbosity=Verbosity.debug)
def test_can_not_set_verbosity_to_non_verbosity():
with pytest.raises(InvalidArgument):
settings(verbosity="kittens")
@pytest.mark.parametrize("db", [None, ExampleDatabase()])
def test_inherits_an_empty_database(db):
assert settings.default.database is not None
s = settings(database=db)
assert s.database is db
with local_settings(s):
t = settings()
assert t.database is db
@pytest.mark.parametrize("db", [None, ExampleDatabase()])
def test_can_assign_database(db):
x = settings(database=db)
assert x.database is db
def test_will_reload_profile_when_default_is_absent():
original = settings.default
default_variable.value = None
assert settings.default is original
def test_load_profile():
settings.load_profile("default")
assert settings.default.max_examples == original_default
assert settings.default.stateful_step_count == 50
settings.register_profile("test", settings(max_examples=10), stateful_step_count=5)
settings.load_profile("test")
assert settings.default.max_examples == 10
assert settings.default.stateful_step_count == 5
settings.load_profile("default")
assert settings.default.max_examples == original_default
assert settings.default.stateful_step_count == 50
def test_profile_names_must_be_strings():
with pytest.raises(InvalidArgument):
settings.register_profile(5)
with pytest.raises(InvalidArgument):
settings.get_profile(5)
with pytest.raises(InvalidArgument):
settings.load_profile(5)
def test_loading_profile_keeps_expected_behaviour():
settings.register_profile("ci", settings(max_examples=10000))
settings.load_profile("ci")
assert settings().max_examples == 10000
with local_settings(settings(max_examples=5)):
assert settings().max_examples == 5
assert settings().max_examples == 10000
def test_load_non_existent_profile():
with pytest.raises(InvalidArgument):
settings.get_profile("nonsense")
def test_cannot_delete_a_setting():
x = settings()
with pytest.raises(AttributeError):
del x.max_examples
x.max_examples
x = settings()
with pytest.raises(AttributeError):
del x.foo
def test_cannot_set_settings():
x = settings()
with pytest.raises(AttributeError):
x.max_examples = "foo"
with pytest.raises(AttributeError):
x.database = "foo"
assert x.max_examples != "foo"
assert x.database != "foo"
def test_can_have_none_database():
assert settings(database=None).database is None
@pytest.mark.parametrize("db", [None, ExampleDatabase(":memory:")])
@pytest.mark.parametrize("bad_db", [":memory:", ".hypothesis/examples"])
def test_database_type_must_be_ExampleDatabase(db, bad_db):
with local_settings(settings(database=db)):
settings_property_db = settings.database
with pytest.raises(InvalidArgument):
settings(database=bad_db)
assert settings.database is settings_property_db
def test_cannot_define_settings_once_locked():
with pytest.raises(InvalidState):
settings._define_setting("hi", "there", default=4)
def test_cannot_assign_default():
with pytest.raises(AttributeError):
settings.default = settings(max_examples=3)
assert settings().max_examples != 3
@settings(max_examples=7)
@given(st.builds(lambda: settings.default))
def test_settings_in_strategies_are_from_test_scope(s):
assert s.max_examples == 7
TEST_SETTINGS_ALONE = """
from hypothesis import settings
from hypothesis.strategies import integers
@settings()
def test_settings_alone():
pass
"""
def test_settings_alone(testdir):
script = testdir.makepyfile(TEST_SETTINGS_ALONE)
result = testdir.runpytest(script)
out = "\n".join(result.stdout.lines)
assert (
"Using `@settings` on a test without `@given` is completely pointless." in out
)
assert "InvalidArgument" in out
assert result.ret == 1
@fails_with(InvalidArgument)
def test_settings_applied_twice_is_error():
@given(st.integers())
@settings()
@settings()
def test_nothing(x):
pass
@settings()
@given(st.integers())
def test_outer_ok(x):
pass
@given(st.integers())
@settings()
def test_inner_ok(x):
pass
def test_settings_as_decorator_must_be_on_callable():
with pytest.raises(InvalidArgument):
settings()(1)
ASSERT_DATABASE_PATH = """
import tempfile
from hypothesis import settings
from hypothesis.configuration import set_hypothesis_home_dir
from hypothesis.database import DirectoryBasedExampleDatabase
settings.default.database
if __name__ == '__main__':
new_home = tempfile.mkdtemp()
set_hypothesis_home_dir(new_home)
db = settings.default.database
assert isinstance(db, DirectoryBasedExampleDatabase), db
# TODO: use .is_relative_to() when we drop Python 3.8
assert str(db.path).startswith(new_home), (db.path, new_home)
"""
def test_puts_the_database_in_the_home_dir_by_default(tmp_path):
script = tmp_path.joinpath("assertlocation.py")
script.write_text(ASSERT_DATABASE_PATH, encoding="utf-8")
subprocess.check_call([sys.executable, str(script)])
def test_database_is_reference_preserved():
s = settings(database=not_set)
assert s.database is s.database
@settings(verbosity=Verbosity.verbose)
@example(x=99)
@given(st.integers())
def test_settings_apply_for_explicit_examples(x):
# Regression test for #1521
assert settings.default.verbosity == Verbosity.verbose
class TestGivenExampleSettingsExplicitCalled(TestCase):
"""Real nasty edge case here.
in #2160, if ``example`` is after ``given`` but before ``settings``,
it will be completely ignored.
If we set phases to only ``explicit``, the test case will never be called!
We have to run an assertion outside of the test case itself.
"""
@counts_calls
def call_target(self):
pass
@given(st.booleans())
@example(True)
@settings(phases=[Phase.explicit])
def test_example_explicit(self, x):
self.call_target()
def tearDown(self):
# In #2160, this is 0.
assert self.call_target.calls == 1
def test_setattr_on_settings_singleton_is_error():
# https://github.com/pandas-dev/pandas/pull/22679#issuecomment-420750921
# Should be setting attributes on settings.default, not settings!
with pytest.raises(AttributeError):
settings.max_examples = 10
def test_deadline_given_none():
x = settings(deadline=None).deadline
assert x is None
def test_deadline_given_valid_int():
x = settings(deadline=1000).deadline
assert isinstance(x, datetime.timedelta)
assert x.days == 0
assert x.seconds == 1
assert x.microseconds == 0
def test_deadline_given_valid_float():
x = settings(deadline=2050.25).deadline
assert isinstance(x, datetime.timedelta)
assert x.days == 0
assert x.seconds == 2
assert x.microseconds == 50250
def test_deadline_given_valid_timedelta():
x = settings(deadline=datetime.timedelta(days=1, microseconds=15030000)).deadline
assert isinstance(x, datetime.timedelta)
assert x.days == 1
assert x.seconds == 15
assert x.microseconds == 30000
@pytest.mark.parametrize(
"x",
[
0,
-0.7,
-1,
86400000000000000.2,
datetime.timedelta(microseconds=-1),
datetime.timedelta(0),
],
)
def test_invalid_deadline(x):
with pytest.raises(InvalidArgument):
settings(deadline=x)
@pytest.mark.parametrize("value", ["always"])
def test_can_not_set_print_blob_to_non_print_settings(value):
with pytest.raises(InvalidArgument):
settings(print_blob=value)
settings_step_count = 1
@settings(stateful_step_count=settings_step_count)
class StepCounter(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.step_count = 0
@rule()
def count_step(self):
self.step_count += 1
def teardown(self):
assert self.step_count <= settings_step_count
test_settings_decorator_applies_to_rule_based_state_machine_class = StepCounter.TestCase
def test_two_settings_decorators_applied_to_state_machine_class_raises_error():
with pytest.raises(InvalidArgument):
@settings()
@settings()
class StatefulTest(RuleBasedStateMachine):
pass
def test_settings_decorator_applied_to_non_state_machine_class_raises_error():
with pytest.raises(InvalidArgument):
@settings()
class NonStateMachine:
pass
def test_assigning_to_settings_attribute_on_state_machine_raises_error():
class StateMachine(RuleBasedStateMachine):
@rule(x=st.none())
def a_rule(self, x):
assert x is None
with pytest.raises(AttributeError):
StateMachine.settings = settings()
state_machine_instance = StateMachine()
state_machine_instance.settings = "any value"
def test_derandomise_with_explicit_database_is_invalid():
with pytest.raises(InvalidArgument):
settings(derandomize=True, database=ExampleDatabase(":memory:"))
@pytest.mark.parametrize(
"kwargs",
[
{"max_examples": -1},
{"max_examples": 2.5},
{"stateful_step_count": -1},
{"stateful_step_count": 2.5},
{"deadline": -1},
{"deadline": 0},
{"deadline": True},
{"deadline": False},
],
)
def test_invalid_settings_are_errors(kwargs):
with pytest.raises(InvalidArgument):
settings(**kwargs)
def test_invalid_parent():
class NotSettings:
def __repr__(self):
return "(not settings repr)"
not_settings = NotSettings()
with pytest.raises(InvalidArgument) as excinfo:
settings(not_settings)
assert "parent=(not settings repr)" in str(excinfo.value)
def test_show_changed():
    # show_changed() reports only the non-default settings, alphabetically.
    s = settings(max_examples=999, database=None)
    assert s.show_changed() == "database=None, max_examples=999"
def test_note_deprecation_checks_date():
with pytest.warns(HypothesisDeprecationWarning) as rec:
note_deprecation("This is bad", since="RELEASEDAY", has_codemod=False)
assert len(rec) == 1
with pytest.raises(AssertionError):
note_deprecation("This is way too old", since="1999-12-31", has_codemod=False)
def test_note_deprecation_checks_has_codemod():
with pytest.warns(
HypothesisDeprecationWarning,
match="The `hypothesis codemod` command-line tool",
):
note_deprecation("This is bad", since="2021-01-01", has_codemod=True)
def test_deprecated_settings_warn_on_set_settings():
with validate_deprecation():
settings(suppress_health_check=[HealthCheck.return_value])
with validate_deprecation():
settings(suppress_health_check=[HealthCheck.not_a_test_method])
@checks_deprecated_behaviour
def test_deprecated_settings_not_in_settings_all_list():
al = HealthCheck.all()
ls = list(HealthCheck)
assert al == ls
assert HealthCheck.return_value not in ls
assert HealthCheck.not_a_test_method not in ls
| HypothesisWorks/hypothesis | hypothesis-python/tests/cover/test_settings.py | test_settings.py | py | 13,475 | python | en | code | 7,035 | github-code | 13 |
31093844719 | from utils import *
import numpy
import shutil
# LEEEEMEEEEEEEEE (para correr)
# 1. borrar imgs en imgs_input
# 2. generar imgs a correr en imgs_input: python csv_converter.py ../data/ imgs_input/ .png 32
# los archivos tienen que ser de la forma "nombre-tamaño.csv" (el conversor los genera asi por default)
# 2. correr y buscar resultados del exp. en carpeta resultados =)
def escribir_resultados_en_archivo(resultado, img, output, psnr, cant_rayos):
    """Append one CSV row (image, rays, psnr, time-in-ms) to *resultado*.

    The time is taken from output["time-lsq"], truncated at the "ms" suffix.
    The same row is echoed to stdout prefixed with ">> ".
    """
    raw_time = output["time-lsq"]
    tiempo_ms = raw_time[:raw_time.find("ms")]
    fila = f"{img},{cant_rayos},{psnr},{tiempo_ms}"
    resultado.write(fila + "\n")
    print(">> " + fila)
def listar_archivos_en_carpeta(path):
    """Return the names of every file under *path* (recursively).

    Only file names are returned (no directory components); a missing
    directory yields an empty list, mirroring os.walk's behaviour.
    """
    nombres = []
    for _root, _dirs, archivos in os.walk(path):
        nombres.extend(archivos)
    return nombres
def ejecutar_y_escribir_resultado(exp_args):
    """Run the full experiment: rebuild the output folder, execute the
    tomography binary on every input image for a linspace sweep of ray
    counts, and append (psnr, time) rows to a results CSV.

    Relies on helpers imported from utils: ejecutar_tp, parsear_output, psnr.
    """
    print("\n-----------CLEAN AND CREATE FOLDERS----------------")
    # Recreate the output-image folder from scratch; keep ./resultados/.
    if os.path.exists(exp_args["IMGS_DIR_OUTPUT"]):
        shutil.rmtree(exp_args["IMGS_DIR_OUTPUT"])
    if not os.path.exists("./resultados/"):
        os.mkdir("./resultados/")
        print("Nueva carpeta: ./resultados/")
    os.mkdir(exp_args["IMGS_DIR_OUTPUT"])
    print("Re-creada:" + exp_args["IMGS_DIR_OUTPUT"])
    print("\n------------------FETCH INPUT----------------------")
    print("Leyendo imagenes en carpeta " + exp_args["IMGS_DIR_INPUT"])
    imgs = listar_archivos_en_carpeta(exp_args["IMGS_DIR_INPUT"])
    print("Imagenes encontradas: " + ", ".join(imgs))
    print("\n------------------ EXECUTION-----------------------")
    output_name = "resultados/psnr-tiempos-variando-rayos-data.csv"
    print("OUTPUT_FILE: {}\n".format(output_name))
    resultado = open("./" + output_name, "w")
    header = "nombre_imagen,cant_rayos,psnr,tiempo_ms"
    resultado.write(header + "\n")
    print(">> " + header)
    # NOTE(review): min/max/id below shadow builtins; kept byte-identical.
    min = exp_args["MIN_RAYS_COUNT"]
    max = exp_args["MAX_RAYS_COUNT"]
    count = exp_args["COUNT_RAYS_COUNT"]
    img_count = 1
    id = 0
    for img in imgs:
        # Input files are named "<name>-<size>.ext"; drop the 4-char
        # extension to recover the image dimension for the "-n" flag.
        dimension = img.split("-")[-1][:-4]
        print("({}/{}) Ejecutando imagen {}".format(img_count, len(imgs), img))
        img_count += 1
        for rays_count in list(numpy.linspace(min, max, count).astype(int)):
            input_img = exp_args["IMGS_DIR_INPUT"] + img
            output_img = exp_args["IMGS_DIR_OUTPUT"] + img + "-output-" + str(id)
            program_args = {input_img: output_img,
                            "--ray-count": rays_count,
                            "--ray-type": exp_args["RAY_TYPE"],
                            # "--cache": exp_args["CACHE_FOLDER"],
                            "-n": dimension}
            # Stream the solver's stdout while accumulating it for parsing.
            output = ""
            for line in ejecutar_tp(input_img, output_img, program_args):
                output += line
                print(line, end="")
            parsed_output = parsear_output(output)
            value_psnr = psnr(output_img, exp_args["IMGS_DIR_INPUT"] + img)
            escribir_resultados_en_archivo(resultado, img, parsed_output, value_psnr, rays_count)
            id +=1
        print()
    resultado.close()
# Experiment configuration: input/output folders, ray geometry (RAY_TYPE)
# and the linspace sweep [MIN_RAYS_COUNT, MAX_RAYS_COUNT] sampled at
# COUNT_RAYS_COUNT points.
exp_args = {"IMGS_DIR_INPUT": "./imgs_input/",
            "IMGS_DIR_OUTPUT": "./imgs_output/",
            "RAY_TYPE": 0,
            "MIN_RAYS_COUNT": 10,
            "MAX_RAYS_COUNT": 30000,
            "COUNT_RAYS_COUNT": 20}
print("Ejecutando exp...")
ejecutar_y_escribir_resultado(exp_args)
print("Listo!")
| ABorgna/metnum | tp3/conversor_csv/exp_psnr_tiempos_variando_cant_rayos.py | exp_psnr_tiempos_variando_cant_rayos.py | py | 3,500 | python | es | code | 0 | github-code | 13 |
14581220540 | import os
import pytest
import torch
from src.models.train_model import build_model
# Make paths relative to the repo root (the parent of this test's folder).
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname + "/..")
# Get model struct
model, model_conf = build_model()
model.train()
# The model must emit (batch, 10) outputs for any batch size.
@pytest.mark.parametrize("batch", [20, 10, 90])
def test_dim_output(batch):
    log_ps = model(torch.randn(batch, 28, 28))
    assert log_ps.shape[0] == batch, "Batch not correct size"
    assert log_ps.shape[1] == 10, "Output dimension per sample is not correct"
def test_dim_input():
    # The model validates its input: rank must be 3 and trailing dims 28x28.
    with pytest.raises(ValueError, match="Expected input to a 3D tensor"):
        model(torch.randn(1, 28, 28, 4))
    with pytest.raises(ValueError, match=r"Expected each sample to have shape \[28, 28\]"):
        model(torch.randn(1, 2, 3))
| NWeis97/ML_Ops_Project | tests/test_model.py | test_model.py | py | 783 | python | en | code | 0 | github-code | 13 |
70180226258 | from rest_framework.test import APITestCase
from rest_framework import status
class SampleTests(APITestCase):
    """Smoke test for the API root endpoint."""
    def test_sample(self):
        # GET /apiv1/ should answer 200 OK.
        http = status.HTTP_200_OK
        url = '/apiv1/'
        res = self.client.get(url)
        self.assertEqual(res.status_code, http)
| emori92/drf-jwt-practice | back/apiv1/tests.py | tests.py | py | 281 | python | en | code | 0 | github-code | 13 |
5878839207 | from ast import ClassDef
import numpy as np
import struct
from ismember import ismember
#funcion struct2table en python: convierte una estructura a una lista
def myStruct2table(list):
    """Port of MATLAB's struct2table: flatten the input into a plain Python list.

    The input is first passed through numpy (so array-like structures become
    row-iterable); if numpy rejects it, the raw input is iterated instead.
    NOTE: the parameter name shadows the builtin ``list``; it is kept for
    backward compatibility with existing positional callers.
    """
    try:
        arr = np.array(list)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the fallback behaviour is unchanged.
        print('error en arr = np.array(estructura)')
        arr = list
    # Iterating covers both the ndarray case (rows) and the raw fallback.
    return [x for x in arr]
def bfs(home = None,path = None,current_node = None,uav_data = None):
    """Breadth-first search over relay points until the UAV is back at
    *home* with no pending relays (current_node['r'] == 0).

    NOTE(review): this is a mechanical MATLAB port.  Several MATLAB idioms
    survive untranslated and would fail at runtime as written: `OPEN(1)` and
    `SUCS(i)` use call syntax instead of indexing, and
    `np.array([current_node, OPEN])` is not list concatenation.  Kept
    byte-identical pending a real rewrite — confirm against the MATLAB
    original before fixing.
    """
    #h = 0;
    OPEN = []
    CLOSE = []
    # Re-root the search: copy the start node with elapsed time t reset to 0.
    current_node = {'parent' : current_node['parent'],'x' : current_node['x'],'y' : current_node['y'],'t' : 0,'g' : current_node['g'],'r' : current_node['r']}
    OPEN = np.array([current_node,OPEN])
    while (not len(OPEN)==0 ):
        current_node = OPEN(1)
        OPEN = remove(OPEN,current_node)
        # Goal test: back home with every relay served.
        if (isSamePoint(np.array([[current_node['x']],[current_node['y']]]),home) and current_node['r'] == 0):
            break
        SUCS = expand_graph(current_node,path,home,uav_data)
        __,n_sucs = SUCS.shape
        for i in np.arange(1,n_sucs+1).reshape(-1):
            # Only enqueue successors seen in neither CLOSE nor OPEN.
            in_closed = is_visited(CLOSE,SUCS(i))
            if (0 == in_closed):
                in_open = is_visited(OPEN,SUCS(i))
                if (0 == in_open):
                    OPEN = np.array([SUCS(i),OPEN])
        CLOSE = np.array([CLOSE,current_node])
    return current_node
def expand_graph(current_node = None,path = None,home = None,uav_data = None):
    """Generate the feasible successor nodes of *current_node*.

    A candidate point is kept only if it is not the current point, has not
    already been visited on this flight (is_inpath), and the UAV can still
    return home within its total flight time uav_data['Tt'].

    NOTE(review): MATLAB call-syntax indexing survives here too
    (`path(1,i)`, `suc(1,1)`), and `suc.r` vs `suc['r']` is inconsistent —
    untranslated port, kept byte-identical.
    """
    __,c = path.shape
    SUCS = []
    for i in np.arange(1,c+1).reshape(-1):
        suc = np.array([[path(1,i)],[path(2,i)]])
        if (not isSamePoint(np.array([[current_node['x']],[current_node['y']]]),suc) ):
            # Home is always revisitable; other points only if not in path.
            if (isSamePoint(home,suc)):
                is_rel = False
            else:
                is_rel = is_inpath(current_node,suc)
            if (not is_rel ):
                d_toSuc = EUC_2D_Distance(np.array([[current_node['x']],[current_node['y']]]),suc)
                t_toSuc = EUC_2D_Time(d_toSuc,uav_data)
                # Leaving home adds take-off time To; otherwise add the
                # per-hop service time Tg (and landing Tl + charge tc when
                # the successor is home).
                if (isSamePoint(np.array([[current_node['x']],[current_node['y']]]),home)):
                    g = uav_data['To'] + t_toSuc
                    t_toSuc = current_node['t'] + uav_data['To'] + t_toSuc
                else:
                    g = current_node['g'] + t_toSuc + uav_data['Tg']
                    if (isSamePoint(suc,home)):
                        tc = uav_data['Tt'] - g
                        t_toSuc = current_node['t'] + t_toSuc + uav_data['Tl'] + tc
                    else:
                        t_toSuc = current_node['t'] + t_toSuc
                # Feasibility: enough endurance left to reach home and land.
                d_tohome = EUC_2D_Distance(home,suc)
                t_tohome = EUC_2D_Time(d_tohome,uav_data)
                if (uav_data['Tt'] >= int(np.floor(g + t_tohome + uav_data['Tl']))):
                    suc = {'parent' : current_node,'x' : suc(1,1),'y' : suc(2,1),'t' : t_toSuc,'g' : g,'r' : 0}
                    if (isSamePoint(np.array([[suc['x']],[suc['y']]]),home)):
                        suc['r'] = current_node['r']
                    else:
                        suc.r = current_node['r'] - 1
                    SUCS = np.array([SUCS,suc])
    return SUCS
def is_visited(list = None,node = None):
    """Return the 1-based row index of *node* inside *list*, or 0 if absent.

    NOTE(review): parameter `list` shadows the builtin, and the 2-D slicing
    (`list[:, np.arange(2, 4+1)]`) assumes a MATLAB-style table rather than
    the Python list myStruct2table returns — untranslated port, kept as-is.
    """
    nodeIdx = 0
    if (not len(list)==0 ):
        # Compare only the (x, y, t) columns plus the relay counter r.
        list = myStruct2table(list)
        list = np.array([list[:,np.arange(2,4+1)],list[:,6]])
        node = myStruct2table(node)
        node = np.array([node[:,np.arange(2,4+1)],node[:,6]])
        r,__ = list.shape
        for i in np.arange(1,r+1).reshape(-1):
            if (list[i,:] == node[1,:]):
                nodeIdx = i
                break
    return nodeIdx
def isSamePoint(A = None,B = None):
    """Return True when the 2x1 column vectors A and B are the same point.

    Fixes the MATLAB call syntax left over from the port: `A(1,1)` /
    `A(2,1)` (1-based row,col access) becomes 0-based `A[0][0]` / `A[1][0]`,
    which works for both numpy arrays and nested lists.  Also avoids
    shadowing the builtin `bool`.
    """
    return A[0][0] == B[0][0] and A[1][0] == B[1][0]
def is_inpath(current = None,suc = None):
    #This function search if a node has been already visited in the path
    # Walks the parent chain from *current*, returning True if any node on
    # the chain sits at point *suc*.
    # NOTE(review): `ClassDef(current) == 'struct'` is a mistranslation of
    # MATLAB's class(current) == 'struct' — ast.ClassDef is unrelated.  The
    # intent appears to be "while current is still a node, not the root
    # sentinel"; confirm against the MATLAB original before fixing.
    if (ClassDef(current) == 'struct'):
        if (isSamePoint(np.array([[current.x],[current.y]]),suc)):
            is_rel = True
        else:
            is_rel = is_inpath(current.parent,suc)
    else:
        is_rel = False
    return is_rel
def remove(OPEN = None,suc = None):
    """Remove *suc* from the OPEN list by flagging it f=inf, then filtering
    via deleteNode.

    NOTE(review): mixes MATLAB table operations (ismember with 'rows',
    str.find on an array, `OPEN(idx).f` call-syntax) with Python objects —
    untranslated port, kept byte-identical.  `list` shadows the builtin.
    """
    list = OPEN
    if (not len(list)==0 ):
        # Match on the (x, y, t) columns plus the relay counter r.
        list = myStruct2table(list)
        list = np.array([list[:,np.arange(2,4+1)],list[:,6]])
        suc = myStruct2table(suc)
        suc = np.array([suc[:,np.arange(2,4+1)],suc[:,6]])
        __,Locb = ismember(list,suc,'rows')
        idx = str.find(Locb,1)
        OPEN(idx).f = np.inf
        OPEN = deleteNode(OPEN)
    return OPEN
def deleteNode(OPEN = None):
    """Return OPEN without the nodes previously flagged f == inf by remove().

    NOTE(review): `OPEN(i)` is MATLAB call-style indexing and
    `np.array([L, OPEN(i)])` is not list appending — untranslated port.
    """
    L = []
    __,c = OPEN.shape
    for i in np.arange(1,c+1).reshape(-1):
        if (OPEN(i).f != np.inf):
            L = np.array([L,OPEN(i)])
    OPEN = L
    return OPEN
def EUC_2D_Distance(last = None,next = None):
    """Euclidean distance between two 2x1 column-vector points, rounded to 4 dp.

    Fixes the MATLAB call syntax from the original port: 1-based
    `last(1,1)` / `last(2,1)` access becomes 0-based `last[0][0]` /
    `last[1][0]`, which works for numpy arrays and nested lists alike.
    NOTE: parameter `next` shadows the builtin; kept for compatibility.
    """
    d = np.sqrt(((last[0][0] - next[0][0]) ** 2) + ((last[1][0] - next[1][0]) ** 2))
    d = np.round(d,4)
    return d
def EUC_2D_Time(d = None,uav_data = None):
    """Travel time for distance *d* at cruise speed uav_data['Vuav'],
    rounded to the nearest integer (numpy half-to-even rounding).

    Fix: every other function in this module accesses uav_data as a dict
    (uav_data['To'], uav_data['Tt'], ...); the original attribute access
    `uav_data.Vuav` was inconsistent and raises AttributeError for dicts.
    """
    t = np.round(d / uav_data['Vuav'],0)
    return t
return current_node | marioaguileraaa/TFG | Core/bfs.py | bfs.py | py | 5,195 | python | en | code | 0 | github-code | 13 |
37910377158 | from CaloRec.CaloRecFlags import jobproperties
from AthenaCommon.Resilience import treatException
from RecExConfig.RecFlags import rec
from AthenaCommon.GlobalFlags import globalflags
from AthenaCommon.DetFlags import DetFlags
from AthenaCommon.Logging import logging
# --- Real-data cell-energy source configuration -----------------------------
if globalflags.DataSource()=='data':
    if rec.projectName()=="data09_calophys":
        # for data09_calophys project, force to use DSP output for the cell energy, perform reco like DSP, no dead cell correction
        if rec.doLArg():
            from LArROD.LArRODFlags import larRODFlags
            larRODFlags.readDigits=False
        if rec.doTile():
            from TileRecUtils.TileRecFlags import jobproperties
            jobproperties.TileRecFlags.readDigits=False
        from CaloRec.CaloCellFlags import jobproperties
        jobproperties.CaloCellFlags.doDeadCellCorr=False
        jobproperties.CaloCellFlags.doLArCreateMissingCells=False
        jobproperties.CaloCellFlags.doLArDeadOTXCorr=False
    else:
        if rec.doLArg() and globalflags.InputFormat() == 'bytestream' and jobproperties.CaloRecFlags.doLArAutoConfiguration():
            # for bytestream reco of real data, autoconfigure based on the run format information
            # use digits only when digits available for all cells (transparent or rawdataresult)
            # use DSP energy + digits when available in case only sparse digits available (results)
            # (when collisions with timing settled we could use only DSP energy in this case)
            from LArROD.LArRODFlags import larRODFlags
            larRODFlags.readDigits=True
            from LArConditionsCommon.LArCool import larcool
            if (larcool is not None):
                # check format1 & results mode
                if (larcool.format()==1 and larcool.runType()==2) :
                    larRODFlags.keepDSPRaw = True
                if (larcool.format()==0):
                    larRODFlags.keepDSPRaw = False # raw data transparent mode, no DSP energy
## RS Jun 2009 remove CombinedTower, Clusters from input when readESD=doESD=True
# to allow rebuilding of them
if rec.readESD and rec.doESD:
    from RecExConfig.ObjKeyStore import objKeyStore
    objKeyStore['inputFile'].removeItem(["CaloTowerContainer#CombinedTower"])
    objKeyStore['inputFile'].removeItem(["CaloTopoTowerContainer#TopoTower"])
    objKeyStore['inputFile'].removeItem(["CaloClusterContainer#EMTopoCluster430"])
    objKeyStore['inputFile'].removeItem(["CaloShowerContainer#EMTopoCluster430_Data"])
    objKeyStore['inputFile'].removeItem(["CaloCellLinkContainer#EMTopoCluster430_Link"])
    objKeyStore['inputFile'].removeItem(["CaloClusterContainer#LArClusterEM"])
    objKeyStore['inputFile'].removeItem(["CaloClusterContainer#LArClusterEM7_11Nocorr"])
    objKeyStore['inputFile'].removeItem(["CaloClusterContainer#LArClusterEMFrwd"])
    objKeyStore['inputFile'].removeItem(["CaloClusterContainer#LArClusterEMSofte"])
    objKeyStore['inputFile'].removeItem(["CaloShowerContainer#LArClusterEM7_11Nocorr_Data"])
    objKeyStore['inputFile'].removeItem(["CaloShowerContainer#LArClusterEMSofte_Data"])
    objKeyStore['inputFile'].removeItem(["CaloShowerContainer#LArClusterEM_Data"])
    # NOTE(review): the LArClusterEM7_11Nocorr_Data and _Link entries are
    # repeated below — harmless but likely copy-paste duplicates.
    objKeyStore['inputFile'].removeItem(["CaloShowerContainer#LArClusterEM7_11Nocorr_Data"])
    objKeyStore['inputFile'].removeItem(["CaloCellLinkContainer#LArClusterEM7_11Nocorr_Link"])
    objKeyStore['inputFile'].removeItem(["CaloCellLinkContainer#LArClusterEM7_11Nocorr_Link"])
    objKeyStore['inputFile'].removeItem(["CaloCellLinkContainer#LArClusterEMSofte_Link"])
    objKeyStore['inputFile'].removeItem(["CaloCellLinkContainer#LArClusterEM_Link"])
    if jobproperties.CaloRecFlags.doTileMuId() :
        objKeyStore['inputFile'].removeItem(["TileMuContainer:TileMuObj"])
        objKeyStore['inputFile'].removeItem(["TileCosmicMuon:TileCosmicMuonHT"])
        objKeyStore['inputFile'].removeItem(["TileCosmicMuon:TileCosmicMuonMF"])
        objKeyStore['inputFile'].removeItem(["TileCosmicMuonContainer:TileCosmicMuonHT"])
    if jobproperties.CaloRecFlags.doTileCellCorrection() :
        include( "TileRecAlgs/TileCellCorrection_jobOptions.py" )
# --- Tools needed when only reading ESD (no re-reconstruction) ---------------
if rec.readESD and not rec.doESD:
    # this is needed by CaloTowerContainerCnv
    from LArRecUtils.LArRecUtilsConf import LArTowerBuilderTool
    svcMgr.ToolSvc += LArTowerBuilderTool("LArTowerEMHEC",
                                          IncludedCalos = ["LAREM","LARHEC"])
    ToolSvc.LArTowerEMHEC.IncludedCalos = ["LAREM","LARHEC"]
# possible hack : when reading ESD modify in put calo cell
## if rec.readESD() and DetFlags.detdescr.Calo_on():
##     from CaloRec.CaloRecConf import CaloCellMaker
##     theCaloCellMaker=CaloCellMaker(CaloCellsOutputName="AllCalo",CaloCellHack=True)
##     # calo modifying tool to be inserted there
##     from CaloTools.CaloNoiseToolDefault import CaloNoiseToolDefault
##     theCaloNoiseTool = CaloNoiseToolDefault()
##     ToolSvc+=theCaloNoiseTool
##     from CaloCellCorrection.CaloCellCorrectionConf import CaloCellRandomizer
##     theCaloCellRandomizer=CaloCellRandomizer(noiseTool=theCaloNoiseTool,DoGaussRandomization=True)
##     ToolSvc += theCaloCellRandomizer
##     from CaloRec.CaloRecConf import CaloCellContainerCorrectorTool
##     from CaloIdentifier import SUBCALO
##     theCaloCellContainerCorrectorTool = CaloCellContainerCorrectorTool(
##         #    CaloNums = [ SUBCALO.LAREM,SUBCALO.LARHEC,SUBCALO.LARFCAL],
##         #CaloNums = [ SUBCALO.TILE],#SUBCALO.TILE
##         CaloNums = [ SUBCALO.NSUBCALO],
##         CellCorrectionToolNames = [theCaloCellRandomizer] )
##     ToolSvc += theCaloCellContainerCorrectorTool
##     theCaloCellMaker.CaloCellMakerToolNames += [theCaloCellContainerCorrectorTool ]
##     from CaloRec.CaloRecConf import CaloCellContainerFinalizerTool
##     theCaloCellContainerFinalizerTool=CaloCellContainerFinalizerTool()
##     ToolSvc += theCaloCellContainerFinalizerTool
##     theCaloCellMaker.CaloCellMakerToolNames += [theCaloCellContainerFinalizerTool ]
##     from CaloRec.CaloRecConf import CaloCellContainerCheckerTool
##     theCaloCellContainerCheckerTool=CaloCellContainerCheckerTool()
##     # FIXME
##     theCaloCellContainerCheckerTool.OutputLevel=DEBUG
##     ToolSvc += theCaloCellContainerCheckerTool
##     theCaloCellMaker.CaloCellMakerToolNames += [theCaloCellContainerCheckerTool]
##     topSequence+=theCaloCellMaker;
# create LArFebErrorSummary for BS input
if rec.doLArg() and globalflags.DataSource()=='data' and globalflags.InputFormat() == 'bytestream':
    from LArROD.LArRODFlags import larRODFlags
    if larRODFlags.doLArFebErrorSummary() :
        try:
            include("LArROD/LArFebErrorSummaryMaker_jobOptions.py")
        except Exception:
            treatException("Problem with LArFebErrorSummaryMaker_jobOptions.py switch larRODFlags.doLArFebErrorSummary ")
            larRODFlags.doLArFebErrorSummary=False
#
# functionality: Calorimeter cells
#
if DetFlags.makeRIO.Calo_on() and not rec.doWriteBS() :
    from AthenaCommon.Include import excludeTracePattern
    excludeTracePattern.append("*/CaloClusterCorrection/common.py")
    try:
        include ("TileRec/TileDefaults_jobOptions.py")
    except Exception:
        treatException("Could not set up Tile default options.")
    # CaloCellGetter
    #
    try:
        from CaloRec.CaloCellGetter import CaloCellGetter
        CaloCellGetter()
        # for data09_calophys project, force to use the same quality cut as in BS converter
        if globalflags.DataSource()=='data':
            if rec.projectName()=="data09_calophys":
                if rec.doTile():
                    from AthenaCommon.AppMgr import ToolSvc
                    ToolSvc.TileCellBuilder.QualityCut = 15
    except Exception:
        treatException("Problem with CaloCellGetter. Switched off.")
        DetFlags.makeRIO.Calo_setOff()
# CaloCellGetter_DigiHSTruth
doDigiTruthFlag = False
try:
    from Digitization.DigitizationFlags import digitizationFlags
    doDigiTruthFlag = digitizationFlags.doDigiTruth()
# NOTE(review): bare `except:` below also swallows KeyboardInterrupt;
# kept byte-identical (the intent is "flag absent => AthenaP1 setup").
except:
    log = logging.getLogger('CaloRec')
    log.info('Unable to import DigitizationFlags in CaloRec_jobOptions. Expected in AthenaP1')
if doDigiTruthFlag:
    try:
        from CaloRec.CaloCellGetter_DigiHSTruth import CaloCellGetter_DigiHSTruth
        CaloCellGetter_DigiHSTruth()
    except Exception:
        treatException("Problem with CaloCellGetter_DigiHSTruth. Switched off.")
#
#
# functionality : Calorimeter combined clustering
#
if jobproperties.CaloRecFlags.doCaloCluster() and DetFlags.haveRIO.Calo_on() :
    try:
        from CaloRec.CaloClusterSWCmbGetter import CaloClusterSWCmbGetter
        CaloClusterSWCmbGetter()
    except Exception:
        treatException("Problem with CaloSWCmbCluster. Switched off.")
        jobproperties.CaloRecFlags.doCaloCluster=False
else:
    jobproperties.CaloRecFlags.doCaloCluster=False
#
# functionality : LAr Calorimeter clustering
#
if jobproperties.CaloRecFlags.doEmCluster() and DetFlags.haveRIO.LAr_on() :
    try:
        include( "LArClusterRec/LArCluster_jobOptions.py" )
        # introduce multisize possibility
        # include( "LArClusterRec/LArCluster_MultiSize_jobOptions.py" )
    except Exception:
        treatException("Problem with LArCluster. Switched off.")
        jobproperties.CaloRecFlags.doEmCluster=False
    # write digits of EM clusters
    # NOTE(review): "jobptions" below looks like a typo for "jobOptions",
    # but the repository file may genuinely carry that name — verify
    # before renaming the include path.
    if jobproperties.CaloRecFlags.doEMDigits() and globalflags.DataSource()=='data' and globalflags.InputFormat() == 'bytestream':
        try:
            include ("LArClusterRec/LArDigits_fromEMCluster_jobptions.py")
        except Exception:
            treatException("Problem with LArDigitsFromEMClust. Switched off.")
            jobproperties.CaloRecFlags.doEMDigits=False
else:
    jobproperties.CaloRecFlags.doEmCluster=False
#
# functionality : Topological clustering (combined and EM)
#
if jobproperties.CaloRecFlags.doCaloTopoCluster() and DetFlags.haveRIO.Calo_on() :
    try:
        include( "CaloRec/CaloTopoCluster_jobOptions.py" )
    except Exception:
        treatException("Problem with CaloTopoCluster. Switched off.")
        jobproperties.CaloRecFlags.doCaloTopoCluster=False
else:
    jobproperties.CaloRecFlags.doCaloTopoCluster=False
if jobproperties.CaloRecFlags.doCaloEMTopoCluster() and DetFlags.haveRIO.Calo_on() :
    try:
        include( "CaloRec/EMTopoCluster_jobOptions.py" )
    except Exception:
        treatException("Problem with EMTopoCluster. Switched off")
        # NOTE(review): this resets doCaloTopoCluster, not
        # doCaloEMTopoCluster — possible copy-paste bug; confirm intent.
        jobproperties.CaloRecFlags.doCaloTopoCluster=False
else:
    jobproperties.CaloRecFlags.doCaloEMTopoCluster=False
#
# functionality : Noise suppressed tower
#
if jobproperties.CaloRecFlags.doCaloTopoTower() and DetFlags.haveRIO.Calo_on():
    try:
        include ("CaloRec/CaloTopoTower_jobOptions.py")
    except Exception:
        treatException("Problem with CaloTopoTower. Switched off.")
        jobproperties.CaloRecFlags.doCaloTopoTower=False
else:
    jobproperties.CaloRecFlags.doCaloTopoTower=False
#
# functionality : muon candidates in Tile
#
if DetFlags.haveRDO.Tile_on() and DetFlags.detdescr.Tile_on() :
    # this part is needed if we are reading Geant4 RDO created before 14.5.0
    # but it's also safe to include this jobOptions even for new Geant4 RDO
    if globalflags.DataSource()=='geant4' and rec.doTrigger() :
        try:
            include( "TileL2Algs/TileL2Algs_jobOptions.py" )
        except Exception:
            treatException("Could not set up TileMuId at ROD")
    if jobproperties.CaloRecFlags.doTileMuId() :
        if DetFlags.haveRIO.Tile_on() :
            from AthenaCommon.BeamFlags import jobproperties
            try:
                # will merge these 2 jobOptions eventually
                if jobproperties.Beam.beamType() == 'cosmics' or jobproperties.Beam.beamType() == 'singlebeam' :
                    include ("TileMuId/TileMuId_cosmics_jobOptions.py")
                else:
                    include( "TileMuId/TileMuId_jobOptions.py" )
            except Exception:
                treatException("Could not set up TileMuId")
    # Tile Muon fitter to find cosmic muon tracks
    if jobproperties.Beam.beamType() == 'cosmics' or jobproperties.Beam.beamType() == 'singlebeam' :
        try:
            include( "TileCosmicAlgs/TileMuonFitter_jobOptions.py")
        except Exception:
            treatException("Could not set up TileMuonFitter")
#
# information on Calo AffectedRegion, write metadata
#
if DetFlags.dcs.LAr_on():
    if jobproperties.CaloRecFlags.doLArAffectedRegion() and rec.doESD and (not rec.readESD):
        try:
            include ("LArAffectedRegion/LArAffectedRegion_write.py")
        except Exception:
            treatException("Could not setup LArAffectedRegion")
#
# find noise LAr regions/ events
if jobproperties.CaloRecFlags.doLArNoisyRO() and rec.doESD:
    try:
        from LArCellRec.LArNoisyROSummaryGetter import LArNoisyROSummaryGetter
        LArNoisyROSummaryGetter()
    except Exception:
        treatException("Problem with LArNoisyROSummaryGetter. Switched off.")
#Fill the background word from MBTS
if rec.doTile() and rec.doESD() and jobproperties.CaloRecFlags.doFillMBTSBackgroundBit():
    try:
        include ("TileRecAlgs/MBTSTimeDiffEventInfoAlg_jobOptions.py")
    except Exception:
        treatException("Problem with MBTSTimeDiffEventInfoAlg.")
# Fill error bit in Event Info to veto noise LAr burst (done in raw->esd stage)
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if globalflags.DataSource()=='data' and jobproperties.CaloRecFlags.doLArNoiseBurstVeto() and rec.doESD() and rec.doLArg() and (not athenaCommonFlags.isOnline()):
    try:
        include("LArCellRec/LArTimeVetoAlg_jobOptions.py")
    except Exception:
        treatException("Problem with LArTimeVetoAlg")
# xAOD calorimeter towers built from raw cells / from topo clusters.
if jobproperties.CaloRecFlags.doCaloTowerFromCells() and rec.doESD and rec.doCalo():
    from CaloRec.CaloRecConf import CaloTowerxAODFromCells
    topSequence+=CaloTowerxAODFromCells(CaloTowerContainer="CmbTowers",
                                        #                        doCrossChecks=True
                                        )
if jobproperties.CaloRecFlags.doCaloTowerFromCluster() and rec.doESD and rec.doCalo():
    from CaloRec.CaloRecConf import CaloTowerxAODFromClusters
    topSequence+=CaloTowerxAODFromClusters(CaloTowerContainer="CmbTopoTowers",
                                           #                        doCrossChecks=True
                                           )
#Convert clusters to xAOD::CaloCluster (temporary solution)
if rec.doWritexAOD():
    from xAODCaloEventCnv.xAODCaloEventCnvConf import ClusterCreator
    topSequence+=ClusterCreator("CaloCluster2xAOD")
    #CaloClusterKeys=("CaloCalTopoCluster", "CaloTopoCluster", "EMTopoCluster430", "LArClusterEM", "LArClusterEM7_11Nocorr",
    #                 "CombinedCluster","EMTopoSW35","egClusterCollection","LArClusterEMSofte")
    #for k in CaloClusterKeys:
    #    itemname="CaloClusterContainer#"+k
    #    if objKeyStore["transient"].has_item(itemname):# "CaloClusterContainer",k):
    #        print "Scheduling xAOD converter for CaloClusterContainer with key",k
    #        topSequence+=ClusterCreator("ClusterxAOD_"+k,AODContainerName=k,xAODContainerName=k,CellContainerName="AllCalo")
    #    else:
    #        print "CaloClusterContainer with key",k,"not found, no xAOD converter scheduled"
    #        pass
    #    pass
#L1Calo Trigger tower decoration
if globalflags.DataSource()=='data' and rec.doESD() and rec.doCalo() and rec.doTrigger():
    include("TrigT1CaloCalibTools/DecorateL1CaloTriggerTowers_prodJobOFragment.py")
| rushioda/PIXELVALID_athena | athena/Calorimeter/CaloRec/share/CaloRec_jobOptions.py | CaloRec_jobOptions.py | py | 16,046 | python | en | code | 1 | github-code | 13 |
15293744968 | # %%
import requests
import pandas
import os
import re
from bs4 import BeautifulSoup
# %%
# Fetch the ODI match-list menu and collect per-series links, skipping
# entries whose link text ends with "World Cup".
page = requests.get('http://www.howstat.com/cricket/Statistics/Matches/MatchListMenu.asp')
# %%
soup = BeautifulSoup(page.content, 'html.parser')
# %%
a_all = soup.select('#odis > table > tr > td > table > tr > td > a.LinkOff')
a_all = [a for a in a_all if not a.text.strip().endswith('World Cup')]
href_all = ['http://www.howstat.com/cricket/Statistics/Matches/' + a['href'] for a in a_all]
# %%
# For every series page, record one row per match:
# [date, teams, ground, printable-scorecard URL, series URL].
match_info = []
for href in href_all:
    print(href)
    page = requests.get(href)
    soup = BeautifulSoup(page.content, 'html.parser')
    tr_all = soup.select('table.TableLined > tr')
    tr_all = tr_all[1:]
    for tr in tr_all:
        td_all = tr.select('td')
        date = td_all[1].text.strip()
        countries = td_all[2].text.strip()
        ground = td_all[3].text.strip()
        a_scorecard = td_all[5].select_one('a')
        match_info.append([date, countries, ground, 'http://www.howstat.com/cricket/Statistics/Matches/' + a_scorecard['href'] + '&Print=Y', href])
# %%
def is_aborted(soup):
    """True when the scorecard header says the match produced no play/result.

    Reads the status cell at a fixed position in the scorecard layout.
    """
    td = soup.select_one('body > table:nth-child(2) > tr:nth-child(1) > td > table > tr:nth-child(5) > td.TextBlack8')
    return td is not None and (td.text.strip() == 'Match abandoned' or\
                               td.text.strip() == 'Match cancelled' or\
                               td.text.strip() == 'No result')
def is_conceded(soup):
    """True when the match was decided by default/walkover (no scorecard)."""
    td = soup.select_one('body > table:nth-child(2) > tr:nth-child(1) > td > table > tr:nth-child(5) > td.TextBlack8')
    return td is not None and (td.text.strip().endswith('won by default') or\
                               td.text.strip().endswith('won by walkover'))
def is_innings_start(td_all):
    """A batting-innings header row has exactly 7 cells, with the runs ('R')
    and balls-faced ('BF') column labels in positions 1 and 2."""
    if len(td_all) != 7:
        return False
    return td_all[1].text.strip() == 'R' and td_all[2].text.strip() == 'BF'
def is_innings_stop(td_all):
    """A row with fewer than 7 cells terminates the current innings table."""
    cell_count = len(td_all)
    return cell_count < 7
# %%
# Walk every match scorecard and extract the players (expected 22: 11 per
# team).  Team 0 = first innings side, team 1 = second innings side.
ds = []
count = 0
for info in match_info:
    date, countries, ground, href, parent_href = info
    count += 1
    print(count)
    print(parent_href)
    print(href)
    page = requests.get(href)
    soup = BeautifulSoup(page.content, 'html.parser')
    table = soup.select_one('body > table:nth-child(2) > tr > td > table:nth-child(3)')
    # Skip matches without a usable scorecard.
    if is_aborted(soup):
        print('Detect match aborted')
        continue
    elif is_conceded(soup):
        print('Detect match conceded')
        continue
    elif table is None:
        print('Possibly match aborted')
        break
    tr_all = table.select('tr')
    dt = []
    innings_start = False
    team = 0
    for tr in tr_all:
        td_all = tr.select('td')
        # A short row ends the current innings table.
        if innings_start and is_innings_stop(td_all):
            innings_start = False
            team += 1
        if innings_start:
            # Player id is the value after '=' in the profile link href.
            a = td_all[0].select_one('a')
            player_id = re.sub(r'(.*)=(.*)', r'\2', a['href'])
            player_name = td_all[0].text.strip()
            dt.append([date, countries, ground, team, player_name, player_id])
        if is_innings_start(td_all):
            innings_start = True
        if team > 1:
            break
    if team == 1:
        # Second innings may live outside the main table; rescan top rows.
        tr_all = soup.select('body > table:nth-child(2) > tr')
        for tr in tr_all:
            td_all = tr.select('td')
            if innings_start and is_innings_stop(td_all):
                innings_start = False
                team += 1
            if innings_start:
                a = td_all[0].select_one('a')
                player_id = re.sub(r'(.*)=(.*)', r'\2', a['href'])
                player_name = td_all[0].text.strip()
                dt.append([date, countries, ground, team, player_name, player_id])
            if is_innings_start(td_all):
                innings_start = True
            if team > 1:
                break
    if len(dt) != 22:
        print('Less than 22 players, possibly scrap error')
    ds.extend(dt)
# %%
# Persist the player rows as a pipe-delimited CSV under files/.
columns = [
    'DATE',
    'COUNTRIES',
    'GROUND',
    'TEAM',
    'PLAYER_NAME',
    'PLAYER_ID']
if not os.path.exists('files'):
    os.mkdir('files')
df = pandas.DataFrame(data = ds, columns = columns)
df.to_csv('files/match_players.txt', sep = '|', index = False)
| nadeeg/cricket-2019 | data/source_match_players.py | source_match_players.py | py | 4,537 | python | en | code | 0 | github-code | 13 |
73837473298 | import sys
sys.path.append(".")
from bank.Bank import BankTransaction
def test_read_file_input():
    """
    test BankTransaction.read_file_input method
    """
    # Expected parsed content of data/input1.json: one account-creation
    # event followed by two transaction events.
    expected_output = [
        {
            "account":{
                "active_card":True,
                "available_limit":100
            }
        },
        {
            "transaction":{
                "merchant":"Burger King",
                "amount":20,
                "time":"2019-02-13T10:00:00.000Z"
            }
        },
        {
            "transaction":{
                "merchant":"Habbib's",
                "amount":90,
                "time":"2019-02-13T11:00:00.000Z"
            }
        }
    ]
    bank = BankTransaction()
    json_file = bank._read_file_input('data/input1.json')
    assert json_file == expected_output
def test_print_status():
    """
    test BankTransaction._print_status method
    """
    # A freshly-constructed bank reports an uninitialized, inactive account.
    bank = BankTransaction()
    expected_result = bank._print_status()
    assert expected_result == {"account": {"active_card": False, "account_initialized": False,"available_limit": None }, "violations": [None]}
def test_account_creation():
    """
    test BankTransaction.account_creation method
    """
    bank = BankTransaction()
    expected_result = bank.account_creation(available_limit=100)
    assert expected_result == {"account": {"active_card": True,"account_initialized": True, "available_limit": 100 }, "violations": [None]}
def test_transaction_authorization():
    """
    test BankTransaction.transaction_authorization method
    """
    # Authorizing before account creation must report card_not_active.
    bank = BankTransaction()
    expected_result = bank.transaction_authorization(merchant ='my_merchant', amount=100, time="2019-02-13T10:00:00.000Z")
    assert expected_result == {"account": {"active_card": False,"account_initialized": False, "available_limit": None }, "violations": ["card_not_active"]}
if __name__ == '__main__':
    # Fix: `pytest` was never imported at module level (only `sys` is), so
    # running this file directly raised NameError.  Import lazily here so
    # pytest stays optional when the module is merely imported.
    import pytest
    pytest.main([__file__])
| yennanliu/BankSimulator | tests/unit_test.py | unit_test.py | py | 1,893 | python | en | code | 0 | github-code | 13 |
12967204210 | import pygame
from pygame.locals import *
import sys
import random
class Text:
    """create a Text for GUI screen"""
    def __init__(
        self, WINDOW_WIDTH, WINDOW_HEIGHT, frame, location, value, color, size
    ):
        # Initialise pygame fonts and the window before rendering any text.
        pygame.font.init()
        self.CreateWindow(WINDOW_WIDTH, WINDOW_HEIGHT)
        self.frames = frame  # target FPS used by run()
        self.location = location  # top-left blit position (x, y)
        self.font = pygame.font.SysFont(None, size)
        self.text = None
        self.color = color
        # NOTE(review): this calls the clock() method below, which then
        # rebinds self.clock to a pygame Clock object, shadowing the method.
        self.clock()
        self.setValue(value)
    def CreateWindow(self, width, height):
        # Create (or replace) the display surface at the given size.
        self.window = pygame.display.set_mode((width, height))
    def run(self):
        """Demo loop: render an incrementing frame counter until quit."""
        self.frameCounter = 0
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
            self.frameCounter += 1
            self.setValue(str(self.frameCounter))
            self.window.fill((255, 255, 255))
            self.draw()
            pygame.display.update()
            self.clock.tick(self.frames)
    def clock(self):
        # Rebinds the attribute `clock` from this method to a Clock instance.
        self.clock = pygame.time.Clock()
    def setValue(self, newValue):
        # Re-render the text surface only when the text actually changed.
        if self.text == newValue:
            return
        self.text = newValue
        self.textSurface = self.font.render(self.text, True, self.color)
    def draw(self):
        # Blit the cached text surface at the configured location.
        self.window.blit(self.textSurface, self.location)
70063995857 | # DEPENDENCIES (Local)
# ----------------------------------------------------------------------------------------------------
from constants.enums import Models, ClassificationModels, RegressionModels, NeuralModels
from constants.params import ClassificationParams, RegressionParams, NeuralParams
# AUX METHODS
# ----------------------------------------------------------------------------------------------------
# Parse model class into enum with types:
# -> Base = Models based on common ML methods
# -> Neural = Models based on neural networks
def parse_model_class(model_class):
    """Map a model-class string to (Models enum, default model-type enum).

    The default model type for every class is its MLP variant; unknown
    strings yield (None, None).
    """
    model = None
    model_type = None
    if model_class == 'classification':
        model = Models.CLASSIFICATION
        model_type = ClassificationModels.MLP
    if model_class == 'regression':
        model = Models.REGRESSION
        model_type = RegressionModels.MLP
    if model_class == 'neural':
        model = Models.NEURAL
        model_type = NeuralModels.MLP
    return model, model_type
# Parse model type depending of model class, into enum with types:
# Classification:
# -> Tree = Binary decision tree
# -> GNB = Gaussian Naive Bayes
# -> KNN = K Nearest Neighbors
# -> SVM = Support Vector Machines
# -> LoR = Logistic Regression
# -> MLP = Multi Layered Perceptron
# Regression:
# -> KNN = K Nearest Neighbors
# -> SVM = Support Vector Machines
# -> LiR = Linear Regression
# -> MLP = Multi Layered Perceptron
# Neural:
# -> MLP = Multi Layered Perceptron
def parse_model_type(model_class, model_type):
model = None
model_params = None
if model_class == Models.CLASSIFICATION:
if model_type == 'tree':
model = ClassificationModels.TREE
if model_type == 'gnb':
model = ClassificationModels.GNB
if model_type == 'knn':
model = ClassificationModels.KNN
if model_type == 'svm':
model = ClassificationModels.SVM
if model_type == 'lor':
model = ClassificationModels.LOR
if model_type == 'mlp':
model = ClassificationModels.MLP
model_params = ClassificationParams[model][0]
elif model_class == Models.REGRESSION:
if model_type == 'knn':
model = RegressionModels.KNN
if model_type == 'svm':
model = RegressionModels.SVM
if model_type == 'lir':
model = RegressionModels.LIR
if model_type == 'mlp':
model = RegressionModels.MLP
model_params = RegressionParams[model][0]
else:
if model_type == 'mlp':
model = NeuralModels.MLP
return model, model_params
# Retrieve model training params depending on class and type
def parse_model_params(model_class, model_type, model_params):
    """Look up the training-parameter set for the given class/type/index.

    Returns None when the model class is not recognized.
    """
    param_tables = {
        Models.CLASSIFICATION: ClassificationParams,
        Models.REGRESSION: RegressionParams,
        Models.NEURAL: NeuralParams,
    }
    table = param_tables.get(model_class)
    return None if table is None else table[model_type][model_params]
| gouvina/ml-python-predictor | src/utils/parser.py | parser.py | py | 3,102 | python | en | code | 0 | github-code | 13 |
12359955538 | # run training code
from torch.utils.data import Dataset
import glob
import numpy as np
import scipy.io as io
import os
import cv2
import PIL
from torch import Tensor, einsum
from image import ImageDataset
from ground_truth import GroundTruthDataset
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
from scipy.ndimage.morphology import distance_transform_edt as edt
## Loss function from https://github.com/JunMa11/SegLoss
## original called GDIceLossV2
class GDiceLoss(nn.Module):
    """Generalized Dice loss (GDiceLossV2 in https://github.com/JunMa11/SegLoss)."""

    def __init__(self, apply_nonlin=None, smooth=1e-5):
        """
        Generalized Dice;
        Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
        paper: https://arxiv.org/pdf/1707.03237.pdf
        tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279

        :param apply_nonlin: optional callable (e.g. softmax) applied to the
                             network output before computing the loss.
        :param smooth: small constant guarding against division by zero.
        """
        super(GDiceLoss, self).__init__()
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, net_output, gt):
        shp_x = net_output.shape  # (batch size, class_num, x, y, z)
        shp_y = gt.shape  # (batch size, 1, x, y, z) or already one-hot
        # one hot code for gt
        with torch.no_grad():
            if len(shp_x) != len(shp_y):
                gt = gt.view((shp_y[0], 1, *shp_y[1:]))
            if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
                # if this is the case then gt is probably already a one hot encoding
                y_onehot = gt
            else:
                gt = gt.long()
                y_onehot = torch.zeros(shp_x)
                if net_output.device.type == "cuda":
                    y_onehot = y_onehot.cuda(net_output.device.index)
                    print("using cuda")
                y_onehot.scatter_(1, gt, 1)
        # Bug fix: `softmax_output` used to be assigned only inside the
        # `apply_nonlin is not None` branch, so constructing the loss with
        # apply_nonlin=None raised NameError below. Fall back to the raw
        # network output in that case.
        if self.apply_nonlin is not None:
            softmax_output = self.apply_nonlin(net_output)
        else:
            softmax_output = net_output
        input = flatten(softmax_output)  # (C, N * spatial)
        target = flatten(y_onehot)
        target = target.float()
        target_sum = target.sum(-1)
        # Per-class weight = 1 / (class volume)^2, as in Sudre et al. 2017.
        class_weights = Variable(1. / (target_sum * target_sum).clamp(min=self.smooth), requires_grad=False)
        intersect = (input * target).sum(-1) * class_weights
        intersect = intersect.sum()
        denominator = ((input + target).sum(-1) * class_weights).sum()
        return 1. - 2. * intersect / denominator.clamp(min=self.smooth)
def flatten(tensor):
    """Move the channel axis to the front and collapse every other axis.

    Shapes are transformed as (N, C, D, H, W) -> (C, N * D * H * W).
    """
    num_channels = tensor.size(1)
    # Channel first, batch second, remaining (spatial) axes unchanged.
    axis_order = (1, 0, *range(2, tensor.dim()))
    return tensor.permute(axis_order).contiguous().view(num_channels, -1)
def softmax_helper(x):
    """Numerically stable softmax over dim 1 (the class axis).

    copied from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/nd_softmax.py
    """
    repeats = [1] * x.dim()
    repeats[1] = x.size(1)
    # Subtract the per-sample max before exponentiating to avoid overflow.
    shifted = x - x.max(1, keepdim=True)[0].repeat(*repeats)
    exps = torch.exp(shifted)
    return exps / exps.sum(1, keepdim=True).repeat(*repeats)
class Encoder(nn.Module):
    """2D CNN that compresses a single-channel image into a z_size latent vector."""

    def __init__(self, z_size):
        super(Encoder, self).__init__()
        self.z_size = z_size
        self.n_features_min = 64
        self.n_channel = 1
        self.batch_size = 1
        self.cube_len = 1
        base = self.n_features_min
        # Five conv+BN stages; attribute names are kept exactly as before so
        # previously saved state_dicts continue to load.
        self.conv1 = nn.Conv2d(self.n_channel, base, 4, 2, 1)
        self.bn1 = nn.BatchNorm2d(base)
        self.conv2 = nn.Conv2d(base, base * 2, 4, 2, 1)
        self.bn2 = nn.BatchNorm2d(base * 2)
        self.conv3 = nn.Conv2d(base * 2, base * 4, 4, 2, 1)
        self.bn3 = nn.BatchNorm2d(base * 4)
        self.conv4 = nn.Conv2d(base * 4, base * 8, 4, 2, 1)
        self.bn4 = nn.BatchNorm2d(base * 8)
        self.conv5 = nn.Conv2d(base * 8, base * 16, 4, 1, 0)
        self.bn5 = nn.BatchNorm2d(base * 16)
        self.fc = nn.Linear(base * 16 * 9, self.z_size)

    def forward(self, input):
        batch_size = input.size(0)
        features = input
        stages = ((self.conv1, self.bn1), (self.conv2, self.bn2),
                  (self.conv3, self.bn3), (self.conv4, self.bn4),
                  (self.conv5, self.bn5))
        for conv, bn in stages:
            features = F.leaky_relu(bn(conv(features)), 0.2)
        # fc expects the conv stack to end at a 3x3 spatial map (16*base*9).
        flat = features.view(batch_size, self.n_features_min * 16 * 9)
        return self.fc(flat)
class Generator(nn.Module):
    """3D transposed-conv decoder: z_size latent vector -> single-channel volume."""

    def __init__(self, z_size):
        super(Generator, self).__init__()
        self.z_size = z_size
        self.n_features_min = 64
        self.n_channel = 1
        self.batch_size = 1
        self.cube_len = 1
        base = self.n_features_min
        # ConvTranspose3d(in_channels, out_channels, kernel, stride, padding);
        # attribute names are kept stable so saved state_dicts still load.
        self.conv1 = nn.ConvTranspose3d(self.z_size, base * 8, 3, 1, 0, bias=False)
        self.bn1 = nn.BatchNorm3d(base * 8)
        self.conv2 = nn.ConvTranspose3d(base * 8, base * 4, 4, 2, 0, bias=False)
        self.bn2 = nn.BatchNorm3d(base * 4)
        self.conv3 = nn.ConvTranspose3d(base * 4, base * 2, 4, 2, 1, bias=False)
        self.bn3 = nn.BatchNorm3d(base * 2)
        self.conv4 = nn.ConvTranspose3d(base * 2, base * 1, 4, 2, 0, bias=False)
        self.bn4 = nn.BatchNorm3d(base * 1)
        self.conv5 = nn.ConvTranspose3d(base * 1, self.n_channel, 4, 3, 1, bias=False)

    def forward(self, input):
        # Treat the latent vector as a 1x1x1 volume with z_size channels.
        volume = input.view(input.size(0), self.z_size, 1, 1, 1)
        upsample_stages = ((self.conv1, self.bn1), (self.conv2, self.bn2),
                           (self.conv3, self.bn3), (self.conv4, self.bn4))
        for conv, bn in upsample_stages:
            volume = F.relu(bn(conv(volume)))
        # Final layer has no BN; sigmoid maps voxels into [0, 1].
        return torch.sigmoid(self.conv5(volume))
def compute_distances(output_numpy, gdtm):
    """Weight the prediction elementwise by the ground-truth distance map."""
    return output_numpy * gdtm
def compute_dtm_gt(img_gt):
    """Euclidean distance transform of the mask's background.

    `edt` returns, for each non-zero element, the distance to the nearest
    zero; inverting the mask first gives every background voxel its distance
    to the nearest foreground voxel (foreground voxels get 0).
    """
    background = np.logical_not(img_gt)
    return edt(background)
## Running the Deep learning
def main():
    """Train the Encoder/Generator pair to lift 2D images into 3D volumes.

    The loss blends a generalized Dice term with a distance-map weighted
    (Hausdorff-style) term. Per-epoch progress renders and periodic model
    checkpoints are written below ./output/.
    """
    Z_SIZE = 64
    Testdataset = ImageDataset()
    gtDataset = GroundTruthDataset()
    dataloader = torch.utils.data.DataLoader(Testdataset, batch_size=1, shuffle=False, num_workers=1, drop_last=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        print("using cuda")
    else:
        print("using CPU")
    encoder = Encoder(Z_SIZE).to(device)
    generator = Generator(Z_SIZE).to(device)
    Diceloss = GDiceLoss(softmax_helper, smooth=1e-2)
    lr_d = 0.0025
    lr_g = 0.0025
    # alpha balances the Dice loss against the distance-map term.
    alpha = 0.01
    saveAlpha = alpha
    encoderOptim = torch.optim.Adam(encoder.parameters(), lr=lr_d)
    generatorOptim = torch.optim.Adam(generator.parameters(), lr=lr_g)
    # Ground-truth distance transforms are expensive: they are computed once
    # during epoch 1 and cached here, indexed by dataloader position.
    distance_transforms_gt = []
    # check if output/progress directory already exists otherwise create it
    if not os.path.exists('output/progress'):
        print("Creating directory '/output/progress'...\n")
        os.makedirs('output/progress/')
    # check if output/models directory already exists otherwise create it
    if not os.path.exists('output/models'):
        print("Creating directory '/output/models'...\n")
        os.makedirs('output/models/')
    print('Starting training...')
    for epochs in range(1,100):
        print("epoch: ", epochs)
        for idx, (file_name, input) in enumerate(dataloader):
            input = input.unsqueeze(0).float()
            image = input.to(device)
            x = encoder(image)
            expected = gtDataset.getItem(file_name[0]).to(device)
            output = generator(x)
            # Squeeze the (1, 1, s, s, s) prediction down to an (s, s, s) cube.
            size = output.size()[3]
            output = output.reshape([size,size,size])
            outputflatten = flatten(output)
            expectedflatten = flatten(expected)
            diceLoss = Diceloss(outputflatten, expectedflatten)
            # compute distance maps
            with torch.no_grad():
                if epochs == 1:
                    # default using compute_dtm; however, compute_dtm01 is also worth a try
                    gt_dtm_npy = compute_dtm_gt(expected.cpu().numpy())
                    gt_dtm_npy = torch.from_numpy(gt_dtm_npy).to(device)
                    distance_transforms_gt.append(gt_dtm_npy)
                    print(idx)
                else:
                    gt_dtm_npy = distance_transforms_gt[idx]
            # Hausdorff-style penalty: worst predicted mass far from the GT.
            distances = compute_distances(output, gt_dtm_npy)
            loss_hd = torch.max(distances)
            if (loss_hd < 1):
                alpha = 1 - saveAlpha
            else:
                alpha = saveAlpha
            loss = alpha*(diceLoss) + (1 - alpha) * loss_hd
            encoderOptim.zero_grad()
            generatorOptim.zero_grad()
            loss.backward()
            encoderOptim.step()
            generatorOptim.step()
            if (epochs == 1):
                print(file_name)
                expected_shape = expected.cpu().detach().numpy()
                expectedIndicies = np.argwhere(expected_shape >= 1)
                fig = plt.figure()
                ax = plt.axes(projection='3d')
                ax.scatter3D(expectedIndicies[:,0],expectedIndicies[:,1],expectedIndicies[:,2], cmap='gray')
                ax.set_xlabel('x')
                ax.set_ylabel('y')
                ax.set_zlabel('z')
                ax.set_xlim([0,101])
                ax.set_ylim([0,101])
                ax.set_zlim([0,101])
                # Bug fix: this previously used a hard-coded Google-Drive
                # (Colab) path; save next to the other progress renders.
                name = "./output/progress/groundtruth.png"
                plt.savefig(name)
                plt.close(fig)
        # NOTE(review): argmax over dim 1 of an (s, s, s) cube compares class
        # indices against a binary volume -- confirm this accuracy metric is
        # what was intended.
        acc = (torch.argmax(output, 1) == expected).float().mean()
        print('epoch {}, loss {:.9f}, acc {:.5f}'.format(epochs, loss.item(), acc))
        output_shape = output.cpu().detach().numpy()
        outputIndicies = np.argwhere(output_shape >= 1)
        print(outputIndicies)
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        ax.scatter3D(outputIndicies[:,0],outputIndicies[:,1],outputIndicies[:,2], cmap='gray')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        ax.set_xlim([0,101])
        ax.set_ylim([0,101])
        ax.set_zlim([0,101])
        name = "./output/progress/" + str(epochs) + ".png"
        plt.savefig(name)
        plt.close(fig)
        if (epochs % 10 == 0):
            ## Save models ##
            torch.save(encoder.state_dict(), "./output/models/encoder" + str(epochs) + ".pt")
            torch.save(generator.state_dict(), "./output/models/generator" + str(epochs) + ".pt")
    print('Training is now complete! Models have been saved.')
    torch.save(encoder.state_dict(), "./output/models/encoder_final.pt")
    torch.save(generator.state_dict(), "./output/models/generator_final.pt")
# Script entry point: start training when executed directly.
if __name__ == "__main__":
    main()
| KIngsleyU/Vascular-Tree-Lifting | train.py | train.py | py | 11,603 | python | en | code | 0 | github-code | 13 |
37202482145 | from __future__ import division
from torchvision import models
import torch.utils.data.distributed
import os, sys
# Extract and save the last fully-connected layer parameters of each saved
# incremental-learning model (batches 2..S).
if len(sys.argv) != 4:
    print('Arguments : models_load_path_prefix, S , destination_dir')
    sys.exit(-1)

# CLI arguments: common prefix of the saved per-batch model files, the number
# of incremental states S, and the directory receiving the extracted weights.
models_load_path_prefix = sys.argv[1]
S = int(sys.argv[2])
destination_dir = sys.argv[3]

if not os.path.exists(destination_dir):
    os.makedirs(destination_dir)

for b in range(2, S+1):
    print('*' * 20)
    print('BATCH '+str(b))
    model_load_path = models_load_path_prefix + str(b - 1) + '.pth'
    if not os.path.exists(model_load_path):
        print('No model found in ' + model_load_path)
        continue
    print('Loading saved model from ' + model_load_path)
    model = torch.load(model_load_path)
    model.eval()
    destination_path = os.path.join(destination_dir, 'batch_'+str(b))
    print(model.fc.out_features)
    print('Saving stats in: '+ destination_path)
    # Last fully-connected layer parameters, moved to CPU so the saved file
    # can be loaded without a GPU. nn.Linear yields weight first, then bias.
    parameters = [e.cpu() for e in list(model.fc.parameters())]
    # (Removed dead debug code that converted the parameters to numpy arrays
    # under swapped names: parameters[0] is the weight, not the bias, and the
    # arrays were never used.)
    torch.save(parameters, destination_path)
37969898788 | ## jO to run H6 TB 2004 simulation
##--------------------------------
# Default job parameters; each is only set when the caller (athena command
# line or a wrapper jobOptions file) has not already defined it.
if not 'PoolHitsOutput' in dir():
    PoolHitsOutput="H6LAr_MyOutputFile.root"
if not 'EvtMax' in dir():
    EvtMax=10
if not 'CryoXPos' in dir():
    CryoXPos=0.
if not 'TableYPos' in dir():
    TableYPos=0.
if not 'ParticlePDG' in dir():
    ParticlePDG='211'
if not 'Energy' in dir():
    Energy=200000
##--------------------------------
## Algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSeq = AlgSequence()
#--- Detector flags -------------------------------------------
from AthenaCommon.DetFlags import DetFlags
# - Select detectors
DetFlags.Calo_setOn()
# - MCTruth
DetFlags.Truth_setOn()
#--- AthenaCommon flags ---------------------------------------
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
athenaCommonFlags.PoolHitsOutput=PoolHitsOutput
# Number of events to be processed (default is 10)
athenaCommonFlags.EvtMax=EvtMax
# explicitly switch off evgen reading
athenaCommonFlags.PoolEvgenInput.set_Off()
#--- DetDescr flag
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetDescrVersion.set_Value_and_Lock('ATLAS-H6-2004-00')
DetDescrVersion = 'ATLAS-H6-2004-00'
from AthenaCommon.JobProperties import jobproperties
jobproperties.Global.DetDescrVersion = "ATLAS-H6-2004-00"
#--- Simulation flags -----------------------------------------
# Choose H6 2004 (Emec/HEC/FCAL) testbeam layout
from G4AtlasApps.SimFlags import simFlags
simFlags.load_atlas_flags()
simFlags.SimLayout='tb_LArH6_2004'
simFlags.load_tbLArH6_flags()
#--- Conditions global tag ------
from AthenaCommon.GlobalFlags import globalflags
globalflags.ConditionsTag = "OFLCOND-CSC-00-00-00"
include.block ( "CaloConditions/LArTTCellMap_ATLAS_jobOptions.py" )
include.block ( "CaloConditions/CaloTTIdMap_ATLAS_jobOptions.py" )
#include("LArDetDescr/LArDetDescr_H6_joboptions.py")
# Set the cryostat and table at their (0,0) position:
simFlags.LArTB_H1CryoXPos.set_Value_and_Lock(CryoXPos)
simFlags.LArTB_H1TableYPos.set_Value_and_Lock(TableYPos)
simFlags.LArTB_H1XSmear.set_Value_and_Lock(35.)
simFlags.LArTB_H1YSmear.set_Value_and_Lock(35.)
simFlags.LArTB_H6Hec.set_Value_and_Lock(True)
simFlags.LArTB_H6Emec.set_Value_and_Lock(True)
simFlags.LArTB_H6Fcal.set_Value_and_Lock(True)
simFlags.LArTB_H6Coldnose.set_Value_and_Lock(True)
simFlags.LArTB_H6Run1.set_Value_and_Lock(False)
simFlags.LArTB_H6Step.set_Value_and_Lock(False)
# NOTE(review): the next line IS active even though the comment says
# "Uncomment for calibration run" -- confirm the calibration run is intended.
simFlags.CalibrationRun='LAr'
simFlags.EventFilter.set_Off()
#--- Generator flags ------------------------------------------
from AthenaServices.AthenaServicesConf import AtRanluxGenSvc
ServiceMgr += AtRanluxGenSvc()
ServiceMgr.AtRanluxGenSvc.Seeds = ["SINGLE 2000160768 643921183"]
simFlags.RandomSeedOffset.set_Value_and_Lock(1234)
simFlags.PhysicsList.set_Value_and_Lock('QGSP_BERT')
#--------- Birk's law on -----------------------
simFlags.DoLArBirk.set_Value_and_Lock(True)
#--- Output printout level -----------------------------------
#output threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL)
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = 4
include("LArG4TBSimEventAthenaPool/LArG4TBSimEventAthenaPool_joboptions.py")
#--- Generator ----------------------------------------------
#>## this starts the space to customize ParticleGenerator ####
include("LArGeoH62004Algs/tbH6Generator.py")
# # enter interactive mode
#theApp.initialize()
theApp.Dlls += [ "RootHistCnv" ]
theApp.HistogramPersistency = "ROOT"
NTupleSvc = Service( "NTupleSvc" )
NTupleSvc.Output = [ "FILE1 DATAFILE='Ntuple.root' OPT='NEW'" ]
#>## this starts the space to customize the simulation #######
# - ex1: change the verbosity
#simFlags.G4Commands += ['/tracking/verbose 3']
include("G4AtlasApps/G4Atlas.flat.configuration.py")
#==============================================================
# Job configuration
#==============================================================
from AthenaCommon.CfgGetter import getAlgorithm
topSeq += getAlgorithm("G4AtlasAlg",tryDefaultConfigurable=True)
#--- LArH6 setup description ----------------------------------
# Adding TB specific output
from AthenaCommon import CfgMgr
CfgMgr.AthenaOutputStream.OutputLevel = 5
outStreams = AlgSequence( "Streams" )
outStream = outStreams.StreamHITS
outStream.ItemList+=["LArG4H6WarmTCHitCollection#*"]
outStream.ItemList+=["LArG4H6FrontHitCollection#*"]
outStream.ItemList+=["TBEventInfo#*"]
# Another implementation of the same:
#from AthenaPoolCnvSvc.WriteAthenaPool import AthenaPoolOutputStream
#stream1 = AthenaPoolOutputStream("StreamHITS", jobproperties.AthenaCommonFlags.PoolHitsOutput())
#stream1.OutputLevel = 5
#stream1.ItemList+=["LArG4H6WarmTCHitCollection#*"]
#stream1.ItemList+=["LArG4H6FrontHitCollection#*"]
#stream1.ItemList+=["TBEventInfo#*"]
# choose filter or printout
svcMgr.GeoModelSvc.DetectorTools.__getitem__("LArDetectorToolH62004").CheckPrim=False
svcMgr.GeoModelSvc.DetectorTools.__getitem__("LArDetectorToolH62004").PrintStep=True
#--- End jobOptions.G4TB_LArH6-2004.py file ------------------
| rushioda/PIXELVALID_athena | athena/LArCalorimeter/LArG4TB/H6G4Sim/share/jobOptions.G4TB_LArH6-2004.py | jobOptions.G4TB_LArH6-2004.py | py | 5,114 | python | en | code | 1 | github-code | 13 |
22827248573 | from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# set `<your-endpoint>` and `<your-key>` variables with the values from the Azure portal
# SECURITY(review): a live API key, storage account key, and SAS-signed URL
# are hard-coded below. They should be rotated and moved into environment
# variables or a secrets store before this file is shared/committed.
endpoint = "https://testpdfrecognize.cognitiveservices.azure.com/"
key = "f251c939777240a092289a7c5d2ac60d"
blob_conn_str = "DefaultEndpointsProtocol=https;AccountName=ecgsdatastore;AccountKey=MRyWERByKo6Q8FWoOxSaqwgHgQjxoiBhlODkkIQ05J/6ciDVNBqDwJLgijvRou83V3FALN67/YIBToZHAlL5SQ==;EndpointSuffix=core.windows.net"
formUrl = "https://ecgsdatastore.blob.core.windows.net/cograw/US_20230215_EXAMPLE.pdf?sp=r&st=2023-04-28T01:09:26Z&se=2023-04-28T09:09:26Z&spr=https&sv=2021-12-02&sr=b&sig=EInWXyXcrPoabdUyrBY%2FXpuIthvgg3H8nB%2BbLnAa9fM%3D"
# Module-level client; note analyze_layout() builds its own client instead of
# reusing this one -- presumably leftover from an earlier version.
form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
def format_polygon(polygon):
    """Render a polygon as '[x, y], [x, y], ...'; 'N/A' when absent or empty."""
    if not polygon:
        return "N/A"
    return ", ".join("[{}, {}]".format(point.x, point.y) for point in polygon)
def analyze_layout(formUrl):
    """Analyze a document with Azure Form Recognizer and print its layout.

    Runs the 'prebuilt-document' model on the document at `formUrl` and
    prints styles, per-page lines/words/selection marks, and table cells,
    each with its bounding polygon. Blocks on network I/O until the service
    poller completes.
    """
    # sample form document
    # NOTE(review): builds a fresh client rather than reusing the
    # module-level `form_recognizer_client` -- confirm which is intended.
    document_analysis_client = DocumentAnalysisClient(
        endpoint=endpoint, credential=AzureKeyCredential(key)
    )
    poller = document_analysis_client.begin_analyze_document_from_url(
        "prebuilt-document", formUrl)
    result = poller.result()
    for idx, style in enumerate(result.styles):
        print(
            "Document contains {} content".format(
                "handwritten" if style.is_handwritten else "no handwritten"
            )
        )
    for page in result.pages:
        print("----Analyzing layout from page #{}----".format(page.page_number))
        print(
            "Page has width: {} and height: {}, measured with unit: {}".format(
                page.width, page.height, page.unit
            )
        )
        for line_idx, line in enumerate(page.lines):
            words = line.get_words()
            print(
                "...Line # {} has word count {} and text '{}' within bounding box '{}'".format(
                    line_idx,
                    len(words),
                    line.content,
                    format_polygon(line.polygon),
                )
            )
            for word in words:
                print(
                    "......Word '{}' has a confidence of {}".format(
                        word.content, word.confidence
                    )
                )
        for selection_mark in page.selection_marks:
            print(
                "...Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format(
                    selection_mark.state,
                    format_polygon(selection_mark.polygon),
                    selection_mark.confidence,
                )
            )
    for table_idx, table in enumerate(result.tables):
        print(
            "Table # {} has {} rows and {} columns".format(
                table_idx, table.row_count, table.column_count
            )
        )
        for region in table.bounding_regions:
            print(
                "Table # {} location on page: {} is {}".format(
                    table_idx,
                    region.page_number,
                    format_polygon(region.polygon),
                )
            )
        for cell in table.cells:
            print(
                "...Cell[{}][{}] has content '{}'".format(
                    cell.row_index,
                    cell.column_index,
                    cell.content,
                )
            )
            for region in cell.bounding_regions:
                print(
                    "...content on page {} is within bounding box '{}'".format(
                        region.page_number,
                        format_polygon(region.polygon),
                    )
                )
    print("----------------------------------------")
# Script entry point: analyze the sample document URL when run directly.
if __name__ == "__main__":
    analyze_layout(formUrl)
| NickKletnoi/pythonProject | form_recongnize.py | form_recongnize.py | py | 3,966 | python | en | code | 0 | github-code | 13 |
26377142147 | #!/usr/bin/env python
# coding: utf-8
# # Import libraries
# In[1]:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout,MaxPooling2D, BatchNormalization
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): get_ipython() only exists inside IPython/Jupyter; running this
# notebook-exported file as a plain script raises NameError here.
get_ipython().run_line_magic('matplotlib', 'inline')
# # Load the dataset
# In[2]:
# Binary image dataset; class 0 = "sc" (presumably schizophrenia -- confirm),
# class 1 = "healthy".
train_data = tf.keras.utils.image_dataset_from_directory(
    directory='dataset/training',
    labels='inferred',
    label_mode='binary',
    batch_size=32,
    class_names=["sc","healthy"],
    image_size=(508, 274))
# In[3]:
val_data = tf.keras.utils.image_dataset_from_directory(
    directory='dataset/validation',
    labels='inferred',
    label_mode='binary',
    batch_size=32,
    class_names=["sc","healthy"],
    image_size=(508, 274))
# # Model building and training
# In[4]:
# Frozen ImageNet-pretrained ResNet50 backbone + small dense head.
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False,
                   input_shape=(508, 274,3),
                   pooling='avg',
                   weights='imagenet')
for each_layer in pretrained_model.layers:
        each_layer.trainable=False
resnet_model.add(pretrained_model)
# In[5]:
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(1, activation='sigmoid'))
# In[6]:
resnet_model.compile(optimizer=Adam(learning_rate=0.001),loss='binary_crossentropy',metrics=['accuracy'])
print(resnet_model.summary())
# In[7]:
history = resnet_model.fit(train_data, validation_data=val_data, epochs=30)
# # Model evaluation
# In[9]:
# Accuracy curves over the 30 training epochs.
plt.figure(figsize=(8, 8))
epochs_range= range(30)
plt.plot( epochs_range, history.history['accuracy'], label="Training Accuracy")
plt.plot(epochs_range, history.history['val_accuracy'], label="Validation Accuracy")
plt.axis(ymin=0.4,ymax=1)
plt.grid()
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'validation'])
# In[10]:
# Loss curves (train vs. validation).
plt.figure(figsize=(8, 8))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# In[11]:
test_loss, test_acc = resnet_model.evaluate(val_data, verbose=2)
print(test_acc)
# In[12]:
train_loss, train_acc = resnet_model.evaluate(train_data, verbose=2)
print(train_acc)
# In[ ]:
| nipun2123/Schizophrenia_detection | Model development & training/Schizophrenia Detection (ResNet50)-Single best.py | Schizophrenia Detection (ResNet50)-Single best.py | py | 2,626 | python | en | code | 1 | github-code | 13 |
24784363448 | # нахождение корней у многочлена
# Find rational roots of an integer-coefficient polynomial: candidate roots
# p/q are tried via Horner's scheme, each found root deflates the polynomial,
# and a remaining quadratic is solved by the discriminant formula.
restore = [] # roots of the equation (as strings)
a = [int(i) for i in input().split()] # polynomial coefficients, highest degree first
w = len(a) - 1
a2 = [] # deflated polynomial left after dividing out the last found root
s2 = 1
# NOTE(review): candidate numerators p are taken from divisors of a[-1] (the
# constant term) and denominators q from divisors of a[0] (the leading
# coefficient), per the rational root theorem; only positive p are tried,
# while q covers both signs via the `i //= -1` trick below.
while len(a) != len(a2) and s2 < w:
    s = 0
    if a2 != []:
        a = a2
    p = []
    b1 = a[-1]
    q = []
    b2 = a[0]
    for i in range(1, int(abs(b1)) + 1):
        if b1 % i == 0:
            p.append(i)
    for i in range(1, int(abs(b2)) + 1):
        if b2 % i == 0:
            q.append(i)
            i //= -1
            q.append(i)
            i //= -1
    for i in p:
        for j in q:
            m = i / j
            c = a[0]
            cstore = []
            cstore.append(c)
            for t in range(1, len(a) - 1): # Horner's scheme
                c = a[t] + m * c
                cstore.append(c)
            c = a[-1] + m * c
            # Remainder zero => i/j is a root; keep the quotient as the
            # deflated polynomial. `s` accepts only one root per pass.
            if c == 0 and s == 0:
                s += 1
                s2 += 1
                restore.append(str(i) + '/' + str(j))
                a2 = cstore
if len(a) == 3: # solve the remaining quadratic trinomial
    if a[1] ** 2 - 4 * a[0] * a[2] > 0:
        q1 = '(' + str(-a[1]) + ' + √ ' + str(a[1] ** 2 - 4 * a[0] * a[2]) + \
             ') / ' + str(2 * a[0])
        q2 = '(' + str(-a[1]) + ' - √ ' + str(a[1] ** 2 - 4 * a[0] * a[2]) + \
             ') / ' + str(2 * a[0])
        restore.append(q1)
        restore.append(q2)
    elif a[1] ** 2 - 4 * a[0] * a[2] == 0:
        q1 = str(-a[1]) + '/' + str(2 * a[0])
        restore.append(q1)
# Print the roots; if some roots were not found, also print the remaining
# (deflated) polynomial's coefficients.
for i in restore:
    print(i, end='; ')
if len(restore) < w:
    for i in a:
        print(i, end='; ')
| SUPERustam/Special-projects | src/SuperA.py | SuperA.py | py | 1,753 | python | en | code | 0 | github-code | 13 |
46286727914 | # TODO: import reports
import matplotlib
# use the Agg backend, which is non-interactivate (just for PNGs)
# this way, a separate script isn't started by matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime
from diva import Diva, Dashboard
from diva.dashboard import row_layout
from diva.widgets import *
from bokeh.plotting import figure
from functools import singledispatch
# Single Diva app instance that all of the demo views below register onto.
reporter = Diva()
@singledispatch
def type_to_str(val):
    """Generic fallback: the value's type string with '<' and '>' removed.

    The angle brackets are stripped so the text renders inside raw HTML.
    """
    return str(type(val))[1:-1]
# helper for printing the types that widgets output:
def type_of_iterable(val):
    """Concatenate each item's type string, every entry followed by ', '."""
    return ''.join(type_to_str(item) + ', ' for item in val)
@type_to_str.register(tuple)
def tuple_type(val):
    """Format a tuple's member types as '(t1, t2, )'."""
    return '({})'.format(type_of_iterable(val))
@type_to_str.register(list)
def list_type(val):
    """Format a list's member types as '[t1, t2, ]'."""
    return '[{}]'.format(type_of_iterable(val))
# Provide an overview pages for all of the available widgets
# One instance of every available widget type; reused by the
# 'all widgets' overview view below.
all_widgets = [
    String('some text', 'hello'),
    Float('a float', 1.5),
    Int('an integer', 2),
    Bool('a bool', True),
    SelectOne('pick a name', ['foo', 'bar', 'baz'], 'bar'),
    SelectSubset('pick names', ['foo', 'bar', 'baz'], ['foo', 'baz']),
    Color('pick a color', '#ff0000'),
    Slider('a float'),
    Date('pick a date'),
    Time('pick a time'),
    DateRange('pick a date range')
]
@reporter.view('all widgets', all_widgets)
def widgets_test(wstr, wflo, wint, wbool, wso, wss, wcol, wsli, wdate, wtime, wdaterange):
    """Render each widget's class name, produced Python type, and value."""
    args = [wstr, wflo, wint, wbool, wso, wss, wcol, wsli, wdate, wtime, wdaterange]
    formats = ['{}', '{}', '{}', '{}', '{}', '{}', '{}', '{:f}', '{}', '{}', '{}']
    rows = []
    for widget, arg, fmt in zip(all_widgets, args, formats):
        rows.append("widget class: {}<br />type: {}<br />value: {}<br /><br />".format(
            widget.__class__.__name__, type_to_str(arg), fmt.format(arg)))
    return '<p>{}</p>'.format(''.join(rows))
# Skip widgets produce no argument; only the Int's value reaches the view.
@reporter.view('Skip wid', [Skip('heyhye'), Int('hey'), Skip('barbar')])
def skip_sample(i):
    return i
@reporter.view('convert: Dashboard')
def dashboard_view():
    """Two random DataFrames in an explicit one-row, two-column layout."""
    left = pd.DataFrame(np.random.randn(20, 20))
    right = pd.DataFrame(np.random.randn(10, 10))
    return Dashboard([left, right], [[0, 0, 1, 1], [1, 0, 1, 1]])
@reporter.view('test dash')
def test_dash():
    """Default-layout dashboard mixing DataFrames with a matplotlib figure."""
    frame_a = pd.DataFrame(np.random.randn(10, 10))
    frame_b = pd.DataFrame(np.random.randn(10, 10))
    plt.figure()
    plt.plot([1, 2, 3, 4], 'ks-', mec='w', mew=5, ms=20)
    return Dashboard([frame_a, plt.gcf(), frame_b])
@reporter.view('dashboard nice')
def dashboard_nice():
    """Dashboard with a bokeh line plot and four random DataFrames.

    Renamed from `dashboard_b`: an unrelated second `dashboard_b` defined
    just below shadowed this function's module-level name (the view itself
    was still registered under the 'dashboard nice' string).
    """
    x = [1, 2, 3, 4, 5]
    y = [6, 7, 2, 4, 5]
    plot = figure(title="bokeh example", x_axis_label='x', y_axis_label='y')
    plot.line(x, y, legend="Temp", line_width=2)
    a = pd.DataFrame(np.random.randn(20, 20))
    b = pd.DataFrame(np.random.randn(10, 10))
    c = pd.DataFrame(np.random.randn(5, 5))
    d = pd.DataFrame(np.random.randn(1, 1))
    return Dashboard([a, b, plot, c, d])
@reporter.view('dashboard b')
def dashboard_b():
    """Four random DataFrames in an explicit 2x2 grid layout."""
    frames = [pd.DataFrame(np.random.randn(n, n)) for n in (20, 10, 5, 1)]
    grid = [[0, 0, 1, 1], [1, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]
    return Dashboard(frames, grid)
@reporter.view('dashboard c')
def dashboard_c():
    """Four random DataFrames: one triple-width top row, three cells below."""
    frames = [pd.DataFrame(np.random.randn(n, n)) for n in (20, 10, 5, 50)]
    grid = [[0, 0, 3, 1], [0, 1, 1, 1], [1, 1, 1, 1], [2, 1, 1, 1]]
    return Dashboard(frames, grid)
@reporter.view('dashboard d')
def dashboard_d():
    """Bokeh plot plus three random DataFrames in an automatic 2x2 row layout.

    (Removed the unused 20x20 DataFrame `a` the original created but never
    placed on the dashboard.)
    """
    x = [1, 2, 3, 4, 5]
    y = [6, 7, 2, 4, 5]
    plot = figure(title="bokeh example", x_axis_label='x', y_axis_label='y')
    plot.line(x, y, legend="Temp", line_width=2)
    b = pd.DataFrame(np.random.randn(10, 10))
    c = pd.DataFrame(np.random.randn(5, 5))
    d = pd.DataFrame(np.random.randn(50, 50))
    return Dashboard([plot, b, c, d], row_layout(2, 2))
@reporter.view('convert: str')
def raw_html():
    # A returned string is passed through as raw HTML by the reporter.
    return '<h1>Raw HTML</h1><p>If a string is returned, it is assumed to be raw HTML</p>'
@reporter.view('convert: matplotlib.figure.Figure')
def matplot_fig():
    """A matplotlib figure; returned via gcf() after plotting on it."""
    plt.figure()
    plt.plot([1, 2, 3, 4], 'ks-', mec='w', mew=5, ms=20)
    current_figure = plt.gcf()
    return current_figure
@reporter.view('another matplotlib')
def matplot_b():
    """A second matplotlib figure with different data points."""
    plt.figure()
    plt.plot([5, 6, 7, 7], 'ks-', mec='w', mew=5, ms=20)
    current_figure = plt.gcf()
    return current_figure
@reporter.view('convert: pandas.DataFrame')
def pandas_df():
    """A 20x20 random DataFrame, converted to an HTML table by the reporter."""
    return pd.DataFrame(np.random.randn(20, 20))
@reporter.view('convert: pandas.Series')
def pandas_series():
    """An integer Series with values 0..99."""
    return pd.Series(list(range(100)))
@reporter.view('convert: bokeh.plotting.figure.Figure')
def bokeh_fig():
    """A simple bokeh line plot with labeled axes."""
    xs = [1, 2, 3, 4, 5]
    ys = [6, 7, 2, 4, 5]
    plot = figure(title="bokeh example", x_axis_label='x', y_axis_label='y')
    plot.line(xs, ys, legend="Temp", line_width=2)
    return plot
@reporter.view('convert: none of the above (ex. datetime.time)')
def na():
    # A type without a dedicated converter exercises the reporter's fallback.
    return datetime.now()
# Register 100 identical filler views to populate/stress the report index.
for i in range(100):
    @reporter.view('filler report {}'.format(i))
    def foo():
        return '<p>hi</p>'
# Script entry point: serve the Diva app with debug reloading enabled.
if __name__ == "__main__":
    reporter.run(debug=True)
| mgriley/diva | examples/rough_examples/my_demo.py | my_demo.py | py | 5,455 | python | en | code | 45 | github-code | 13 |
20346979993 | """Classes in this module are used to declare the place where a xml value is located inside a document.
They also provide a mapping between XML data types (which are always stings in specific formats) and
python types. By doing so these classes completely hide the XML nature of data.
The basic offered types are Element, list of elements, attribute, and list of attributes.
They are the buildings blocks that are needed to declare XML data types.
Container properties represent values in xml nodes.
"""
from __future__ import annotations
import copy
import time
from abc import ABC, abstractmethod
from datetime import date, datetime
from typing import TYPE_CHECKING, Any, Callable
from lxml import etree as etree_
from sdc11073.exceptions import ApiUsageError
from sdc11073.namespaces import QN_TYPE, docname_from_qname, text_to_qname
from . import isoduration
from .dataconverters import (
BooleanConverter,
ClassCheckConverter,
DecimalConverter,
DurationConverter,
EnumConverter,
IntegerConverter,
ListConverter,
NullConverter,
StringConverter,
TimestampConverter,
)
from sdc11073 import xml_utils
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from decimal import Decimal
from sdc11073.namespaces import NamespaceHelper
from sdc11073.xml_types.basetypes import XMLTypeBase
from sdc11073.mdib.containerbase import ContainerBase
from .dataconverters import DataConverterProtocol
from .isoduration import DurationType
STRICT_TYPES = True  # if True, only the expected types are accepted.
MANDATORY_VALUE_CHECKING = True  # checks if mandatory values are present when xml is generated
class ElementNotFoundError(Exception):
    """Error raised when an XML element is not found."""
    pass
class _NumberStack:
# uses as a part of _local_var_name in _XmlStructureBaseProperty.
# This makes duplicate names impossible
_value = 0
@classmethod
def unique_number(cls) -> str:
cls._value += 1
return str(cls._value)
class _XmlStructureBaseProperty(ABC):
    """_XmlStructureBaseProperty defines a python property that converts between Python Data Types and XML data types.

    It has knowledge about two things:
    - how to convert data from xml to python type and vice versa
    - name/ location of the xml data in a node.

    All derived Properties have the same interface:
    __get__ and __set__ : read and write access, using Python data types.
    get_py_value_from_node: reads the value from XML data and converts it to Python data type.
    update_xml_value: convert the Python data type to XML type and write it to XML node.
    """

    def __init__(self, local_var_name: str,  # noqa: PLR0913
                 value_converter: DataConverterProtocol,
                 default_py_value: Any | None = None,
                 implied_py_value: Any | None = None,
                 is_optional: bool = False):
        """Construct an instance.

        :param local_var_name: a member with this name is added to instance
        :param value_converter: DataConverterProtocol
        :param default_py_value: initial value when initialized
                                 (should be set for mandatory elements, otherwise created xml might violate schema)
                                 and if the xml element does not exist.
        :param implied_py_value: for optional elements, this is the value that shall be implied if
                                 xml element does not exist.
                                 This value is for information only! Access only via class possible.
        :param is_optional: reflects if this element is optional in schema
        :raises ValueError: if both default_py_value and implied_py_value are given, or if an
                            implied value is given for a mandatory element.
        :raises TypeError: if value_converter does not provide check_valid.
        """
        if implied_py_value is not None and default_py_value is not None:
            raise ValueError('set only one of default_py_value and implied_py_value')
        if not is_optional and implied_py_value is not None:
            raise ValueError('is_optional == False and implied_py_value != None is not allowed ')
        if not hasattr(value_converter, 'check_valid'):
            raise TypeError
        self._converter = value_converter
        if STRICT_TYPES:
            # fail early if the declared default/implied values do not satisfy the converter
            if default_py_value is not None:
                self._converter.check_valid(default_py_value)
            if implied_py_value is not None:
                self._converter.check_valid(implied_py_value)
        self._default_py_value = None
        self._implied_py_value = None
        if default_py_value is not None:
            self._default_py_value = default_py_value
        if implied_py_value is not None:
            self._implied_py_value = implied_py_value
        self._is_optional = is_optional
        self._local_var_name = local_var_name
        # NOTE(review): _is_default_value_set is written but never read in this module — confirm it is still needed.
        self._is_default_value_set = False

    @property
    def is_optional(self) -> bool:
        """Return True if this element/attribute is optional in the schema."""
        return self._is_optional

    def __get__(self, instance, owner) -> Any:  # noqa: ANN001
        """Return a python value, use the locally stored value."""
        if instance is None:  # if called via class
            return self
        try:
            value = getattr(instance, self._local_var_name)
        except AttributeError:
            value = None
        if value is None:
            # fall back to the implied value (still None if no implied value was declared)
            value = self._implied_py_value
        return value

    def get_actual_value(self, instance: Any) -> Any | None:
        """Return the actual value without considering default value and implied value.

        E.g. return None if no value in xml exists.

        :param instance: the instance that has the property as member
        """
        try:
            return getattr(instance, self._local_var_name)
        except AttributeError:
            return None

    def __set__(self, instance, py_value):  # noqa: ANN001
        """Value is the representation on the program side, e.g a float."""
        if STRICT_TYPES:
            self._converter.check_valid(py_value)
        setattr(instance, self._local_var_name, py_value)

    def init_instance_data(self, instance: Any):
        """Set initial values to default_py_value.

        This method is used internally and should not be called by application.

        :param instance: the instance that has the property as member
        :return: None
        """
        if self._default_py_value is not None:
            # deepcopy so that instances never share a (possibly mutable) default object
            setattr(instance, self._local_var_name, copy.deepcopy(self._default_py_value))

    @abstractmethod
    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Update node with current data from instance.

        This method is used internally and should not be called by application.

        :param instance: the instance that has the property as member
        :param node: the etree node that shall be updated
        :return: None
        """

    @abstractmethod
    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement):
        """Read data from node.

        This method is used internally and should not be called by application.

        :param instance: the instance that has the property as member
        :param node: the etree node that provides the value
        :return: value
        """

    def update_from_node(self, instance: Any, node: xml_utils.LxmlElement):
        """Update instance data with data from node.

        This method is used internally and should not be called by application.

        :param instance: the instance that has the property as member
        :param node: the etree node that provides the value
        :return: None
        """
        value = self.get_py_value_from_node(instance, node)
        setattr(instance, self._local_var_name, value)
class _AttributeBase(_XmlStructureBaseProperty):
    """Base class that represents an XML Attribute.

    The XML Representation is a string.
    The python representation is determined by value_converter.
    """

    def __init__(self, attribute_name: str,  # noqa: PLR0913
                 value_converter: DataConverterProtocol | None = None,
                 default_py_value: Any = None,
                 implied_py_value: Any = None,
                 is_optional: bool = True):
        """Construct an instance.

        :param attribute_name: name of the attribute in xml node
        :param value_converter: converter between xml value and python value
        :param default_py_value: see base class doc.
        :param implied_py_value: see base class doc.
        :param is_optional: see base class doc.
        """
        # attribute_name may also be an etree QName; use its localname for the member name then
        if isinstance(attribute_name, etree_.QName):
            local_var_name = f'_a_{attribute_name.localname}_{_NumberStack.unique_number()}'
        else:
            local_var_name = f'_a_{attribute_name.lower()}_{_NumberStack.unique_number()}'
        super().__init__(local_var_name, value_converter, default_py_value, implied_py_value, is_optional)
        self._attribute_name = attribute_name

    def get_py_value_from_node(self, instance: Any,  # noqa: ARG002
                               node: xml_utils.LxmlElement | None) -> Any:
        """Read the attribute from node and convert it to the python type; None if node or attribute is absent."""
        xml_value = None if node is None else node.attrib.get(self._attribute_name)
        return None if xml_value is None else self._converter.to_py(xml_value)

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            # this can only happen if there is no default value defined and __set__ has never been called
            py_value = None
        if py_value is None:
            if MANDATORY_VALUE_CHECKING and not self.is_optional:
                raise ValueError(f'mandatory value {self._attribute_name} missing')
            # no value: remove the attribute from the node if present
            # NOTE(review): attrib access does not raise ElementNotFoundError; this except
            # looks defensive/unreachable — confirm before removing it.
            try:
                if self._attribute_name in node.attrib:
                    del node.attrib[self._attribute_name]
            except ElementNotFoundError:
                return
        else:
            xml_value = self._converter.to_xml(py_value)
            node.set(self._attribute_name, xml_value)

    def __str__(self) -> str:
        return f'{self.__class__.__name__} attribute {self._attribute_name}'
class _ElementBase(_XmlStructureBaseProperty, ABC):
    """_ElementBase represents an XML Element."""

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 value_converter: DataConverterProtocol,
                 default_py_value: Any = None,
                 implied_py_value: Any = None,
                 is_optional: bool = False):
        """Construct the representation of a (sub) element in xml.

        :param sub_element_name: a QName or None. If None, the property represents the node itself,
                                 otherwise the sub node with given name.
        :param value_converter: see base class doc.
        :param default_py_value: see base class doc.
        :param implied_py_value: see base class doc.
        :param is_optional: see base class doc.
        """
        if sub_element_name is None:
            local_var_name = f'_e_{_NumberStack.unique_number()}'
        else:
            local_var_name = f'_e_{sub_element_name.localname.lower()}_{_NumberStack.unique_number()}'
        super().__init__(local_var_name, value_converter, default_py_value, implied_py_value, is_optional)
        self._sub_element_name = sub_element_name

    @staticmethod
    def _get_element_by_child_name(node: xml_utils.LxmlElement,
                                   sub_element_name: etree_.QName | None,
                                   create_missing_nodes: bool) -> xml_utils.LxmlElement:
        """Return the element that holds the data: node itself, or the named child of node.

        :raises ElementNotFoundError: if the child is missing and create_missing_nodes is False.
        """
        if sub_element_name is None:
            return node
        sub_node = node.find(sub_element_name)
        if sub_node is None:
            if not create_missing_nodes:
                raise ElementNotFoundError(f'Element {sub_element_name} not found in {node.tag}')
            sub_node = etree_.SubElement(node, sub_element_name)  # create this node
        return sub_node

    def remove_sub_element(self, node: xml_utils.LxmlElement):
        """Remove the sub element from node if it exists (no-op when the property represents node itself)."""
        if self._sub_element_name is None:
            return
        sub_node = node.find(self._sub_element_name)
        if sub_node is not None:
            node.remove(sub_node)

    def __str__(self) -> str:
        return f'{self.__class__.__name__} in sub element {self._sub_element_name}'
class StringAttributeProperty(_AttributeBase):
    """An XML attribute whose python representation is a plain string."""

    def __init__(self, attribute_name: str,
                 default_py_value: Any = None,
                 implied_py_value: Any = None, is_optional: bool = True):
        super().__init__(attribute_name,
                         value_converter=StringConverter,
                         default_py_value=default_py_value,
                         implied_py_value=implied_py_value,
                         is_optional=is_optional)
class AnyURIAttributeProperty(StringAttributeProperty):
    """Represents an AnyURI attribute (stored as a plain string, no URI validation here)."""
class CodeIdentifierAttributeProperty(StringAttributeProperty):
    """Represents a CodeIdentifier attribute; behaves exactly like StringAttributeProperty."""
class HandleAttributeProperty(StringAttributeProperty):
    """Represents a Handle attribute; behaves exactly like StringAttributeProperty."""
class HandleRefAttributeProperty(StringAttributeProperty):
    """Represents a HandleRef attribute; behaves exactly like StringAttributeProperty."""
class SymbolicCodeNameAttributeProperty(StringAttributeProperty):
    """Represents a SymbolicCodeName attribute; behaves exactly like StringAttributeProperty."""
class ExtensionAttributeProperty(StringAttributeProperty):
    """Represents an Extension attribute; behaves exactly like StringAttributeProperty."""
class LocalizedTextRefAttributeProperty(StringAttributeProperty):
    """Represents a LocalizedTextRef attribute; behaves exactly like StringAttributeProperty."""
class TimeZoneAttributeProperty(StringAttributeProperty):
    """Represents a TimeZone attribute; behaves exactly like StringAttributeProperty."""
class EnumAttributeProperty(_AttributeBase):
    """An XML attribute whose python representation is a member of the given enum class."""

    def __init__(self, attribute_name: str,  # noqa: PLR0913
                 enum_cls: Any,
                 default_py_value: Any = None,
                 implied_py_value: Any = None,
                 is_optional: bool = True):
        converter = EnumConverter(enum_cls)
        super().__init__(attribute_name,
                         value_converter=converter,
                         default_py_value=default_py_value,
                         implied_py_value=implied_py_value,
                         is_optional=is_optional)
class TimestampAttributeProperty(_AttributeBase):
    """Represents a Timestamp attribute.

    XML notation is integer in milliseconds.
    Python is a float in seconds.
    """

    def __init__(self, attribute_name: str,
                 default_py_value: Any = None,
                 implied_py_value: Any = None,
                 is_optional: bool = True):
        super().__init__(attribute_name, TimestampConverter,
                         default_py_value, implied_py_value, is_optional)
class CurrentTimestampAttributeProperty(_AttributeBase):
    """Represents a special Timestamp attribute used for ClockState, it always writes current time to node.

    Setting the value from python is possible, but makes no sense.
    """

    def __init__(self, attribute_name: str,
                 is_optional: bool = True):
        super().__init__(attribute_name, TimestampConverter, None, None, is_optional)

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Store the current time on instance, then write it to node."""
        now = time.time()
        setattr(instance, self._local_var_name, now)
        super().update_xml_value(instance, node)
class DecimalAttributeProperty(_AttributeBase):
    """An XML attribute whose python representation is a Decimal."""

    def __init__(self, attribute_name: str,
                 default_py_value: Decimal | None = None,
                 implied_py_value: Decimal | None = None,
                 is_optional: bool = True):
        super().__init__(attribute_name, DecimalConverter,
                         default_py_value, implied_py_value, is_optional)
class QualityIndicatorAttributeProperty(DecimalAttributeProperty):
    """Represents a QualityIndicator attribute, a value between 0 and 1 (range not enforced here)."""
class DurationAttributeProperty(_AttributeBase):
    """Represents a Duration attribute.

    XML notation is an ISO 8601 duration string (handled by DurationConverter).
    Python representation is a DurationType.
    """

    def __init__(self, attribute_name: str,
                 default_py_value: DurationType | None = None,
                 implied_py_value: DurationType | None = None,
                 is_optional: bool = True):
        super().__init__(attribute_name, value_converter=DurationConverter,
                         default_py_value=default_py_value, implied_py_value=implied_py_value, is_optional=is_optional)
class IntegerAttributeProperty(_AttributeBase):
    """Represents an Integer attribute.

    XML notation is an integer, python is an integer.
    """

    def __init__(self, attribute_name: str,
                 default_py_value: int | None = None,
                 implied_py_value: int | None = None,
                 is_optional: bool = True):
        super().__init__(attribute_name, IntegerConverter,
                         default_py_value, implied_py_value, is_optional)
class UnsignedIntAttributeProperty(IntegerAttributeProperty):
    """Represents an UnsignedInt attribute.

    Python has no unsigned int, therefore this is the same as IntegerAttributeProperty.
    """
class VersionCounterAttributeProperty(UnsignedIntAttributeProperty):
    """Represents a VersionCounter attribute.

    VersionCounter in BICEPS is unsigned long.
    Python has no unsigned long, therefore this is the same as IntegerAttributeProperty.
    """
class ReferencedVersionAttributeProperty(VersionCounterAttributeProperty):
    """Represents a ReferencedVersion attribute; behaves exactly like VersionCounterAttributeProperty."""
class BooleanAttributeProperty(_AttributeBase):
    """Represents a Boolean attribute.

    XML notation is 'true' or 'false'.
    Python is a bool.
    """

    def __init__(self, attribute_name: str,
                 default_py_value: bool | None = None,
                 implied_py_value: bool | None = None,
                 is_optional: bool = True):
        super().__init__(attribute_name, BooleanConverter,
                         default_py_value, implied_py_value, is_optional)
class QNameAttributeProperty(_AttributeBase):
    """Represents a qualified name attribute.

    XML Representation is a prefix:name string, Python representation is a QName.
    """

    def __init__(self, attribute_name: str,
                 default_py_value: etree_.QName | None = None,
                 implied_py_value: etree_.QName | None = None,
                 is_optional: bool = True):
        super().__init__(attribute_name, value_converter=ClassCheckConverter(etree_.QName),
                         default_py_value=default_py_value, implied_py_value=implied_py_value, is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any,  # noqa: ARG002
                               node: xml_utils.LxmlElement | None) -> Any:
        """Read the attribute string and resolve its prefix against the node's nsmap into a QName."""
        xml_value = None if node is None else node.attrib.get(self._attribute_name)
        return None if xml_value is None else text_to_qname(xml_value, node.nsmap)

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None
            py_value = None
        if py_value is None:
            # unlike _AttributeBase, fall back to the default value before deciding how to proceed
            py_value = self._default_py_value
        if py_value is None:
            if MANDATORY_VALUE_CHECKING and not self.is_optional:
                raise ValueError(f'mandatory value {self._attribute_name} missing')
            # NOTE(review): attrib access does not raise ElementNotFoundError; this except looks
            # defensive/unreachable — confirm before removing it.
            try:
                if self._attribute_name in node.attrib:
                    del node.attrib[self._attribute_name]
            except ElementNotFoundError:
                return
        else:
            # docname_from_qname requires that node.nsmap has a prefix for the QName's namespace
            xml_value = docname_from_qname(py_value, node.nsmap)
            node.set(self._attribute_name, xml_value)
class _AttributeListBase(_AttributeBase):
    """Base class for a list of values as attribute.

    XML Representation is a string which is a space separated list.
    Python representation is a list of Any (type depends on ListConverter),
    else a list of converted values.
    """

    _converter: ListConverter

    def __init__(self, attribute_name: str,
                 value_converter: ListConverter,
                 is_optional: bool = True):
        super().__init__(attribute_name, value_converter, is_optional=is_optional)

    def __get__(self, instance, owner):  # noqa: ANN001
        """Return a python value, use the locally stored value."""
        if instance is None:  # if called via class
            return self
        try:
            return getattr(instance, self._local_var_name)
        except AttributeError:
            # lazily create an empty list so callers can append without prior initialization
            setattr(instance, self._local_var_name, [])
            return getattr(instance, self._local_var_name)

    def init_instance_data(self, instance: Any):
        """Initialize the instance member with an empty list."""
        setattr(instance, self._local_var_name, [])

    def get_py_value_from_node(self, instance: Any,  # noqa: ARG002
                               node: xml_utils.LxmlElement | None) -> list[Any]:
        """Split the attribute string at spaces and convert each entry; empty list if attribute is absent."""
        xml_value = None if node is None else node.attrib.get(self._attribute_name)
        if xml_value is not None:
            split_result = xml_value.split(' ')
            # 'if val' skips empty strings caused by consecutive spaces
            return [self._converter.elem_to_py(val) for val in split_result if val]
        return []

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write the list as a space separated string; remove the attribute if list is empty and optional."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if not py_value and self.is_optional:  # is None:
            # NOTE(review): attrib access does not raise ElementNotFoundError; this except looks
            # defensive/unreachable — confirm before removing it.
            try:
                if self._attribute_name in node.attrib:
                    del node.attrib[self._attribute_name]
            except ElementNotFoundError:
                return
        else:
            if py_value is None:
                if MANDATORY_VALUE_CHECKING and not self.is_optional:
                    raise ValueError(f'mandatory value {self._attribute_name} missing')
                # mandatory but checking disabled: write an empty attribute
                xml_value = ''
            else:
                xml_value = ' '.join([self._converter.elem_to_xml(v) for v in py_value])
            node.set(self._attribute_name, xml_value)
class _StringAttributeListBase(_AttributeListBase):
    """Base class for a list of strings as attribute.

    XML Representation is a string which is a space separated list.
    Python representation is a list of strings.
    """

    def __init__(self, attribute_name: str, value_converter: DataConverterProtocol | None = None):
        super().__init__(attribute_name,
                         value_converter or ListConverter(ClassCheckConverter(str)))
class HandleRefListAttributeProperty(_StringAttributeListBase):
    """Represents a HandleRef list attribute (space separated string in XML)."""
class EntryRefListAttributeProperty(_StringAttributeListBase):
    """Represents an EntryRef list attribute (space separated string in XML)."""
class OperationRefListAttributeProperty(_StringAttributeListBase):
    """Represents an OperationRef list attribute (space separated string in XML)."""
class AlertConditionRefListAttributeProperty(_StringAttributeListBase):
    """Represents an AlertConditionRef list attribute (space separated string in XML)."""
class DecimalListAttributeProperty(_AttributeListBase):
    """Represents a list of Decimal attribute.

    XML representation: an attribute string that represents 0...n decimals, separated with spaces.
    Python representation: List of Decimal if attribute is set (can be an empty list!), otherwise None.
    """

    def __init__(self, attribute_name: str):
        converter = ListConverter(DecimalConverter)
        super().__init__(attribute_name, converter)
class NodeTextProperty(_ElementBase):
    """Represents the text of an XML Element.

    Python representation depends on value converter.
    """

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 value_converter: DataConverterProtocol,
                 default_py_value: Any | None = None,
                 implied_py_value: Any | None = None,
                 is_optional: bool = False,
                 min_length: int = 0):
        """Construct an instance.

        :param min_length: if > 0, a missing value for a mandatory element raises on write.
        """
        super().__init__(sub_element_name, value_converter,
                         default_py_value,
                         implied_py_value,
                         is_optional)
        self._min_length = min_length

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        :return: None if the element was not found, else result of converter.
        """
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
        except ElementNotFoundError:
            return None  # element was not found, return None
        return self._converter.to_py(sub_node.text)

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if py_value is None:
            # mandatory check only applies when a non-empty text is required (min_length > 0)
            if MANDATORY_VALUE_CHECKING and not self.is_optional and self._min_length:
                raise ValueError(f'mandatory value {self._sub_element_name} missing')
            if not self._sub_element_name:
                # update text of this element
                node.text = None
            elif self.is_optional:
                # optional: remove the whole sub element if it exists
                sub_node = node.find(self._sub_element_name)
                if sub_node is not None:
                    node.remove(sub_node)
            else:
                # mandatory with min_length == 0: keep/create the element, clear its text
                sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
                sub_node.text = None
        else:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
            sub_node.text = self._converter.to_xml(py_value)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__} in sub-element {self._sub_element_name}'
class NodeStringProperty(NodeTextProperty):
    """Represents the text of an XML Element.

    Python representation is a string.
    libxml sets text of element to None, if text in xml is empty. In this case the python value is an empty string.
    if the xml element that should contain the text does not exist, the python value is None.
    """

    def __init__(self, sub_element_name: etree_.QName | None = None,  # noqa: PLR0913
                 default_py_value: str | None = None,
                 implied_py_value: str | None = None,
                 is_optional: bool = False,
                 min_length: int = 0):
        super().__init__(sub_element_name,
                         value_converter=StringConverter,
                         default_py_value=default_py_value,
                         implied_py_value=implied_py_value,
                         is_optional=is_optional,
                         min_length=min_length)
class AnyUriTextElement(NodeStringProperty):
    """For now the same as NodeStringProperty, but later it could be handy to add uri type checking."""
# class LocalizedTextContentProperty(NodeStringProperty):
# pass
class NodeEnumTextProperty(NodeTextProperty):
    """Represents the text of an XML Element.

    Python representation is an enum.
    """

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 enum_cls: Any,
                 default_py_value: Any | None = None,
                 implied_py_value: Any | None = None,
                 is_optional: bool = False):
        self.enum_cls = enum_cls
        super().__init__(sub_element_name,
                         EnumConverter(enum_cls),
                         default_py_value,
                         implied_py_value,
                         is_optional,
                         min_length=1)
class NodeEnumQNameProperty(NodeTextProperty):
    """Represents a qualified name as text of an XML Element.

    Python representation is an Enum of QName, XML is prefix:localname.
    """

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 enum_cls: Any,
                 default_py_value: Any | None = None,
                 implied_py_value: Any | None = None,
                 is_optional: bool = False):
        super().__init__(sub_element_name, EnumConverter(enum_cls), default_py_value, implied_py_value,
                         is_optional, min_length=1)
        self.enum_cls = enum_cls

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node."""
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            # NOTE(review): text without a 'prefix:' part raises ValueError here (unprefixed values /
            # default namespace are not handled) — confirm all expected values are prefixed.
            prefix, localname = sub_node.text.split(':')
            namespace = node.nsmap[prefix]
            q_name = etree_.QName(namespace, localname)
            return self._converter.to_py(q_name)
        except ElementNotFoundError:
            return self._default_py_value

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if py_value is None:
            if MANDATORY_VALUE_CHECKING and not self.is_optional and self._min_length:
                raise ValueError(f'mandatory value {self._sub_element_name} missing')
            if not self._sub_element_name:
                # update text of this element
                node.text = ''
            elif self.is_optional:
                sub_node = node.find(self._sub_element_name)
                if sub_node is not None:
                    node.remove(sub_node)
            else:
                sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
                sub_node.text = None
        else:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
            # look up a prefix for the value's namespace; py_value.value is expected to be a QName
            for prefix, namespace in sub_node.nsmap.items():
                if namespace == py_value.value.namespace:
                    value = f'{prefix}:{py_value.value.localname}'
                    sub_node.text = self._converter.to_xml(value)
                    return
            raise ValueError(f'no prefix for namespace "{py_value.value.namespace}"')
class NodeIntProperty(NodeTextProperty):
    """Represents the text of an XML Element; python representation is an int."""

    def __init__(self, sub_element_name: etree_.QName | None = None,  # noqa: PLR0913
                 default_py_value: int | None = None,
                 implied_py_value: int | None = None,
                 is_optional: bool = False,
                 min_length: int = 0):
        super().__init__(sub_element_name,
                         value_converter=IntegerConverter,
                         default_py_value=default_py_value,
                         implied_py_value=implied_py_value,
                         is_optional=is_optional,
                         min_length=min_length)
class NodeDecimalProperty(NodeTextProperty):
    """Represents the text of an XML Element; python representation is a Decimal (uses DecimalConverter)."""

    def __init__(self, sub_element_name: etree_.QName | None = None,  # noqa: PLR0913
                 default_py_value: Decimal | None = None,
                 implied_py_value: Decimal | None = None,
                 is_optional: bool = False,
                 min_length: int = 0):
        super().__init__(sub_element_name, DecimalConverter, default_py_value, implied_py_value,
                         is_optional, min_length)
class NodeDurationProperty(NodeTextProperty):
    """Represents the text of an XML Element; python representation is a DurationType (uses DurationConverter)."""

    def __init__(self, sub_element_name: etree_.QName | None = None,  # noqa: PLR0913
                 default_py_value: isoduration.DurationType | None = None,
                 implied_py_value: isoduration.DurationType | None = None,
                 is_optional: bool = False,
                 min_length: int = 0):
        super().__init__(sub_element_name, DurationConverter, default_py_value, implied_py_value,
                         is_optional, min_length)
class NodeTextQNameProperty(_ElementBase):
    """The handled data is a single qualified name in the text of an element in the form prefix:localname."""

    def __init__(self, sub_element_name: etree_.QName | None,
                 default_py_value: etree_.QName | None = None,
                 is_optional: bool = False):
        super().__init__(sub_element_name, ClassCheckConverter(etree_.QName), default_py_value,
                         is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node."""
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            xml_value = sub_node.text
            if xml_value is not None:
                value = text_to_qname(xml_value, sub_node.nsmap)
                return value
        except ElementNotFoundError:
            pass
        # element missing or has no text: report the default
        return self._default_py_value

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if py_value is None:
            if not self._sub_element_name:
                # update text of this element
                node.text = ''
            elif self.is_optional:
                sub_node = node.find(self._sub_element_name)
                if sub_node is not None:
                    node.remove(sub_node)
            else:
                if MANDATORY_VALUE_CHECKING and not self.is_optional:
                    raise ValueError(f'mandatory value {self._sub_element_name} missing')
                sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
                sub_node.text = None
        else:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
            # docname_from_qname requires a prefix for the namespace in sub_node.nsmap
            value = docname_from_qname(py_value, sub_node.nsmap)
            sub_node.text = value
def _compare_extension(left: xml_utils.LxmlElement, right: xml_utils.LxmlElement) -> bool:
    """Return True if both xml trees are equal: tags, attributes, text and ordered children.

    Comment nodes are ignored; if right is not an element-like object, the result is False.
    """
    try:
        if left.tag != right.tag:  # compare expanded names
            return False
        if dict(left.attrib) != dict(right.attrib):  # unclear how lxml _Attrib compares
            return False
    except AttributeError:  # right side is not an Element type because expected attributes are missing
        return False
    # ignore comments
    left_children = [c for c in left if not isinstance(c, etree_._Comment)]
    right_children = [c for c in right if not isinstance(c, etree_._Comment)]
    if len(left_children) != len(right_children):  # compare children count
        return False
    if not left_children:
        # mixed content is not allowed: only compare text if there are no children
        return left.text == right.text
    # compare children pairwise, keeping order
    return all(_compare_extension(lc, rc) for lc, rc in zip(left_children, right_children))
class ExtensionLocalValue(list[xml_utils.LxmlElement]):
    """Python representation of an ext:Extension: a list of xml elements with element-wise equality."""

    compare_method: Callable[[xml_utils.LxmlElement, xml_utils.LxmlElement], bool] = _compare_extension
    """may be overwritten by user if a custom comparison behaviour is required"""

    def __eq__(self, other: Sequence) -> bool:
        """Compare element-wise with compare_method; False if other has no len() or lengths differ."""
        try:
            if len(self) != len(other):
                return False
        except TypeError:  # len of other cannot be determined
            return False
        return all(self.__class__.compare_method(left, right) for left, right in zip(self, other))
class ExtensionNodeProperty(_ElementBase):
    """Represents an ext:Extension Element that contains 0...n child elements of any kind.

    The python representation is an ExtensionLocalValue with list of elements.
    """

    def __init__(self, sub_element_name: etree_.QName | None, default_py_value: Any | None = None):
        super().__init__(sub_element_name, ClassCheckConverter(ExtensionLocalValue), default_py_value,
                         is_optional=True)

    def __set__(self, instance: Any, value: Iterable):
        """Coerce any iterable into an ExtensionLocalValue before storing it."""
        if not isinstance(value, ExtensionLocalValue):
            value = ExtensionLocalValue(value)
        super().__set__(instance, value)

    def __get__(self, instance, owner):  # noqa: ANN001
        """Return a python value, uses the locally stored value."""
        if instance is None:  # if called via class
            return self
        try:
            value = getattr(instance, self._local_var_name)
        except AttributeError:
            value = None
        if value is None:
            # lazily create and store an empty value so callers can append to it
            value = ExtensionLocalValue()
            setattr(instance, self._local_var_name, value)
        return value

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:
        """Read value from node; empty ExtensionLocalValue if the element does not exist."""
        try:
            extension_nodes = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
        except ElementNotFoundError:
            return ExtensionLocalValue()
        return ExtensionLocalValue(extension_nodes[:])

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        The Extension Element is only added if there is at least one element available in local list.
        """
        try:
            extension_local_value = getattr(instance, self._local_var_name)
        except AttributeError:
            return  # nothing to add
        if extension_local_value is None or len(extension_local_value) == 0:
            return  # nothing to add
        sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
        # copy children without their parent so the original tree is not modified
        sub_node.extend(xml_utils.copy_node_wo_parent(x) for x in extension_local_value)
class AnyEtreeNodeProperty(_ElementBase):
    """Represents an Element that contains xml tree of any kind."""

    def __init__(self, sub_element_name: etree_.QName | None, is_optional: bool = False):
        super().__init__(sub_element_name, NullConverter, default_py_value=None,
                         is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node; None if the element does not exist, else its children."""
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
        except ElementNotFoundError:
            return None
        return sub_node[:]  # all children

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node."""
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if py_value is None:
            if self.is_optional:
                sub_node = node.find(self._sub_element_name)
                if sub_node is not None:
                    node.remove(sub_node)
            elif MANDATORY_VALUE_CHECKING:
                raise ValueError(f'mandatory value {self._sub_element_name} missing')
        else:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
            if isinstance(py_value, etree_._Element):  # noqa: SLF001
                # a single element is appended as-is
                sub_node.append(py_value)
            else:
                # otherwise an iterable of elements is expected
                sub_node.extend(py_value)
class SubElementProperty(_ElementBase):
    """Uses a value that has an "as_etree_node" method.

    The value class is expected to provide as_etree_node(), from_node() and
    value_class_from_node() (see XMLTypeBase).
    """

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 value_class: type[XMLTypeBase],
                 default_py_value: Any | None = None,
                 implied_py_value: Any | None = None,
                 is_optional: bool = False):
        super().__init__(sub_element_name, ClassCheckConverter(value_class), default_py_value, implied_py_value,
                         is_optional)
        self.value_class = value_class

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Honors an xsi:type hint via value_class_from_node; returns the default value
        if the sub element does not exist.
        """
        value = self._default_py_value
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            value_class = self.value_class.value_class_from_node(sub_node)
            value = value_class.from_node(sub_node)
        except ElementNotFoundError:
            pass
        return value

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        :raises ValueError: if the value is mandatory but missing (and mandatory checking is enabled).
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            py_value = self._default_py_value
        if py_value is None:
            if not self.is_optional:
                # fix: the former inner 'and not self.is_optional' re-check was redundant here
                if MANDATORY_VALUE_CHECKING:
                    raise ValueError(f'mandatory value {self._sub_element_name} missing')
                # mandatory checking disabled: still emit an empty element to keep the structure intact
                etree_.SubElement(node, self._sub_element_name, nsmap=node.nsmap)
            # NOTE(review): an existing sub element is not removed when value is None and optional —
            # confirm updates always operate on freshly created nodes.
        else:
            sub_node = py_value.as_etree_node(self._sub_element_name, node.nsmap)
            if hasattr(py_value, 'NODETYPE') and hasattr(self.value_class, 'NODETYPE') \
                    and py_value.NODETYPE != self.value_class.NODETYPE:
                # instance is a specialization of the declared class: record its type as xsi:type
                sub_node.set(QN_TYPE, docname_from_qname(py_value.NODETYPE, node.nsmap))
            node.append(sub_node)
class ContainerProperty(_ElementBase):
    """ContainerProperty supports xsi:type information from xml and instantiates value accordingly."""

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 value_class: type[ContainerBase],
                 cls_getter: Callable[[etree_.QName], type],
                 ns_helper: NamespaceHelper,
                 is_optional: bool = False):
        """Construct a ContainerProperty.

        :param sub_element_name: see doc of base class
        :param value_class: Default value class if no xsi:type is found
        :param cls_getter: function that returns a class for xsi:type QName
        :param ns_helper: name space helper that knows current prefixes
        :param is_optional: see doc of base class
        """
        super().__init__(sub_element_name, ClassCheckConverter(value_class), is_optional=is_optional)
        self.value_class = value_class
        self._cls_getter = cls_getter
        self._ns_helper = ns_helper

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        If the sub element carries an xsi:type attribute, the class returned by
        cls_getter for that QName is instantiated, otherwise the default value class.
        """
        value = self._default_py_value
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            node_type_str = sub_node.get(QN_TYPE)
            if node_type_str is not None:
                node_type = text_to_qname(node_type_str, node.nsmap)
                value_class = self._cls_getter(node_type)
            else:
                value_class = self.value_class
            value = value_class.from_node(sub_node)
        except ElementNotFoundError:
            pass
        return value

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        :raise ValueError: if the value is None although the element is mandatory
                           and MANDATORY_VALUE_CHECKING is enabled.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            py_value = self._default_py_value
        if py_value is None:
            if not self.is_optional:
                # fix: the old code redundantly re-tested "not self.is_optional" here,
                # although this branch is only reached for non-optional elements
                if MANDATORY_VALUE_CHECKING:
                    raise ValueError(f'mandatory value {self._sub_element_name} missing')
                # mandatory, but checking disabled => write an empty sub element
                etree_.SubElement(node, self._sub_element_name, nsmap=node.nsmap)
        else:
            # replace a possibly existing sub element with a freshly built one
            self.remove_sub_element(node)
            sub_node = py_value.mk_node(self._sub_element_name, self._ns_helper)
            if py_value.NODETYPE != self.value_class.NODETYPE:
                # instance is a specialization of the declared class => write xsi:type
                sub_node.set(QN_TYPE, docname_from_qname(py_value.NODETYPE, node.nsmap))
            node.append(sub_node)
class _ElementListProperty(_ElementBase, ABC):
    """Base descriptor for list-valued element properties; the stored value is always a list."""

    def __get__(self, instance, owner):  # noqa: ANN001
        """Return a python value, uses the locally stored value."""
        if instance is None:  # accessed via the class, not an instance
            return self
        if not hasattr(instance, self._local_var_name):
            # first access: lazily create the empty list on the instance
            setattr(instance, self._local_var_name, [])
        return getattr(instance, self._local_var_name)

    def __set__(self, instance, py_value):
        # tuples are accepted for convenience, but stored as a mutable list
        value = list(py_value) if isinstance(py_value, tuple) else py_value
        super().__set__(instance, value)

    def init_instance_data(self, instance: Any):
        """Initialize the instance attribute with an empty list."""
        setattr(instance, self._local_var_name, [])
class SubElementListProperty(_ElementListProperty):
    """SubElementListProperty is a list of values that have an "as_etree_node" method.

    Used if maxOccurs="Unbounded" in BICEPS_ParticipantModel.
    """

    def __init__(self, sub_element_name: etree_.QName | None,
                 value_class: type[XMLTypeBase],
                 is_optional: bool = True):
        super().__init__(sub_element_name, ListConverter(ClassCheckConverter(value_class)), is_optional=is_optional)
        self.value_class = value_class

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Returns one value object per matching sub element (possibly an empty list).
        """
        objects = []
        try:
            nodes = node.findall(self._sub_element_name)
            for _node in nodes:
                # honor a possible xsi:type: the declared class chooses the concrete class
                value_class = self.value_class.value_class_from_node(_node)
                value = value_class.from_node(_node)
                objects.append(value)
            return objects
        except ElementNotFoundError:
            # return whatever was collected before the lookup failed
            return objects

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        NOTE(review): existing sub elements are not removed before appending —
        presumably callers always write into a freshly created node; verify.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = self._default_py_value
        if py_value is not None:
            for val in py_value:
                sub_node = val.as_etree_node(self._sub_element_name, node.nsmap)
                if hasattr(val, 'NODETYPE') and hasattr(self.value_class, 'NODETYPE') \
                        and val.NODETYPE != self.value_class.NODETYPE:
                    # set xsi type
                    sub_node.set(QN_TYPE, docname_from_qname(val.NODETYPE, node.nsmap))
                node.append(sub_node)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__} datatype {self.value_class.__name__} in subelement {self._sub_element_name}'
class ContainerListProperty(_ElementListProperty):
    """ContainerListProperty is a property with a list of elements, each supports xsi:type information.

    Used if maxOccurs="Unbounded" in BICEPS_ParticipantModel.
    """

    def __init__(self, sub_element_name: etree_.QName | None,  # noqa: PLR0913
                 value_class: type[ContainerBase],
                 cls_getter: Callable[[etree_.QName], type],
                 ns_helper: NamespaceHelper,
                 is_optional: bool = True):
        """Construct a list of Containers.

        :param sub_element_name: see doc of base class
        :param value_class: Default value class if no xsi:type is found
        :param cls_getter: function that returns a class for xsi:type QName
        :param ns_helper: name space helper that knows current prefixes
        :param is_optional: see doc of base class
        """
        super().__init__(sub_element_name, ListConverter(ClassCheckConverter(value_class)), is_optional=is_optional)
        self.value_class = value_class
        self._cls_getter = cls_getter
        self._ns_helper = ns_helper

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        For each matching sub element the concrete class is chosen via its
        xsi:type attribute (cls_getter), falling back to the default value class.
        """
        objects = []
        try:
            nodes = node.findall(self._sub_element_name)
            for _node in nodes:
                node_type_str = _node.get(QN_TYPE)
                if node_type_str is not None:
                    node_type = text_to_qname(node_type_str, _node.nsmap)
                    value_class = self._cls_getter(node_type)
                else:
                    value_class = self.value_class
                value = value_class.from_node(_node)
                objects.append(value)
            return objects
        except ElementNotFoundError:
            # return whatever was collected before the lookup failed
            return objects

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        All existing matching sub elements are removed first, then new ones are appended.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = self._default_py_value
        # remove all existing sub nodes ...
        nodes = node.findall(self._sub_element_name)
        for _node in nodes:
            node.remove(_node)
        # ... and create new ones
        if py_value is not None:
            for val in py_value:
                sub_node = val.mk_node(self._sub_element_name, self._ns_helper)
                if val.NODETYPE != self.value_class.NODETYPE:
                    # set xsi type
                    sub_node.set(QN_TYPE, docname_from_qname(val.NODETYPE, node.nsmap))
                node.append(sub_node)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__} datatype {self.value_class.__name__} in subelement {self._sub_element_name}'
class SubElementTextListProperty(_ElementListProperty):
    """SubElementTextListProperty represents a list of strings.

    On xml side every string is a text of a sub element.
    """

    def __init__(self, sub_element_name: etree_.QName | None,
                 value_class: Any,
                 is_optional: bool = True):
        super().__init__(sub_element_name, ListConverter(ClassCheckConverter(value_class)), is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Returns the text of every matching sub element (possibly an empty list).
        """
        try:
            # idiom fix: comprehension instead of a manual append loop
            return [sub_node.text for sub_node in node.findall(self._sub_element_name)]
        except ElementNotFoundError:
            return []

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        :raise TypeError: if a list entry cannot be used as element text.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            py_value = self._default_py_value
        if py_value is None or len(py_value) == 0:
            # NOTE(review): existing sub elements are intentionally kept when the
            # list is empty or unset — confirm this is the wanted behavior
            return
        # remove all existing sub nodes ...
        for old_node in node.findall(self._sub_element_name):
            node.remove(old_node)
        # ... and create new ones
        for val in py_value:
            child = etree_.SubElement(node, self._sub_element_name)
            try:
                child.text = val
            except TypeError as ex:
                # re-raise with better info about data
                raise TypeError(f'{ex} in {self}') from ex

    def __str__(self) -> str:
        return f'{self.__class__.__name__} in sub-element {self._sub_element_name}'
class SubElementStringListProperty(SubElementTextListProperty):
    """SubElementStringListProperty represents a list of strings.

    On xml side every string is a text of a sub element.
    """

    def __init__(self, sub_element_name: etree_.QName | None,
                 is_optional: bool = True):
        # the value class is fixed to plain strings
        super().__init__(sub_element_name, value_class=str, is_optional=is_optional)
class SubElementHandleRefListProperty(SubElementStringListProperty):
    """Represents a list of Handles.

    Behaves exactly like SubElementStringListProperty; the subclass only adds semantics.
    """
class SubElementWithSubElementListProperty(SubElementProperty):
    """Class represents an optional Element that is only present if its value class is not empty.

    value_class must have an is_empty method.
    """

    def __init__(self, sub_element_name: etree_.QName | None,
                 default_py_value: Any,
                 value_class: type[XMLTypeBase]):
        # NOTE(review): assert is stripped under "python -O"; consider raising TypeError instead
        assert hasattr(value_class, 'is_empty')
        super().__init__(sub_element_name,
                         default_py_value=default_py_value,
                         value_class=value_class)

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        The sub element is only written when the value is neither None nor empty.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            py_value = self._default_py_value
        if py_value is None or py_value.is_empty():
            return
        self.remove_sub_element(node)
        node.append(py_value.as_etree_node(self._sub_element_name, node.nsmap))

    def __set__(self, instance: Any, py_value: Any):
        """Only accept complete value objects; members must be modified via the child instead."""
        if isinstance(py_value, self.value_class):
            super().__set__(instance, py_value)
        else:
            raise ApiUsageError(f'do not set {self._sub_element_name} directly, use child member!')
class AnyEtreeNodeListProperty(_ElementListProperty):
    """class represents a list of lxml elements."""

    def __init__(self, sub_element_name: etree_.QName | None, is_optional: bool = True):
        super().__init__(sub_element_name,
                         ListConverter(ClassCheckConverter(xml_utils.LxmlElement)),
                         is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Returns the child elements of the sub element as a (shallow) list.
        """
        objects = []
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            if sub_node is None:
                return []
            # slicing an lxml element yields its children as a list
            return sub_node[:]
        except ElementNotFoundError:
            return objects

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        An empty/unset optional value removes the sub element entirely.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:
            py_value = None
        if py_value is None or len(py_value) == 0:
            if self.is_optional:
                self.remove_sub_element(node)
            return
        sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
        sub_node.extend(py_value)

    def __str__(self) -> str:
        return f'{self.__class__.__name__} in subelement {self._sub_element_name}'
class NodeTextListProperty(_ElementListProperty):
    """The handled data is a list of words (string without whitespace). The xml text is the joined list of words."""

    def __init__(self, sub_element_name: etree_.QName | None,
                 value_class: Any,
                 is_optional: bool = False):
        super().__init__(sub_element_name, ListConverter(ClassCheckConverter(value_class)),
                         is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Returns the whitespace-separated words of the sub element text,
        or the default python value if the element (or its text) is missing.
        """
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            if sub_node.text is not None:
                return sub_node.text.split()
        except ElementNotFoundError:
            pass
        return self._default_py_value

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        :raise ValueError: if the value is None although the element is mandatory
                           and MANDATORY_VALUE_CHECKING is enabled.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if py_value is None:
            if not self._sub_element_name:
                # update text of this element
                node.text = ''
            elif self.is_optional:
                sub_node = node.find(self._sub_element_name)
                if sub_node is not None:
                    node.remove(sub_node)
            else:
                # fix: this branch is only reached for non-optional elements,
                # the old code redundantly re-tested "not self.is_optional" here
                if MANDATORY_VALUE_CHECKING:
                    raise ValueError(f'mandatory value {self._sub_element_name} missing')
                sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
                sub_node.text = None
        else:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
            sub_node.text = ' '.join(py_value)
class NodeTextQNameListProperty(_ElementListProperty):
    """The handled data is a list of qualified names.

    The xml text is the joined list of qnames in the form prefix:localname.
    """

    def __init__(self, sub_element_name: etree_.QName | None,
                 is_optional: bool = False):
        super().__init__(sub_element_name, ListConverter(ClassCheckConverter(etree_.QName)),
                         is_optional=is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Returns a list of QNames resolved against the sub element's namespace map.
        """
        result = []
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            if sub_node is None:
                return None
            if sub_node.text is not None:
                for q_name_string in sub_node.text.split():
                    result.append(text_to_qname(q_name_string, sub_node.nsmap))
            return result
        except ElementNotFoundError:
            pass
        return self._default_py_value or result

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        :raise ValueError: if the value is None although the element is mandatory
                           and MANDATORY_VALUE_CHECKING is enabled.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = None
        if py_value is None:
            if not self._sub_element_name:
                # update text of this element
                node.text = ''
            elif self.is_optional:
                sub_node = node.find(self._sub_element_name)
                if sub_node is not None:
                    node.remove(sub_node)
            else:
                # fix: this branch is only reached for non-optional elements,
                # the old code redundantly re-tested "not self.is_optional" here
                if MANDATORY_VALUE_CHECKING:
                    raise ValueError(f'mandatory value {self._sub_element_name} missing')
                sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
                sub_node.text = None
        else:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
            tmp = []
            for q_name in py_value:
                # by setting each qname as text, namespace prefixes are generated automatically
                sub_node.text = q_name
                tmp.append(sub_node.text)
            sub_node.text = ' '.join(tmp)
class DateOfBirthProperty(_ElementBase):
    """DateOfBirthProperty represents the DateOfBirth type of BICEPS.

    <xsd:simpleType>
        <xsd:union memberTypes="xsd:dateTime xsd:date xsd:gYearMonth xsd:gYear"/>
    </xsd:simpleType>
    xsd:dateTime is YYYY-MM-DDThh:mm:ss.sss
    xsd:date is YYYY-MM-DD format. All components are required
    xsd:gYearMonth is YYYY-MM
    xsd:gYear is YYYY
    If the timepoint of birth matters, the value SHALL be populated with a time zone.
    Time zone info can be provided:
       UTC can be specified by appending a Z character, e.g. 2002-09-24Z
       other timezones by adding a positive or negative time behind the date, e.g. 2002.09-24-06:00, 2002-09-24+06:00
    xsd:time is hh:mm:ss format, e.g. 9:30:10, 9:30:10.5. All components are required.
    Time zone handling is identical to date type
    The corresponding Python types are datetime.Date (=> not time point available)
    or datetime.Datetime (with time point attribute).
    """

    def __init__(self, sub_element_name: etree_.QName | None,
                 default_py_value: Any = None,
                 implied_py_value: Any = None,
                 is_optional: bool = True):
        super().__init__(sub_element_name, ClassCheckConverter(datetime, date),
                         default_py_value, implied_py_value, is_optional)

    def get_py_value_from_node(self, instance: Any, node: xml_utils.LxmlElement) -> Any:  # noqa: ARG002
        """Read value from node.

        Returns None if the sub element is missing or could not be found.
        """
        try:
            sub_node = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=False)
            if sub_node is not None:
                date_string = sub_node.text
                return isoduration.parse_date_time(date_string)
        except ElementNotFoundError:
            pass
        return None

    def update_xml_value(self, instance: Any, node: xml_utils.LxmlElement):
        """Write value to node.

        A None value removes the sub element; strings are written as-is,
        date/datetime objects are serialized to an iso string first.
        """
        try:
            py_value = getattr(instance, self._local_var_name)
        except AttributeError:  # set to None (it is in the responsibility of the called method to do the right thing)
            py_value = self._default_py_value
        if py_value is None:
            self.remove_sub_element(node)
            return
        date_string = py_value if isinstance(py_value, str) else self._mk_datestring(py_value)
        sub_element = self._get_element_by_child_name(node, self._sub_element_name, create_missing_nodes=True)
        sub_element.text = date_string

    @staticmethod
    def mk_value_object(date_string: str) -> isoduration.DateTypeUnion | None:
        """Parse isoduration string."""
        return isoduration.parse_date_time(date_string)

    @staticmethod
    def _mk_datestring(date_object: date | datetime | isoduration.GYear | isoduration.GYearMonth | None) -> str:
        """Create isoduration string."""
        return isoduration.date_time_string(date_object)
| Draegerwerk/sdc11073 | src/sdc11073/xml_types/xml_structure.py | xml_structure.py | py | 62,864 | python | en | code | 27 | github-code | 13 |
31738949573 | # Sieve of Eratosthenes
# Code by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
# Edited by Lucas Saldyt, 25 Sep 2018
def gen_numbers(start=2, primes=True):
    """Generate an infinite sequence of prime (primes=True) or composite
    (primes=False) numbers that are >= start.

    Incremental sieve of Eratosthenes: D maps each upcoming composite to the
    list of primes witnessing its compositeness. This is memory efficient, as
    the sieve is not "run forward" indefinitely, but only as long as required
    by the current number being tested.
    """
    D = {}
    # Bug fix: the sieve must always scan from 2. The old code started the
    # scan at `start`, which misclassified composites whose prime factors are
    # smaller than `start` (e.g. gen_primes(4) yielded 4 and 6 as "primes").
    # Instead, scan from 2 and suppress yields below `start`.
    q = 2
    while True:
        if q not in D:
            # q is a new prime; mark its first multiple that isn't
            # already marked in previous iterations.
            if primes and q >= start:
                yield q
            D[q * q] = [q]
        else:
            # q is composite. D[q] is the list of primes that divide it.
            # Since we've reached q, we no longer need it in the map, but
            # we'll mark the next multiples of its witnesses to prepare for
            # larger numbers.
            if not primes and q >= start:
                yield q
            for p in D[q]:
                D.setdefault(p + q, []).append(p)
            del D[q]
        q += 1


def gen_primes(start=2):
    """Yield all primes >= start, smallest first."""
    yield from gen_numbers(start, primes=True)


def gen_composites(start=2):
    """Yield all composite numbers >= start, smallest first."""
    yield from gen_numbers(start, primes=False)
import itertools


def take(l, N):
    """Return the first *N* items of iterable *l* as a list.

    Replaces the former lambda assignment (PEP 8 E731) with a proper def;
    the call signature is unchanged.
    """
    return list(itertools.islice(l, N))
| LSaldyt/sence | keras/sieve.py | sieve.py | py | 1,575 | python | en | code | 3 | github-code | 13 |
import time  # for sleep(); not strictly required otherwise
import RPi.GPIO as GPIO  # library for GPIO access

PIN = 4  # GPIO number the servo signal line is connected to
GPIO.setmode(GPIO.BCM)  # address ports by GPIO (BCM) number
GPIO.setup(PIN, GPIO.OUT)  # configure the pin as an output
servo = GPIO.PWM(PIN, 50)  # PWM at 50 Hz (standard servo frame rate)
servo.start(3.0)  # start PWM with duty cycle 3.0 (locked position)
def servo_lock():
    """Move the servo to the locked position (0 degrees)."""
    # servo.start(2.5)  # start PWM with duty cycle 2.5
    servo.ChangeDutyCycle(3.0)  # move the servo to 0 degrees -> locked
    print("locked! (degree is 0)")
    time.sleep(1)  # give the servo time to move
    # servo.stop()  # stop controlling the servo
def servo_unlock():
    """Move the servo to the unlocked position (30 degrees)."""
    # servo.start(4.08)  # start PWM with duty cycle 4.08
    servo.ChangeDutyCycle(6.9)  # move the servo to 30 degrees -> unlocked
    print("unlocked! (degree is 30)")
    time.sleep(1)  # give the servo time to move
    # servo.stop()  # stop controlling the servo
def main():
    """Keyboard loop: u/o unlocks, l/c locks, any other key exits."""
    while True:
        # prompt (Japanese): unlock=u,o || lock=l,c || quit=anything else
        x = input("アンロック=u,o || ロック=l,c || 終了-左記以外:")  # keyboard input
        if x == "u" or x == "o":
            servo_unlock()  # call the servo unlock helper
        elif x == "l" or x == "c":
            servo_lock()  # call the servo lock helper
        else:
            break  # end this program
        time.sleep(1)  # give the servo time to move
    servo.stop()  # stop controlling the servo
    GPIO.cleanup()  # release the GPIO ports
if __name__ == '__main__':
main() | jphacks/D_2002 | controller/watcher/src/control.py | control.py | py | 1,672 | python | ja | code | 4 | github-code | 13 |
from flask import Flask, render_template, request, jsonify
import requests

OPENAI_API_URL = "https://api.openai.com/v1/chat/completions"
# SECURITY: a live-looking OpenAI API key is hard-coded and committed here.
# Revoke this key immediately and load it from an environment variable or a
# secret store instead of keeping it in source control.
OPENAI_API_KEY = "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"

app = Flask(__name__)
@app.route('/')
def index():
    """Serve the single-page front end."""
    return render_template('index.html')


MODEL = "gpt-3.5-turbo"  # chat completion model requested from the OpenAI API
@app.route('/process_data', methods=['POST'])
def process_data():
    """Forward the two client text fields to the OpenAI chat completion API.

    Expects a JSON body with optional string keys ``field1`` and ``field2``;
    returns ``{"result": ...}`` containing the model answer(s) or an error text.
    """
    data = request.json

    # concatenate the non-blank fields into a single user message
    user_message = ""
    if 'field1' in data and data['field1'].strip():
        user_message += f"{data['field1']}\n"
    if 'field2' in data and data['field2'].strip():
        user_message += f"{data['field2']}"

    if not user_message:
        # both fields empty -> nothing to send to the API
        response_data = {
            'result': "Порожні поля. Введіть дані у поля field1 або field2."
        }
        return jsonify(response_data)

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_message}
    ]

    headers = {
        "Authorization": f"Bearer {OPENAI_API_KEY}",
    }

    payload = {
        "model": MODEL,
        "messages": messages,
        "temperature": 0.9,  # creativity of the answer (API default is 1.0)
        "max_tokens": 1000,  # upper bound for the answer length in tokens
    }

    try:
        # fix: the old call had no timeout and could block the worker forever
        response = requests.post(OPENAI_API_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException:
        # network failure -> same user-facing error as a non-200 response
        return jsonify({'result': ["Помилка під час запиту до OpenAI API."]})

    if response.status_code == 200:
        body = response.json()
        result = [choice["message"]["content"] for choice in body["choices"]]
    else:
        result = ["Помилка під час запиту до OpenAI API."]

    response_data = {
        'result': result
    }
    return jsonify(response_data)
if __name__ == '__main__':
app.run(debug=True) | Version40/flask_openai | app.py | app.py | py | 1,870 | python | uk | code | 0 | github-code | 13 |
26124182511 | from datetime import date, datetime, timedelta
from django.db import models
from django.db.models.fields import DateField
from django.db.models.functions import Cast, ExtractMonth
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from .models import Cycles, Projects, Tasks
from .serializers import CyclesSerializer, ProjectsSerializer, TasksSerializer
# Create your views here.
class ProjectsView(ModelViewSet):  # pylint: disable=R0901
    """CRUD endpoints for the requesting user's active projects."""

    serializer_class = ProjectsSerializer
    permission_classes = [IsAuthenticated]
    lookup_field = 'public_id'

    def get_queryset(self):
        """Restrict detail/update/delete operations to active projects owned by the user."""
        user = self.request.user
        self.queryset = Projects.objects.filter(
            created_by=user,
            is_active=True
        )
        return self.queryset

    def list(self, request):
        """Return all active projects of the requesting user.

        NOTE(review): filters again instead of reusing get_queryset();
        the result is the same, but the duplication could be removed.
        """
        query_set = Projects.objects.filter(
            created_by=request.user,
            is_active=True
        ).all()
        return Response(
            self.serializer_class(
                query_set,
                many=True
            ).data,
            status=status.HTTP_200_OK
        )

    def create(self, request, *args, **kwargs):
        """Create a project, forcing ownership to the requesting user."""
        # inject the authenticated user so clients cannot create for someone else
        request.data['user'] = request.user.id
        serializer = self.serializer_class(
            data=request.data
        )
        if serializer.is_valid():
            serializer.save()
            return Response(
                data=serializer.data,
                status=status.HTTP_201_CREATED
            )
        return Response(
            data=serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
class TasksView(ModelViewSet):  # pylint: disable=R0901
    """CRUD endpoints for the requesting user's active tasks."""

    serializer_class = TasksSerializer
    permission_classes = [IsAuthenticated]
    lookup_field = "public_id"

    def get_queryset(self):
        """Restrict all operations to active tasks owned by the requesting user."""
        self.queryset = Tasks.objects.filter(
            created_by=self.request.user,
            is_active=True,
        )
        return self.queryset

    def create(self, request, *args, **kwargs):
        """Create a task, forcing ownership to the requesting user."""
        # inject the authenticated user so clients cannot create for someone else
        request.data['user'] = request.user.id
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(
                data=serializer.errors,
                status=status.HTTP_400_BAD_REQUEST,
            )
        serializer.save()
        return Response(
            data=serializer.data,
            status=status.HTTP_201_CREATED,
        )
class CyclesView(ModelViewSet):  # pylint: disable=R0901
    """CRUD endpoints for work cycles, with overlap validation on create/update.

    A cycle's dt_start/dt_end must not fall inside another active cycle
    of the same task.
    """

    serializer_class = CyclesSerializer
    permission_classes = [IsAuthenticated]
    lookup_field = "public_id"

    def get_queryset(self):
        """Restrict all operations to active cycles owned by the requesting user."""
        user = self.request.user
        self.queryset = Cycles.objects.filter(
            created_by=user,
            is_active=True
        )
        return self.queryset

    def create(self, request, *args, **kwargs):
        """Create a cycle after checking it does not overlap an existing one."""
        # inject the authenticated user so clients cannot create for someone else
        request.data['user'] = request.user.id
        serializer = self.serializer_class(
            data=request.data
        )
        if serializer.is_valid():
            # validate if dt_start or dt_end is contained between
            # another cycle of the corresponding task
            data = serializer.validated_data
            dt_start_validation = Cycles.objects.filter(
                is_active=True,
                created_by=data['user'],
                task=data['task'],
                dt_start__lte=data['dt_start'],
                dt_end__gte=data['dt_start']
            ).all()
            if len(dt_start_validation) > 0:
                # error message (pt-BR): start date already inside another interval
                return Response(
                    data={
                        'message': "A data de início já "
                        "está contida em outro intervalo"
                    },
                    status=status.HTTP_400_BAD_REQUEST
                )
            if 'dt_end' in data and data.get('dt_end') is not None:
                dt_end_validation = Cycles.objects.filter(
                    is_active=True,
                    created_by=data['user'],
                    task=data['task'],
                    dt_start__lte=data['dt_end'],
                    dt_end__gte=data['dt_end']
                ).all()
                if len(dt_end_validation) > 0:
                    # error message (pt-BR): end date already inside another interval
                    return Response(
                        data={
                            'message': "A data de término já está "
                            "contida em outro intervalo"
                        },
                        status=status.HTTP_400_BAD_REQUEST
                    )
            serializer.save()
            return Response(
                data=serializer.data,
                status=status.HTTP_201_CREATED
            )
        return Response(
            data=serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )

    def update(self, request, *args, **kwargs):
        """Update a cycle after checking the new dates do not overlap a sibling cycle.

        The overlap queries exclude the instance itself, otherwise an unchanged
        date would always collide with its own interval.
        """
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(
            instance,
            data=request.data,
            partial=partial
        )
        if serializer.is_valid():
            data = serializer.validated_data
            if 'dt_start' in data:
                dt_start_validation = Cycles.objects.filter(
                    is_active=True,
                    created_by=instance.created_by,
                    task=instance.task,
                    dt_start__lte=data['dt_start'],
                    dt_end__gte=data['dt_start'],
                ).exclude(id=instance.id).all()
                if len(dt_start_validation) > 0:
                    # error message (pt-BR): start date already inside another interval
                    return Response(
                        data={
                            'message': 'A data de início já '
                            'está contida em outro intervalo'
                        },
                        status=status.HTTP_400_BAD_REQUEST
                    )
            if 'dt_end' in data and data.get('dt_end') is not None:
                dt_end_validation = Cycles.objects.filter(
                    is_active=True,
                    created_by=instance.created_by,
                    task=instance.task,
                    dt_start__lte=data['dt_end'],
                    dt_end__gte=data['dt_end']
                ).exclude(id=instance.id).all()
                if len(dt_end_validation) > 0:
                    # error message (pt-BR): end date already inside another interval
                    return Response(
                        data={
                            'message': 'A data de término já '
                            'está contida em outro intervalo'
                        },
                        status=status.HTTP_400_BAD_REQUEST
                    )
            self.perform_update(serializer)
            return Response(
                data=serializer.data,
                status=status.HTTP_200_OK
            )
        return Response(
            data=serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
class DurationRankingView(APIView):
    """Top-5 tasks and projects ranked by total tracked time (sum of cycle durations)."""

    permission_classes = [IsAuthenticated]

    def get(self, request):
        """Return chart-ready series/labels for the user's longest tasks and projects."""
        user = request.user
        #TODO detect period (week, month, year, all_time) and filter by it
        # sum of (dt_end - dt_start) per task, limited to well-formed, active cycles
        tasks_summary = Tasks.objects.\
            filter(
                created_by=user,
                is_active=True,
                cycles__created_by=user,
                cycles__dt_end__gte=models.F('cycles__dt_start'),
                cycles__is_active=True,
                project_id__is_active=True
            ).\
            annotate(
                task_name=models.F('name')
            ).\
            values('task_name').\
            annotate(
                interval=models.Sum(
                    models.F('cycles__dt_end') - models.F('cycles__dt_start')
                )
            ).\
            values('task_name', 'interval').\
            order_by('-interval')[0:5]
        # same aggregation one level up: total tracked time per project
        projects_summary = Projects.objects.\
            filter(
                created_by=user,
                is_active=True,
                tasks__created_by=user,
                tasks__is_active=True,
                tasks__cycles__created_by=user,
                tasks__cycles__dt_end__gte=models.F('tasks__cycles__dt_start'),
                tasks__cycles__is_active=True
            ).\
            annotate(
                project_name=models.F('name')
            ).\
            values('project_name').\
            annotate(
                interval=models.Sum(
                    models.F('tasks__cycles__dt_end') -
                    models.F('tasks__cycles__dt_start')
                )
            ).\
            values('project_name', 'interval').\
            order_by('-interval')[0:5]
        # convert timedeltas to whole seconds for the charting front end
        data = {
            'projects': {
                'series': [
                    int(q['interval'].total_seconds())
                    for q in projects_summary
                ],
                'labels': [q['project_name'] for q in projects_summary]
            },
            'tasks': {
                'series': [
                    int(q['interval'].total_seconds())
                    for q in tasks_summary
                ],
                'labels': [q['task_name'] for q in tasks_summary]
            }
        }
        return Response(
            data=data,
            status=status.HTTP_200_OK
        )
class OpenTasksView(APIView):
    """List tasks that currently have a running (not yet ended) cycle."""

    permission_classes = [IsAuthenticated]

    def get(self, request):
        """Return the user's open tasks with their identifying handles."""
        user = request.user
        #TODO detect period (week, month, year, all_time) and filter by it
        open_tasks = (
            Tasks.objects
            .filter(
                created_by=user,
                cycles__created_by=user,
                is_active=True,
                cycles__is_active=True,
                cycles__dt_end__isnull=True,
                project_id__is_active=True,
            )
            .annotate(project_public_id=models.F('project_id__public_id'))
            .values('public_id', 'name', 'project_public_id')
        )
        payload = [
            {
                'public_id': row['public_id'],
                'name': row['name'],
                'project_public_id': row['project_public_id'],
            }
            for row in open_tasks
        ]
        return Response(
            data=payload,
            status=status.HTTP_200_OK,
        )
class LastModifiedTasks(APIView):
    """List the five most recently modified tasks (by their cycles' modification time)."""

    permission_classes = [IsAuthenticated]

    def get(self, request):
        """Return the user's five latest tasks ordered by last cycle modification."""
        user = request.user
        latest_tasks = (
            Tasks.objects
            .filter(
                created_by=user,
                cycles__created_by=user,
                is_active=True,
                cycles__is_active=True,
                project_id__is_active=True,
            )
            .annotate(
                project_public_id=models.F('project_id__public_id'),
                last_modified_on=models.Max(models.F('cycles__modified_on')),
            )
            .values(
                'public_id',
                'name',
                'project_public_id',
                'last_modified_on',
            )
            .order_by('-last_modified_on')[0:5]
        )
        payload = [
            {
                'public_id': row['public_id'],
                'name': row['name'],
                'project_public_id': row['project_public_id'],
                'last_modified_on': row['last_modified_on'],
            }
            for row in latest_tasks
        ]
        return Response(
            data=payload,
            status=status.HTTP_200_OK,
        )
class HistogramView(APIView):
    """Tracked-time histograms for the week, month and year around a target date.

    Returns, for each period, chart series plus the current total and the
    total of the corresponding previous period for comparison.
    """

    permission_classes = [IsAuthenticated]

    def get(self, request):  # pylint: disable=R0914,R0912
        """Build week/month/year histograms of tracked seconds for the user."""
        #TODO detect period
        user = request.user
        # base queryset: only well-formed, active cycles of active tasks/projects
        cycle_base_query = Cycles.objects.\
            filter(
                dt_start__isnull=False,
                created_by=user,
                is_active=True,
                dt_end__gte=models.F('dt_start'),
                task_id__is_active=True,
                task_id__project_id__is_active=True
            )
        # target date comes from the query string; defaults to today
        date_target = request.GET.get('date_target', date.today())
        if isinstance(date_target, str):
            date_target = datetime.strptime(date_target, '%Y-%m-%d')
        current_week = date_target.isocalendar()[1]
        current_month = date_target.month
        current_year = date_target.year
        # Week: seconds tracked per day of the target ISO week
        #TODO convert to the desired timezone
        week_query = cycle_base_query.\
            filter(
                dt_start__week=current_week,
                dt_start__year=current_year
            ).\
            annotate(day=Cast('dt_start', DateField())).\
            values('day').\
            annotate(
                interval=models.Sum(
                    models.F('dt_end') -
                    models.F('dt_start')
                )
            ).\
            values('day', 'interval')
        week_agg_total = week_query.aggregate(
            total=models.Sum(
                models.F('interval')
            )
        )['total']
        if week_agg_total is not None:
            week_total = int(week_agg_total.total_seconds())
        else:
            week_total = 0
        # Month: seconds tracked per day of the target month
        #TODO convert to the desired timezone
        month_query = cycle_base_query.\
            filter(
                dt_start__month=current_month,
                dt_start__year=current_year
            ).\
            annotate(
                day=Cast('dt_start', DateField())
            ).\
            values('day').\
            annotate(
                interval=models.Sum(
                    models.F('dt_end') -
                    models.F('dt_start')
                )
            ).\
            values('day', 'interval')
        month_agg_total = month_query.aggregate(
            total=models.Sum(
                models.F('interval')
            )
        )['total']
        if month_agg_total is not None:
            month_total = int(month_agg_total.total_seconds())
        else:
            month_total = 0
        # Year: seconds tracked per month of the target year
        #TODO convert to the desired timezone
        year_query = cycle_base_query.\
            filter(
                dt_start__year=current_year
            ).\
            annotate(
                month=ExtractMonth('dt_start')
            ).\
            values('month').\
            annotate(
                interval=models.Sum(
                    models.F('dt_end') -
                    models.F('dt_start')
                )
            ).\
            values('month', 'interval')
        year_agg_total = year_query.aggregate(
            total=models.Sum(
                models.F('interval')
            )
        )['total']
        if year_agg_total is not None:
            # NOTE(review): unlike week/month, this is not wrapped in int();
            # year_total is a float here — confirm the front end accepts that
            year_total = year_agg_total.total_seconds()
        else:
            year_total = 0
        # previous week's total, for comparison with week_total
        #TODO deal when it is the first week of the year
        #TODO convert to the desired timezone
        last_week = date_target - timedelta(weeks=1)
        last_week_query = cycle_base_query.\
            filter(
                dt_start__week=last_week.isocalendar()[1],
                dt_start__year=last_week.year
            ).\
            aggregate(last_week_interval=models.Sum(
                models.F('dt_end') -
                models.F('dt_start')
            ))
        if last_week_query['last_week_interval'] is not None:
            last_week_value = int(
                last_week_query['last_week_interval'].total_seconds()
            )
        else:
            last_week_value = 0
        # previous month's total (approximated as 30 days back)
        #TODO deal when it is the first month of the year
        #TODO convert to the desired timezone
        last_month = date_target - timedelta(days=30)
        last_month_query = cycle_base_query.\
            filter(
                dt_start__month=last_month.month,
                dt_start__year=last_month.year
            ).\
            aggregate(
                last_month_interval=models.Sum(
                    models.F('dt_end') -
                    models.F('dt_start')
                )
            )
        if last_month_query['last_month_interval'] is not None:
            last_month_value = int(
                last_month_query['last_month_interval'].total_seconds()
            )
        else:
            last_month_value = 0
        # previous year's total (approximated as 360 days back)
        last_year = date_target - timedelta(days=360)
        last_year_query = cycle_base_query.\
            filter(
                dt_start__year=last_year.year
            ).\
            aggregate(
                last_year_interval=models.Sum(
                    models.F('dt_end') -
                    models.F('dt_start')
                )
            )
        if last_year_query['last_year_interval'] is not None:
            last_year_value = int(
                last_year_query['last_year_interval'].total_seconds()
            )
        else:
            last_year_value = 0
        # chart-ready payload: per-period series/x-axis plus current vs previous totals
        data = {
            'week': {
                'plot_data': {
                    'series': [
                        int(q['interval'].total_seconds())
                        for q in week_query
                    ],
                    'xaxis': [q['day'] for q in week_query]
                },
                'additional_info': {
                    'current_value': week_total,
                    'last_value': last_week_value
                },
            },
            'month': {
                'plot_data': {
                    'series': [
                        int(q['interval'].total_seconds())
                        for q in month_query
                    ],
                    'xaxis': [q['day'] for q in month_query]
                },
                'additional_info': {
                    'current_value': month_total,
                    'last_value': last_month_value
                }
            },
            'year': {
                'plot_data': {
                    'series': [
                        int(q['interval'].total_seconds())
                        for q in year_query
                    ],
                    'xaxis': [q['month'] for q in year_query]
                },
                'additional_info': {
                    'current_value': year_total,
                    'last_value': last_year_value
                }
            }
        }
        return Response(
            data=data,
            status=status.HTTP_200_OK
        )
| AndreImasato/tasktime-backend | tasktime/views.py | views.py | py | 18,641 | python | en | code | 0 | github-code | 13 |
30793301588 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.html import strip_tags
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from djangocms_text_ckeditor.models import AbstractText
from filer.fields.image import FilerImageField
# reveal.js fragment animation effects; key and value are both the CSS
# class name.  ``.items()`` is passed to Django's ``choices`` below.
FRAGMENT_CHOICES = {
    'grow': 'grow',
    'shrink': 'shrink',
    'roll-in': 'roll-in',
    'fade-out': 'fade-out',
    'highlight-red': 'highlight-red',
    'highlight-blue': 'highlight-blue',
    'current-visible': 'current-visible',
    'highlight-current-blue': 'highlight-current-blue',
}
# slide transition styles: stored value -> human readable label
# NOTE(review): the 'linea' key below looks like a typo for 'linear' --
# confirm against the reveal.js transition names before changing it
# (values already stored in the DB would need a data migration).
TRANSITIONS = {
    'cube': 'Cube',
    'page': 'Page',
    'concave': 'Concave',
    'zoom': 'Zoom',
    'linea': 'Linear',
    'fade': 'Fade',
    'none': 'None',
    'default': 'Default'
}
# slide transition speeds: stored value -> human readable label
TRANSITIONS_SPEED = {
    'default': 'Default',
    'fast': 'Fast',
    'slow': 'Slow'
}
class FragmentModel(AbstractText):
    """
    Fragment plugin model

    A piece of slide content revealed step by step; ``effects`` selects
    the reveal.js fragment animation (CSS class) applied when it appears.
    The rich-text body itself comes from ``AbstractText``.
    """
    # empty string means "no special effect"
    effects = models.CharField(max_length=20, choices=FRAGMENT_CHOICES.items(), default='', blank=True)

    class Meta:
        verbose_name = _('fragment')
        verbose_name_plural = _('fragments')
class SlideModel(AbstractText):
    """
    Main slide plugin model

    One reveal.js slide: rich-text body (from ``AbstractText``) plus
    optional titles, transition settings and background styling.
    """
    title = models.CharField(verbose_name=_(u'Title'), max_length=100,
                             default='', blank=True)
    sub_title = models.CharField(_(u'Section title'), max_length=100,
                                 default='', blank=True)
    # per-slide reveal.js transition; '' falls back to the deck default
    transition = models.CharField(verbose_name=_('Transition'), max_length=20,
                                  choices=TRANSITIONS.items(), default='',
                                  blank=True)
    transition_speed = models.CharField(verbose_name=_(u'Transition speed'),
                                        max_length=20, default='', blank=True,
                                        choices=TRANSITIONS_SPEED.items())
    css_class = models.CharField(verbose_name=_(u'Custom CSS class'),
                                 max_length=100, default='', blank=True)
    background_css = models.CharField(verbose_name=_(u'Background CSS class'),
                                      max_length=100, default='', blank=True)
    background_image = FilerImageField(verbose_name=_(u'Background image'),
                                       null=True, blank=True)
    background_transition_slide = models.BooleanField(
        verbose_name=_(u'Background transition'), default=False)

    class Meta:
        verbose_name = _('slide')
        verbose_name_plural = _('slides')

    def __unicode__(self):
        # Python 2 style display name (codebase predates __str__/Django 2):
        # "<section> - <title>" when both titles are set, otherwise
        # whichever exists, otherwise a 3-word excerpt of the body text.
        if self.title:
            if self.sub_title:
                return '%s - %s' % (self.sub_title, self.title)
            return self.title
        elif self.sub_title:
            return self.sub_title
        return Truncator(strip_tags(self.body)).words(3, truncate="...")
class SlideNote(CMSPlugin):
    """
    Slide notes

    Speaker notes attached to a slide (reveal.js speaker view).
    """
    note = models.TextField(verbose_name=_(u'Notes'), default='',
                            blank=True)

    class Meta:
        verbose_name = _('note')
        verbose_name_plural = _('notes')

    def __unicode__(self):
        # short excerpt of the note as the admin display name
        return Truncator(strip_tags(self.note)).words(3, truncate="...")
class SlideCode(CMSPlugin):
    """
    Slide codes

    A code sample shown on a slide (rendered by reveal.js highlighting).
    """
    code = models.TextField(verbose_name=_(u'Code'), default='',
                            blank=True)

    class Meta:
        verbose_name = _('code')
        verbose_name_plural = _('codes')

    def __unicode__(self):
        # short excerpt of the code as the admin display name
        return Truncator(strip_tags(self.code)).words(3, truncate="...")
| nephila/djangocms-revealjs | djangocms_revealjs/models.py | models.py | py | 3,629 | python | en | code | 1 | github-code | 13 |
39401597600 | import random
from is_win_checker import is_win
# 3x3 board shared (as a module global) by all functions below:
# '' = empty cell, 'x' = player's mark, 'o' = computer's mark
matrix = [
    ['', '', ''],
    ['', '', ''],
    ['', '', ''],
]
# show the initial (empty) board
print(matrix)
def gen_numbers():
    """Pick a random board cell and return its [row, column] coordinates."""
    row = random.randrange(0, 3)
    col = random.randrange(0, 3)
    return [row, col]
def comp_step():
    """Place the computer's 'o' on a uniformly random empty cell.

    The previous implementation retried via unbounded recursion until a
    random cell happened to be empty, which never terminates on a full
    board.  Choosing directly from the empty cells has the same uniform
    distribution and always terminates.
    """
    empty_cells = [
        (x, y)
        for x in range(3)
        for y in range(3)
        if matrix[x][y] == ''
    ]
    if not empty_cells:
        return  # board is full -- nothing to place
    x, y = random.choice(empty_cells)
    matrix[x][y] = 'o'
def player_step():
    """Ask the player for a cell and place an 'x' there.

    Re-prompts on non-numeric input, out-of-range coordinates, or an
    already occupied cell (the original crashed on bad input and let the
    player overwrite existing marks).
    """
    while True:
        print('Enter the row number')
        row_raw = input()
        print('Enter the column number')
        col_raw = input()
        try:
            x, y = int(row_raw), int(col_raw)
        except ValueError:
            print('Please enter numbers from 0 to 2')
            continue
        if not (0 <= x <= 2 and 0 <= y <= 2):
            print('Coordinates must be from 0 to 2')
            continue
        if matrix[x][y] != '':
            print('That cell is already taken')
            continue
        matrix[x][y] = 'x'
        return
def game():
    """Run the game loop: player and computer alternate moves.

    Fixed: the original let the computer move even after the player had
    already completed a winning line (the win check only happened at the
    top of the loop).
    """
    while not is_win(matrix):
        player_step()
        print(matrix)
        if is_win(matrix):
            print('win')
            break
        print('play')
        comp_step()
        print(matrix)
# Run the game only when executed as a script (not on import).
if __name__ == "__main__":
    game()
9229836676 | import asyncio
from typing import TypeVar
from aiogram import types, Bot
MESSAGE_LIMIT = 4096
ReplyMarkup = TypeVar("ReplyMarkup", types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove,
types.ForceReply)
async def split_sending(message: types.Message,
                        answer_text: str,
                        reply_markup: ReplyMarkup = None):
    """Send *answer_text*, splitting it when it exceeds Telegram's
    4096-character message limit.

    The reply markup is attached only to the final chunk; a short pause
    separates the intermediate chunks to avoid flood limits.
    """
    total = len(answer_text)
    if total <= MESSAGE_LIMIT:
        await message.answer(answer_text, reply_markup=reply_markup)
        return
    for start in range(0, total, MESSAGE_LIMIT):
        chunk = answer_text[start:start + MESSAGE_LIMIT]
        if start + MESSAGE_LIMIT >= total:
            # last chunk: carry the reply markup
            await message.answer(chunk, reply_markup=reply_markup)
        else:
            await message.answer(chunk)
            await asyncio.sleep(0.5)
async def mailings(bot: Bot, text: str, users: list | int, *, reply_markup: ReplyMarkup = None) -> list[types.Message]:
    """Send *text* to every user id in *users* and return the sent messages.

    A single int is accepted as a one-element list.  Per-user delivery
    errors (blocked bot, deactivated account, ...) are reported and
    skipped so one bad recipient does not abort the whole mailing.

    Fixed: the 0.5 s anti-flood pause is no longer executed after the
    final recipient.
    """
    if isinstance(users, int):
        users = [users]
    mails = []
    last = len(users) - 1
    for i, user in enumerate(users):
        try:
            mails.append(await bot.send_message(user, text, reply_markup=reply_markup))
        except Exception as e:  # best-effort broadcast: report and continue
            print(e)
        if i < last:
            await asyncio.sleep(0.5)
    return mails
| taimast/aiochatgpt | aiochatgpt/utils/message.py | message.py | py | 1,434 | python | en | code | 1 | github-code | 13 |
6386912863 | from collections import namedtuple
from gi.repository import BlockDev as blockdev
import logging
log = logging.getLogger("blivet")
from . import raid
from ..size import Size
from ..i18n import N_
from ..flags import flags
# some of lvm's defaults that we have no way to ask it for
LVM_PE_START = Size("1 MiB")
LVM_PE_SIZE = Size("4 MiB")
# thinp constants (metadata and chunk size limits for thin pools)
LVM_THINP_MIN_METADATA_SIZE = Size("2 MiB")
LVM_THINP_MAX_METADATA_SIZE = Size("16 GiB")
LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
# RAID levels supported for LVM logical volumes
RAID_levels = raid.RAIDLevels(["raid0", "raid1", "linear"])
# (name, human readable description) of a thin-pool profile
ThPoolProfile = namedtuple("ThPoolProfile", ["name", "desc"])
KNOWN_THPOOL_PROFILES = (ThPoolProfile("thin-generic", N_("Generic")),
                         ThPoolProfile("thin-performance", N_("Performance")))
# Start config_args handling code
#
# Theoretically we can handle all that can be handled with the LVM --config
# argument. For every time we call an lvm_cc (lvm compose config) function
# we regenerate the config_args with all global info.
config_args_data = { "filterRejects": [],    # regular expressions to reject.
                     "filterAccepts": [] }   # regexp to accept
def _set_global_config():
    """Regenerate and install the global --config string for lvm calls.

    lvm commands accept lvm.conf style settings via --config; this builds
    the "devices" section (preferred device names plus the current reject
    filter) and, unless metadata backups are enabled, disables lvm's
    backup/archive behaviour.
    """
    reject_patterns = config_args_data["filterRejects"]
    filter_entries = ["\"r|/%s$|\"" % pattern for pattern in reject_patterns]
    if filter_entries:
        filter_string = "filter=[%s]" % ",".join(filter_entries)
    else:
        filter_string = ""
    # XXX consider making /tmp/blivet.lvm.XXXXX, writing an lvm.conf there,
    # and setting LVM_SYSTEM_DIR
    devices_string = 'preferred_names=["^/dev/mapper/", "^/dev/md/", "^/dev/sd"]'
    if filter_string:
        devices_string += " %s" % filter_string
    # devices_string can have (inside the brackets) "dir", "scan",
    # "preferred_names", "filter", "cache_dir", "write_cache_state",
    # "types", "sysfs_scan", "md_component_detection". see man lvm.conf.
    config_string = " devices { %s } " % (devices_string)  # strings can be added
    if not flags.lvm_metadata_backup:
        config_string += "backup {backup=0 archive=0} "
    blockdev.lvm_set_global_config(config_string)
def needs_config_refresh(fn):
    """Decorator: after *fn* runs, push the updated filter config to lvm.

    Uses functools.wraps so the wrapped function keeps its name and
    docstring (the original wrapper hid them, which confuses logging and
    introspection).
    """
    from functools import wraps

    @wraps(fn)
    def fn_with_refresh(*args, **kwargs):
        ret = fn(*args, **kwargs)
        _set_global_config()
        return ret

    return fn_with_refresh
@needs_config_refresh
def lvm_cc_addFilterRejectRegexp(regexp):
    """ Add a regular expression to the --config string.

        Devices matching *regexp* are hidden from lvm; the decorator
        pushes the updated filter to lvm after the list is changed.
    """
    log.debug("lvm filter: adding %s to the reject list", regexp)
    config_args_data["filterRejects"].append(regexp)
@needs_config_refresh
def lvm_cc_removeFilterRejectRegexp(regexp):
    """ Remove a regular expression from the --config string.

        Removing a regexp that is not in the list is harmless -- it is
        logged and ignored.
    """
    log.debug("lvm filter: removing %s from the reject list", regexp)
    try:
        config_args_data["filterRejects"].remove(regexp)
    except ValueError:
        # not present: nothing to remove, keep the config unchanged
        log.debug("%s wasn't in the reject list", regexp)
        return
@needs_config_refresh
def lvm_cc_resetFilter():
    """Clear both the reject and accept device filter lists."""
    config_args_data["filterRejects"] = []
    config_args_data["filterAccepts"] = []
| TimothyAsirJeyasing/blivet | blivet/devicelibs/lvm.py | lvm.py | py | 3,163 | python | en | code | null | github-code | 13 |
43129916160 | import re
from django.utils.safestring import mark_safe
def strip(s, all_tags=None):
    """Sanitize HTML in *s*.

    By default only a small whitelist of tags (strong, b, a, i) and
    attributes (href, src) is kept; when *all_tags* is truthy every tag
    is stripped.  HTML comments are removed and ``javascript:`` URLs are
    neutralized.  Returns an explanatory message when BeautifulSoup is
    not installed.
    """
    try:
        from BeautifulSoup import BeautifulSoup, Comment
        soup = BeautifulSoup(s)
    except ImportError:
        soup = None
    valid_tags = ('strong b a i'.split() if not all_tags else '')
    valid_attrs = ('href src'.split() if not all_tags else '')
    if soup:
        # Fixed: the original loop rebound the name ``Comment`` as its
        # loop variable and then called ``comment.extract()`` on an
        # undefined name, raising NameError on the first HTML comment.
        for comment in soup.findAll(
                text=lambda text: isinstance(text, Comment)):
            comment.extract()
        for tag in soup.findAll(True):
            if tag.name not in valid_tags:
                tag.hidden = True
            tag.attrs = [(attr, val) for attr, val in tag.attrs
                         if attr in valid_attrs]
        ret = soup.renderContents().decode('utf8').replace('javascript:', '')
    else:
        ret = "Could not load BeautifulSoup"
    return ret
def convert_links(s):
    """Hyperlink bare http URLs and @twitter mentions in *s*.

    NOTE: the text must already be HTML-escaped -- the result is marked
    safe and will be rendered without further escaping.
    """
    url_pattern = re.compile(r'(|^)http([\w\d\.\:\/]+?)(\s|$|\:|,)', re.IGNORECASE)
    linked = url_pattern.sub(r'\1<a href="http\2">http\2</a>\3', s)
    mention_pattern = re.compile(r'(\s|^)@([\w\d_]+)', re.IGNORECASE)
    linked = mention_pattern.sub(r'\1<a href="http://twitter.com/\2/">@\2</a>', linked)
    return mark_safe(linked)
16936229277 | # https://www.acmicpc.net/problem/1208
import sys
from itertools import combinations
n, s = map(int, sys.stdin.readline().split())
a = list(map(int, sys.stdin.readline().split()))
# Split the array into two halves around the middle (meet in the middle).
arr1 = a[:n // 2]
arr2 = a[n // 2:]
# After splitting, build the list of all subset sums for each half.
left, right = [], []
for i in range(len(arr1) + 1):
    data = combinations(arr1, i)
    for l in data:
        left.append(sum(l))
for i in range(len(arr2) + 1):
    data = combinations(arr2, i)
    for l in data:
        right.append(sum(l))
# Sort left ascending and right descending (convenient for the
# two-pointer sweep below).
left.sort()
right.sort(reverse=True)
# print(left,right,sep = '\n')
# lp: pointer into left, rp: pointer into right
lp, rp = 0, 0
answer = 0
while lp < len(left) and rp < len(right):
    tmp = left[lp] + right[rp]  # current pair sum
    # If the current sum equals s, count how many duplicated values
    # exist on each side -- every combination of them is a valid pair.
    if tmp == s:
        lp += 1
        rp += 1
        c1 = 1
        c2 = 1
        # advance past equal values on the left, counting them
        while lp < len(left) and left[lp] == left[lp - 1]:
            c1 += 1
            lp += 1
        # advance past equal values on the right, counting them
        while rp < len(right) and right[rp] == right[rp - 1]:
            c2 += 1
            rp += 1
        # c1 * c2 is the number of (left, right) pairs with this sum
        """
        1, 1, 1, 2,3
        -1,-1,-1, 5,10 에서 c1 = 3 ,c2 = 3이므로 순서쌍은 총 9개
        """
        answer += (c1 * c2)
    elif tmp < s:
        lp += 1
    else:
        rp += 1
# If s is 0, subtract 1 for the empty-subset/empty-subset pair.
if s == 0:
    answer -= 1
print(answer)
| JaeHyeok-2/Algorithm1 | 알고리즘 연습문제/이분 탐색/부분수열의 합2.py | 부분수열의 합2.py | py | 1,751 | python | ko | code | 0 | github-code | 13 |
40485516951 | import setuptools
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata for the barbell CLI tool (installed as a script).
setuptools.setup(
    name='barbell',
    version='0.2.1',
    scripts=['barbell'],
    author="Henrique de Paula",
    author_email="oprometeumoderno@gmail.com",
    description="A tool for creating Gym environments",
    long_description=long_description,
    long_description_content_type="text/markdown",
    include_package_data=True,
    url="https://github.com/oprometeumoderno/barbell",
    packages=setuptools.find_packages(),
    install_requires=['gym', 'stringcase'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| oprometeumoderno/barbell | setup.py | setup.py | py | 738 | python | en | code | 0 | github-code | 13 |
class Solution:
    def findSubstring(self, s: str, words: List[str]) -> List[int]:
        """Return all start indices in *s* of a substring that is a
        concatenation of every word in *words* exactly once (all words
        have equal length).

        Sliding-window approach (credit: Anavil): for each of the ``n``
        possible word alignments, scan *s* word by word, counting words
        in the current window and shrinking it from the left whenever
        some word occurs more often than allowed.
        """
        from collections import Counter
        from collections import defaultdict
        c = Counter(words)
        words_len = len(words)
        n = len(words[0])
        result = []
        for k in range(n):
            left = k
            word_cnt = 0
            # Fixed: ``sub_d`` was referenced before assignment in the
            # original (NameError on the first in-dictionary word); it
            # must start empty for every alignment.
            sub_d = defaultdict(int)
            for j in range(k, len(s) - n + 1, n):
                word = s[j:j + n]
                if word in c:
                    sub_d[word] += 1
                    word_cnt += 1
                    # shrink the window while *word* occurs too often
                    while sub_d[word] > c[word]:
                        sub_d[s[left:left + n]] -= 1
                        left += n
                        word_cnt -= 1
                    if word_cnt == words_len:
                        result.append(left)
                else:
                    # *word* is unusable: restart the window just after it
                    left = j + n
                    sub_d = defaultdict(int)
                    word_cnt = 0
        return result
| JaeEon-Ryu/Coding_test | LeetCode/0030_ Substring with Concatenation of All Words.py | 0030_ Substring with Concatenation of All Words.py | py | 1,750 | python | en | code | 1 | github-code | 13 |
29652491613 | #!/usr/bin/python3
# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4; coding: utf-8 -*-
# Partly based on a script from Review Board, MIT license; but modified to
# act as a unit test.
from __future__ import print_function
import os
import re
import subprocess
import unittest
CURDIR = os.path.dirname(os.path.abspath(__file__))
class TestPyflakesClean(unittest.TestCase):
    """ ensure that the tree is pyflakes clean """

    def read_exclusions(self):
        """Read known-failure lines from tests/pyflakes.exclude.

        Lines starting with '#' are comments; a missing file simply
        means there are no exclusions.
        """
        exclusions = {}
        try:
            excpath = os.path.join(CURDIR, "pyflakes.exclude")
            with open(excpath, "r") as fp:
                for line in fp:
                    if not line.startswith("#"):
                        exclusions[line.rstrip()] = 1
        except IOError:
            pass
        return exclusions

    def filter_exclusions(self, contents):
        """Yield pyflakes output lines that are not in the exclusion list.

        Line/column numbers are wildcarded before comparison so stored
        exclusions stay valid when the code above them moves.
        """
        exclusions = self.read_exclusions()
        for line in contents:
            if line.startswith("#"):
                continue
            # strip the absolute path prefix so exclusions are relative
            line = line.rstrip().split(CURDIR + '/', 1)[1]
            test_line = re.sub(r":[0-9]+:", r":*:", line, 1)
            test_line = re.sub(r"line [0-9]+", r"line *", test_line)
            if test_line not in exclusions:
                yield line

    def test_pyflakes_clean(self):
        """Run pyflakes3 over the whole tree and fail on new findings."""
        # mvo: type -f here to avoid running pyflakes on imported files
        # that are symlinks to other packages
        cmd = 'find %s/.. -type f -name "*.py" | xargs pyflakes3' % CURDIR
        p = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            close_fds=True, shell=True, universal_newlines=True)
        contents = p.communicate()[0].splitlines()
        filtered_contents = list(self.filter_exclusions(contents))
        # print the offending lines to ease debugging before failing
        for line in filtered_contents:
            print(line)
        self.assertEqual(0, len(filtered_contents))
if __name__ == "__main__":
    # enable debug logging when run directly (not via a test runner)
    import logging
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| GalliumOS/update-manager | tests/test_pyflakes.py | test_pyflakes.py | py | 1,995 | python | en | code | 4 | github-code | 13 |
10835707497 | from flask import Flask, request, jsonify
import response_builder
import json
from flask_mysqldb import MySQL
import os
import json
import collections
app = Flask(__name__)
@app.route('/api', methods=['GET'])
def main():
    """Return aggregated statistics for the city given by ?cityName=..."""
    city_name = request.args.get("cityName")
    statistics = response_builder.build_statistic(city_name)
    return jsonify(statistics)
@app.route('/test', methods=['GET'])
def test():
    """Static sample payload for frontend development.

    NOTE(review): ?cityName is read but never used -- presumably a stub;
    confirm before removing the parameter.
    """
    city_name = request.args.get("cityName")
    statistics = [{'name' : "Grevenbroich", 'adresse' : "Breite Straße 1", 'modarea' : 'test', 'radabs' : 'test', 'kwh_kwp' : 'test', 'anzahl_0' : 'test', 'kw_17' : 'test', 'str_17' : "test"}]
    return json.dumps(statistics)
# Database credentials come from the environment so secrets stay out of
# the repository (MYSQL_USER / MYSQL_PASSWORD / MYSQL_HOST / MYSQL_DB).
app.config['MYSQL_USER'] = os.environ.get("MYSQL_USER")
app.config['MYSQL_PASSWORD'] = os.environ.get("MYSQL_PASSWORD")
app.config['MYSQL_HOST'] = os.environ.get("MYSQL_HOST")
app.config['MYSQL_DB'] = os.environ.get("MYSQL_DB")
#app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
db = MySQL(app)
@app.route('/')
def index():
    """Liveness check: plain text response at the root URL."""
    return "TEST"
@app.route('/databaseTest')
def databaseTest():
    """Sanity check: return every row of the ``staedte`` table as JSON.

    Fixed: the cursor is now closed even when the query raises.
    """
    cur = db.connection.cursor()
    try:
        cur.execute('''select * from staedte;''')
        results = cur.fetchall()
    finally:
        cur.close()
    print(results)
    return jsonify(results)
@app.route('/getBuildings')
def getBuildings():
    """Return all buildings of the city given by ?cityName= as a JSON list.

    Fixed: the city name was interpolated directly into the SQL string,
    allowing SQL injection; it is now passed as a bound parameter.  The
    cursor is also closed reliably.
    """
    city_name = request.args.get("cityName")
    cur = db.connection.cursor()
    try:
        cur.execute(
            'select * from gebaeude where StadtID = '
            '(Select StadtID from Staedte where Name = %s);',
            (city_name,))
        rows = cur.fetchall()
    finally:
        cur.close()
    # map the fixed column order of ``gebaeude`` onto named JSON keys
    objects_list = []
    for row in rows:
        d = collections.OrderedDict()
        d["name"] = row[2]
        d["address"] = row[3]
        d["type"] = row[4]
        d["modarea"] = row[5]
        d["radabs"] = row[6]
        d["kwh_kwp"] = row[7]
        d["anzahl_0"] = row[8]
        d["kw_17"] = row[9]
        d["str_17"] = row[10]
        objects_list.append(d)
    return json.dumps(objects_list)
@app.route('/getCityDetails')
def getCityDetails():
    """Return the city row matching ?cityName= as JSON.

    Fixed: parameterized query instead of string concatenation (SQL
    injection); the cursor is closed reliably.
    """
    city_name = request.args.get("cityName")
    cur = db.connection.cursor()
    try:
        cur.execute(
            'select * from Staedte where StadtID = '
            '(Select StadtID from Staedte where Name = %s);',
            (city_name,))
        results = cur.fetchall()
    finally:
        cur.close()
    return jsonify(results)
@app.route('/GetCityData')
def getCityData():
    """Return city row(s) plus all building rows for ?cityName= in one array.

    Fixed: parameterized queries instead of string concatenation (SQL
    injection), one reused cursor instead of two leaked ones, and the
    cursor is closed reliably.
    """
    city_name = request.args.get("cityName")
    cur = db.connection.cursor()
    try:
        cur.execute(
            'select * from Staedte where StadtID = '
            '(Select StadtID from Staedte where Name = %s);',
            (city_name,))
        city_rows = cur.fetchall()
        cur.execute(
            'select * from Gebaeude where StadtID = '
            '(Select StadtID from Staedte where Name = %s);',
            (city_name,))
        building_rows = cur.fetchall()
    finally:
        cur.close()
    return jsonify(city_rows + building_rows)
| whatwouldmarvindo/sweetgeeks | backend/app.py | app.py | py | 3,042 | python | en | code | 0 | github-code | 13 |
40916863729 | import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
def get_data_loader_X_C_Y(X, C, Y, batch_size, num_workers=8):
    """Wrap features X, conditions C and labels Y into a DataLoader.

    C and Y may be None, in which case empty (n_samples, 0) placeholders
    are substituted so downstream code can always unpack (X, C, Y)
    batches.

    Parameters
    ----------
    X : array-like, shape (n_samples, ...)
        Input features (converted to float tensor).
    C : array-like or None
        Conditioning variables (converted to float tensor).
    Y : array-like or None
        Targets; cast to long for classification losses.
    batch_size : int
    num_workers : int, default 8
        Loader worker processes.  Was hard-coded; now a parameter so
        callers/tests can pass 0 for in-process loading.

    TODO: allow batch loading from disk for data that does not fit in memory.
    """
    if C is None:
        C = np.empty((X.shape[0], 0))
    if Y is None:
        Y = np.empty((X.shape[0], 0))
    dataset = TensorDataset(torch.tensor(X).float(),
                            torch.tensor(C).float(),
                            torch.tensor(Y).long())
    return DataLoader(dataset,
                      batch_size=batch_size,
                      num_workers=num_workers)
def get_train_val_loaders_X_C_Y(X, Y=None, C=None, batch_size=32, shuffle=False, validation_size=0.1):
    """Split (X, C, Y) into train/validation sets and return a loader each.

    Missing C or Y are replaced with empty (n_samples, 0) placeholders.
    The validation loader uses a single batch covering the entire
    validation split.
    """
    n_samples = X.shape[0]
    if C is None:
        C = np.empty((n_samples, 0))
    if Y is None:
        Y = np.empty((n_samples, 0))
    split = train_test_split(X, C, Y, test_size=validation_size, shuffle=shuffle)
    X_train, X_val, C_train, C_val, Y_train, Y_val = split
    train_loader = get_data_loader_X_C_Y(X_train, C_train, Y_train, batch_size)
    val_loader = get_data_loader_X_C_Y(X_val, C_val, Y_val, batch_size=X_val.shape[0])
    return train_loader, val_loader
24845969938 |
"""
This file contains a content handler for parsing sumo network xml files.
It uses other classes from this module to represent the road network.
"""
from collections import defaultdict
from copy import deepcopy
from enum import Enum, unique
from typing import List, Dict, Tuple, Optional, Callable, TypeVar, Iterable, Union, Set
from xml.etree import cElementTree as ET
import os
import numpy as np
import sumolib
import sumolib.files
def set_allowed_changes(xml_node: ET.Element, obj: Union["Connection", "Lane"]):
    """Adds allowed lange change directions to etree xml node"""
    # NOTE(review): this unconditional return disables the function --
    # everything below is unreachable (presumably switched off on
    # purpose, e.g. for SUMO versions without changeLeft/changeRight
    # support).  Confirm the intent before re-enabling or deleting.
    return
    if obj.change_left_allowed and len(obj.change_left_allowed) != len(VehicleType):
        xml_node.set("changeLeft", " ".join(la.value for la in obj.change_left_allowed))
    elif len(obj.change_left_allowed) == 0:
        xml_node.set("changeLeft", VehicleType.CUSTOM1.value)
    if obj.change_right_allowed and len(obj.change_right_allowed) != len(VehicleType):
        xml_node.set("changeRight", " ".join(la.value for la in obj.change_right_allowed))
    elif len(obj.change_right_allowed) == 0:
        xml_node.set("changeRight", VehicleType.CUSTOM1.value)
class NetLocation:
    """Geo-referencing data of a SUMO network (the <location> element)."""

    def __init__(self, net_offset: np.ndarray,
                 conv_boundary: np.ndarray,
                 orig_boundary: np.ndarray,
                 proj_parameter: str):
        # offset added to original coordinates to obtain network coordinates
        assert net_offset.shape == (2,)
        self.net_offset = net_offset
        # bounding box in converted (network) coordinates
        self.conv_boundary = conv_boundary
        # bounding box in the original (geo) coordinates
        self.orig_boundary = orig_boundary
        # proj.4 projection definition string
        self.proj_parameter = proj_parameter
class Net:
    """The whole sumo network."""
    def __init__(self, version: str = None,
                 junction_corner_detail: float = None,
                 junction_link_detail: float = None,
                 limit_turn_speed: float = None,
                 location: NetLocation = None):
        self.version: str = version if version is not None else ""
        self.location = location
        self.junction_corner_detail = junction_corner_detail
        # NOTE(review): attribute name has a typo ("detial"); kept as-is
        # because external callers may already rely on it.
        self.junction_link_detial = junction_link_detail
        self.limit_turn_speed = limit_turn_speed
        # edge type id -> EdgeType
        self.types: Dict[str, EdgeType] = {}
        self.junctions: Dict[int, Junction] = {}
        self.edges: Dict[int, Edge] = {}
        # from_edge -> from_lane -> to_edge -> to_lane -> Connection
        self.connections: Dict[int, Dict[int, Dict[int, Dict[int, Connection]]]] = defaultdict(
            lambda: defaultdict(lambda: defaultdict(dict)))
        # id -> program_id -> TLSProgram
        self.tlss: Dict[str, TLSProgram] = {}
_K = TypeVar("_K")
_V = TypeVar("_V")
_VV = TypeVar("_VV")
def _get_default(d: Dict[_K, _V], key: _K, default: Optional[_VV] = None, map: Callable[[_V], _VV] = lambda x: x) -> \
Optional[
_VV]:
try:
return map(d[key])
except KeyError:
return default
except ValueError:
return None
def sumo_net_from_xml(file: str) -> Net:
    """
    Given a SUMO .net.xml file this function returns the parsed
    representation of it.

    Fixes over the previous version:
    - the "discard" type attribute referenced an undefined name ``x``
      (NameError as soon as the attribute was present);
    - boolean attributes ("discard", "oneway", "keepClear") are written
      by SUMO as "true"/"false", which ``int()`` cannot parse;
    - the lane-count attribute is spelled "numLanes" in .net.xml files,
      not "num_lanes".

    :param file: Path to .net.xml file
    :return: parsed Net
    :raises RuntimeError: for a missing file or a wrong extension
    """
    if not os.path.isfile(file):
        raise RuntimeError(f"Invalid file path: {file}")
    if not file.endswith(".net.xml"):
        raise RuntimeError(f"Invalid file type {file}, required *.net.xml")

    def parse_bool(value: str) -> bool:
        # SUMO serializes booleans as "true"/"false" (older files "1"/"0")
        return value in ("1", "true")

    root = ET.parse(file).getroot()
    net = Net(version=_get_default(root.attrib, "version", None),
              junction_corner_detail=_get_default(root.attrib, "junctionCornerDetail", None, float),
              junction_link_detail=_get_default(root.attrib, "junctionLinkDetail", None, float),
              limit_turn_speed=_get_default(root.attrib, "limitTurnSpeed", None, float))

    # first pass: location, edge types, traffic light programs, junctions
    for elem in root.iter():
        if elem.tag == "location":
            net.location = NetLocation(
                net_offset=np.array([float(f) for f in elem.attrib["netOffset"].split(",")]),
                conv_boundary=np.array([float(f) for f in elem.attrib["convBoundary"].split(",")]),
                orig_boundary=np.array([float(f) for f in elem.attrib["origBoundary"].split(",")]),
                proj_parameter=elem.attrib["projParameter"]
            )
        elif elem.tag == "type":
            try:
                net.types[elem.attrib["id"]] = EdgeType(
                    id=elem.attrib["id"],
                    allow=_get_default(elem.attrib, "allow", None,
                                       lambda allow: [VehicleType(a) for a in allow.split(" ")]),
                    disallow=_get_default(elem.attrib, "disallow", None,
                                          lambda disallow: [VehicleType(a) for a in disallow.split(" ")]),
                    # fixed: old lambda referenced an undefined name ``x``
                    discard=_get_default(elem.attrib, "discard", False, parse_bool),
                    # fixed: the attribute is spelled "numLanes" in .net.xml
                    num_lanes=_get_default(elem.attrib, "numLanes", -1, int),
                    oneway=_get_default(elem.attrib, "oneway", False, parse_bool),
                    priority=_get_default(elem.attrib, "priority", 0, int),
                    speed=_get_default(elem.attrib, "speed", 13.89, float),
                    sidewalk_width=_get_default(elem.attrib, "sidewalkWidth", -1., float)
                )
            except ValueError:
                # keep parsing other types; report the offending element
                print(f"error parsing edge type {elem.attrib.get('id')}")
        elif elem.tag == "tlLogic":
            program = TLSProgram(id=elem.attrib["id"],
                                 offset=_get_default(elem.attrib, "offset", 0, int),
                                 program_id=elem.attrib["programID"],
                                 tls_type=TLSType(elem.attrib["type"]))
            for phase in elem:
                if phase.tag != "phase":
                    continue
                program.add_phase(Phase(
                    duration=_get_default(phase.attrib, "duration", 0., float),
                    state=_get_default(phase.attrib, "state", [], lambda state: [SignalState(s) for s in state]),
                    min_dur=_get_default(phase.attrib, "minDur", None, int),
                    max_dur=_get_default(phase.attrib, "maxDur", None, int),
                    name=_get_default(phase.attrib, "name"),
                    next=_get_default(phase.attrib, "next", None, lambda n: [int(i) for i in n.split(" ")])
                ))
            assert program.id not in net.tlss
            net.tlss[program.id] = program
        elif elem.tag == "junction":
            x = _get_default(elem.attrib, "x", None, float)
            y = _get_default(elem.attrib, "y", None, float)
            z = _get_default(elem.attrib, "z", None, float)
            # junction ids are numeric for plain junctions but strings
            # for internal junctions; try int first, fall back to str
            id_ = _get_default(elem.attrib, "id", None, int)
            if id_ is None:
                id_ = _get_default(elem.attrib, "id", None, str)
            assert id_ is not None
            junction = Junction(
                id=id_,
                junction_type=_get_default(elem.attrib, "type", None, JunctionType),
                coord=np.array([x, y, z] if z is not None else [x, y]),
                shape=_get_default(elem.attrib, "shape", None, from_shape_string),
                # lane id strings for now; resolved to Lane objects below
                inc_lanes=_get_default(elem.attrib, "incLanes", None,
                                       lambda inc_lanes: inc_lanes.split(" ") if inc_lanes else None),
                int_lanes=_get_default(elem.attrib, "intLanes", None,
                                       lambda int_lanes: int_lanes.split(" ") if int_lanes else None),
            )
            for request in elem:
                if request.tag != "request":
                    continue
                junction.requests.append(
                    JunctionRequest(
                        index=_get_default(request.attrib, "index", 0, int),
                        response=_get_default(request.attrib, "response", [],
                                              lambda response: [bool(int(bit)) for bit in response]),
                        foes=_get_default(request.attrib, "foes", [],
                                          lambda foes: [bool(int(bit)) for bit in foes]),
                        cont=_get_default(request.attrib, "cont", 0, int)
                    )
                )
            net.junctions[junction.id] = junction

    # second pass: edges (with lanes) and connections, which reference
    # the junctions and edges parsed above
    for elem in root:
        if elem.tag == "edge":
            edge = Edge(
                id=elem.attrib["id"],
                from_node=_get_default(elem.attrib, "from", map=lambda f: net.junctions[int(f)]),
                to_node=_get_default(elem.attrib, "to", map=lambda f: net.junctions[int(f)]),
                type_id=_get_default(elem.attrib, "type", ""),
                speed=_get_default(elem.attrib, "speed", None, float),
                priority=_get_default(elem.attrib, "priority", None, int),
                length=_get_default(elem.attrib, "length", None, float),
                shape=_get_default(elem.attrib, "shape", None, from_shape_string),
                spread_type=_get_default(elem.attrib, "spreadType", None, SpreadType),
            )
            for lane in elem:
                if lane.tag != "lane":
                    continue
                # lane is added to edge implicitly in the Lane() constructor
                Lane(
                    edge=edge,
                    speed=_get_default(lane.attrib, "speed", None, float),
                    length=_get_default(lane.attrib, "length", None, float),
                    width=_get_default(lane.attrib, "width", None, float),
                    allow=_get_default(lane.attrib, "allow", None,
                                       lambda allow: [VehicleType(a) for a in allow.split(" ")]),
                    disallow=_get_default(lane.attrib, "disallow", None,
                                          lambda disallow: [VehicleType(a) for a in disallow.split(" ")]),
                    shape=_get_default(lane.attrib, "shape", None, from_shape_string)
                )
            net.edges[edge.id] = edge
        elif elem.tag == "connection":
            from_edge = _get_default(elem.attrib, "from", map=lambda f: net.edges[f])
            to_edge = _get_default(elem.attrib, "to", map=lambda t: net.edges[t])
            c = Connection(
                from_edge=from_edge,
                to_edge=to_edge,
                from_lane=_get_default(elem.attrib, "fromLane", map=lambda idx: from_edge.lanes[int(idx)]),
                to_lane=_get_default(elem.attrib, "toLane", map=lambda idx: to_edge.lanes[int(idx)]),
                direction=_get_default(elem.attrib, "dir", map=ConnectionDirection),
                tls=_get_default(elem.attrib, "tl", map=lambda tls: net.tlss[tls]),
                tl_link=_get_default(elem.attrib, "linkIndex", map=int),
                state=_get_default(elem.attrib, "state"),
                via_lane_id=_get_default(elem.attrib, "via", map=lambda via: via.split(" ")),
                shape=_get_default(elem.attrib, "shape", map=from_shape_string),
                # fixed: keepClear is "true"/"false" in the XML
                keep_clear=_get_default(elem.attrib, "keepClear", map=parse_bool),
                cont_pos=_get_default(elem.attrib, "contPos")
            )
            net.connections[c.from_edge.id][c.from_lane.id][c.to_edge.id][c.to_lane.id] = c

    # resolve the lane id strings stored on junctions to Lane objects
    for junction in net.junctions.values():
        def replace_lanes(lane_ids):
            if not lane_ids:
                return None
            lanes = []
            for lane_id in lane_ids:
                # lane id is "<edge_id>_<lane_index>"; edge ids may
                # themselves contain underscores, so split on the last one
                split = lane_id.split("_")
                lanes.append(net.edges["_".join(split[:-1])].lanes[int(split[-1])])
            return lanes

        junction.int_lanes = replace_lanes(junction.int_lanes)
        junction.inc_lanes = replace_lanes(junction.inc_lanes)
    return net
#
# Node
#
@unique
class NodeType(Enum):
    """
    Node types:
    If you leave out the type of the node, it is automatically guessed by
    netconvert but may not be the one you intended.  Any other string is
    treated as an error and stops the program.
    Taken from
    https://sumo.dlr.de/docs/Networks/PlainXML.html#connections_after_joining_nodes
    """
    # priority: Vehicles on a low-priority edge have to wait until vehicles on a high-priority edge
    # have passed the junction.
    PRIORITY = "priority"
    # traffic_light: The junction is controlled by a traffic light (priority rules are used to avoid collisions
    # if conflicting links have green light at the same time).
    TRAFFIC_LIGHT = "traffic_light"
    # traffic_light_unregulated: The junction is controlled by a traffic light without any further rules.
    # This may cause collision if unsafe signal plans are used.
    # Note, that collisions within the intersection will never be detected.
    TRAFFIC_LIGHT_UNREGULATED = "traffic_light_unregulated"
    # traffic_light_right_on_red: The junction is controlled by a traffic light as for type traffic_light.
    # Additionally, right-turning vehicles may drive in any phase whenever it is safe to do so (after stopping once).
    # This behavior is known as right-turn-on-red.
    TRAFFIC_LIGHT_RIGHT_ON_RED = "traffic_light_right_on_red"
    # right_before_left: Vehicles will let vehicles coming from their right side pass.
    RIGHT_BEFORE_LEFT = "right_before_left"
    # unregulated: The junction is completely unregulated - all vehicles may pass without braking;
    # Collision detection on the intersection is disabled but collisions beyond the intersection will
    # detected and are likely to occur.
    UNREGULATED = "unregulated"
    # priority_stop: This works like a priority-junction but vehicles on minor links always have to stop before passing
    PRIORITY_STOP = "priority_stop"
    # allway_stop: This junction works like an All-way stop
    ALLWAY_STOP = "allway_stop"
    # rail_signal: This junction is controlled by a rail signal. This type of junction/control is only useful for rails.
    RAIL_SIGNAL = "rail_signal"
    # rail_crossing: This junction models a rail road crossing.
    # It will allow trains to pass unimpeded and will restrict vehicles via traffic signals when a train is approaching.
    RAIL_CROSSING = "rail_crossing"
    # zipper: This junction connects edges where the number of lanes decreases and traffic needs
    # to merge zipper-style (late merging).
    ZIPPER = "zipper"
@unique
class RightOfWay(Enum):
    """How right-of-way between incoming edges of a node is determined."""
    # Taken from: https://sumo.dlr.de/docs/Networks/PlainXML.html#right-of-way
    # This mode is useful if the priority attribute of the edges cannot be relied
    # on to determine right-of-way all by itself.
    # It sorts edges according to priority, speed and laneNumber. The 2 incoming edges with the highest position
    # are determined and will receive right-of-way. All other edges will be classified as minor.
    DEFAULT = "default"
    # This mode is useful for customizing right-of-way by tuning edge priority attributes.
    # The relationship between streams of different incoming-edge priority will be solely determined by edge priority.
    # For equal-priority values, turning directions are also evaluated.
    EDGE_PRIORITY = "edgePriority"
class Node:
    """A node of a SUMO network (plain-XML <node> element).

    Holds the node's position/shape, the controlling traffic-light program (if
    any), and the incoming/outgoing edges and lanes that reference this node.
    """
    def __init__(self,
                 id: int,
                 node_type: NodeType,
                 coord: np.ndarray,
                 shape: np.ndarray = None,
                 inc_lanes: List['Lane'] = None,
                 int_lanes: List['Lane'] = None,
                 tl: 'TLSProgram' = None,
                 right_of_way=RightOfWay.DEFAULT):
        """
        :param id: unique id of this node
        :param node_type: junction control type of the node
        :param coord: position as x/y or x/y/z; only the components present are serialized
        :param shape: optional polygon describing the junction area
        :param inc_lanes: lanes ending at this node (defaults to [])
        :param int_lanes: internal lanes of this node (defaults to [])
        :param tl: traffic-light program controlling this node, if any
        :param right_of_way: right-of-way computation mode (see RightOfWay)
        """
        self.id = id
        self.type = node_type
        self.coord = coord
        # Edges register themselves here via add_incoming/add_outgoing
        # (called from Edge.__init__).
        self._incoming: List[Edge] = []
        self._outgoing: List[Edge] = []
        # NOTE(review): _foes/_prohibits are never populated nor serialized yet
        # (see the TODOs in to_xml).
        self._foes: Dict[int, Edge] = {}
        self._prohibits: Dict[int, Edge] = {}
        self.inc_lanes: List[Lane] = inc_lanes if inc_lanes is not None else []
        self.int_lanes: List[Lane] = int_lanes if int_lanes is not None else []
        self.shape: Optional[np.ndarray] = shape
        self.tl = tl
        self.right_of_way = right_of_way
        # Hard-coded defaults; not configurable via the constructor.
        self.zipper = True
        self.keep_clear = True
    def add_outgoing(self, edge: 'Edge'):
        """Register an edge leaving this node."""
        self._outgoing.append(edge)
    @property
    def outgoing(self) -> List['Edge']:
        # Edges leaving this node, in registration order.
        return self._outgoing
    def add_incoming(self, edge: 'Edge'):
        """Register an edge arriving at this node."""
        self._incoming.append(edge)
    @property
    def incoming(self) -> List['Edge']:
        # Edges arriving at this node, in registration order.
        return self._incoming
    def to_xml(self) -> str:
        """
        Converts this node to its xml representation.
        Optional attributes are only emitted when set.
        """
        node = ET.Element("node")
        node.set("id", str(self.id))
        node.set("type", str(self.type.value))
        # Emit only as many coordinate components as coord actually has (2 or 3).
        for k, v in zip(["x", "y", "z"][:self.coord.shape[0]], self.coord):
            node.set(k, str(v))
        if self.incoming:
            node.set("incoming", " ".join([str(i.id) for i in self.incoming]))
        if self.outgoing:
            node.set("outgoing", " ".join([str(o.id) for o in self.outgoing]))
        if self._foes is not None:
            # TODO: convert foes
            pass
        if self._prohibits is not None:
            # TODO: convert prohibits
            pass
        if self.keep_clear is False:
            # Only emit when different from SUMO's default (true).
            node.set("keepClear", "false")
        if self.inc_lanes:
            node.set("incLanes", " ".join([str(la.id) for la in self.inc_lanes]))
        if self.int_lanes:
            node.set("intLanes", " ".join([str(la.id) for la in self.int_lanes]))
        if self.shape is not None:
            node.set("shape", to_shape_string(self.shape))
        if self.tl is not None:
            node.set("tl", self.tl.id)
        node.set("rightOfWay", str(self.right_of_way.value))
        return ET.tostring(node, encoding="unicode")
    def __str__(self):
        return "Node: " + str(self.id)
    def __hash__(self):
        # NOTE(review): hash uses (id, type) while __eq__ also compares tl and
        # right_of_way; equal objects still hash equally, so this is consistent.
        return hash((self.id, self.type))
    def __eq__(self, other):
        # NOTE(review): no isinstance check — comparing against a non-Node with
        # these attributes would also succeed.
        return self.id == other.id \
            and self.type == other.type \
            and self.tl == other.tl \
            and self.right_of_way == other.right_of_way
    def __ne__(self, other):
        return not self.__eq__(other)
#
# Junction
#
@unique
class JunctionType(Enum):
    """Control type of a compiled junction (<junction type="..."> in .net.xml).

    Mirrors NodeType plus DEAD_END; duplicated because Enum inheritance is not
    supported in Python.
    """
    DEAD_END = "dead_end"
    # the following is copied from NodeType, as Enum inheritance is not supported:
    PRIORITY = "priority"
    # traffic_light: The junction is controlled by a traffic light (priority rules are used to avoid collisions
    # if conflicting links have green light at the same time).
    TRAFFIC_LIGHT = "traffic_light"
    # traffic_light_unregulated: The junction is controlled by a traffic light without any further rules.
    # This may cause collision if unsafe signal plans are used.
    # Note, that collisions within the intersection will never be detected.
    TRAFFIC_LIGHT_UNREGULATED = "traffic_light_unregulated"
    # traffic_light_right_on_red: The junction is controlled by a traffic light as for type traffic_light.
    # Additionally, right-turning vehicles may drive in any phase whenever it is safe to do so (after stopping once).
    # This behavior is known as right-turn-on-red.
    TRAFFIC_LIGHT_RIGHT_ON_RED = "traffic_light_right_on_red"
    # right_before_left: Vehicles will let vehicles coming from their right side pass.
    RIGHT_BEFORE_LEFT = "right_before_left"
    # unregulated: The junction is completely unregulated - all vehicles may pass without braking;
    # Collision detection on the intersection is disabled but collisions beyond the intersection will
    # be detected and are likely to occur.
    UNREGULATED = "unregulated"
    # priority_stop: This works like a priority-junction but vehicles on minor links always have to stop before passing
    PRIORITY_STOP = "priority_stop"
    # allway_stop: This junction works like an All-way stop
    ALLWAY_STOP = "allway_stop"
    # rail_signal: This junction is controlled by a rail signal. This type of junction/control is only useful for rails.
    RAIL_SIGNAL = "rail_signal"
    # rail_crossing: This junction models a rail road crossing.
    # It will allow trains to pass unimpeded and will restrict vehicles via traffic signals when a train is approaching.
    RAIL_CROSSING = "rail_crossing"
    # zipper: This junction connects edges where the number of lanes decreases and traffic needs
    # to merge zipper-style (late merging).
    ZIPPER = "zipper"
class Junction(Node):
    """A compiled junction of a SUMO network (.net.xml <junction> element).

    Behaves like a Node but is typed with a JunctionType and additionally
    carries the connection requests computed by netconvert.
    """

    def __init__(self,
                 id: int,
                 junction_type: 'JunctionType',
                 coord: np.ndarray,
                 shape: np.ndarray = None,
                 inc_lanes: List['Lane'] = None,
                 int_lanes: List['Lane'] = None):
        """
        :param id: unique id of the junction
        :param junction_type: control type of the junction
        :param coord: position as x/y or x/y/z
        :param shape: optional polygon describing the junction area
        :param inc_lanes: lanes ending at this junction (defaults to [])
        :param int_lanes: internal lanes of this junction (defaults to [])
        """
        assert coord.shape == (2,) or coord.shape == (3,), f"Coord has to have two or three values, was {coord}"
        super().__init__(id, junction_type, coord, shape, inc_lanes, int_lanes)
        # Fixed: the previous version re-assigned id/type/coord/shape and, more
        # importantly, inc_lanes/int_lanes AFTER the super() call, clobbering
        # Node's None -> [] normalization and leaving the attributes None.
        # Rely on Node.__init__ for all shared attributes instead.
        self.requests: List[JunctionRequest] = []

    def __str__(self):
        return "Junction: " + str(self.id)
class JunctionRequest:
    """Models a junction's <request> entry: for the link at *index*, which
    links it must yield to (*response*) and which links conflict (*foes*)."""

    def __init__(self, index: int, response: List[bool], foes: List[bool], cont: int):
        # Values are stored verbatim, exactly as parsed from the network file.
        self.index = index
        self.cont = cont
        self.response = response
        self.foes = foes
#
# Edge
#
@unique
class SpreadType(Enum):
    """How an edge's lanes are placed relative to its geometry.

    From: https://sumo.dlr.de/docs/Networks/PlainXML.html#spreadtype
    Serialized into the edge's "spreadType" attribute (see Edge.to_xml).
    """
    # (default): The edge geometry is interpreted as the left side of the edge and lanes flare out to the right.
    # This works well if edges in opposite directions have the same (or rather reversed) geometry.
    RIGHT = "right"
    # The edge geometry is interpreted as the middle of the directional edge and lanes
    # flare out symmetrically to both sides.
    # This is appropriate for one-way edges
    CENTER = "center"
    # The edge geometry is interpreted as the middle of a bi-directional road.
    # This works well when both directional edges have a different lane number.
    ROAD_CENTER = "roadCenter"
class Edge:
    """An edge of a SUMO network (plain-XML <edge> element).

    An edge connects two nodes and owns an ordered list of lanes. Creating an
    edge automatically registers it with its end nodes.
    """

    def __init__(self,
                 id: int,
                 from_node: 'Node',
                 to_node: 'Node',
                 type_id: str = "",
                 speed: float = None,
                 priority: int = None,
                 length: float = None,
                 shape: np.ndarray = None,
                 spread_type: SpreadType = SpreadType.RIGHT,
                 allow: List['VehicleType'] = None,
                 disallow: List['VehicleType'] = None,
                 width: float = None,
                 name: str = None,
                 end_offset: float = None,
                 sidewalk_width: float = None):
        """
        :param id: unique id of the edge
        :param from_node: node the edge starts at; the edge registers itself there
        :param to_node: node the edge ends at; the edge registers itself there
        :param type_id: id of the EdgeType this edge refers to ("" for none)
        :param speed: maximum speed in m/s
        :param priority: right-of-way priority of the edge
        :param length: edge length in m
        :param shape: polyline geometry of the edge
        :param spread_type: how lanes are placed relative to the geometry
        :param allow: vehicle classes allowed on this edge
        :param disallow: vehicle classes disallowed on this edge
        :param width: lane width in m
        :param name: street name (not required to be unique)
        :param end_offset: stop offset before the junction in m
        :param sidewalk_width: width of an added sidewalk in m
        """
        self.id = id
        self.from_node = from_node
        self.to_node = to_node
        # Keep Node.incoming/outgoing consistent with edge construction.
        if from_node:
            from_node.add_outgoing(self)
        if to_node:
            to_node.add_incoming(self)
        self.type_id = type_id
        # Legacy private mirror; kept because external code may still read it.
        self._priority = priority
        self.speed = speed
        self.priority = priority
        self.length = length
        self.shape = shape
        self.spread_type = spread_type
        self.allow = allow
        self.disallow = disallow
        self.width = width
        self.name = name
        self.end_offset = end_offset
        self.sidewalk_width = sidewalk_width
        self._lanes: List['Lane'] = []
        # Neighbouring edges grouped by the node they attach through.
        self._incoming: Dict[Node, List[Edge]] = defaultdict(list)
        self._outgoing: Dict[Node, List[Edge]] = defaultdict(list)
        # Legacy private mirror; kept because external code may still read it.
        self._name = name

    @property
    def num_lanes(self) -> int:
        """Number of lanes currently attached to this edge."""
        return len(self._lanes)

    @property
    def lanes(self) -> List['Lane']:
        """Lanes of this edge, ordered by lane index."""
        return self._lanes

    def add_lane(self, lane: 'Lane') -> int:
        """Append *lane* and return its index on this edge.

        The edge-level speed/length mirror the most recently added lane.
        """
        index = len(self._lanes)
        self._lanes.append(lane)
        self.speed = lane.speed
        self.length = lane.length
        return index

    def add_outgoing(self, edge: 'Edge'):
        """Register a follower edge, grouped by its destination node."""
        self._outgoing[edge.to_node].append(edge)

    def add_incoming(self, edge: 'Edge'):
        """Register a predecessor edge, grouped by its origin node."""
        self._incoming[edge.from_node].append(edge)

    @property
    def incoming(self) -> List['Edge']:
        """All predecessor edges, flattened across nodes."""
        return [e for edges in self._incoming.values() for e in edges]

    @property
    def outgoing(self) -> List['Edge']:
        """All follower edges, flattened across nodes."""
        return [e for edges in self._outgoing.values() for e in edges]

    def to_xml(self) -> str:
        """Convert this edge (including its lanes) to its xml representation.

        Optional attributes are only emitted when set.
        """
        edge = ET.Element("edge")
        edge.set("id", str(self.id))
        edge.set("from", str(self.from_node.id))
        edge.set("to", str(self.to_node.id))
        if self.type_id:
            edge.set("type", str(self.type_id))
        if self.num_lanes > 0:
            edge.set("numLanes", str(self.num_lanes))
        if self.speed is not None:
            edge.set("speed", str(self.speed))
        if self.priority is not None:
            edge.set("priority", str(self.priority))
        if self.length is not None:
            edge.set("length", str(self.length))
        if self.shape is not None:
            edge.set("shape", to_shape_string(self.shape))
        edge.set("spreadType", str(self.spread_type.value))
        if self.allow:
            edge.set("allow", " ".join([str(a.value) for a in self.allow]))
        if self.disallow:
            # Fixed: this previously serialized self.allow under "disallow",
            # emitting the wrong vehicle classes.
            edge.set("disallow", " ".join([str(d.value) for d in self.disallow]))
        if self.width is not None:
            edge.set("width", str(self.width))
        if self.name is not None:
            edge.set("name", self.name)
        if self.end_offset is not None:
            edge.set("endOffset", str(self.end_offset))
        if self.sidewalk_width is not None:
            edge.set("sidewalkWidth", str(self.sidewalk_width))
        for lane in self._lanes:
            edge.append(ET.fromstring(lane.to_xml()))
        return ET.tostring(edge, encoding="unicode")

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.to_xml()

    def __hash__(self):
        return hash((self.id, self.from_node.id, self.to_node.id, self.type_id, *self._lanes))

    def __eq__(self, other: 'Edge'):
        return type(self) == type(other) \
            and self.id == other.id \
            and self.from_node == other.from_node \
            and self.to_node == other.to_node \
            and self.type_id == other.type_id \
            and len(self._lanes) == len(other._lanes) \
            and all(x == y for x, y in zip(self._lanes, other._lanes))

    def __ne__(self, other: 'Edge'):
        return not self.__eq__(other)
#
# Lane
#
def add_junction_pos(shape, fromPos, toPos):
    """Return *shape* extended by *fromPos*/*toPos* when they differ from the
    current endpoints. Assumes the shape and both positions share the same
    dimensionality."""
    extended = list(shape)
    if shape[0] != fromPos:
        extended.insert(0, fromPos)
    if shape[-1] != toPos:
        extended.append(toPos)
    return extended
class Lane:
    """A lane of a SUMO network edge (plain-XML <lane> element).

    A lane belongs to exactly one Edge and registers itself with that edge on
    construction, which also assigns its index.
    """

    def __init__(self,
                 edge: 'Edge',
                 speed: float,
                 length: float,
                 width: float,
                 allow: List['VehicleType'] = None,
                 disallow: List['VehicleType'] = None,
                 shape: np.ndarray = None):
        """
        :param edge: edge this lane belongs to; the lane registers itself via edge.add_lane
        :param speed: maximum speed in m/s
        :param length: lane length in m
        :param width: lane width in m
        :param allow: allowed vehicle classes; the complement becomes disallow
        :param disallow: disallowed vehicle classes; the complement becomes allow
        :param shape: polyline geometry of the lane
        """
        self._edge = edge
        self._speed = speed
        self._length = length
        self._width = width
        self._shape = shape if shape is not None else np.empty(0)
        self._shapeWithJunctions = None
        self._shapeWithJunctions3D = None
        self._outgoing: List['Connection'] = []
        self._adjacent_opposite = None  # added by Lisa
        self._allow: List['VehicleType'] = []
        self._disallow: List['VehicleType'] = []
        self._set_allow_disallow(allow, disallow)
        # Registering with the edge assigns this lane its index on that edge.
        self._index = edge.add_lane(self)

    @property
    def id(self) -> str:
        """SUMO lane id: "<edge id>_<lane index>"."""
        return f"{self._edge.id}_{self.index}"

    def _set_allow_disallow(self, allow: Optional[List['VehicleType']], disallow: Optional[List['VehicleType']]):
        """Keep allow/disallow consistent: when only one list is given, the
        other becomes its complement over all VehicleType members."""
        if allow is not None and disallow is not None:
            assert set(allow).isdisjoint(set(disallow))
            self._allow = allow
            self._disallow = disallow
        elif allow:
            self._disallow: List['VehicleType'] = list(set(VehicleType) - set(allow))
        elif disallow:
            self._allow: List['VehicleType'] = list(set(VehicleType) - set(disallow))

    @property
    def edge(self) -> 'Edge':
        return self._edge

    @edge.setter
    def edge(self, edge: 'Edge'):
        # NOTE(review): does not re-register the lane with the new edge.
        self._edge = edge

    @property
    def speed(self) -> float:
        return self._speed

    @speed.setter
    def speed(self, speed: float):
        self._speed = speed

    @property
    def length(self) -> float:
        return self._length

    @length.setter
    def length(self, length: float):
        self._length = length

    @property
    def width(self) -> float:
        return self._width

    @width.setter
    def width(self, width: float):
        self._width = width

    def setAdjacentOpposite(self, opposite_lane_id):
        """Store the id of the opposite-direction lane adjacent to this one."""
        self._adjacent_opposite = opposite_lane_id

    def getAdjacentOpposite(self):
        """Return the id of the adjacent opposite-direction lane (or None)."""
        return self._adjacent_opposite

    @property
    def shape(self) -> np.ndarray:
        return self._shape

    @shape.setter
    def shape(self, shape: np.ndarray):
        self._shape = shape

    @property
    def bounding_box(self) -> Tuple[float, float, float, float]:
        """Axis-aligned bounding box of the lane shape as (xmin, ymin, xmax, ymax)."""
        s = self.shape
        xmin = float(np.min(s[:, 0]))
        xmax = float(np.max(s[:, 0]))
        ymin = float(np.min(s[:, 1]))
        ymax = float(np.max(s[:, 1]))
        assert (xmin != xmax or ymin != ymax)
        return xmin, ymin, xmax, ymax

    def getClosestLanePosAndDist(self, point, perpendicular=False):
        """Return (offset along the lane, distance) of *point* w.r.t. the lane shape.

        Fixed: the previous version called the non-existent self.getShape()
        and a non-existent sumolib.geomhelper.polygon attribute.
        NOTE(review): verify the function name against the installed sumolib
        version — confirm.
        """
        return sumolib.geomhelper.polygonOffsetAndDistanceToPoint(
            point, self.shape, perpendicular)

    @property
    def index(self) -> int:
        """Index of this lane on its edge (assigned at construction)."""
        return self._index

    @property
    def outgoing(self) -> List['Connection']:
        return self._outgoing

    def add_outgoing(self, conn: 'Connection'):
        """Register a connection that leaves this lane."""
        self._outgoing.append(conn)

    @property
    def allow(self) -> List['VehicleType']:
        return self._allow

    @allow.setter
    def allow(self, allow: List['VehicleType']):
        self._set_allow_disallow(allow, None)

    @property
    def disallow(self) -> List['VehicleType']:
        return self._disallow

    @disallow.setter
    def disallow(self, disallow: List['VehicleType']):
        self._set_allow_disallow(None, disallow)

    def to_xml(self) -> str:
        """
        Converts this lane to its xml representation.
        Falsy speed/length/width values (including 0) are omitted.
        """
        lane = ET.Element("lane")
        lane.set("index", str(self.index))
        if self.speed:
            lane.set("speed", str(self._speed))
        if self._length:
            lane.set("length", str(self._length))
        if len(self._shape) > 0:
            lane.set("shape", to_shape_string(self._shape))
        if self._width:
            lane.set("width", str(self._width))
        if self._allow:
            lane.set("allow", " ".join(a.value for a in self._allow))
        if self._disallow:
            lane.set("disallow", " ".join(d.value for d in self._disallow))
        return ET.tostring(lane, encoding="unicode")

    def __str__(self):
        return self.to_xml()

    def __repr__(self):
        return str(self)

    def __hash__(self):
        return hash((self.edge.id, self.index))

    def __eq__(self, other: 'Lane'):
        # NOTE(review): intentionally does not compare index or raw shape;
        # _shapeWithJunctions* are only ever set to None here.
        return type(self) == type(other) \
            and self.edge.id == other.edge.id \
            and self.speed == other.speed \
            and self.length == other.length \
            and self.width == other.width \
            and self._shapeWithJunctions == other._shapeWithJunctions \
            and self._shapeWithJunctions3D == other._shapeWithJunctions3D \
            and len(self.outgoing) == len(other.outgoing) \
            and all(x == y for x, y in zip(self.outgoing, other.outgoing)) \
            and len(self.allow) == len(other.allow) \
            and all(x == y for x, y in zip(self.allow, other.allow)) \
            and len(self.disallow) == len(other.disallow) \
            and all(x == y for x, y in zip(self.disallow, other.disallow))

    def __ne__(self, other: 'Lane'):
        return not self.__eq__(other)
#
# Connection
#
def to_shape_string(shape: np.ndarray) -> str:
    """
    Serialize a sequence of points to SUMO's shape-attribute format:
    points separated by spaces, coordinates within a point by commas.
    :param shape: array of points
    :return: the same shape in string format
    """
    points = []
    for vertex in shape:
        points.append(",".join(str(coord) for coord in vertex))
    return " ".join(points)
def from_shape_string(shape: str) -> np.ndarray:
    """
    Parse SUMO's shape-attribute format back into a float ndarray of points.
    :param shape: "x1,y1 x2,y2 ..." string
    :return: array of shape (num points, num coordinates)
    """
    rows = []
    for point in shape.split(" "):
        rows.append([float(value) for value in point.split(",")])
    return np.asarray(rows, dtype=float)
@unique
class ConnectionDirection(Enum):
    """Turn direction of a connection, using SUMO's single-letter codes."""
    # constants as defined in sumo/src/utils/xml/SUMOXMLDefinitions.cpp
    STRAIGHT = "s"
    TURN = "t"
    LEFT = "l"
    RIGHT = "r"
    PARTLEFT = "L"
    PARTRIGHT = "R"
class Connection:
"""edge connection for a sumo network"""
def __init__(self,
from_edge: Edge,
to_edge: Edge,
from_lane: Lane,
to_lane: Lane,
direction: ConnectionDirection = None,
tls: 'TLSProgram' = None,
tl_link: int = None,
state=None,
via_lane_id: List[str] = None,
shape: Optional[np.ndarray] = None,
keep_clear: bool = None,
cont_pos=None,
prohibits: List["Connection"] = [],
change_left_allowed: Set['VehicleType'] = None,
change_right_allowed: Set['VehicleType'] = None,
forbidden=False):
self._from = from_edge
self._to = to_edge
self._from_lane = from_lane
self._to_lane = to_lane
self._direction = direction
self._tls = tls
self._tl_link = tl_link
self._state = state
self._via: List[str] = via_lane_id if via_lane_id is not None else []
self._shape = shape
self._keep_clear = keep_clear
self._cont_pos = cont_pos
self._prohibits = prohibits
self._change_left_allowed = None
self._change_right_allowed = None
self.change_left_allowed = change_left_allowed
self.change_right_allowed = change_right_allowed
self._forbidden = forbidden
@property
def from_edge(self) -> Edge:
return self._from
@from_edge.setter
def from_edge(self, from_edge: Edge):
self._from = from_edge
@property
def to_edge(self) -> Edge:
return self._to
@to_edge.setter
def to_edge(self, to_edge: Edge):
self._to = to_edge
@property
def from_lane(self) -> Lane:
return self._from_lane
@from_lane.setter
def from_lane(self, from_lane: Lane):
self._from_lane = from_lane
@property
def to_lane(self) -> Lane:
return self._to_lane
@to_lane.setter
def to_lane(self, to_lane: Lane):
self._to_lane = to_lane
@property
def via(self) -> Optional[List[str]]:
return self._via
@via.setter
def via(self, via: List[str]):
self._via = via
@property
def direction(self):
return self._direction
@property
def tls(self):
return self._tls
@tls.setter
def tls(self, tls: 'TLSProgram'):
self._tls = tls
@property
def tl_link(self) -> int:
return self._tl_link
@tl_link.setter
def tl_link(self, tl_link: int):
self._tl_link = tl_link
def get_junction_index(self):
return self._from.to_node.getLinkIndex(self)
@property
def junction(self) -> Node:
return self._from.to_node
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state
@property
def shape(self) -> np.ndarray:
return self._shape
@shape.setter
def shape(self, shape: np.ndarray):
self._shape = shape
@shape.setter
def shape(self, shape: np.ndarray):
self._shape = shape
@property
def prohibits(self) -> List["Connection"]:
return self._prohibits
@prohibits.setter
def prohibits(self, prohibits):
self._prohibits = prohibits if self._prohibits is not None else []
@property
def connection_string(self) -> str:
return f"{self.from_lane.id}->{self.to_lane.id}"
@property
def change_left_forbidden(self) -> Set['VehicleType']:
return set(VehicleType) - self._change_left_allowed
@change_left_forbidden.setter
def change_left_forbidden(self, change_left_forbidden):
self._change_left_allowed = set(VehicleType) - set(change_left_forbidden) \
if change_left_forbidden is not None else set(VehicleType)
@property
def change_right_forbidden(self) -> Set['VehicleType']:
return set(VehicleType) - self._change_right_allowed \
if self._change_right_allowed is not None else set(VehicleType)
@change_right_forbidden.setter
def change_right_forbidden(self, change_right_forbidden):
self._change_right_allowed = set(VehicleType) - set(change_right_forbidden) \
if change_right_forbidden is not None else set(VehicleType)
@property
def change_left_allowed(self) -> Set['VehicleType']:
return self._change_left_allowed
@change_left_allowed.setter
def change_left_allowed(self, change_left_allowed):
self._change_left_allowed = set(change_left_allowed) \
if change_left_allowed is not None else set(VehicleType)
@property
def change_right_allowed(self) -> Set['VehicleType']:
return self._change_right_allowed
@change_right_allowed.setter
def change_right_allowed(self, change_right_allowed):
self._change_right_allowed = set(change_right_allowed) \
if change_right_allowed is not None else set(VehicleType)
def to_xml(self) -> str:
"""
Converts this connection to it's xml representation
"""
c = ET.Element("connection")
c.set("from", str(self._from.id))
c.set("to", str(self._to.id))
c.set("fromLane", str(self._from_lane.index))
c.set("toLane", str(self._to_lane.index))
if self._via is not None:
c.set("via", " ".join(self._via))
if self._direction is not None:
c.set("dir", str(self._direction))
if self._tls is not None:
c.set("tl", str(self._tls.id))
if self._tl_link is not None:
c.set("linkIndex", str(self._tl_link))
if self._state is not None:
c.set("state", str(self._state))
if self._forbidden is True:
c.set("disallow", "all")
if self._shape is not None:
c.set("shape", to_shape_string(self._shape))
if self._keep_clear is not None:
c.set("keepClear", "true" if self._keep_clear is True else "false")
if self._cont_pos is not None:
c.set("contPos", str(self._cont_pos))
set_allowed_changes(c, self)
return ET.tostring(c, encoding="unicode")
def get_prohibition_xmls(self) -> List[str]:
xmls = []
for p in self.prohibits:
x = ET.Element("prohibition")
x.set("prohibitor", self.connection_string)
x.set("prohibited", p.connection_string)
xmls.append(ET.tostring(x, encoding="unicode"))
return xmls
def __str__(self):
return self.to_xml()
def __repr__(self):
return str(self)
def __hash__(self):
return hash((self._from.id, self._to.id, self._from_lane.id, self._to_lane.id, self._direction, self._tls,
self._tl_link, self._state, len(self._via) if self._via else 0, self._keep_clear, self._cont_pos))
def __eq__(self, other: 'Connection'):
return type(self) == type(other) and self._from == other._from and self._to == other._to \
and self._direction == other._direction and self._tls == other._tls and self._tl_link == other._tl_link \
and self._state == other._state and len(self._via) == len(other._via) \
and all(x == y for x, y in zip(self._via, other._via)) \
and self._keep_clear == other._keep_clear and self._cont_pos == other._cont_pos
def __ne__(self, other: 'Connection'):
return not self.__eq__(other)
#
# Crossings
#
class Crossing:
    """A pedestrian crossing over a set of edges at a node
    (plain-XML <crossing> element)."""

    def __init__(self,
                 node: Node,
                 edges: Iterable[Edge],
                 priority: bool = None,
                 width: float = None,
                 shape=None,
                 link_index: int = None,
                 link_index_2: int = None,
                 discard: bool = None):
        # All values are stored as given; serialization happens in to_xml.
        self.node = node
        self.edges = edges
        self.priority = priority
        self.width = width
        self.shape = shape
        self.link_index = link_index
        self.link_index_2 = link_index_2
        self.discard = discard

    def __str__(self) -> str:
        return str(self.to_xml())

    def to_xml(self) -> str:
        """Serialize this crossing; optional attributes are emitted only when set."""
        element = ET.Element("crossing")
        element.set("node", str(self.node.id))
        element.set("edges", " ".join(str(edge.id) for edge in self.edges))
        optional_attributes = [
            ("priority", self.priority, str),
            ("width", self.width, str),
            ("shape", self.shape,
             lambda sh: " ".join(",".join(str(coord) for coord in v) for v in sh)),
            ("linkIndex", self.link_index, str),
            ("linkIndex2", self.link_index_2, str),
            ("discard", self.discard, str),
        ]
        for attribute, value, fmt in optional_attributes:
            if value is not None:
                element.set(attribute, fmt(value))
        return ET.tostring(element, encoding="unicode")
#
# Edge Type Manager
#
def _bool_to_str(b: bool) -> str:
return "true" if b else "false"
def _str_to_bool(s: str) -> bool:
return s == "true"
# generic type
# _T ties a converter function's return type to its default value's type
# (used by EdgeType.from_xml's attribute-parsing helper).
_T = TypeVar("_T")
class EdgeType:
    def __init__(self, id: str,
                 allow: List['VehicleType'] = None,
                 disallow: List['VehicleType'] = None,
                 discard: bool = False,
                 num_lanes: int = -1,
                 oneway=False,
                 priority: int = 0,
                 speed: float = 13.89,
                 sidewalk_width: float = -1):
        """
        Constructs a SUMO Edge Type
        Documentation from: https://sumo.dlr.de/docs/SUMO_edge_type_file.html
        :param id: The name of the road type. This is the only mandatory attribute.
        For OpenStreetMap data, the name could, for example, be highway.trunk or highway.residential.
        For ArcView data, the name of the road type is a number.
        :param allow: List of allowed vehicle classes
        :param disallow: List of not allowed vehicle classes
        :param discard: If "yes", edges of that type are not imported. This parameter is optional and defaults to false.
        :param num_lanes: The number of lanes on an edge. This is the default number of lanes per direction.
        :param oneway: If "yes", only the edge for one direction is created during the import.
        (This attribute makes no sense for SUMO XML descriptions but, for example, for OpenStreetMap files.)
        :param priority: A number, which determines the priority between different road types.
        netconvert derives the right-of-way rules at junctions from the priority.
        The number starts with one; higher numbers represent more important roads.
        :param speed: The default (implicit) speed limit in m/s.
        :param sidewalk_width: The default width for added sidewalks (defaults to -1 which disables extra sidewalks).
        """
        self.id = id
        assert not (allow and disallow and set(allow) & set(disallow)), \
            f"allow and disallow contain common elements {set(allow) & set(disallow)}"
        self.allow: List['VehicleType'] = []
        if allow:
            assert set(allow).issubset(set(VehicleType)), \
                f"allow contains invalid classes {set(allow) - set(VehicleType)}"
            self.allow: List['VehicleType'] = allow
        self.disallow: List['VehicleType'] = []
        if disallow:
            assert set(disallow).issubset(set(VehicleType)), \
                f"disallow contains invalid classes {set(disallow) - set(VehicleType)}"
            self.disallow: List['VehicleType'] = disallow
        self.discard = discard
        self.num_lanes = num_lanes
        self.oneway = oneway
        self.priority = priority
        self.speed = speed
        self.sidewalk_width = sidewalk_width

    @classmethod
    def from_xml(cls, xml: str) -> 'EdgeType':
        """
        Creates an instance of this class from the given xml representation
        :param xml: xml string of a single <type> element
        :return: the parsed EdgeType
        """
        root = ET.fromstring(xml)

        def get_attr(key: str, convert, default):
            # Read attribute *key* and convert it, falling back to *default*.
            # ("convert" instead of the builtin-shadowing name "map".)
            value = root.get(key)
            return convert(value) if value else default

        def str_to_vehicle_type(value: str) -> 'VehicleType':
            return VehicleType(value)

        return cls(id=root.get("id"),
                   allow=get_attr("allow", lambda sp: [str_to_vehicle_type(s) for s in sp.split(" ")], []),
                   disallow=get_attr("disallow", lambda sp: [str_to_vehicle_type(s) for s in sp.split(" ")], []),
                   discard=get_attr("discard", _str_to_bool, False),
                   num_lanes=get_attr("numLanes", int, -1),
                   oneway=get_attr("oneway", _str_to_bool, False),
                   priority=get_attr("priority", int, 0),
                   speed=get_attr("speed", float, 13.89),
                   # Fixed: parse as float — to_xml writes "%.2f" strings,
                   # which int() rejects, breaking the to_xml/from_xml round trip.
                   sidewalk_width=get_attr("sidewalkWidth", float, -1))

    def to_xml(self) -> str:
        """
        Converts this edge type to its xml representation
        :return: xml representation of this EdgeType
        """
        node = ET.Element("type")
        node.set("id", str(self.id))
        if self.allow:
            node.set("allow", " ".join(a.value for a in self.allow))
        if self.disallow:
            node.set("disallow", " ".join(d.value for d in self.disallow))
        if self.discard:
            node.set("discard", _bool_to_str(self.discard))
        if self.num_lanes != -1:
            node.set("numLanes", str(self.num_lanes))
        if self.oneway:
            node.set("oneway", _bool_to_str(self.oneway))
        if self.priority:
            node.set("priority", str(self.priority))
        if self.speed:
            node.set("speed", f"{self.speed:.2f}")
        if self.sidewalk_width > 0:
            node.set("sidewalkWidth", f"{self.sidewalk_width:.2f}")
        return ET.tostring(node, encoding="unicode")

    def __str__(self):
        return self.to_xml()
class EdgeTypes:
    """Container for the EdgeType definitions of a network (SUMO types file)."""

    def __init__(self, types: Dict[str, 'EdgeType'] = None):
        # Maps type id -> EdgeType.
        self.types: Dict[str, 'EdgeType'] = types if types else dict()

    @classmethod
    def from_xml(cls, xml: str) -> 'EdgeTypes':
        """Parse a <types> document into an EdgeTypes container."""
        root = ET.fromstring(xml)
        types: Dict[str, 'EdgeType'] = {}
        for edge_type in root.iter("type"):
            types[edge_type.get("id")] = EdgeType.from_xml(ET.tostring(edge_type, encoding="unicode"))
        return cls(types)

    def to_xml(self) -> str:
        """Serialize all contained types to a SUMO types-file document."""
        types = ET.Element("types")
        types.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
        types.set("xsi:noNamespaceSchemaLocation", "http://sumo.dlr.de/xsd/types_file.xsd")
        # Iterate values directly; the previous loop shadowed the builtin
        # "type" and ignored the key.
        for edge_type in self.types.values():
            types.append(ET.fromstring(edge_type.to_xml()))
        return ET.tostring(types, encoding="unicode")

    def _create_from_update(self, old_id: str, attr: str, value) -> Optional['EdgeType']:
        """Derive (and cache) a new EdgeType from self.types[old_id] with
        *attr* set to *value*.

        The derived type is registered under the deterministic id
        "<old id>_<attr>_<value repr>"; an already-existing derived type is
        returned as-is. Returns None when old_id is unknown.
        """
        if old_id not in self.types:
            return None
        edge_type = self.types[old_id]
        # Strings are iterable too — only join genuine collections.
        if isinstance(value, Iterable) and not isinstance(value, str):
            val_rep = "_".join([str(v) for v in value])
        else:
            val_rep = str(value)
        new_id = f"{edge_type.id}_{attr}_{val_rep}"
        if new_id in self.types:
            return self.types[new_id]
        new_type = deepcopy(edge_type)
        new_type.id = new_id
        setattr(new_type, attr, value)
        self.types[new_type.id] = new_type
        return new_type

    def create_from_update_priority(self, old_id: str, priority: int) -> Optional['EdgeType']:
        return self._create_from_update(old_id, "priority", priority)

    def create_from_update_speed(self, old_id: str, speed: float) -> Optional['EdgeType']:
        # Speed is rounded to match to_xml's two-decimal serialization.
        return self._create_from_update(old_id, "speed", round(speed, 2))

    def create_from_update_oneway(self, old_id: str, oneway: bool) -> Optional['EdgeType']:
        return self._create_from_update(old_id, "oneway", oneway)

    def create_from_update_allow(self, old_id: str, allow: List['VehicleType']) -> Optional['EdgeType']:
        new_type = self._create_from_update(old_id, "allow", allow)
        # setattr(new_type, "disallow", list(set(new_type.disallow) - set(new_type.allow)))
        return new_type

    def create_from_update_disallow(self, old_id: str, disallow: List['VehicleType']) -> Optional['EdgeType']:
        new_type = self._create_from_update(old_id, "disallow", disallow)
        # setattr(new_type, "allow", list(set(new_type.allow) - set(new_type.disallow)))
        return new_type
#
# Traffic Light Systems
#
class SignalState(Enum):
    """
    Single-character signal states of a traffic-light link.

    Adapted from: https://sumo.dlr.de/docs/Simulation/Traffic_Lights.html#tllogic62_attributes
    A Phase's state list is joined into the tlLogic "state" string (see Phase.to_xml).
    """
    # 'red light' for a signal - vehicles must stop
    RED = "r"
    # 'amber (yellow) light' for a signal -
    # vehicles will start to decelerate if far away from the junction, otherwise they pass
    YELLOW = "y"
    # 'green light' for a signal, no priority -
    # vehicles may pass the junction if no vehicle uses a higher priorised foe stream,
    # otherwise they decelerate for letting it pass.
    # They always decelerate on approach until they are within the configured visibility distance
    GREEN = "g"
    # 'green light' for a signal, priority -
    # vehicles may pass the junction
    GREEN_PRIORITY = "G"
    # 'green right-turn arrow' requires stopping -
    # vehicles may pass the junction if no vehicle uses a higher priorised foe stream.
    # They always stop before passing.
    # This is only generated for junction type traffic_light_right_on_red.
    GREEN_TURN_RIGHT = "s"
    # 'red+yellow light' for a signal, may be used to indicate upcoming
    # green phase but vehicles may not drive yet (shown as orange in the gui)
    RED_YELLOW = "u"
    # 'off - blinking' signal is switched off, blinking light indicates vehicles have to yield
    BLINKING = "o"
    # 'off - no signal' signal is switched off, vehicles have the right of way
    NO_SIGNAL = "O"
class Phase:
    """A single phase of a traffic-light program (<phase> element)."""

    def __init__(self,
                 duration: float,
                 state: List['SignalState'],
                 min_dur: int = None,
                 max_dur: int = None,
                 name: str = None,
                 next: List[int] = None):
        """
        Adapted from: https://sumo.dlr.de/docs/Simulation/Traffic_Lights.html#tllogic62_attributes
        :param duration: The duration of the phase (sec)
        :param state: The traffic light states for this phase, see below
        :param min_dur: The minimum duration of the phase when using type actuated. Optional, defaults to duration.
        :param max_dur: The maximum duration of the phase when using type actuated. Optional, defaults to duration.
        :param name: An optional description for the phase. This can be used to establish the
        correspondence between SUMO-phase-indexing and traffic engineering phase names.
        :param next: The next phase in the cycle after the current.
        This is useful when adding extra transition phases to a traffic light plan which are not part of every cycle.
        Traffic lights of type 'actuated' can make use of a list of indices for
        selecting among alternative successor phases.
        """
        self.duration = duration
        self.state = state
        self.min_dur = min_dur
        self.max_dur = max_dur
        self.name = name
        self.next = next

    def to_xml(self) -> str:
        """Convert this phase to its xml representation; optional attributes
        are only emitted when set."""
        phase = ET.Element("phase")
        phase.set("duration", str(self.duration))
        # One character per controlled link, e.g. "Ggrr".
        phase.set("state", "".join([s.value for s in self.state]))
        if self.min_dur is not None:
            phase.set("minDur", str(self.min_dur))
        if self.max_dur is not None:
            phase.set("maxDur", str(self.max_dur))
        if self.name is not None:
            phase.set("name", str(self.name))
        if self.next is not None:
            # Fixed: str(list) would emit "[1, 2]"; SUMO expects the phase
            # indices as a space-separated list ("1 2").
            phase.set("next", " ".join(str(i) for i in self.next))
        return ET.tostring(phase, encoding="unicode")

    def __str__(self):
        return str(self.to_xml())

    def __repr__(self):
        return str(self)
class TLSType(Enum):
    """
    Adapted from: https://sumo.dlr.de/docs/Simulation/Traffic_Lights.html
    The type of the traffic light
    - fixed phase durations,
    - phase prolongation based on time gaps between vehicles (actuated),
    - or on accumulated time loss of queued vehicles (delay_based)
    """
    # Fixed phase durations.
    STATIC = "static"
    # Phases prolonged based on time gaps between vehicles.
    ACTUATED = "actuated"
    # Phases prolonged based on accumulated time loss of queued vehicles.
    DELAY_BASED = "delay_based"
class TLSProgram:
    """A SUMO traffic-light program (a <tlLogic> element) for one junction."""

    def __init__(self, id: str, offset: int, program_id: str, tls_type: TLSType = TLSType.STATIC):
        """
        Adapted from: https://sumo.dlr.de/docs/Simulation/Traffic_Lights.html#tllogic62_attributes
        :param id: The id of the traffic light. This must be an existing traffic light id in the .net.xml file.
        Typically the id for a traffic light is identical with the junction id.
        The name may be obtained by right-clicking the red/green bars in front of a controlled intersection.
        :param offset: The initial time offset of the program
        :param program_id: The id of the traffic light program;
        This must be a new program name for the traffic light id.
        Please note that "off" is reserved, see below.
        :param tls_type: The type of the traffic light (fixed phase durations, phase prolongation based on time
        gaps between vehicles (actuated), or on accumulated time loss of queued vehicles (delay_based) )
        """
        self._id = id
        self._type = tls_type
        self._offset = offset
        self._program_id = program_id
        self._phases: List[Phase] = []

    @property
    def id(self) -> str:
        return self._id

    @property
    def program_id(self) -> str:
        # BUG FIX: this property previously returned self._id, so the program
        # id was never observable through the public API.
        return self._program_id

    @property
    def phases(self) -> List[Phase]:
        return self._phases

    @phases.setter
    def phases(self, phases: List[Phase]):
        self._phases = phases

    @property
    def offset(self) -> int:
        return self._offset

    @offset.setter
    def offset(self, offset: int):
        self._offset = offset

    def add_phase(self, phase: Phase):
        """Append *phase* at the end of this program's phase cycle."""
        self._phases.append(phase)

    def to_xml(self) -> str:
        """Serialize as a <tlLogic> element containing all phases, in order."""
        tl = ET.Element("tlLogic")
        tl.set("id", self._id)
        tl.set("type", str(self._type.value))
        tl.set("programID", str(self._program_id))
        tl.set("offset", str(int(self._offset)))
        for phase in self._phases:
            tl.append(ET.fromstring(phase.to_xml()))
        return ET.tostring(tl, encoding="unicode")

    def __str__(self):
        return str(self.to_xml())

    def __repr__(self):
        return str(self)
class TLS:
    """Traffic Light Signal: holds controlled connections and TLSPrograms,
    indexed first by traffic-light id and then by program id."""

    def __init__(self):
        self._connections: List[Connection] = []
        self._maxConnectionNo = -1
        self._programs: Dict[str, Dict[str, TLSProgram]] = defaultdict(dict)

    @property
    def connections(self) -> List[Connection]:
        return self._connections

    def add_connection(self, connection: Connection):
        """Register one controlled connection."""
        self._connections.append(connection)

    @property
    def programs(self) -> Dict[str, Dict[str, TLSProgram]]:
        return self._programs

    def add_program(self, program: TLSProgram):
        """Store *program* under its traffic-light id and program id."""
        self._programs[program._id][program._program_id] = program

    def clear_programs(self):
        """Drop every stored program (registered connections are kept)."""
        self._programs.clear()

    def to_xml(self) -> str:
        """Serialize all programs followed by all connections under <tlLogics>."""
        root = ET.Element("tlLogics")
        for programs_for_tl in self._programs.values():
            for program in programs_for_tl.values():
                root.append(ET.fromstring(program.to_xml()))
        for connection in self._connections:
            root.append(ET.fromstring(connection.to_xml()))
        return ET.tostring(root, encoding="unicode")

    def __str__(self):
        return self.to_xml()

    def __repr__(self):
        return str(self)
#
# Roundabout
#
class Roundabout:
    """A SUMO roundabout: the collection of edges forming its ring."""

    def __init__(self, edges: List[Edge] = None):
        # Fall back to a fresh list so instances never share a default.
        self._edges = [] if edges is None else edges

    @property
    def edges(self) -> List[Edge]:
        return self._edges

    @edges.setter
    def edges(self, edges: List[Edge]):
        self._edges = edges

    def to_xml(self) -> str:
        """Serialize as '<roundabout edges="e1 e2 ..."/>'."""
        element = ET.Element("roundabout")
        element.set("edges", " ".join(str(edge.id) for edge in self.edges))
        return ET.tostring(element, encoding="unicode")
#
# Enums
#
@unique
class VehicleType(Enum):
    """SUMO vehicle (abstract) classes; values are SUMO's class-name strings.

    taken from sumo/src/utils/common/SUMOVehicleClass.cpp
    "public_emergency",  # deprecated
    "public_authority",  # deprecated
    "public_army",  # deprecated
    "public_transport",  # deprecated
    "transport",  # deprecated
    "lightrail",  # deprecated
    "cityrail",  # deprecated
    "rail_slow",  # deprecated
    "rail_fast",  # deprecated
    """
    PRIVATE = "private"
    EMERGENCY = "emergency"
    AUTHORITY = "authority"
    ARMY = "army"
    VIP = "vip"
    PASSENGER = "passenger"
    HOV = "hov"
    TAXI = "taxi"
    BUS = "bus"
    COACH = "coach"
    DELIVERY = "delivery"
    TRUCK = "truck"
    TRAILER = "trailer"
    TRAM = "tram"
    RAIL_URBAN = "rail_urban"
    RAIL = "rail"
    RAIL_ELECTRIC = "rail_electric"
    MOTORCYCLE = "motorcycle"
    MOPED = "moped"
    BICYCLE = "bicycle"
    PEDESTRIAN = "pedestrian"
    EVEHICLE = "evehicle"
    SHIP = "ship"
    CUSTOM1 = "custom1"
    CUSTOM2 = "custom2"
SUMO_VEHICLE_CLASSES: Tuple[str] = tuple(str(vehicle.value) for vehicle in VehicleType)
| CommonRoad/crgeo | commonroad_geometric/external/map_conversion/sumo_map/sumolib_net.py | sumolib_net.py | py | 62,888 | python | en | code | 25 | github-code | 13 |
71793555218 | """
This script clears the output of the rp_win as it can be rather long and hard to work with. The script
groups the ROP gadgets based on the gadget and shows a maximum of 10 addresses for better readability.
It also removes gadgets with call/jmp to a dword as it is hardly usable in exploitation scenarios.
Author: Tomas Kabrt
"""
import re
#Path to the output from rp-win - for example rp-win-x86.exe -f SNFS.dll -r 5 > SNFS.txt
# rp-win writes UTF-16 LE text; skip its 10-line banner before parsing.
with open('/Users/admin/Desktop/SNFS.txt', 'r', encoding='UTF-16 LE') as f:
    lines = f.readlines()[10:] # skip the first 10 lines
# Maps gadget text -> up to 10 addresses where that gadget was found.
line_dict = {}
for line in lines:
    #Do not save gadgets which end with call/jump to a dword - it has no usage in ROP building
    # NOTE(review): the slice offsets assume rp-win's fixed line layout —
    # first ~12 chars are the "0x........:" address prefix and the last 12
    # chars are the "(N found)" suffix — TODO confirm against actual output.
    if not re.findall(r'(call|jmp) dword \[[^\[]*] ;', line[:-12]):
        key = line[12:-12]
        value = line[:10].strip()
        if key in line_dict:
            # Cap at 10 addresses per gadget for readability.
            if len(line_dict[key]) < 10:
                line_dict[key].append(value)
        else:
            line_dict[key] = [value]
# Number of distinct gadgets kept.
print(len(line_dict))
with open("/Users/admin/SNFS_sorted.txt", "w") as f:
    for key, values in line_dict.items():
        f.write(key + " ||| " + " ".join(values) + "\n")
| tomas-kabrt/Exploits-Vulns | sort_rp_win_rop_gadgets.py | sort_rp_win_rop_gadgets.py | py | 1,172 | python | en | code | 0 | github-code | 13 |
14386319195 | #
# @lc app=leetcode.cn id=283 lang=python3
#
# [283] 移动零
#
from typing import List
# @lc code=start
class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        # Stable in-place compaction: copy each non-zero element forward,
        # then pad the remainder of the list with zeros.
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        for tail in range(write, len(nums)):
            nums[tail] = 0
# @lc code=end
# Quick manual check: zeros move to the end, order of non-zeros is kept.
l = [0, 0, 1]
Solution().moveZeroes(l)
print(l)
| largomst/leetcode-problem-solution | 283.移动零.py | 283.移动零.py | py | 546 | python | en | code | 0 | github-code | 13 |
41581933949 | import os
import pandas as pd
from luigi.format import Nop
from luigi import Task, Parameter, LocalTarget
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from machine_learning_utils.luigi.task import Requires, Requirement, TargetOutput
from machine_learning_utils.luigi.target import BaseAtomicProviderLocalTarget
from machine_learning_utils.functions.functions import eval_classifier
class DownloadData(Task):
    """A Luigi task to download data locally"""

    # Source location of the CSV to download (required).
    DATA = Parameter()
    # Local directory root and relative file name for the parquet output.
    LOCAL_ROOT = Parameter(default=os.path.abspath("data"))
    SHARED_RELATIVE_PATH = Parameter(default="data.parquet")

    def output(self):
        """Atomic local parquet target at LOCAL_ROOT/SHARED_RELATIVE_PATH."""
        target_path = os.path.join(self.LOCAL_ROOT, self.SHARED_RELATIVE_PATH)
        return BaseAtomicProviderLocalTarget(path=target_path, format=Nop)

    def run(self):
        """Read the CSV from DATA and persist it as parquet."""
        with self.output().open("wb") as sink:
            frame = pd.read_csv(self.DATA)
            frame.to_parquet(sink, engine="pyarrow")
class DataPreprocess(Task):
    """A Luigi task to preprocess data after download"""
    # NOTE: scaffold task — run() is intentionally unimplemented; uncomment
    # the Requirement below once feature extraction is defined.
    requires = Requires()
    # download = Requirement(DownloadData) - This task requires the DownloadData task as input
    output = TargetOutput(ext=".parquet", target_class=LocalTarget)
    def run(self):
        # Read in the data using df = pd.read_parquet(self.input()['download'].path, engine='pyarrow')
        # After you have implemented the necessary functions needed
        # extract the correct features, save data to parquet file using the statement below
        # df.to_parquet(self.output().path, engine='pyarrow')
        raise NotImplementedError()
class BuildModel(Task):
    """A Luigi task to classify a data using Support Vector Machine and Random Forest Model"""

    requires = Requires()
    preprocess = Requirement(DataPreprocess)
    # Name of the label column to predict.
    class_column = Parameter()
    output = TargetOutput(ext=".csv", target_class=LocalTarget)

    def run(self):
        """Evaluate an SVC and a random forest on the preprocessed data and
        write both reports side by side to a CSV file."""
        # Edit the classifiers to fit needs
        frame = pd.read_parquet(self.input()["preprocess"].path, engine="pyarrow")
        label_mask = frame.columns == self.class_column
        features = frame.loc[:, ~label_mask].values
        labels = frame.loc[:, label_mask].values.ravel()
        svc = SVC(kernel="rbf", gamma=0.05, C=2)
        forest = RandomForestClassifier(n_estimators=100, random_state=None, n_jobs=4)
        svc_report = eval_classifier(svc, features, labels)
        rf_report = eval_classifier(forest, features, labels)
        combined = svc_report.join(rf_report, lsuffix="_svc", rsuffix="_rf")
        combined.to_csv(self.output().path)
| Gayle19/2020fa-final-project-Gayle19 | machine_learning_utils/luigi/data.py | data.py | py | 2,626 | python | en | code | 0 | github-code | 13 |
16021865995 | """
Example demonstrating the use of dcompressee
"""
import os
import pathlib
import time
import dcompressee
# get path to local file
path = os.path.dirname(os.path.abspath(__file__))
# Example inputs cover one uncompressed file plus gzip, lz4 and bz2 variants
# (Seq0000 plain, Seq0001-0004 .gz, Seq0005-0008 .lz4, Seq0009 .bz2).
files_uncmp = os.path.join(path, "example_Seq0000.fasta")
files_gz = [os.path.join(path, f"example_Seq000{i}.fasta.gz") for i in range(1, 5)]
files_lz4 = [os.path.join(path, f"example_Seq000{i}.fasta.lz4") for i in range(5, 9)]
files_bz2 = os.path.join(path, "example_Seq0009.fasta.bz2")
files = [pathlib.Path(p) for p in [files_uncmp] + files_gz + files_lz4 + [files_bz2]]
# Time the whole unpack-and-print loop.
now = time.perf_counter()
for f in files:
    # unpack file
    unpacked = dcompressee.unpack(f)
    # do something with unpacked file
    print(f"Unpacked file: {unpacked}")
    print(unpacked.read_text(encoding="utf8"))
print(f"Time taken: {time.perf_counter() - now}")
| MDU-PHL/dcompressee | examples/example1.py | example1.py | py | 836 | python | en | code | 0 | github-code | 13 |
36925786052 | import numpy as np
from external_libraries.spline import get_natural_cubic_spline_model
def spline(x=None, y=None, frames=None):
    """Fit natural cubic splines to x(frames) and y(frames) and resample both
    at every integer frame in [min(frames), max(frames)).

    NOTE(review): np.arange excludes max(frames) itself — confirm the final
    frame is intentionally dropped. Knot count is half the frame span.
    """
    first_frame = min(frames)
    last_frame = max(frames)
    knot_count = int((last_frame - first_frame) / 2)
    model_x = get_natural_cubic_spline_model(x=frames, y=x, minval=first_frame,
                                             maxval=last_frame, n_knots=knot_count)
    model_y = get_natural_cubic_spline_model(x=frames, y=y, minval=first_frame,
                                             maxval=last_frame, n_knots=knot_count)
    sample_frames = np.arange(first_frame, last_frame, 1)
    return model_x.predict(sample_frames), model_y.predict(sample_frames)
if __name__ == '__main__':
    # Parse "x1,y1;x2,y2;..." into parallel x/y lists with 1-based frame numbers.
    coordinates_text = input("Coordinates: (x1,y2;x2,y2;...)")
    coor_list = coordinates_text.split(';')
    count = 0
    x_list = []
    y_list = []
    time_list = []
    for coor_text in coor_list:
        count = count + 1
        # NOTE(review): this rebinds 'coor_list' (the list being iterated) to
        # the pair split — harmless since the for-loop holds its own iterator,
        # but a distinct name would be clearer.
        coor_list = coor_text.split(',')
        x_list.append(int(coor_list[0]))
        y_list.append(int(coor_list[1]))
        time_list.append(int(count))
    # Spline-interpolate the trajectory over the full frame range and print it.
    x_est, y_est = spline(np.array(x_list), np.array(y_list), np.array(time_list))
    print('x list: ' + str(x_est.tolist()))
print('y list: ' + str(y_est.tolist())) | RSantos94/vessel-impact-detection | tools/interpolate_tool.py | interpolate_tool.py | py | 1,268 | python | en | code | 1 | github-code | 13 |
73714033616 | """empty message
Revision ID: 67720836cc61
Revises: 3d9cea121ce6
Create Date: 2022-04-04 15:18:15.590110
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '67720836cc61'
down_revision = '3d9cea121ce6'
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'questions' and 'answers' tables of the quiz schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # questions: one row per quiz question, FK to an existing 'levels' row.
    op.create_table('questions',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('level_id', sa.Integer(), nullable=False),
    sa.Column('question_text', sa.String(length=140), nullable=False),
    sa.ForeignKeyConstraint(['level_id'], ['levels.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_index(op.f('ix_questions_level_id'), 'questions', ['level_id'], unique=False)
    # answers: candidate answers per question; 'correct' flags the right one.
    op.create_table('answers',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('question_id', sa.Integer(), nullable=False),
    sa.Column('answer_text', sa.String(length=15), nullable=False),
    sa.Column('correct', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_index(op.f('ix_answers_question_id'), 'answers', ['question_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'answers' and 'questions' tables (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop 'answers' first: it holds the FK onto 'questions'.
    op.drop_index(op.f('ix_answers_question_id'), table_name='answers')
    op.drop_table('answers')
    op.drop_index(op.f('ix_questions_level_id'), table_name='questions')
    op.drop_table('questions')
    # ### end Alembic commands ###
| d-rocham/sofka-challenge | backend/migrations/versions/67720836cc61_.py | 67720836cc61_.py | py | 1,672 | python | en | code | 0 | github-code | 13 |
41844112110 | import logging
import sys
import os
import optparse
from pmpmanager.__version__ import version as pmpman_version
import json
import lsblk
import time
import pmpmanager.db_devices as model
# NOTE(review): this guard executes before main() is defined below (the
# imports also continue after it), so running this module as a script raises
# NameError. It should be moved to the bottom of the file — TODO relocate.
if __name__ == "__main__":
    main()
import uuid
#import queue_display
import pmpmanager.initialise_db as devices
from cli_process import ProcesHandler
class CliInput:
    """Aggregates CLI state (a defaults dict and an action-callback table)
    and drives the module-level parameter/action helpers against it."""

    def __init__(self):
        self.defaults = dict()
        self.callbacks = dict()
        self.log = logging.getLogger("CliInput")
        self.callbacks_set({})

    def process_actions(self):
        """Dispatch registered callbacks via process_actions() and merge the
        returned settings back into the defaults."""
        output = process_actions(self.defaults, self.callbacks)
        if output is None:
            self.log.error("process_actions:failed")
            return
        self.defaults.update(output)

    def get_parrameters_enviroment(self):
        """Overlay environment-derived settings onto the defaults."""
        output = get_parrameters_enviroment(self.defaults)
        if output is None:
            self.log.error("get_parrameters_enviroment:failed")
            return
        self.defaults.update(output)

    def get_parrameters_cli_init(self):
        """Parse command-line options and merge them into the defaults.

        BUG FIX: the original indexed output["pmpman.cli.actions"] before its
        None-check (so a failed parse raised instead of being logged) and
        contained two dead no-op 'if ...: pass' branches.
        """
        output = get_parrameters_cli_init(self.defaults)
        if output is None:
            self.log.error("get_parrameters_cli_init:failed")
            return
        self.log.debug("pmpman.cli.actions=%s", output.get("pmpman.cli.actions"))
        self.defaults.update(output)

    def callbacks_set(self, update):
        """Replace the callback table; a None update is ignored and the
        current table is kept."""
        if update is None:
            return
        self.callbacks = update

    def callbacks_get(self):
        """Return the current callback table."""
        return self.callbacks
def main():
    """Script entry point: seed defaults from the environment, parse the CLI,
    connect the process handler, then run the selected actions."""
    defaults = dict()
    defaults = get_parrameters_enviroment(defaults)
    log = logging.getLogger("cli")
    UI = CliInput()
    UI.defaults = defaults
    UI.get_parrameters_cli_init()
    # NOTE(review): 'log' is re-created here and never used afterwards —
    # candidate for removal.
    log = logging.getLogger("cli")
    PH = ProcesHandler(UI)
    PH.Connect()
    UI.process_actions()
def process_actions(defaults, callbacks):
    """Invoke every callback whose key appears in defaults['pmpman.cli.actions'].

    Each matching entry's callback is called as callback(caller=defaults).
    Returns a shallow copy of *defaults*; warns when no actions are selected.
    """
    log = logging.getLogger("process_actions")
    output = dict(defaults)
    actions = output.get("pmpman.cli.actions")
    if actions is None:
        actions = set()
    if not actions:
        log.warning('No actions selected')
    for action_key in actions.intersection(callbacks.keys()):
        for entry in callbacks[action_key]:
            callback = entry.get('callback')
            callback(caller=defaults)
    return output
def get_parrameters_enviroment(defaults):
    """Overlay settings taken from environment variables onto a copy of
    *defaults* and return the merged dict."""
    output = dict(defaults)
    env_to_key = {
        'PMPMAN_LOG_CONF': 'pmpman.logcfg',
        'PMPMAN_RDBMS': 'pmpman.rdms',
        'PMPMAN_CFG': 'pmpman.path.cfg',
        'HOME': 'pmpman.path.home',
    }
    for env_name, setting_key in env_to_key.items():
        if env_name in os.environ:
            output[setting_key] = str(os.environ[env_name])
    return output
def get_parrameters_cli_init(defaults):
    """Parse command-line options, overlaying them on *defaults*.

    Returns a new dict containing the merged settings, including:
      * 'pmpman.logging.level' - effective log level from the -v/-q counts
      * 'pmpman.path.cfg'      - logging config file path, or None
      * 'pmpman.cli.actions'   - set of requested action identifiers
      * 'pmpman.rdms'          - DB connection string (defaulted if unset)
    Exits with status 1 when a requested logging config file does not exist.
    """
    output = dict()
    output.update(defaults)
    p = optparse.OptionParser(version="%prog " + pmpman_version)
    p.add_option('-d', '--database', action='store', help='Database conection string')
    p.add_option('-L', '--logcfg', action='store', help='Logfile configuration file.', metavar='CFG_LOGFILE')
    p.add_option('-v', '--verbose', action='count', help='Change global log level, increasing log output.', metavar='LOGFILE')
    p.add_option('-q', '--quiet', action='count', help='Change global log level, decreasing log output.', metavar='LOGFILE')
    p.add_option('-C', '--config-file', action='store', help='Configuration file.', metavar='CFG_FILE')
    p.add_option('--mark-udev', action='store', help='Called by udev $name')
    p.add_option('--add-filestore', action='store_true', help='List all known instalations')
    p.add_option('--list-partitions', action='store_true', help='Called by udev $name')
    p.add_option('--list-filestore', action='store_true', help='List all known instalations')
    p.add_option('--block-list', action='store_true', help='Scan All Partitions')
    p.add_option('--block-scan', action='store_true', help='Scan All Partitions')
    p.add_option('--queue-display', action='store_true', help='Scan All Partitions')
    options, arguments = p.parse_args()

    # Map the -v/-q counters onto a log level: 2 is the neutral WARNING step;
    # each -v lowers the counter by one, each -q raises it by one.
    level_counter = 2
    if options.verbose:
        level_counter -= options.verbose
    if options.quiet:
        level_counter += options.quiet
    level_steps = [logging.DEBUG, logging.INFO, logging.WARNING,
                   logging.ERROR, logging.FATAL, logging.CRITICAL]
    LoggingLevel = level_steps[max(0, min(level_counter, len(level_steps) - 1))]
    output["pmpman.logging.level"] = LoggingLevel

    if options.logcfg:
        output['pmpman.path.cfg'] = options.logcfg
    log_cfg_path = output.get('pmpman.path.cfg')
    if log_cfg_path is not None:
        # BUG FIX: this branch previously read the nonexistent attribute
        # options.log_config (AttributeError on optparse.Values), checked
        # *defaults* instead of *output*, and the else-branch clobbered the
        # just-set 'pmpman.path.cfg' with None.
        if os.path.isfile(str(log_cfg_path)):
            import logging.config  # "import logging" alone does not load logging.config
            logging.config.fileConfig(log_cfg_path)
            log = logging.getLogger("main")
        else:
            logging.basicConfig(level=LoggingLevel)
            log = logging.getLogger("main")
            log.error("Logfile configuration file '%s' was not found." % (log_cfg_path,))
            sys.exit(1)
    else:
        output['pmpman.path.cfg'] = None
        logging.basicConfig(level=LoggingLevel)
        log = logging.getLogger("main")

    # Translate each selected CLI flag into an action identifier.
    # (The original added the 'mark_udev' action twice; once is enough.)
    actions = set()
    if options.mark_udev:
        actions.add('pmpman.action.udev')
        output["pmpman.udev.partition"] = options.mark_udev
    if options.add_filestore:
        actions.add('pmpman.action.filestore.add')
    if options.list_partitions:
        actions.add('pmpman.action.partition.list')
    if options.block_list:
        actions.add('pmpman.action.block.list')
    if options.list_filestore:
        actions.add('pmpman.action.filestore.list')
    if options.block_scan:
        actions.add('pmpman.action.block.scan')
    if options.queue_display:
        actions.add('pmpman.action.queue.display')
    output["pmpman.cli.actions"] = actions

    if options.database:
        output['pmpman.rdms'] = options.database
    if output.get('pmpman.rdms') is None:
        output['pmpman.rdms'] = 'sqlite:///pmpman.db'
        log.info("Defaulting DB connection to '%s'" % (output['pmpman.rdms']))
    return output
| osynge/pmpman | pmpmanager/cli.py | cli.py | py | 7,399 | python | en | code | 0 | github-code | 13 |
72915521618 | import os
import pytest
from qutebrowser.qt.core import Qt
from qutebrowser.mainwindow import prompt as promptmod
from qutebrowser.utils import usertypes
class TestFileCompletion:
    """End-to-end tests for filename completion in the download prompt."""

    @pytest.fixture
    def get_prompt(self, qtbot, config_stub, key_config_stub):
        """Get a function to display a prompt with a path."""
        config_stub.val.bindings.default = {}
        def _get_prompt_func(path):
            question = usertypes.Question()
            question.title = "test"
            question.default = path
            prompt = promptmod.DownloadFilenamePrompt(question)
            qtbot.add_widget(prompt)
            # Wait until the file model has populated the directory listing
            # before the test starts interacting with the prompt.
            with qtbot.wait_signal(prompt._file_model.directoryLoaded):
                pass
            assert prompt._lineedit.text() == path
            return prompt
        return _get_prompt_func
    @pytest.mark.parametrize('steps, where, subfolder', [
        (1, 'next', 'a'),
        (1, 'prev', 'c'),
        (2, 'next', 'b'),
        (2, 'prev', 'b'),
    ])
    def test_simple_completion(self, tmp_path, get_prompt, steps, where,
                               subfolder):
        """Simply trying to tab through items."""
        testdir = tmp_path / 'test'
        # Create subfolders 'a', 'b', 'c' so tabbing has items to cycle over.
        for directory in 'abc':
            (testdir / directory).mkdir(parents=True)
        prompt = get_prompt(str(testdir) + os.sep)
        for _ in range(steps):
            prompt.item_focus(where)
        assert prompt._lineedit.text() == str((testdir / subfolder).resolve())
    def test_backspacing_path(self, qtbot, tmp_path, get_prompt):
        """When we start deleting a path we want to see the subdir."""
        testdir = tmp_path / 'test'
        for directory in ['bar', 'foo']:
            (testdir / directory).mkdir(parents=True)
        prompt = get_prompt(str(testdir / 'foo') + os.sep)
        # Deleting /f[oo/]
        with qtbot.wait_signal(prompt._file_model.directoryLoaded):
            for _ in range(3):
                qtbot.keyPress(prompt._lineedit, Qt.Key.Key_Backspace)
            # For some reason, this isn't always called when using qtbot.keyPress.
            prompt._set_fileview_root(prompt._lineedit.text())
        # 'foo' should get completed from 'f'
        prompt.item_focus('next')
        assert prompt._lineedit.text() == str(testdir / 'foo')
        # Deleting /[foo]
        for _ in range(3):
            qtbot.keyPress(prompt._lineedit, Qt.Key.Key_Backspace)
        # We should now show / again, so tabbing twice gives us bar -> foo
        prompt.item_focus('next')
        prompt.item_focus('next')
        assert prompt._lineedit.text() == str(testdir / 'foo')
    @pytest.mark.parametrize("keys, expected", [
        ([], ['bar', 'bat', 'foo']),
        ([Qt.Key.Key_F], ['foo']),
        ([Qt.Key.Key_A], ['bar', 'bat']),
    ])
    def test_filtering_path(self, qtbot, tmp_path, get_prompt, keys, expected):
        """Typing characters should filter the visible directory entries."""
        testdir = tmp_path / 'test'
        for directory in ['bar', 'foo', 'bat']:
            (testdir / directory).mkdir(parents=True)
        prompt = get_prompt(str(testdir) + os.sep)
        for key in keys:
            qtbot.keyPress(prompt._lineedit, key)
            prompt._set_fileview_root(prompt._lineedit.text())
        # Collect the entries the view still shows after filtering.
        num_rows = prompt._file_model.rowCount(prompt._file_view.rootIndex())
        visible = []
        for row in range(num_rows):
            parent = prompt._file_model.index(
                os.path.dirname(prompt._lineedit.text()))
            index = prompt._file_model.index(row, 0, parent)
            if not prompt._file_view.isRowHidden(index.row(), index.parent()):
                visible.append(index.data())
        assert visible == expected
    @pytest.mark.linux
    def test_root_path(self, get_prompt):
        """With / as path, show root contents."""
        prompt = get_prompt('/')
        assert prompt._file_model.rootPath() == '/'
| qutebrowser/qutebrowser | tests/unit/mainwindow/test_prompt.py | test_prompt.py | py | 3,870 | python | en | code | 9,084 | github-code | 13 |
13913005623 | from . import gamefield
import datetime
import random
import logging
LOGGER = logging.getLogger("reversi.game")
class GameState:
    """Mutable state of a Reversi game: the board, disk counters, whose turn
    it is, and whether the game has ended."""

    def __init__(self, field, current_player, mode):
        self._field = field
        self._current_player = current_player
        # Opponent of the current player (DiskType values are assumed 0/1).
        self._other_player = gamefield.DiskType(int(not self.current_player.value))
        self._mode = mode
        self._white_count = field.get_white_disks_count()
        self._black_count = field.get_black_disks_count()
        self._gameover = False

    @property
    def mode(self):
        return self._mode

    @property
    def field(self):
        return self._field

    @property
    def white_count(self):
        return self._white_count

    @property
    def black_count(self):
        return self._black_count

    @property
    def current_player(self):
        return self._current_player

    @property
    def other_player(self):
        return self._other_player

    @property
    def game_over(self):
        return self._gameover

    def make_turn(self, coords, flipping_instructions=None):
        """Try to make a move at *coords*; return True if the move was made.

        Flip instructions are (dx, dy, count) triples describing which runs of
        opponent disks to flip; they are computed from the field when not given.
        """
        if not self._field.check_coords(coords):
            return False
        if flipping_instructions is None:
            flipping_instructions = self._field.get_flip_instructions(coords, self._current_player)
        if flipping_instructions:
            self.place_or_flip_disk(*coords)
            for instruction in flipping_instructions:
                for i in range(instruction[2]):
                    new_x = coords[0] + instruction[0] * (i + 1)
                    new_y = coords[1] + instruction[1] * (i + 1)
                    self.place_or_flip_disk(new_x, new_y)
            # Swap players for the next turn.
            self._other_player = self.current_player
            self._current_player = gamefield.DiskType(int(not self._current_player.value))
            return True
        return False

    def place_or_flip_disk(self, *coords):
        """Put the current player's disk at *coords*, updating the counters.

        An empty cell increases the current player's count; an occupied cell
        is flipped (one count up, the other down).
        """
        if self.field[coords[0]][coords[1]] == gamefield.DiskType.NONE:
            if self._current_player == gamefield.DiskType.WHITE:
                self._white_count += 1
            else:
                self._black_count += 1
        else:
            if self._current_player == gamefield.DiskType.WHITE:
                self._white_count += 1
                self._black_count -= 1
            else:
                self._black_count += 1
                self._white_count -= 1
        self._field[coords[0]][coords[1]] = self._current_player

    def remove_disk(self, *coords):
        """Remove the disk at *coords* (if any) and update the counters."""
        if not self.field[coords[0]][coords[1]] == gamefield.DiskType.NONE:
            if self.field[coords[0]][coords[1]] == gamefield.DiskType.WHITE:
                self._white_count -= 1
            else:
                self._black_count -= 1
            # BUG FIX: the original wrote self.field[coords[0][coords[1]]],
            # which subscripts an int and raises TypeError; clear the cell
            # itself instead.
            self.field[coords[0]][coords[1]] = gamefield.DiskType.NONE

    def get_turn(self, start_x=0, start_y=0):
        """Search for a legal move for the current player.

        Returns the coordinates of the first suitable cell together with its
        flip instructions, or None when no move exists. The search starts at
        (start_x, start_y) and scans row by row.
        """
        for y in range(start_y, self._field.side_length):
            for x in range(start_x, self._field.side_length):
                if self._field[x][y] == gamefield.DiskType.NONE:
                    instructions = self._field.get_flip_instructions((x, y), self.current_player)
                    if instructions:
                        return (x, y), instructions
            # Only the first scanned row honours start_x.
            start_x = 0
        return None

    def check_end_game(self):
        """Return True (and mark the game over) when no more moves remain."""
        if not self.get_turn():
            self._gameover = True
            return True
        return False
| BobMarleysFan/Reversi | reversi/game.py | game.py | py | 3,942 | python | en | code | 0 | github-code | 13 |
def calcular_imc(peso, altura):
    """Return the Body Mass Index: weight (kg) divided by height (m) squared."""
    imc_calculado = peso / (altura ** 2)
    return imc_calculado
def classificar_imc(imc):
    """Classify a BMI value into the WHO weight categories.

    BUG FIX: the original used 24.9/29.9/34.9/39.9 as upper bounds, which
    misclassified values such as 24.95 (WHO: still normal weight). The WHO
    boundaries are 18.5, 25, 30, 35 and 40, each upper bound exclusive.
    """
    if imc < 18.5:
        return "Abaixo do peso"
    elif imc < 25:
        return "Peso normal"
    elif imc < 30:
        return "Sobrepeso"
    elif imc < 35:
        return "Obesidade Grau I"
    elif imc < 40:
        return "Obesidade Grau II"
    else:
        return "Obesidade Grau III"
# Interactive entry point: read weight (kg) and height (m) from stdin,
# then compute and report the BMI and its category.
peso = float(input("Digite o seu peso em kg: "))
altura = float(input("Digite a sua altura em metros: "))
imc = calcular_imc(peso, altura)
categoria_imc = classificar_imc(imc)
print(f"Seu IMC é: {imc:.2f}")
print(f"Você está na categoria de: {categoria_imc}") | HyagoRubo/IMC_Phyton | desafio_IMC/imc.py | imc.py | py | 703 | python | pt | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.