seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39326157381 | import requests
import datetime
from datetimerange import DateTimeRange
import json
import math
import pytz
def get_hijri(timezone):
    """Return today's Hijri date as "<day> <month-en> <year>" for the given tz name."""
    today = datetime.datetime.now(pytz.timezone(timezone)).strftime('%d-%m-%Y')
    response = requests.get('http://api.aladhan.com/v1/gToH?date=' + today).json()
    hijri = response['data']['hijri']
    return hijri['day'] + ' ' + hijri['month']['en'] + ' ' + hijri['year']
def waktu_tersisa(hour, minute, timezone):
    """Return a human-readable string for the time left until hour:minute today
    (or tomorrow, if that moment already passed) in the given timezone."""
    tz = pytz.timezone(timezone)
    now = datetime.datetime.now(tz)
    target = tz.localize(datetime.datetime(now.year, now.month, now.day, hour, minute))
    if target < now:
        # target already passed today, roll over to tomorrow
        target += datetime.timedelta(days=1)
    minutes_left = math.ceil((target - now).seconds / 60)
    if minutes_left > 60:
        return str(math.ceil(minutes_left / 60)) + " Jam Lagi"  # in hours
    return str(minutes_left) + " Menit Lagi"  # in minutes
def current_pray(kota,timezone):
    """Return the name of the prayer period the current local time falls in.

    Fetches today's schedule for the city, then tests the current "HH:MM"
    string against the ranges between consecutive prayer times.
    NOTE(review): membership relies on DateTimeRange accepting "HH:MM"
    strings on both sides — confirm against datetimerange docs.
    """
    jadwal = get_jadwal(kota)
    print(jadwal)
    # current local wall-clock time as "HH:MM"
    jam = datetime.datetime.now(pytz.timezone(timezone)).time().strftime('%H:%M')
    # each range spans from one prayer time to the next
    subuh = DateTimeRange(jadwal['jadwal']['data']['subuh'],jadwal['jadwal']['data']['dzuhur'])
    dzuhur = DateTimeRange(jadwal['jadwal']['data']['dzuhur'], jadwal['jadwal']['data']['ashar'])
    ashar = DateTimeRange(jadwal['jadwal']['data']['ashar'], jadwal['jadwal']['data']['maghrib'])
    magrib = DateTimeRange(jadwal['jadwal']['data']['maghrib'],jadwal['jadwal']['data']['isya'])
    if jam in subuh:
        return('Subuh')
    elif jam in dzuhur:
        return("Dzuhur")
    elif jam in ashar:
        return("Ashar")
    elif jam in magrib:
        return("Maghrib")
    else:
        # anything outside the above ranges is treated as Isya
        return("Isya")
def split_jam(jam):
    """Break an "H:M" clock string into its colon-separated parts."""
    separator = ':'
    return jam.split(separator)
def solat_berikutnya(kota, timezone):
    """Return the next prayer and the time remaining until it.

    Arguments:
        kota {str} -- city name
        timezone {str} -- tz database name, e.g. "Asia/Jakarta"

    Returns:
        dict -- {'tersisa': human-readable remaining time,
                 'waktuberikutnya': name of the next prayer}
    """
    jadwal = get_jadwal(kota)
    sekarang = current_pray(kota, timezone)
    # prayer cycle: each prayer maps to the one that follows it
    # (Isya wraps around to Subuh of the next day)
    next_of = {
        "Subuh": "Dzuhur",
        "Dzuhur": "Ashar",
        "Ashar": "Maghrib",
        "Maghrib": "Isya",
        "Isya": "Subuh",
    }
    solatberikutnya = next_of[sekarang]
    # schedule keys are the lowercase prayer names ("dzuhur", "isya", ...)
    jam, menit = split_jam(jadwal['jadwal']['data'][solatberikutnya.lower()])
    waktutersisa = waktu_tersisa(int(jam), int(menit), timezone)
    return {
        'tersisa': waktutersisa,
        'waktuberikutnya': solatberikutnya
    }
def get_random_ayat():
    """Fetch a random Quran verse from the banghasan API.

    Returns:
        dict -- arabic text, Indonesian translation (newlines stripped),
                surah name, surah meaning, and verse number.
    """
    # 114 surahs
    # 6236 verses
    r = requests.get('https://api.banghasan.com/quran/format/json/acak').json()
    return {'arab':r['acak']['ar']['teks'],
    'terjemah':r['acak']['id']['teks'].replace('\n',''),
    'surah':r['surat']['nama'],
    'arti':r['surat']['arti'],
    'ayat':r['acak']['id']['ayat']}
def get_city(city):
    """Look up the banghasan API city code for a city name.

    Arguments:
        city {str} -- city name

    Returns:
        str -- city id, or the int 404 when the lookup fails
    """
    try:
        r = requests.get('https://api.banghasan.com/sholat/format/json/kota/nama/'+city)
        return r.json()['kota'][0]['id']
    except (requests.RequestException, ValueError, LookupError):
        # network failure, non-JSON body, or no matching city in the response;
        # the original bare `except:` hid every error, including typos
        return 404
def get_jadwal(namakota):
    """Get today's prayer schedule for a city.

    Arguments:
        namakota {str} -- city name

    Returns:
        json -- prayer schedule payload from the banghasan API
    """
    # resolve the city name to the API's numeric code first
    kode = get_city(namakota)
    r = requests.get('https://api.banghasan.com/sholat/format/json/jadwal/kota/%s/tanggal/%s'%(kode, str(datetime.date.today())))
    return r.json()
if __name__ == "__main__":
print(get_jadwal()) | RaihanStark/sakumuslim | engine.py | engine.py | py | 4,264 | python | en | code | 0 | github-code | 36 |
28521177727 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from opus_core.misc import safe_array_divide
from variable_functions import my_attribute_label
class total_number_of_possible_SSS_jobs_from_buildings(Variable):
    """Computed by dividing the total buildings_commercial/industrial sqft. of location by the
    commercial/industrial square feet per job
    """
    # opus variable framework coerces the result to this dtype
    _return_type = "int32"
    def __init__(self, type):
        # `type` is the SSS template slot, e.g. "commercial" or "industrial"
        self.sqft = "buildings_%s_sqft" % type
        self.sqft_per_job = "%s_sqft_per_job" % type
        Variable.__init__(self)
    def dependencies(self):
        """Declare the two dataset attributes this variable is derived from."""
        return [my_attribute_label(self.sqft), my_attribute_label(self.sqft_per_job)]
    def compute(self, dataset_pool):
        """Divide sqft by sqft-per-job, guarding against division by zero."""
        ds = self.get_dataset()
        values_sqft_per_job = ds.get_attribute(self.sqft_per_job)
        values_sqft = ds.get_attribute(self.sqft)
        # safe_array_divide returns 0 where the divisor is 0 (see opus_core.misc)
        return safe_array_divide(values_sqft, values_sqft_per_job, type="int32")
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    """Unit test for the commercial instantiation of the SSS jobs variable."""
    def test_my_inputs( self ):
        #declare an array of four locations, each with the specified sector ID below
        commercial_sqft = array([1000, 500, 5000, 233])
        commercial_sqft_per_job = array([20, 0, 100, 33])
        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                "gridcell":{
                    "grid_id":array([1,2,3,4]),
                    "buildings_commercial_sqft":commercial_sqft,
                    "commercial_sqft_per_job":commercial_sqft_per_job
                    }
                }
            )
        #notice that the computation code above purposely truncates decimal results,
        #which makes sense because fractions of jobs don't exist
        # second entry is 0 because safe_array_divide maps x/0 to 0
        should_be = array( [50.0, 0.0, 50.0, 7.0] )
        instance_name = "urbansim.gridcell.total_number_of_possible_commercial_jobs_from_buildings"
        tester.test_is_equal_for_family_variable(self, should_be, instance_name)
if __name__=='__main__':
opus_unittest.main() | psrc/urbansim | urbansim/gridcell/total_number_of_possible_SSS_jobs_from_buildings.py | total_number_of_possible_SSS_jobs_from_buildings.py | py | 2,426 | python | en | code | 4 | github-code | 36 |
32094159630 | from decimal import Decimal
import setoptconf as soc
# (datatype class, raw input, expected sanitized output) triples that must pass
GOOD_SIMPLE_VALUES = (
    (soc.String, None, None),
    (soc.String, 'foo', 'foo'),
    (soc.String, '1', '1'),
    (soc.String, 1, '1'),
    (soc.String, 1.23, '1.23'),
    (soc.String, Decimal('1.23'), '1.23'),
    (soc.Integer, None, None),
    (soc.Integer, 123, 123),
    (soc.Integer, '123', 123),
    (soc.Integer, 123.45, 123),
    (soc.Integer, Decimal('123'), 123),
    (soc.Integer, Decimal('123.45'), 123),
    (soc.Float, None, None),
    (soc.Float, 123, 123.0),
    (soc.Float, '123', 123.0),
    (soc.Float, 123.45, 123.45),
    (soc.Float, Decimal('123'), 123.0),
    (soc.Float, Decimal('123.45'), 123.45),
    (soc.Boolean, None, None),
    (soc.Boolean, True, True),
    (soc.Boolean, False, False),
    (soc.Boolean, 'y', True),
    (soc.Boolean, 'yes', True),
    (soc.Boolean, 't', True),
    (soc.Boolean, 'true', True),
    (soc.Boolean, 'on', True),
    (soc.Boolean, '1', True),
    (soc.Boolean, '', False),
    (soc.Boolean, 'n', False),
    (soc.Boolean, 'no', False),
    (soc.Boolean, 'f', False),
    (soc.Boolean, 'false', False),
    (soc.Boolean, 'off', False),
    (soc.Boolean, '0', False),
    (soc.Boolean, 123, True),
    (soc.Boolean, 0, False),
    (soc.Boolean, 123.45, True),
    )
# (datatype class, raw input) pairs that must raise soc.DataTypeError
BAD_SIMPLE_VALUES = (
    (soc.Integer, 'foo'),
    (soc.Integer, '123abc'),
    (soc.Float, 'foo'),
    (soc.Float, '123abc'),
    (soc.Float, '123.45abc'),
    (soc.Boolean, 'foo'),
    )
def test_simple_sanitization():
    """Nose-style generator test: yields one check per table row."""
    for case in GOOD_SIMPLE_VALUES:
        yield (check_good_value,) + case
    for case in BAD_SIMPLE_VALUES:
        yield (check_bad_value,) + case
def check_good_value(datatype, in_value, out_value):
    """Assert that the datatype sanitizes in_value to out_value and accepts it."""
    instance = datatype()
    assert instance.sanitize(in_value) == out_value
    assert instance.is_valid(in_value) is True
def check_bad_value(datatype, in_value):
    """Assert that sanitizing in_value raises DataTypeError and is_valid is False."""
    dt = datatype()
    raised = False
    try:
        dt.sanitize(in_value)
    except soc.DataTypeError:
        raised = True
    if not raised:
        assert False, 'Invalid %s allowed: %s' % (
            datatype.__name__,
            in_value,
        )
    assert dt.is_valid(in_value) is False
# (element subtype, raw input, expected sanitized list) triples that must pass
GOOD_LIST_VALUES = (
    (soc.String, None, None),
    (soc.String, [], []),
    (soc.String, ['foo', 'bar'], ['foo', 'bar']),
    (soc.String, ('foo', 'bar'), ['foo', 'bar']),
    (soc.String(), ['foo', 'bar'], ['foo', 'bar']),
    (soc.String, 'foo', ['foo']),
    (soc.Integer, [123, '456'], [123, 456]),
    )
# (element subtype, raw input, expected exception) triples that must fail
BAD_LIST_VALUES = (
    (soc.Integer, ['foo'], soc.DataTypeError),
    (soc.Boolean, [True, False, 'y', 4, 'foo'], soc.DataTypeError),
    ('a', ['foo'], TypeError),
    (soc.Configuration, ['foo'], TypeError),
    )
def test_list_sanitization():
    """Nose-style generator test over the list-type tables."""
    for case in GOOD_LIST_VALUES:
        yield (check_good_list_value,) + case
    for case in BAD_LIST_VALUES:
        yield (check_bad_list_value,) + case
def check_good_list_value(subtype, in_value, out_value):
    """Assert that a List of the subtype sanitizes in_value to out_value."""
    list_type = soc.List(subtype)
    assert list_type.sanitize(in_value) == out_value
def check_bad_list_value(subtype, in_value, exc):
    """Assert that constructing/sanitizing raises the expected exception.

    Construction stays inside the try because invalid subtypes raise
    TypeError from soc.List itself.
    """
    try:
        soc.List(subtype).sanitize(in_value)
    except exc:
        return
    assert False, 'Invalid %s allowed: %s' % (
        subtype.__class__.__name__,
        in_value,
    )
# (subtype, choices, value) triples where value must sanitize to itself
GOOD_CHOICE_VALUES = (
    (soc.String, ['foo', 'bar'], None),
    (soc.String, ['foo', 'bar'], 'foo'),
    (None, ['foo', 'bar'], 'foo'),
    (soc.Integer, [1,2,3], 2),
    (soc.Integer(), [1,2,3], 2),
    )
# (subtype, choices, value, expected exception) tuples that must fail
BAD_CHOICE_VALUES = (
    (soc.String, ['foo', 'bar'], 'baz', soc.DataTypeError),
    (soc.String, [1, 2, 3], 'baz', soc.DataTypeError),
    ('a', [1, 2, 3], 4, TypeError),
    )
def test_choice_sanitization():
    """Nose-style generator test over the choice-type tables."""
    for case in GOOD_CHOICE_VALUES:
        yield (check_good_choice_value,) + case
    for case in BAD_CHOICE_VALUES:
        yield (check_bad_choice_value,) + case
def check_good_choice_value(subtype, choices, value):
    """Assert that a valid choice sanitizes to itself."""
    choice_type = soc.Choice(choices, subtype)
    assert choice_type.sanitize(value) == value
def check_bad_choice_value(subtype, choices, value, exc):
    """Assert that an invalid choice (or invalid subtype) raises exc."""
    try:
        soc.Choice(choices, subtype).sanitize(value)
    except exc:
        return
    assert False, 'Invalid choice allowed: %s' % value
| jayclassless/setoptconf | test/test_datatypes.py | test_datatypes.py | py | 4,436 | python | en | code | 3 | github-code | 36 |
3006495445 | from pandas.io.parsers import read_csv
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def carga_csv(filename):
    """Load a headerless CSV file as a float NumPy matrix."""
    frame = read_csv(filename, header=None)
    return frame.to_numpy().astype(float)
def h(x, theta):
    """Univariate linear hypothesis: theta[0] + theta[1] * x."""
    intercept, slope = theta[0], theta[1]
    return intercept + slope * x
def func_coste(X, Y, theta):
    """Half mean-squared error of the linear hypothesis theta[0] + theta[1]*x."""
    m = len(X)
    residuals = (theta[0] + theta[1] * X) - Y
    return np.sum(residuals ** 2) / (2 * m)
def plot_line(X, Y, theta):
    """Scatter the data and overlay the fitted regression line; saves a PNG."""
    # the fitted line only needs its two endpoints
    min_x = min(X)
    max_x = max(X)
    min_y = h(min_x, theta)
    max_y = h(max_x, theta)
    plt.plot(X, Y, "x")
    plt.plot([min_x, max_x], [min_y, max_y])
    # plt.show()
    plt.savefig("apartado1_line.png")
def descenso_gradiente_simple(X, Y, alpha=0.01, iteraciones=1500):
theta_0 = theta_1 = 0
m = len(X)
for _ in range(iteraciones):
acc_0 = np.sum(h(X, [theta_0, theta_1]) - Y)
acc_1 = np.sum((h(X, [theta_0, theta_1]) - Y) * X)
theta_0 = theta_0 - (alpha / m) * acc_0
theta_1 = theta_1 - (alpha / m) * acc_1
return [theta_0, theta_1]
def make_grid(t0_range, t1_range, X, Y, step=0.1):
    """Evaluate the cost function over a (theta0, theta1) mesh.

    Returns [Theta0, Theta1, Coste] suitable for surface/contour plotting.
    """
    Theta0 = np.arange(t0_range[0], t0_range[1], step)
    Theta1 = np.arange(t1_range[0], t1_range[1], step)
    Theta0, Theta1 = np.meshgrid(Theta0, Theta1)
    Coste = np.empty_like(Theta0)
    #TODO check whether this loop can be vectorised
    for ix, iy in np.ndindex(Theta0.shape):
        Coste[ix, iy] = func_coste(X, Y, [Theta0[ix, iy], Theta1[ix, iy]])
    return [Theta0, Theta1, Coste]
def show_mesh(data):
    """Render the cost surface from make_grid() as a 3-D plot and save a PNG."""
    fig = plt.figure()
    ax = Axes3D(fig)
    surf = ax.plot_surface(data[0], data[1], data[2], cmap=cm.jet, linewidth=0, antialiased=False)
    # plt.show()
    plt.savefig("apartado1_mesh.png")
def show_contour(data):
    """Render the cost surface from make_grid() as contour lines and save a PNG."""
    #TODO ask about logspace
    # log-spaced levels spread the contours across several orders of magnitude
    plt.contour(data[0],data[1],data[2],np.logspace(-2,3,20),colors='blue')
    # plt.scatter(data[0], data[1])
    # plt.contour(data[0],data[1],data[2],colors='blue')
    # plt.show()
    plt.savefig("apartado1_contour.png")
def apartado_1():
    """Exercise 1: fit univariate linear regression and plot the cost contour."""
    datos = carga_csv('ex1data1.csv')
    X = datos[:, 0]
    Y = datos[:, 1]
    theta = descenso_gradiente_simple(X, Y)
    # plot_line(X, Y, theta)
    grid_data = make_grid([-10, 10], [-1, 4], X, Y)
    # show_mesh(grid_data)
    show_contour(grid_data)
def normaliza_matriz(x):
    """Column-wise z-score normalisation.

    Returns (x_norm, mu, sigma) with per-column mean and standard deviation.
    """
    mu = np.mean(x, axis=0)
    sigma = np.std(x, axis=0)
    return (x - mu) / sigma, mu, sigma
def coste_vec(X, Y, Theta):
    """Vectorised half-MSE cost J(Theta) for multivariate linear regression."""
    residuals = np.dot(X, Theta) - Y
    return np.sum(residuals ** 2) / (2 * len(X))
def gradiente_it(X, Y, Theta, alpha):
    """One gradient-descent step, updating Theta in place component by component.

    The residual vector is computed once with the incoming Theta, so later
    component updates do not see earlier ones within the same step.
    """
    m, n = np.shape(X)
    residuals = np.dot(X, Theta) - Y
    for i in range(n):
        Theta[i] -= (alpha / m) * (residuals * X[:, i]).sum()
    return Theta
def gradiente_vec(X, Y, Theta, alpha):
    """Fully vectorised gradient-descent step (no in-place mutation).

    Theta_new = Theta - (alpha/m) * X^T (X Theta - Y)

    The original assigned an unused local (`NuevaTheta = Theta`); removed.
    """
    m = np.shape(X)[0]
    H = np.dot(X, Theta)
    return Theta - (alpha/m) * np.dot(np.transpose(X), (H-Y))
def descenso_gradiente_multiple(X, Y, alpha=0.01, iteraciones=1500):
    """Gradient descent for multivariate linear regression.

    Returns (costes, Theta): the cost at every iteration and the final Theta.
    """
    Theta = np.zeros(np.shape(X)[1])
    costes = np.zeros(iteraciones)
    for i in range(iteraciones):
        # record the cost BEFORE stepping, so costes[0] is the initial cost
        costes[i] = coste_vec(X, Y, Theta)
        Theta = gradiente_it(X, Y, Theta, alpha)
    # We return the whole cost history so different learning
    # rates can be compared
    return costes, Theta
def ec_normal(X, Y):
    """Closed-form normal equation: Theta = pinv(X^T X) X^T Y."""
    Xt = np.transpose(X)
    pseudo_inverse = np.dot(np.linalg.pinv(np.dot(Xt, X)), Xt)
    return np.dot(pseudo_inverse, Y)
def apartado_2():
    """Exercise 2: multivariate gradient descent with feature normalisation.

    Compares several learning rates, saves the cost-history plot, and
    predicts the price of a 1650 sqft / 3 bedroom house.
    """
    datos = carga_csv('ex1data2.csv')
    mat_norm, mu, sigma = normaliza_matriz(datos)
    X = mat_norm[:, :-1] # every column except the last
    Y = mat_norm[:, -1] # the last column (target)
    m = np.shape(X)[0]
    X = np.hstack([np.ones([m, 1]), X])
    plt.figure()
    Alphas = [(0.01,'lime'),(0.1,'blue'),(0.3,'indigo'),(0.03,'teal')]
    for alpha, color in Alphas:
        costes, Theta = descenso_gradiente_multiple(X, Y, alpha,iteraciones=500)
        plt.scatter(np.arange(np.shape(costes)[0]),costes,c=color,label='alpha {}'.format(alpha))
    plt.legend()
    plt.savefig("descenso_gradiente.png")
    ejemplo = [1650, 3]
    ejemplo_norm = (ejemplo - mu[:2]) / sigma[:2] # normalise the example
    ejemplo_norm = np.hstack([[1],ejemplo_norm]) # prepend the bias 1
    prediccion = np.sum(Theta * ejemplo_norm) # element-wise multiply then sum
    print(prediccion*sigma[-1] + mu[-1]) # de-normalise the prediction
def apartado_2_2():
    """Exercise 2.2: same prediction via the normal equation (no normalisation)."""
    datos = carga_csv('ex1data2.csv')
    ejemplo = [[1, 1650, 3]]
    X = datos[:, :-1] # every column except the last
    Y = datos[:, -1] # the last column (target)
    m = np.shape(X)[0]
    X = np.hstack([np.ones([m, 1]), X])
    Thetas = ec_normal(X, Y)
    print(np.shape(X))
    print(np.shape(ejemplo))
    print(np.shape(Thetas))
    prediccion = np.sum(Thetas * ejemplo)
    print(prediccion)
def main():
    """Run every exercise in sequence."""
    apartado_1()
    apartado_2()
    apartado_2_2()
main()
| jorgmo02/AA | P1/practica1.py | practica1.py | py | 5,108 | python | es | code | 0 | github-code | 36 |
4254235874 | """
Example 1:
Input: arr1=[[1, 3], [5, 6], [7, 9]], arr2=[[2, 3], [5, 7]]
Output: [2, 3], [5, 6], [7, 7]
Explanation: The output list contains the common intervals between the two lists.
Example 2:
Input: arr1=[[1, 3], [5, 7], [9, 12]], arr2=[[5, 10]]
Output: [5, 7], [9, 10]
Explanation: The output list contains the common intervals between the two lists.
1 2 3 4 5 6 7 8 9
xxxxx xxx xxxxx
xxx xxxxx
"""
def merge(intervals_a, intervals_b):
    """Return the intersections of two sorted, disjoint [start, end] interval lists."""
    result = []
    i = j = 0
    while i < len(intervals_a) and j < len(intervals_b):
        lo_a, hi_a = intervals_a[i]
        lo_b, hi_b = intervals_b[j]
        # the intervals overlap iff one of them starts inside the other
        if lo_b <= lo_a <= hi_b or lo_a <= lo_b <= hi_a:
            result.append([max(lo_a, lo_b), min(hi_a, hi_b)])
        # advance past whichever interval ends first
        if hi_b > hi_a:
            i += 1
        else:
            j += 1
    return result
| blhwong/algos_py | grokking/merge_intervals/intervals_intersection/main.py | main.py | py | 1,000 | python | en | code | 0 | github-code | 36 |
27033338799 |
from __future__ import print_function
import argparse
from ast import literal_eval
import logging
from utils import metrics_manager
from utils import data_manager
try:
import ConfigParser
config = ConfigParser.ConfigParser()
except ImportError:
import configparser
config = configparser.ConfigParser()
# --metrics-policy metrics_parameters_images --task-name custom.p316xlarge.fp32.bs32 --metrics-suffix nightly --num-gpus 8 --command-to-execute \"Hello world\"
CONFIG_TEMPLATE = './task_config_template.cfg'
def run_benchmark(args):
    """Run one benchmark task described by the parsed CLI args.

    Downloads ImageNet data if needed, reads the metric patterns/names/
    compute-methods from the metrics template config, then hands off to
    metrics_manager.benchmark.
    """
    if 'imagenet' in args.data_set:
        data_manager.getImagenetData(args.data_set)
    config.read(args.metrics_template)
    # the policy section holds three keys: patterns, metrics, and the
    # compute methods (anything that is not one of the first two)
    for name, value in config.items(args.metrics_policy):
        if(name == 'patterns'):
            metric_patterns = literal_eval(value)
        elif(name == 'metrics'):
            metric_names= literal_eval(value)
        else:
            metric_compute_methods = literal_eval(value)
    metrics_manager.BenchmarkResultManager.uptime()
    metrics_manager.benchmark(
        command_to_execute=args.command_to_execute,
        metric_patterns=metric_patterns,
        metric_names=metric_names,
        metric_compute_methods=metric_compute_methods,
        num_gpus=args.num_gpus,
        task_name=args.task_name,
        suffix=args.metrics_suffix,
        framework=args.framework
    )
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run a benchmark task.")
parser.add_argument('--framework', type=str, help='Framework eg. mxnet')
parser.add_argument('--metrics-policy', type=str, help='Metrics policy section name e.g. metrics_paramaters_images')
parser.add_argument('--task-name', type=str, help='Task Name e.g. resnet50_cifar10_symbolic.')
parser.add_argument('--metrics-suffix', type=str, help='Metrics suffix e.g. --metrics-suffix daily')
parser.add_argument('--num-gpus', type=int, help='Numbers of gpus. e.g. --num-gpus 8')
parser.add_argument('--command-to-execute', type=str, help='The script command that performs benchmarking')
parser.add_argument('--data-set', type=str, help='The data set to use for benchmarking, eg. imagenet, imagenet-480px-256px-q95')
parser.add_argument('--metrics-template', type=str, help='The template file to use for metrics pattern', default=CONFIG_TEMPLATE)
args = parser.parse_args()
log_file_location = args.task_name + ".log"
logging.basicConfig(filename=log_file_location,level=logging.DEBUG)
try:
run_benchmark(args)
except Exception:
logging.exception("Fatal error in run_benchmark")
exit()
| awslabs/deeplearning-benchmark | benchmark_runner.py | benchmark_runner.py | py | 2,670 | python | en | code | 119 | github-code | 36 |
def roman(num):
    """Convert a positive integer to a standard Roman numeral string.

    Fixes the original table, which lacked the subtractive forms
    XL/XC/CD/CM (so 90 rendered as "LXXXX"), contained redundant
    entries for 2,3,5..8, and printed each key while converting.
    """
    # value/symbol pairs in descending order, including subtractive forms
    pairs = [
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    ]
    result = ""
    remainder = num
    for value, symbol in pairs:
        count, remainder = divmod(remainder, value)
        result += symbol * count
    return result
print(roman(1553))
| AydinTokuslu/AWS-DevOps-Projects | Project-001-Roman-Numerals-Converter/benim-cozumum/roman.py | roman.py | py | 593 | python | en | code | 0 | github-code | 36 |
41287151080 | from game_of_greed_v2.game_logic import GameLogic
class Game:
    """Console front-end for a single round of Game of Greed."""
    def __init__(self, roller=None):
        # roller: callable taking the number of dice and returning their values
        self.roller = roller
    def play(self):
        """Prompt the player, roll 6 dice once, and print a closing score line."""
        print('Welcome to Game of Greed')
        wanna_play = input('Wanna play? ')
        if wanna_play == 'n':
            print('OK. Maybe another time')
        else:
            print('Starting round 1')
            print('Rolling 6 dice...')
            rolled_dice = self.roller(6)
            nums = []
            for i in rolled_dice:
                nums.append(str(i))
            # show the roll as a comma-separated list
            print(','.join(nums))
            decision = input('Enter dice to keep (no spaces), or (q)uit: ')
            # scoring is not implemented yet; the round always ends here
            print('Thanks for playing. You earned 0 points')
if __name__=="__main__":
game = Game(GameLogic.roll_dice)
game.play() | LTUC/amman-python-401d7 | class-07/demo/game-of-greed-v2/game_of_greed_v2/game.py | game.py | py | 785 | python | en | code | 2 | github-code | 36 |
34710160257 | import os
import shutil
import time
import unittest
from configparser import ConfigParser
from os import environ
from Bio import SeqIO
from installed_clients.WorkspaceClient import Workspace as workspaceService
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
class MinimalGenbankUploadTest(unittest.TestCase):
    """Integration tests for genbank_to_genome using a minimal .gbff fixture.

    Requires a live KBase workspace (KB_AUTH_TOKEN / KB_DEPLOYMENT_CONFIG)
    and the data/minimal.gbff test file.
    """
    @classmethod
    def setUpClass(cls):
        # Build a fake call context, read the service config, connect to the
        # workspace, and stage the minimal GenBank file into scratch.
        print('setting up class')
        token = environ.get('KB_AUTH_TOKEN', None)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'provenance': [
                            {'service': 'GenomeFileUtil',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('GenomeFileUtil'):
            cls.cfg[nameval[0]] = nameval[1]
        cls.wsURL = cls.cfg['workspace-url']
        cls.ws = workspaceService(cls.wsURL, token=token)
        cls.impl = GenomeFileUtil(cls.cfg)
        cls.MINIMAL_TEST_FILE = os.path.join( cls.cfg['scratch'], 'minimal.gbff')
        shutil.copy('data/minimal.gbff', cls.MINIMAL_TEST_FILE )
    @classmethod
    def tearDownClass(cls):
        # Remove the workspace created lazily by getWsName(), if any.
        if hasattr(cls, 'wsName'):
            cls.ws.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
    def getWsClient(self):
        return self.ws
    def getWsName(self):
        # Create the shared test workspace on first use; reuse it afterwards.
        if hasattr(self.__class__, 'wsName'):
            return self.__class__.wsName
        suffix = int(time.time() * 1000)
        wsName = "test_GenomeFileUtil_" + str(suffix)
        ret = self.getWsClient().create_workspace({'workspace': wsName})
        self.__class__.wsName = wsName
        return wsName
    def getImpl(self):
        return self.__class__.impl
    def getContext(self):
        return self.__class__.ctx
    def test_upload(self):
        """genbank_to_genome succeeds with minimal params, with a taxon id,
        with extra metadata, and rejects an invalid genetic code."""
        # fetch the test files and set things up
        genomeFileUtil = self.getImpl()
        gbk_path = self.MINIMAL_TEST_FILE
        # ok, first test with minimal options
        result = genomeFileUtil.genbank_to_genome(self.getContext(),
            {
                'file':{'path': gbk_path},
                'workspace_name': self.getWsName(),
                'taxon_id': 4932,
                'genome_name': 'something',
                'generate_ids_if_needed': 1
            })[0]
        self.check_minimal_items_exist(result)
        # test with setting a taxon_reference directly
        result = genomeFileUtil.genbank_to_genome(self.getContext(),
            {
                'file': {'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': 'something',
                'taxon_id': 4932,
                'generate_ids_if_needed': 1
            })[0]
        self.check_minimal_items_exist(result)
        # test setting additional metadata
        result = genomeFileUtil.genbank_to_genome(self.getContext(),
            {
                'file': {'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': 'something',
                'taxon_id': 4932,
                'metadata': {'mydata': 'yay', 'otherdata': 'ok' },
                'generate_ids_if_needed': 1
            })[0]
        self.check_minimal_items_exist(result)
        metadata_saved = result['genome_info'][10]
        self.assertTrue('mydata' in metadata_saved)
        self.assertTrue('otherdata' in metadata_saved)
        self.assertEqual(metadata_saved['mydata'], 'yay')
        invalidate_input_params = {
            'workspace_name': 'workspace_name',
            'genome_name': 'genome_name',
            'file': {'path': 'fasta_file'},
            'genetic_code': 'meh'
        }
        with self.assertRaisesRegex(
                ValueError,
                'Invalid genetic code specified'):
            self.getImpl().genbank_to_genome(self.getContext(), invalidate_input_params)
    def check_minimal_items_exist(self, result):
        """Assert the expected workspace-info metadata for the minimal genome."""
        self.assertTrue('genome_info' in result)
        self.assertTrue('genome_ref' in result)
        genome_info = result['genome_info']
        # genome_info[10] is the object metadata dict from the workspace
        self.assertEqual(genome_info[10]['Number contigs'], '1')
        self.assertEqual(genome_info[10]['Number of Protein Encoding Genes'], '2')
        self.assertEqual(genome_info[10]['Domain'], 'Eukaryota')
        self.assertEqual(genome_info[10]['Genetic code'], '11')
        self.assertEqual(genome_info[10]['Name'], 'Saccharomyces cerevisiae')
        self.assertEqual(genome_info[10]['Source'], 'Genbank')
        self.assertEqual(genome_info[10]['GC content'], '0.37967')
        self.assertEqual(genome_info[10]['Size'], '5028')
        self.assertEqual(genome_info[10]['Taxonomy'],
                         'cellular organisms; Eukaryota; Opisthokonta; Fungi; Dikarya; Ascomycota; '+
                         'saccharomyceta; Saccharomycotina; Saccharomycetes; Saccharomycetales; '+
                         'Saccharomycetaceae; Saccharomyces')
    def test_supply_assembly(self):
        """use_existing_assembly must reject bad refs, non-assembly refs, and
        assemblies missing the file's contigs."""
        genomeFileUtil = self.getImpl()
        """Warning: This test will fail if not run against CI"""
        gbk_path = self.MINIMAL_TEST_FILE
        with self.assertRaisesRegex(ValueError, "not a valid format."):
            result = genomeFileUtil.genbank_to_genome(self.getContext(), {
                'file': {'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': 'something',
                'taxon_id': 4932,
                'use_existing_assembly': "1",
            })[0]
        with self.assertRaisesRegex(ValueError, "not a reference to an assembly"):
            result = genomeFileUtil.genbank_to_genome(
                self.getContext(), {
                    'file': {'path': gbk_path},
                    'workspace_name': self.getWsName(),
                    'taxon_id': 4932,
                    'genome_name': 'something',
                    'use_existing_assembly': "6976/923/6",
                })[0]
        with self.assertRaisesRegex(ValueError, "following contigs which are not present"):
            result = genomeFileUtil.genbank_to_genome(
                self.getContext(), {
                    'file': {'path': gbk_path},
                    'workspace_name': self.getWsName(),
                    'genome_name': 'something',
                    'taxon_id': 4932,
                    'use_existing_assembly': "31767/5/1",
                })[0]
    def test_translation(self):
        """Feature extraction by hand must match Biopython's feat.extract()."""
        record = next(SeqIO.parse(open(self.MINIMAL_TEST_FILE), 'genbank'))
        f_seq = str(record.seq)
        # complement (not reverse-complement) of the forward strand
        r_seq = f_seq.translate(str.maketrans("CTAG", "GATC"))
        def _location(feat):
            # Build (contig, begin, strand, length) tuples for a feature.
            strand_trans = ("", "+", "-")
            loc = []
            for part in feat.location.parts:
                if part.strand >= 0:
                    begin = int(part.start) + 1
                else:
                    begin = int(part.end)
                loc.append((
                    record.id,
                    begin,
                    strand_trans[part.strand],
                    len(part)))
            return loc
        def get_seq(feat):
            # Manually splice the feature sequence, reversing minus-strand parts.
            seq = []
            strand = 1
            for part in feat.location.parts:
                strand = part.strand
                if strand >= 0:
                    seq.append(f_seq[part.start:part.end])
                else:
                    seq.insert(0, r_seq[part.start:part.end])
            if strand >= 0:
                return "".join(seq)
            else:
                return "".join(seq)[::-1]
        for feat in record.features:
            print(feat.id)
            seq1 = feat.extract(record)
            seq2 = get_seq(feat)
            self.assertEqual(str(seq1.seq), seq2)
| kbaseapps/GenomeFileUtil | test/supplemental_genbank_tests/genbank_upload_parameter_test.py | genbank_upload_parameter_test.py | py | 8,715 | python | en | code | 0 | github-code | 36 |
24486995491 | """archive hails
Revision ID: da94441f919f
Revises: 51c630a38d3c
Create Date: 2022-03-16 13:46:13.409774
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'da94441f919f'
down_revision = '51c630a38d3c'
branch_labels = None
depends_on = None
def upgrade():
    """Create the archived_hail table and add hail.blurred.

    create_type=False reuses the existing 'via' enum instead of creating it.
    """
    sources_enum = postgresql.ENUM('form', 'api', name='via', create_type=False)
    op.create_table(
        'archived_hail',
        sa.Column('added_at', sa.DateTime(), nullable=True),
        sa.Column('added_via', sources_enum, nullable=False),
        sa.Column('source', sa.String(length=255), nullable=False),
        sa.Column('last_update_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('status', sa.String(), nullable=False),
        sa.Column('moteur', sa.String(), nullable=False),
        sa.Column('operateur', sa.String(), nullable=False),
        sa.Column('incident_customer_reason', sa.String()),
        sa.Column('incident_taxi_reason', sa.String()),
        sa.Column('session_id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('insee', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # existing rows default to not blurred
    op.add_column('hail', sa.Column('blurred', sa.Boolean(), server_default='false', nullable=True))
op.add_column('hail', sa.Column('blurred', sa.Boolean(), server_default='false', nullable=True))
def downgrade():
    """Reverse the upgrade: drop hail.blurred and the archived_hail table."""
    op.drop_column('hail', 'blurred')
    op.drop_table('archived_hail')
| openmaraude/APITaxi | APITaxi_models2/migrations/versions/20220316_13:46:13_da94441f919f_archive_hails.py | 20220316_13:46:13_da94441f919f_archive_hails.py | py | 1,462 | python | en | code | 24 | github-code | 36 |
23083264879 | from django.shortcuts import render
from .forms import RegisterForm, LoginForm
from django.shortcuts import redirect
from django.contrib import messages
from django.contrib.auth import authenticate , login
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request,'acounts/index.html')
def register(request):
    """Handle user sign-up: save the form on valid POST, else show a blank form."""
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # one-shot flash message shown on the login page
            messages.success(request, f'welcome {username} your account is created')
            return redirect('login_view')
    else:
        form = RegisterForm()
    context = {
        "form": form,
    }
    return render(request, "acounts/register.html", context )
def login_view(request):
    """Authenticate a user and route them by role.

    NOTE(review): is_doctor / is_patient look like custom flags on the user
    model — confirm against the project's user model.
    """
    form = LoginForm(request.POST or None)
    msg = None
    if request.method == 'POST':
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None and user.is_doctor:
                login(request, user)
                return redirect('doctorpage')
            elif user is not None and user.is_patient:
                login(request, user)
                return redirect('patientpage')
            else:
                # bad credentials, or a user with neither role
                msg= 'invalid credentials'
        else:
            msg = 'error validating form'
    return render(request, 'acounts/login.html', {'form': form, 'msg': msg})
def doctor(request):
    """Render the doctor dashboard page."""
    return render(request,'acounts/doctor.html')
def patient(request):
    """Render the patient dashboard page."""
    return render(request,'acounts/patient.html')
| Shivam38391/django-asignment | acounts/views.py | views.py | py | 1,792 | python | en | code | 3 | github-code | 36 |
5892047689 | from typing import Tuple
import numpy as np
import yaml
import os
def PIDController(
    v_0: float, y_ref: float, y_hat: float, prev_e_y: float, prev_int_y: float, delta_t: float
) -> Tuple[float, float, float, float]:
    """
    PID performing lateral control.

    Args:
        v_0: linear Duckiebot speed (constant).
        y_ref: target y coordinate.
        y_hat: the current estimated y.
        prev_e_y: tracking error at previous iteration.
        prev_int_y: previous integral error term.
        delta_t: time interval since last call.

    Returns:
        v_0: linear velocity of the Duckiebot
        omega: angular velocity of the Duckiebot
        e: current tracking error (automatically becomes prev_e_y at next iteration).
        e_int: current integral error (automatically becomes prev_int_y at next iteration).
    """
    # (cleanup: removed the commented-out YAML gain loading and the duplicate
    # commented-out PID implementation that shadowed this one)

    # Tracking error
    e = y_ref - y_hat
    # Integral of the error (rectangle rule)
    e_int = prev_int_y + e * delta_t
    # Anti-windup: clamp the integral term so it cannot grow unbounded
    e_int = max(min(e_int, 2), -2)
    # Derivative of the error (backward difference)
    e_diff = (e - prev_e_y) / delta_t
    # Controller gains
    Kp = 5
    Ki = 0.2
    Kd = 0.1
    # PID control law for the angular velocity
    omega = Kp * e + Ki * e_int + Kd * e_diff
    return v_0, omega, e, e_int
| bratjay01/bharath_duckiebot | modcon/packages/solution/pid_controller_homework.py | pid_controller_homework.py | py | 2,430 | python | en | code | 0 | github-code | 36 |
13511295213 | #! /usr/bin/python
import tensorflow as tf
import numpy as np
from check_base import *
import mnist
class mnist_cnn_test_1(check_base):
    """MNIST CNN check: two conv+pool stages, a dense layer with dropout,
    and a 10-way fully connected output."""
    def __init__(self, reader):
        # BUGFIX: the original spelled this `__init` (single trailing
        # underscore), so Python never called it as the constructor and
        # `self.base` was never set.
        self.base = super(mnist_cnn_test_1, self)
        self.base.__init__(reader)
    def decl_predict(self):
        """Build the network graph; returns (1, logits, input, labels)."""
        x = self.decl_placeholder("x", [None, 784])
        y_ = self.decl_placeholder("y_", [None, 10])
        # reshape flat 784-vectors into 28x28 single-channel images
        input = tf.reshape(x, [-1, 28, 28, 1])
        conv1 = tf.layers.conv2d(input, 32, [5, 5], padding='same', activation=tf.nn.relu)
        pool1 = tf.layers.max_pooling2d(conv1, [2, 2], 2)
        conv2 = tf.layers.conv2d(pool1, 64, [5, 5], padding='same', activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(conv2, [2, 2], 2)
        pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
        dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
        dropout = tf.layers.dropout(dense, 0.4, training=True)
        y = self.decl_full_conn_layer("fc", dropout, [1024, 10], [10])
        return 1, y, x, y_
if __name__ == "__main__":
    # Entry point: wire the model to the project's MNIST reader and run
    # the base-class check routine.
    print("#"*30)
    # NOTE(review): `config` is not imported here directly; presumably it
    # is re-exported by `from check_base import *` -- confirm.
    m = config.mnist_test_reader('mnist_cnn_2')
    model = mnist_cnn_test_1(m)
    model.check()
| angelbruce/NN | mnist_cnn_1_test.py | mnist_cnn_1_test.py | py | 1,151 | python | en | code | 0 | github-code | 36 |
74963734183 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@FileName: weChatClient
@Author : sky
@Date : 2022/8/1 15:48
@Desc : 客户端
"""
import wx
import socket
import threading
# 客户端继承wx.frame,就拥有了窗口界面
class WeChatClient(wx.Frame):
    """Chat-room client window.

    Layout (top to bottom): connect/disconnect buttons, a read-only chat
    log, a message input box, and reset/send buttons.  While connected, a
    daemon thread pumps incoming server messages into the log.
    """

    def __init__(self, c_name):
        # Parent constructor creates the frame itself.
        wx.Frame.__init__(self, None, id=101, title='%s的客户端界面' % c_name, pos=wx.DefaultPosition, size=(400, 700))
        pl = wx.Panel(self)  # root panel inside the frame
        box = wx.BoxSizer(wx.VERTICAL)
        # Row 1: connect / disconnect buttons.
        g1 = wx.FlexGridSizer(wx.HORIZONTAL)
        conn_button = wx.Button(pl, size=(200, 40), label="连接")
        dis_conn_button = wx.Button(pl, size=(200, 40), label="断开")
        g1.Add(conn_button, 1, wx.TOP | wx.LEFT)
        # BUG FIX: `wx.Right` does not exist (AttributeError at runtime);
        # the correct alignment flag is wx.RIGHT.
        g1.Add(dis_conn_button, 1, wx.TOP | wx.RIGHT)
        box.Add(g1, 1, wx.ALIGN_CENTER)
        # Read-only chat log.
        self.text = wx.TextCtrl(pl, size=(400, 250), style=wx.TE_MULTILINE | wx.TE_READONLY)
        box.Add(self.text, 1, wx.ALIGN_CENTER)
        # Message input box.
        self.input_text = wx.TextCtrl(pl, size=(400, 100), style=wx.TE_MULTILINE)
        box.Add(self.input_text, 1, wx.ALIGN_CENTER)
        # Row 2: reset / send buttons.
        g2 = wx.FlexGridSizer(wx.HORIZONTAL)
        clear_button = wx.Button(pl, size=(200, 40), label="重置")
        send_button = wx.Button(pl, size=(200, 40), label="发送")
        g2.Add(clear_button, 1, wx.TOP | wx.LEFT)
        g2.Add(send_button, 1, wx.TOP | wx.RIGHT)
        box.Add(g2, 1, wx.ALIGN_CENTER)
        # FIX: SetSizer was previously called twice; once is enough.
        pl.SetSizer(box)
        # Wire up all button handlers.
        self.Bind(wx.EVT_BUTTON, self.connect_to_server, conn_button)
        self.Bind(wx.EVT_BUTTON, self.send_to, send_button)
        self.Bind(wx.EVT_BUTTON, self.go_out, dis_conn_button)
        self.Bind(wx.EVT_BUTTON, self.reset, clear_button)
        # Client state.
        self.name = c_name
        self.isConnected = False  # True once connected to the server
        self.client_socket = None

    def connect_to_server(self, event):
        """Connect to the chat server, announce our name, and start the
        background receive thread."""
        print(f"客户端{self.name},开始连接服务器")
        if not self.isConnected:
            server_host_port = ('localhost', 8888)
            self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.client_socket.connect(server_host_port)
            # Protocol: the client sends its name right after connecting.
            self.client_socket.send(self.name.encode('utf-8'))
            self.isConnected = True
            t = threading.Thread(target=self.recive_data)
            # FIX: Thread.setDaemon() is deprecated; the daemon flag lets
            # the receive thread die together with the window.
            t.daemon = True
            t.start()

    def recive_data(self):
        """Background loop: append every server message to the chat log."""
        while self.isConnected:
            data = self.client_socket.recv(1024).decode('utf-8')
            self.text.AppendText(f"{data}\n")

    def send_to(self, event):
        """Send the input box's contents to the chat room, then clear it."""
        if self.isConnected:
            info = self.input_text.GetValue()
            if len(info) > 0:
                self.client_socket.send(info.encode('utf-8'))
                # Clear the input box once the text has been sent.
                self.input_text.Clear()

    def go_out(self, event):
        """Leave the chat room and stop the receive loop."""
        self.client_socket.send('A^disconnect^B'.encode('utf-8'))
        # Also stop the client's receive loop.
        self.isConnected = False

    def reset(self, event):
        """Clear the message input box."""
        self.input_text.Clear()
if __name__ == "__main__":
    app = wx.App()
    # Ask for a display name before showing the window.
    name = input("请输入客户端名字:")
    WeChatClient(name).Show()
    app.MainLoop() # wx event loop: keeps the UI alive and refreshing
| Bxiaoyu/NotesRep | Wechat/weChatClient.py | weChatClient.py | py | 3,819 | python | en | code | 0 | github-code | 36 |
"""Symbolically evaluate (1/pi) * x**3 * sin(n*x) over [-pi, pi].

This is the n-th Fourier sine coefficient of f(x) = x**3 on (-pi, pi).
"""
import scipy.integrate as integrate  # NOTE(review): imported but unused here
import sympy as sp

# Integration variable and the free parameter of the integrand.
x, n = sp.symbols('x n')

# Integrand: (1/pi) * x^3 * sin(n x).
integrand = x**3 * sp.sin(n * x) / sp.pi

# Definite integral over one full period, then simplify.
result = sp.simplify(sp.integrate(integrand, (x, -sp.pi, sp.pi)))
print(result)
| ClarkieUK/Fourier-Series | testing.py | testing.py | py | 276 | python | en | code | 0 | github-code | 36 |
73720676264 | # -*- coding: utf-8 -*-
# @date:2022/12/12 9:55
# @Author:crab-pc
# @file: onlinelibrary_detail
import random
from urllib.parse import urljoin
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import logging
import os
import pandas as pd
from concurrent.futures import ThreadPoolExecutor
from lxml import etree
threadpool = ThreadPoolExecutor(max_workers=2)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s: %(message)s')
chrome_options = Options()
chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222") # 前面设置的端口号
browser = webdriver.Chrome(executable_path=r'D:\python38\chromedriver.exe',
options=chrome_options) # executable执行webdriver驱动的文件
def save_list(data, file, name):
    """Persist scraped rows into a CSV under the fixed output directory.

    On first use the directory is created and a header row (`name`) is
    written; later calls append rows without a header.
    """
    file_path = r'F:\mysubject\contribute_link\contributuLink\投稿链接\\' + file
    if not os.path.isfile(file_path):
        # First write: make sure the directory exists and emit the header.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        pd.DataFrame(data=data, columns=name).to_csv(file_path, encoding="utf-8", index=False)
    else:
        # Subsequent writes: append rows only.
        pd.DataFrame(data=data).to_csv(file_path, encoding="utf-8", mode='a', header=False, index=False)
def first_requests():
    """Visit each journal detail page listed in the Excel sheet and scrape
    its "Submit an article" link, appending one row per page to a CSV.

    Relies on the module-level selenium `browser` attached to an already
    running Chrome instance (remote debugging port).
    """
    pf = pd.read_excel(r'F:\mysubject\contribute_link\contributuLink\spiders\onlinelibrary详情页.xlsx', dtype=str)
    sha = pf.shape[0]  # total row count (unused; the range below is hard-coded)
    # NOTE(review): only rows 8-9 are processed -- presumably a resume point
    # from an interrupted run; confirm before reusing.
    for i in range(8, 10):
        url = pf.values[i][0]
        # input('=====')
        # input(f'waiting---------{i}')
        browser.get(url)
        # time.sleep(random.randint(4, 6))
        html = browser.page_source
        res = etree.HTML(html)
        # First matching "Submit an article" anchor href, or '' when absent.
        link = res.xpath('//a[contains(text(), "Submit an article")]/@href | //a[contains(text(), "Submit an Article")]/@href')[0] if res.xpath('//a[contains(text(), "Submit an article")]/@href | //a[contains(text(), "Submit an Article")]/@href') else ''
        data = []
        links = ''
        # Resolve relative submit URLs against the page URL.
        if link and 'http' not in link:
            links = urljoin(url, link)
        print(url, link)
        data.append(dict(url=url, contribute_link=links, contribute_links=link))
        save_list(data, 'onlinelibrary456.csv', data[0].keys())
if __name__ == '__main__':
    # first_requests()
    # One-off conversion: re-save the detail-page URL list as CSV.
    pf = pd.read_excel(r'F:\mysubject\contribute_link\contributuLink\spiders\onlinelibrary详情页.xlsx', dtype=str)
pf.to_csv(r'F:\mysubject\contribute_link\contributuLink\spiders\onlinelibrary详情页.csv', index=False,encoding='utf-8') | yjsdl/contribute_link | contributuLink/spiders/onlinelibrary_detail.py | onlinelibrary_detail.py | py | 2,627 | python | en | code | 0 | github-code | 36 |
28147682147 | import os
import cv2 as cv
import numpy as np
import time
import json
import threading
from queue import Queue
import sys
picture_path='C:/Users/Administrator/Desktop/1/'  # directory with the captured frames
picture_number=0 # index of the current picture
num=0 # count of successfully processed pictures
# HSV ranges for the six cube sticker colours (OpenCV hue range 0-180).
greenLower = (46, 133, 46)
greenUpper = (85, 255, 255)
redLower = (150, 100, 6)
redUpper = (185, 255, 255)
yellowLower = (21, 84, 46)
yellowUpper = (64, 255, 255)
orangeLower = (2, 150, 100)
orangeUpper = (15, 255, 255)
whiteLower = (0, 0, 146)  # gray
whiteUpper = (180, 78, 255)
blueLower = (88, 143, 46)
blueUpper = (120, 255, 255)
Side_length=54  # sticker cell size in pixels
# Top-left corners of the nine sticker cells (3x3 grid).
Outer_frame=[[10, 10], [85, 10], [160, 10],
             [10, 85], [85, 85], [160, 85],
             [10, 160], [85, 160], [160, 160]
             ]
# Accumulators used by main(): per-cell feature vector and collected rows.
listnet=[]
listall=[]
listhsv=[]
listrgb=[]
class MyEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalar/array types to native
    Python values so json.dumps can serialise them."""

    def default(self, obj):
        # BUG FIX: the module imports `numpy as np`, so the bare name
        # `numpy` used previously raised NameError for every numpy object.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            # Defer to the base class (raises TypeError for unknown types).
            return super(MyEncoder, self).default(obj)
#获取图片的路径(返回图片路径)
def read_picture(i):
    """Return (and echo) the path of the i-th captured frame."""
    location = '{0}huanyuan{1}.jpg'.format(picture_path, i)
    print(location)
    return location
def indextocolor(index):
    """Map a colour-class index (0-5) to its BGR tuple.

    Unknown indices yield an empty tuple, matching the previous
    fall-through behaviour.
    """
    palette = {
        0: (0, 0, 255),      # red
        1: (255, 0, 0),      # blue
        2: (0, 255, 255),    # yellow
        3: (0, 165, 255),    # orange
        4: (0, 255, 0),      # green
        5: (255, 255, 255),  # white
    }
    return palette.get(index, ())
def draw_rectangle(image, color, i):
    """Fill sticker cell i of the 3x3 preview grid on `image` with `color`."""
    top_left = (Outer_frame[i][0], Outer_frame[i][1])
    bottom_right = (Outer_frame[i][0] + Side_length, Outer_frame[i][1] + Side_length)
    cv.rectangle(image, top_left, bottom_right, color, -1)  # thickness -1 = filled
def get_averageBGR(image,x,y):
    """Mean colour of the central 25x25 patch of the sticker cell whose
    top-left corner is (x, y); returns a 3-element list of channel means.

    NOTE(review): the input is converted with COLOR_HSV2RGB, i.e. assumed
    to already be HSV -- but callers pass the BGR k-means output.  The
    R/G/B variable names below also do not match the resulting channel
    order (channel 0 of an RGB image is R, yet it feeds `per_image_Bmean`).
    Confirm the intended colour space before relying on these labels.
    """
    img = cv.cvtColor(image,cv.COLOR_HSV2RGB)
    # Central patch only, to avoid the cell borders.
    img=img[x+20:x+45,y+20:y+45]
    per_image_Rmean = []
    per_image_Gmean = []
    per_image_Bmean = []
    list1=[]
    per_image_Bmean.append(np.mean(img[:,:,0]))
    per_image_Gmean.append(np.mean(img[:,:,1]))
    per_image_Rmean.append(np.mean(img[:,:,2]))
    R_mean = np.mean(per_image_Rmean)
    G_mean = np.mean(per_image_Gmean)
    B_mean = np.mean(per_image_Bmean)
    # Returned in the order [R_mean, G_mean, B_mean] as named above.
    list1.append(R_mean)
    list1.append(G_mean)
    list1.append(B_mean)
    return (list1)
def get_averageHSV(img,x,y):
    """Integer mean H, S and V over the central 25x25 patch of the sticker
    cell whose top-left corner is (x, y).

    Returns [avg_h, avg_s, avg_v] using floor division.
    """
    hsv=[]
    list1=[]
    h=s=v=0
    # Central patch only, to avoid the cell borders.
    image1=img[x+20:x+45,y+20:y+45]
    hsv= cv.cvtColor(image1,cv.COLOR_BGR2HSV)
    width = hsv.shape[0]
    height= hsv.shape[1]
    # Accumulate channel sums pixel by pixel (slow Python loop; kept as-is).
    for index1 in range (width):
        for index2 in range (height):
            h=h+ hsv[index1,index2,0]
            s=s+ hsv[index1,index2,1]
            v=v+ hsv[index1,index2,2]
    aveh=h//(width*height)
    aves=s//(width*height)
    avev=v//(width*height)
    list1.append(aveh)
    list1.append(aves)
    list1.append(avev)
    return (list1)
def average(img):
    """Histogram-equalise the luminance of `img` (via YUV) and display it.

    Equalising only the Y channel improves contrast while preserving the
    colours; returns the equalised BGR image.
    """
    image_yuv = cv.cvtColor(img,cv.COLOR_BGR2YUV)
    # Histogram equalisation on the luma channel only.
    image_yuv[:,:,0] = cv.equalizeHist(image_yuv[:,:,0])
    # Convert back and show the result.
    output = cv.cvtColor(image_yuv,cv.COLOR_YUV2BGR)
    cv.imshow('HistEqualize',output)
    return (output)
    # Earlier per-channel equalisation attempt, kept for reference:
    # img=cv.cvtColor(img,cv.COLOR_BGR2HSV)
    # (b, g, r) = cv.split(img)
    # bH = cv.equalizeHist(b)
    # gH = cv.equalizeHist(g)
    # rH = cv.equalizeHist(r)
    # result = cv.merge((bH, gH, rH))
    # cv.imshow("equalized", result)
def balance(img_input):
    """Perfect-reflection white balance.

    Steps:
      1. Compute R+G+B for every pixel.
      2. Find threshold T so that the brightest `ratio` percent of pixels
         (by R+G+B) serve as the white reference.
      3. Average the reference pixels' B/G/R channels.
      4. Rescale each channel by max(img)/avg_channel and clamp to [0, 255].

    Sensitive to the choice of `ratio`; performs poorly when the brightest
    region of the image is not actually white.

    :param img_input: image as returned by cv2.imread (BGR uint8)
    :return: white-balanced copy (the input is not modified)
    """
    img = img_input.copy()
    b, g, r = cv.split(img)
    m, n, t = img.shape
    # Per-pixel brightness proxy: R+G+B in [0, 765].
    sum_ = np.zeros(b.shape)
    for i in range(m):
        for j in range(n):
            sum_[i][j] = int(b[i][j]) + int(g[i][j]) + int(r[i][j])
    hists, bins = np.histogram(sum_.flatten(), 766, [0, 766])
    Y = 765
    num, key = 0, 0
    ratio = 0.01
    # Walk the histogram from the bright end until ratio% of pixels are seen.
    while Y >= 0:
        num += hists[Y]
        if num > m * n * ratio / 100:
            key = Y
            break
        Y = Y - 1
    # Average the channels of all reference ("white") pixels.
    sum_b, sum_g, sum_r = 0, 0, 0
    time = 0
    for i in range(m):
        for j in range(n):
            if sum_[i][j] >= key:
                sum_b += b[i][j]
                sum_g += g[i][j]
                sum_r += r[i][j]
                time = time + 1
    avg_b = sum_b / time
    avg_g = sum_g / time
    avg_r = sum_r / time
    maxvalue = float(np.max(img))
    # maxvalue = 255
    # Rescale every pixel channel-wise and clamp to the valid byte range.
    for i in range(m):
        for j in range(n):
            b = int(img[i][j][0]) * maxvalue / int(avg_b)
            g = int(img[i][j][1]) * maxvalue / int(avg_g)
            r = int(img[i][j][2]) * maxvalue / int(avg_r)
            if b > 255:
                b = 255
            if b < 0:
                b = 0
            if g > 255:
                g = 255
            if g < 0:
                g = 0
            if r > 255:
                r = 255
            if r < 0:
                r = 0
            img[i][j][0] = b
            img[i][j][1] = g
            img[i][j][2] = r
    return (img)
def gaussi_blur(img):
    """Return `img` smoothed with a 5x5 Gaussian kernel (sigma derived
    automatically from the kernel size)."""
    return cv.GaussianBlur(img, (5, 5), 0)
def k_means(img):
    """Colour-quantise `img` to K=8 clusters with OpenCV k-means.

    Every pixel is replaced by its cluster centre, which flattens the
    sticker colours before sampling them.
    """
    # Flatten to an (N, 3) float32 sample matrix as cv.kmeans requires.
    samples = np.float32(img.reshape((-1, 3)))
    # Stop after 10 iterations or when centres move less than epsilon 1.0.
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _compactness, labels, centers = cv.kmeans(samples, 8, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
    # Map every pixel to its (uint8) cluster centre and restore the shape.
    centers = np.uint8(centers)
    quantised = centers[labels.flatten()]
    return quantised.reshape(img.shape)
'''
image= cv.imread("huanyuan32.jpg")
cv.imshow("image",image)
img1=gaussi_blur(image)
img2=k_means(img1)
cv.imwrite("svmwo1.jpg",img2)
img3=balance(img2)
cv.imshow("balance",img3)
img4=average(img3)
#cv.imwrite("svmwo5.jpg",img4)
'''
def main(src):
    """Extract the nine sticker feature vectors (HSV means + RGB means)
    from one frame and append them to the module-level `listall`."""
    img1=gaussi_blur(src)
    img2=k_means(img1)
    # One 6-element feature row per sticker cell.
    for x,y in (Outer_frame):
        listhsv=get_averageHSV(img2,x,y)
        listrgb=get_averageBGR(img2,x,y)
        listrgb = list(map(int,listrgb))
        listnet=listhsv+listrgb
        listall.append(listnet)
    #print(listall)
######################### multithreaded attempt #############################
# Per-face accumulators, indexed 0-5 (one slot per cube face).
cube_list_hsv=[[] for _ in range (6)]
cube_list_bgr=[[] for _ in range (6)]
cube_list_all=[[] for _ in range (6)]
cube_list_net=[[] for _ in range (6)]
# Face label -> collected feature rows for that face.
dict_data={"1":cube_list_all[0],'2':cube_list_all[1],'3':cube_list_all[2],
           '4':cube_list_all[3],'5':cube_list_all[4],'6':cube_list_all[5]
           }
####多线程分别进行魔方6个面的识别
def job1():
    """Face-1 worker: frames 1-28 -> feature rows in cube_list_all[0].

    For each frame: read, Gaussian-blur, colour-quantise, then append one
    (HSV means + RGB means) row per sticker cell.
    """
    # FIX: dropped the dead per-frame resets of the shared cube_list_hsv/
    # bgr/net globals -- they were immediately overwritten, never read
    # elsewhere, and race-prone when the jobs run as threads.
    for frame_idx in range(1, 29):
        frame_path = read_picture(frame_idx)
        print(frame_path, end='\n')
        frame = cv.imread(frame_path)
        quantised = k_means(gaussi_blur(frame))
        for x, y in Outer_frame:
            hsv_part = get_averageHSV(quantised, x, y)
            bgr_part = list(map(int, get_averageBGR(quantised, x, y)))
            cube_list_all[0].append(hsv_part + bgr_part)
def job2():
    """Face-2 worker: frames 29-62 -> feature rows in cube_list_all[1].

    Same pipeline as job1 (read_picture itself echoes each path).
    """
    # FIX: dead per-frame resets of shared globals removed (see job1).
    for frame_idx in range(29, 63):
        frame_path = read_picture(frame_idx)
        frame = cv.imread(frame_path)
        quantised = k_means(gaussi_blur(frame))
        for x, y in Outer_frame:
            hsv_part = get_averageHSV(quantised, x, y)
            bgr_part = list(map(int, get_averageBGR(quantised, x, y)))
            cube_list_all[1].append(hsv_part + bgr_part)
def job3():
    """Face-3 worker: frames 63-90 -> feature rows in cube_list_all[2]."""
    # FIX: dead per-frame resets of shared globals removed (see job1).
    for frame_idx in range(63, 91):
        frame_path = read_picture(frame_idx)
        print(frame_path, end='\n')
        frame = cv.imread(frame_path)
        quantised = k_means(gaussi_blur(frame))
        for x, y in Outer_frame:
            hsv_part = get_averageHSV(quantised, x, y)
            bgr_part = list(map(int, get_averageBGR(quantised, x, y)))
            cube_list_all[2].append(hsv_part + bgr_part)
def job4():
    """Face-4 worker: frames 91-165 -> feature rows in cube_list_all[3]."""
    # FIX: dead per-frame resets of shared globals removed (see job1).
    for frame_idx in range(91, 166):
        frame_path = read_picture(frame_idx)
        print(frame_path, end='\n')
        frame = cv.imread(frame_path)
        quantised = k_means(gaussi_blur(frame))
        for x, y in Outer_frame:
            hsv_part = get_averageHSV(quantised, x, y)
            bgr_part = list(map(int, get_averageBGR(quantised, x, y)))
            cube_list_all[3].append(hsv_part + bgr_part)
def job5():
    """Face-5 worker: frames 205-303 -> feature rows in cube_list_all[4].

    NOTE(review): frames 166-204 are processed by no job -- confirm that
    gap is intentional.
    """
    # FIX: dead per-frame resets of shared globals removed (see job1).
    for frame_idx in range(205, 304):
        frame_path = read_picture(frame_idx)
        print(frame_path, end='\n')
        frame = cv.imread(frame_path)
        quantised = k_means(gaussi_blur(frame))
        for x, y in Outer_frame:
            hsv_part = get_averageHSV(quantised, x, y)
            bgr_part = list(map(int, get_averageBGR(quantised, x, y)))
            cube_list_all[4].append(hsv_part + bgr_part)
def job6():
    """Face-6 worker: frames 304-415 -> feature rows in cube_list_all[5]."""
    # FIX: dead per-frame resets of shared globals removed (see job1).
    for frame_idx in range(304, 416):
        frame_path = read_picture(frame_idx)
        print(frame_path, end='\n')
        frame = cv.imread(frame_path)
        quantised = k_means(gaussi_blur(frame))
        for x, y in Outer_frame:
            hsv_part = get_averageHSV(quantised, x, y)
            bgr_part = list(map(int, get_averageBGR(quantised, x, y)))
            cube_list_all[5].append(hsv_part + bgr_part)
'''
q=Queue()
threads=[]
t1 = threading.Thread(target=job1,name=('t1',))
t2 = threading.Thread(target=job2,name=('t2',))
t3 = threading.Thread(target=job3,name=('t3',))
t4 = threading.Thread(target=job4,name=('t4',))
t5 = threading.Thread(target=job5,name=('t5',))
t6 = threading.Thread(target=job6,name=('t6',))
t1.start()
threads.append(t1)
t2.start()
threads.append(t2)
t3.start()
threads.append(t3)
t4.start()
threads.append(t4)
t5.start()
threads.append(t5)
t6.start()
threads.append(t6)
for thread in threads:
thread.join()
print('all pictures are taken\n')
'''
#every_data_contain_number
#for key in dict_data:
number_of_dict=len(dict_data)  # number of cube faces (6)
# Plain-int copies of the collected rows: json cannot serialise numpy
# int32, so every value is converted element-wise by hand.
store_data=[[] for _ in range (number_of_dict)]
# Reshape each face's rows into the 6-int-per-row list format.
for circule_num,value in zip([x for x in range(0,6)],dict_data.values()):
    store_data[circule_num] = [[0,0,0,0,0,0] for i in range (len(value))]
    for first in range(len(value)):
        for two in range(len(value[first])):
            store_data[circule_num][first][two]=int(value[first][two])
# Dump one JSON file per face: data0.json .. data5.json.
for json_number in range (6):
    file_name="data{0}.json".format(json_number)
    with open(file_name,"w") as f:
        json.dump(store_data[json_number],f)
    f.close()  # NOTE(review): redundant -- the `with` block already closed f
'''
for i in range(1,29):
path=read_picture(i)
print (path)
listhsv.clear()#清空hsv的tup
listrgb.clear()#清空rgb的tup
listnet.clear()#清空节点的tup
src = cv.imread(path)
while (src is None):
src = cv.imread(path)
if not src:
print('error reading picture')
sys.exit()
main(src)
print(listall)
print ('个数是')
list_num=len(listall)
store = [[0,0,0,0,0,0] for i in range (list_num)]
for list_1 in range(len(listall)):
for list_2 in range(len(listall[list_1])):
store[list_1][list_2]=int(listall[list_1][list_2])
'''
'''
filename='test.json'
with open(filename,'w') as f:
json.dump(store,f)
f.close()
'''
'''
with open('test(副本).txt','w') as f1:
for temp in listall:
print(type(temp[0]))
data='{},{},{},{},{},{}\n'.format(temp[0],temp[1],temp[2],temp[3],temp[4],temp[5])
f1.write(data)
f1.close()
'''
| xiaomoxiao/magic-cube | MultiThreading/code/getdata.py | getdata.py | py | 14,183 | python | en | code | 0 | github-code | 36 |
5459057284 | import configparser
from constants.Constants import Constants as const
from .OptimizerParamsFactory import OptimizerParamsFactory
from model.OptimizerFactory import OptimizerFactory
class ConfigParams(object):
    """Typed view over an INI configuration file: model topology plus
    training hyper-parameters, including a fully constructed optimizer."""

    def __init__(self, file):
        config = configparser.ConfigParser()
        # FIX: read the file inside a `with` block; the original leaked
        # the handle via config.read_file(open(file)).
        with open(file) as config_file:
            config.read_file(config_file)

        # Model
        self.architecture = config.get(const.ConfigSection.model, "architecture")
        # Valid only for mobilenet
        if self.architecture == "mobilenet":
            self.mobilenetAlpha = config.getfloat(const.ConfigSection.model, "mobilenetAlpha", fallback=1.0)
        self.inputSize = config.getint(const.ConfigSection.model, "inputSize", fallback=224)
        self.inputChannels = config.getint(const.ConfigSection.model, "inputChannels", fallback=3)
        self.preprocessType = config.get(const.ConfigSection.model, "preprocessType", fallback="dummy")

        # HyperParameters (no fallbacks: these are required keys).
        self.epochs = config.getint(const.ConfigSection.hyperparameters, "epochs")
        self.batchSize = config.getint(const.ConfigSection.hyperparameters, "batchSize")
        self.patience = config.getint(const.ConfigSection.hyperparameters, "patience")
        # Build the optimizer from its declared type plus section params.
        optimizerType = config.get(const.ConfigSection.hyperparameters, "optimizer")
        optimizerParams = OptimizerParamsFactory.createOptimizerParams(optimizerType, config)
        self.optimizer = OptimizerFactory.create(optimizerParams)
| SlipknotTN/kaggle_dog_breed | keras/lib/config/ConfigParams.py | ConfigParams.py | py | 1,442 | python | en | code | 0 | github-code | 36 |
70677270824 | """
Filename: locate_nci_data.py
Author: Damien Irving, irving.damien@gmail.com
Description: Locate CMIP5 data at NCI
"""
# Import general Python modules
import sys, os, pdb
import argparse
from ARCCSSive import CMIP5
import six
import glob
# Define functions
def main(inargs):
    """Run the program.

    Query the ARCCSSive CMIP5 catalogue for the requested dataset, print
    where the data lives (DRSv2 tree and replica versions), and optionally
    build -- or just print -- symlink commands mirroring the files into the
    user's own DRSv2 tree.
    """
    cmip5 = CMIP5.DB.connect()
    outputs = cmip5.outputs(experiment = inargs.experiment,
                            variable = inargs.variable,
                            mip = inargs.mip,
                            model = inargs.model,
                            ensemble = inargs.ensemble)
    # Canonical DRSv2 location for this dataset.
    ua6_path = '/g/data/ua6/DRSv2/CMIP5/%s/%s/%s/%s/%s/%s/latest/*' %(inargs.model, inargs.experiment, inargs.time_freq, inargs.realm, inargs.ensemble, inargs.variable)
    print('DRSv2:', glob.glob(ua6_path))
    # Destination directory for the symlinks.
    my_path = '/g/data/r87/dbi599/DRSv2/CMIP5/%s/%s/%s/%s/%s/%s/latest' %(inargs.model, inargs.experiment, inargs.time_freq, inargs.realm, inargs.ensemble, inargs.variable)
    print('Elsewhere path:')
    elsewhere_path = []
    for o in outputs:
        var = o.variable
        for v in o.versions:
            elsewhere_path.append(v.path)
            print(v.path)
    print('Elsewhere files:')
    for f in outputs.first().filenames():
        six.print_(f)
        if inargs.symlink:
            #assert len(elsewhere_path) == 1
            command1 = 'mkdir -p %s' %(my_path)
            # NOTE(review): elsewhere_index is trusted blindly -- an
            # out-of-range value raises IndexError here.
            command2 = 'ln -s -f %s/%s %s/%s' %(elsewhere_path[inargs.elsewhere_index], f, my_path, f)
            if inargs.execute:
                os.system(command1)
                os.system(command2)
            else:
                # Dry run: show the commands instead of executing them.
                print(command1)
                print(command2)
if __name__ == '__main__':

    extra_info ="""
author:
  Damien Irving, irving.damien@gmail.com
dependencies:
  vdi $ pip install --user ARCCSSive
  vdi $ export CMIP5_DB=sqlite:////g/data1/ua6/unofficial-ESG-replica/tmp/tree/cmip5_raijin_latest.db

"""

    description='Locate CMIP5 data at NCI'
    # RawDescriptionHelpFormatter keeps the epilog's line breaks intact.
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    # Positional arguments identifying the CMIP5 dataset.
    parser.add_argument("experiment", type=str, help="Experiment name")
    parser.add_argument("variable", type=str, help="var_name")
    parser.add_argument("time_freq", type=str, help="e.g. mon or fx")
    parser.add_argument("mip", type=str, help="e.g. Omon, Amon, fx or aero")
    parser.add_argument("realm", type=str, help="e.g. atmos, ocean or aerosol")
    parser.add_argument("model", type=str, help="Model name")
    parser.add_argument("ensemble", type=str, help="e.g. r1i1p1")

    # Optional behaviour switches.
    parser.add_argument("--symlink", action="store_true", default=False,
                        help="Create a symlink for the elsewhere files")
    parser.add_argument("--execute", action="store_true", default=False,
                        help="Execute the symlink command rather than printing to screen")
    parser.add_argument("--elsewhere_index", type=int, default=0,
                        help="Index for whcih elsewhere path to use")

    args = parser.parse_args()
    main(args)
| DamienIrving/ocean-analysis | downloads/locate_nci_data.py | locate_nci_data.py | py | 3,321 | python | en | code | 9 | github-code | 36 |
35869343289 | # receiver
import socket, select
from pickle import loads
def extract_ip():
    """Best-effort discovery of this host's primary IPv4 address.

    "Connecting" a UDP socket sends no packet but makes the kernel pick
    the outbound interface, whose address getsockname() then reveals.
    Falls back to loopback when that fails for any reason.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('10.255.255.255', 1))
        address = probe.getsockname()[0]
    except Exception:
        address = '127.0.0.1'
    finally:
        probe.close()
    return address
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = extract_ip()
print(ip)
port = 5005
timeout = 3  # NOTE(review): defined but never applied to the socket
sock.bind((ip, port))
# Receive loop: each datagram is expected to carry a pickled file name.
while True:
    data, addr = sock.recvfrom(1024)
    if data:
        # SECURITY: pickle.loads on network data executes arbitrary code
        # if the sender is untrusted -- consider json instead.
        d = loads(data)
        print('File name:', d)
        fname = d.strip()
        print(fname)
        #f = open(fname, 'wb')
| jmerc141/UDP-Chatroom | my_receiver.py | my_receiver.py | py | 644 | python | en | code | 0 | github-code | 36 |
14248726433 | # 모든 상어가 이동한 후의 보드를 반환하는 함수
def move_shark(board, priority_move, look_direction, shark_info_for_smell, dx, dy):
n = len(board)
new_board = [[0] * n for _ in range(n)]
for x in range(n):
for y in range(n):
if board[x][y] != 0: # 만약 상어가 존재하면
shark_num = board[x][y]
flag = False
#네 방향을 돌아보면서 냄새가 없는 곳을 찾아내기
#마땅한 곳이 없다면 자신의 냄새 쪽으로 이동
for i in range(4):
#우선순위 방향대로 먼저 한번 이동을 해보고 냄새의 존재에 의해서 이동 불가능하면 그 다음 우선순위대로 이동을 해보자
nx = x + dx[priority_move[shark_num - 1][look_direction[shark_num - 1] - 1][i] - 1]
ny = y + dy[priority_move[shark_num - 1][look_direction[shark_num - 1] - 1][i] - 1]
if 0 <= nx and nx < n and 0 <= ny and ny < n:
#만약 냄새의 흔적이 없다면 이동하기
if shark_info_for_smell[nx][ny][1] == 0:
#이동하면 바라보는 방향도 바뀔 것이므로 업데이트
look_direction[shark_num - 1] = priority_move[shark_num - 1][look_direction[shark_num - 1] - 1][i]
#새로운 보드를 통해서 이미 그 자리로 이동한 상어가 있는지 확인하기
if new_board[nx][ny] == 0: #만약 상어가 없다면
new_board[nx][ny] = shark_num
else: #있다면 번호가 낮은 상어로 업데이트
new_board[nx][ny] = min(new_board[nx][ny], shark_num)
flag = True #이동 완료
break #이동완료했으므로 네 방향 둘러보는 반복문 나가기
if not flag: #냄새 때문에 이동 불가능하다면 자신의 냄새로 이동하기
#다시 네 방향을 둘러보며
for i in range(4):
nx = x + dx[priority_move[shark_num - 1][look_direction[shark_num - 1] - 1][i] - 1]
ny = y + dy[priority_move[shark_num - 1][look_direction[shark_num - 1] - 1][i] - 1]
if 0 <= nx and nx < n and 0 <= ny and ny < n:
if shark_info_for_smell[nx][ny][0] == board[x][y]: # 만약 이 자리가 냄새를 남겨놓은 자리라면
#해당 자리로 이동하기
look_direction[shark_num - 1] = priority_move[shark_num - 1][look_direction[shark_num - 1] - 1][i]
new_board[nx][ny] = board[x][y]
break
return new_board #새로 이동한 board를 반환하기
# Refresh the shark-smell grid for one turn.
def update_smell(board, k, shark_info_for_smell):
    """Age every smell by one tick, then re-stamp cells occupied by a shark.

    Mutates shark_info_for_smell in place; each cell is [shark_no, ttl],
    where a shark standing on a cell resets its smell ttl to k.
    """
    size = len(board)
    for row in range(size):
        for col in range(size):
            cell = shark_info_for_smell[row][col]
            if cell[1] > 0:
                cell[1] -= 1  # smell decays one step per turn
            if board[row][col] != 0:
                # A shark standing here refreshes its smell to full strength.
                shark_info_for_smell[row][col] = [board[row][col], k]
# Check whether only shark #1 remains on the board.
def isOnlyOne(board):
    """Return True when no cell holds a shark numbered above 1.

    The board is assumed square; cells hold 0 (empty) or a shark number.
    """
    size = len(board)
    return not any(board[r][c] > 1 for r in range(size) for c in range(size))
# Entry point.
if __name__ == "__main__":
    # Input
    # n: board size, m: number of sharks, k: smell lifetime in turns.
    n, m, k = map(int, input().split())
    # Per-cell [shark number, remaining smell time].
    shark_info_for_smell = [[[0] * 2 for _ in range(n)] for i in range(n)]
    board = [] # shark numbers per cell (0 = empty)
    for _ in range(n):
        board.append(list(map(int, input().split())))
    # Stamp the initial smells under each shark.
    for i in range(n):
        for j in range(n):
            if board[i][j] != 0:
                shark_info_for_smell[i][j] = [board[i][j], k]
    # Current heading of each shark (updated in place after every move).
    look_direction = list(map(int, input().split()))
    # priority_move[s][d]: direction priority list for shark s+1 when it is
    # heading d+1 (directions are 1..4 = up, down, left, right).
    priority_move = [[[None]] * 4 for _ in range(m)]
    for i in range(m):
        for j in range(4):
            priority_move[i][j] = list(map(int, input().split()))
    # Simulation
    # Row/column offsets for directions 1..4 (up, down, left, right).
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    t = 0 # elapsed turns
    while True:
        # Age the smells, then move every shark by its priority rules
        # (eating is resolved inside move_shark via the min() rule).
        update_smell(board, k, shark_info_for_smell)
        board = move_shark(board, priority_move, look_direction, shark_info_for_smell, dx, dy)
        t += 1 # one turn has passed
        # Give up with -1 if sharks other than #1 survive 1000 turns.
        if t > 1000:
            print(-1)
            break
        else:
            if isOnlyOne(board):
                print(t)
                break
| vmfaldwntjd/Algorithm | BaekjoonAlgorithm/파이썬/구현/[백준 19237]어른 상어/Baekjoon_19237.py | Baekjoon_19237.py | py | 6,119 | python | ko | code | 0 | github-code | 36 |
6724801910 | from RestrictedPython import compile_restricted_function, safe_builtins, limited_builtins, utility_builtins
someglobalvar = 123
myscript = """
import math
import tempfile
import io
#folgende befehle fuehren zu fehlern
#f = open("app.py", "rb")
#f = NamedTemporaryFile(delete=False)
def g(x):
#return x + 1 + someglobalvar <--- kein Zugriff auf someglobalvar moeglich
return h(x + 1)
result = math.exp(g(f(data)))
return result
"""
#globale variablen innerhalb der sandbox
safe_locals = {}
safe_globals = safe_builtins
additional_globals = {'data' : 2, 'f' : lambda x: x**2}
safe_globals.update(additional_globals)
#Kompilieren der Hauptfunktion
main_function_name = 'main'
main_function_compiled = compile_restricted_function(p = '', body = myscript, name = main_function_name, filename = '<inline code>')
#Kompilieren der Hilfsfunktion
support_function_name = 'h'
support_function_parameters = 'x'
support_function_body = 'return -x'
support_function_compiled = compile_restricted_function(p = support_function_parameters, body = support_function_body, name = support_function_name, filename = '<inline code>')
#Erstellen des Funktionszeigers der Hilfsfunktion
exec(support_function_compiled.code, safe_globals, safe_locals)
support_function_compiled_pointer = safe_locals[support_function_name]
print((support_function_compiled_pointer(123))) #Test der Hilfsfunktion
#Hinzufuegen der Hilfsfunktion zu den globalen Variablen der Sandbox, damit diese genutzt werden kann
updated_globals = {support_function_name : support_function_compiled_pointer}
safe_globals.update(updated_globals)
#Erzeugen des Funktionszeigers der Hauptfunktion
exec(main_function_compiled.code, safe_globals, safe_locals)
main_compiled_pointer = safe_locals[main_function_name]
print(main_compiled_pointer(*[], **{})) #Test der Hauptfunktion
#update der globalen variable 'data'
updated_globals = {'data' : 3}
safe_globals.update(updated_globals)
#update von 'h'
support_function_compiled = compile_restricted_function(p = support_function_parameters, body = 'return +x', name = support_function_name, filename = '<inline code>')
exec(support_function_compiled.code, safe_globals, safe_locals)
support_function_compiled_pointer = safe_locals[support_function_name]
updated_globals = {support_function_name : support_function_compiled_pointer}
safe_globals.update(updated_globals)
#erneute Kompilierung
import types
main_compiled_update_pointer = types.FunctionType(
main_compiled_pointer.__code__,
safe_globals,
'<' + main_function_name + '>',
main_compiled_pointer.__defaults__ or ())
print(main_compiled_update_pointer(*[], **{})) #Test der Hauptfunktion
| aleksProsk/HydroOpt2.0 | minimal-code-examples/minimal-embedded-script2.py | minimal-embedded-script2.py | py | 2,656 | python | en | code | 0 | github-code | 36 |
class Solution:
    """LeetCode #1: Two Sum."""

    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices [i, j] (i < j) of two entries summing to target,
        or [] when no such pair exists.

        One-pass hash map, O(n) time / O(n) space.  (The original scanned
        with `in` plus list.index inside the loop, O(n^2) worst case, and
        needed special-casing for duplicate values.)
        """
        seen = {}  # value -> index of its first occurrence
        for j, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], j]
            seen[value] = j
        return []
if __name__ == "__main__":
    # Smoke tests: both examples should print [0, 1].
    print(Solution().twoSum([1,2,3], 3))
    print(Solution().twoSum([2,5,7], 7))
| Ftttttt/LeetCode_solution | Python/two_sum.py | two_sum.py | py | 574 | python | en | code | 0 | github-code | 36 |
33205266662 | import unittest
from booksorter import BookSorter
class BookSorterTest(unittest.TestCase):
    """Exercises the BookSorter CLI commands: scan, sort, move, defaults.

    NOTE: these are currently smoke tests -- the post-conditions noted in
    each method are not asserted yet.
    """

    def test_scan_command(self):
        # FIX: the command name was misspelled 'sacn'.
        bs = BookSorter('scan',
                        '--config config.json',
                        '--target ./books')
        bs.run()
        # TODO: assert booktypes.json exists and holds proper information

    def test_sort_command(self):
        bs = BookSorter('sort',
                        '--config config.json',
                        '--report report.txt',
                        '--source ./inbox')
        bs.run()
        # TODO: assert report.txt exists and holds the sorting result

    def test_move_command(self):
        bs = BookSorter('move',
                        '--config config.json',
                        '--report report.txt',
                        '--source ./inbox',
                        '--target ./books')
        bs.run()
        # TODO: assert the books were moved and the report updated

    def test_default_configs(self):
        bs = BookSorter('scan')
        # TODO: assert bs is properly configured by 'config.json'
| yuan201/sortbooks | test_booksorter.py | test_booksorter.py | py | 1,079 | python | en | code | 0 | github-code | 36 |
class ClassMV:
    """Toy Menezes-Vanstone-style elliptic-curve cryptosystem over GF(71).

    Curve: y^2 = x^3 + 4x + 2 (mod 71).  Affine points are (x, y) tuples;
    the point at infinity (group identity) is the string 'inf'.
    """

    def __init__(self):
        # Key material, installed later via set_key().
        self.alpha = 0   # public base point
        self.beta = 0    # public key; expected to equal a * alpha
        self.a = 0       # private scalar
        self.k = 0
        self.points = []
        # Enumerate all affine points: x lies on the curve iff poly(x) is a
        # quadratic residue mod 71, i.e. poly(x)^((71-1)/2) == 1 (Euler).
        for i in range(71):
            if self.poly(i)**35 % 71 == 1:
                # Square root via exponent (71+1)/4 = 18 (71 = 3 mod 4).
                val = self.poly(i)**18 % 71
                self.points.append((i, val))
                self.points.append((i, -val % 71))
        self.points.append('inf')

    def poly(self, x):
        """Right-hand side of the curve equation: x^3 + 4x + 2 mod 71."""
        return (x**3 + 4*x + 2) % 71

    def egcd(self, a, b):
        """Extended Euclid: return (g, x, y) with a*x + b*y = g = gcd(a, b)."""
        if a == 0:
            return (b, 0, 1)
        g, y, x = self.egcd(b % a, a)
        return (g, x - (b // a) * y, y)

    def modinv(self, a, m):
        """Modular inverse of a mod m; raises when gcd(a, m) != 1."""
        g, x, y = self.egcd(a, m)
        if g != 1:
            raise Exception('modular inverse does not exist')
        return x % m

    def suma(self, x, y):
        """Elliptic-curve point addition x + y (group law with a = 4)."""
        if x == 'inf':
            return y
        if y == 'inf':
            return x
        # P + (-P) = infinity.
        if ((-x[1]) % 71 == y[1]) and (x[0] == y[0]):
            return 'inf'
        x1, y1 = x
        x2, y2 = y
        if x == y:
            # Doubling: slope = (3*x1^2 + a) / (2*y1); y1 != 0 here because
            # the inverse case above already returned.
            l = (3*x1**2 + 4) * self.modinv(2*y1, 71)
        else:
            # Chord: slope = (y2 - y1) / (x2 - x1).
            l = ((y2 - y1) % 71) * self.modinv((x2 - x1) % 71, 71) % 71
        # Same result formulas for both cases (x1 == x2 when doubling).
        z = (l**2 - x1 - x2) % 71
        w = (l*(x1 - z) - y1) % 71
        return (z, w)

    def mult(self, scalar, x):
        """scalar * x by repeated addition (O(scalar)).

        FIX: scalar == 0 now returns the identity 'inf'; the original
        returned x unchanged.  (The parameter was also renamed from `int`,
        which shadowed the builtin.)
        """
        if scalar == 0:
            return 'inf'
        temp = x
        for _ in range(scalar - 1):
            temp = self.suma(x, temp)
        return temp

    def set_key(self, alpha, beta, a):
        """Install key material; beta is expected to equal a * alpha."""
        self.alpha = alpha
        self.beta = beta
        self.a = a

    def cifrar(self, m, k):
        """Encrypt the message pair m = (m1, m2) with ephemeral scalar k.

        Returns (y0, y1, y2): y0 = k*alpha, and (y1, y2) are the message
        coordinates masked by the coordinates of k*beta.  NOTE(review):
        assumes neither coordinate of k*beta is 0 mod 71 -- confirm for
        the chosen keys.
        """
        y0 = self.mult(k, self.alpha)
        mask = self.mult(k, self.beta)
        y1 = mask[0] * m[0] % 71
        y2 = mask[1] * m[1] % 71
        return (y0, y1, y2)

    def descifrar(self, y0, y1, y2):
        """Decrypt: recompute the mask a*y0 and divide it back out."""
        c = self.mult(self.a, y0)
        x = (self.modinv(c[0], 71) * y1) % 71
        y = (self.modinv(c[1], 71) * y2) % 71
        return (x, y)
''' test = ClassMV()
print(test.points[0])
print(test.points[0],test.suma(test.points[0],test.points[0]))
test.set_key((0,19),(59,17),8)
print(test.cifrar((10,7),8))
print(test.descifrar((59, 17), 68, 65)) '''
| JuanDa14Sa/Cripto | Main/MV.py | MV.py | py | 2,320 | python | en | code | 0 | github-code | 36 |
12480966037 | import view as user
import model_div
import model_sub
import model_sum
import model_mult
import logger
def button_click():
global value_a, value_b
print('1-комплексные числа, 2- рациональные числа')
value_item = int(input('Выберите значение: '))
print()
if value_item == 1:
value_a = user.input_complex()
value_b = user.input_complex()
if value_item == 2:
value_a = user.input_data()
value_b = user.input_data()
print('1-деление, 2-умножение, 3 - сложение, 4- вычитание')
print('Выберите функцию: ')
value_model = int(input('Выберите значение: '))
print()
if value_model == 1:
model_div.init(value_a, value_b)
result = model_div.do_it()
user.view_data(result)
logger.log_to_file(value_a, value_b, "//", result)
if value_model == 2:
model_mult.init(value_a, value_b)
result = model_mult.do_it()
user.view_data(result)
logger.log_to_file(value_a, value_b, "*", result)
if value_model == 3:
model_sum.init(value_a, value_b)
result = model_sum.do_it()
user.view_data(result)
logger.log_to_file(value_a, value_b, "+", result)
if value_model == 4:
model_sub.init(value_a, value_b)
result = model_sub.do_it()
user.view_data(result)
logger.log_to_file(value_a, value_b, "-", result)
| dungogggggggggggggg/pythonProject7 | controller.py | controller.py | py | 1,510 | python | ru | code | 0 | github-code | 36 |
36728200947 | #!/usr/bin/env python
from pwn import *
__DEBUG__ = 1
#context.log_level = 'debug'
p = None
def init():
global p
envs = {'LD_PRELOAD':'/home/nhiephon/libc.so.6'}
if __DEBUG__:
p = process('./library_in_c', env=envs)
else:
p = remote('shell.actf.co', 20201)
return
def menu():
return
def send_name(data=''):
p.sendlineafter('name?', data)
return
def check_out(data=''):
p.sendafter('check out?', data)
return
if __name__ == '__main__':
init()
send_name('%p %p %p')
data = p.recvuntil('And')
leak_stack = int(data[-48:-34], 16)
rbp = leak_stack + 0x2730
success('rbp : ' + hex(rbp))
leak_libc = int(data[-16:-4], 16)
success('leak_libc : ' + hex(leak_libc))
libc_base = leak_libc - 0xf72c0
success('libc_base : ' + hex(libc_base))
one_gadget = 0x45216 + libc_base
success('one_gadget : ' + hex(one_gadget))
num1 = int(hex(one_gadget)[2:6], 16)
num2 = int(hex(one_gadget)[6:10], 16)
num3 = int(hex(one_gadget)[10:14], 16)
if num1 < num2 and num2 < num3:
# raw_input('?')
payload = '%{}p%21$hn%{}p%22$hn%{}p%23$hn'.format(num1, num2-num1, num3-num2).ljust(40, 'A') + p64(rbp+8 +4) + p64(rbp+8 +2) + p64(rbp+8)
check_out(payload)
p.interactive() | Aleks-dotcom/ctf_lib_2021 | angstormctf/chall4/sol3.py | sol3.py | py | 1,485 | python | en | code | 1 | github-code | 36 |
22546241259 | from PySide2.QtUiTools import QUiLoader #pip3 install PySide2
from PySide2.QtWidgets import QApplication, QTableWidgetItem
from PySide2.QtCore import QFile, QIODevice, QTimer
from PySide2.QtWidgets import QFileDialog, QMessageBox
import math
from PySide2.QtCore import QStringListModel
import sys
import os
from PySide2.QtGui import QIcon, QPixmap
import requests
put = os.path.dirname(os.path.realpath(__file__)) + "/"#Путь- (part-1)
R = -1
U_1 = 0
U_2 = 0
group_list = []
import recording_spark_api
def sex(SSS, window,target):
###window.pushButton_2.setEnabled(False)
print(SSS)
a = 0
for m in group_list:
if m[0] == target[5]:
break
a = a + 1
if SSS != a:
window.pushButton_7.setEnabled(True)
else:
window.pushButton_7.setEnabled(False)
def SAS(window):
m = window.radioButton.isChecked()
print(m)
m = window.radioButton_2.isChecked()
print(m)
window.radioButton_2.setChecked(1)
def test(window, target, IM):
global R
if IM == 0 or IM == 2:
#print(window.comboBox.currentIndex())
#print(window.comboBox.currentText())
#print(window.comboBox_2.currentIndex())
#print(window.comboBox_2.currentText())
group_id = group_list[window.comboBox.currentIndex()][0]
E_1 = window.checkBox.isChecked()
E_2 = window.checkBox_2.isChecked()
# add(user_name, email, password, avatar, active, group_id)
M = recording_spark_api.user.add(window.lineEdit.text(), window.lineEdit_2.text(), window.lineEdit_3.text(), E_1, E_2, group_id)
print(M.number)
if M.number == 200:
R = M.response.user_id
window.close()
#return R
else:
msg = QMessageBox(window)
msg.setWindowTitle(f"ERROE {M.number}")
msg.setText(f" \n {M.response.text} \n ")
msg.exec_()
elif IM == 1:
#group_id = target email, password, avatar, active, group_id
group_id = group_list[window.comboBox.currentIndex()][0]
E_1 = window.checkBox.isChecked()
E_2 = window.checkBox_2.isChecked()
if window.lineEdit_3.text() == "" or window.lineEdit_3.text() == None:
password = None
else:
password = window.lineEdit_3.text()
if window.lineEdit.text() == target[1]:
user_name = None
else:
user_name = window.lineEdit.text()
if window.lineEdit_2.text() == target[2]:
email = None
else:
email = window.lineEdit_2.text()
if window.checkBox.isChecked() == target[3]:
avatar = None
else:
avatar = window.checkBox.isChecked()
print(window.checkBox_2.isChecked(), target[4])
if window.checkBox_2.isChecked() == target[4]:
active = None
else:
active = window.checkBox_2.isChecked()
if group_list[window.comboBox.currentIndex()][0] == target[5]:
group_id = None
else:
group_id = group_list[window.comboBox.currentIndex()][0]
if (target[4] == 1 and window.checkBox_2.isChecked() == False) or (password != None):
msg = QMessageBox.question(window, " !!!ВНИМАНИЕ!!! ",
"Вы пытаетесь отключить/сменить пароль у этой учётной запеси!\nВсе открытые сесии будут закрыты\nПроболжать ?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if msg == QMessageBox.Yes:
M = recording_spark_api.user.edit(target[0],user_name, email, password, avatar, active, group_id)
print(M.number)
if M.number == 200:
R = 0
window.close()
#return R
else:
msg = QMessageBox(window)
msg.setWindowTitle(f"ERROE {M.number}")
msg.setText(f" \n {M.response.text} \n ")
msg.exec_()
else:
M = recording_spark_api.user.edit(target[0],user_name, email, password, avatar, active, group_id)
print(M.number)
if M.number == 200:
R = 0
window.close()
#return R
else:
msg = QMessageBox(window)
msg.setWindowTitle(f"ERROE {M.number}")
msg.setText(f" \n {M.response.text} \n ")
msg.exec_()
def SAS_r(window, target,N):
if N == 0:
window.lineEdit.setText(target[1])
elif N == 1:
window.lineEdit_2.setText(target[2])
elif N == 2:
window.checkBox_2.setChecked(target[4])
window.pushButton_6.setEnabled(False)
#window.lineEdit_3.setText(target[8])
elif N == 3:
a = 0
for m in group_list:
if m[0] == target[5]:
break
a = a + 1
window.comboBox.setCurrentIndex(a)
window.pushButton_7.setEnabled(False)
elif N == 4:
#window.comboBox.setCurrentIndex(U_1)
window.lineEdit_3.setText("")
window.pushButton_4.setEnabled(False)
elif N == 5:
window.checkBox.setChecked(target[3])
window.pushButton_5.setEnabled(False)
print(U_1)
print(N)
def start(window, target, IM):
print(f"target - {target}")
global group_list
Alo = recording_spark_api.ls_group()
if Alo.number == 200:
group_list = Alo.response.matrix
#for l in L:
# window.comboBox.addItem(l[1])
# window.comboBox_2.addItem(l[1])
window.lineEdit.setPlaceholderText("Имя")
window.lineEdit_2.setPlaceholderText("")
window.lineEdit_3.setPlaceholderText("")
for mlo in group_list:
window.comboBox.addItem(mlo[1])
if len(target) != 0:
print(target)
window.lineEdit.setText(target[1])
window.lineEdit_2.setText(target[2])
#window.lineEdit_3.setText(target[8])
K = 0
m = True
print(f"group_list - {group_list}, {target}")
for p in group_list:
if target[5] == p[0]:
window.comboBox.setCurrentIndex(K)
U_1 = K
m = False
break
K = K + 1
if m:
group_list.append([target[5],target[6],target[7]])
window.comboBox.addItem(target[6])
window.comboBox.setCurrentIndex(K)
#if m:
# L.append([target[3],target[4],None,None,None,None])
# window.comboBox.addItem(target[4])
# window.comboBox.setCurrentIndex(K)
# U_1 = K
if target[4] == 1:
window.checkBox_2.setChecked(True)
if target[3] == 1:
window.checkBox.setChecked(True)
print("L")
window.pushButton_2.setEnabled(False)
window.pushButton_3.setEnabled(False)
window.pushButton_4.setEnabled(False)
window.pushButton_5.setEnabled(False)
window.pushButton_6.setEnabled(False)
window.pushButton_7.setEnabled(False)
window.setWindowTitle("ID: {} - {}".format(target[0],target[1]))
if IM == 0 or IM == 2:
window.pushButton_2.deleteLater()
window.pushButton_3.deleteLater()
window.pushButton_4.deleteLater()
window.pushButton_5.deleteLater()
window.pushButton_6.deleteLater()
window.pushButton_7.deleteLater()
window.label_7.deleteLater()
window.setWindowTitle("Создания")
def M(window,target,p):
if p == 0:
if target[1] != window.lineEdit.text():
window.pushButton_2.setEnabled(True)
else:
window.pushButton_2.setEnabled(False)
elif p == 1:
if target[2] != window.lineEdit_2.text():
window.pushButton_3.setEnabled(True)
else:
window.pushButton_3.setEnabled(False)
elif p == 2:
if not ("" == window.lineEdit_3.text() or window.lineEdit_3.text() == None): # Проблема
window.pushButton_4.setEnabled(True)
else:
window.pushButton_4.setEnabled(False)
elif p == 3:
if window.checkBox.isChecked() != bool(target[3]):
window.pushButton_5.setEnabled(True)
else:
window.pushButton_5.setEnabled(False)
elif p == 4:
print(window.checkBox_2.isChecked())
if window.checkBox_2.isChecked() != bool(target[4]):
window.pushButton_6.setEnabled(True)
else:
window.pushButton_6.setEnabled(False)
##### !!!СДЕЛАТЬ ПРОВЕРКУ ЧТО ЭТО INT!!!
#if target[9] == None or target[9] == "":
# window.pushButton_7.setEnabled(False)
#else:
# window.pushButton_7.setEnabled(True)
def M_2(window):
print()
"""
if window.lineEdit_4.text() != "":
try:
namber = int(window.lineEdit_4.text())
except ValueError:
window.pushButton.setEnabled(False)
return 0
#if window.pushButton.isEnabled():
if "" == window.lineEdit.text() or window.lineEdit_2.text() == "":
window.pushButton.setEnabled(False)
else:
window.pushButton.setEnabled(True)
"""
#window.lineEdit_2.text()
def GUI(target, IM, themes):
#app = QApplication(sys.argv)
ui_file_name = put + "/content/ui/user.ui"
ui_file = QFile(ui_file_name)
if not ui_file.open(QIODevice.ReadOnly):
print("Cannot open {}: {}".format(ui_file_name, ui_file.errorString()))
sys.exit(-1)
loader = QUiLoader()
window = loader.load(ui_file)
ui_file.close()
if not window:
print(loader.errorString())
sys.exit(-1)
window.show()
window.setWindowIcon(QIcon(f"{put}/content/icon/2icon.png"))
window.setStyleSheet(open(f"{put}content/themes/{themes}/user_all").read())
QTimer.singleShot(0, lambda:start(window, target, IM))
# 71A7BB
#window.pushButton.clicked.connect(lambda:test (window,L))
window.pushButton.clicked.connect(lambda:test(window, target, IM))
if IM == 1:
window.pushButton_2.clicked.connect(lambda:SAS_r (window, target,0))
window.pushButton_3.clicked.connect(lambda:SAS_r (window, target,1))
window.pushButton_6.clicked.connect(lambda:SAS_r (window, target,2))
window.pushButton_7.clicked.connect(lambda:SAS_r (window, target,3))
window.pushButton_4.clicked.connect(lambda:SAS_r (window, target,4))
window.pushButton_5.clicked.connect(lambda:SAS_r (window, target,5))
#window.lineEdit.initStyleOption()
#window.lineEdit.textChanged[str].connect(M)
window.lineEdit.textChanged.connect(lambda:M (window,target,0))
window.lineEdit_2.textChanged.connect(lambda:M (window,target,1))
window.lineEdit_3.textChanged.connect(lambda:M (window,target,2))
window.comboBox.activated.connect(lambda:sex (window.comboBox.currentIndex(),window, target))
window.checkBox.stateChanged.connect(lambda:M (window, target, 3))
window.checkBox_2.stateChanged.connect(lambda:M (window, target, 4))
elif IM == 0 or IM == 2:
window.lineEdit.textChanged.connect(lambda:M_2 (window))
window.lineEdit_2.textChanged.connect(lambda:M_2 (window))
#window_L.widget.hide()
#window_L.setStyleSheet('.QWidget {border-image: url(' + A + ') 0 0 0 0 stretch stretch;} .QLabel{border-image: None;}')
#window_L.pushButton.clicked.connect(lambda:login (window_L))
#sys.exit(app.exec_())
#app.exec_()
print("SEX")
def open_l(target, IM, themes):
#print("Кородний коне: {}, а также наш ооочень длинный и живучий токен {}"
# .format(recording_spark_api.short_token[0],recording_spark_api.live_token[0],))
global R
R = 0
print(target)
GUI(target, IM, themes)
print(f"AAAAA{R}")
return R
#GUI(0)
| romenskiy2012/recording_spark | Client/GUI_user.py | GUI_user.py | py | 12,345 | python | en | code | 1 | github-code | 36 |
13300689829 | from art import logo
import os
bid = list()
def add_new_bidder(name: str, bid_price: int) ->dict[str, int]:
user_data = dict()
user_data["name"] = name
user_data["bid_price"] = bid_price
return user_data
def find_the_highest_bidder(bid: dict[str, int]) ->tuple[str, int]:
highest_bid = 0
winner = ""
for bidder in bid:
if bidder["bid_price"] > highest_bid:
highest_bid = bidder["bid_price"]
winner = bidder["name"]
return winner, highest_bid
def main():
os.system('clear')
should_continue = True
while should_continue:
print(logo)
name = input("What is your name? ")
bid_price = int(input("What is your bid price?: $"))
bid.append(add_new_bidder(name=name, bid_price=bid_price))
is_next_user = input("Are there other users who want to bid? 'yes' or 'no'?:\n")
if is_next_user == 'no':
should_continue = False
else:
os.system('clear')
winner_name, winner_price = find_the_highest_bidder(bid)
os.system('clear')
print(logo)
print(f"The winner is {winner_name} who paid ${winner_price}")
if __name__ == "__main__":
main()
| robmik1974/secret-auction | main.py | main.py | py | 1,215 | python | en | code | 0 | github-code | 36 |
18937659990 | from django.db import models
from wagtail.admin.panels import FieldPanel
from wagtail.snippets.models import register_snippet
class SimpleTaxonomy(models.Model):
"""An abstract model for simple taxonomy terms."""
class Meta:
abstract = True
ordering = ['title']
title = models.CharField(
max_length=100,
help_text='The title of the category'
)
slug = models.SlugField(
max_length=100,
unique=True,
help_text='The slug must be unique for this category'
)
translation_fields = [
'title',
'slug',
]
panels = [
FieldPanel('title'),
FieldPanel('slug'),
]
def __str__(self):
"""Override magic method to return term title."""
return self.title
@register_snippet
class Constituency(SimpleTaxonomy):
"""A concrete model for constituency taxonomy terms."""
class Meta:
verbose_name = 'Constituency'
verbose_name_plural = 'Constituencies'
| IATI/IATI-Standard-Website | taxonomies/models.py | models.py | py | 1,010 | python | en | code | 5 | github-code | 36 |
74698538345 | A = int(input())
B = int(input())
C = int(input())
if A>B and C and A!=B!=C:
print("%d eh o maior" % A)
if B>A and C and A!=B!=C:
print("%d eh o maior" % B)
if C>A and B and A!=B!=C:
print("%d eh o maior" % C)
| jaquelinediasoliveira/SENAI | 1DES/FPOO/Python/ex005.py | ex005.py | py | 231 | python | en | code | 0 | github-code | 36 |
43914452628 | # 벌집
N = int(input())
shell = 1
# N==1인 경우
if N == 1:
print(1)
exit()
# N>1인 경우, shell을 하나씩 증가시켜 N이 해당 shell에 속하는지 확인
while (True):
start = 3*shell**2 - 3*shell + 2
end = 3*shell**2 + 3*shell + 1
if start <= N and N <= end:
print(shell+1)
exit()
shell += 1
| yesjuhee/study-ps | baekjoon/StepByStep/01-Input-Output-Operations/2292.py | 2292.py | py | 355 | python | ko | code | 0 | github-code | 36 |
21241362539 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import time
import signal
from threading import Thread
from rtm.logger import logger
__author__ = 'David Qian'
"""
Created on 12/08/2016
@author: David Qian
"""
class ExecutorThread(Thread):
"""Executor thread, communicate with the real runner
"""
def __init__(self, cmd, workdir):
super(ExecutorThread, self).__init__()
self._runner = CmdRunner(cmd, workdir)
self._terminate = False
def run(self):
logger.info('start executor thread')
while not self._terminate:
self._runner.start()
self._runner.wait()
logger.info('terminate executor thread')
def terminate(self):
self._runner.terminate()
self._terminate = True
def restart_runner(self):
self._runner.terminate()
class CmdRunner(object):
"""Runner, fork subprocess to execute the command
"""
def __init__(self, cmd, workdir):
self.cmd = cmd.split()
self.workdir = workdir
self.p = None
def start(self):
logger.info('start runner')
self.p = subprocess.Popen(self.cmd, cwd=self.workdir)
logger.info('Runner pid is %d' % self.p.pid)
def terminate(self):
if self.p:
logger.info('terminate runner')
try:
self.p.terminate()
except OSError:
pass
def wait(self):
self.p.wait()
logger.info('runner killed')
class LoopMaster(object):
"""Loop restart executer
"""
def __init__(self, cmd, restart_time, workdir=None):
# hour of the restart time, e.g. 0~23
self.restart_time = int(restart_time)
self._executor = ExecutorThread(cmd, workdir)
self._signals = [
signal.SIGINT,
signal.SIGTERM,
]
self._setup_signal_handler()
def run(self):
logger.info('start master')
self._executor.start()
while True:
time.sleep(3600)
cur_time = time.localtime(time.time())
if self.restart_time <= cur_time.tm_hour < self.restart_time+1:
self._executor.restart_runner()
def terminate(self, signum, frame):
logger.warn('receive signal(%d)' % signum)
self._executor.terminate()
self._executor.join()
raise SystemExit()
def _setup_signal_handler(self):
for signum in self._signals:
signal.signal(signum, self.terminate)
if __name__ == '__main__':
e = LoopMaster('python -m SimpleHTTPServer', 21)
e.run()
| krizex/RunnerTimer | src/rtm/executor.py | executor.py | py | 2,631 | python | en | code | 0 | github-code | 36 |
74147827945 | import re
import argparse
from os import listdir
def read_file(filename: str) -> str:
with open("./regex_labs/src/{}.txt".format(filename)) as f:
return f.read()
def creditcards(content):
"""All credit card numbers and respective brands"""
matches = re.findall(r"([0-9\s]+)\n?([a-zA-Z\s]+)\n?", content)
mylist = []
for match in matches:
number = match[0].replace(" ", "").replace("\n", "")
brand = match[1].replace("\n", "")
mylist.append((number, brand))
return mylist
def phonenumbers(content):
"""All Portuguese phone numbers"""
matches = re.findall(r"\(\+?0?0?351\).?([0-9- ]*)", content)
return [match.replace("-", "").replace(" ", "") for match in matches]
def emails(content):
"""All emails except the ones with username: jose"""
matches = re.findall(r"(.*(?<!\njose)@.+)", content)
return [match for match in matches]
def urls(content):
"""All urls and respective query arguments"""
matches = re.finditer(r"https?://(?P<domain>.+)/(?P<args>\?.+)?", content)
mylist = []
for match in matches:
args = match.group("args")
args = args[1:].split("&") if args else []
mylist.append((match.group("domain"), args))
return mylist
if __name__ == '__main__':
""" python -m regex_labs.regex -r <filename> """
examples = [f.replace(".txt", "") for f in listdir("./regex_labs/src/")]
parser = argparse.ArgumentParser()
parser.add_argument("--run", '-r', choices=examples, required=True)
args = parser.parse_args()
file_content = read_file(args.run)
[print(line) for line in eval(args.run)(file_content)]
| zepcp/code_labs | regex_labs/regex.py | regex.py | py | 1,669 | python | en | code | 1 | github-code | 36 |
70947908905 | """
a good algorithm for concatenating two singly linked list together,
given both the head node of each list
"""
from example_singly_linked_list import SinglyLinkedList
def concat(L, M):
# concat two linked lists together
# the result is stored in L
if M._head is not None: # if M is none, does not matter what L is, simply return L
if L._head is None: # if L._head is None, copy M
L._head = M._head
L._size = M._size
else:
head = L._head
while head._next is not None:
head = head._next
head._next = M._head
# if M._head is None, don't need to do anything
if __name__ == '__main__':
L = SinglyLinkedList()
for i in range(10): L.add(i)
print("L:")
L.show()
M1 = SinglyLinkedList()
for j in range(5): M1.add(j)
print("M1: ")
M1.show()
# normal concat, two not None linked list
concat(L, M1)
L.show() # L changed, since it stores the result
print()
# M is None, concat
M2 = SinglyLinkedList()
M2.show()
concat(L, M2)
L.show() # L remain unchanged
print()
# L is None, M is not None
L2 = SinglyLinkedList()
L2.show()
concat(L2, M1)
L2.show() | luke-mao/Data-Structures-and-Algorithms-in-Python | chapter7/q2.py | q2.py | py | 1,291 | python | en | code | 1 | github-code | 36 |
72908299944 | class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class MinHeap:
def __init__(self):
self.root = None
def insert(self, value):
# Создаем новый узел с заданным значением
new_node = ListNode(value)
# Если куча пуста, делаем новый узел корнем
if not self.root:
self.root = new_node
return
# Иначе вставляем новый узел в отсортированный связный список
if value < self.root.val:
new_node.next = self.root
self.root = new_node
return
current = self.root
while current.next and value >= current.next.val:
current = current.next
# Вставляем новый узел после текущего узла
new_node.next = current.next
current.next = new_node
def extract_min(self):
# Извлекаем минимальное значение из корня и обновляем корень
if self.root:
min_val = self.root.val
self.root = self.root.next
return min_val
def is_empty(self):
return not bool(self.root)
# Пример использования:
if __name__ == "__main__":
min_heap = MinHeap()
# Вставка элементов в кучу
min_heap.insert(5)
min_heap.insert(3)
min_heap.insert(10)
min_heap.insert(2)
min_heap.insert(7)
# Извлечение минимального значения
while not min_heap.is_empty():
print(min_heap.extract_min(), end=' ') # Выведет: 2 3 5 7 10
| TatsianaPoto/yandex | Algorithm_complexity/heap/linked_list_sorted.py | linked_list_sorted.py | py | 1,790 | python | ru | code | 0 | github-code | 36 |
8231917354 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals This breaks __all__ on PY2
from . import config, metrics
from .core import Baseplate
def make_metrics_client(raw_config):
"""Configure and return a metrics client.
This expects two configuration options:
``metrics.namespace``
The root key to namespace all metrics in this application under.
``metrics.endpoint``
A ``host:port`` pair, e.g. ``localhost:2014``. If an empty string, a
client that discards all metrics will be returned.
:param dict raw_config: The app configuration which should have settings
for the metrics client.
:return: A configured client.
:rtype: :py:class:`baseplate.metrics.Client`
"""
cfg = config.parse_config(raw_config, {
"metrics": {
"namespace": config.String,
"endpoint": config.Optional(config.Endpoint),
},
})
return metrics.make_client(cfg.metrics.namespace, cfg.metrics.endpoint)
__all__ = [
"make_metrics_client",
"Baseplate",
]
| Omosofe/baseplate | baseplate/__init__.py | __init__.py | py | 1,147 | python | en | code | null | github-code | 36 |
495660367 | import os
import types
import pytest
import yaml
from dagster import (
DagsterEventType,
DagsterInvalidConfigError,
RunConfig,
check,
execute_pipeline,
pipeline,
seven,
solid,
)
from dagster.core.instance import DagsterInstance, InstanceRef, InstanceType
from dagster.core.storage.event_log import SqliteEventLogStorage
from dagster.core.storage.local_compute_log_manager import LocalComputeLogManager
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import SqliteRunStorage
def test_fs_stores():
@pipeline
def simple():
@solid
def easy(context):
context.log.info('easy')
return 'easy'
easy()
with seven.TemporaryDirectory() as temp_dir:
run_store = SqliteRunStorage.from_local(temp_dir)
event_store = SqliteEventLogStorage(temp_dir)
compute_log_manager = LocalComputeLogManager(temp_dir)
instance = DagsterInstance(
instance_type=InstanceType.PERSISTENT,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=run_store,
event_storage=event_store,
compute_log_manager=compute_log_manager,
)
run = RunConfig()
execute_pipeline(simple, run_config=run, instance=instance)
assert run_store.has_run(run.run_id)
assert run_store.get_run_by_id(run.run_id).status == PipelineRunStatus.SUCCESS
assert DagsterEventType.PIPELINE_SUCCESS in [
event.dagster_event.event_type
for event in event_store.get_logs_for_run(run.run_id)
if event.is_dagster_event
]
stats = event_store.get_stats_for_run(run.run_id)
assert stats.steps_succeeded == 1
assert stats.end_time is not None
def test_init_compute_log_with_bad_config():
with seven.TemporaryDirectory() as tmpdir_path:
with open(os.path.join(tmpdir_path, 'dagster.yaml'), 'w') as fd:
yaml.dump({'compute_logs': {'garbage': 'flargh'}}, fd, default_flow_style=False)
with pytest.raises(DagsterInvalidConfigError, match='Undefined field "garbage"'):
DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
def test_init_compute_log_with_bad_config_override():
with seven.TemporaryDirectory() as tmpdir_path:
with pytest.raises(DagsterInvalidConfigError, match='Undefined field "garbage"'):
DagsterInstance.from_ref(
InstanceRef.from_dir(tmpdir_path, overrides={'compute_logs': {'garbage': 'flargh'}})
)
def test_init_compute_log_with_bad_config_module():
with seven.TemporaryDirectory() as tmpdir_path:
with open(os.path.join(tmpdir_path, 'dagster.yaml'), 'w') as fd:
yaml.dump(
{'compute_logs': {'module': 'flargh', 'class': 'Woble', 'config': {}}},
fd,
default_flow_style=False,
)
with pytest.raises(check.CheckError, match='Couldn\'t import module'):
DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
MOCK_HAS_RUN_CALLED = False
def test_get_or_create_run():
with seven.TemporaryDirectory() as tmpdir_path:
instance = DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
run = PipelineRun.create_empty_run('foo_pipeline', 'bar_run')
assert instance.get_or_create_run(run) == run
assert instance.has_run(run.run_id)
assert instance.get_or_create_run(run) == run
# Run is created after we check whether it exists
with seven.TemporaryDirectory() as tmpdir_path:
instance = DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
run = PipelineRun.create_empty_run('foo_pipeline', 'bar_run')
def _has_run(self, run_id):
# This is uglier than we would like because there is no nonlocal keyword in py2
global MOCK_HAS_RUN_CALLED # pylint: disable=global-statement
# pylint: disable=protected-access
if not self._run_storage.has_run(run_id) and not MOCK_HAS_RUN_CALLED:
self._run_storage.add_run(PipelineRun.create_empty_run('foo_pipeline', run_id))
return False
else:
return self._run_storage.has_run(run_id)
instance.has_run = types.MethodType(_has_run, instance)
assert instance.get_or_create_run(run) == run
# Run is created after we check whether it exists, but deleted before we can get it
global MOCK_HAS_RUN_CALLED # pylint:disable=global-statement
MOCK_HAS_RUN_CALLED = False
with seven.TemporaryDirectory() as tmpdir_path:
instance = DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
run = PipelineRun.create_empty_run('foo_pipeline', 'bar_run')
def _has_run(self, run_id):
global MOCK_HAS_RUN_CALLED # pylint: disable=global-statement
# pylint: disable=protected-access
if not self._run_storage.has_run(run_id) and not MOCK_HAS_RUN_CALLED:
self._run_storage.add_run(PipelineRun.create_empty_run('foo_pipeline', run_id))
MOCK_HAS_RUN_CALLED = True
return False
elif self._run_storage.has_run(run_id) and MOCK_HAS_RUN_CALLED:
MOCK_HAS_RUN_CALLED = False
return True
else:
return False
instance.has_run = types.MethodType(_has_run, instance)
with pytest.raises(check.CheckError, match='Inconsistent run storage'):
instance.get_or_create_run(run)
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster_tests/core_tests/storage_tests/test_local_instance.py | test_local_instance.py | py | 5,700 | python | en | code | 2 | github-code | 36 |
10272333559 | from flask import Flask, g, render_template,\
request, redirect, url_for, flash, session
import hashlib
import os
import mysql.connector
import google.oauth2.credentials
import google_auth_oauthlib.flow
from google.auth.transport import requests
import requests, json
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
from models.usuario import Usuario
from models.usuarioDAO import UsuarioDAO
from models.exercicio import exercicio
from models.exercicioDAO import ExercicioDAO
from models.avaliacao import Avaliacao
from models.avaliacaoDAO import AvaliacaoDAO
# Flask application setup.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — move to an environment variable
# before deploying; a guessable key lets attackers forge session cookies.
app.secret_key = "senha123"

# MySQL connection settings (local development defaults).
DB_HOST = "localhost"
DB_USER = "root"
DB_NAME = "academiadb"
DB_PASS = ""

# Access-control table consulted by the `autorizacao` before_request hook:
# action (first URL path segment) -> { profile id: permission flag }.
app.auth = {
    # acao: { perfil: permissao }  ->  action: { profile: permission }
    'painel': {0:1, 1:1},
    'logout': {0:1, 1:1},
    'cadastrar_exercicio': {0:1, 1:1},
    'listar_exercicio': {0:1, 1:1},
    'cadastrar_saida': {0:1, 1:1}
}
@app.before_request
def autorizacao():
    """Authorization hook executed before every request.

    Extracts the first path segment of the requested URL and, when that
    segment appears in ``app.auth``, requires a logged-in session,
    redirecting anonymous users to the login page.
    """
    # '/listar_exercicio/1' -> 'listar_exercicio' (first path segment).
    acao = request.path[1:]
    acao = acao.split('/')
    if len(acao)>=1:
        acao = acao[0]
    acoes = app.auth.keys()
    if acao in list(acoes):
        if session.get('logado') is None:
            # Not logged in: send to the login form.
            return redirect(url_for('login'))
        else:
            tipo = session['logado']
            # NOTE(review): app.auth[acao] is a dict such as {0:1, 1:1}, so
            # comparing it with 0 is always False and the redirect below is
            # unreachable. A per-profile permission check (presumably using
            # 'tipo') was likely intended here — confirm and fix.
            if app.auth[acao] == 0:
                return redirect(url_for('painel'))
def get_db():
    """Return the request-scoped MySQL connection, opening it on first use.

    The connection is cached on Flask's ``g`` object so each request reuses
    a single connection; ``close_connection`` tears it down afterwards.
    """
    conexao = getattr(g, '_database', None)
    if conexao is None:
        conexao = g._database = mysql.connector.connect(
            host=DB_HOST,
            user=DB_USER,
            password=DB_PASS,
            database=DB_NAME,
        )
    return conexao
@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped DB connection, if one was opened."""
    conexao = getattr(g, '_database', None)
    if conexao is not None:
        conexao.close()
@app.route('/')
def index():
    """Landing page: show the login form."""
    pagina = "login.html"
    return render_template(pagina)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Sign-up page: on POST, persist a new user and report the outcome."""
    msg = ''
    if request.method == "POST":
        dados = request.form
        novo_usuario = Usuario(dados['nome'], dados['sobrenome'],
                               dados['email'], dados['senha'])
        codigo = UsuarioDAO(get_db()).inserir(novo_usuario)
        # Positive return value from the DAO means the insert succeeded.
        msg = "Cadastrado com sucesso!" if codigo > 0 else "Erro ao cadastrar!"
    return render_template("register.html", titulo="Cadastro", msg=msg)
@app.route('/cadastrar_treino', methods=['GET', 'POST'])
def cadastrar_exercicios():
    """Exercise-registration page: on POST, store carga/series/repeticoes."""
    if request.method == "POST":
        dados = request.form
        novo = exercicio(dados['carga'], dados['series'], dados['repeticoes'])
        codigo = ExercicioDAO(get_db()).inserir(novo)
        if codigo > 0:
            flash("Cadastrado com sucesso! Código %d" % codigo, "success")
        else:
            flash("Erro ao cadastrar!", "danger")
    return render_template("exercicio-cadastrar.html",
                           titulo="Cadastro de Exercicio")
@app.route('/avaliacao', methods=['GET', 'POST'])
def avaliacao():
    """Physical-evaluation form: on POST, store all body measurements."""
    if request.method == "POST":
        # Form field names, in the exact positional order expected by Avaliacao.
        campos = ('peso', 'altura', 'braco', 'ombro', 'peito', 'cintura',
                  'quadril', 'abdominal', 'coxaMedial', 'panturrilha')
        medidas = [request.form[campo] for campo in campos]
        nova = Avaliacao(*medidas, session['logado']['codigo'])
        codigo = AvaliacaoDAO(get_db()).inserir(nova)
        if codigo > 0:
            flash("Cadastrado com sucesso! Código %d" % codigo, "success")
        else:
            flash("Erro ao cadastrar!", "danger")
    return render_template("avaliacao.html", titulo="Avaliacao")
@app.route('/listar_exercicio', methods=['GET',])
def listar_exercicio():
    """Show every registered exercise."""
    registros = ExercicioDAO(get_db()).listar()
    return render_template("exercicio-listar.html", exercicios=registros)
@app.route('/listaraval', methods=['GET', 'POST'])
def listaraval():
    """Show every registered physical evaluation."""
    registros = AvaliacaoDAO(get_db()).listar()
    return render_template("listaraval.html", avaliacao=registros)
@app.route('/cadastrar_saida', methods=['GET', 'POST'])
def cadastrar_saida():
    """Register a plant checkout ('saida') tied to a user.

    NOTE(review): ``PlantaDAO``, ``Saida`` and ``SaidaDAO`` are never
    imported in this file (only usuario/exercicio/avaliacao DAOs are), so
    this route raises NameError as soon as it is hit. It looks like leftover
    code from another project — confirm whether it should be removed or its
    modules added.
    """
    daoUsuario = UsuarioDAO(get_db())
    daoPlanta = PlantaDAO(get_db())
    if request.method == "POST":
        dtsaida = request.form['dtsaida']
        usuario = request.form['usuario']
        planta = request.form['planta']
        saida = Saida(usuario, planta, dtsaida)
        daoSaida = SaidaDAO(get_db())
        codigo = daoSaida.inserir(saida)
        if codigo > 0:
            flash("Saída cadastrada com sucesso! Código %d" % codigo, "success")
        else:
            flash("Erro ao registrar saída!", "danger")
    # Populate the form's user/plant dropdowns.
    usuarios_db = daoUsuario.listar()
    plantas_db = daoPlanta.listar()
    return render_template("saida-cadastrar.html",
                           usuarios=usuarios_db, plantas=plantas_db)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by e-mail/password and store a session payload."""
    if request.method == "POST":
        email = request.form["email"]
        senha = request.form["senha"]
        # Verificar dados
        dao = UsuarioDAO(get_db())
        usuario = dao.autenticar(email, senha)
        if usuario is not None:
            # NOTE(review): indexes assume the row layout (0=codigo, 1=email,
            # 3=nome) returned by UsuarioDAO.autenticar — confirm against the DAO.
            session['logado'] = {
                'codigo': usuario[0],
                'nome': usuario[3],
                'email': usuario[1],
            }
            return redirect(url_for('painel'))
        else:
            flash("Erro ao efetuar login!")
    return render_template("login.html", titulo="Login")
@app.route('/logout')
def logout():
    """End the current session and return to the public index page."""
    # session.clear() removes every key (including 'logado'), so the previous
    # explicit `session['logado'] = None` assignment was redundant.
    session.clear()
    return redirect(url_for('index'))
@app.route('/forgot')
def forgot():
    """Render the password-recovery page."""
    return render_template("forgot-password.html", titulo ="Esqueci minha senha")
@app.route('/painel')
def painel():
    """Render the dashboard shown after a successful login."""
    return render_template("index.html", titulo="index")
@app.route('/peito', methods=['GET', 'POST'])
def peito():
    """List chest ('peito') exercises."""
    dao = ExercicioDAO(get_db())
    exercicio_db = dao.listar_peito()
    return render_template("peito.html", titulo="peito", exercicio=exercicio_db)
@app.route('/perna', methods=['GET', 'POST'])
def perna():
    """List leg ('perna') exercises."""
    dao = ExercicioDAO(get_db())
    exercicio_db = dao.listar_perna()
    return render_template("perna.html", titulo="perna", exercicio=exercicio_db)
@app.route('/braco', methods=['GET', 'POST'])
def braco():
    """List arm ('braco') exercises."""
    dao = ExercicioDAO(get_db())
    exercicio_db = dao.listar_braco()
    return render_template("braco.html", titulo="braco", exercicio=exercicio_db)
@app.route('/costas', methods=['GET', 'POST'])
def costas():
    """List back ('costas') exercises."""
    dao = ExercicioDAO(get_db())
    exercicio_db = dao.listar_costas()
    return render_template("costas.html", titulo="costas", exercicio=exercicio_db)
@app.route('/abdomen', methods=['GET', 'POST'])
def abdomen():
    """List abdominal ('abdomen') exercises."""
    dao = ExercicioDAO(get_db())
    exercicio_db = dao.listar_abdomen()
    return render_template("abdomen.html", titulo="abdomen", exercicio=exercicio_db)
@app.route('/alongamento', methods=['GET', 'POST'])
def alongamento():
    """List stretching ('alongamento') exercises."""
    dao = ExercicioDAO(get_db())
    exercicio_db = dao.listar_alongamento()
    return render_template("alongamento.html", titulo="alongamento", exercicio=exercicio_db)
@app.route('/mainaval')
def mainaval():
    """Render the evaluation landing page."""
    return render_template("mainaval.html", titulo="mainaval")
@app.route("/login_google")
def login_google():
flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
'client_secret.json',
scopes=['https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile', 'openid'])
flow.redirect_uri = 'http://localhost/callback'
authorization_url, state = flow.authorization_url(
acess_type='offline',
include_granted_scopes='true')
return redirect(authorization_url)
@app.route('/callback')
def callback():
    """OAuth2 redirect endpoint: exchange the authorization code for a token,
    fetch the Google profile, auto-register unknown users and log them in."""
    state = request.args.get('state')
    code = request.args.get('code')
    if code is None or code == '':
        flash('Erro ao logar com conta google', 'danger')
        return redirect(url_for('login'))
    flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
        'client_secret.json',
        scopes=['https://www.googleapis.com/auth/userinfo.email',
                'https://www.googleapis.com/auth/userinfo.profile', 'openid'],
        state=state)
    flow.redirect_uri = url_for('callback', _external=True)
    authorization_response = request.url
    flow.fetch_token(authorization_response=authorization_response)
    credentials = flow.credentials
    resposta_api = requests.get("https://www.googleapis.com/oauth2/v1/userinfo?alt=json&access_token=" +
                                credentials.token)
    user_info = resposta_api.json()
    email = str(user_info['email'])
    dao = UsuarioDAO(get_db())
    user = dao.obter(email)
    if user is None:
        # First Google login: create a local account with a random, unusable
        # password so password-based login cannot hijack the account.
        hasher = hashlib.sha512()  # renamed: `hash` shadowed the builtin
        senha = os.urandom(50)
        secret = app.config['SECRET_KEY']
        hasher.update(f'{secret}{senha}'.encode('utf-8'))
        senha_criptografa = hasher.hexdigest()
        usuario = Usuario(
            user_info['name'],
            user_info['email'],
            senha_criptografa,
            '',
        )
        novo_id = None  # renamed: `id` shadowed the builtin
        if usuario.senha and usuario.nome and usuario.email:
            # Fixed: inserir()/obter() are instance methods but were being
            # called on the UsuarioDAO class itself, raising a TypeError.
            novo_id = dao.inserir(usuario)
        if novo_id is None or novo_id <= 0:
            flash('Erro ao cadastrar usuário', 'danger')
            return redirect(url_for('login'))
        else:
            user = dao.obter(user_info['email'])
    session['logado'] = user
    flash(f'Seja bem-vindo, {user[1]}!', 'primary')
    # Fixed host typo: token revocation lives at oauth2.googleapis.com
    # ('gauth2.googleapis.com' does not resolve).
    revoke = requests.post(
        'https://oauth2.googleapis.com/revoke',
        params={'token': credentials.token},
        headers={'content-type': 'application/x-www-form-urlencoded'})
    return redirect(url_for('painel'))
if __name__=='__main__':
app.run(host="0.0.0.0", port=80, debug=True) | FaelPressao/Projeto_Academia_Versao_Final | academia/main.py | main.py | py | 10,533 | python | en | code | 0 | github-code | 36 |
43303567074 | from rpython.tool.flattenrec import FlattenRecursion
def test_flattenrec():
    """FlattenRecursion must let a 10000-deep logical recursion complete
    without exceeding CPython's recursion limit, visiting every value once."""
    r = FlattenRecursion()
    seen = set()
    def rec(n):
        if n > 0:
            r(rec, n-1)  # delegate the recursive call to the flattener
        seen.add(n)
    rec(10000)
    assert seen == set(range(10001))
| mozillazg/pypy | rpython/tool/test/test_flattenrec.py | test_flattenrec.py | py | 253 | python | en | code | 430 | github-code | 36 |
import cv2 as cv
import numpy as np

# Navigation demo: in each frame of 'Test.mp4', find the largest dark region,
# suggest a steering direction (left / right / forward) and flag frames whose
# fitted rectangle rotated too much relative to the previous frame.

cap = cv.VideoCapture('Test.mp4')
# Orientation of the min-area rectangle per frame; seeded with 0 so the first
# frame has a previous value to diff against.
orient_ation = []
count = 0
orient_ation.append(count)
fault_count = 0  # sequential index for saved "too much rotation" snapshots
while True:
    ret1, current_frame = cap.read()
    if ret1 == False:
        print('There is no valid input')
        break
    # Gray-scale + strong median blur to suppress noise before thresholding
    current_frame_gray = cv.cvtColor(current_frame, cv.COLOR_BGR2GRAY)
    current_frame_fil = cv.medianBlur(current_frame_gray, 19)
    # Inverse binary threshold: dark pixels (< 15) become foreground
    ret4, current_frame_thresh = cv.threshold(current_frame_fil, 15, 255, cv.THRESH_BINARY_INV)
    # NOTE(review): the 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm installed version.
    im2, current_frame_cont, hierarchy = cv.findContours(current_frame_thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # Contours sorted by area, largest first
    cnt_area_current = sorted(current_frame_cont, key=cv.contourArea, reverse=True)
    cv.drawContours(current_frame, cnt_area_current[0], -1, (255, 0, 0), 3)
    # Centroid of the largest dark region via image moments
    M_current = cv.moments(cnt_area_current[0])
    cx_current = int(M_current['m10'] / M_current['m00'])
    cy_current = int(M_current['m01'] / M_current['m00'])
    cv.circle(current_frame, (cx_current, cy_current), 7, (255, 0, 0), -1)
    # Arrow from a fixed reference point toward the dark-region centroid
    cv.arrowedLine(current_frame, (640, 650), (cx_current, cy_current), (0, 255, 0), 10)
    # Steering decision: centroid in left 40% / right 40% / middle band
    left_index = int((4 * current_frame.shape[1]) / 10)
    right_index = int((6 * current_frame.shape[1]) / 10)
    if cx_current <= left_index:
        cv.putText(current_frame, 'Move Left', (420, 100), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    elif cx_current >= right_index:
        cv.putText(current_frame, 'Move Right', (420, 100), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    else:
        cv.putText(current_frame, 'Move Forward', (420, 100), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    # Rotation of the min-area rectangle between consecutive frames
    min_rec = cv.minAreaRect(cnt_area_current[0])
    orient_ation.append(min_rec[2])
    rotation = abs(orient_ation[count] - orient_ation[count + 1])
    count = count + 1
    # Corner points of the rotatable rectangle, drawn on the original image
    box = cv.boxPoints(min_rec)
    box = np.int0(box)
    cv.drawContours(current_frame, [box], 0, (0, 0, 255), 2)
    # `rotation` is an abs() value, so the old `or rotation <= -80` test was dead
    if rotation >= 80:
        print('Too much rotation')
        # Fixed: the filename was the literal string 'fault_%i.jpg' (never
        # formatted) and its counter was reset to 0 on every trigger, so each
        # snapshot overwrote the previous one.
        cv.imwrite('fault_%d.jpg' % fault_count, current_frame)
        fault_count = fault_count + 1
    cv.imshow('procedure', current_frame)
    cv.imshow('threshold', current_frame_thresh)
    if cv.waitKey(30) & 0xFF == 27:  # Esc quits
        break
cap.release()
cv.destroyAllWindows()
30325551719 | # -*- coding: utf-8 -*-
import http.client
import csv
import json
conn = http.client.HTTPSConnection("empresa.app.invoicexpress.com")
# Read rows from the CSV file, which uses semicolon as the separator
with open("itens2.csv", newline="") as csvfile:
    reader = csv.reader(csvfile, delimiter=";")  # semicolon-delimited input
    next(reader)  # skip the CSV header row
    for row in reader:
        # Expected column layout: 0=name, 1=description, 2=unit price
        name = row[0]
        description = row[1]
        unit_price = row[2]
        payload = {
            "item": {
                "name": name,
                "description": description,
                "unit_price": unit_price,
                "unit": "unit",
                "tax": {"name": "IVA23"}
            }
        }
        payload_str = json.dumps(payload)
        headers = {
            'accept': "application/json",
            'content-type': "application/json"
        }
        # SECURITY NOTE(review): the API key travels as a URL query parameter
        # and is hard-coded here — move it to an environment variable/config.
        conn.request("POST", "/items.json?api_key=sua-api-key-aqui", payload_str, headers)
        res = conn.getresponse()
        data = res.read()  # must drain the response before reusing `conn`
        print(data.decode("utf-8"))
| wesleyy598/Consumindo-API-Python | InvoiceXpress/Importar Invoice/Importar Preços de Portugal.py | Importar Preços de Portugal.py | py | 1,145 | python | en | code | 1 | github-code | 36 |
import numpy as np
import cv2

# Capture from the default webcam, save the vertically flipped stream to
# 'output.avi' and preview the grayscale feed until 'q' is pressed.
cap = cv2.VideoCapture(0)
# Define the codec and create the VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
while True:
    ret, frame = cap.read()
    if ret == True:
        # Fixed: convert only after a successful read — the original called
        # cvtColor before checking `ret`, crashing on a failed capture.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = cv2.flip(frame, 0)
        # Write the flipped frame
        out.write(frame)
        cv2.imshow('frame', gray)
        k = cv2.waitKey(1)
        if k & 0XFF == ord('q'):
            break
    else:
        break
cap.release()
out.release()
cv2.destroyAllWindows()
| land-pack/opencv-example | basic/simple_cap_save_video.py | simple_cap_save_video.py | py | 553 | python | en | code | 1 | github-code | 36 |
6795008221 | from exo_accounts.test_mixins.faker_factories import FakeUserFactory
from test_utils.test_case_mixins import UserTestMixin
from test_utils import DjangoRestFrameworkTestCase
class TestLocationCityCountry(
        UserTestMixin,
        DjangoRestFrameworkTestCase):
    """Checks that `city` and `country` are correctly derived from the
    free-form `location` string a user is created with."""

    def setUp(self):
        super().setUp()
        self.create_user()

    def test_city_and_country_extract(self):
        # Prepare test
        # Each case builds a location string as
        # city<sep>state<sep>country and expects city/country back.
        test_cases = [
            {
                'city': 'Madrid',
                'state': '',
                'country': 'Spain',
                'separator': ', ',
            },
            {
                'city': 'Madrid',
                'state': '',
                'country': 'Spain',
                'separator': '- ',
            },
            {
                'city': '',
                'state': '',
                'country': 'Spain',
                'separator': '',
            },
            # City, State, Country format
            {
                'city': 'Bangalore',
                'state': 'Karnataka',
                'country': 'India',
                'separator': ', ',
            },
        ]
        for case in test_cases:
            user = FakeUserFactory(
                location='{}{}{}{}{}'.format(
                    case.get('city'),
                    case.get('separator'),
                    case.get('state'),
                    case.get('separator'),
                    case.get('country'),
                )
            )
            self.assertEqual(user.city, case.get('city'))
            self.assertEqual(user.country, case.get('country'))
17386735173 | import numpy as np
import math
from skimage import io, util
import heapq
def randomPatch(texture, patchLength):
    """Return a uniformly random patchLength x patchLength crop of `texture`."""
    rows, cols, _ = texture.shape
    top = np.random.randint(rows - patchLength)
    left = np.random.randint(cols - patchLength)
    return texture[top:top + patchLength, left:left + patchLength]
def L2OverlapDiff(patch, patchLength, overlap, res, y, x):
    """Sum of squared differences between `patch` and the already-placed
    output `res` over the overlapping strips at output position (y, x).

    The left strip is compared when x > 0, the top strip when y > 0; the
    top-left corner belongs to both strips and is subtracted once so it is
    not double-counted.
    """
    total = 0
    if x > 0:
        left_strip = patch[:, :overlap] - res[y:y + patchLength, x:x + overlap]
        total += np.sum(left_strip ** 2)
    if y > 0:
        top_strip = patch[:overlap, :] - res[y:y + overlap, x:x + patchLength]
        total += np.sum(top_strip ** 2)
    if x > 0 and y > 0:
        corner = patch[:overlap, :overlap] - res[y:y + overlap, x:x + overlap]
        total -= np.sum(corner ** 2)
    return total
def randomBestPatch(texture, patchLength, overlap, res, y, x):
    """Exhaustively scan every possible source patch in `texture` and return
    the one with the lowest L2 overlap error against the output at (y, x).

    O((h - patchLength) * (w - patchLength)) evaluations per call, so this
    dominates the quilting runtime for large textures.
    """
    h, w, _ = texture.shape
    errors = np.zeros((h - patchLength, w - patchLength))
    for i in range(h - patchLength):
        for j in range(w - patchLength):
            patch = texture[i:i+patchLength, j:j+patchLength]
            e = L2OverlapDiff(patch, patchLength, overlap, res, y, x)
            errors[i, j] = e
    # unravel_index turns the flat argmin into the (row, col) of the best patch
    i, j = np.unravel_index(np.argmin(errors), errors.shape)
    return texture[i:i+patchLength, j:j+patchLength]
def minCutPath(errors):
    """Cheapest top-to-bottom seam through `errors` via Dijkstra's algorithm.

    Returns a list with one column index per row; consecutive entries differ
    by at most one, so the seam is 8-connected vertically.
    """
    frontier = [(cost, [col]) for col, cost in enumerate(errors[0])]
    heapq.heapify(frontier)
    numRows, numCols = errors.shape
    visited = set()
    while frontier:
        cost, path = heapq.heappop(frontier)
        row = len(path)       # next row to extend into
        col = path[-1]        # current column
        if row == numRows:
            return path
        # The seam may stay, or shift one column left/right
        for step in (-1, 0, 1):
            nextCol = col + step
            if 0 <= nextCol < numCols and (row, nextCol) not in visited:
                heapq.heappush(frontier, (cost + errors[row, nextCol], path + [nextCol]))
                visited.add((row, nextCol))
def minCutPath2(errors):
    # dynamic programming, unused
    """Alternative seam finder using bottom-up DP instead of Dijkstra.

    Note: returns a lazy `map` iterator of column indices (top to bottom),
    not a list — callers must iterate it exactly once.
    """
    # Pad both sides with +inf so the -1/+1 column shifts below never wrap
    errors = np.pad(errors, [(0, 0), (1, 1)],
                    mode='constant',
                    constant_values=np.inf)
    cumError = errors[0].copy()
    paths = np.zeros_like(errors, dtype=int)
    for i in range(1, len(errors)):
        M = cumError
        L = np.roll(M, 1)    # cost of arriving from the left neighbour
        R = np.roll(M, -1)   # cost of arriving from the right neighbour
        # optimize with np.choose?
        cumError = np.min((L, M, R), axis=0) + errors[i]
        paths[i] = np.argmin((L, M, R), axis=0)
    paths -= 1  # remap argmin {0,1,2} to column shift {-1,0,+1}
    minCutPath = [np.argmin(cumError)]
    # Walk the stored shifts back up to reconstruct the seam
    for i in reversed(range(1, len(errors))):
        minCutPath.append(minCutPath[-1] + paths[i][minCutPath[-1]])
    # Shift by -1 to undo the padding offset
    return map(lambda x: x - 1, reversed(minCutPath))
def minCutPatch(patch, patchLength, overlap, res, y, x):
    """Blend `patch` into the output at (y, x) along minimum-error seams.

    Pixels on the existing-output side of each seam (left and/or top overlap)
    are copied from `res`; the rest of the returned patch is unchanged.
    """
    patch = patch.copy()
    dy, dx, _ = patch.shape
    minCut = np.zeros_like(patch, dtype=bool)
    if x > 0:
        # Vertical seam through the left overlap strip
        left = patch[:, :overlap] - res[y:y+dy, x:x+overlap]
        leftL2 = np.sum(left**2, axis=2)
        for i, j in enumerate(minCutPath(leftL2)):
            minCut[i, :j] = True
    if y > 0:
        # Horizontal seam through the top overlap strip (transposed search)
        up = patch[:overlap, :] - res[y:y+overlap, x:x+dx]
        upL2 = np.sum(up**2, axis=2)
        for j, i in enumerate(minCutPath(upL2.T)):
            minCut[:i, j] = True
    # Replace the masked region with the already-rendered output pixels
    np.copyto(patch, res[y:y+dy, x:x+dx], where=minCut)
    return patch
s = "https://raw.githubusercontent.com/axu2/image-quilting/master/"
def quilt(texture, patchLength, numPatches, mode="cut", sequence=False):
    """Synthesize a texture by quilting numPatches (rows, cols) patches.

    mode: "random" pastes random patches, "best" picks the lowest-overlap-error
    patch, "cut" additionally blends along a minimum-error seam (Efros-Freeman).
    When `sequence` is True, the partial result is displayed after each patch.
    """
    texture = util.img_as_float(texture)

    overlap = patchLength // 6
    numPatchesHigh, numPatchesWide = numPatches

    # Output size: patches overlap, so each extra patch adds (patchLength - overlap)
    h = (numPatchesHigh * patchLength) - (numPatchesHigh - 1) * overlap
    w = (numPatchesWide * patchLength) - (numPatchesWide - 1) * overlap

    res = np.zeros((h, w, texture.shape[2]))

    for i in range(numPatchesHigh):
        for j in range(numPatchesWide):
            y = i * (patchLength - overlap)
            x = j * (patchLength - overlap)

            # The first patch has nothing to match against, so it is random
            if i == 0 and j == 0 or mode == "random":
                patch = randomPatch(texture, patchLength)
            elif mode == "best":
                patch = randomBestPatch(texture, patchLength, overlap, res, y, x)
            elif mode == "cut":
                patch = randomBestPatch(texture, patchLength, overlap, res, y, x)
                patch = minCutPatch(patch, patchLength, overlap, res, y, x)
            
            res[y:y+patchLength, x:x+patchLength] = patch

            if sequence:
                io.imshow(res)
                io.show()
    
    return res
def quiltSize(texture, patchLength, shape, mode="cut"):
    """Quilt a texture of at least `shape` (h, w) pixels, then crop to `shape`."""
    overlap = patchLength // 6
    h, w = shape

    # Number of patches needed so the quilted canvas covers the target size.
    # NOTE(review): the trailing `or 1` only kicks in when ceil(...) + 1 == 0,
    # i.e. when the target is much smaller than patchLength — confirm intent.
    numPatchesHigh = math.ceil((h - patchLength) / (patchLength - overlap)) + 1 or 1
    numPatchesWide = math.ceil((w - patchLength) / (patchLength - overlap)) + 1 or 1
    res = quilt(texture, patchLength, (numPatchesHigh, numPatchesWide), mode)

    return res[:h, :w]
texture = io.imread(s+"test.png")
io.imshow(texture)
io.show()
io.imshow(quilt(texture, 25, (6, 6), "random"))
io.show()
io.imshow(quilt(texture, 25, (6, 6), "best"))
io.show()
io.imshow(quilt(texture, 20, (6, 6), "cut"))
io.show()
io.imshow(quilt(texture, 20, (3, 3), "cut", True))
io.show() | QURATT/https---github.com-QURATT-DIPProject | image_quilting.py | image_quilting.py | py | 5,186 | python | en | code | 0 | github-code | 36 |
9149914830 | #coding = 'utf-8'
'''
这是一个格栅布局的小例子!
文章链接:http://www.xdbcb8.com/archives/209.html
'''
import sys
from PyQt5.QtWidgets import (QWidget, QPushButton, QApplication, QGridLayout, QLCDNumber)
class Example(QWidget):
    '''
    Grid-layout demo: a calculator-style keypad driving a QLCDNumber display.
    '''
    def __init__(self):
        '''
        Basic initial setup.
        '''
        super().__init__()
        self.Init_UI()

    def Init_UI(self):
        '''
        Build the user interface: an LCD display above a 4-column keypad.
        '''
        grid = QGridLayout()
        self.setLayout(grid)
        self.setGeometry(300, 300, 400, 300)
        self.setWindowTitle('学点编程吧-计算器')

        self.lcd = QLCDNumber()
        grid.addWidget(self.lcd, 0, 0, 3, 0)# make the QLCDNumber widget span several rows
        grid.setSpacing(10)# set vertical and horizontal spacing to 10

        names = ['Cls', 'Bc', '', 'Close',
                 '7', '8', '9', '/',
                '4', '5', '6', '*',
                 '1', '2', '3', '-',
                '0', '.', '=', '+']

        positions = [(i, j) for i in range(4, 9) for j in range(4, 8)]# grid cells for the buttons

        for position, name in zip(positions, names):
            # pair each button label with its grid position (note zip usage)
            if name == '':
                continue
            button = QPushButton(name)
            grid.addWidget(button, *position)
            button.clicked.connect(self.Cli)
        
        self.show()

    def Cli(self):
        '''
        Slot invoked when any keypad button is clicked.
        '''
        sender = self.sender().text()
        ls = ['/', '*', '-', '=', '+']
        if sender in ls:
            self.lcd.display('A')# operator keys ('/', '*', '-', '=', '+') show 'A' on the LCD
        else:
            self.lcd.display(sender)# otherwise show the button's own label, e.g. 1
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
app.exit(app.exec_())
| redmorningcn/PyQT5Example | PyQt5All/PyQt56/QGrild layout.pyw | QGrild layout.pyw | pyw | 2,002 | python | zh | code | 1 | github-code | 36 |
34493975789 | import logging
import os
from argparse import ArgumentParser
from typing import Dict, List, Tuple, Set
import pandas as pd
from tqdm import tqdm
from gebert.utils.io import save_node_id2terms_list, save_dict, save_tuples, read_mrconso, read_mrrel
def get_concept_list_groupby_cui(mrconso_df: pd.DataFrame, cui2node_id: Dict[str, int]) \
        -> Dict[int, Set[str]]:
    """Map each node id to the set of lowercased term strings of its CUI.

    Side effect: drops duplicated (CUI, STR) pairs from `mrconso_df` IN PLACE.
    (Return annotation fixed: the function returns only the node_id->terms dict.)
    """
    logging.info("Started creating CUI to terms mapping")
    node_id2terms_list: Dict[int, Set[str]] = {}
    logging.info(f"Removing duplicated (CUI, STR) pairs, {mrconso_df.shape[0]} rows before deletion")
    mrconso_df.drop_duplicates(subset=("CUI", "STR"), keep="first", inplace=True)
    logging.info(f"Removed duplicated (CUI, STR) pairs, {mrconso_df.shape[0]} rows after deletion")
    unique_cuis_set = set(mrconso_df["CUI"].unique())
    logging.info(f"There are {len(unique_cuis_set)} unique CUIs in dataset")
    # node_id2cui: Dict[int, str] = {node_id: cui for node_id, cui in enumerate(unique_cuis_set)}
    # cui2node_id: Dict[str, int] = {cui: node_id for node_id, cui in node_id2cui.items()}
    # assert len(node_id2cui) == len(cui2node_id)
    for _, row in tqdm(mrconso_df.iterrows(), miniters=mrconso_df.shape[0] // 50):
        cui = row["CUI"].strip()
        term_str = row["STR"].strip().lower()
        if term_str == '':
            continue  # skip rows whose term is empty after trimming
        node_id = cui2node_id[cui]
        if node_id2terms_list.get(node_id) is None:
            node_id2terms_list[node_id] = set()
        node_id2terms_list[node_id].add(term_str.strip())
    logging.info("CUI to terms mapping is created")
    return node_id2terms_list
def extract_umls_oriented_edges_with_relations(mrrel_df: pd.DataFrame, cui2node_id: Dict[str, int],
                                               rel2rel_id: Dict[str, int], rela2rela_id: Dict[str, int],
                                               ignore_not_mapped_edges=False) -> List[Tuple[int, int, int, int]]:
    """Turn MRREL rows into unique directed edges (node1, node2, rel_id, rela_id).

    Duplicate (CUI1, CUI2, REL, RELA) combinations are emitted once. Rows whose
    CUIs are missing from `cui2node_id` either raise (default) or are counted
    and skipped when `ignore_not_mapped_edges` is True.
    """
    # Deduplication keys use "~~" as a separator, hence the assertion below
    cuis_relation_str_set = set()
    logging.info("Started generating graph edges")
    edges: List[Tuple[int, int, int, int]] = []
    not_mapped_edges_counter = 0
    for idx, row in tqdm(mrrel_df.iterrows(), miniters=mrrel_df.shape[0] // 100, total=mrrel_df.shape[0]):
        cui_1 = row["CUI1"].strip()
        cui_2 = row["CUI2"].strip()
        rel = row["REL"]
        rela = row["RELA"]
        # Separator validation
        for att in (cui_1, cui_2, rel, rela):
            assert "~~" not in str(att)
        if cui2node_id.get(cui_1) is not None and cui2node_id.get(cui_2) is not None:
            cuis_relation_str = f"{cui_1}~~{cui_2}~~{rel}~~{rela}"
            if cuis_relation_str not in cuis_relation_str_set:
                cui_1_node_id = cui2node_id[cui_1]
                cui_2_node_id = cui2node_id[cui_2]
                rel_id = rel2rel_id[rel]
                rela_id = rela2rela_id[rela]
                edges.append((cui_1_node_id, cui_2_node_id, rel_id, rela_id))
                cuis_relation_str_set.add(cuis_relation_str)
        else:
            if not ignore_not_mapped_edges:
                raise AssertionError(f"Either CUI {cui_1} or {cui_2} are not found in CUI2node_is mapping")
            else:
                not_mapped_edges_counter += 1
    if ignore_not_mapped_edges:
        logging.info(f"{not_mapped_edges_counter} edges are not mapped to any node")
    logging.info(f"Finished generating edges. There are {len(edges)} edges")
    return edges
def create_graph_files(mrconso_df: pd.DataFrame, mrrel_df: pd.DataFrame, rel2id: Dict[str, int],
                       cui2node_id: Dict[str, int], rela2id: Dict[str, int], output_node_id2terms_list_path: str,
                       output_node_id2cui_path: str, output_edges_path: str, output_rel2rel_id_path: str,
                       output_rela2rela_id_path, ignore_not_mapped_edges: bool):
    """Materialize one graph split on disk: terms per node, node->CUI map,
    relation vocabularies and the edge list."""
    # Invert the CUI->id mapping for serialization
    node_id2cui: Dict[int, str] = {node_id: cui for cui, node_id in cui2node_id.items()}
    node_id2terms_list = get_concept_list_groupby_cui(mrconso_df=mrconso_df, cui2node_id=cui2node_id)
    logging.info("Generating edges....")
    edges = extract_umls_oriented_edges_with_relations(mrrel_df=mrrel_df, cui2node_id=cui2node_id,
                                                       rel2rel_id=rel2id, rela2rela_id=rela2id,
                                                       ignore_not_mapped_edges=ignore_not_mapped_edges)
    logging.info("Saving the result....")
    save_node_id2terms_list(save_path=output_node_id2terms_list_path, mapping=node_id2terms_list, )
    save_dict(save_path=output_node_id2cui_path, dictionary=node_id2cui)
    save_dict(save_path=output_rel2rel_id_path, dictionary=rel2id)
    save_dict(save_path=output_rela2rela_id_path, dictionary=rela2id)
    save_tuples(save_path=output_edges_path, tuples=edges)
def create_cui2node_id_mapping(mrconso_df: pd.DataFrame) -> Dict[str, int]:
    """Assign a dense node id (0..n-1, arbitrary set order) to each distinct CUI."""
    distinct_cuis = set(mrconso_df["CUI"].unique())
    return {cui: idx for idx, cui in enumerate(distinct_cuis)}
def create_relations2id_dicts(mrrel_df: pd.DataFrame):
    """Build REL->id and RELA->id vocabularies from MRREL.

    Missing relation values are filled IN PLACE with the literal "NAN", and an
    extra "LOOP" id is reserved in both vocabularies for self-loop edges.

    Returns:
        (rel2id, rela2id) dictionaries.
    """
    mrrel_df.REL.fillna("NAN", inplace=True)
    mrrel_df.RELA.fillna("NAN", inplace=True)
    rel2id = {rel: rel_id for rel_id, rel in enumerate(mrrel_df.REL.unique())}
    rela2id = {rela: rela_id for rela_id, rela in enumerate(mrrel_df.RELA.unique())}
    rel2id["LOOP"] = max(rel2id.values()) + 1
    rela2id["LOOP"] = max(rela2id.values()) + 1
    logging.info(f"There are {len(rel2id.keys())} unique RELs and {len(rela2id.keys())} unique RELAs")
    print("REL2REL_ID", )
    for k, v in rel2id.items():
        print(k, v)
    print("RELA2RELA_ID", rela2id)
    # Fixed NameError: this loop previously iterated the misspelled name
    # `rel2aid`, which crashed the function before it could return.
    for k, v in rela2id.items():
        print(k, v)
    return rel2id, rela2id
def main():
parser = ArgumentParser()
parser.add_argument('--mrconso')
parser.add_argument('--mrrel')
parser.add_argument('--split_val', action="store_true")
parser.add_argument('--train_proportion', type=float)
parser.add_argument('--output_dir', type=str)
args = parser.parse_args()
split_val = args.split_val
output_dir = args.output_dir
if not os.path.exists(output_dir) and output_dir != '':
os.makedirs(output_dir)
logging.info("Loading MRCONSO....")
mrconso_df = read_mrconso(args.mrconso)
mrconso_df["STR"].fillna('', inplace=True)
logging.info("Loading MRREL....")
mrrel_df = read_mrrel(args.mrrel)
logging.info("Generating node index....")
rel2id, rela2id = create_relations2id_dicts(mrrel_df)
if split_val:
train_dir = os.path.join(output_dir, "train/")
val_dir = os.path.join(output_dir, "val/")
for d in (train_dir, val_dir):
if not os.path.exists(d):
os.makedirs(d)
train_proportion = args.train_proportion
num_rows = mrconso_df.shape[0]
shuffled_mrconso = mrconso_df.sample(frac=1.0, random_state=42)
del mrconso_df
num_train_rows = int(num_rows * train_proportion)
train_mrconso_df = shuffled_mrconso[:num_train_rows]
val_mrconso_df = shuffled_mrconso[num_train_rows:]
del shuffled_mrconso
train_output_node_id2terms_list_path = os.path.join(train_dir, "node_id2terms_list")
val_output_node_id2terms_list_path = os.path.join(val_dir, "node_id2terms_list")
train_output_node_id2cui_path = os.path.join(train_dir, "id2cui")
val_output_node_id2cui_path = os.path.join(val_dir, "id2cui")
train_output_edges_path = os.path.join(train_dir, "edges")
val_output_edges_path = os.path.join(val_dir, "edges")
train_output_rel2rel_id_path = os.path.join(train_dir, "rel2id")
val_output_rel2rel_id_path = os.path.join(val_dir, "rel2id")
train_output_rela2rela_id_path = os.path.join(train_dir, "rela2id")
val_output_rela2rela_id_path = os.path.join(val_dir, "rela2id")
train_cui2node_id = create_cui2node_id_mapping(mrconso_df=train_mrconso_df)
val_cui2node_id = create_cui2node_id_mapping(mrconso_df=val_mrconso_df)
logging.info("Creating train graph files")
create_graph_files(mrconso_df=train_mrconso_df, mrrel_df=mrrel_df, rel2id=rel2id, rela2id=rela2id,
cui2node_id=train_cui2node_id,
output_node_id2terms_list_path=train_output_node_id2terms_list_path,
output_node_id2cui_path=train_output_node_id2cui_path,
output_edges_path=train_output_edges_path,
output_rel2rel_id_path=train_output_rel2rel_id_path,
output_rela2rela_id_path=train_output_rela2rela_id_path, ignore_not_mapped_edges=True, )
logging.info("Creating val graph files")
create_graph_files(mrconso_df=val_mrconso_df, mrrel_df=mrrel_df, rel2id=rel2id, rela2id=rela2id,
cui2node_id=val_cui2node_id,
output_node_id2terms_list_path=val_output_node_id2terms_list_path,
output_node_id2cui_path=val_output_node_id2cui_path,
output_edges_path=val_output_edges_path, output_rel2rel_id_path=val_output_rel2rel_id_path,
output_rela2rela_id_path=val_output_rela2rela_id_path,
ignore_not_mapped_edges=True, )
else:
logging.info("Creating graph files")
output_node_id2terms_list_path = os.path.join(output_dir, "node_id2terms_list")
output_node_id2cui_path = os.path.join(output_dir, "id2cui")
output_edges_path = os.path.join(output_dir, "edges")
output_rel2rel_id_path = os.path.join(output_dir, f"rel2id")
output_rela2rela_id_path = os.path.join(output_dir, f"rela2id")
cui2node_id = create_cui2node_id_mapping(mrconso_df=mrconso_df)
create_graph_files(mrconso_df=mrconso_df, mrrel_df=mrrel_df, rel2id=rel2id, rela2id=rela2id,
cui2node_id=cui2node_id,
output_node_id2terms_list_path=output_node_id2terms_list_path,
output_node_id2cui_path=output_node_id2cui_path,
output_edges_path=output_edges_path, output_rel2rel_id_path=output_rel2rel_id_path,
output_rela2rela_id_path=output_rela2rela_id_path, ignore_not_mapped_edges=True, )
if __name__ == '__main__':
    # Configure root logging before running the CLI entry point
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', )
    main()
| Andoree/GEBERT | gebert/data/umls2graph.py | umls2graph.py | py | 10,606 | python | en | code | 2 | github-code | 36 |
33319841830 | import pygame as pg
from input_box import InputBox
pg.init()
screen = pg.display.set_mode((640, 480))
def main():
    """Run the demo event loop: feed pygame events to an InputBox at 30 FPS
    until the window is closed."""
    clock = pg.time.Clock()
    input_box1 = InputBox(100, 100, 140, 32)
    done = False

    while not done:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                done = True
            input_box1.handle_event(event)

        screen.fill((30, 30, 30))  # dark background each frame
        input_box1.draw(screen)
        input_box1.update()

        pg.display.flip()
        clock.tick(30)  # cap at 30 FPS
if __name__ == '__main__':
main()
pg.quit()
| MrRamka/FlyGame | test_input_form.py | test_input_form.py | py | 575 | python | en | code | 0 | github-code | 36 |
75072650344 | from vedo import Picture, show
from vedo.applications import SplinePlotter
pic = Picture("../data/sox9_exp.jpg").bw() # black & white
plt = SplinePlotter(pic)
plt.show(mode="image", zoom="tight")
outline = plt.line
plt.close()
print("Cutting using outline... (please wait)")
msh = pic.tomesh().cmap("viridis_r")
cut_msh = msh.clone().cut_with_point_loop(outline)
cut_msh.interpolate_data_from(msh, n=3)
show(cut_msh, outline, axes=1).close()
| BiAPoL/PoL-BioImage-Analysis-TS-Early-Career-Track | docs/day2aa_surface_processing/vedo_material/scripts/07-grab_scalars.py | 07-grab_scalars.py | py | 447 | python | en | code | 6 | github-code | 36 |
5099519953 | import telepot
from flask import Flask, request
try:
from Queue import Queue
except ImportError:
from queue import Queue
TOKEN = "525915971:AAHCrRmA_e8BsKDVLFw6pB6XS_BjJsUEnqM"
CHANNEL = "@signorinaggio"
app = Flask(__name__)
update_queue = Queue()
bot = telepot.Bot(TOKEN)
firma = "@formaementisChat"
EBOOK_LIST = []
def on_chat_message(msg):
    """Telegram handler: forward received documents to CHANNEL and answer
    the /start and /ping text commands."""
    content_type, chat_type, chat_id = telepot.glance(msg)
    if content_type == "document":
        file_id = msg['document']['file_id']
        messageId = msg['message_id']
        bot.sendDocument(CHANNEL,file_id,caption=firma)
        EBOOK_LIST.append(file_id)
        # NOTE(review): chat_id is an int and CHANNEL is a string, so
        # `chat_id != CHANNEL` is always True — confirm the intended check.
        if chat_id < 0 and chat_id != CHANNEL:
            # negative chat_id means a group chat: remove the original upload
            bot.deleteMessage((chat_id, messageId))
    elif content_type == "text":
        text = msg["text"].lower()
        if text.startswith("/start"):
            bot.sendMessage(chat_id,"Buongiorno.")
        elif text.startswith("/ping"):
            bot.sendMessage(chat_id,"Pong.")
bot.message_loop({'chat': on_chat_message}, source=update_queue)
@app.route('/', methods=['GET', 'POST'])
def pass_update():
    """Webhook endpoint: enqueue the raw Telegram update for the bot loop."""
    update_queue.put(request.data)
    return 'OK [200] HTTP CODE!!'
if __name__ == '__main__':
app.run(port=8080)
| IlPytone/delegator | app.py | app.py | py | 1,156 | python | en | code | 0 | github-code | 36 |
14854743181 | import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
from datetime import datetime
import wikipedia #pip install wikipedia
import webbrowser
import os
import smtplib
import psutil
from pygame import mixer
import json
import requests
import time
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
    """Speak the given text synchronously through the TTS engine."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the local hour, then introduce the assistant."""
    hour = int(datetime.now().hour)
    if hour>=0 and hour<12:
        speak("Good Morning!")

    elif hour>=12 and hour<18:
        speak("Good Afternoon!")   

    else:
        speak("Good Evening!")  

    speak("I am Jarvis Sir. Please tell me how may I help you")       
def takeCommand():
    #It takes microphone input from the user and returns string output
    # Returns the literal string "None" (not the None object) when
    # recognition fails — callers compare against that sentinel.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source, duration=0.2)
        print("Listening...")
        r.energy_threshold = 300
        r.pause_threshold = 1
        audio = r.listen(source)

    try:
        print("Recognizing...")    
        query = r.recognize_google(audio, language='en-in')
        print(f"User said: {query}\n")

    except Exception as e:
        # print(e)    
        print("Say that again please...")  
        return "None"
    return query
def sendEmail(to, content):
    """Send `content` to `to` via Gmail SMTP with STARTTLS.

    SECURITY NOTE(review): credentials are hard-coded placeholders here —
    load them from environment variables / a config file instead.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.login('your email', 'password')
    server.sendmail('your email', to, content)
    server.close()
def musiconloop(file, stopper):
    """Play `file` as an alarm and block until the user types `stopper`.

    Blocks the whole program on console input() until the stop word is entered.
    """
    mixer.init()
    mixer.music.load(file)
    mixer.music.play()
    while True:
        input_of_user = input()
        if input_of_user == stopper:
            mixer.music.stop()
            break
if __name__ == "__main__":
wishMe()
init_battery = time.time()
battery_secs = 5*60
init_water = time.time()
init_eyes = time.time()
init_exercise = time.time()
watersecs = 2 * 60
exsecs = 20*60
eyessecs = 10*60
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'play music' in query:
# music_dir = 'D:\\Non Critical\\songs\\Favorite Songs2'
# songs = os.listdir(music_dir)
# print(songs)
# os.startfile(os.path.join(music_dir, songs[0]))
webbrowser.open("https://open.spotify.com/collection/tracks")
elif 'time' in query:
strTime = datetime.now().strftime("%H:%M:%S")
print(strTime)
speak(f"Sir, the time is {strTime}")
elif 'open vs code' in query:
codePath = "C:\\Users\\ASUS\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'email to yash' in query:
try:
speak("What should I say?")
content = takeCommand()
to = "receiver's email"
sendEmail(to, content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry Sir. I am not able to send this email")
elif 'news' in query:
speak('News for Today .. ')
speak('So first news is..')
url = 'https://newsapi.org/v2/top-headlines?country=in&apiKey=22fa274e85764348aa45e21d5c3026d3'
news = requests.get(url).text
news_dict = json.loads(news)
arts = news_dict['articles']
# n = len(arts)
i = 0
for article in arts:
time.sleep(1)
if i == 5 - 1:
speak("Today's last News is..")
print(article['title'])
speak(article['title'])
break
print(article['title'])
speak(article['title'])
i += 1
time.sleep(1)
if i != 5 - 1:
speak("Moving to the next news..")
elif 'exit' in query:
speak('Thank You Sir. Have a nice day')
break
battery = psutil.sensors_battery()
percent = battery.percent
if percent < 30:
if time.time() - init_battery > battery_secs:
speak(f"Sir Please Charge Your Laptop {percent}% battery remaining")
init_battery = time.time()
if time.time() - init_water > watersecs:
speak('Sir Please Drink Water')
print("Water Drinking time. Enter 'drank' to stop the alarm.")
musiconloop('Drink Water And Mind My Business.mp3', 'drank')
init_water = time.time()
if time.time() - init_eyes >eyessecs:
speak('Eye exercise time')
print("Eye exercise time. Enter 'doneeyes' to stop the alarm.")
musiconloop('Open Your Eyes ALARM.mp3', 'doneeyes')
init_eyes = time.time()
if time.time() - init_exercise > exsecs:
speak('Physical Activity Time')
print("Physical Activity Time. Enter 'donephy' to stop the alarm.")
musiconloop('Workout Alarm.mp3', 'donephy')
init_exercise = time.time()
| yash358/J.A.R.V.I.S | main.py | main.py | py | 5,838 | python | en | code | 0 | github-code | 36 |
24788878049 | import sys
from collections import defaultdict, deque
def main():
    """Solve the 'friend network' problem (BOJ/UVa 4195) with union-find.

    Input (stdin): T test cases; each starts with F, followed by F lines
    of two names forming a friendship.  After each friendship is added,
    print the size of the connected component the two friends belong to.

    Changes vs. previous version: removed the unused `graph` structure and
    the commented-out BFS counter; the lexicographic ordering of union
    arguments was dropped because the component size is independent of
    which root wins the merge.
    """
    read_line = sys.stdin.readline
    T = int(read_line().strip())
    for _ in range(T):
        F = int(read_line().strip())
        parent = {}              # node -> parent; missing key means root
        size = defaultdict(int)  # root -> component size (0 until first seen)

        def find(x):
            # Iterative find with full path compression.
            root = x
            while root in parent:
                root = parent[root]
            while x != root:
                parent[x], x = root, parent[x]
            return root

        def union(x, y):
            x = find(x)
            y = find(y)
            # A node seen for the first time counts as a component of one.
            if size[x] == 0:
                size[x] = 1
            if size[y] == 0:
                size[y] = 1
            if x != y:
                parent[y] = x
                size[x] += size[y]

        for _ in range(F):
            f1, f2 = read_line().strip().split()
            union(f1, f2)
            print(size[find(f1)])
if __name__ == "__main__":
main()
"""
2
3
Fred Barney
Barney Betty
Betty Wilma
3
Fred Barney
Betty Wilma
Barney Betty
2
3
4
2
2
4
""" | inhyeokJeon/AALGGO | Python/baekjoon/4195_friend.py | 4195_friend.py | py | 1,644 | python | en | code | 0 | github-code | 36 |
13782113749 | from pydantic import BaseModel, validator
import datetime
class Room(BaseModel):
    """Pydantic model for a hotel room, with naive SQL statement builders.

    NOTE(review): the SQL builders interpolate values directly into the
    statement text (no escaping or parameterization); only use them with
    trusted data, never with external input.
    """

    final_date: datetime.datetime = None
    initial_date: datetime.datetime = None
    size_m2: float = None
    location: str = None
    mensal_rent: float = None
    weekly_rent: float = None
    room_id: int = None
    deposit_area: float = None
    room_TYPE: str = None
    hotel_id: int = None
    company_cnpj: str = None

    @staticmethod
    def fromList(list):
        """Build a Room from a positional row (same order as the table columns)."""
        return Room(
            final_date=list[0],
            initial_date=list[1],
            size_m2=list[2],
            location=list[3],
            mensal_rent=list[4],
            weekly_rent=list[5],
            room_id=list[6],
            deposit_area=list[7],
            room_TYPE=list[8],
            hotel_id=list[9],
            company_cnpj=list[10],
        )

    def __repr__(self):
        """Human-readable dump of every field, one per line.

        Bug fixes: non-string fields are now converted with str() (the old
        code concatenated floats/ints to str and raised TypeError), and a
        None date no longer crashes strftime().
        """
        def date_text(value):
            return value.strftime('%d/%m/%Y') if value is not None else 'None'

        details = '{\n'
        details += 'final_date: ' + date_text(self.final_date) + '\n'
        details += 'initial_date: ' + date_text(self.initial_date) + '\n'
        details += 'size_m2: ' + str(self.size_m2) + '\n'
        details += 'location: ' + str(self.location) + '\n'
        details += 'mensal_rent: ' + str(self.mensal_rent) + '\n'
        details += 'weekly_rent: ' + str(self.weekly_rent) + '\n'
        details += 'room_id: ' + str(self.room_id) + '\n'
        details += 'deposit_area: ' + str(self.deposit_area) + '\n'
        details += 'room_TYPE: ' + str(self.room_TYPE) + '\n'
        details += 'hotel_id: ' + str(self.hotel_id) + '\n'
        details += 'company_cnpj: ' + str(self.company_cnpj) + '\n'
        details += '}'
        return details

    @staticmethod
    def _sql_value(value):
        """Render one value for embedding in SQL: NULL or a quoted literal."""
        if value is None:
            return 'NULL'
        if isinstance(value, datetime.datetime):
            return '"{}"'.format(value.strftime('%Y-%m-%d %H:%M:%S'))
        return '"{}"'.format(value)

    @staticmethod
    def _where_clause(where: dict) -> str:
        """Render "where a = 'x' and b = 'y' " (empty string if no filters).

        Bug fix: multiple conditions were previously concatenated without
        AND, producing invalid SQL.
        """
        if not where:
            return ''
        conditions = " and ".join(
            "{} = '{}'".format(key, value) for key, value in where.items())
        return 'where ' + conditions + ' '

    def insertSql(self) -> str:
        """Return an INSERT statement covering every column of this room.

        Bug fixes: a None date no longer crashes (the old guard called
        strftime() on it), and falsy-but-valid values such as 0 are no
        longer silently written as NULL.
        """
        values = [self.final_date, self.initial_date, self.size_m2,
                  self.location, self.mensal_rent, self.weekly_rent,
                  self.room_id, self.deposit_area, self.room_TYPE,
                  self.hotel_id, self.company_cnpj]
        rendered = ','.join(Room._sql_value(value) for value in values)
        return 'insert into room values (' + rendered + ');'

    @staticmethod
    def querySql(where: dict, attr: list = None) -> str:
        """Return a SELECT of `attr` columns filtered by the `where` dict.

        `attr` defaults to every column; the None default replaces the
        shared mutable default `attr = []` (callers passing [] behave the
        same as before).
        """
        if not attr:
            attr = ['final_date', 'initial_date', 'size_m2', 'location',
                    'mensal_rent', 'weekly_rent', 'room_id', 'deposit_area',
                    'room_TYPE', 'hotel_id', 'company_cnpj']
        sql = 'select {} '.format(','.join(attr))
        sql += 'from room '
        sql += Room._where_clause(where)
        sql += ';'
        return sql

    @staticmethod
    def deleteSql(where: dict) -> str:
        """Return a DELETE restricted by the given equality filters.

        Raises ValueError for an empty filter dict: the old code emitted
        the invalid statement "delete from room where ;" in that case, so
        no caller can depend on it, and an unfiltered DELETE would silently
        drop every row.
        """
        if not where:
            raise ValueError('deleteSql requires at least one filter')
        return 'delete from room ' + Room._where_clause(where) + ';'

    @staticmethod
    def updateSql(attrDict: dict, where: dict) -> str:
        """Return an UPDATE setting attrDict columns, filtered by `where`.

        Bug fix: multiple SET assignments are now comma-separated (they
        were previously juxtaposed, producing invalid SQL).
        """
        assignments = ', '.join(
            "{} = '{}'".format(key, value) for key, value in attrDict.items())
        sql = 'update room set ' + assignments + ' '
        sql += Room._where_clause(where)
        sql += ';'
        return sql

    @validator("final_date", pre=True)
    def parse_final_date(cls, value):
        """Accept final_date as a 'dd/mm/YYYY' string."""
        return datetime.datetime.strptime(
            value,
            "%d/%m/%Y"
        )

    @validator("initial_date", pre=True)
    def parse_initial_date(cls, value):
        """Accept initial_date as a 'dd/mm/YYYY' string."""
        return datetime.datetime.strptime(
            value,
            "%d/%m/%Y"
        )

    @staticmethod
    def getKeys() -> list[str]:
        """Primary-key column names of the `room` table."""
        return ['room_id']
| JulioHey/Banco-de-Dados---EP | server/model/room.py | room.py | py | 4,807 | python | en | code | 0 | github-code | 36 |
29351890076 |
'''
Created by Yuqiao Hu and Yinan Wu
'''
# cited from http://www.cs.cmu.edu/~112/index.html
from cmu_112_graphics import *
import time
def appStarted(app):
    """cmu_112_graphics hook: called once when the app launches."""
    reset(app)
def reset(app):
    """Restore every piece of game state to its initial value.

    Clears the board, the winner banner, and all mode/timer bookkeeping.
    """
    app.rows, app.cols = 10, 10
    app.margin = 10
    app.textSpace = 40          # vertical space reserved for the title text
    app.winner = ''
    app.dotX, app.dotY = -1, -1
    app.listWhite, app.listBlack = [], []
    app.isWhite = False         # black moves first
    app.gameOver = False
    app.AIMode = False
    app.currentTime = 5
    app.startTime = 0
def keyPressed(app, event):
    """Keyboard controls: 'r' restarts a two-player game, 'i' starts AI mode."""
    if (event.key == 'r'):
        reset(app)
    if (event.key == 'i'):
        reset(app)
        app.AIMode = True
        app.startTime = time.time()  # used to show the "AI mode" banner briefly
def timerFired(app):
    """cmu_112_graphics hook: track wall-clock time for the transient banner."""
    app.currentTime = time.time()
def mousePressed(app, event):
    """Handle a click: place a stone on the clicked cell and check for a win.

    Two-player mode alternates colours; in AI mode the human plays black
    and the AI immediately answers with a white stone.

    Bug fixes: the stone was previously appended twice per click (once
    before and once inside the scoring branch), and the bounds check used
    `<= app.rows` / `<= app.cols`, which accepted one row/column past the
    board edge.
    """
    if app.gameOver:
        return
    app.dotX = event.x
    app.dotY = event.y
    row, col = getCell(app, app.dotX, app.dotY)
    # Ignore clicks outside the board or on an occupied cell.
    if not (0 <= row < app.rows and 0 <= col < app.cols
            and (row, col) not in app.listWhite
            and (row, col) not in app.listBlack):
        return
    if not app.AIMode:
        if app.isWhite:
            app.listWhite.append((row, col))
            if scorer(app, app.listWhite):
                app.winner = 'White'
                app.gameOver = True
        else:
            app.listBlack.append((row, col))
            if scorer(app, app.listBlack):
                app.winner = 'Black'
                app.gameOver = True
        app.isWhite = not app.isWhite
    else:
        app.listBlack.append((row, col))
        AIModifyWhite(app, row, col)
        if scorer(app, app.listBlack):
            app.winner = 'YOU'
            app.gameOver = True
        if scorer(app, app.listWhite):
            app.winner = 'AI'
            app.gameOver = True
def scorerBlack(app, virtualList):
    """Return True if virtualList contains four stones in a row.

    Checked directions: horizontal, vertical, and both diagonals.  Used by
    the AI to test whether a hypothetical black move would create a four
    that must be blocked.
    """
    stones = set(virtualList)
    directions = ((0, 1), (1, 0), (1, 1), (1, -1))
    for row, col in virtualList:
        for d_row, d_col in directions:
            # Every length-4 window in this direction that contains (row, col).
            for offset in range(4):
                window = [(row + (step - offset) * d_row,
                           col + (step - offset) * d_col)
                          for step in range(4)]
                if all(0 <= r < app.rows and 0 <= c < app.cols
                       and (r, c) in stones
                       for r, c in window):
                    return True
    return False
def AIModifyWhite(app, row, col):
    """Choose and place the AI's white stone in reply to black at (row, col).

    Strategy, in priority order:
      1. block any empty cell where black would complete four in a row,
      2. otherwise play adjacent to black's last move (3x3 neighbourhood),
      3. otherwise take the first free cell found by scanning the board.
    """
    AIRow, AICol = -1, -1
    # selection based on 4 consecutive black
    for i in range(app.rows):
        for j in range(app.cols):
            virtualBlackList = app.listBlack[:]
            if ((i, j) not in app.listBlack and
                (i, j) not in app.listWhite):
                virtualBlackList.append((i, j))
                if scorerBlack(app, virtualBlackList):
                    AIRow, AICol = i, j
                    break
        if AIRow != -1 and AICol != -1:
            break
    # selection within 3x3 grid
    if AIRow == -1 and AICol == -1:
        for i in range(row-1, row+2):
            for j in range(col-1, col+2):
                if ((i, j) not in app.listBlack and
                    (i, j) not in app.listWhite):
                    # NOTE(review): `<= app.rows` / `<= app.cols` looks like an
                    # off-by-one (allows index == board size) — confirm.
                    if 0 <= i <= app.rows and 0 <= j <= app.cols:
                        AIRow, AICol = i, j
            if AIRow != -1 and AICol != -1:
                break
    # random selection
    if AIRow == -1 and AICol == -1:
        for i in range(app.rows):
            for j in range(app.cols):
                if ((i, j) not in app.listBlack and
                    (i, j) not in app.listWhite):
                    AIRow, AICol = i, j
                    break
            if AIRow != -1 and AICol != -1:
                break
    app.listWhite.append((AIRow, AICol))
def scorer(app, listToCheck):
    """Return True if listToCheck contains five stones in a row (a win).

    Checked directions: horizontal, vertical, and both diagonals.
    """
    stones = set(listToCheck)
    directions = ((0, 1), (1, 0), (1, 1), (1, -1))
    for row, col in listToCheck:
        for d_row, d_col in directions:
            # Every length-5 window in this direction that contains (row, col).
            for offset in range(5):
                window = [(row + (step - offset) * d_row,
                           col + (step - offset) * d_col)
                          for step in range(5)]
                if all(0 <= r < app.rows and 0 <= c < app.cols
                       and (r, c) in stones
                       for r, c in window):
                    return True
    return False
def getCell(app, x, y):
    """Map pixel coordinates (x, y) to the (row, col) board cell they fall in."""
    cell_width = (app.width - 2 * app.margin) / app.cols
    cell_height = (app.height - app.margin - app.textSpace) / app.rows
    col = int((x - app.margin) // cell_width)
    row = int((y - app.textSpace) // cell_height)
    return (row, col)
def getCellBounds(app, row, col):
    """Return the pixel bounding box (x0, y0, x1, y1) of a board cell."""
    cell_width = (app.width - 2 * app.margin) / app.cols
    cell_height = (app.height - app.margin - app.textSpace) / app.rows
    x0 = app.margin + col * cell_width
    y0 = app.textSpace + row * cell_height
    x1 = app.margin + (col + 1) * cell_width
    y1 = app.textSpace + (row + 1) * cell_height
    return (x0, y0, x1, y1)
def drawGrid(app, canvas):
    """Draw the empty board as a grid of rectangles."""
    for row in range(app.rows):
        for col in range(app.cols):
            x0, y0, x1, y1 = getCellBounds(app, row, col)
            canvas.create_rectangle(x0, y0, x1, y1)
def drawDot(app, canvas):
    """Draw every placed stone as a filled circle inside its cell.

    White stones are drawn first, then black, matching the previous
    behaviour.  The old `if app.listWhite != 0:` guards were removed: a
    list never equals 0, so they were always true, and iterating an empty
    list is already a no-op.
    """
    for stones, color in ((app.listWhite, 'white'), (app.listBlack, 'black')):
        for row, col in stones:
            x0, y0, x1, y1 = getCellBounds(app, row, col)
            # Inset by half a margin so stones don't touch the grid lines.
            canvas.create_oval(x0 + app.margin/2, y0 + app.margin/2,
                               x1 - app.margin/2, y1 - app.margin/2, fill=color)
def drawText(app, canvas):
    """Draw the title, the winner banner, and the transient AI-mode notice."""
    font = 'Arial 16 bold'
    canvas.create_text(app.width/2, 20, text='GoBang Game', font=font)
    if app.winner != '':
        canvas.create_text(app.width/2, app.height/2, text=f'Winner: {app.winner}', font='Arial 50 bold', fill='red')
    # Show the AI-mode banner for ~2 seconds after it is switched on.
    if app.currentTime - app.startTime < 2:
        canvas.create_text(20, app.height-20, anchor='sw', text='AI mode activated', font='Arial 20 bold', fill='red')
def redrawAll(app, canvas):
    """cmu_112_graphics hook: repaint grid, stones, then text overlays."""
    drawGrid(app, canvas)
    drawDot(app, canvas)
    drawText(app, canvas)
runApp(width=520, height=550)
| Katrina0406/My-Projects | GoBang Game/gobang.py | gobang.py | py | 6,490 | python | en | code | 1 | github-code | 36 |
7040650853 | import pytest
import math
from vec import Vector2
import numpy.testing as npt
from adr.World import Ambient
from adr.Components import FreeBody
from adr.Components.Auxiliary import LandingGear
@pytest.fixture
def plane():
    """FreeBody fixture: a 23.4 kg plane in a default Ambient environment."""
    env = Ambient()
    plane = FreeBody(
        name='plane',
        type='plane',
        mass=23.4,
        position_cg=Vector2(-0.2, 0.02),
        pitch_rot_inertia=5.2,
        ambient=env,
    )
    return plane
@pytest.fixture
def main_landing_gear():
    """LandingGear fixture mounted 0.2 m behind the reference point."""
    main_landing_gear = LandingGear(
        name='main_landing_gear',
        relative_position=Vector2(x=-0.2, y=0),
        relative_angle=math.radians(0),
        mass=0.3,
        height=0.1,
        spring_coeff=1000,
        dump_coeff=50,
        friction_coeff=0.05
    )
    return main_landing_gear
def test_instantiation(main_landing_gear):
    """Constructor stores type and physical coefficients as given."""
    assert(main_landing_gear.type == 'landing_gear')
    assert(main_landing_gear.height == 0.1)
    assert(main_landing_gear.spring_coeff == 1000)
    assert(main_landing_gear.dump_coeff == 50)
    assert(main_landing_gear.friction_coeff == 0.05)
def test_floor_contact_point(main_landing_gear):
    """Contact point of an unloaded gear sits one gear-height below it.

    NOTE(review): this builds a local Vector2 and asserts on its own
    components without ever using the fixture, so it does not actually
    exercise LandingGear — consider deriving contact_point from
    main_landing_gear instead.
    """
    contact_point = Vector2(0, -0.1)
    npt.assert_almost_equal(contact_point.x, 0)
    npt.assert_almost_equal(contact_point.y, -0.1)
def test_gear_reaction(plane, main_landing_gear):
    """Vertical gear reaction: zero in the air, positive on the ground."""
    main_landing_gear.set_parent(plane)
    plane.velocity = Vector2(6, 0.4)

    # Plane on air (position.y = 2m), so no reaction on landing gear is expected
    plane.position = Vector2(10, 2)
    reaction, contact_point = main_landing_gear.gear_reaction()
    assert(type(contact_point) is Vector2)
    npt.assert_almost_equal(reaction.y, 0)

    # Plane on ground (position.y = 0m), so reaction on landing gear is expected
    plane.position = Vector2(10, 0)
    reaction, contact_point = main_landing_gear.gear_reaction()
    npt.assert_almost_equal(reaction.y, 80.0)
def test_gear_friction(plane, main_landing_gear):
    """Ground friction opposes the direction of motion; zero in the air."""
    main_landing_gear.set_parent(plane)
    plane.velocity = Vector2(6, 0.4)

    # Plane on air (position.y = 2m), so no friction on landing gear is expected
    plane.position = Vector2(10, 2)
    friction, contact_point = main_landing_gear.gear_friction()
    assert(type(contact_point) is Vector2)
    npt.assert_almost_equal(friction.x, 0)

    # Plane on ground (position.y = 0m), going forward, expected friction on negative x direction
    plane.position = Vector2(10, 0)
    friction, contact_point = main_landing_gear.gear_friction()
    npt.assert_almost_equal(friction.x, -4.0)

    # Plane on ground (position.y = 0m), going backwards, expected friction on positive x direction
    plane.velocity = Vector2(-6, 0.4)
    plane.position = Vector2(10, 0)
    friction, contact_point = main_landing_gear.gear_friction()
    npt.assert_almost_equal(friction.x, 4.0)
| CeuAzul/ADR | tests/Components/Auxiliary/test_LandingGear.py | test_LandingGear.py | py | 2,830 | python | en | code | 12 | github-code | 36 |
43348915031 | """
Default tests for Env classes
"""
import pytest
import numpy as np
import tensorflow as tf
from sionna.ofdm import PilotPattern
from cebed.envs import OfdmEnv, EnvConfig
def mock_pilot_pattern(config):
    """Build a dummy PilotPattern with all-ones pilots on OFDM symbol 3."""
    shape = [
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    mask = np.zeros(shape, bool)
    mask[..., 3, :] = True  # pilots occupy every subcarrier of symbol 3
    shape[2] = 1  # pilot values only need the single pilot-bearing symbol
    pilots = np.zeros(shape, np.complex64)
    pilots[..., 0, :] = np.ones((config.fft_size,), np.complex64)
    pilots = np.reshape(pilots, [config.n_ues, config.num_streams_per_tx, -1])
    return PilotPattern(mask=mask, pilots=pilots)
@pytest.mark.parametrize("n_ues", [1, 4])
@pytest.mark.parametrize("nr", [1, 4])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_env(n_ues, nr):
    """OfdmEnv returns (y, h) — and x when requested — with expected shapes."""
    config = EnvConfig()
    config.num_rx_antennas = nr
    config.n_ues = n_ues
    env = OfdmEnv(config)

    batch_size = 10
    snr_db = 20
    outputs = env(batch_size, snr_db)
    assert len(outputs) == 2
    expected_y_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    expected_h_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    assert outputs[0].shape == expected_y_shape
    assert outputs[1].shape == expected_h_shape

    # With return_x=True the transmitted symbols are returned first.
    outputs = env(batch_size, snr_db, return_x=True)
    assert len(outputs) == 3
    expected_x_shape = [
        batch_size,
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    assert outputs[0].shape == expected_x_shape
@pytest.mark.parametrize("p_spacing", [1, 2])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_block_pilot_pattern_values(p_spacing):
    """Mask is zero outside pilot symbols and one at every p_spacing-th subcarrier."""
    config = EnvConfig()
    config.p_spacing = p_spacing
    env = OfdmEnv(config)
    for i in range(0, config.num_ofdm_symbols):
        if i not in env.pilot_ofdm_symbol_indices:
            print(env.get_mask().shape)
            assert all(env.get_mask()[0, 0, i] == tf.zeros(shape=(config.fft_size,)))

    indices = np.arange(0, config.fft_size, config.p_spacing)
    for i in env.pilot_ofdm_symbol_indices:
        for j in indices:
            assert env.get_mask()[0, 0, i, j] == 1
@pytest.mark.parametrize("nues", [2, 4])
def test_get_mask(nues):
    """get_mask() has one entry per UE, stream, OFDM symbol and subcarrier."""
    config = EnvConfig()
    config.n_ues = nues
    env = OfdmEnv(config)
    mask = env.get_mask()

    assert mask.shape == [
        nues,
        env.config.num_streams_per_tx,
        env.config.num_ofdm_symbols,
        env.config.fft_size,
    ]
@pytest.mark.parametrize("p_spacing", [1, 2])
@pytest.mark.parametrize("nr", [4, 8])
@pytest.mark.parametrize("nues", [2, 4])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_mimo_block_pilot_pattern(p_spacing, nr, nues):
    """Pilot symbol/subcarrier counts and mask density match the config."""
    config = EnvConfig()
    config.num_rx_antennas = nr
    config.n_ues = nues
    config.p_spacing = p_spacing
    env = OfdmEnv(config)
    assert env.n_pilot_symbols == len(config.pilot_ofdm_symbol_indices)
    assert env.n_pilot_subcarriers == int(
        env.rg.num_effective_subcarriers / config.p_spacing
    )

    # Per-UE number of mask entries equals the grid's pilot symbol count.
    mask = env.get_mask()
    assert int(np.count_nonzero(mask)) / nues == env.rg.num_pilot_symbols.numpy()
def test_extract_at_pilot_locations():
    """Pilot extraction and LS estimation pick out the marked symbol only.

    Received grid is all ones except symbol 3 (the mock pilot symbol),
    which is set to -1; so extracted pilots must all be -1, and the raw
    estimate must be -1 on symbol 3 and 0 elsewhere.
    """
    config = EnvConfig()
    config.pilot_pattern = mock_pilot_pattern(config)
    env = OfdmEnv(config)
    batch_size = 10
    y_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    y = np.ones(y_shape, dtype=np.complex64)
    y[:, 0, :, 3, :] = -1 * np.ones((config.fft_size,))
    yp = env.extract_at_pilot_locations(y)
    expect_yp_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.n_ues,
        config.num_streams_per_tx,
        env.rg.pilot_pattern.num_pilot_symbols.numpy(),
    ]
    assert yp.shape == expect_yp_shape
    assert (yp.numpy() == -1 * np.ones(expect_yp_shape, np.complex64)).all()

    h_hat = env.estimate_at_pilot_locations(y)
    expected_h_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    assert h_hat.shape == expected_h_shape
    assert (
        h_hat[:, 0, :, :, 0, 3, :].numpy()
        == -1 * np.ones((config.fft_size,), np.complex64)
    ).all()
    for i in range(config.num_ofdm_symbols):
        if i != 3:
            assert (
                h_hat[:, 0, :, :, 0, i, :].numpy()
                == np.zeros((config.fft_size,), np.complex64)
            ).all()
| SAIC-MONTREAL/CeBed | tests/test_env.py | test_env.py | py | 5,096 | python | en | code | 7 | github-code | 36 |
32793887647 | import os
import torch
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset_path = os.path.dirname(__file__) + '/../data/dataset.txt'
teacher_forcing_ratio = 0.5
HIDDEN_SIZE = 512
def change_to_device(model):
    """Move *model* onto the module-level target `device` (CPU or CUDA)."""
    on_cpu = device.type == 'cpu'
    (model.cpu if on_cpu else model.cuda)()
| junix/gen_poem | conf/__init__.py | __init__.py | py | 333 | python | en | code | 0 | github-code | 36 |
16044252945 |
from pymysql import connect
import yaml
import logging.config
class DB():
    """Helper around a local MySQL connection used to seed test fixtures.

    NOTE(review): values are interpolated directly into the SQL text with
    no escaping; use only with trusted fixture data, never external input.
    """

    def __init__(self):
        """Connect to the local django_restful database."""
        logging.info('===================== init data =====================')
        logging.info("connect db")
        self.conn = connect(host='127.0.0.1', user='root', password='Zx123456', db='django_restful')

    def clear(self, table_name):
        """Remove every row from *table_name* via TRUNCATE."""
        logging.info("clear db...")
        clear_sql = 'truncate ' + table_name + ';'  # note the space after 'truncate'
        with self.conn.cursor() as cursor:
            # Disable FK checks so truncation order doesn't matter.
            cursor.execute("set foreign_key_checks=0;")
            cursor.execute(clear_sql)
            self.conn.commit()

    def insert(self, table_name, table_data):
        """Insert one row; *table_data* maps column name -> value.

        Bug fixes: the caller's dict is no longer mutated in place, and the
        log call no longer misuses `value` as a %-format argument for `key`.
        """
        logging.info("insert data...")
        # Quote every value as a SQL string literal (local copy, no mutation).
        quoted = {key: "'" + str(value) + "'" for key, value in table_data.items()}
        key = ','.join(quoted.keys())
        value = ','.join(quoted.values())
        logging.info("%s %s", key, value)
        insert_sql = 'insert into ' + table_name + '('+key+')'+'values'+'('+value+')'
        logging.info(insert_sql)
        with self.conn.cursor() as cursor:
            cursor.execute(insert_sql)
            self.conn.commit()

    def close(self):
        """Close the database connection."""
        logging.info("close db")
        self.conn.close()
        logging.info("===========init finisher!===========")

    def init_data(self, datas):
        """Reset and repopulate tables from {table_name: [row_dict, ...]}."""
        logging.info("init ab...")
        for table, data in datas.items():
            self.clear(table)
            for d in data:
                self.insert(table, d)
        self.close()
if __name__ == '__main__':
    db = DB()
    # Debugging the individual methods:
    # db.clear("api_user")
    # db.clear("api_group")
    # user_data = {'id': 1, 'username': '51zxw', 'email': '51zxw@163.com', 'groups': 'http://127.0.0.1:8000/groups/1'}
    # db.insert("api_user", user_data)
    # group_data = {'id': 1, 'name': 'Developer'}
    # db.insert('api_group', group_data)
    # db.close()
    # Initialize the data from the YAML fixture file.
    f = open('datas.yaml', 'r', encoding="utf-8")
    datas = yaml.load(f, Loader=yaml.FullLoader)  # suppress warning: yaml.load(input, Loader=yaml.FullLoader)
    db.init_data(datas)
| langlixiaobailongqaq/django_restful | api/test_project/mysql_action.py | mysql_action.py | py | 2,049 | python | en | code | 1 | github-code | 36 |
73788737704 | import pathlib
import re
import shutil
import subprocess
import tarfile
import tempfile
import urllib.parse
import urllib.request
import zipfile
javaVersion = "11.0.12+7"
def createBinaryArchive(platform: str, arch: str) -> None:
  """Bundle the lsp-cli release with a trimmed JDK for one platform/arch.

  Extracts target/lsp-cli-<version>.tar.gz, downloads and jlinks a JDK,
  points the startup script's JAVA_HOME default at the bundled runtime,
  and writes target/lsp-cli-<version>-<platform>-<arch>.(zip|tar.gz).
  """
  print(f"Processing platform/arch '{platform}/{arch}'...")
  lspCliVersion = getLspCliVersion()
  targetDirPath = pathlib.Path(__file__).parent.parent.joinpath("target")
  lspCliArchivePath = pathlib.Path(__file__).parent.parent.joinpath(
      targetDirPath, f"lsp-cli-{lspCliVersion}.tar.gz")

  with tempfile.TemporaryDirectory() as tmpDirPathStr:
    tmpDirPath = pathlib.Path(tmpDirPathStr)
    print("Extracting lsp-cli archive...")
    with tarfile.open(lspCliArchivePath, "r:gz") as tarFile: tarFile.extractall(path=tmpDirPath)
    lspCliDirPath = tmpDirPath.joinpath(f"lsp-cli-{lspCliVersion}")
    relativeJavaDirPath = downloadJava(tmpDirPath, lspCliDirPath, platform, arch)

    print("Setting default for JAVA_HOME in startup script...")
    # Keep only the launcher for this platform and patch it in place.
    if platform == "windows":
      lspCliDirPath.joinpath("bin", "lsp-cli").unlink()
      binScriptPath = lspCliDirPath.joinpath("bin", "lsp-cli.bat")
      searchPattern = re.compile("^set REPO=.*$", flags=re.MULTILINE)
    else:
      lspCliDirPath.joinpath("bin", "lsp-cli.bat").unlink()
      binScriptPath = lspCliDirPath.joinpath("bin", "lsp-cli")
      searchPattern = re.compile("^BASEDIR=.*$", flags=re.MULTILINE)

    with open(binScriptPath, "r") as file: binScript = file.read()

    if platform == "windows":
      insertStr = f"\r\nif not defined JAVA_HOME set JAVA_HOME=\"%BASEDIR%\\{relativeJavaDirPath}\""
    else:
      insertStr = f"\n[ -z \"$JAVA_HOME\" ] && JAVA_HOME=\"$BASEDIR\"/{relativeJavaDirPath}"

    # Insert the JAVA_HOME default right after the matched anchor line.
    regexMatch = searchPattern.search(binScript)
    assert regexMatch is not None
    binScript = binScript[:regexMatch.end()] + insertStr + binScript[regexMatch.end():]
    with open(binScriptPath, "w") as file: file.write(binScript)

    lspCliBinaryArchiveFormat = ("zip" if platform == "windows" else "gztar")
    lspCliBinaryArchiveExtension = (".zip" if platform == "windows" else ".tar.gz")
    lspCliBinaryArchivePath = targetDirPath.joinpath(
        f"lsp-cli-{lspCliVersion}-{platform}-{arch}")
    print(f"Creating binary archive '{lspCliBinaryArchivePath}{lspCliBinaryArchiveExtension}'...")
    shutil.make_archive(str(lspCliBinaryArchivePath), lspCliBinaryArchiveFormat,
        root_dir=tmpDirPath)
    print("")
def downloadJava(tmpDirPath: pathlib.Path, lspCliDirPath: pathlib.Path,
      platform: str, arch: str) -> str:
  """Download a Temurin JDK and jlink a trimmed runtime into lspCliDirPath.

  Returns the relative directory name of the bundled runtime (jdk-<version>).
  """
  javaArchiveExtension = (".zip" if platform == "windows" else ".tar.gz")
  javaArchiveName = (f"OpenJDK11U-jdk_{arch}_{platform}_hotspot_"
      f"{javaVersion.replace('+', '_')}{javaArchiveExtension}")
  javaUrl = ("https://github.com/adoptium/temurin11-binaries/releases/download/"
      f"jdk-{urllib.parse.quote_plus(javaVersion)}/{javaArchiveName}")
  javaArchivePath = lspCliDirPath.joinpath(javaArchiveName)
  print(f"Downloading JDK from '{javaUrl}' to '{javaArchivePath}'...")
  urllib.request.urlretrieve(javaUrl, javaArchivePath)

  print("Extracting JDK archive...")
  if javaArchiveExtension == ".zip":
    with zipfile.ZipFile(javaArchivePath, "r") as zipFile: zipFile.extractall(path=tmpDirPath)
  else:
    with tarfile.open(javaArchivePath, "r:gz") as tarFile: tarFile.extractall(path=tmpDirPath)

  print("Removing JDK archive...")
  javaArchivePath.unlink()

  relativeJavaDirPathString = f"jdk-{javaVersion}"
  jdkDirPath = tmpDirPath.joinpath(relativeJavaDirPathString)
  # NOTE(review): this condition looks inverted — macOS JDK bundles place
  # jmods under Contents/Home/jmods, not the other way around; confirm.
  jmodsDirPath = (jdkDirPath.joinpath("jmods") if platform == "mac" else
      jdkDirPath.joinpath("Contents", "Home", "jmods"))
  javaTargetDirPath = lspCliDirPath.joinpath(relativeJavaDirPathString)

  print("Creating Java distribution...")
  subprocess.run(["jlink", "--module-path", str(jmodsDirPath), "--add-modules", "java.se",
      "--strip-debug", "--no-man-pages", "--no-header-files", "--compress=2",
      "--output", str(javaTargetDirPath)])

  print("Removing JDK directory...")
  shutil.rmtree(jdkDirPath)

  return relativeJavaDirPathString
def getLspCliVersion() -> str:
  """Extract the project version from the first <version> tag in pom.xml."""
  pomText = pathlib.Path("pom.xml").read_text()
  versionMatch = re.search(r"<version>(.*?)</version>", pomText)
  assert versionMatch is not None
  return versionMatch.group(1)
def main() -> None:
  """Build binary archives for every supported platform (x64 only)."""
  createBinaryArchive("linux", "x64")
  createBinaryArchive("mac", "x64")
  createBinaryArchive("windows", "x64")
if __name__ == "__main__":
main()
| valentjn/lsp-cli | tools/createBinaryArchives.py | createBinaryArchives.py | py | 4,493 | python | en | code | 7 | github-code | 36 |
13124489294 |
def is_palindrome(text):
    """Check if text is a palindrome, ignoring letter case.

    Args:
        text: string to be checked

    Returns:
        True if text is a palindrome, False if not.
    """
    lowered = text.lower()
    return lowered == lowered[::-1]
print(is_palindrome('kajak'))
| pawel123789/Project3 | is_palindrome.py | is_palindrome.py | py | 355 | python | en | code | 0 | github-code | 36 |
12296289562 | #ordered collection
#heterogenous
#growable
#mutable
#properties of array
#square bracket
# Lists are heterogeneous: this one mixes ints, strings, floats and nested
# containers (list, set, tuple, dict).
list1=[1,2,3,4,5,6,'a',"asd",4.5,5.555555,[1,2,3,4],{1,2,4,5,6},(3,4,2,1),{'key1':1,'key2':2}]
#print(list1)
#print(list1[1:4:1])
#slicing operator
#part 1-starting index
#part 2-last index
#part 3- number of steps,-1 for reverse single step
ketan=[1,2,3,1,12,3,4,34,34,3]
shri=[4,5,6]
chichi=[7,8,9]
# extend() splices the elements in; append() adds the whole list as ONE element.
ketan.extend(shri)
ketan.append(chichi)
#print(ketan)
#print(ketan)
# remove(1) deletes only the FIRST occurrence of 1.
ketan.remove(1)
print("asd{}asd".format(ketan))
| 00143kabir/c_programmes | python/python_lists.py | python_lists.py | py | 509 | python | en | code | 0 | github-code | 36 |
8899583521 | from flask import redirect, render_template, request, url_for
from flask_login import login_required
from application import app, db, get_css_framework, ITEMS_PER_PAGE
from application.room.models import Room
from application.place.models import Place
from application.place.forms import PlaceForm
from application.place.forms import PlaceUpdateForm
from flask_paginate import Pagination, get_page_parameter
@app.route("/place", methods=["GET"])
def place_index():
    """List all places ordered by name, paginated ITEMS_PER_PAGE at a time."""
    search = False
    q = request.args.get('q')
    if q:
        search = True

    page = request.args.get(get_page_parameter(), type=int, default=1)
    total = Place.query.count()
    places = Place.query.order_by(Place.name)\
        .slice((page - 1) * ITEMS_PER_PAGE, page * ITEMS_PER_PAGE)
    pagination = Pagination(page=page, total=total, search=search, record_name='places', per_page=ITEMS_PER_PAGE,
                            css_framework=get_css_framework(), format_total=True, format_number=True)

    return render_template("place/list.html", places=places, pagination=pagination)
@app.route("/place/new/")
@login_required
def place_form():
    """Render an empty form for creating a new place."""
    return render_template("place/new.html", form=PlaceForm())
@app.route("/place/<place_id>/delete/", methods=["POST"])
@login_required
def place_delete(place_id):
    """Delete a place, detaching any rooms that referenced it first."""
    place = Place.query.get(place_id)
    # Null out the FK on dependent rooms before the place row disappears.
    roomswithdeletedplace = Room.query.filter(Room.place_id == place_id).all()
    for room in roomswithdeletedplace:
        room.place_id = None
    message = "Place " + place.name + " deleted!"
    db.session().delete(place)
    db.session().commit()
    return render_template("info.html", message=message)
@app.route("/place/<place_id>/", methods=["GET"])
def place_view(place_id):
    """Show one place together with its update form."""
    place = Place.query.get(place_id)
    return render_template("place/update.html", place=place, form=PlaceUpdateForm())
@app.route("/place/<place_id>/update/", methods=["POST"])
@login_required
def place_update(place_id):
    """Update a place's name and (optionally) address from the posted form."""
    form = PlaceUpdateForm(request.form)
    place = Place.query.get(place_id)

    # An empty name means "keep the current one" (so validation passes).
    if form.name.data == "":
        form.name.data = place.name
    if not form.validate():
        return render_template("place/update.html", place=place, form=form)

    place.name = form.name.data
    if form.address.data != "":
        place.address = form.address.data
    db.session().commit()
    message = "Place updated!"

    return render_template("place/update.html", place=place, form=form, message=message)
@app.route("/place/", methods=["POST"])
@login_required
def place_create():
    """Create a new place from the posted form, re-rendering on errors."""
    form = PlaceForm(request.form)

    if not form.validate():
        return render_template("place/new.html", form=form)

    place = Place(form.name.data, form.address.data)
    db.session().add(place)
    db.session().commit()
    message = "Place created!"
    return render_template("place/new.html", form=form, message=message)
| Robustic/Orchestime | application/place/views.py | views.py | py | 2,867 | python | en | code | 0 | github-code | 36 |
71064138024 | print("Welcome to Calculator")
#Addition Function
def sum(num1, num2):
    """Return the sum of num1 and num2.

    NOTE: intentionally shadows the built-in sum() — the name is kept for
    interface compatibility with the calculator's existing callers.

    Bug fix: the previous body referenced undefined names (`expr`,
    `isdigit`, `store`) and assigned into a string by index, so every call
    raised NameError before doing any arithmetic.  The dead code was
    removed and the function now simply performs the addition.
    """
    return num1 + num2
#Subtraction Function
def diff(num1, num2):
    """Return the difference num1 - num2.

    Bug fix: the previous body referenced undefined names (`expr`,
    `isdigit`, `store`) and crashed with NameError on every call; the
    dead code was removed.
    """
    return num1 - num2
#Multiplication function
def product(num1, num2):
    """Return the product num1 * num2.

    Bug fix: the previous body referenced undefined names (`expr`,
    `isdigit`, `store`) and crashed with NameError on every call; the
    dead code was removed.
    """
    return num1 * num2
#Division Function
def quotient(num1, num2):
    """Return the quotient num1 / num2.

    Raises ZeroDivisionError when num2 is 0 (propagated from `/`).

    Bug fix: the previous body referenced undefined names (`expr`,
    `isdigit`, `store`) and crashed with NameError on every call; the
    dead code was removed.
    """
    return num1 / num2
inValid ="In Valid"
| masonperry/Program-4 | Program04 Perry.py | Program04 Perry.py | py | 2,688 | python | en | code | 0 | github-code | 36 |
33723812737 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 11:02:21 2019
@author: routhier
"""
import os
import pandas as pd
import numpy as np
import datagenerator as script
def test_generator(tmpdir, monkeypatch):
    """End-to-end check of datagenerator.main() on mocked annotations.

    pd.read_csv is monkeypatched to return two annotated regions on chr1
    (one per strand); the script should emit a positive-class array of
    2 windows and a balanced (factor 4) negative-class array of 8 windows,
    each 299 bp wide.

    NOTE(review): the genome directory is a hard-coded absolute path that
    only exists on the original author's machine.  The final assertion
    line was also reconstructed here after being garbled by extraction.
    """
    def mock_read_csv(file_in, sep):
        # Stand-in annotation table: Start/Stop/Strand/Chr columns.
        table = pd.DataFrame()
        table['Start'] = np.array([500, 8000])
        table['Stop'] = np.array([1500, 7600])
        table['Strand'] = np.array(['+', '-'])
        table['Chr'] = 'chr1'
        return table

    monkeypatch.setattr(pd, 'read_csv', mock_read_csv)
    p = tmpdir.mkdir("tmpStart_data").join("tmpfile.csv")
    d = '/users/invites/routhier/Documents/' + \
        'Projet_nucleosomes/' + \
        'Programme/seq_chr_sacCer3/sacCer3'

    # run script
    script.main(["--directory", d, "--file", str(p),
                 "--balance_factor", "4", "--max_chr", "1"])

    local_x0 = np.load(os.path.dirname(str(p)) + '/X0_start.npy')
    local_x1 = np.load(os.path.dirname(str(p)) + '/X1_start.npy')

    assert local_x1.shape == (2, 299) and local_x0.shape == (8, 299)
317625316 | #!/bin/python3
import math
import os
import random
import re
import sys
def flatten(matrix, offset, size):
    """Collect the boundary ring of the m×n submatrix at `offset`.

    Order: down the left column, right along the bottom row, up the right
    column, then left along the top row back toward the start.
    """
    top, left = offset
    height, width = size
    bottom = top + height - 1
    right = left + width - 1
    ring = [matrix[y][left] for y in range(top, bottom + 1)]
    ring += [matrix[bottom][x] for x in range(left + 1, right + 1)]
    ring += [matrix[y][right] for y in range(bottom - 1, top - 1, -1)]
    ring += [matrix[top][x] for x in range(right - 1, left, -1)]
    return ring
def enroll(matrix, offset, size, f):
Y, X = offset
m, n = size
for y in range(Y, Y+m):
matrix[y][X] = f.pop(0)
for x in range(X+1, X+n):
matrix[Y+m-1][x] = f.pop(0)
for y in range(Y+m-2, Y-1, -1):
matrix[y][X+n-1] = f.pop(0)
for x in range(X+n-2, X, -1):
matrix[Y][x] = f.pop(0)
return matrix
# Complete the matrixRotation function below.
def matrixRotation(matrix, r):
    """Rotate every concentric ring of `matrix` by r positions
    (counter-clockwise along the flatten() traversal order), print the
    result row by row, and return the rotated matrix."""
    rows, cols = len(matrix), len(matrix[0])
    for layer in range(min(rows, cols) // 2):
        dims = (rows - 2 * layer, cols - 2 * layer)
        ring = flatten(matrix, (layer, layer), dims)
        shift = r % len(ring)
        if shift:
            # rotate the ring right by `shift` positions
            ring = ring[-shift:] + ring[:-shift]
        matrix = enroll(matrix, (layer, layer), dims, ring)
    for row in matrix:
        print(*row)
    return matrix
if __name__ == '__main__':
    # First input line: "m n r" — rows, columns and rotation count.
    mnr = input().rstrip().split()

    m = int(mnr[0])

    n = int(mnr[1])

    r = int(mnr[2])

    matrix = []

    # Next m lines: one whitespace-separated matrix row each.
    for _ in range(m):
        matrix.append(list(map(int, input().rstrip().split())))

    matrixRotation(matrix, r)
| DStheG/hackerrank | HackerRank/matrix-rotation-algo.py | matrix-rotation-algo.py | py | 1,373 | python | en | code | 0 | github-code | 36 |
7183191265 | #!/usr/bin/env python3
"""Init Tsne and the appropriate values"""
import numpy as np
def P_init(X, perplexity):
    """Initialize the quantities needed by t-SNE's P computation.

    Returns:
        D: (n, n) squared Euclidean distance matrix with a zeroed diagonal.
        P: (n, n) zero matrix (affinities, filled in later).
        betas: (n, 1) matrix of ones (precisions, tuned later).
        H: target Shannon entropy, log2(perplexity).
    """
    n = X.shape[0]
    # ||x_i||^2 + ||x_j||^2 - 2 x_i.x_j, via broadcasting
    sq_norms = np.sum(np.square(X), axis=1)
    D = sq_norms[:, np.newaxis] - 2 * np.matmul(X, X.T) + sq_norms[np.newaxis, :]
    np.fill_diagonal(D, 0)
    P = np.zeros((n, n))
    betas = np.ones((n, 1))
    H = np.log2(perplexity)
    return D, P, betas, H
| JohnCook17/holbertonschool-machine_learning | unsupervised_learning/0x00-dimensionality_reduction/2-P_init.py | 2-P_init.py | py | 611 | python | en | code | 3 | github-code | 36 |
37854390965 | #!/usr/bin/env python3
'''
curve fit to histogram
'''
import collections
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.axes as maxes
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D as mline
from .markline import add_fcurve
__all__=['add_gauss_fit']
# gaussian fit
def gauss_1d(x, x0, sigma, I):
    """Unnormalized 1-d Gaussian: I * exp(-(x - x0)^2 / (2 sigma^2))."""
    z = x - x0
    return I * np.exp(-(z * z) / (2 * sigma**2))
def cents_to_edges(cents):
    '''
    Convert bin centers to bin edges.

    Interior edges are midpoints between neighbouring centers; the two
    outer edges extrapolate by the adjacent half-width.
    '''
    half_widths = np.diff(cents) / 2
    inner = cents[1:] - half_widths
    first = cents[0] - half_widths[0]
    last = cents[-1] + half_widths[-1]
    return np.asarray([first, *inner, last])
def fit_gauss1d_to_data(cnts, xs):
    """Fit a 1-d Gaussian to histogram counts.

    `xs` may be bin edges (len(cnts)+1) or bin centers (len(cnts)).
    Returns (func, popt) where func evaluates the fitted curve and popt is
    a Gauss1d namedtuple (x0, sigma, I).
    """
    if len(cnts)+1==len(xs):
        edges=xs
        cents=(edges[:-1]+edges[1:])/2
    elif len(cnts)==len(xs):
        cents=xs
        edges=cents_to_edges(cents)
    else:
        raise ValueError('mismatch between len of `cnts` and `xs`')

    # init guess: weighted mean/std of the bin centers, and an amplitude
    # that matches the histogram's integral to the Gaussian's.
    ws=cnts/np.sum(cnts)
    x0=np.sum(ws*cents)
    std=np.sqrt(np.sum(ws*(cents-x0)**2))
    I=np.sum(cnts*np.diff(edges))/(np.sqrt(2*np.pi)*std)
    p0=(x0, std, I)

    popt, _=curve_fit(gauss_1d, cents, cnts, p0=p0)
    # NOTE: this lambda closes over `popt` LATE — after the reassignment
    # below it unpacks the Gauss1d namedtuple (same 3 fields), so behavior
    # is unchanged, but keep the statement order in mind when editing.
    func=lambda x: gauss_1d(x, *popt)

    # to namedtuple
    t_gauss1d=collections.namedtuple('Gauss1d', ['x0', 'sigma', 'I'])
    popt=t_gauss1d(*popt)

    return func, popt
# data from object returned by hist plot
def get_data_from_polygon(p):
    '''
    get cnts, edges from object returned from `hist` plot

    Walks the Polygon's path vertices. For a "stepfilled" hist the outline
    presumably traces forward and then doubles back along the baseline;
    the first decrease in x marks the start of that backward segment,
    which is discarded. Counts are then taken from every other y vertex
    (skipping baseline points) and edges from every other x vertex —
    TODO confirm against matplotlib's actual Path layout.
    '''
    path=p.get_path()
    verts=path.vertices
    xs, ys=verts.T

    # stepfilled
    backs,=np.nonzero(np.diff(xs)<0) # backward path
    if len(backs)>0:
        n=backs[0]+1
        xs=xs[:n]
        ys=ys[:n]

    cnts=ys[1:-1:2]
    edges=xs[::2]

    return cnts, edges
def get_data_from_line(l):
    '''
    Return (ys, xs) extracted from a Line2D object.
    '''
    assert isinstance(l, mline)
    xdata, ydata = l.get_data()
    return ydata, xdata
def get_data_from_bars(p):
    '''
    Return (cnts, edges) from a BarContainer: bar tops as counts, and bin
    edges reconstructed from the bar centers.
    '''
    heights = []
    centers = []
    for bar in p:
        x0, y0 = bar.get_xy()
        heights.append(y0 + bar.get_height())
        centers.append(x0 + bar.get_width() / 2)

    cnts = np.asarray(heights)
    cents = np.asarray(centers)

    # bin centers to edges
    edges = cents_to_edges(cents)

    return cnts, edges
def get_data_from_plt(p):
    '''
    Return (cnts, edges) from an object produced by a `hist` plot:
    a step Polygon, a one-element list wrapping one, or a bar collection.
    '''
    if isinstance(p, mpatches.Polygon):
        return get_data_from_polygon(p)

    # list returned from hist plot
    if len(p)==1 and isinstance(p[0], mpatches.Polygon):
        return get_data_from_polygon(p[0])

    # otherwise every element must be a bar
    for t in p:
        if not isinstance(t, mpatches.Rectangle):
            s='only support `mpatches.Polygon` and collection of bars'
            raise ValueError(s)
    return get_data_from_bars(p)
# get patches from ax
def split_hist_patches(patches):
    '''
    Split hist patches into per-histogram groups based on
        - type: Polygon (step hist) vs Rectangle (bar hist)
        - facecolor: consecutive bars with the same fc belong together
    '''
    groups = []
    last_fc = None  # fc of the previous patch, None if it wasn't a bar
    for patch in patches:
        if isinstance(patch, mpatches.Polygon):
            # a step histogram is always its own group
            groups.append([patch])
            last_fc = None
        elif isinstance(patch, mpatches.Rectangle):
            fc = patch.get_fc()
            if last_fc is not None and fc == last_fc:
                # same color run: extend the current bar group
                groups[-1].append(patch)
            else:
                # first bar of a new group
                groups.append([patch])
                last_fc = fc
        # anything else (only Polygon and Rectangle are considered) is skipped
    return groups
def get_patches_from_ax(ax, hlabel=None, hind=None):
    '''
    Fetch the patches of a hist plot from `ax`.

    The axes' patches are split into histogram groups (see
    split_hist_patches). `hlabel` filters groups by label and `hind`
    selects a group by index. With neither given, all patches are
    returned unsplit.
    '''
    if hlabel is None and hind is None:
        return ax.patches

    groups = split_hist_patches(ax.patches)
    if hlabel is not None:
        groups = [g for g in groups if g[0].get_label() == hlabel]

    if hind is not None:
        return groups[hind]

    if len(groups) > 1:
        raise ValueError('too many hist groups found. use `hind` to specify one')
    return groups[0]
def add_gauss_fit(*args, **kwargs):
    '''
    add gaussian fit for hist plot

    2 way to call
        add_gauss_fit(p, **kwargs)  # for p from hist plot
        add_gauss_fit(ax, hlabel='some hist', hind=0)  # use patches with given label in ax
        add_gauss_fit(ax, cnts, edges)

    Returns the fitted Gauss1d parameters. Remaining kwargs (after
    `hlabel`/`hind` are popped) are forwarded to add_fcurve.
    '''
    if len(args)==1:
        # single argument: an Axes, a step Polygon, or a bar collection
        p,=args
        if isinstance(p, maxes.Axes):
            ax=p
            # pop selector kwargs so only plot kwargs reach add_fcurve
            pkws={}
            for k in ['hlabel', 'hind']:
                if k in kwargs:
                    pkws[k]=kwargs.pop(k)
            p=get_patches_from_ax(ax, **pkws)
        elif isinstance(p, mpatches.Polygon):
            ax=p.axes
        else:
            ax=p[0].axes

        cnts, edges=get_data_from_plt(p)
    else:
        # explicit form: (ax, cnts, edges)
        ax, cnts, edges=args

    func, popt=fit_gauss1d_to_data(cnts, edges)
    add_fcurve(ax, func, **kwargs)

    return popt
| hujh08/datapy | plot/curvefit.py | curvefit.py | py | 5,360 | python | en | code | 0 | github-code | 36 |
34326375432 | import tensorflow as tf
# from tensorflow.keras import layers
from tensorflow import keras
from data import DataManager
import os
from utils import utils
# https://github.com/rlcode/reinforcement-learning-kr/blob/master/3-atari/1-breakout/breakout_a3c.py
# https://github.com/yinchuandong/A3C-keras/blob/master/a3c.py
# https://github.com/seungeunrho/minimalRL/blob/master/a3c.py
class BaseModel:
    ''' Super Class Model

    Builds the shared dense+CNN feature extractor with actor/critic output
    heads for the A3C-style agents, and manages weight checkpoints under
    ./model_weight/<class_name>/.
    '''
    dense_input = None        # keras Input for the flat feature vector
    cnn_input = None          # keras Input for the (299, 299, 5) chart stack
    output_activation = None
    model_actor = None
    model_critic = None
    train_x_raw = None
    train_x_chart = None
    train_y = None
    eval_x = None
    eval_y = None
    epoch = None

    def __init__(self, _input_size, _output_size, output_activation='tanh'):
        self.input_size = _input_size
        self.output_size = _output_size
        self.output_activation = output_activation
        self.epoch = 10

    def get_cnn_model(self):
        """Build the convolutional branch over chart images; returns a flat tensor."""
        self.cnn_input = keras.layers.Input(shape=(299, 299, 5), name='cnn_input')
        model_cnn = keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(self.cnn_input)
        model_cnn = keras.layers.Conv2D(256, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
        model_cnn = keras.layers.Conv2D(256, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
        model_cnn = keras.layers.Conv2D(128, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
        model_cnn = keras.layers.AveragePooling2D((10, 10))(model_cnn)
        model_cnn = keras.layers.Flatten()(model_cnn)
        return model_cnn

    def get_dense_model(self):
        """Build the fully-connected branch over the flat input vector."""
        self.dense_input = keras.layers.Input(shape=(self.input_size,), name='dense_input')
        model_dense = keras.layers.Dense(128, activation='relu')(self.dense_input)
        model_dense = keras.layers.Dense(256, activation='relu')(model_dense)
        model_dense = keras.layers.Dense(256, activation='relu')(model_dense)
        model_dense = keras.layers.Dense(128, activation='relu')(model_dense)
        return model_dense

    def get_dense_out_model(self, model_dense, model_cnn):
        """Join both branches and build the actor and critic output heads."""
        model_share = keras.layers.concatenate([model_dense, model_cnn])
        model_share = keras.layers.Flatten()(model_share)
        model_share = keras.layers.Dense(512, activation='relu')(model_share)
        model_actor = keras.layers.Dense(256, activation='relu')(model_share)
        model_actor = keras.layers.Dense(128, activation='relu')(model_actor)
        model_actor = keras.layers.Dense(self.output_size, activation=self.output_activation, name='model_out')(model_actor)
        model_critic = keras.layers.Dense(256, activation='relu')(model_share)
        model_critic = keras.layers.Dense(128, activation='relu')(model_critic)
        model_critic = keras.layers.Dense(1, activation=self.output_activation, name='model_out')(model_critic)
        return model_actor, model_critic

    def build_model(self):
        """Create and cache self.model_actor / self.model_critic; return both."""
        model_dense = self.get_dense_model()
        model_cnn = self.get_cnn_model()
        model_actor, model_critic = self.get_dense_out_model(model_dense, model_cnn)
        self.model_actor = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_actor])
        self.model_critic = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_critic])
        return self.model_actor, self.model_critic

    def get_global_model(self, _class_name):
        """Build a fresh actor/critic pair and load the newest checkpoint, if any."""
        model_dense = self.get_dense_model()
        model_cnn = self.get_cnn_model()
        model_actor, model_critic = self.get_dense_out_model(model_dense, model_cnn)
        model_actor = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_actor])
        model_critic = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_critic])
        file_actor, file_critic = self.get_weight_file(_class_name)
        # BUGFIX: the original condition was `if file_actor is None:` and then
        # called load_weights(None); load only when a checkpoint actually exists.
        if file_actor is not None:
            model_actor.load_weights(file_actor)
            model_critic.load_weights(file_critic)
        return model_actor, model_critic

    def get_model_weight_path(self, _class_name):
        """Return (creating it on demand) the weight directory for `_class_name`."""
        paths = os.getcwd() + '/model_weight/' + _class_name + '/'
        if not os.path.exists(paths):
            os.makedirs(paths)
        return paths

    def get_weight_file(self, _class_name):
        """Return full paths of the latest (actor, critic) weight files.

        Assumes utils.get_today()/get_time() produce lexicographically
        sortable timestamps so that the newest checkpoint pair sorts last
        ('..._actor.h5' then '..._critic.h5') — TODO confirm the formats.
        Returns (None, None) when nothing has been saved yet.
        """
        path = self.get_model_weight_path(_class_name)
        file_list = os.listdir(path)
        file_list.sort()
        if len(file_list) < 2:
            # BUGFIX: the original indexed file_list[-2]/[-1], which raises
            # IndexError for a fresh directory, and returned bare filenames
            # that load_weights could not resolve outside the cwd.
            return None, None
        actor = os.path.join(path, file_list[-2])
        critic = os.path.join(path, file_list[-1])
        return actor, critic

    def model_evaluate_and_save(self, _actor, _critic, _class_name):
        """Save both networks' weights, timestamped, under the class folder."""
        today = utils.get_today()
        time_now = utils.get_time()
        path = self.get_model_weight_path(_class_name)
        file_path = path + _class_name + '_' + today + '_' + time_now + '_'
        _actor.save_weights(file_path + 'actor.h5')
        _critic.save_weights(file_path + 'critic.h5')
| aoba0203/magi | train/agent/BaseModel.py | BaseModel.py | py | 6,290 | python | en | code | 0 | github-code | 36 |
41709039982 | from AnilistPython import Anilist
import csv
# Query AniList and dump the matching anime records to a CSV file.
anilist = Anilist()

myList = anilist.search_anime(score=range(50, 99))

anilist.print_anime_info("Vinland saga")

field_names = ['name_romaji', 'name_english', 'starting_time', 'ending_time', 'cover_image', 'banner_image', 'airing_format', 'airing_status', 'airing_episodes', 'season', 'desc', 'average_score', 'genres', 'next_airing_ep']

# Open the csv file for writing
with open('AniMap.csv', 'w', newline= '', encoding='utf-8') as fileObj:
    # Create a CSV Dictwriter object
    writerObj = csv.DictWriter(fileObj, fieldnames=field_names)
    # BUGFIX: the declared fieldnames were never written, producing a
    # headerless CSV; also removed stray dataset-metadata text that had been
    # fused onto the final line and made the file unparsable.
    writerObj.writeheader()
    writerObj.writerows(myList)
9454046228 | # coding: utf-8
import os
from mongoengine import connect
from fastapi import APIRouter
from app.database.documents import Article
from app.database.utils import query_to_dict
router = APIRouter(prefix="/api", tags=["Api"])
@router.get("/articles")
def articles(skip: int = 0, limit: int = 10):
    """List the articles in database. This endpoint provides a `skip` and
    `limit` parameters to navigate among the articles. Throw a 400 HTTP response
    with an error message if arguments are not set properly.

    Args:
        skip (int, optional): how many documents must be skipped. Defaults to 0.
        limit (int, optional): limit to the retrieved number of documents.
        Defaults to 10.

    NOTE(review): returning a `(dict, 400)` tuple does not set the HTTP
    status in FastAPI (unlike Flask) — the whole tuple is serialized with
    status 200; presumably `raise HTTPException(status_code=400, ...)` was
    intended. Also, `connect()` is called on every request — confirm that
    mongoengine's connection caching makes this acceptable.
    """
    connect(host=os.environ["MONGODB_URL"])

    count = Article.objects.count()
    if skip + limit > count:
        return {"error": f"Database counts only {count} articles."}, 400
    elif skip < 0:
        return {"error": "`skip` argument must be >= 0."}, 400
    elif skip > limit:
        return {
            "error": (
                "`skip` argument value cannot be higher than `limit`"
                " argument value."
            )
        }, 400

    articles = query_to_dict(query_set=Article.objects[skip:skip + limit])
    return {"count": len(articles), "items": articles}
@router.get("/article")
def article(url: str):
    """Target an article to retrieve with its URL.

    Args:
        url (str): the URL of the article to retrieve.

    NOTE(review): when no article matches `url`, `articles[0]` raises
    IndexError (HTTP 500) — presumably a 404 response was intended; confirm
    the desired behavior.
    """
    connect(host=os.environ["MONGODB_URL"])
    articles = query_to_dict(query_set=Article.objects(url=url))
    # BUGFIX: removed stray dataset-metadata text that had been fused onto
    # this return line and made the file unparsable.
    return {"article": articles[0]}
28091727369 | from flask import render_template, request, redirect, url_for, send_from_directory, jsonify, make_response, flash, Markup
import os
from werkzeug.utils import secure_filename
from web_scripts import *
@app.route('/')
def home():
    """Render the landing page."""
    return render_template('main.html')
@app.route('/upload-music', methods = ['GET', 'POST'])
def upload_music():
    """Handle the upload page.

    POST accepts either a multipart file upload or (when that path fails)
    a JSON body carrying a YouTube `url`; both paths apply the reverb
    effect and return the processed file's path as JSON. GET renders the
    upload form.
    """
    if request.method == 'POST':
        try:
            #checking for file size using data from cookies
            if not allowed_filesize(request.cookies.get('filesize')):
                print('File exceeded maximum size')
                return make_response(jsonify({'message':'Exceeded Max Size'}), 300)
            music = request.files.get('file')
            impulse = request.cookies.get('user_choice')
            impulse = f'/{impulse}.wav'
            print(music.filename)
            if music.filename == "":
                print('Music must have a filename')
                return make_response(jsonify({'message':'Must have a filename'}), 300)
            if not allowed_file(music.filename):
                #checking for invalid extensions
                print('Invalid Music Extension')
                return make_response(jsonify({'message':'Invalid Music Extension (mp3 & wav only)'}), 300)
            else:
                #checking for malicious filenames
                filename = secure_filename(music.filename)
                #saving uploaded music into directory
                music.save(os.path.join(app.config["MUSIC_UPLOADS"],filename))
                #applying reverb algorithm
                path = build_reverb(filename, impulse)
                #downloads the slowed & reverbed file
                return make_response(jsonify({'message':path, 'title':filename}), 200)
        # BUGFIX: was a bare `except:`; any failure above (e.g. no file part)
        # deliberately falls through to the JSON/YouTube-URL branch.
        except Exception:
            url = request.get_json()['url']
            #downloading file from youtube
            try:
                filename, title = get_music(url)
                impulse = f'/{request.cookies.get("user_choice")}.wav'
                print('reverbing...')
                path = build_reverb(filename, impulse)
                return make_response(jsonify({'message':path, 'title':title}), 200)
            except Exception as e:
                # BUGFIX: jsonify({'message': e}) fails because exception
                # objects are not JSON serializable; send the text instead.
                return make_response(jsonify({'message': str(e)}), 300)
    return render_template('upload_music.html')
| philipk19238/slowed-and-reverbed | app/routes.py | routes.py | py | 2,366 | python | en | code | 2 | github-code | 36 |
31456650987 |
from nltk.corpus import movie_reviews
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
import string
# Clitic patterns, one per line (each line keeps its trailing newline).
# NOTE(review): the file handle is never closed — prefer a `with` block.
clitics = open('clitics', 'r').readlines()

# Tokens of one negative and one positive movie review, concatenated.
sents0 = movie_reviews.words("neg/cv000_29416.txt")
sents1 = movie_reviews.words("pos/cv041_21113.txt")
texts2 = sents0 + sents1
# ################################################
# Remove all newline characters.
def RemoveAllNewline():
    """Return the corpus tokens with newline characters stripped from each.

    BUGFIX: the original called string.splitlines(texts2), which raises
    AttributeError — the `string` module has no `splitlines` (it is a str
    method, and texts2 is a token sequence anyway).
    """
    return [w.replace("\n", "").replace("\r", "") for w in texts2]
# ################################################
# Replace HTML character codes (i.e., &...;) with whitespace.
def ReplaceHTMLCharacters():
    """Replace HTML character codes (i.e., &...;) with whitespace.

    BUGFIX: the original pattern '<*?&;>' only matched the literal text
    "&;>" (optionally preceded by '<') and never an actual entity such as
    "&amp;"; match `&` + name/number + `;` instead.
    """
    a = []
    for w in texts2:
        a.append(re.sub(r'&[#A-Za-z0-9]+;', ' ', w))
    return a
#################################################
# Remove all URLs .
def RemoveAllURLs():
    """Blank out every token that begins with http:// or https://."""
    return [re.sub(r'^https?://.*[\r\n]*', '', tok)
            for tok in ReplaceHTMLCharacters()]
#################################################
#Split each punctuation (using library called string to detectpunctuation symbols) into its own token using whitespace
def SplitEachPunctuation():
    """Drop punctuation tokens from the URL-cleaned token stream.

    BUGFIX: the original built `punct=[string.punctuation]` — a one-element
    list holding the WHOLE punctuation string — so `item not in punct` was
    true for every real token and nothing was ever filtered. Compare each
    token against the set of punctuation characters instead.
    """
    c = []
    punct = set(string.punctuation)
    for item in RemoveAllURLs():
        if item not in punct:
            c.append(item)
    return c
################################################
#Split clitics using whitespace (see clitics file in the section materials).
def SplitClitics():
    """Split clitics off by inserting whitespace before each clitic pattern.

    BUGFIX: the original appended one result PER CLITIC per token (so every
    token appeared len(clitics) times in the output) and used the raw
    readlines() entries, whose trailing '\\n' made the regexes unmatchable.
    Apply all clitic substitutions to each token, then append it once.
    """
    d = []
    for item in SplitEachPunctuation():
        for pattern in clitics:
            pattern = pattern.strip()
            if pattern:
                item = re.sub(pattern, ' ' + pattern, item)
        d.append(item)
    return d
################################################
# Remove stopwords.
def RemoveStopwords():
    """Filter English stopwords out of the clitic-split token stream."""
    stop_words = set(stopwords.words("english"))
    return [tok for tok in SplitClitics() if tok not in stop_words]
#################################################
#Each token is tagged with its part-of-speech using nltk tagger .
def pos():
    """Part-of-speech-tag the stopword-filtered tokens with the nltk tagger.

    BUGFIX: the original re-bound `f = word_tokenize(t)` on every iteration
    (discarding all previous results) and called pos_tag on a single string,
    which tags individual characters. Tag the whole token list in one call.
    """
    return pos_tag(RemoveStopwords())
#################################################
# Apply lemmatization using nltk.
def lemmatization():
    """Lemmatize every remaining token as a noun using nltk's WordNet lemmatizer.

    Perf: the lemmatizer is now constructed once instead of once per token.
    """
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(w, pos='n') for w in RemoveStopwords()]
#################################################
# Convert text to lowercase.
def lowCase():
    """Lower-case every stopword-filtered token."""
    return [tok.lower() for tok in RemoveStopwords()]
##################################################
print(lowCase())
| hassanMetwally/pre-processing | pre processing.py | pre processing.py | py | 2,678 | python | en | code | 0 | github-code | 36 |
13628425885 | from collections import Counter
import pandas as pd
import nltk
from src.tagger import Tagger
def get_counts(dataf):
    """Return (unigram Counter, bigram FreqDist) over the whitespace-split
    tokens of the file at `dataf`."""
    with open(dataf, "r") as fh:
        tokens = fh.read().split()
    unigram_counts = Counter(tokens)
    bigram_counts = nltk.FreqDist(nltk.bigrams(tokens))
    return unigram_counts, bigram_counts
def get_tps(word, nextword, unigrm, bigrm):
    """Return (forward TP, backward TP) for the bigram (word, nextword):
    forward = bigram count / count(word); backward = bigram count / count(nextword)."""
    w, nw = str(word), str(nextword)
    pair_count = bigrm[(w, nw)]
    w_count = unigrm[w]
    nw_count = unigrm[nw]
    # A count can be 0 in rare cases when spacy removes apostrophes (e.g. c')
    fwtp = pair_count / w_count if w_count else 0
    bwtp = pair_count / nw_count if nw_count else 0
    return fwtp, bwtp
def main(lang, dataf, prefix=""):
    """Scan `dataf` (one sentence per line) for Adj-Noun and Noun-Adj
    bigrams and write their forward/backward transition probabilities to
    three CSV files named '<prefix>_<lang>_*.csv'.

    Args:
        lang: language code passed to Tagger and recorded in every row.
        dataf: path to the corpus file.
        prefix: prefix for the output filenames.
    """
    # Find N-Adj, Adj-N pairs and get their FW-TP and BW-TP
    adjnoun = []
    nounadj = []
    alls = []
    j = 0

    # Tagger
    tagger = Tagger(lang)

    # Get unigrams and bigrams
    print("Getting counts...")
    unigrm, bigrm = get_counts(dataf)
    print("Counts done.")

    with open(dataf, "r") as fh:
        for line in fh:
            j += 1
            if j % 1000 == 0:
                print("%i sentences parsed" % j)
            sentence = line.strip()
            parsed = tagger.parseSentence(sentence)
            for i, word in enumerate(parsed):
                # The last word pairs with "" and is discarded by the
                # zero-count check below (Counter returns 0 for "").
                nextword = ""
                if (i + 1) < len(parsed):
                    nextword = parsed[i + 1]
                # There can be a count of 0 in rare cases when spacy removes apostrophes (e.g. c')
                if unigrm[str(word)] == 0 or unigrm[str(nextword)] == 0:
                    pass
                else:
                    # Adj-Noun
                    if tagger.isAdj(word) and tagger.isNoun(nextword):
                        # print("Adj-N", word, nextword)
                        fw, bw = get_tps(word, nextword, unigrm, bigrm)
                        adjnoun.append([lang, word, nextword, "fw", fw])
                        adjnoun.append([lang, word, nextword, "bw", bw])
                        alls.append([lang, "fw", fw])
                        alls.append([lang, "bw", bw])
                    # Noun-Adj
                    if tagger.isNoun(word) and tagger.isAdj(nextword):
                        # print("N-adj", word, nextword)
                        fw, bw = get_tps(word, nextword, unigrm, bigrm)
                        nounadj.append([lang, word, nextword, "fw", fw])
                        nounadj.append([lang, word, nextword, "bw", bw])
                        alls.append([lang, "fw", fw])
                        alls.append([lang, "bw", bw])

    # Create dataframes
    ANdf = pd.DataFrame(adjnoun, columns=["lang", "word", "nextword", "direction", "prob"])
    NAdf = pd.DataFrame(nounadj, columns=["lang", "word", "nextword", "direction", "prob"])
    alldf = pd.DataFrame(alls, columns=["lang", "direction", "prob"])

    # Save them to file
    ANdf.to_csv("{}_{}_AdjNoun_tps.csv".format(prefix, lang), sep=";")
    NAdf.to_csv("{}_{}_NounAdj_tps.csv".format(prefix, lang), sep=";")
    alldf.to_csv("{}_{}_tps.csv".format(prefix, lang), sep=";")
| rgalhama/retro_adjs | src/analyses_TPs/tps.py | tps.py | py | 3,266 | python | en | code | 0 | github-code | 36 |
151229982 | import sys
import uuid
import os
import shutil
from lxml import etree
import openpyxl
from zipfile import ZipFile
core = "docProps/core.xml"
def extractWorkbook(filename, outfile="xml"):
    """Extract docProps/core.xml from the xlsx (zip) archive into `outfile`."""
    with ZipFile(filename, "r") as archive:
        archive.extract(core, outfile)
def checkForCheaters(filename):
    """Compare the creator and lastModifiedBy fields of a core.xml file.

    Returns (flag, values): flag is True only when exactly two matching
    fields are found and they differ; returns None on any parsing error.

    NOTE(review): the parser loads DTDs, resolves entities and allows
    network access — an XXE-prone configuration. That looks deliberate for
    this CTF challenge; do not reuse this code elsewhere.
    """
    try:
        parser = etree.XMLParser(load_dtd=True, resolve_entities=True, no_network=False)
        tree = etree.parse(filename, parser=parser)
        root = tree.getroot()
        print(etree.tostring(root))
        arr=[]
        for child in root:
            # Tag names are namespaced, so substring-match on the local part.
            if 'creator' in child.tag or 'lastModifiedBy' in child.tag:
                arr.append(child.text)
                print(child.text)
        flag=True
        if len(arr)!=2 or arr[0]==arr[1]:
            flag=False
        return (flag, arr)
    except Exception:
        print("Error! checkForCheaters")
        return None
def getScore(filename, answers):
    """Count how many entries of `answers` match column A of the active
    sheet (answers[i] is compared against cell (i+1, 1), stringified).
    Returns the count, or None if the workbook cannot be read."""
    try:
        sheet = openpyxl.load_workbook(filename).active
        score = 0
        for idx, expected in enumerate(answers):
            cell_value = str(sheet.cell(row=idx + 1, column=1).value)
            if cell_value == expected:
                score += 1
        return score
    except Exception:
        print("Error! getScore")
        return None
if __name__ == "__main__":
    # Driver: extract core.xml from a fixed workbook, run the cheater check
    # and the scorer, then clean up the temp folder.
    # if len(sys.argv) == 2:
    #     filename = sys.argv[1]
    # else:
    #     print("Usage:", sys.argv[0], "<filename>")
    #     exit(1)
    filename = 'xls.xlsx'
    # Unique temp folder so concurrent runs don't collide.
    tmpFolder = "./uploads/" + str(uuid.uuid4())
    os.mkdir(tmpFolder)
    extractWorkbook(filename, tmpFolder)
    workbook = tmpFolder + "/" + core
    cheater = checkForCheaters(workbook)
    score = getScore(filename, ['aboba', 'aboba1', 'None', '123'])
    print(score)
    print("Removing tmp folder:", workbook)
    # BUGFIX: removed stray dataset-metadata text that had been fused onto
    # this final line and made the file unparsable.
    shutil.rmtree(tmpFolder)
24201075393 | # quick sort 구현
def quick_sort(start, end):
    """In-place quicksort of the module-level list `n` over [start, end].

    Uses the first element of the range as pivot; low/high scan toward each
    other swapping misplaced pairs, then the pivot is swapped into its
    final slot before recursing on both sides.
    """
    global n
    if start >= end:
        return
    pivot = n[start]
    # print("pivot:",pivot)
    low = start + 1
    high = end
    while low <= high:
        # advance low past elements <= pivot; retreat high past elements > pivot
        while low < end + 1 and n[low] <= pivot: low += 1
        while high > start and n[high] > pivot: high -= 1
        if low <= high:
            n[low], n[high] = n[high], n[low]
    # place the pivot between the two partitions
    if n[high] < pivot:
        n[start], n[high] = n[high], n[start]
    # print(n)
    # print("_pivot",n[high])
    quick_sort(start, high - 1)
    quick_sort(high + 1, end)
# BOJ 2693: for each of T test cases (one whitespace-separated list per
# line), sort ascending and print the third-largest value (n[-3]).
T = int(input())
n = []
while T:
    n = list(map(int, input().split()))
    quick_sort(0, len(n) - 1)
    print(n[-3])
    T -= 1
# test code
# n = [1,1,1,1,1,1, 9,4,5,1,2,6,1000,9]
#
# quick_sort(0, len(n) - 1)
#
# print(n) | superyodi/burning-algorithm | basic/boj_2693.py | boj_2693.py | py | 807 | python | en | code | 1 | github-code | 36 |
35257408476 | import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk,GdkPixbuf
from ui import login
import socket
import select
import json
import os
import redis
from ui import event
HOST = "127.0.0.1"
PORT = 5000
class ChatWindow(Gtk.Window):
    """Main chat window.

    Shows the login dialog first; once the "login" event fires, pulls the
    credentials the login window stored in redis, connects to the chat
    server and reveals the chat UI.
    """

    def __init__(self):
        super().__init__(title="Mega Chat | Chat")
        # fire regy_date when the login window emits the "login" event
        event.Event(name="login", callback=self.regy_date)
        self.login_win = login.LoginWindow()
        self.login_win.show_all()
        self.connection = None
        self.__interfase()

    def __interfase(self):
        """Build the static widget tree (contact pane, chat pane, send bar)."""
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_size_request(800, 600)

        master_box=Gtk.Box()
        master_box.set_spacing(5)
        self.add(master_box)

        left_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        left_box.set_size_request(200, -1)
        master_box.pack_start(left_box, False, True, 0)

        separator = Gtk.VSeparator()
        master_box.pack_start(separator, False, True, 0)

        pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
            filename = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "Avatar.png"
            ),
            width = 190,
            height = 190,
            preserve_aspect_ratio=True,
        )
        avatar = Gtk.Image.new_from_pixbuf(pixbuf)
        left_box.pack_start(avatar, False, True, 5)

        separator = Gtk.HSeparator()
        left_box.pack_start(separator, False, True, 5)

        user_label= Gtk.Label(label="User name")
        left_box.pack_start(user_label, False, True, 5)

        separator = Gtk.HSeparator()
        left_box.pack_start(separator, False, True, 5)

        l_space = Gtk.Alignment()
        left_box.pack_start(l_space, True, True, 5)

        separator = Gtk.HSeparator()
        left_box.pack_start(separator, False, True, 0)

        b_box = Gtk.ButtonBox()
        left_box.pack_start(b_box, False, True, 5)

        close_button = Gtk.Button(label="Close")
        close_button.connect("clicked", Gtk.main_quit)
        b_box.pack_start(close_button, True, True, 5)

        center_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        master_box.pack_start(center_box, True, True, 0)

        separator = Gtk.VSeparator()
        master_box.pack_start(separator, False, True, 0)

        scroll_box = Gtk.ScrolledWindow()
        scroll_box.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        center_box.pack_start(scroll_box, True, True, 5)

        self.chat_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        scroll_box.add(self.chat_box)

        separator = Gtk.HSeparator()
        center_box.pack_start(separator, False, False, 5)

        send_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        send_box.set_spacing(5)
        center_box.pack_start(send_box, False, True, 5)

        separator = Gtk.HSeparator()
        center_box.pack_start(separator, False, False, 5)

        smile_buttom = Gtk.Button(label = ":-}")
        send_box.pack_start(smile_buttom, False, False, 0)

        message_entry = Gtk.Entry()
        send_box.pack_start(message_entry, True, True, 0)

        send_button = Gtk.Button(label = "Send")
        send_box.pack_start(send_button, False, False, 0)

        right_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        right_box.set_size_request(200, 1)
        master_box.pack_start(right_box, False, True, 0)

        favorit_label = Gtk.Label(label="Избранное")
        right_box.pack_start(favorit_label, False, True, 5)

    def __add_message_box(self, data, input=True):
        """Append one message bubble to the chat pane.

        `data` must carry 'message' (markup text) and 'user'; `input` True
        renders an incoming message (avatar left), False an outgoing one.
        """
        message_frame = Gtk.Frame()
        message_box = Gtk.Box()
        message_frame.add(message_box)
        pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
            filename = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                f".contacts/{data['user']}.png" if input
                else "Avatar.png"
            ),
            width = 100,
            height = 100,
            preserve_aspect_ratio=True,
        )
        avatar = Gtk.Image.new_from_pixbuf(pixbuf)

        text_label = Gtk.Label()
        text_label.set_markup(data["message"])
        text_label.set_selectable(True)
        text_label.set_line_wrap(True)

        if input:
            message_box.pack_start(avatar, False, True, 5)
        else:
            text_label.set_justify(Gtk.Justification.RIGHT)
            message_box.pack_end(avatar, False, True, 5)
        message_box.pack_start(text_label, True, False, 5)
        self.chat_box.pack_start(message_frame, False, True, 5)

    def regy_date(self, *args, **kwargs):
        """Login-event callback: read credentials from redis, then connect."""
        self.login_win.hide()
        # connect to the in-memory cache the login window filled in
        storage = redis.StrictRedis()
        try:
            # BUGFIX: the original overwrote self.login_win with str(bytes)
            # (never setting the self.login that __create_conntection sends);
            # redis returns bytes, so decode rather than str().
            raw_login = storage.get("login")
            raw_password = storage.get("password")
            self.login = raw_login.decode("utf-8") if raw_login is not None else None
            self.password = raw_password.decode("utf-8") if raw_password is not None else None
        # BUGFIX: was a bare `except:` followed by a no-op `redis.RedisError`
        # expression statement; catch the redis error class properly.
        except redis.RedisError:
            print("Данных почемуто нет")
            Gtk.main_quit()
        else:
            self.__create_conntection()
            self.show_all()

    def __create_conntection(self):
        """Open the TCP connection to the chat server and authenticate."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.connect((HOST,PORT))
        # BUGFIX: the original read from self.connection, which is still None
        # here — use the freshly connected socket instead.
        result = self.sock.recv(2048)
        # BUGFIX: json.load expects a file object; json.loads parses a string.
        data = json.loads(result.decode("utf-8"))
        if data.get("status") != "OK":
            print(data.get("message"))
            Gtk.main_quit()
        else:
            data = json.dumps({"login": self.login, "password": self.password})
            self.sock.send(data.encode("utf-8"))
            self.__run()

    def __run(self):
        # TODO: incoming-message event loop (epoll scaffolding below).
        pass
        # self.epoll = select.epoll()
        # self.epoll.register(self.sock.fileno(), select.EPOLLIN)
| Kiril0l/gtk_new | ui/chat.py | chat.py | py | 7,521 | python | ru | code | 0 | github-code | 36 |
4454907121 | from flask_testing import TestCase
from config import create_app
from db import db
# (HTTP method, URL) pairs for endpoints that must reject requests lacking
# a valid auth token.
AUTHORISED_ENDPOINTS_DATA = (
    ("POST", "/new_resource/"),
    ("POST", "/tag_resource/"),
    ("POST", "/upload_file/1/"),
    ("PUT", "/resource_status/1/read/"),
    ("PUT", "/resource_status/1/dropped/"),
    ("PUT", "/resource_status/1/to_read/"),
    ("PUT", "/update_resource/"),
    ("PUT", "/update_user/"),
    ("DELETE", "/delete_resource/1/"),
    ("DELETE", "/delete_tag/1/"),
    ("DELETE", "/delete_file/1/"),
    ("GET", "/my_user/"),
    ("GET", "/my_resources/"),
    ("GET", "/my_tags/"),
    ("GET", "/my_resources_with_tag/1/"),
)

# Open endpoints that still require a request body.
UNAUTHORISED_ENDPOINTS_DATA = (
    ("POST", "/register/"),
    ("POST", "/login/"),
)

# Open endpoints that need no input at all.
NO_INPUT_ENDPOINTS_DATA = (("GET", "/general_stats/"),)
class TestApp(TestCase):
    """
    Some basic tests validating that everything is okay with the user authentication.
    """

    def create_app(self):
        # flask_testing hook: build the app with the testing configuration.
        return create_app("config.TestingConfig")

    def setUp(self):
        # Fresh schema before every test.
        db.init_app(self.app)
        db.create_all()

    def tearDown(self):
        # Drop everything so tests stay independent.
        db.session.remove()
        db.drop_all()

    def iterate_endpoints(
        self,
        endpoints_data,
        status_code_method,
        expected_resp_body,
        headers=None,
        payload=None,
    ):
        """
        A simple function to iterate across endpoints. Makes it easier to test stuff.

        NOTE(review): `payload` is accepted but never attached to any
        request, and `resp` stays None (crashing in status_code_method)
        for methods outside GET/POST/PUT/DELETE — confirm whether payload
        support was intended.
        """
        if not headers:
            headers = {}
        if not payload:
            payload = {}
        resp = None
        for method, url in endpoints_data:
            if method == "GET":
                resp = self.client.get(url, headers=headers)
            elif method == "POST":
                resp = self.client.post(url, headers=headers)
            elif method == "PUT":
                resp = self.client.put(url, headers=headers)
            elif method == "DELETE":
                resp = self.client.delete(url, headers=headers)
            status_code_method(resp)
            # An empty expected body means "status code only".
            if not expected_resp_body == "":
                self.assertEqual(resp.json, expected_resp_body)

    def test_protected_endpoints(self):
        """
        Go through all endpoints that require authentication and make sure you can't get any information without a token.
        """
        self.iterate_endpoints(
            AUTHORISED_ENDPOINTS_DATA,
            self.assert_401,
            {
                "message": "You need a token to get access to this endpoint \N{winking face}"
            },
        )

    def test_unprotected_endpoints(self):
        """
        Go through all endpoints that don't require a token, but require input, and make sure you don't get anything
        without providing the right input.
        """
        self.iterate_endpoints(UNAUTHORISED_ENDPOINTS_DATA, self.assert_400, "")

    def test_no_input_endpoints(self):
        """
        Go through all unprotected endpoints that don't need input and make sure you get a response 200 OK.
        """
        self.iterate_endpoints(NO_INPUT_ENDPOINTS_DATA, self.assert_200, "")

    def test_expired_token_raises(self):
        """
        Go though all protected endpoints and make sure you get the right error when you use an expired token.
        """
        # Token below is a real-format JWT whose exp claim is in the past.
        headers = {
            "Authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOjM2LCJleHAiOjE2NjA4OTE1MTZ9.pbx2hPf9hi7JhHkRPsHeQIrcDKsZn9n80jNCVaPo3IA"
        }
        self.iterate_endpoints(
            AUTHORISED_ENDPOINTS_DATA,
            self.assert_401,
            {"message": "Sorry, your token has expired. Please, log in again."},
            headers,
        )

    def test_invalid_token_raises(self):
        """
        Go though all protected endpoints and make sure you get the right error when you use an invalid token.
        """
        headers = {"Authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGcin9n80jNCVaPo3IA"}
        self.iterate_endpoints(
            AUTHORISED_ENDPOINTS_DATA,
            self.assert_401,
            {
                "message": "Sorry, your token is invalid \N{unamused face}. Please, register or login again to obtain a valid token."
            },
            headers,
        )
| tedypav/FlaskCourse_OnlinePersonalLibrary | tests/test_application.py | test_application.py | py | 4,228 | python | en | code | 1 | github-code | 36 |
28318096244 | from sqlalchemy import create_engine, Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import pymysql
pymysql.install_as_MySQLdb()
# Build the connection engine object; echo=True logs every emitted SQL statement.
engine = create_engine("mysql://root@localhost/py1709_torn_db1",
                       encoding="utf-8", echo=True)
# Obtain a connection session (Session is the factory, session the working instance).
Session = sessionmaker(bind=engine)
session = Session()
# Build the declarative base class, bound to the engine.
Base = declarative_base(bind=engine)
# Define a custom mapped type.
# Once the class is created, sqlalchemy builds an instrumentation manager for it;
# that manager wires the class and its table together for all persistence operations.
class Person(Base):
    """Declarative ORM model mapped to the ``persons`` table."""
    __tablename__ = "persons"
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    age = Column(Integer)
# Inspect a class's table metadata through its __table__ attribute.
# Functions on Base.metadata synchronise the mapped classes with the database.
# print(Person.__table__)
# Base.metadata.create_all() # create a table for every class sqlalchemy manages
# 1. Objects created directly in the program live in memory only and vanish on exit.
# Transient (detached) state: a plain object created by the program.
# Traits: present in the program, absent from the session cache and the database.
p = Person(name="jerry", age=12)
print(p, p.id, p.name, p.age)
# 2. session.add() hands the object over to sqlalchemy's session cache.
# Pending (managed) state: tracked by the session, no database row yet.
# Traits: present in the program and the cache, absent from the database.
session.add(p)
# 3. session.commit() flushes the cached data to the database for persistence.
# Persistent state: the object exists in the program AND has a matching row.
# Traits: present in the program (now with an id), in the cache, and in the database.
session.commit()
print(p.id, p.name, p.age)
# Update:
# Changing a cached object makes it differ from its database row ("dirty" data);
# dirty data is expected -- an update simply commits that state to the database.
p.name = "shuke"
# session.dirty lists the dirty objects currently held in the cache.
session.commit()
# Delete:
session.delete(p)# mark the cached object for deletion, then commit it
session.commit()
# Note that only persistent objects can be deleted:
#p2 = Person(id=1)
#session.delete(p2)# raises: cannot delete because p2 "is not persisted"
| laomu/py_1709 | 2.Tornado_cursor/days02数据模型/demo02sqlalchemy增删改.py | demo02sqlalchemy增删改.py | py | 2,838 | python | zh | code | 0 | github-code | 36 |
23987656239 | import sys
from cefpython3 import cefpython as cef
from widgets.cefapplication import CefApplication
from widgets.config import ZOOM_FACTOR
from widgets.mainwindow import MainWindow
def main():
    """
    Create the CEF-backed Qt application and run it until the window closes.
    See https://github.com/cztomczak/cefpython/blob/master/api/ApplicationSettings.md
    for more settings
    """
    sys.excepthook = cef.ExceptHook  # To shutdown all CEF processes on error
    # CEF application settings; see the ApplicationSettings doc linked above.
    settings = {
        'auto_zooming': f'{ZOOM_FACTOR}'
    }
    cef.Initialize(settings)
    app = CefApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    main_window.activateWindow()
    main_window.raise_()
    # Blocks inside the Qt event loop until the application quits.
    app.exec_()
    if not cef.GetAppSetting("external_message_pump"):
        app.stopTimer()
    del main_window  # Just to be safe, similarly to "del app"
    del app  # Must destroy app object before calling Shutdown
    cef.Shutdown()
if __name__ == '__main__':
    main()
| slo-ge/viewsive | src/start.py | start.py | py | 945 | python | en | code | 0 | github-code | 36 |
36378630131 | import unittest
import os
import opendatasets as od
import sqlite3
import pandas as pd
#Testing automated pipeline
class TestDownloadAndSaveDataset(unittest.TestCase):
    """End-to-end pipeline check: download the Kaggle CSV and persist it into SQLite."""
    def setUp(self):
        # Set up necessary variables for testing
        self.dataset_url = 'https://www.kaggle.com/datasets/thedevastator/jobs-dataset-from-glassdoor/download?datasetVersionNumber=2'
        self.file_path = 'jobs-dataset-from-glassdoor/salary_data_cleaned.csv'
        self.db_path = '../data/clean_salary.sqlite'
    def test_download_and_save_dataset(self):
        # Download dataset (opendatasets fetches it from Kaggle).
        od.download(self.dataset_url)
        # Check if the downloaded file exists
        self.assertTrue(os.path.exists(self.file_path))
        # Read the CSV file into a DataFrame
        cleancsv_df = pd.read_csv(self.file_path)
        # Check if DataFrame is not empty
        self.assertFalse(cleancsv_df.empty)
        # Connect to SQLite database and save the DataFrame
        conn = sqlite3.connect(self.db_path)
        cleancsv_df.to_sql('clean_salary', conn, index=False, if_exists='replace')
        # Check if the table exists in the database
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='clean_salary';")
        result = cursor.fetchone()
        self.assertIsNotNone(result)
        # Close the database connection
        conn.close()
    def tearDown(self):
        # Clean up after the test: remove both the CSV and the SQLite file.
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if os.path.exists(self.db_path):
            os.remove(self.db_path)
if __name__ == '__main__':
    unittest.main()
| arpita739/made-template | project/test.py | test.py | py | 1,677 | python | en | code | null | github-code | 36 |
42778570533 | from typing import Any
import pytest
from pydantic import ValidationError
from toucan_connectors.toucan_connector import ToucanDataSource
class DataSource(ToucanDataSource):
    """Minimal concrete data source used to exercise ToucanDataSource validation."""
    collection: str # required, validated against type
    query: Any # required, not validated
    comment: str = None # not required, no default, validated against type when present
    test_default: int = 101 # not required because it has a default, validated
def test_instantiation():
    """All required args of the right type: the model builds and defaults apply."""
    # no errors with required args at the right type
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
    }
    mds = DataSource(**data_source)
    assert mds.name == data_source['name']
    assert mds.test_default == 101
def test_required_arg():
    """Missing required field ``domain`` raises a 'field required' error."""
    # error with missing required arg
    data_source = {'name': 'my_name', 'collection': 'my_collection', 'query': {}}
    with pytest.raises(ValidationError) as e:
        DataSource(**data_source)
    assert 'domain' in e.value.errors()[0]['loc']  # Are we testing pydantic here ?
    assert e.value.errors()[0]['msg'] == 'field required'
def test_required_arg_wrong_type():
    """A required field of the wrong type raises a type error."""
    # error with required arg of wrong type
    data_source = {'domain': [], 'name': 'my_name', 'collection': 'my_collection', 'query': {}}
    with pytest.raises(ValidationError) as e:
        DataSource(**data_source)
    assert 'domain' in e.value.errors()[0]['loc']
    assert e.value.errors()[0]['msg'] == 'str type expected'
def test_not_required():
    """Optional field ``comment`` is accepted when supplied."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'comment': 'test',
    }
    mds = DataSource(**data_source)
    assert mds.comment == 'test'
def test_default_override():
    """A supplied value for ``test_default`` overrides its declared default."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'test_default': 102,
    }
    mds = DataSource(**data_source)
    assert mds.test_default == 102
def test_default_override_validated():
    """Overriding a defaulted field with the wrong type is still validated."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'test_default': {},
    }
    with pytest.raises(ValidationError):
        DataSource(**data_source)
def test_unknown_arg():
    """Unknown fields are rejected (the model forbids extra attributes)."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'unk': '@',
    }
    with pytest.raises(ValidationError) as e:
        DataSource(**data_source)
    assert 'unk' in e.value.errors()[0]['loc']
    assert e.value.errors()[0]['msg'] == 'extra fields not permitted'
def test_get_form():
    """The default JSON-schema form exposes every base ToucanDataSource field."""
    default_form = ToucanDataSource.get_form(None, {})
    assert default_form == {
        'title': 'ToucanDataSource',
        'type': 'object',
        'properties': {
            'domain': {'title': 'Domain', 'type': 'string'},
            'name': {'title': 'Name', 'type': 'string'},
            'type': {'title': 'Type', 'type': 'string'},
            'load': {'title': 'Load', 'type': 'boolean', 'default': True},
            'live_data': {'title': 'Live Data', 'type': 'boolean', 'default': False},
            'validation': {'title': 'Validation', 'type': 'object'},
            'parameters': {'title': 'Parameters', 'type': 'object'},
            'cache_ttl': {
                'title': "Slow Queries' Cache Expiration Time",
                'description': 'In seconds. Will override the 5min instance default and/or the connector value',
                'type': 'integer',
            },
        },
        'required': ['domain', 'name'],
        'additionalProperties': False,
    }
| ToucanToco/toucan-connectors | tests/test_datasource.py | test_datasource.py | py | 3,757 | python | en | code | 16 | github-code | 36 |
28511009690 | import tester # tester
import random
import pexpect
import time
import struct
import sys
import socket
import importlib.util
EASYDB_PATH = "/cad2/ece326f/tester/bin/easydb"
def load_module(modname):
    """Import ``<modname>.py`` from the asst3 test-data directory and return the module."""
    path = tester.datapath(modname + ".py", 'asst3')
    spec = importlib.util.spec_from_file_location(modname, path)
    mod = importlib.util.module_from_spec(spec)
    # Make the tester's search path available before executing the module.
    tester.includepath()
    spec.loader.exec_module(mod)
    return mod
def try_connect(db, server):
    """Connect ``db`` to ``server``, retrying on ConnectionRefusedError.

    Up to three refused attempts are retried after a one-second pause.  The
    final attempt is made outside the loop so that a persistent refusal
    propagates to the caller instead of being silently swallowed.

    Returns whatever ``db.connect`` returns.

    Fixes two defects of the original: an extra, unprotected ``db.connect``
    call inside the except handler whose result was discarded, and an
    implicit ``return None`` after three failed retries.
    """
    for _attempt in range(3):
        try:
            return db.connect(server.host, server.port)
        except ConnectionRefusedError:
            print("Connection Refused -- retrying in 1 second")
            time.sleep(1)
    # Last chance: let any error propagate so the caller sees the failure.
    return db.connect(server.host, server.port)
class Client:
    """Minimal raw-socket client speaking the easydb wire protocol."""
    def __init__(self, server):
        # make sure server is running
        assert(server.program)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((server.host, server.port))
    # Dump rows of a table from the database.
    # table_id: int, table id of the table.
    def dump(self, table_id):
        # Two 32-bit big-endian words: 42 (presumably the DUMP command code)
        # followed by the table id.
        self.sock.send(bytearray([0, 0, 0, 42, 0, 0, 0, table_id]))
        resp = self.sock.recv(4096)
        # First 4 bytes of the reply are a status word; 1 means success.
        if struct.unpack("!i", resp[:4])[0] == 1:
            # Payload is newline-separated rows of tab-separated columns.
            rows = resp[4:].decode("utf-8").split('\n')
            return [ row.split('\t') for row in rows if len(row) > 0 ]
        return None
    def close(self):
        self.sock.close()
        del self.sock
    def __del__(self):
        # Close the socket if close() was never called explicitly.
        if hasattr(self, 'sock'):
            self.sock.close()
# convenience function
def dump(server, table_id):
    """One-shot helper: open a Client on ``server`` and dump ``table_id``."""
    client = Client(server)
    return client.dump(table_id)
class Server:
    """Wraps a pexpect-spawned easydb server process for integration tests."""
    def __init__(self, filename=None):
        self.host = "localhost"
        # Random unprivileged port so concurrent test runs don't collide.
        self.port = random.randint(1024, 9999)
        if filename is None:
            self.schema = tester.datapath('export.txt', 'asst3')
        else:
            self.schema = filename
    def start(self, datafile=None):
        """Spawn the server; return True once its prompt appears, else False."""
        if datafile is not None:
            self.datafile = tester.datapath(datafile, 'asst3')
        else:
            self.datafile = ""
        path = "%s -g %d %s localhost %s"%(EASYDB_PATH, self.port, self.schema,
                                           self.datafile)
        self.program = pexpect.spawn(path, [], encoding='utf-8')
        # All server output is appended to tester.log, tagged per test script.
        self.program.logfile = open('tester.log', 'a')
        self.program.logfile.write("\n-------- %s --------\n\n"%sys.argv[0])
        idx = self.program.expect([r"\]", pexpect.EOF])
        self.program.logfile.flush()
        if idx != 0:
            # The server exited before printing its prompt: clean up, report failure.
            self.program.close(force=True)
            self.program.logfile.close()
            del self.program
            return False
        return True
    def expect(self, substr, timeout=3):
        """Wait for a literal substring in the server output; None on timeout."""
        try:
            return self.program.expect_exact(substr, timeout=timeout)
        except:
            return None
    def look(self, regex, timeout=3):
        """Wait for a regex match in the server output; None on timeout."""
        try:
            return self.program.expect(regex, timeout=timeout)
        except:
            return None
    def end(self):
        # Force-kill the server process and flush/close its log file.
        self.program.terminate(force=True)
        self.program.expect(pexpect.EOF)
        self.program.logfile.flush()
        self.program.close(force=True)
        self.program.logfile.close()
        del self.program
    def __del__(self):
        if hasattr(self, 'program'):
            self.end()
def start_test(testname, marks):
    """Create a tester.Core worth ``marks`` for ``testname`` and set up sys.path."""
    test = tester.Core(testname, marks)
    tester.includepath()
    return test
# Run the test case of a given function and return updated total mark.
# func: python function, function to run the test case on; funcArgs: tuple, arguments of the function to run; case_number: int or str, case number;
# mark:int, mark of this test case; total_mark: int, total mark so far; error_raise: bool, True if an given error should raise in the test casek;
# error: error that should / should not raise in the test case; false_error: bool, False if other errors can raise but not this one.
def run_test_case(func, funcArgs, case_number, mark, total_mark, error_raise, error, false_error=False):
    """Run one test case, print PASS/FAIL, and return (updated_total_mark, result)."""
    result = None
    try:
        # Run the funcion with given arguments.
        result = func(*funcArgs)
    except error as e:
        # If other errors can raise but not this one...
        if false_error:
            print("CASE {} FAIL: an error except {} should raise, but {} raises instead: {}".format(case_number, error, error, str(e)))
        # If the given error should raise...
        elif error_raise and (not false_error):
            total_mark = total_mark + mark
            print("CASE {} PASS".format(case_number))
        # If an error should not raise...
        else:
            print("CASE {} FAIL: no error should raise, but an errror raises: {}".format(case_number, str(e)))
    except Exception as e:
        # If other errors raise but not this particular one...
        if false_error:
            total_mark = total_mark + mark
            print("CASE {} PASS".format(case_number))
        else:
            # If a particular error should raise but other error raise instead...
            if error_raise:
                print("CASE {} FAIL: {} should raise, but other error raises instead: {}".format(case_number, error, str(e)))
            # If an error raises while the code should not raise any error...
            else:
                print("CASE {} FAIL: no error should raise, but an error raises: {}".format(case_number, str(e)))
    else:
        # No exception was raised at all.
        # If an error should raise...
        if error_raise:
            if false_error:
                print("CASE {} FAIL: an error except {} should raise, but no error raises".format(case_number, error))
            else:
                print("CASE {} FAIL: {} should raise, but no error raises".format(case_number, error))
        # If an error should not raise...
        else:
            total_mark = total_mark + mark
            print("CASE {} PASS".format(case_number))
    # Return the updated total mark.
    return (total_mark, result)
| CoraZhang/Object-Oriented-Programming | tester/scripts/asst3.py | asst3.py | py | 6,122 | python | en | code | 0 | github-code | 36 |
34105486078 | from pymongo import MongoClient
from wa_api import WA_API
# Collection name aliases used throughout MongoManage.
AC = "archers"
CC = "competitions"
QC = "qualifications"
QAC = "qualifications_arrows"
class MongoManage:
    """Data-access layer for archery competition data stored in MongoDB."""
    def __init__(self, host='localhost', port=27017, rs=None):
        # rs: optional replica-set name; when given, reads may go to secondaries.
        if rs:
            self.client = MongoClient(host=host, port=port, replicaset=rs, readPreference='primaryPreferred')
        else:
            self.client = MongoClient(host=host, port=port)
        self.db = None
    def set_database(self, db_name='wa'):
        """Select the working database; must be called before any query method."""
        self.db = self.client[db_name]
    def insert(self, collection, obj):
        """
        Insert for the collections with no dependencies with other collections
        OR where dependencies has already been resolved
        """
        # Returns the new ObjectId, or -1 on failure.
        try:
            result = self.db[collection].insert_one(obj)
            return result.inserted_id
        except:
            print("{0} collection: failed to insert object: {1}".format(collection, obj))
            return -1
    def insert_qualification(self, qualification):
        """Insert a qualification, resolving its WA ids to Mongo ObjectIds first.

        ``qualification['competition_id']`` and ``qualification['archer_ids']``
        arrive as World Archery ids and are rewritten in place to ObjectIds.
        Missing archers are fetched from the WA API and inserted on the fly.
        Returns the inserted ObjectId, or -1 on failure.
        """
        competition_id = self.db[CC].find_one({'wa_id': qualification['competition_id']})['_id']
        qualification['competition_id'] = competition_id
        wa_archer_ids = [aid for aid in qualification['archer_ids']]
        archer_ids = []
        for wa_ai in wa_archer_ids:
            try:
                aid = self.db[AC].find_one({'wa_id': wa_ai})['_id']
            except TypeError:
                # if no such archer is found in MongoDB, find him via API and add him
                print("Archer with {0} World Archer ID was not found in the DB, inserting it...")
                wa = WA_API()
                archer = wa.db__get_single_archer(wa_ai)
                aid = self.insert(AC, archer)
                print("...inserting of archer is done, _id: {0}".format(aid))
            archer_ids.append(aid)
        qualification['archer_ids'] = archer_ids
        try:
            result = self.db[QC].insert_one(qualification)
            return result.inserted_id
        except:
            print("Qualifications collection: failed to insert qualification: {0}".format(qualification))
            return -1
    def get_qualifications(self, individual_team=None):
        """
        :param individual_team:
            1 - return only individual qualification results
            2 - return only team qualification results
            (others) - return both

        Returns a list of qualification docs with competition/archer ids
        translated back to World Archery ids.
        """
        if individual_team == 1:
            qualifications = self.db[QC].find({"is_team": 0})
        elif individual_team == 2:
            qualifications = self.db[QC].find({"is_team": 1})
        else:
            qualifications = self.db[QC].find()
        # populate the Competitions and Archers Collections
        qualifications = list(qualifications)
        for i in range(0, len(qualifications)):
            qualifications[i]['competition_id'] = self.db[CC].find_one({
                "_id": qualifications[i]['competition_id']
            })['wa_id']
            qualifications[i]['archer_ids'] = [self.db[AC].find_one({
                "_id": aid
            })['wa_id'] for aid in qualifications[i]['archer_ids']]
        return qualifications
    def get_arrows_within_competition(self, competition_wa_id):
        """Return a flat list of all individual-qualification arrows shot in a competition."""
        competition_id = self.db[CC].find_one({"wa_id": competition_wa_id})['_id']
        qualifications = self.db[QC].find({"competition_id": competition_id, "is_team": 0})
        qualification_ids = [q['_id'] for q in qualifications]
        qualification_arrows = self.db[QAC].find({"qualification_id": {"$in": qualification_ids}})
        qualification_arrows = [qa['arrows'] for qa in qualification_arrows]
        arrows = []
        # Flatten the per-qualification arrow lists into one list.
        for arrows_list in qualification_arrows:
            arrows.extend(arrows_list)
        return arrows
    def get_competitions(self):
        """Return every competition document."""
        competitions = self.db[CC].find()
        return list(competitions)
    def get_individual_qualification_scores_within_competition(self, competition_wa_id):
        """Return all individual qualification docs for one competition (by WA id)."""
        competition_id = self.db[CC].find_one({"wa_id": competition_wa_id})['_id']
        qualifications = self.db[QC].find({"competition_id": competition_id, "is_team": 0})
        return list(qualifications)
    def get_maximum_individual_qualification_score(self):
        """Return the highest-scoring individual qualification per category.

        Categories 'RM'/'RW' are assumed to be recurve men/women -- TODO confirm.
        """
        male = self.db[QC].find({'is_team': 0, 'category': 'RM'}).sort([('score', -1)]).limit(1)[0]
        female = self.db[QC].find({'is_team': 0, 'category': 'RW'}).sort([('score', -1)]).limit(1)[0]
        # populate the Competitions And Archers Collections
        male['competition_id'] = self.db[CC].find_one({'_id': male['competition_id']})
        female['competition_id'] = self.db[CC].find_one({'_id': female['competition_id']})
        male['archer_ids'] = self.db[AC].find_one({'_id': male['archer_ids'][0]})
        female['archer_ids'] = self.db[AC].find_one({'_id': female['archer_ids'][0]})
        return {
            "male": male,
            "female": female,
        }
    def get_archer_results(self, archer_wa_id):
        """Return an archer document plus every qualification the archer shot in."""
        archer = self.db[AC].find_one({"wa_id": archer_wa_id})
        qualifications = self.db[QC].find({"archer_ids": archer['_id']})
        qualifications = list(qualifications)
        # populate the Competitions Collection
        for i in range(0, len(qualifications)):
            qualifications[i]['competition_id'] = self.db[CC].find_one({'_id': qualifications[i]['competition_id']})
        return {
            "archer": archer,
            "qualifications": qualifications,
        }
    def get_country_results(self, NOC):
        """Return the qualifications shot by archers of one country (NOC code).

        The aggregation unwinds archer_ids and joins each against the archers
        collection, so one qualification appears once per matching archer.
        """
        qualifications = self.db[QC].aggregate([
            {
                "$unwind": "$archer_ids",
            },
            {
                "$lookup":
                    {
                        "from": AC,
                        "localField": "archer_ids",
                        "foreignField": "_id",
                        "as": "archers",
                    },
            },
            {
                "$match": {"{0}.NOC".format(AC): NOC},
            },
        ])
        # The Mongo Request above does return a little broken results
        # So that's why we have to adjust and combine them a bit
        qualifications = list(qualifications)
        # Deduplicate qualifications by _id, then merge the archers of the
        # duplicated entries back into the surviving document.
        unique_qualifications = list({q['_id']: q for q in qualifications}.values())
        for q in qualifications:
            for i in range(0, len(unique_qualifications)):
                if q['_id'] == unique_qualifications[i]['_id']:
                    for archer in unique_qualifications[i]['archers']:
                        if archer['wa_id'] == q['archers'][0]['wa_id']:
                            break
                    else:
                        unique_qualifications[i]['archers'].append(q['archers'][0])
        # For each of unique qualifications,
        # populate the Competitions Collection
        # and delete the unnecessary "archer_ids" field
        for i in range(0, len(unique_qualifications)):
            unique_qualifications[i]['competition_id'] = self.db[CC].find_one({"_id": unique_qualifications[i]['competition_id']})
            try:
                del unique_qualifications[i]['archer_ids']
            except KeyError:
                pass
        return unique_qualifications
| Tayum/di0d | courses/database_discipline/course3_term2/coursework/mongomanage.py | mongomanage.py | py | 7,237 | python | en | code | 0 | github-code | 36 |
12780763778 |
import random
def main():
    """Interactive addition quiz: ask 10 questions and report a percentage score."""
    questionCount = 10
    correctResults = 0
    print("Test d'addition. Combien de chiffres voulez-vous?")
    chiffre = int(input())
    # Generalised from the original 1/2/3 if-chain: any digit count now works,
    # and an out-of-range answer no longer crashes with an undefined maxValue.
    if chiffre < 1:
        chiffre = 1
    maxValue = 10 ** chiffre
    for i in range(1, questionCount+1):
        a = random.randrange(0, maxValue)
        b = random.randrange(0, maxValue)
        r = a + b
        print("%d: que vaut %d + %d ?"%(i, a, b))
        user_result_string = input()
        try:
            user_number = int(user_result_string)
        except ValueError:
            # Non-numeric input simply counts as a wrong answer.
            user_number = -1
        if user_number == r:
            correctResults += 1
            print("correct. score : %d/%d"%(correctResults,i))
        else:
            print("désole, mais %d + %d vaut %d et non %s. score : %d/%d"%(a,b,r,user_result_string, correctResults, i))
        print("")
    print("bravo, mais fais attention, ce sera plus compliqué la prochaine fois.")
    pct_correct = 100 * correctResults / questionCount
    if pct_correct == 100:
        commentaire = "parfait, t'es le boss"
    elif pct_correct > 80:
        commentaire = "vraiment pas mal"
    elif pct_correct > 50:
        commentaire = "tu feras mieux la prochaine fois"
    else:
        commentaire = "retourne à la maternelle"
    print("ton score est de %d%%. %s."%(pct_correct, commentaire))
    # Keep the console window open until the user presses Enter.
    input()
input()
main() | janoscoder/experiments | incubator/mahault_add_training.py | mahault_add_training.py | py | 1,437 | python | en | code | 0 | github-code | 36 |
74646989223 | from unittest import TestCase
from src.dense_retriever import DenseRetriever
class TestRetrieval(TestCase):
    """Smoke test: the semantically closest indexed sentence is retrieved first."""
    def test_retrieval(self):
        retriever = DenseRetriever("msmarco-distilbert-base-v3")
        sentences = ["this is a test", "the food is hot on the table"]
        # Index each sentence under its position (as a string id).
        for index, sentence in enumerate(sentences):
            retriever.add_text_and_index(sentence, str(index))
        query = "the food is warm"
        expected = "1"
        # The top hit for the query should be the food sentence (index "1").
        predicted = retriever.get_indices_and_scores_from_text(query)
        assert predicted[0][0] == expected
| fractalego/samsumbot_client | test/test_retriever.py | test_retriever.py | py | 565 | python | en | code | 0 | github-code | 36 |
21539046169 | import random
import array as arr
# Random masses (kg, 35-55) for the 12 pupils of the subgroup.
masiv = arr.array('i', (random.randint(35, 55) for _ in range(12)))
print("Маси учнів підгрупи:")
print(masiv)
# Find the largest mass and the (1-based) position of the pupil carrying it.
heaviest = max(masiv)
position = masiv.index(heaviest)
print(f"Найбільша маса: {heaviest}")
print(f"Номер учня, маса якого найбільша: {position + 1}")
38922961353 | from urllib.request import FancyURLopener
from bs4 import BeautifulSoup
from random import choice
import csv
from time import sleep
from urllib.parse import quote,unquote
import json
# Desktop browser User-Agent strings; one is picked at random per run so the
# scraper does not always identify itself the same way.
user_agents = [
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
]
class MyOpener(FancyURLopener, object):
    # FancyURLopener sends self.version as the User-Agent header.
    version = choice(user_agents)
myopener = MyOpener()
def _ids():
    """Return the id column (last field) of every data row in meta_final.csv."""
    with open("meta_final.csv", 'r') as infile:
        reader = csv.reader(infile)
        next(reader)  # skip the header row
        return [row[-1] for row in reader]
def fetch_cast_data():
    """Download the IMDb full-credits page for every title id into data/<id>.html.

    Sleeps half a second between requests to stay polite to the server.
    """
    for index, _id in enumerate(_ids()):
        print(index)
        url = 'http://www.imdb.com/title/{}/fullcredits?ref_=tt_ql_1'.format(_id)
        try:
            html = myopener.open(url).read()
        except Exception:
            # Record a sentinel body instead of aborting the crawl.  Fixes two
            # defects: the bare `except:` swallowed KeyboardInterrupt too, and
            # the str sentinel "error" raised TypeError when written to a
            # binary-mode file.
            html = b"error"
        with open('data/' + _id + '.html', 'wb') as outfile:
            outfile.write(html)
        sleep(.5)
fetch_cast_data() | nmaswood/tv_scraping | fetch_cast_html.py | fetch_cast_html.py | py | 1,368 | python | en | code | 0 | github-code | 36 |
71056879784 | from wordcloud import WordCloud
import matplotlib.pyplot as plt
from collections import Counter
from konlpy.tag import Okt
from PIL import Image
import numpy as np
import sys
# Collect the user-configurable inputs.
least_num = int(input("워드 클라우드 단어 최소 빈도를 정수로 입력하시오.:"))
directory = input("데이터의 주소를 입력해 주세요.(파일단위입니다.):")
temp_save_dirc = input("완성된 워드클라우드가 저장될 주소를 입력해 주세요.:")
# Normalise Windows-style backslashes to forward slashes.  Replaces the
# original character-by-character copy loops with str.replace, which does
# exactly the same single-character substitution.
real_dirc = directory.replace("\\", "/")
real_save_dirc = temp_save_dirc.replace("\\", "/") + "/Word_cloud.png"
# Turn on matplotlib interactive mode.
plt.ion()
# Read the source text for the word cloud.
with open(real_dirc, 'r', encoding='utf-8') as f:
    text = f.read()
# Set up the Okt morphological analyser.
okt = Okt()
# Extract nouns only.
nouns = okt.nouns(text)
# Drop single-character words.
words = [n for n in nouns if len(n) > 1]
# Count the frequency of each remaining word.
c = Counter(words)
# Show the raw frequency table.
print(c)
# Enforce the minimum-frequency threshold (iterate over a snapshot of the
# keys because entries are deleted during the loop).
for word in list(c.keys()):
    if c[word] < least_num:
        del c[word]
# Abort when the threshold removed every word.
if len(c) == 0:
    print("최소 빈도수가 너무 큽니다. 다시 설정해 주세요.")
    print("프로그램을 종료합니다.")
    sys.exit()
# Build and display the word cloud.
wc = WordCloud(background_color="white" , font_path=r"C:/Windows/Fonts/malgun.ttf", width=600, height=600, scale=2.0, max_font_size=250)
gen = wc.generate_from_frequencies(c)
plt.figure()
plt.imshow(gen)
# Save the result to disk.
wc.to_file(real_save_dirc)
43301493084 | from rpython.jit.metainterp.counter import JitCounter
def test_get_index():
    """With size=128 (7 bits) the index is the top 7 bits of the 32-bit hash."""
    jc = JitCounter(size=128)     # 7 bits
    for i in range(10):
        hash = 400000001 * i
        index = jc._get_index(hash)
        assert index == (hash >> (32 - 7))
def test_get_subhash():
    """The subhash is the low 16 bits of the full hash."""
    assert JitCounter._get_subhash(0x518ebd) == 0x8ebd
def test_fetch_next_hash():
    """Hashes from fetch_next_hash must spread indexes well and keep subhashes distinct."""
    jc = JitCounter(size=2048)
    # check the distribution of "fetch_next_hash() & ~7".
    blocks = [[jc.fetch_next_hash() & ~7 for i in range(65536)]
              for j in range(2)]
    for block in blocks:
        assert 0 <= jc._get_index(block[0]) < 2048
        assert 0 <= jc._get_index(block[-1]) < 2048
        assert 0 <= jc._get_index(block[2531]) < 2048
        assert 0 <= jc._get_index(block[45981]) < 2048
        # should be correctly distributed: ideally 2047 or 2048 different
        # values
        assert len(set([jc._get_index(x) for x in block])) >= 2040
    # check that the subkeys are distinct for same-block entries
    subkeys = {}
    for block in blocks:
        for x in block:
            idx = jc._get_index(x)
            subkeys.setdefault(idx, []).append(jc._get_subhash(x))
    collisions = 0
    for idx, sks in subkeys.items():
        collisions += len(sks) - len(set(sks))
    assert collisions < 5
def index2hash(jc, index, subhash=0):
    """Combine ``index`` and a 16-bit ``subhash`` into a full hash for counter ``jc``."""
    assert 0 <= subhash < 65536
    shifted = index << jc.shift
    return shifted | subhash
def test_tick():
    """With threshold 4, tick() fires exactly on the 4th increment; reset() restarts it."""
    jc = JitCounter()
    # The fast path must never fall back to the slow path in this test.
    jc._tick_slowpath = "not callable in this test!"
    incr = jc.compute_threshold(4)
    for i in range(5):
        r = jc.tick(index2hash(jc, 104), incr)
        assert r is (i == 3)
    for i in range(5):
        r = jc.tick(index2hash(jc, 108), incr)
        s = jc.tick(index2hash(jc, 109), incr)
        assert r is (i == 3)
        assert s is (i == 3)
    jc.reset(index2hash(jc, 108))
    for i in range(5):
        r = jc.tick(index2hash(jc, 108), incr)
        assert r is (i == 3)
def test_collisions():
    """Same-index hashes with different subhashes must mostly count independently."""
    jc = JitCounter(size=4)    # 2 bits
    incr = jc.compute_threshold(4)
    for i in range(5):
        for sk in range(100, 105):
            r = jc.tick(index2hash(jc, 3, subhash=sk), incr)
            assert r is (i == 3)
    # With the default size a few collisions are tolerated, but not many.
    jc = JitCounter()
    incr = jc.compute_threshold(4)
    misses = 0
    for i in range(5):
        for sk in range(100, 106):
            r = jc.tick(index2hash(jc, 3, subhash=sk), incr)
            if r:
                assert i == 3
            elif i == 3:
                misses += 1
    assert misses < 5
def test_install_new_chain():
    """install_new_cell drops removable (dead) cells and chains the live ones."""
    class Dead:
        next = None
        def should_remove_jitcell(self):
            return True
    class Alive:
        next = None
        def should_remove_jitcell(self):
            return False
    #
    jc = JitCounter()
    assert jc.lookup_chain(104) is None
    # Dead cells replace each other instead of chaining.
    d1 = Dead()
    jc.install_new_cell(104, d1)
    assert jc.lookup_chain(104) is d1
    d2 = Dead()
    jc.install_new_cell(104, d2)
    assert jc.lookup_chain(104) is d2
    assert d2.next is None
    #
    # Live cells are appended to the chain.
    d3 = Alive()
    jc.install_new_cell(104, d3)
    assert jc.lookup_chain(104) is d3
    assert d3.next is None
    d4 = Alive()
    jc.install_new_cell(104, d4)
    assert jc.lookup_chain(104) is d3
    assert d3.next is d4
    assert d4.next is None
def test_change_current_fraction():
    """Setting the counter to 95% makes the very next tick fire (threshold permitting)."""
    jc = JitCounter()
    incr = jc.compute_threshold(8)
    # change_current_fraction() with a fresh new hash
    jc.change_current_fraction(index2hash(jc, 104), 0.95)
    r = jc.tick(index2hash(jc, 104), incr)
    assert r is True
    # change_current_fraction() with an already-existing hash
    r = jc.tick(index2hash(jc, 104), incr)
    assert r is False
    jc.change_current_fraction(index2hash(jc, 104), 0.95)
    r = jc.tick(index2hash(jc, 104), incr)
    assert r is True
    # change_current_fraction() with a smaller incr
    incr = jc.compute_threshold(32)
    jc.change_current_fraction(index2hash(jc, 104), 0.95)
    r = jc.tick(index2hash(jc, 104), incr)
    assert r is False
    r = jc.tick(index2hash(jc, 104), incr)
    assert r is True
| mozillazg/pypy | rpython/jit/metainterp/test/test_counter.py | test_counter.py | py | 4,080 | python | en | code | 430 | github-code | 36 |
20762327407 | import twilio_setup
import eleven_labs_setup
import call_handling
import latency_management
import interruption_handling
import call_mimic
def main():
    """Drive one simulated phone call end to end (set up, monitor, tear down)."""
    # Initialize Twilio and Eleven Labs
    twilio_api = twilio_setup.initialize_twilio()
    eleven_labs_api = eleven_labs_setup.initialize_eleven_labs()
    # Start a call
    call_data = call_handling.initiate_call(twilio_api, eleven_labs_api)
    # Monitor the call for latency and interruptions
    # NOTE(review): this loop assumes call_data['status'] is updated by the
    # handlers below -- confirm with the call_handling module.
    while call_data['status'] != 'ended':
        latency = latency_management.measure_latency(call_data)
        if latency > 0:
            latency_management.reduce_latency(call_data, latency)
        if interruption_handling.detect_interruption(call_data):
            interruption_handling.handle_interruption(call_data)
        # Mimic real call
        call_mimic.simulate_background_noise(call_data)
        call_mimic.simulate_voice_tones(call_data)
    # End the call
    call_handling.end_call(call_data)
if __name__ == "__main__":
    main()
1762674415 | import logging
import itertools
from typing import Optional
import demoji
from .apple import scraper_apple
from .google import scraper_google
__all__ = ["scraper", "scraper_google", "scraper_apple"]
def content_filter(content: str) -> Optional[str]:
    """Normalise a review text: strip emoji, drop short reviews, drop long tokens.

    Returns None when the emoji-stripped text is shorter than 20 characters,
    otherwise the text rebuilt from its tokens of fewer than 15 characters.
    """
    content = demoji.replace(content)
    if len(content) < 20:
        return None
    content = " ".join(filter(lambda x: len(x) < 15, content.split()))
    return content
def scraper(
    google_package: str,
    apple_name: str,
    lans: list[str] = ["en"],
    countries: list[str] = ["us"],
    count: int = 10000,
):
    """Yield filtered review texts for an app from Google Play and the App Store.

    Google reviews are fetched for every (language, country) pair, Apple
    reviews for every country; each raw review is passed through
    content_filter and only surviving (non-None) texts are yielded.

    NOTE(review): the list defaults are mutable; they are only iterated here,
    but tuples would be the safer convention.
    """
    for lan, country in itertools.product(lans, countries):
        logging.info(f"read reviews on {lan}, {country} @ google")
        for review in scraper_google(google_package, lan, country, count):
            review = content_filter(review)
            if review: yield review
    for country in countries:
        logging.info(f"read reviews on {country} @ apple")
        for review in scraper_apple(apple_name, country, count):
            review = content_filter(review)
            if review: yield review
| moriW/app_words | scraper/__init__.py | __init__.py | py | 1,099 | python | en | code | 0 | github-code | 36 |
14733803314 | # a plugin: CSV whitelist.
# here we create a 'document type' (or 'an instance of Doc') with one input (a csv file)
# NOTE: 'doc' is a magic variable that is used to build a Doc instance `Doc( **module.doc )`
# This eliminates any need for us to 'from doc import Doc', which is good.
from datetime import datetime
from utils import date_from_str
def counter(*args):
    """Return how many times this function has been called (1-based).

    Used as a Mapper callable in ``doc`` below; any arguments the Mapper
    passes are ignored.  The running total lives in the module-global
    ``count``, created lazily on the first call.
    """
    global count
    try:
        count += 1
    except NameError:
        # First call: the global does not exist yet.  (The original bare
        # `except:` swallowed every exception, including KeyboardInterrupt.)
        count = 1
    return count
doc = {
    'name':'whitelist',
    'inputs':{
        'name' : 'whitelist_csv', # again, a unique name is always required
        # csv_input simply wants to read a file. So 'location' is just a file path.
        'location' : 'whitelist.csv', # This path will be read immediately, so we can use a relative path (to the plugin file)
        # csv_input only knows how to use one value - a dictionary key we name with 'id'
        'id': 'hash',
        # 'data' is a 'Mapper': it massages the raw input data into the document's format
        'data': {
            'REMAP': { # REMAP instructs the Mapper to name outputs directly from inputs
                'name': 0, # our output dictionary will have a 'name' field taken from column 0
                'hash': 1, # and a 'hash' field taken from column 1
                # A (column, callable) tuple maps the column value through the callable.
                'date.created': (2, lambda v: date_from_str(v)),
                'comment': 3,
            },
            'from_whitelist': True, # this field will simply be copied
            'counter': counter, # THIS, IS, PYTHON
            'date.retrieved': lambda v: datetime.utcnow().replace(microsecond=0), # yes, we can
        },
    },
}
| JeffKwasha/hachit | plugins/whitelist.py | whitelist.py | py | 1,667 | python | en | code | 1 | github-code | 36 |
5134072941 | import asyncio
from telethon.tl.functions.channels import EditAdminRequest
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.types import ChatAdminRights
from telethon.errors.rpcerrorlist import ChatSendMediaForbiddenError, PeerIdInvalidError
from . import *
@telebot.on(admin_cmd(pattern="schd ?(.*)"))
@telebot.on(sudo_cmd(pattern="schd ?(.*)", allow_sudo=True))
async def schd(event):
    """Repeatedly broadcast a message to every group dialog on a schedule.

    Expected arguments: ``schd <delay_seconds> <repeat_count> [message_id]``.
    The message to broadcast may also be taken from a replied-to message.

    NOTE(review): if fewer than three space-separated arguments are given,
    ``b[1]``/``b[2]`` raise IndexError — verify intended argument handling.
    """
    a = event.pattern_match.group(1)
    b = a.split(" ")
    wwait = b[0]   # seconds to sleep before each broadcast round
    times = int(b[1])   # number of broadcast rounds
    idds = b[2]   # message id to broadcast (string at this point)
    previous_message = await event.get_reply_message()
    if previous_message:
        previous_message = await event.get_reply_message()
        idds = previous_message.id
    # NOTE(review): this re-reads b[2] even when a reply supplied the id,
    # discarding the replied message's id — looks unintentional; confirm.
    if idds:
        idds = int(b[2])
    kk = await event.reply("`Schedule Broadcasting Msg...`")
    er = 0
    done = 0
    count = 0
    chatidd = await event.get_chat()
    chatidd = chatidd.id
    while count != times:
        count += 1
        # reset per-round success/error counters
        er = 0
        done = 0
        await asyncio.sleep(int(wwait))
        await kk.edit("`Broadcasting...`")
        msg = await borg.get_messages(chatidd, ids=idds)
        async for x in event.client.iter_dialogs():
            if x.is_group:
                chat = x.id
                try:
                    # done is incremented before the send, so a failed send
                    # is counted in both done and er
                    done += 1
                    await borg.send_message(chat, msg)
                except BaseException:
                    er += 1
        await kk.edit(f"Done in {done} chats, error in {er} chat(s)")
    await kk.reply("`Schedule Broadcast Finished...`")
| ankitkumarbh/Telegram-Userbot | telebot/plugins/schd.py | schd.py | py | 1,538 | python | en | code | 0 | github-code | 36 |
import os
from pathlib import Path
from pyontutils.utils import get_working_dir
from pyontutils.integration_test_helper import _TestScriptsBase as TestScripts
from .common import project_path, project_path_real, test_organization, onerror
from .common import fake_organization
import sparcur
import sparcur.cli
import sparcur.paths
import sparcur.backends
from sparcur.utils import log
from sparcur.pennsieve_api import FakeBFLocal
def fake_setup(self, *args, **kwargs):
    """ replace _setup_bfl with a version that handles repeated invocation of
    cli.Main.__init__ as occurs during testing """
    # FIXME obviously the whole init process should be reworked to avoid the
    # utter insanity that cli.Main.__init__ is at the moment ...
    if self.options.clone or self.anchor.id != fake_organization:
        # real (or real-ish) organization: rebuild the Remote class and
        # clear any stale cached _api left over from a previous test run
        self.Remote = self._remote_class._new(
            self._cache_class._local_class, self._cache_class)
        if (hasattr(self.Remote, '_api') and
            not isinstance(self.Remote._api, self.Remote._api_class)):
            log.warning(f'stale _api on remote {self.Remote._api}')
            # _api may live on any class in the MRO; delete it wherever found
            for cls in self.Remote.mro():
                if hasattr(cls, '_api'):
                    try:
                        del cls._api
                    except AttributeError as e:
                        # hasattr can be true via inheritance while the
                        # attribute is absent on this particular class
                        pass
        self._old_setup_bfl()
    else:
        # fake organization: short-circuit with the in-memory fake backend
        self._cache_class._anchor = self.anchor # don't trigger remote lookup
        self.bfl = self._remote_class._api = FakeBFLocal(self.anchor.id, self.anchor)
# Monkeypatch cli.Main so tests never hit the real Pennsieve backend;
# the original implementation is kept for fake_setup to delegate to.
sparcur.cli.Main._old_setup_bfl = sparcur.cli.Main._setup_bfl
sparcur.cli.Main._setup_bfl = fake_setup
# Test-selection knobs consumed by TestScripts.populate_tests below.
only = tuple()
skip = ('dashboard_server',)
ci_skip = tuple()
working_dir = get_working_dir(__file__)
if working_dir is None:
    # python setup.py test will run from the module_parent folder
    working_dir = Path(__file__).parent.parent
post_load = lambda : None
def post_main():
    """Reset PennsieveRemote class-level state after each test run."""
    # just wipe out the state of these after every test
    # there are countless strange and hard to debug errors
    # that can occur because of mutation of class aka global state
    # they really don't teach the fact that class level variables
    # are actually global variables and should be treated with fear
    sparcur.backends.PennsieveRemote._new(sparcur.paths.Path,
                                          sparcur.paths.PennsieveCache)
# Command lines to exercise: 'cli-real' needs a live backend, 'cli' runs
# against the local fake project.
mains = {'cli-real': [['spc', 'clone', test_organization],
                      ['spc', 'pull'],
                      #['spc', 'refresh'], # XXX insanely slow and no longer used due to brokeness
                      ['spc', 'fetch'],
                      # nonsense with consistently incorrectly sized files in pandora
                      # find objects/ -exec ls -al {} \+ | grep -v 1024 | grep -v 4096 | grep -v total | grep -v objects | grep tom
                      ['spc', 'fetch', '--mbf'], # FIXME abstract --mbf
                      #['spc', 'report', 'access'], # TODO no easy way to test this ...
                      ['spc', 'rmeta'],],
         'cli': [['spc', 'find', '--name', '*.xlsx'],
                 ['spc', 'find', '--name', '*', '--limit', '3'],
                 ['spc', 'status'],
                 ['spc', 'meta'],
                 ['spc', 'export'],
                 ['spc', 'report', 'completeness'],
                 ['spc', 'report', 'contributors'],
                 ['spc', 'report', 'filetypes'],
                 ['spc', 'report', 'keywords'],
                 ['spc', 'report', 'subjects'],
                 ['spc', 'report', 'samples'],
                 ['spc', 'report', 'pathids'],
                 ['spc', 'report', 'errors'],
                 ['spc', 'report', 'size'],
                 ['spc', 'report', 'test'],
                 ['spc', 'tables'],
                 ['spc', 'missing'],
                 #['spc', 'annos'], # XXX insanely slow
                 #['spc', 'annos', 'export'], # XXX insanely slow
                ],
}
# Append the common flags every 'cli' invocation needs (fake local project).
mains['cli'] = [args +
                ['--project-path', project_path.as_posix(), '-N', '--local', '--jobs', '1'] +
                (['--raw'] if 'report' in args else [])
                for args in mains['cli']]
_cli_real = mains.pop('cli-real')
if 'CI' not in os.environ:
    # only run the real-backend commands outside continuous integration
    mains['cli'].extend([args + ['--project-path', project_path_real.as_posix(), '-N', '--jobs', '1']
                         for args in _cli_real])
# if the real project path exists then remove it so that we can test cloning
# and keep the cloned directory around until the next time we run the tests
if project_path_real.exists():
    project_path_real.rmtree(onerror=onerror)
log.info(skip)
TestScripts.populate_tests(sparcur, working_dir, mains, skip=skip,
                           post_load=post_load, post_main=post_main,
                           only=only, do_mains=True)
| SciCrunch/sparc-curation | test/test_integration.py | test_integration.py | py | 4,836 | python | en | code | 11 | github-code | 36 |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from deconstruct_lc import read_config
from deconstruct_lc import tools_fasta
from deconstruct_lc import tools_lc
from deconstruct_lc.scores.norm_score import NormScore
class RemovePfam(object):
    """Compare low-complexity (LC) scores inside and outside Pfam domains.

    Loads the puncta / no-puncta protein sets (fasta) together with their
    Pfam domain annotations (tsv) and computes normalized LC scores for
    full-length sequences versus the sequence segments that fall inside
    (or outside) annotated Pfam domains.
    """

    def __init__(self):
        config = read_config.read_config()
        self.data_dp = os.path.join(config['fps']['data_dp'])
        # Input fasta files and Pfam annotation tables for the two sets.
        self.puncta = os.path.join(self.data_dp, 'experiment', 'puncta_uni.fasta')
        self.nopuncta = os.path.join(self.data_dp, 'experiment', 'nopuncta_uni.fasta')
        self.pfam_puncta = os.path.join(self.data_dp, 'experiment', 'puncta_pfam.tsv')
        self.pfam_nopuncta = os.path.join(self.data_dp, 'experiment', 'nopuncta_pfam.tsv')
        # LC-motif parameters: k-mer length, entropy cutoff and alphabet,
        # plus the length-normalization line (norm = raw - (m*len + b)).
        self.k = 6
        self.lce = 1.6
        self.lca = 'SGEQAPDTNKR'
        self.lc_m = 0.06744064704548541
        self.lc_b = 16.5

    def run_percent_pfam(self):
        """Write per-protein fraction-outside-Pfam tables for both sets."""
        puncta_perc = os.path.join(self.data_dp, 'experiment', 'puncta_percent_pfam.tsv')
        self.percent_pfam(self.puncta, self.pfam_puncta, puncta_perc)
        nopuncta_perc = os.path.join(self.data_dp, 'experiment', 'nopuncta_percent_pfam.tsv')
        self.percent_pfam(self.nopuncta, self.pfam_nopuncta, nopuncta_perc)

    def percent_pfam(self, fasta_fp, pfam_fp, fpo):
        """Write a tsv of per-protein LC score and fraction inside Pfam domains."""
        df = pd.read_csv(pfam_fp, sep='\t')
        pids, seqs = tools_fasta.fasta_to_id_seq(fasta_fp)
        frac_pfam = []
        for pid, seq in zip(pids, seqs):
            ndf = df[df['uniprot_acc'] == pid]
            ndf = ndf.sort_values(by='seq_start')
            # segmented holds everything OUTSIDE Pfam domains, so the Pfam
            # fraction is 1 - (outside length / total length).
            segmented = self.segment_seq(seq, ndf)
            len_seg = 0
            for seg in segmented:
                len_seg += len(seg)
            frac_pfam.append(float(len(seq) - len_seg)/float(len(seq)))
        ns = NormScore()
        scores = ns.lc_norm_score(seqs)
        df_out = pd.DataFrame({'Uniprot ID': pids, 'LC Score': scores,
                               'Pfam Fraction': frac_pfam}, columns=['Uniprot ID', 'LC Score', 'Pfam Fraction'])
        df_out = df_out.sort_values(by='LC Score', ascending=False)
        df_out.to_csv(fpo, sep='\t')
        print(np.mean(frac_pfam))

    def run_with_pfam(self):
        """Write LC-score tables restricted to proteins without any Pfam domain."""
        puncta_out = os.path.join(self.data_dp, 'experiment', 'puncta_nopfam.tsv')
        self.with_pfam(self.puncta, self.pfam_puncta, puncta_out)
        nopuncta_out = os.path.join(self.data_dp, 'experiment', 'nopuncta_nopfam.tsv')
        self.with_pfam(self.nopuncta, self.pfam_nopuncta, nopuncta_out)

    def with_pfam(self, fasta_fp, pfam_fp, fpo):
        """
        How many proteins in the set have pfam domains?
        What is the fraction occupied by pfam domains?"""
        df = pd.read_csv(pfam_fp, sep='\t')
        pfam_ids = list(set(df['uniprot_acc']))
        pids, seqs = tools_fasta.fasta_to_id_seq(fasta_fp)
        print(len(pids))
        nopfam_ids = list(set(pids) - set(pfam_ids))
        nopfam_seqs = []
        for pid, seq in zip(pids, seqs):
            if pid in nopfam_ids:
                nopfam_seqs.append(seq)
        ns = NormScore()
        scores = ns.lc_norm_score(nopfam_seqs)
        df_out = pd.DataFrame({'UniProt ID': nopfam_ids, 'LC Score': scores}, columns=['UniProt ID', 'LC Score'])
        df_out = df_out.sort_values(by='LC Score', ascending=False)
        df_out.to_csv(fpo, sep='\t')

    def fetch_score(self, df, pids):
        """Return the 'LC Score' recorded in *df* for each protein id in *pids*."""
        scores = []
        for pid in pids:
            # Filter into a fresh frame; the previous version rebound ``df``
            # itself, which emptied the frame after the first lookup and made
            # every subsequent iteration raise IndexError.
            ndf = df[df['Protein ID'] == pid]
            scores.append(list(ndf['LC Score'])[0])
        return scores

    def score_in_pfam(self):
        """Compare full-length LC scores against scores INSIDE Pfam domains
        (no-puncta set); plots both distributions."""
        ids, seqs = tools_fasta.fasta_to_id_seq(self.nopuncta)
        df = pd.read_csv(self.pfam_nopuncta, sep='\t', index_col=0)
        below = 0
        above = 0
        norm_scores = []
        fl_norm_scores = []
        for pid, seq in zip(ids, seqs):
            ndf = df[df['uniprot_acc'] == pid]
            ndf = ndf.sort_values(by='seq_start')
            segmented = self.pfam_segments(seq, ndf)
            total = 0
            for item in segmented:
                total += len(item)
            # only score proteins with >= 100 residues inside Pfam domains
            if total >= 100:
                above += 1
                fl_score, fl_length = self.get_segment_scores([seq])
                fl_norm = self.norm_function([fl_score], [fl_length])
                raw_score, length = self.get_segment_scores(segmented)
                norm_score = self.norm_function([raw_score], [length])
                norm_scores.append(norm_score[0])
                fl_norm_scores.append(fl_norm[0])
            else:
                below += 1
        print(above)
        print(below)
        print(np.mean(norm_scores))
        print(np.mean(fl_norm_scores))
        print(np.median(norm_scores))
        print(np.median(fl_norm_scores))
        plt.hist(fl_norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Full length scores')
        plt.hist(norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Inside Pfam scores')
        plt.legend()
        plt.show()

    def run(self):
        """Compare full-length LC scores against scores OUTSIDE Pfam domains
        (puncta set); plots both distributions."""
        ids, seqs = tools_fasta.fasta_to_id_seq(self.puncta)
        df = pd.read_csv(self.pfam_puncta, sep='\t', index_col=0)
        below = 0
        above = 0
        norm_scores = []
        fl_norm_scores = []
        for pid, seq in zip(ids, seqs):
            ndf = df[df['uniprot_acc'] == pid]
            ndf = ndf.sort_values(by='seq_start')
            segmented = self.segment_seq(seq, ndf)
            total = 0
            for item in segmented:
                total += len(item)
            # only score proteins with >= 100 residues outside Pfam domains
            if total >= 100:
                above += 1
                fl_score, fl_length = self.get_segment_scores([seq])
                fl_norm = self.norm_function([fl_score], [fl_length])
                raw_score, length = self.get_segment_scores(segmented)
                norm_score = self.norm_function([raw_score], [length])
                norm_scores.append(norm_score[0])
                fl_norm_scores.append(fl_norm[0])
            else:
                below += 1
        print(above)
        print(below)
        print(np.mean(norm_scores))
        print(np.mean(fl_norm_scores))
        print(np.median(norm_scores))
        print(np.median(fl_norm_scores))
        plt.hist(fl_norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Full length scores')
        plt.hist(norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Outside Pfam scores')
        plt.legend()
        plt.show()

    def pfam_segments(self, seq, df):
        """Return the subsequences of *seq* covered by each Pfam interval in *df*."""
        new_seq = []
        for i, row in df.iterrows():
            new_seq.append(seq[row['seq_start']: row['seq_end']+1])
        return new_seq

    def segment_seq(self, seq, df):
        """Given intervals, pull out the domain, and segment around it.

        Returns the subsequences of *seq* that lie OUTSIDE the Pfam
        intervals listed in *df* (assumed sorted by seq_start).
        """
        start = 0
        new_seq = []
        for i, row in df.iterrows():
            new_seq.append(seq[start:row['seq_start']])
            start = row['seq_end'] + 1
        new_seq.append(seq[start:])
        return new_seq

    def pfam_in_common(self):
        """Print the frequency of each Pfam accession in the puncta set."""
        df = pd.read_csv(self.pfam_puncta, sep='\t', index_col=0)
        print(df['pfamA_acc'].value_counts())

    def get_segment_scores(self, segment_seq):
        """Return (total LC-motif count, total length) over a list of segments."""
        total_motifs = 0
        total_length = 0
        for seq in segment_seq:
            motifs = tools_lc.count_lc_motifs(seq, self.k, self.lca, self.lce)
            total_motifs += motifs
            total_length += len(seq)
        return total_motifs, total_length

    def norm_function(self, raw_scores, lengths):
        """Length-normalize raw scores: norm = raw - (lc_m * length + lc_b)."""
        norm_scores = []
        for raw_score, length in zip(raw_scores, lengths):
            norm_score = raw_score - ((self.lc_m * length) + self.lc_b)
            norm_scores.append(norm_score)
        return norm_scores
def main():
    """Entry point: report the most common Pfam accessions in the puncta set."""
    RemovePfam().pfam_in_common()
rp.pfam_in_common()
if __name__ == '__main__':
    main()
import torch
import torch.nn as nn
from modules.updown_cell import UpDownCell
from modules.captioner import Captioner
class UpDownCaptioner(Captioner):
    """Bottom-Up/Top-Down style image captioner built around an UpDownCell."""

    def __init__(self, vocab, image_feature_size=2048, embedding_size=1000, hidden_size=512,
                 attention_projection_size=512, seq_length=20, beam_size=3,
                 pretrained_embedding=None, state_machine=None):
        """Build the captioner.

        Args:
            vocab: token-to-index mapping; must support len() and '<unk>' lookup.
            image_feature_size: dimensionality of the input image features.
            embedding_size: word embedding dimensionality.
            hidden_size: LSTM hidden state size of the up-down cell.
            attention_projection_size: projection size used by attention.
            seq_length: maximum generated caption length.
            beam_size: beam width used at decode time.
            pretrained_embedding: optional pre-trained embedding weight tensor.
            state_machine: optional constrained-decoding state machine.
        """
        super(UpDownCaptioner, self).__init__()
        vocab_size = len(vocab)
        self.vocab = vocab
        self.seq_length = seq_length
        self.state_machine = state_machine
        self.image_feature_size = image_feature_size
        self.beam_size = beam_size
        # define up-down cell
        self._cell = UpDownCell(image_feature_size=image_feature_size, embedding_size=embedding_size,
                                hidden_size=hidden_size, attention_projection_size=attention_projection_size)
        # define embedding layer
        if pretrained_embedding is not None:
            # if use pre-trained word embedding
            self._embedding_layer = nn.Embedding.from_pretrained(pretrained_embedding).float()
        else:
            self._embedding_layer = nn.Embedding(num_embeddings=vocab_size,
                                                 embedding_dim=embedding_size)
        # produce the logits which used to soft-max distribution
        self._output_layer = nn.Linear(hidden_size, vocab_size, bias=True)
        self._log_softmax = nn.LogSoftmax(dim=1)
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.vocab['<unk>'])

    def load(self, PATH, map_location=None):
        """Load model weights from a checkpoint at *PATH*.

        map_location is forwarded to torch.load so that checkpoints saved on
        a GPU can be restored on a CPU-only machine (e.g. map_location='cpu');
        the default None preserves the previous behavior.
        """
        self.load_state_dict(torch.load(PATH, map_location=map_location))
#! /usr/bin/env python3
import sys
from math import log
"""A module for demonstraiting exceptions."""
def convert(s):
    """Convert *s* to an integer, reporting failures on stderr before re-raising."""
    try:
        return int(s)
    except (ValueError, TypeError) as e:
        # report the failure, then propagate it to the caller unchanged
        print("Conversion error: {}".format(str(e)), file=sys.stderr)
        raise
def string_log(s):
    """Return the natural logarithm of *s* after converting it to an integer."""
    return log(convert(s))
| perenciolo/pluralsight | python/fundamental/01/exceptional.py | exceptional.py | py | 378 | python | en | code | 0 | github-code | 36 |
import argparse
import numpy as np
import scipy.stats
from statsmodels.stats.proportion import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import matplotlib.patches as mpatches
matplotlib.rcParams['font.family'] = 'Arial'
def get_conf_int_stats(obs_count, total_count, method='jeffreys'):
    """Return the observed proportion and its 95% confidence interval.

    Returns a tuple (proportion, [ci_lower, ci_upper]) computed with
    statsmodels' proportion_confint at alpha=0.05.
    """
    ci_low, ci_high = proportion_confint(obs_count, total_count, alpha=0.05, method=method)
    return obs_count / total_count, [ci_low, ci_high]
def plot_rs_by_test_suite_grid_5_by_6(rs, models, human_data, test_names, model2run_indice, model2color, model2name, add_test_name=True, savepath=None):
    """Plot per-test-suite accuracy bars (human + models) on a 5x6 grid.

    Error bars: human uses the precomputed CI bounds in *human_data*;
    models use a 95% CI over items, averaged across runs.
    NOTE(review): the test-name legend reads the module-level
    ``test_name2pretty_name`` mapping.
    """
    # Plot results as bar graph grid 5*6
    n_row = 5
    n_col = 6
    bar_width = 0.75
    fig, axs = plt.subplots(n_row, n_col, figsize=(8, 6.5), sharey='row', sharex='col')
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    for k, test_name in enumerate(test_names):
        row_id = k // n_col
        col_id = k % n_col
        axs[row_id, col_id].set_title('Test {}'.format(k+1), fontsize=12)
        axs[row_id, col_id].set_ylim(0,1)
        axs[row_id, col_id].set_xlim(-1.75,len(models)-0.25)
        axs[row_id, col_id].set_xticks(np.arange(0, len(models)))
        axs[row_id, col_id].set_yticks(np.arange(0, 1.2, 0.25))
        axs[row_id, col_id].set_xticklabels([])
        axs[row_id, col_id].spines['right'].set_visible(False)
        axs[row_id, col_id].spines['top'].set_visible(False)
        axs[row_id, col_id].grid(linestyle='--', alpha=0.5, zorder=0, axis='y')
        axs[row_id, col_id].set_axisbelow(True)
        # human bar at x = -1 with its asymmetric CI error bar
        axs[row_id, col_id].errorbar(-1, human_data[test_name]['acc_value'], yerr=[[human_data[test_name]['acc_value'] - human_data[test_name]['acc_lower']], [human_data[test_name]['acc_upper'] - human_data[test_name]['acc_value']]], label='Human', color='black', marker='None', linestyle='none')
        axs[row_id, col_id].bar(-1, human_data[test_name]['acc_value'], label='Human', width=bar_width, color='white', edgecolor='k')
        for i, model in enumerate(models):
            # item accuracies averaged across runs, then mean +/- 95% CI over items
            data = np.array([rs[model][run_index][test_name]['item_acc_list'] for run_index in model2run_indice[model]], dtype='float')
            score_averaged_across_run = np.mean(data, axis=0)
            y_mean = np.mean(score_averaged_across_run)
            yerr = 1.96*(np.std(score_averaged_across_run)/np.sqrt(len(score_averaged_across_run)))
            axs[row_id, col_id].bar(i, y_mean, label=model, width=bar_width, color=model2color[model], yerr=yerr)
    # hide panels beyond the last test suite
    for index in range(k+1, n_row*n_col):
        row_id = index // n_col
        col_id = index % n_col
        axs[row_id, col_id].set_axis_off()
    # bottom-right panel is reserved for the legend (bars drawn with height 0)
    ax = axs[4, 5]
    ax.bar(0, 0, label='Human', width=0.35, color='black', fill=False)
    for i, model in enumerate(models):
        ax.bar(i+1, 0, label=model2name[model], width=0.35, color=model2color[model])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.legend(loc = 'center', bbox_to_anchor=(-1.2, 0.5), ncol=2, fontsize=12)
    fig.text(0.06, 0.5, 'Test Accuracy Score', ha='center', va='center', rotation='vertical')
    if add_test_name:
        # side box listing the pretty test-suite names
        textstr = '\n'.join(['({}) {}'.format(k+1, test_name2pretty_name[test_name]) for k, test_name in enumerate(test_names)])
        props = dict(boxstyle='round,pad=0.5', facecolor='white', alpha=0.5, ec='lightgray')
        fig.text(0.94, 0.5, textstr, fontsize=10,
            verticalalignment='center', bbox=props, linespacing = 1.65)
    if savepath is not None:
        plt.savefig(savepath, bbox_inches='tight')
    plt.show(block=False)
    plt.pause(1)
    plt.close()
def plot_rs_by_test_suite_grid_3_by_9(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath=None):
    """Plot per-test-suite accuracy bars (human + models) on a 3x9 grid.

    Same data as the 5x6 variant, wider layout; the legend is attached
    below panel 23 (k == 22).
    """
    # Plot results as bar graph grid 3*9
    n_row = 3
    n_col = 9
    bar_width = 0.75
    fig, axs = plt.subplots(n_row, n_col, figsize=(11, 3.6), sharey='row', sharex='col')
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    for k, test_name in enumerate(test_names):
        row_id = k // n_col
        col_id = k % n_col
        axs[row_id, col_id].set_title('Test {}'.format(k+1), fontsize=10)
        axs[row_id, col_id].set_ylim(0,1)
        axs[row_id, col_id].set_xlim(-1.75,len(models)-0.25)
        axs[row_id, col_id].spines['right'].set_visible(False)
        axs[row_id, col_id].spines['top'].set_visible(False)
        axs[row_id, col_id].grid(linestyle='--', alpha=0.5, zorder=0, axis='y')
        axs[row_id, col_id].set_xticks(np.arange(0, len(models)))
        axs[row_id, col_id].set_yticks(np.arange(0, 1.2, 0.25))
        axs[row_id, col_id].set_xticklabels([])
        axs[row_id, col_id].set_axisbelow(True)
        # human bar at x = -1 with its asymmetric CI error bar
        axs[row_id, col_id].errorbar(-1, human_data[test_name]['acc_value'], yerr=[[human_data[test_name]['acc_value'] - human_data[test_name]['acc_lower']], [human_data[test_name]['acc_upper'] - human_data[test_name]['acc_value']]], color='black', marker='None', linestyle='none')
        axs[row_id, col_id].bar(-1, human_data[test_name]['acc_value'], label='Human', width=bar_width, color='white', edgecolor='k')
        for i, model in enumerate(models):
            # item accuracies averaged across runs, then mean +/- 95% CI over items
            data = np.array([rs[model][run_index][test_name]['item_acc_list'] for run_index in model2run_indice[model]], dtype='float')
            score_averaged_across_run = np.mean(data, axis=0)
            y_mean = np.mean(score_averaged_across_run)
            yerr = 1.96*(np.std(score_averaged_across_run)/np.sqrt(len(score_averaged_across_run)))
            # bar plot
            axs[row_id, col_id].bar(i, y_mean, label=model2name[model], width=bar_width, color=model2color[model], yerr=yerr)
        if k == 22:
            axs[row_id, col_id].legend(loc='center', bbox_to_anchor=(0.5, -0.35), ncol=5, fontsize=10)
    # hide panels beyond the last test suite
    for index in range(k+1, n_row*n_col):
        row_id = index // n_col
        col_id = index % n_col
        axs[row_id, col_id].set_axis_off()
    fig.text(0.08, 0.5, 'Test Accuracy Score', ha='center', va='center', rotation='vertical')
    if savepath is not None:
        plt.savefig(savepath, bbox_inches='tight')
    plt.show(block=False)
    plt.pause(1)
    plt.close()
def plot_aggregated_rs(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath=None):
    """Plot mean accuracy across all test suites for humans and each model.

    Error bars are a 95% CI over test suites (for models: suite accuracies
    first averaged across runs).
    """
    # Plot averaged performance over all the test suites
    plt.figure(figsize=(2.5,2.5))
    ax = plt.gca()
    bar_width = 0.75
    # Use asymptotic confidence interval
    human_acc_by_test_suite = [human_data[test_name]['acc_value'] for test_name in test_names]
    human_acc_mean = np.mean(human_acc_by_test_suite)
    yerr = 1.96*(np.std(human_acc_by_test_suite)/np.sqrt(len(human_acc_by_test_suite)))
    ax.bar(-1, human_acc_mean, label='Human', width=bar_width, color='black', fill=False, yerr=yerr)
    print('Human average acc: {}'.format(human_acc_mean))
    for i, model in enumerate(models):
        # per-suite accuracies averaged across runs, then mean over suites
        data = [[rs[model][run_index][test_name]['acc'] for test_name in test_names] for run_index in model2run_indice[model]]
        test_suite_acc_list_averaged_across_run = np.mean(data, axis=0)
        mean_test_suite_acc = np.mean(test_suite_acc_list_averaged_across_run)
        yerr = 1.96*(np.std(test_suite_acc_list_averaged_across_run)/np.sqrt(len(test_suite_acc_list_averaged_across_run)))
        ax.bar(i, mean_test_suite_acc, label=model2name[model], width=bar_width, color=model2color[model], yerr=yerr)
    ax.set_ylim(0,1)
    ax.set_xlim(-1.75,len(models)-0.25)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_xticks(np.arange(-1, len(models)))
    ax.set_yticks(np.arange(0, 1.2, 0.25))
    ax.set_xticklabels([])
    plt.ylabel('Accuracy Score')
    plt.legend(loc = 'center', bbox_to_anchor=(1.45, 0.5))
    if savepath is not None:
        plt.savefig(savepath, bbox_inches='tight')
    plt.show(block=False)
    plt.pause(1)
    plt.close()
def plot_summmary_across_model_conditions(exp_data_all, model_conditions, savepath=None):
    """Plot the overall accuracy summary across learning setups.

    One human panel plus one panel per model condition; conditions are
    distinguished by hatch pattern, models by color.
    NOTE(review): reads module-level ``human_data`` and ``test_names``;
    the function name's triple-m typo is kept for caller compatibility.
    """
    fig = plt.figure(constrained_layout=False, figsize=(7.2,2.4))
    hatch_style_list = [{'hatch':None}, {'hatch':'///'}, {'hatch':'.'}]
    model_condition2style = dict(zip(['finetune', 'nyt_from_scratch', 'bllip_from_scratch'], hatch_style_list))
    gs = fig.add_gridspec(nrows=1, ncols=4, width_ratios=[0.25, 0.8, 0.8, 0.8], wspace=0.1)
    bar_width = 0.75
    # leftmost (narrow) panel: human accuracy with 95% CI over suites
    ax = fig.add_subplot(gs[0])
    human_acc_by_test_suite = [human_data[test_name]['acc_value'] for test_name in test_names]
    human_acc_mean = np.mean(human_acc_by_test_suite)
    yerr = 1.96*(np.std(human_acc_by_test_suite)/np.sqrt(len(human_acc_by_test_suite)))
    ax.bar(0, human_acc_mean, label='Human', width=bar_width, color='black', fill=False, yerr=yerr)
    print('Human average acc: {}'.format(human_acc_mean))
    ax.set_ylim(0,1)
    ax.set_xlim(-0.75,0.75)
    ax.set_yticks(np.arange(0, 1.2, 0.25))
    ax.set_ylabel('Accuracy', fontsize=10)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_xticks([])
    ax.set_xticklabels([])
    for model_cond_idx, model_condition in enumerate(model_conditions):
        ax = fig.add_subplot(gs[model_cond_idx+1])
        rs, models, model2run_indice, model2name, model2color = exp_data_all[model_condition]
        for i, model in enumerate(models):
            # per-suite accuracies averaged across runs, then mean over suites
            data = [[rs[model][run_index][test_name]['acc'] for test_name in test_names] for run_index in model2run_indice[model]]
            test_suite_acc_list_averaged_across_run = np.mean(data, axis=0)
            mean_test_suite_acc = np.mean(test_suite_acc_list_averaged_across_run)
            yerr = 1.96*(np.std(test_suite_acc_list_averaged_across_run)/np.sqrt(len(test_suite_acc_list_averaged_across_run)))
            ax.bar(i, mean_test_suite_acc, label=model2name[model], width=bar_width, color=model2color[model], yerr=yerr, **model_condition2style[model_condition])
        ax.set_ylim(0,1)
        ax.set_xlim(-0.75,len(models)-0.25)
        ax.spines['left'].set_visible(False)
        ax.set_yticks([])
        ax.set_yticklabels([])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_xticks([])
        ax.set_xticklabels([])
        if model_cond_idx == 2:
            # two stacked legends: colors = models, hatches = learning setups
            colors =['C{}'.format(k) for k in range(4)]
            model_names = ['GibbsComplete', 'InfillT5', 'InfillBART', 'ILM']
            model_condition_names = ['Pretrain/Fine-tune', 'From scratch (NYT)', 'From scratch (BLLIP)']
            color_legend = plt.legend(handles=[mpatches.Patch(facecolor='white', edgecolor='k', label='Human')]+[mpatches.Patch(facecolor=colors[k], edgecolor=colors[k], label=model_names[k]) for k in range(len(model_names))], loc='upper left', bbox_to_anchor=(1.15, 1.05), ncol=1, fontsize=10)
            hatch_legend = plt.legend(handles=[mpatches.Patch(facecolor='lightgray', edgecolor='k', linewidth=0, label=model_condition_names[k], **hatch_style_list[k]) for k in range(len(hatch_style_list))], loc='upper left', bbox_to_anchor=(1.15, 0.41), ncol=1, fontsize=10)
            ax.add_artist(color_legend)
            ax.add_artist(hatch_legend)
    if savepath is not None:
        plt.savefig(savepath, bbox_inches='tight')
    plt.show(block=False)
    plt.pause(1)
    plt.close()
def run_paired_t_tests(exp_data_all, model_conditions):
    """Run pairwise two-sided paired t-tests over per-suite accuracies.

    For each learning setup, compares every model pair and every model
    against humans, printing t statistics and p values.
    NOTE(review): reads module-level ``human_data`` and ``test_names``.
    """
    for model_cond_idx, model_condition in enumerate(model_conditions):
        rs, models, model2run_indice, model2name, model2color = exp_data_all[model_condition]
        model_acc_list_all = []
        for i, model in enumerate(models):
            # per-suite accuracies averaged across runs for this model
            data = [[rs[model][run_index][test_name]['acc'] for test_name in test_names] for run_index in model2run_indice[model]]
            model_acc_list_all.append(np.mean(data, axis=0))
        print('{:<22} {:<15} {:<15} {:<6} {:<6}'.format('Learning setup', 'Model name', 'Model name', 't_stat', 'p_value'))
        print('-'*70)
        for i in range(len(models)):
            for j in range(i+1, len(models)):
                d1 = np.array(model_acc_list_all[i])
                d2 = np.array(model_acc_list_all[j])
                t_stat, p_value = scipy.stats.ttest_rel(d1, d2, alternative='two-sided')
                print('{:<22} {:<15} {:<15} {:<6.3f} {:<6.3f}'.format(model_condition, model2name[models[i]], model2name[models[j]], t_stat, p_value))
        for i in range(len(models)):
            # each model against human per-suite accuracies
            d1 = np.array(model_acc_list_all[i])
            d2 = [human_data[test_name]['acc_value'] for test_name in test_names]
            t_stat, p_value = scipy.stats.ttest_rel(d1, d2, alternative='two-sided')
            print('{:<22} {:<15} {:<15} {:<6.3f} {:<6.3f}'.format(model_condition, model2name[models[i]], 'Human', t_stat, p_value))
        print()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze results in Evaluation III.')
parser.add_argument('--rerank', action='store_true', help='Plot results from directly specialized models with reranking.')
args = parser.parse_args()
DO_RERANK='rerank' if args.rerank else 'norerank'
DATA_DIR='data/exp1'
test_names = ["agreement_subj", "agreement_subj-long", "agreement_emb-subj-long", "agreement_subj-with-coord", "agreement_subj-with-PP",
"clause_VP","clause_VP-with-PP-adjunct", "clause_VP-with-adjunct-long",
"clause_VP-with-complement", "clause_VP-with-complement-long", "clause_VP-gerund",
"clause_phrasal-verb", "clause_phrasal-verb-with-subj",
"clause_resultative", "clause_resultative-long",
"coord_S", "coord_VP", "coord_emb-NP", "coord_emb-VP",
"coord_either", "coord_neither", "coord_gap-NP", "gap_adjunct", "gap_obj", "gap_subj", "gap_phrasal-verb"]
pretty_test_names = ["Number Agreement", "Number Agreement (Long Subject)", "Number Agreement (Embedded Clause)",
"Number Agreement (Coordination)", "Number Agreement (with PP)", "Clausal Structure", "Clausal Structure (PP Adjunct)",
"Clausal Structure (Long Adjunct)", "Clausal Structure (Complement)", "Clausal Structure (Long Complement)",
"Gerund", "Phrasal Verb", "Phrasal Verb (with NP)", "Resultative", "Resultative (Long NP)", "S Coordiation",
"VP Coordination", "Embedded NP Coordination", "Embedded VP Coordination", "Coordination (either)",
"Coordination (neither)", "Coordination in wh-clause", "Filler-Gap (Adjunct)", "Filler-Gap (Object)",
"Filler-Gap (Subject)", "Filler-Gap (Phrasal Verb)"]
test_name2pretty_name = dict(zip(test_names, pretty_test_names))
stimuli_example = {}
for test_name in test_names:
stimuli_path = '../stimuli/exp1/{}.txt'.format(test_name)
with open(stimuli_path) as f:
line = f.readline()
stimuli_example[test_name] = line.strip().replace('%%', '____')
# Load human behavioral results
with open('{}/results/human_eval_rs.txt'.format(DATA_DIR)) as f:
lines = f.readlines()
lines = [line.strip().split() for line in lines if line.strip() != '']
human_data = {}
for line in lines:
test_name = line[1]
human_data[test_name] = {}
human_data[test_name]['acc'] = float(line[2])
proportions1 = [float(item) for item in line[3].split('/')]
proportions2 = [float(item) for item in line[4].split('/')]
acc_value, [acc_lower, acc_upper] = get_conf_int_stats(proportions1[0] + proportions2[0], proportions1[1] + proportions2[1], method='jeffreys')
human_data[test_name]['acc_value'] = acc_value
human_data[test_name]['acc_lower'] = acc_lower
human_data[test_name]['acc_upper'] = acc_upper
exp_data_all = {}
fig_dir = 'fig/exp1/'
model_name_list = ['GibbsComplete', 'InfillT5', 'InfillBART', 'ILM']
model_color_list = ['C0', 'C1', 'C2', 'C3']
model_conditions = ['finetune', 'nyt_from_scratch', 'bllip_from_scratch']
model_condition2dir_name = dict(zip(model_conditions, ['pretrain-finetune', 'nyt-lg', 'bllip-lg']))
for model_condition in model_conditions:
if model_condition == 'nyt_from_scratch':
# Load and visualize results for models trained from scratch on a subset of NYT
models = ['gibbscomplete-nyt-lg', 't5-nyt-lg', 'bart-nyt-lg', 'ilm-nyt-lg']
model2run_indice = {'gibbscomplete-nyt-lg':['0001', '0002', '0003'], 't5-nyt-lg':['0001', '0002', '0003'],
'bart-nyt-lg':['0001', '0002', '0003'], 'ilm-nyt-lg':['0001', '0002', '0003']}
elif model_condition == 'finetune':
# Load and visualize results for pretrained models finetuned on a subset of NYT 2007
models = ['gibbscomplete', 't5-finetune', 'bart-finetune', 'ilm']
if DO_RERANK == 'rerank':
model2run_indice = {'gibbscomplete':['0001', '0002', '0003'], 't5-finetune':['1001', '1002', '1003'],
'bart-finetune':['1001', '1002', '1003'], 'ilm':['1001', '1002', '1003']}
else:
model2run_indice = {'gibbscomplete':['0001', '0002', '0003'], 't5-finetune':['0001', '0002', '0003'],
'bart-finetune':['0001', '0002', '0003'], 'ilm':['0001', '0002', '0003']}
elif model_condition == 'bllip_from_scratch':
# Load and visualize results for models trained from scratch on BLLIP-lg
models = ['gibbscomplete-bllip-lg', 't5-bllip-lg', 'bart-bllip-lg', 'ilm-bllip-lg']
model2run_indice = {'gibbscomplete-bllip-lg':['0101', '0102', '0103'], 't5-bllip-lg':['0001', '0002', '0003'],
'bart-bllip-lg':['0001', '0002', '0003'], 'ilm-bllip-lg':['0001', '0002', '0003']}
model2name = dict(zip(models, model_name_list))
model2color = dict(zip(models, model_color_list))
rs = {}
for model in models:
rs[model] = {}
for run_index in model2run_indice[model]:
rs[model][run_index] = {}
for test_name in test_names:
rs[model][run_index][test_name] = {'acc':None, 'item_acc_list':[]}
if model.startswith('gibbscomplete'):
path = '{}/results/{}/{}_{}_eval_rs.txt'.format(DATA_DIR, model_condition2dir_name[model_condition], model, run_index)
else:
if DO_RERANK == 'rerank':
path = '{}/results/{}/{}_rerank_{}_eval_rs.txt'.format(DATA_DIR, model_condition2dir_name[model_condition], model, run_index)
else:
path = '{}/results/{}/{}_{}_eval_rs.txt'.format(DATA_DIR, model_condition2dir_name[model_condition], model, run_index)
lines = open(path).readlines()
lines = [line.strip().split() for line in lines]
for line in lines:
if len(line) < 1:
continue
test_name = line[0]
item_acc = float(line[2])
rs[model][run_index][test_name]['item_acc_list'].append(item_acc)
for test_name in test_names:
rs[model][run_index][test_name]['acc'] = np.mean(rs[model][run_index][test_name]['item_acc_list'])
plot_rs_by_test_suite_grid_5_by_6(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath='{}/exp1_{}_{}_eval_grid_bar_5x6.pdf'.format(fig_dir, DO_RERANK, model_condition))
plot_rs_by_test_suite_grid_3_by_9(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath='{}/exp1_{}_{}_eval_grid_bar_3x9.pdf'.format(fig_dir, DO_RERANK, model_condition))
# plot_aggregated_rs(rs, models, human_data, test_names, model2run_indice, model2color, model2name, savepath='{}/exp1_{}_{}_eval_bar_average_score.pdf'.format(fig_dir, model_condition, DO_RERANK))
exp_data_all[model_condition] = [rs, models, model2run_indice, model2name, model2color]
run_paired_t_tests(exp_data_all, model_conditions)
plot_summmary_across_model_conditions(exp_data_all, model_conditions, savepath='{}/exp1_{}_overall_summary.pdf'.format(fig_dir, DO_RERANK))
| pqian11/fragment-completion | analysis/exp1_analysis.py | exp1_analysis.py | py | 20,639 | python | en | code | 5 | github-code | 36 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import shutil
import sys
sys.path.insert(0, os.path.abspath("../..")) # path to the actual project root folder
# -- Project information -----------------------------------------------------
project = "Spotted dmi bot"
copyright = "2021, Tend, drendog, alepiaz, Helias"
author = "Tend, drendog, alepiaz, Helias"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest", # to run doctests
"sphinx.ext.napoleon", # to use NumPy and Google style docstrings
"sphinx.ext.githubpages", # generates the .nojekyll file
"sphinx.ext.viewcode", # add source code links to the documentation
"sphinx_rtd_dark_mode", # dark mode for ReadTheDocs
"sphinx_autodoc_typehints", # improves the type hinting
"sphinx.ext.viewcode", # add source code links to the documentation
"sphinx.ext.coverage", # add coverage links to the documentation
"sphinx.ext.intersphinx", # add external mapping to other documentation
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme" # [optional, to use the far superior Read the Docs theme]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/dark.css",
]
html_logo = "_static/img/spotted-logo.jpg"
# -- Extension configuration -------------------------------------------------
# -- Configuration of "sphinx_autodoc_typehints" -----------------------------
typehints_use_rtype = False
typehints_defaults = "comma"
# -- Run sphinx-apidoc -------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/readthedocs/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(os.path.dirname(__file__), "api")
module_dir = os.path.join(os.path.dirname(__file__), "../../src/spotted")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
cmd_line = f"sphinx-apidoc --implicit-namespaces -t templates -f -o {output_dir} {module_dir}"
args = cmd_line.split(" ")
if tuple(sphinx.__version__.split(".")) >= ("1", "7"):
# This is a rudimentary parse_version to avoid external dependencies
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- External mapping --------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
"python": ("https://docs.python.org/" + python_version, None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://numpy.org/doc/stable", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"setuptools": ("https://setuptools.pypa.io/en/stable/", None),
"pyscaffold": ("https://pyscaffold.org/en/stable", None),
"telegram": ("https://docs.python-telegram-bot.org/en/stable/", None),
}
| TendTo/Telegram-SpottedDMI-Bot | docs/source/conf.py | conf.py | py | 4,898 | python | en | code | null | github-code | 36 |
12075612630 |
from CrearPreguntas import *
import random
class Partida:
    """A single quiz game: five same-category questions of rising difficulty."""

    def __init__(self):
        self._puntaje = 0             # accumulated score (10 points per correct answer)
        self._preguntas_partida = []  # the 5 questions selected for this game
        self._nombre = ""             # player name
        self._nivel = 0               # index of the current question (0-4)
        self._respuesta = 0           # last answer given by the player
        self._vivo = True             # False once the player answers incorrectly

    def get_puntaje(self):
        """Return the current score."""
        return self._puntaje

    def get_vivo(self):
        """Return True while the player has not answered incorrectly."""
        return self._vivo

    def get_nivel(self):
        """Return the index of the current question."""
        return self._nivel

    def set_nombre(self, nombre):
        """Set the player's name."""
        self._nombre = nombre

    def get_nombre(self):
        """Return the player's name."""
        return self._nombre

    def aumentar_puntaje(self):
        """Add 10 points to the score."""
        self._puntaje += 10

    def mostrar_puntaje(self):
        """Print the current score."""
        print("TU PUNTAJE ES: ", self._puntaje, "\n")

    def configurar_juego(self):
        """Select 5 questions of one random category, difficulties 1 through 5.

        The first randomly drawn difficulty-1 question fixes the category;
        every later pick must match both the next difficulty level and that
        category. Draws now cover the whole question pool instead of a
        hard-coded index range of 0-24, so the pool size may vary.
        """
        lista = CrearPreguntas().get_lista_preguntas()
        dificultad = 1
        categoria = None
        while len(self._preguntas_partida) < 5:
            pregunta = lista[random.randrange(len(lista))]
            if pregunta.dificultad != dificultad:
                continue
            if categoria is None:
                # First accepted question determines the category of the game.
                categoria = pregunta.categoria
                print("\nLa categoria de las preguntas es: ", categoria)
            elif pregunta.categoria != categoria:
                continue
            self._preguntas_partida.append(pregunta)
            dificultad += 1

    def jugar(self):
        """Print the current question and its four answer options."""
        actual = self._preguntas_partida[self._nivel]
        print(actual.pregunta)
        print()
        print("1. " + actual.opcion1)
        print("2. " + actual.opcion2)
        print("3. " + actual.opcion3)
        print("4. " + actual.opcion4)

    def subir_nivel(self):
        """Advance to the next question."""
        self._nivel += 1

    def responder(self, respuesta):
        """Record the answer, update score/alive state and print feedback."""
        self._respuesta = respuesta
        if self._respuesta == self._preguntas_partida[self._nivel].respuesta:
            print("\n¡CORRECTO!")
            self.aumentar_puntaje()
        else:
            print("\n¡INCORRECTO!")
            self._vivo = False
        self.mostrar_puntaje()
| pSARq/retoSofka | Partida.py | Partida.py | py | 1,979 | python | es | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.