seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
30905711413 | import pygame
from settings import Settings
from beatmap import Beatmap
from note import Note
from lane import Lane
class Rhythm:
    """Four-lane (D/F/J/K) osu!mania-style rhythm game.

    Owns the pygame window, the background music and the four note lanes,
    and runs the main event/render loop.
    """

    def __init__(self, beatmap, resolution=(1280, 2000)):
        """Initialise pygame, build the lanes from *beatmap* and start audio.

        beatmap    -- Beatmap exposing .notes (dict keyed by the lane
                      x-position as a string: '64', '192', '320', '448')
                      and ._audio (path to the music file).
        resolution -- (width, height) of the window in pixels.
        """
        # Pre-init the mixer before pygame.init() to configure the audio buffer.
        pygame.mixer.pre_init(44100, -16, 2, 2048)
        pygame.init()
        pygame.mixer.init()
        pygame.font.init()
        self.channel = pygame.mixer.find_channel(True)
        # NOTE(review): Channel.set_volume expects 0.0-1.0; 5.8 is clamped to 1.0.
        self.channel.set_volume(5.8)
        self.beatmap = beatmap
        self.resolution = resolution
        self._should_loop = True
        self._font = pygame.font.SysFont('Hack NF', 70)
        self._clock = pygame.time.Clock()
        self._screen = pygame.display.set_mode(resolution)
        self._combo = 0
        # One Lane per osu!mania column x-position.
        self.d_lane = Lane(64)
        self.f_lane = Lane(192)
        self.j_lane = Lane(320)
        self.k_lane = Lane(448)
        self.populate_lanes()
        pygame.mixer.music.load(self.beatmap._audio)
        pygame.mixer.music.play(-1)  # -1 == loop the track forever
        pygame.mixer.music.set_volume(0.5)

    def populate_lanes(self):
        """Distribute the beatmap's notes into the lane matching their column."""
        lanes = {'64': self.d_lane, '192': self.f_lane,
                 '320': self.j_lane, '448': self.k_lane}
        for column, notes in self.beatmap.notes.items():
            lane = lanes.get(column)
            if lane is not None:
                lane.load_objects(notes)

    def input(self, event):
        """Forward a KEYDOWN event to the lane bound to its key.

        Bug fix: the original referenced an undefined name ``e`` instead of
        the ``event`` parameter, raising NameError on every key press.
        """
        if event.type != pygame.KEYDOWN:
            return
        key_to_lane = {pygame.K_d: self.d_lane, pygame.K_f: self.f_lane,
                       pygame.K_j: self.j_lane, pygame.K_k: self.k_lane}
        lane = key_to_lane.get(event.key)
        if lane is not None:
            lane.send_hit()

    def logic(self):
        """Per-frame game logic (not implemented yet)."""
        pass

    def render(self):
        """Draw the hit zones and the combo counter, then flip the display."""
        self._screen.fill((0, 0, 0))
        # Hit zones, one outlined rectangle per lane.
        for x in (160, 280, 400, 520):
            pygame.draw.rect(self._screen, (0, 200, 255), (x, 10, 80, 40), 3)
        combo_surface = self._font.render(str(self._combo), False, (255, 255, 255))
        self._screen.blit(combo_surface, (700, 10))
        pygame.display.flip()

    def loop(self):
        """Run the main loop until the window is closed (capped at 300 FPS)."""
        while self._should_loop:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self._should_loop = False
                if event.type == pygame.KEYDOWN:
                    self.input(event)
            self.logic()
            self.render()
            self._clock.tick(300)
# Script entry point: load the Titania beatmap and run the game until quit.
titania= Beatmap('maps/titania/', 'titania_basic.osu', 'audio.ogg')
mania_clone = Rhythm(titania)
mania_clone.loop()
| zkxjzmswkwl/osu-mania-but-worse | main.py | main.py | py | 3,019 | python | en | code | 0 | github-code | 13 |
71004552977 | import argparse
import importlib
import logging
import os
import types
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data as data_utils
from tqdm import tqdm
from transformers import HerbertTokenizer, RobertaModel
import utils
from datasets.massive import IntentDataset
@dataclass
class ModelEvaluator():
    """Runs a trained language model + intent classifier over a DataLoader
    and collects per-example prediction vectors alongside the true labels."""
    # Collaborators injected by the caller (see main()).
    logger: logging.Logger
    language_model: nn.Module
    intent_classifier: nn.Module
    intents_len: int
    test_loader: data_utils.DataLoader
    device: torch.device
    def evaluate(self):
        """Evaluate on self.test_loader and log the mean per-batch accuracy.

        Returns (intents_np, labels_np): the raw classifier output vector and
        the integer label for the first example of every batch (the loader is
        built with batch_size=1, so that is every example).
        """
        # Inference mode: freeze dropout/batch-norm and disable gradients.
        self.language_model.eval()
        self.intent_classifier.eval()
        intents_list = []
        labels_list = []
        val_epoch_acc = 0
        with torch.no_grad():
            for tokenizer_output, labels in tqdm(self.test_loader):
                tokenizer_output = {key: val.to(self.device)
                                    for key, val in tokenizer_output.items()}
                # NOTE(review): labels_one_hot is computed but never used below.
                labels_one_hot = nn.functional.one_hot(
                    labels, self.intents_len)
                labels_one_hot = labels_one_hot.to(
                    self.device).type(torch.float)
                lm_outputs = self.language_model(**tokenizer_output)
                # Pooled [CLS] representation plus the mean over token states.
                cls_hiddens = lm_outputs.pooler_output
                hidden_state = lm_outputs.last_hidden_state.mean(dim=1)
                intents_pred = self.intent_classifier(
                    cls_hiddens, hidden_state)
                intents_decoded = intents_pred.argmax(dim=1).cpu()
                accuracy = torch.sum(
                    intents_decoded == labels).sum() / intents_decoded.shape[0]
                val_epoch_acc += accuracy.item()
                # [0] because batch size is 1
                intents_to_save = intents_pred[0].cpu().numpy()
                label_to_save = labels[0]
                intents_list.append(intents_to_save)
                labels_list.append(label_to_save)
        accuracy = val_epoch_acc / len(self.test_loader)
        self.logger.info(f'Evaluation accuracy: {accuracy}')
        labels_np = np.array(labels_list)
        intents_np = np.array(intents_list)
        return intents_np, labels_np
def load_exp_modules(exp_path: str) -> Tuple[types.ModuleType]:
config_path = os.path.join(exp_path, 'config')
config_path_str = str(config_path).replace('/', '.')
config = importlib.import_module(config_path_str)
model_path = os.path.join(exp_path, 'models', 'intent_classifier')
model_path_str = str(model_path).replace('\\', '/').replace('/', '.')
model = importlib.import_module(model_path_str)
return config, model
def main():
    """CLI entry point: load an experiment's config/model modules, restore
    the trained intent classifier, run evaluation and dump results to CSV."""
    logging.basicConfig()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp', type=str, default=None,
                        help="Experiment folder path.")
    args = parser.parse_args()
    exp_path = os.path.abspath(args.exp)
    config_module, IntentClassifier_module = load_exp_modules(args.exp)
    config = config_module.Config()
    IntentClassifier = IntentClassifier_module.IntentClassifier
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    tokenizer = HerbertTokenizer.from_pretrained(
        "allegro/herbert-klej-cased-tokenizer-v1")
    collate_fn = utils.collate_fn_factory(tokenizer)
    # NOTE(review): evaluation runs on the 'val' split despite the "test"
    # naming throughout -- confirm that is intended.
    test_dataset = IntentDataset(
        path=config.dataset_path, mode='val', random_seed=config.dataset_random_seed)
    test_loader = data_utils.DataLoader(test_dataset,
                                        batch_size=1, # 1 for testing purposes
                                        shuffle=True,
                                        collate_fn=collate_fn)
    language_model = RobertaModel.from_pretrained(
        "allegro/herbert-klej-cased-v1", is_decoder=False)
    language_model = language_model.to(device)
    intent_classifier = IntentClassifier(
        hidden_dim=768, output_dim=len(test_dataset.intents))
    # Restore the best checkpoint saved by training.
    model_path = os.path.join(exp_path, 'best.pt')
    intent_classifier.load_state_dict(torch.load(model_path))
    intent_classifier = intent_classifier.to(device)
    intents_len = len(test_dataset.intents)
    model_evaluator = ModelEvaluator(
        logger, language_model, intent_classifier, intents_len, test_loader, device)
    intents_np, labels_np = model_evaluator.evaluate()
    intents_df = pd.DataFrame(intents_np)
    # Column key 60 places the label after the prediction columns --
    # presumably len(test_dataset.intents) == 60; TODO confirm.
    labels_df = pd.DataFrame({60: labels_np})
    df = intents_df.join(labels_df)
    save_path = os.path.join(exp_path, 'test_results.csv')
    df.to_csv(save_path, index=False, header=False)
    # NOTE(review): 'preditcion' is a typo for 'prediction' in this log line.
    logger.info(f'A .csv file with intent preditcion vectors and labels saved to {save_path}.')
if __name__ == "__main__":
    main()
| Kacprate/Intent-classification-Polish-language | test.py | test.py | py | 4,862 | python | en | code | 0 | github-code | 13 |
17755401630 | import atexit
import time
import requests
from requests.auth import HTTPBasicAuth
from .queues import _get_queue
from ..connection import get_connection
from ..utils import pprint, get_config
def publish_message(credentials: HTTPBasicAuth, uri: str, vhost: str, exchange: str, routing_key: str, message: str, output: str):
    """Publish one message through the RabbitMQ management HTTP API.

    credentials -- basic-auth credentials for the management API
    uri         -- base URI of the exchanges endpoint (vhost/exchange appended)
    vhost       -- virtual host that owns the exchange
    exchange    -- target exchange name
    routing_key -- routing key attached to the message
    message     -- payload, sent with payload_encoding 'string'
    output      -- output option forwarded to pprint for displaying the response
    """
    body = {
        'routing_key': routing_key,
        'payload': message,
        'payload_encoding': 'string',
        'properties': {}
    }
    response = requests.post(uri + '/{}/{}/publish'.format(vhost, exchange), auth=credentials, json=body)
    pprint(response, output)
def publishing(env: str, credentials: HTTPBasicAuth, uri: str, vhost: str, exchange: str, routing_key: str, queue: str, message: str, n: int, output: str):
    """Continuously publish *message* over AMQP, n messages per ~1s cycle.

    Declares *queue* (mirroring the durability/arguments reported by the
    management API) and loops until interrupted; the AMQP channel/connection
    are closed by the _close_connection atexit hook.

    n -- messages per burst; the sleep of (1000 - n*10)/1000 seconds paces
         the loop, which assumes n < 100 (larger n yields a negative sleep
         and ValueError) -- TODO confirm intended bound.
    """
    connection = get_connection(env, protocol='amqps')[1]
    channel = connection.channel()
    # Guarantee a clean AMQP shutdown however the process exits.
    atexit.register(_close_connection, connection, channel)
    # NOTE(review): 'Crtl+D' is a typo for 'Ctrl+D' in this user-facing string.
    print('Press Crtl+D or Ctrl+C to exit... \n')
    config = get_config()
    if not queue:
        queue = config['default'].get('queue')
    # Drop the last two path segments to reach the API root (presumably the
    # incoming uri ends in .../exchanges/<vhost> -- confirm with callers).
    uri = '/'.join(uri.split('/')[:-2])
    data = _get_queue(credentials, f'{uri}/api/queues', vhost, name=queue).json()
    channel.queue_declare(
        queue=queue,
        durable=data['durable'],
        arguments=data['arguments']
    )
    while True:
        for _ in range(n):
            channel.basic_publish(
                exchange=exchange,
                routing_key=routing_key,
                body=message
            )
        if output:
            print('Message published!')
        time.sleep((1000 - (n * 10))/1000)
def _close_connection(connection, channel):
print('Closing channel...')
channel.close()
print('Channel closed.')
print('Closing channel...')
connection.close()
print('Connection closed.')
print('\nGood bye!')
| brianou7/rabbitmqcli | rabbitmqcli/modules/exchanges.py | exchanges.py | py | 1,855 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        Return the length of the longest substring of *s* with no repeated
        characters, via a sliding window over the last-seen index of each
        character.

        Generalized from a fixed 256-slot ord() table (which raised
        IndexError for any non-Latin-1 character) to a dict, so the full
        Unicode range is supported; results for ASCII input are unchanged.

        :type s: str
        :rtype: int
        """
        last_seen = {}   # character -> most recent index in s
        longest = 0
        start = 0        # left edge of the current repeat-free window
        for end, ch in enumerate(s):
            if ch in last_seen:
                # Advance the window past the previous occurrence,
                # but never move it backwards.
                start = max(start, last_seen[ch] + 1)
            longest = max(longest, end - start + 1)
            last_seen[ch] = end
        return longest
| BenjaminAnding/leetcodesolutions | Medium/LongestSubstringWithoutRepeatingCharacters/LongestSubstringWithoutRepeatingCharacters.py | LongestSubstringWithoutRepeatingCharacters.py | py | 362 | python | en | code | 0 | github-code | 13 |
10976822103 | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 14:41:28 2020
@author: luist
"""
import numpy.random as rng
import numpy as np
import keras
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.linear_model import SGDClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import tree
from sklearn import svm
from keras.layers import Input, Conv1D, Conv2D, Lambda, merge, Dense, Flatten,MaxPooling1D, Dropout, MaxPooling2D
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras import backend as K
from keras.optimizers import SGD,Adam
from keras.losses import binary_crossentropy
class Validate_models:
    """Builds same/different-class image-pair batches from train/validation
    tensors and scores classical classifiers (SVM / random forest / MLP) on
    N-way one-shot tasks.

    NOTE(review): validate_models() calls self.one_shot_task(), which is not
    defined in this class -- presumably provided by a sibling class or a
    missing mixin; confirm before use.
    """
    def __init__(self,X_treino,Xval):
        # X_treino / Xval: arrays shaped (classes, examples, width, height).
        self.Xval = Xval
        self.X_treino = X_treino
        self.n_classes,self.n_exemplos,self.w,self.h = X_treino.shape
        self.n_val,self.n_ex_val,_,_ = Xval.shape
    def batch_function(self,n,s='train'):
        """Create a batch of n image pairs: the second half are same-class
        pairs (target 1), the first half different-class pairs (target 0).

        n -- number of pairs (also the number of distinct anchor classes drawn)
        s -- 'train' to sample from X_treino, anything else for Xval
        Returns (pares, targets): two (n, w, h, 1) arrays and an (n,) target
        vector.
        """
        if s == 'train':
            X = self.X_treino
        else:
            X = self.Xval
        n_classes, n_exemplos, w, h = X.shape
        """Cria um batch de n pares, metade da mesma classe e a outra metade de diferentes classes para o treino da rede"""
        categorias = rng.choice(n_classes,size=(n,),replace=False)
        pares=[np.zeros((n, w, h,1)) for i in range(2)]
        targets=np.zeros((n,))
        targets[n//2:] = 1
        for i in range(n):
            categoria = categorias[i]
            index_1 = rng.randint(0,n_exemplos)
            pares[0][i,:,:,:] = X[categoria,index_1].reshape(w,h,1)
            index_2 = rng.randint(0,n_exemplos)
            #pick images of same class for 1st half, different for 2nd
            categoria_2 = categoria if i >= n//2 else (categoria + rng.randint(1,n_classes)) % n_classes
            pares[1][i,:,:,:] = X[categoria_2,index_2].reshape(w,h,1)
        return pares, targets
    def validate_models(self, N, trials, tam, model, s= 'val'):
        """Train *model* on tam flattened training pairs, then measure n-way
        one-shot accuracy for n = 2..N over *trials* tasks each.

        model -- one of 'SVM', 'RandomForest', 'MLPClassifier'
        Returns the list of per-n accuracies. Assumes 54x100 images (pairs
        are flattened to length 54*100*2) -- TODO confirm against the data.
        """
        pairs_train, targets_train = self.batch_function(tam)
        lista1 = pairs_train[0]
        lista2 = pairs_train[1]
        pairs_train2 = []
        # Concatenate the two images of each pair into one flat vector.
        for i in range(len(lista1)):
            seq3=[]
            seq = lista1[i].flatten()
            seq2 = lista2[i].flatten()
            for j in seq:
                seq3.append(j)
            for k in seq2:
                seq3.append(k)
            pairs_train2.append(np.asarray(seq3))
        n_corretos = 0
        pairs2train=np.asarray(pairs_train2).reshape(tam,54*100*2,1)
        targets_train = np.asarray(targets_train).reshape(tam,1)
        print(pairs2train.shape)
        print(targets_train.shape)
        # cnn_net= model.fit(pairs2train, targets_train, batch_size=50,epochs=20,verbose=1)
        lista_acc=[]
        for n in range(2,N+1):
            for t in range(trials):
                print(t)
                pairs_val2=[]
                # NOTE(review): one_shot_task is not defined in this class.
                pairs_val,targets_val = self.one_shot_task(n,s)
                lista11= pairs_val[0]
                lista22 = pairs_val[1]
                # Flatten each validation pair the same way as the train pairs.
                for i2 in range(len(lista11)):
                    seq3=[]
                    seq = lista11[i2].flatten()
                    seq2 = lista22[i2].flatten()
                    for j2 in seq:
                        seq3.append(j2)
                    for k2 in seq2:
                        seq3.append(k2)
                    pairs_val2.append(np.asarray(seq3))
                pairs2val=np.asarray(pairs_val2).reshape(n,54*100*2,1)
                # NOTE(review): kernel is built but never passed to a model.
                kernel = 1.0 * RBF(1.0)
                if (model == 'SVM'):
                    reg = svm.SVC(probability = True)
                if (model == 'RandomForest'):
                    reg = RandomForestClassifier(max_depth=2, random_state=0)
                if (model == 'MLPClassifier'):
                    reg = MLPClassifier(solver='adam', alpha=1e-4,hidden_layer_sizes=(100,50,20), random_state=1)
                # Refit the classifier for every trial, then predict the task.
                reg.fit(pairs2train, targets_train)
                print(pairs2val.shape)
                print(targets_val.shape)
                pred = reg.predict(pairs2val)
                print("Target:",targets_val)
                print("Previsão Probabilidade:",pred)
                pred_list=[]
                for i in pred:
                    pred_list.append(i[0])
                # The task counts as solved when the first pair scores highest.
                if np.argmax(pred_list) == 0:
                    n_corretos +=1
            percent_correct = (n_corretos / trials)
            lista_acc.append(percent_correct)
            n_corretos= 0
        print(lista_acc)
        return lista_acc
| larngroup/One_Shot_Siamese_Net_Drug_Discovery | Validate_models.py | Validate_models.py | py | 5,517 | python | en | code | 2 | github-code | 13 |
38720252962 | import os
import sys
import numpy as np
# Columns that identify the variant or carry metadata, excluded from the
# per-tool score columns copied into the output table.
ignore_attrs=['True Label','CHR','Nuc-Pos','REF-Nuc','ALT-Nuc','Ensembl-Gene-ID','Uniprot-Accession']
#id_names=["Ensembl_geneid","Uniprot_id_Polyphen2"]
#id_names_prefix=["ENSMNL","Uniprot_id_Polyphen2"]
# Name of the ground-truth label column in the input CSV.
label_name="True Label"
def is_nan_symbol(el):
    """Return True when *el* is one of the placeholder symbols used for a
    missing value in the input table: empty string, '-' or '.'."""
    return el in ("", "-", ".")
def make_key_id(head_mapping, arr):
    """Build the variant key 'CHR_pos_ref_alt' from one parsed CSV row.

    head_mapping -- column name -> index for the current file
    arr          -- the row's field values
    """
    parts = [arr[head_mapping[col]] for col in ("CHR", "Nuc-Pos", "REF-Nuc", "ALT-Nuc")]
    return "_".join(parts)
# Convert per-chromosome variant CSVs (paths given on argv) into score
# tables under 03data_table/: one row per variant keyed CHR_pos_ref_alt,
# keeping only rows with an unambiguous label and blanking missing scores.
output_path="03data_table/"
os.makedirs(output_path,exist_ok=True)
for filename in sys.argv[1:]:
    score_data=[]
    #filename="dbnsfp_data/data_chr1.tsv"
    fp=open(filename)
    head=next(fp)
    head_arr=head.strip().split(",")
    # Column name -> index for the current file.
    header_mapping={el:i for i,el in enumerate(head_arr)}
    first=True
    enabled_head=[]
    for line in fp:
        arr=line.strip().split(",")
        # Variant key column.
        key_id=make_key_id(header_mapping,arr)
        data_line=[]
        data_line.append(key_id)
        if first: enabled_head.append("key")
        # Label column: keep the row only when all '|'-separated labels agree.
        label_index=header_mapping[label_name]
        label_el=arr[label_index]
        label_arr=set(label_el.split("|"))
        if len(label_arr)==1:
            el=list(label_arr)[0]
            data_line.append(el)
            if first: enabled_head.append(label_name)
        else:
            continue
        # Score columns: copy values, writing "" for missing-value symbols.
        for i,el in enumerate(arr):
            k=head_arr[i]
            if not k in ignore_attrs:
                if not is_nan_symbol(el):
                    data_line.append(el)
                    if first: enabled_head.append(k)
                else:
                    data_line.append("")
                    if first: enabled_head.append(k)
        score_data.append(data_line)
        first=False
    basename=os.path.basename(filename)
    # NOTE(review): fp/out_fp are never closed; a with-block would be safer.
    out_fp=open(output_path+basename,"w")
    s=",".join(enabled_head)
    out_fp.write(s)
    out_fp.write("\n")
    for pair in score_data:
        s=",".join(pair)
        out_fp.write(s)
        out_fp.write("\n")
| clinfo/PathoGN | script/03totable.py | 03totable.py | py | 1,801 | python | en | code | 1 | github-code | 13 |
# Union-find over n nodes read from stdin. array[i] holds node i's parent,
# with -1 marking a root. Lines are read until one starting with 'S';
# 'C a b' lines query connectivity, any other line unions a and b
# (indices are 1-based on input, converted to 0-based here).
n = int(input())
array = [-1 for i in range(n)]
oper = []
tmp = input().split()
while tmp[0] != 'S' :
    oper.append(tmp)
    tmp = input().split()
def is_connected(oper1, oper2, array):
    """Return 'yes'/'no' for whether the two (0-based) nodes share a root.

    Walks each node to its root and re-points the node directly at that
    root (one-step path compression). The early 'yes' fires when no -1
    root remains in array -- NOTE(review): with tree-shaped parents at
    least one root always exists, so verify when that state can occur.
    """
    if Counter(array)[-1] == 0:
        return 'yes'
    father1 = oper1
    while array[father1] != -1:
        father1 = array[father1]
    array[oper1] = father1 if father1 != oper1 else -1
    father2 = oper2
    while array[father2] != -1:
        father2 = array[father2]
    array[oper2] = father2 if father2 != oper2 else -1
    #print(array)
    if father1 == father2:
        return 'yes'
    else:
        return 'no'
from collections import Counter
# Replay the recorded operations.
for inst in oper:
    if inst[0] == 'C':
        print(is_connected(int(inst[1]) - 1, int(inst[2]) - 1, array))
    else:
        #print(array)
        # Union: attach the root of the larger-indexed operand's tree to
        # the node at min(a, b) - 1.
        tmp = min(int(inst[1]), int(inst[2])) - 1
        num_of_None = Counter(array)[-1]
        if int(inst[1]) > int(inst[2]):
            father = int(inst[1]) - 1
            # No roots left to merge: stop replaying operations.
            if num_of_None == 0:
                break
            while array[father] != -1:
                father = array[father]
            array[father] = tmp
        else:
            if num_of_None == 0:
                break
            father = int(inst[2]) - 1
            while array[father] != -1:
                father = array[father]
            array[father] = tmp
        #print(array)
# Remaining roots == number of connected components.
k = Counter(array)[-1]
if k == 1:
    print('The network is connected.')
else:
    print('There are '+ str(k) + ' components.')
| piglaker/PTA_ZJU_mooc | src13.py | src13.py | py | 1,490 | python | en | code | 0 | github-code | 13 |
7147556099 | import OpenGL.GL as gl
class Texture(object):
    def __init__(self, data=None, width=None, height=None,
            filt=gl.GL_NEAREST, dtype=gl.GL_UNSIGNED_BYTE):
        """ Texture object.
        If data is None an empty texture will be created

        NOTE(review): the promise above is contradicted by the unconditional
        self._data.ndim access below -- passing data=None currently raises
        AttributeError before the texture id is created; confirm intent.

        data   -- image array (H, W) or (H, W, bands); 3 bands -> RGB upload
        width  -- texture width, inferred from data.shape when omitted
        height -- texture height, inferred from data.shape when omitted
        filt   -- min/mag filter for non-float textures
        dtype  -- pixel type; gl.GL_FLOAT selects a single-channel R32F path
        """
        self._data = data
        # format of texture object: 3-channel arrays upload as RGB,
        # everything else as RGBA (overridden to R32F for float data below).
        if self._data.ndim > 2 and self._data.shape[2] == 3:
            self._format = gl.GL_RGB
        else:
            self._format = gl.GL_RGBA
        if dtype == gl.GL_FLOAT:
            # Single-channel float texture: repeat wrapping, linear filtering
            # (note the filt argument is ignored on this path).
            self._format = gl.GL_R32F
            self._image_format = gl.GL_RED
            self._pname = gl.GL_REPEAT
            self._filter_min = gl.GL_LINEAR
            self._filter_mag = gl.GL_LINEAR
        else:
            self._pname = gl.GL_CLAMP_TO_EDGE
            ## Filtering mode if texture pixels < screen pixels
            self._filter_min = filt
            ## Filtering mode if texture pixels > screen pixels
            self._filter_mag = filt
            self._image_format = self._format
        # Create the Texture
        self._handle = gl.glGenTextures(1)
        if self._data is not None:
            if width is None or height is None:
                try:
                    height, width, bands = self._data.shape
                except ValueError:
                    height, width = self._data.shape
            # Bind texture
            gl.glBindTexture(gl.GL_TEXTURE_2D, self._handle)
            # Avoid banding if row length is odd
            gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
            # Set Texture wrap and filter modes
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S,
                self._pname);
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T,
                self._pname);
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER,
                self._filter_min);
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER,
                self._filter_mag);
            gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, self._format, width, height,
                0, self._image_format, dtype, self._data)
            # Unbind texture
            gl.glBindTexture(gl.GL_TEXTURE_2D, 0);
    def bind(self, texUnit=0):
        """Bind this texture to texture unit GL_TEXTURE0 + texUnit."""
        gl.glActiveTexture(gl.GL_TEXTURE0 + texUnit)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self._handle)
    def handle(self):
        """Return the OpenGL texture name (id) allocated for this object."""
        return self._handle
    def unbind(self):
        """Unbind any 2D texture from the active texture unit."""
        gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
| ElsevierSoftwareX/SOFTX_2018_174 | fieldanimation/texture.py | texture.py | py | 2,453 | python | en | code | 1 | github-code | 13 |
34141176062 | import pandas as pd
import numpy as np
import tensorflow as tf
from biom import load_table
from tensorflow import keras
from keras.layers import MultiHeadAttention, LayerNormalization, Dropout, Layer
from keras.layers import Embedding, Input, GlobalAveragePooling1D, Dense
from keras.models import Sequential, Model
# Dataset geometry and loading parameters.
NUM_CLASSES = 3
NUM_SAMPLES_PER_CLASS = 4
MAX_FEATURES_PER_SAMPLE = 3
# NOTE: BATCH_SIZE was previously assigned twice (16, then 32); the dead
# first assignment has been removed -- 32 is the value that took effect.
BATCH_SIZE = 32
def shuffle_class_order(line):
    """Decode one CSV line into a tensor of MAX_FEATURES_PER_SAMPLE ints.

    record_defaults of int() (0) makes every field a required int column;
    the transpose turns decode_csv's per-column output into per-row order.
    """
    defs = [int()]*MAX_FEATURES_PER_SAMPLE
    fields = tf.io.decode_csv(line, record_defaults=defs, field_delim=',')
    fields = tf.transpose(fields)
    return fields
def create_triplets(classes):
    """Build (anchor, positives, negatives) tensors from one class block.

    *classes* is shaped (NUM_CLASSES, NUM_SAMPLES_PER_CLASS,
    MAX_FEATURES_PER_SAMPLE), as produced by the double batching in
    create_dataset. The first (shuffled) class supplies the anchor and
    positives; the remaining classes are flattened and used as negatives.
    """
    classes = tf.random.shuffle(classes)
    positives = classes[0, :]
    negatives = tf.reshape(classes[1:, :], [-1, MAX_FEATURES_PER_SAMPLE])
    positives = tf.random.shuffle(positives)
    negatives = tf.random.shuffle(negatives)
    # Repeat the anchor row so it pairs with each remaining positive.
    ancor = positives[tf.newaxis, 0, :]
    ancor = tf.tile(ancor, tf.constant([NUM_SAMPLES_PER_CLASS-1,1]))
    positives = positives[1:, :]
    negatives = negatives[:(NUM_SAMPLES_PER_CLASS-1)]
    return ancor, positives, negatives
def create_dataset(inputs_dir):
    """Build the triplet tf.data pipeline from files matched by *inputs_dir*.

    Lines are interleaved across the matched files, decoded, then grouped
    into (NUM_CLASSES, NUM_SAMPLES_PER_CLASS, ...) blocks before triplet
    creation, unbatching and re-batching to BATCH_SIZE.
    """
    dataset = tf.data.Dataset.list_files(inputs_dir, shuffle=False)
    dataset = dataset.interleave(
        lambda file_path: tf.data.TextLineDataset(file_path),
        cycle_length=tf.data.AUTOTUNE,
        num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.map(shuffle_class_order, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.batch(NUM_SAMPLES_PER_CLASS) # batch size should be # examples per class
    dataset = dataset.batch(NUM_CLASSES) # batch size should be # class
    dataset = dataset.cache()
    dataset = dataset.map(create_triplets, num_parallel_calls=tf.data.AUTOTUNE)
    # NOTE(review): consecutive repeats multiply -- effectively
    # (NUM_SAMPLES_PER_CLASS-1) * NUM_SAMPLES_PER_CLASS passes; confirm intent.
    dataset = dataset.repeat(NUM_SAMPLES_PER_CLASS-1)
    dataset = dataset.repeat(NUM_SAMPLES_PER_CLASS)
    dataset = dataset.unbatch()
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
# Smoke test: iterate the pipeline twice and print every triplet batch.
dataset = create_dataset('test.inputs')
for i in range(2):
    for s in dataset:
        # print(o)
        print(s)
    # print(i)
print('run!!!!')
| kwcantrell/scale-16s | transformer-util.py | transformer-util.py | py | 2,166 | python | en | code | 0 | github-code | 13 |
39871266214 | """Integration tests for the kingpin.actors.support.api module"""
from nose.plugins.attrib import attr
from tornado import testing
from tornado import httpclient
from kingpin.actors import exceptions
from kingpin.actors.support import api
__author__ = 'Matt Wise <matt@nextdoor.com>'
# Declarative map of the httpbin.org endpoints exercised by these tests:
# each attr entry gets a URL path and its allowed HTTP methods. '%res%' in a
# path is presumably substituted from keyword args (e.g. status(res='401')).
HTTPBIN = {
    'path': '/',
    'http_methods': {'get': {}},
    'attrs': {
        'get': {
            'path': '/get',
            'http_methods': {'get': {}},
        },
        'post': {
            'path': '/post',
            'http_methods': {'post': {}},
        },
        'put': {
            'path': '/put',
            'http_methods': {'put': {}},
        },
        'delete': {
            'path': '/delete',
            'http_methods': {'delete': {}},
        },
        'status': {
            'path': '/status/%res%',
            'http_methods': {'get': {}},
        },
        'basic_auth': {
            'path': '/basic-auth/username/password',
            'http_methods': {'get': {}},
        }
    }
}
class HTTPBinRestConsumer(api.RestConsumer):
    """RestConsumer wired to the public httpbin.org endpoint."""
    _CONFIG = HTTPBIN
    _ENDPOINT = 'http://httpbin.org'
class HTTPBinRestConsumerBasicAuthed(HTTPBinRestConsumer):
    """Same consumer with basic-auth credentials matching httpbin's
    /basic-auth/username/password route."""
    # Copy the base config so the auth key does not leak into the parent.
    _CONFIG = dict(HTTPBinRestConsumer._CONFIG)
    _CONFIG['auth'] = {
        'user': 'username',
        'pass': 'password',
    }
class IntegrationRestConsumer(testing.AsyncTestCase):
    """Live integration tests for api.RestConsumer against httpbin.org.

    NOTE(review): methods are named integration_* rather than test_*, so a
    standard unittest run will not collect them -- presumably the project's
    integration runner selects them explicitly; confirm the CI invocation.
    NOTE(review): assertEquals is a deprecated alias of assertEqual.
    """
    integration = True
    # --- plain GET requests ---
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_base_get(self):
        httpbin = HTTPBinRestConsumer()
        ret = yield httpbin.http_get()
        self.assertIn('DOCTYPE', ret)
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_get_json(self):
        httpbin = HTTPBinRestConsumer()
        ret = yield httpbin.get().http_get()
        self.assertEquals(ret['url'], 'http://httpbin.org/get')
    # --- basic authentication ---
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_get_basic_auth(self):
        httpbin = HTTPBinRestConsumerBasicAuthed()
        ret = yield httpbin.basic_auth().http_get()
        self.assertEquals(
            ret, {'authenticated': True, 'user': 'username'})
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_get_basic_auth_401(self):
        # No credentials configured: 401 maps to InvalidCredentials.
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(exceptions.InvalidCredentials):
            yield httpbin.basic_auth().http_get()
    # --- verbs with payloads / query args ---
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_get_with_args(self):
        httpbin = HTTPBinRestConsumer()
        ret = yield httpbin.get().http_get(foo='bar', baz='bat')
        self.assertEquals(ret['url'], 'http://httpbin.org/get?baz=bat&foo=bar')
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_post(self):
        httpbin = HTTPBinRestConsumer()
        ret = yield httpbin.post().http_post(foo='bar', baz='bat')
        self.assertEquals(ret['url'], 'http://httpbin.org/post')
        self.assertEquals(ret['form'], {'foo': 'bar', 'baz': 'bat'})
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_put(self):
        httpbin = HTTPBinRestConsumer()
        ret = yield httpbin.put().http_put(foo='bar', baz='bat')
        self.assertEquals(ret['url'], 'http://httpbin.org/put')
        self.assertEquals(ret['data'], 'foo=bar&baz=bat')
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_delete(self):
        httpbin = HTTPBinRestConsumer()
        ret = yield httpbin.delete().http_delete(foo='bar', baz='bat')
        self.assertEquals(
            ret['url'],
            'http://httpbin.org/delete?baz=bat&foo=bar')
    # --- HTTP status code -> exception mapping ---
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_401(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(exceptions.InvalidCredentials):
            yield httpbin.status(res='401').http_get()
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_403(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(exceptions.InvalidCredentials):
            yield httpbin.status(res='403').http_get()
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_500(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(httpclient.HTTPError):
            yield httpbin.status(res='500').http_get()
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_501(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(exceptions.RecoverableActorFailure):
            yield httpbin.status(res='501').http_get()
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_502(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(httpclient.HTTPError):
            yield httpbin.status(res='502').http_get()
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_503(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(httpclient.HTTPError):
            yield httpbin.status(res='503').http_get()
    @attr('http', 'integration')
    @testing.gen_test(timeout=60)
    def integration_status_504(self):
        httpbin = HTTPBinRestConsumer()
        with self.assertRaises(httpclient.HTTPError):
            yield httpbin.status(res='504').http_get()
| smmorneau/kingpin | kingpin/actors/support/test/integration_api.py | integration_api.py | py | 5,538 | python | en | code | null | github-code | 13 |
28303152448 | import json
from tqdm import tqdm
import concurrent.futures
from costante_gral import URL_BASE, RUTA_BUSQUEDA, RUTA_DATOS, RUTA_INFORMES
from funciones.api import consumir_api
from funciones.csv_funciones import guardar_csv
from funciones.json_funciones import leer_json, guardar_json
from funciones.parses import parse_categoria, parse_produco
def crear_lista_url_productos():
    """Build every paginated product-search URL from categorias.json and
    persist the list to urls_productos.json under RUTA_DATOS."""
    data = leer_json(f'{RUTA_DATOS}categorias.json')
    productos_urls = []
    # data[0] holds the category list; emit one URL per page per category.
    for categoria in data[0]:
        categoria_seleccionada = parse_categoria(categoria)
        for pagina in categoria_seleccionada.paginas:
            url_productos = URL_BASE + RUTA_BUSQUEDA + categoria_seleccionada.url + '?' + pagina['url']
            productos_urls.append(url_productos)
    # Save the URLs to a JSON file
    with open(f'{RUTA_DATOS}urls_productos.json', 'w') as file:
        json.dump(productos_urls, file)
def descargar_productos(sucursal, headers):
    """Concurrently download every product page for *sucursal* (branch),
    collect the distinct brands and write the parsed products to a CSV.

    Side effects: writes lista_marcas.json under RUTA_DATOS and one CSV
    named after the branch under RUTA_INFORMES.
    """
    enlaces = leer_json(f'{RUTA_DATOS}urls_productos.json')
    productos_dic = []
    marcas = []
    # Set up the progress bar
    total_enlaces = len(enlaces)
    progress_bar = tqdm(total=total_enlaces, desc="Descargando y ordenando productos", unit="enlace")
    def procesar_producto(producto):
        # Record each brand once, then parse and keep the product.
        # NOTE(review): marcas/productos_dic are mutated from worker threads;
        # list.append is atomic in CPython but the 'not in' check can race.
        marca_actual = {'id': producto['brandId'], 'nombre': producto['brand']}
        if marca_actual not in marcas:
            marcas.append(marca_actual)
        productos_dic.append(parse_produco(producto))
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = []
        for enlace in enlaces:
            # Append the branch id as a query parameter -- presumably the
            # API scopes results to that sucursal; confirm with the API docs.
            enlace_actual = enlace + f'&sc={sucursal.sucursal_id}'
            future = executor.submit(consumir_api, enlace_actual, headers, proxy=None)
            futures.append(future)
        for future in concurrent.futures.as_completed(futures):
            productos = future.result()
            for prod in productos:
                executor.submit(procesar_producto, prod)
            progress_bar.update(1)  # Advance the bar once per processed link
    progress_bar.close()  # Close the progress bar
    # Sort the products by category
    productos_dic = sorted(productos_dic, key=lambda x: x.categoria_id)
    guardar_json(marcas, f'{RUTA_DATOS}lista_marcas.json')
    for elemento in productos_dic:
        guardar_csv(elemento, f'{RUTA_INFORMES}{sucursal.nombre.replace(" ", "-")}.csv')
| leosant027/proyecto_hiper | funciones/producto.py | producto.py | py | 2,439 | python | es | code | 0 | github-code | 13 |
def raizCuadradaEnt(numero):
    """Integer square root of a non-negative *numero*.

    Finds a power-of-two upper bound by repeated doubling (O(log N)
    iterations), then delegates to busquedaBinaria over [bound // 2, bound].
    Integer division keeps the result an int without casting.
    """
    if numero == 0:
        return numero
    cota = 1
    while cota * cota <= numero:
        cota *= 2  # double until the bound overshoots sqrt(numero)
    # The answer lies between cota // 2 and cota; binary-search for it.
    return busquedaBinaria(numero, cota // 2, cota)
def busquedaBinaria(x, i, f):
    """Recursive binary search for floor(sqrt(x)) in the inclusive range
    [i, f]; assumes i <= floor(sqrt(x)) <= f on the first call. O(log N).

    Bug fix: on x > medio*medio the original recursed into (medio, f-1),
    which both discards the valid upper candidate f and can stall on
    [medio, medio] without converging (e.g. it returned 13 instead of 14
    for x = 199).
    """
    if i == f:
        return i
    # Bias the midpoint upward so the range always shrinks, even when
    # f == i + 1.
    medio = (i + f + 1) // 2
    if medio * medio <= x:
        # medio is still a valid candidate: keep it in the range.
        return busquedaBinaria(x, medio, f)
    else:
        return busquedaBinaria(x, i, medio - 1)
# main -- smoke checks; intended integer square roots: 1, 3, 4, 14.
print (raizCuadradaEnt(2))
print (raizCuadradaEnt(9))
print (raizCuadradaEnt(18))
print (raizCuadradaEnt(199))
# Visual debugging via https://pythontutor.com/visualize.html#mode=edit
| eduardost/p3 | raizCuadradaEnt.py | raizCuadradaEnt.py | py | 826 | python | es | code | 0 | github-code | 13 |
17054446894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DiscountInfos import DiscountInfos
from alipay.aop.api.domain.DishList import DishList
from alipay.aop.api.domain.OtherAmountInfos import OtherAmountInfos
from alipay.aop.api.domain.PaymentList import PaymentList
from alipay.aop.api.domain.RefundList import RefundList
class KoubeiCateringOrderSyncModel(object):
def __init__(self):
self._adjust_amount = None
self._amount = None
self._biz_product = None
self._business_type = None
self._dinner_type = None
self._discount_amount = None
self._discount_infos = None
self._dish_amount = None
self._dish_list = None
self._ext_infos = None
self._koubei_payment_amount = None
self._offline_payment_amount = None
self._order_id = None
self._order_style = None
self._order_time = None
self._other_amount_discountable = None
self._other_amount_infos = None
self._other_amount_undiscountable = None
self._out_biz_no = None
self._partner_id = None
self._pay_style = None
self._payment_list = None
self._people_num = None
self._pos_version = None
self._receivable_amount = None
self._refund_list = None
self._shop_id = None
self._status = None
self._table_no = None
self._total_paymented_amount = None
self._use_online_promotion_flag = None
@property
def adjust_amount(self):
return self._adjust_amount
@adjust_amount.setter
def adjust_amount(self, value):
self._adjust_amount = value
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def biz_product(self):
return self._biz_product
@biz_product.setter
def biz_product(self, value):
self._biz_product = value
@property
def business_type(self):
return self._business_type
@business_type.setter
def business_type(self, value):
self._business_type = value
@property
def dinner_type(self):
return self._dinner_type
@dinner_type.setter
def dinner_type(self, value):
self._dinner_type = value
@property
def discount_amount(self):
return self._discount_amount
@discount_amount.setter
def discount_amount(self, value):
self._discount_amount = value
@property
def discount_infos(self):
return self._discount_infos
@discount_infos.setter
def discount_infos(self, value):
if isinstance(value, list):
self._discount_infos = list()
for i in value:
if isinstance(i, DiscountInfos):
self._discount_infos.append(i)
else:
self._discount_infos.append(DiscountInfos.from_alipay_dict(i))
@property
def dish_amount(self):
return self._dish_amount
@dish_amount.setter
def dish_amount(self, value):
self._dish_amount = value
@property
def dish_list(self):
return self._dish_list
@dish_list.setter
def dish_list(self, value):
if isinstance(value, list):
self._dish_list = list()
for i in value:
if isinstance(i, DishList):
self._dish_list.append(i)
else:
self._dish_list.append(DishList.from_alipay_dict(i))
    # ext_infos: pass-through accessor for free-form extension info.
    @property
    def ext_infos(self):
        return self._ext_infos
    @ext_infos.setter
    def ext_infos(self, value):
        self._ext_infos = value
    # koubei_payment_amount: pass-through accessor (presumably the amount
    # paid through Koubei -- confirm against API docs).
    @property
    def koubei_payment_amount(self):
        return self._koubei_payment_amount
    @koubei_payment_amount.setter
    def koubei_payment_amount(self, value):
        self._koubei_payment_amount = value
    # offline_payment_amount: pass-through accessor, no validation.
    @property
    def offline_payment_amount(self):
        return self._offline_payment_amount
    @offline_payment_amount.setter
    def offline_payment_amount(self, value):
        self._offline_payment_amount = value
    # order_id: pass-through accessor for the order identifier.
    @property
    def order_id(self):
        return self._order_id
    @order_id.setter
    def order_id(self, value):
        self._order_id = value
    # order_style: pass-through accessor; allowed values defined remotely.
    @property
    def order_style(self):
        return self._order_style
    @order_style.setter
    def order_style(self, value):
        self._order_style = value
    # order_time: pass-through accessor (format not enforced here --
    # presumably an API-defined datetime string).
    @property
    def order_time(self):
        return self._order_time
    @order_time.setter
    def order_time(self, value):
        self._order_time = value
    # other_amount_discountable: pass-through accessor, no validation.
    @property
    def other_amount_discountable(self):
        return self._other_amount_discountable
    @other_amount_discountable.setter
    def other_amount_discountable(self, value):
        self._other_amount_discountable = value
@property
def other_amount_infos(self):
return self._other_amount_infos
@other_amount_infos.setter
def other_amount_infos(self, value):
if isinstance(value, list):
self._other_amount_infos = list()
for i in value:
if isinstance(i, OtherAmountInfos):
self._other_amount_infos.append(i)
else:
self._other_amount_infos.append(OtherAmountInfos.from_alipay_dict(i))
    # other_amount_undiscountable: pass-through accessor, no validation.
    @property
    def other_amount_undiscountable(self):
        return self._other_amount_undiscountable
    @other_amount_undiscountable.setter
    def other_amount_undiscountable(self, value):
        self._other_amount_undiscountable = value
    # out_biz_no: pass-through accessor (presumably the merchant-side
    # business number -- confirm against API docs).
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    # partner_id: pass-through accessor for the partner identifier.
    @property
    def partner_id(self):
        return self._partner_id
    @partner_id.setter
    def partner_id(self, value):
        self._partner_id = value
    # pay_style: pass-through accessor; allowed values defined remotely.
    @property
    def pay_style(self):
        return self._pay_style
    @pay_style.setter
    def pay_style(self, value):
        self._pay_style = value
@property
def payment_list(self):
return self._payment_list
@payment_list.setter
def payment_list(self, value):
if isinstance(value, list):
self._payment_list = list()
for i in value:
if isinstance(i, PaymentList):
self._payment_list.append(i)
else:
self._payment_list.append(PaymentList.from_alipay_dict(i))
    # people_num: pass-through accessor (presumably the diner head count --
    # type not enforced here).
    @property
    def people_num(self):
        return self._people_num
    @people_num.setter
    def people_num(self, value):
        self._people_num = value
    # pos_version: pass-through accessor for the POS software version.
    @property
    def pos_version(self):
        return self._pos_version
    @pos_version.setter
    def pos_version(self, value):
        self._pos_version = value
    # receivable_amount: pass-through accessor, no validation.
    @property
    def receivable_amount(self):
        return self._receivable_amount
    @receivable_amount.setter
    def receivable_amount(self, value):
        self._receivable_amount = value
@property
def refund_list(self):
return self._refund_list
@refund_list.setter
def refund_list(self, value):
if isinstance(value, list):
self._refund_list = list()
for i in value:
if isinstance(i, RefundList):
self._refund_list.append(i)
else:
self._refund_list.append(RefundList.from_alipay_dict(i))
    # shop_id: pass-through accessor for the shop identifier.
    @property
    def shop_id(self):
        return self._shop_id
    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value
    # status: pass-through accessor; allowed values defined by the API.
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        self._status = value
    # table_no: pass-through accessor (presumably the restaurant table
    # number -- confirm against API docs).
    @property
    def table_no(self):
        return self._table_no
    @table_no.setter
    def table_no(self, value):
        self._table_no = value
    # total_paymented_amount: pass-through accessor, no validation.
    @property
    def total_paymented_amount(self):
        return self._total_paymented_amount
    @total_paymented_amount.setter
    def total_paymented_amount(self, value):
        self._total_paymented_amount = value
    # use_online_promotion_flag: pass-through accessor (flag semantics
    # defined by the remote API -- not interpreted locally).
    @property
    def use_online_promotion_flag(self):
        return self._use_online_promotion_flag
    @use_online_promotion_flag.setter
    def use_online_promotion_flag(self, value):
        self._use_online_promotion_flag = value
def to_alipay_dict(self):
params = dict()
if self.adjust_amount:
if hasattr(self.adjust_amount, 'to_alipay_dict'):
params['adjust_amount'] = self.adjust_amount.to_alipay_dict()
else:
params['adjust_amount'] = self.adjust_amount
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.biz_product:
if hasattr(self.biz_product, 'to_alipay_dict'):
params['biz_product'] = self.biz_product.to_alipay_dict()
else:
params['biz_product'] = self.biz_product
if self.business_type:
if hasattr(self.business_type, 'to_alipay_dict'):
params['business_type'] = self.business_type.to_alipay_dict()
else:
params['business_type'] = self.business_type
if self.dinner_type:
if hasattr(self.dinner_type, 'to_alipay_dict'):
params['dinner_type'] = self.dinner_type.to_alipay_dict()
else:
params['dinner_type'] = self.dinner_type
if self.discount_amount:
if hasattr(self.discount_amount, 'to_alipay_dict'):
params['discount_amount'] = self.discount_amount.to_alipay_dict()
else:
params['discount_amount'] = self.discount_amount
if self.discount_infos:
if isinstance(self.discount_infos, list):
for i in range(0, len(self.discount_infos)):
element = self.discount_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.discount_infos[i] = element.to_alipay_dict()
if hasattr(self.discount_infos, 'to_alipay_dict'):
params['discount_infos'] = self.discount_infos.to_alipay_dict()
else:
params['discount_infos'] = self.discount_infos
if self.dish_amount:
if hasattr(self.dish_amount, 'to_alipay_dict'):
params['dish_amount'] = self.dish_amount.to_alipay_dict()
else:
params['dish_amount'] = self.dish_amount
if self.dish_list:
if isinstance(self.dish_list, list):
for i in range(0, len(self.dish_list)):
element = self.dish_list[i]
if hasattr(element, 'to_alipay_dict'):
self.dish_list[i] = element.to_alipay_dict()
if hasattr(self.dish_list, 'to_alipay_dict'):
params['dish_list'] = self.dish_list.to_alipay_dict()
else:
params['dish_list'] = self.dish_list
if self.ext_infos:
if hasattr(self.ext_infos, 'to_alipay_dict'):
params['ext_infos'] = self.ext_infos.to_alipay_dict()
else:
params['ext_infos'] = self.ext_infos
if self.koubei_payment_amount:
if hasattr(self.koubei_payment_amount, 'to_alipay_dict'):
params['koubei_payment_amount'] = self.koubei_payment_amount.to_alipay_dict()
else:
params['koubei_payment_amount'] = self.koubei_payment_amount
if self.offline_payment_amount:
if hasattr(self.offline_payment_amount, 'to_alipay_dict'):
params['offline_payment_amount'] = self.offline_payment_amount.to_alipay_dict()
else:
params['offline_payment_amount'] = self.offline_payment_amount
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.order_style:
if hasattr(self.order_style, 'to_alipay_dict'):
params['order_style'] = self.order_style.to_alipay_dict()
else:
params['order_style'] = self.order_style
if self.order_time:
if hasattr(self.order_time, 'to_alipay_dict'):
params['order_time'] = self.order_time.to_alipay_dict()
else:
params['order_time'] = self.order_time
if self.other_amount_discountable:
if hasattr(self.other_amount_discountable, 'to_alipay_dict'):
params['other_amount_discountable'] = self.other_amount_discountable.to_alipay_dict()
else:
params['other_amount_discountable'] = self.other_amount_discountable
if self.other_amount_infos:
if isinstance(self.other_amount_infos, list):
for i in range(0, len(self.other_amount_infos)):
element = self.other_amount_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.other_amount_infos[i] = element.to_alipay_dict()
if hasattr(self.other_amount_infos, 'to_alipay_dict'):
params['other_amount_infos'] = self.other_amount_infos.to_alipay_dict()
else:
params['other_amount_infos'] = self.other_amount_infos
if self.other_amount_undiscountable:
if hasattr(self.other_amount_undiscountable, 'to_alipay_dict'):
params['other_amount_undiscountable'] = self.other_amount_undiscountable.to_alipay_dict()
else:
params['other_amount_undiscountable'] = self.other_amount_undiscountable
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.pay_style:
if hasattr(self.pay_style, 'to_alipay_dict'):
params['pay_style'] = self.pay_style.to_alipay_dict()
else:
params['pay_style'] = self.pay_style
if self.payment_list:
if isinstance(self.payment_list, list):
for i in range(0, len(self.payment_list)):
element = self.payment_list[i]
if hasattr(element, 'to_alipay_dict'):
self.payment_list[i] = element.to_alipay_dict()
if hasattr(self.payment_list, 'to_alipay_dict'):
params['payment_list'] = self.payment_list.to_alipay_dict()
else:
params['payment_list'] = self.payment_list
if self.people_num:
if hasattr(self.people_num, 'to_alipay_dict'):
params['people_num'] = self.people_num.to_alipay_dict()
else:
params['people_num'] = self.people_num
if self.pos_version:
if hasattr(self.pos_version, 'to_alipay_dict'):
params['pos_version'] = self.pos_version.to_alipay_dict()
else:
params['pos_version'] = self.pos_version
if self.receivable_amount:
if hasattr(self.receivable_amount, 'to_alipay_dict'):
params['receivable_amount'] = self.receivable_amount.to_alipay_dict()
else:
params['receivable_amount'] = self.receivable_amount
if self.refund_list:
if isinstance(self.refund_list, list):
for i in range(0, len(self.refund_list)):
element = self.refund_list[i]
if hasattr(element, 'to_alipay_dict'):
self.refund_list[i] = element.to_alipay_dict()
if hasattr(self.refund_list, 'to_alipay_dict'):
params['refund_list'] = self.refund_list.to_alipay_dict()
else:
params['refund_list'] = self.refund_list
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.table_no:
if hasattr(self.table_no, 'to_alipay_dict'):
params['table_no'] = self.table_no.to_alipay_dict()
else:
params['table_no'] = self.table_no
if self.total_paymented_amount:
if hasattr(self.total_paymented_amount, 'to_alipay_dict'):
params['total_paymented_amount'] = self.total_paymented_amount.to_alipay_dict()
else:
params['total_paymented_amount'] = self.total_paymented_amount
if self.use_online_promotion_flag:
if hasattr(self.use_online_promotion_flag, 'to_alipay_dict'):
params['use_online_promotion_flag'] = self.use_online_promotion_flag.to_alipay_dict()
else:
params['use_online_promotion_flag'] = self.use_online_promotion_flag
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringOrderSyncModel()
if 'adjust_amount' in d:
o.adjust_amount = d['adjust_amount']
if 'amount' in d:
o.amount = d['amount']
if 'biz_product' in d:
o.biz_product = d['biz_product']
if 'business_type' in d:
o.business_type = d['business_type']
if 'dinner_type' in d:
o.dinner_type = d['dinner_type']
if 'discount_amount' in d:
o.discount_amount = d['discount_amount']
if 'discount_infos' in d:
o.discount_infos = d['discount_infos']
if 'dish_amount' in d:
o.dish_amount = d['dish_amount']
if 'dish_list' in d:
o.dish_list = d['dish_list']
if 'ext_infos' in d:
o.ext_infos = d['ext_infos']
if 'koubei_payment_amount' in d:
o.koubei_payment_amount = d['koubei_payment_amount']
if 'offline_payment_amount' in d:
o.offline_payment_amount = d['offline_payment_amount']
if 'order_id' in d:
o.order_id = d['order_id']
if 'order_style' in d:
o.order_style = d['order_style']
if 'order_time' in d:
o.order_time = d['order_time']
if 'other_amount_discountable' in d:
o.other_amount_discountable = d['other_amount_discountable']
if 'other_amount_infos' in d:
o.other_amount_infos = d['other_amount_infos']
if 'other_amount_undiscountable' in d:
o.other_amount_undiscountable = d['other_amount_undiscountable']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'pay_style' in d:
o.pay_style = d['pay_style']
if 'payment_list' in d:
o.payment_list = d['payment_list']
if 'people_num' in d:
o.people_num = d['people_num']
if 'pos_version' in d:
o.pos_version = d['pos_version']
if 'receivable_amount' in d:
o.receivable_amount = d['receivable_amount']
if 'refund_list' in d:
o.refund_list = d['refund_list']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'status' in d:
o.status = d['status']
if 'table_no' in d:
o.table_no = d['table_no']
if 'total_paymented_amount' in d:
o.total_paymented_amount = d['total_paymented_amount']
if 'use_online_promotion_flag' in d:
o.use_online_promotion_flag = d['use_online_promotion_flag']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/KoubeiCateringOrderSyncModel.py | KoubeiCateringOrderSyncModel.py | py | 20,534 | python | en | code | 241 | github-code | 13 |
17015102955 | from collections import defaultdict
from typing import DefaultDict, List, Tuple
from queue import Queue
def bfs(graph: DefaultDict[int, List[int]], num_nodes: int) -> List[int]:
result = []
seen = defaultdict(bool)
q = Queue()
q.put(0)
seen[0] = True
while not q.empty():
node_id = q.get()
print(node_id, end=" ")
result.append(node_id)
for child_id in graph[node_id]:
if not seen[child_id]:
q.put(child_id)
seen[child_id] = True
return result
def initialise_graph(edges: List[Tuple[int]]) -> DefaultDict[int, List[int]]:
graph = defaultdict(list)
for u, v in edges:
# add directed edge from u to v
graph[u].append(v)
return graph
| chrisjdavie/interview_practice | geeksforgeeks/graph/bfs/iterative/run.py | run.py | py | 776 | python | en | code | 0 | github-code | 13 |
26783638804 | """
One-time migration script from sqlalchemy models and sqlite database to custom ORM & PostgreSQL.
Not designed to work as part of the regular alembic system, merely placed here for archive purposes.
Should never need to run this again.
2021-05-03
"""
from datetime import datetime, timedelta
import sqlite3
from data.post_data import PostData, PostModel
from data.snapshot_data import SnapshotData, SnapshotModel, SnapshotFrontpageModel
from data.user_data import UserData
from services import post_service
from utils.logger import logger
from utils.reddit import base36decode
_post_data = PostData()
_snapshot_data = SnapshotData()
_user_data = UserData()
DB_FILE = "src/database.db"
def migrate_posts(offset=0):
"""Grabs posts in batches of 1000 at a time and migrates them to the new database.
Returns number of processed rows. If less than 1000, at end of the table."""
conn = sqlite3.connect(DB_FILE)
conn.row_factory = sqlite3.Row
rows = conn.execute("SELECT * FROM posts LIMIT 1000 OFFSET ?;", (offset,)).fetchall()
conn.close()
row = None
for row in rows:
# If the post already exists in the database we don't need to do anything.
post_id36 = row["id"]
post = post_service.get_post_by_id(post_id36)
if post:
continue
# OH RIGHT NO USER DATA IS SAVED IN THE OLD DATABASE.
# username = row["name"]
# if not user_service.get_user(username):
# user = UserModel()
# user.username = username
# _user_data.insert(user, error_on_conflict=False)
post = PostModel()
post.set_id(post_id36)
# post.author = username
post.title = row["title"]
post.created_time = row["created_time"]
post.flair_text = row["flair"] # will add flair id in later mass update/backfill.. and user info
_post_data.insert(post, error_on_conflict=False)
if not row:
logger.warning("No rows processed!")
else:
logger.info(f"Most recent migrated row: psk={row['psk']}, id={row['id']}")
return len(rows)
def migrate_snapshots(date, hour):
conn = sqlite3.connect(DB_FILE)
conn.row_factory = sqlite3.Row
row = conn.execute("SELECT * FROM snapshots WHERE date=? and hour=?;", (date, hour)).fetchone()
# No data, past the last recorded snapshot?
if not row:
return
old_snapshot_psk = row["psk"]
snapshot = SnapshotModel()
snapshot.created_time = row["datetime"]
snapshot.date = date
snapshot.hour = hour
snapshot.subscribers = row["subscribers"]
new_snapshot = _snapshot_data.insert(snapshot)
rows = conn.execute(
"SELECT sf.*, p.id FROM snapshot_frontpage sf JOIN posts p on sf.post_psk = p.psk WHERE snapshot_psk=?;",
(old_snapshot_psk,),
).fetchall()
conn.close()
for row in rows:
sfp_model = SnapshotFrontpageModel()
sfp_model.post_id = base36decode(row["id"])
sfp_model.snapshot_id = new_snapshot.id
sfp_model.rank = row["rank"]
sfp_model.score = row["score"]
_snapshot_data.insert(sfp_model)
def main():
current_offset = 0
while True:
processed_posts = migrate_posts(current_offset)
current_offset += processed_posts
if processed_posts < 1000:
break
if current_offset % 1000 == 0:
logger.info(f"Migrated {current_offset} posts total")
current_datetime = datetime.fromisoformat("2020-05-12 04:00:00.000")
now = datetime.utcnow()
while current_datetime <= now:
try:
migrate_snapshots(current_datetime.date(), current_datetime.hour)
except Exception:
logger.exception(f"Failed to migrate {current_datetime.date()} - {current_datetime.hour}")
current_datetime += timedelta(hours=1)
if current_datetime.hour == 0:
logger.info(f"Finished migrating {current_datetime.date()}")
if __name__ == "__main__":
main()
| r-anime/modbot | scripts/frontpage_sqlite_migration.py | frontpage_sqlite_migration.py | py | 4,011 | python | en | code | 3 | github-code | 13 |
36461137075 | from csv import reader
if __name__ == '__main__':
with open('prog1.csv', 'r') as emp_obj:
# pass the file object to reader() to get the reader object
csv_emp = reader(emp_obj)
my_dict = {}
header = next(csv_emp)
if header != None:
list = []
for row in csv_emp:
my_dict1 = {}
my_dict1['empId'] = row[0]
my_dict1['empname'] = row[1]
my_dict1['dept_id'] = row[2]
# print(my_dict1)
list.append(my_dict1)
#print(list)
with open('dept1.csv', 'r') as dept_obj:
# pass the file object dj to reader() to get the reader object
csv_dept = reader(dept_obj)
header1 = next(csv_dept)
# Pass reader object to list() to get a list of lists
if header != None:
#list1 = []
my_dictnw = {}
for row1 in csv_dept:
# my_dict2 = {}
# my_dict2['dept_id'] = row1[0]
# my_dict2['DeptName'] = row1[1]
# list1.append(my_dict2)
my_dictnw[row1[0]] = row1[1]
print(my_dictnw)
#print(list1)
# print(my_dict2)
# print(type(row))
for x in list:
if x['dept_id'] in my_dictnw:
x['DeptName'] = my_dictnw[x['dept_id']]
# break
print(list)
| Anushadsilva/python_practice | dictionary/csv_read2.py | csv_read2.py | py | 1,435 | python | en | code | 0 | github-code | 13 |
26348800393 | # Calculate the multiplication and sum of two numbers
# Given two integer numbers return their product only if the product is equal to or lower than 1000, else return their sum.
number1 = int(input("Give the first number \n"))
number2 = int(input("Give the Second number"))
result = number1 * number2
if result <= 1000:
print(f"Product of {number1} and {number2} is: {result}")
else:
result = number1 + number2
print(f"Sum of {number1} and {number2} is {result}") | suniledupuganti/Python_Basics | Exercise1.py | Exercise1.py | py | 475 | python | en | code | 0 | github-code | 13 |
29581534605 | from helpers.mock_data import gen_array
from search import linear_search, binary_search, exponential_search, interpolation_search
from sort import quick_sort
def test_linear_search():
array = gen_array(length=10)
item = linear_search(array=array, element=array[5])
assert item == 5, item
def test_binary_search():
array = gen_array(length=10)
sorted_array = quick_sort(array=array)
item = binary_search(array=sorted_array, element=sorted_array[5])
assert item == 5, item
def test_exponential_search():
array = gen_array(length=10)
sorted_array = quick_sort(array=array)
item = exponential_search(array=sorted_array, element=sorted_array[5])
assert item == 5, item
def test_interpolation_search():
array = gen_array(length=10)
sorted_array = quick_sort(array=array)
item = interpolation_search(array=sorted_array, element=sorted_array[5])
assert item == 5, item
if __name__ == '__main__':
test_linear_search()
test_binary_search()
test_exponential_search()
test_interpolation_search()
| LANneeer/algorithms | test/test_search.py | test_search.py | py | 1,071 | python | en | code | 0 | github-code | 13 |
39230222973 | import os
import subprocess
import sys
import tempfile
from lxml import etree
log = sys.stderr.write
trans = {i: i + "_" for i in ("node", "graph", "subgraph", "edge")}
def dotgraph(xml_, output=None, links_only=False, title=""):
dot = makedot(xml_, links_only=links_only, title=title)
if output:
with open(output + ".dot", "w") as out:
out.write(dot)
cmd = subprocess.Popen(
["dot", "-Tpdf"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
env=os.environ.copy(),
)
png, _ = cmd.communicate(dot.encode("utf-8"))
if not output:
tfile, tname = tempfile.mkstemp(dir="/tmp")
write = lambda x: os.write(tfile, x)
close = lambda: os.close(tfile)
view = True
else:
tname = output
tfile = open(output, "wb")
write = lambda x: tfile.write(x)
close = lambda: tfile.close()
view = False
write(png)
close()
if view:
os.system('geeqie -t -r file:"%s" &' % tname)
def get_name(ele, safe=False):
names = ele.xpath("name")
if names:
name = names[0].text
else:
name = ele.get("name")
if safe:
name = trans.get(name, name)
return name.strip()
def makedot(xml_, links_only=False, title="dd"):
dom = etree.fromstring(xml_.encode("utf-8"))
dot = [
"\ndigraph tables {",
'graph [rankdir = RL, label="%s", labelloc=t];' % title,
"node [shape = plaintext];",
"subgraph cluster_key { label=Key;",
'_tn [width=1.8, label = "TABLE NAME", filled=True, shape=box, '
'style=filled, fillcolor="#ccccff", rank=max];',
'_pk [width=1.8, label = "PRIMARY KEY", shape=box, '
'fontcolor="red", rank=max];',
'_un [width=1.8, label = "UNIQUE", filled=True, shape=box, '
'style=filled, fillcolor="#ccffcc", rank=min];',
'_op [width=1.8, label = "OPTIONAL", shape=box, '
'fontcolor="#888888", rank=max];',
"}",
]
lu = {}
# find fields pointed to by Foreign Key fields, don't assume FKs only
# point to Primary Keys
fk_targets = dom.xpath("//field/foreign_key/@target")
for table in dom.xpath("//table"):
skip = table.xpath('./attr[@key="dot_ignore"]')
if skip and skip[0].text == "true":
continue
name = get_name(table, safe=True) + " [label="
ports = [
'<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"><TR>'
'<TD BGCOLOR="#ccccff">%s</TD></TR>' % get_name(table)
]
for field in table.xpath("./field"):
if (
links_only
and not field.get("primary_key") == "true"
and not field.get("id") in fk_targets
and not field.xpath(".//foreign_key")
):
continue
lu[field.get("id")] = "%s:%s" % (
get_name(table, safe=True),
field.get("id"),
)
fname = get_name(field)
if field.get("allow_null") == "true":
fname = '<FONT COLOR="#888888">%s</FONT>' % fname
if field.get("primary_key") == "true":
fname = '<FONT COLOR="red">%s</FONT>' % fname
attr = ""
if field.get("unique") == "true" or field.get("primary_key") == "true":
attr = ' BGCOLOR="#ccffcc"'
ports.append(
'<TR><TD PORT="%s"%s>%s</TD></TR>' % (field.get("id"), attr, fname)
)
name += "\n".join(ports)
name += "</TABLE>>];"
dot.append(name)
for table in dom.xpath("//table"):
skip = table.xpath('./attr[@key="dot_ignore"]')
if skip and skip[0].text == "true":
continue
name = get_name(table, safe=True)
for field in table.xpath("./field"):
for fk in field.xpath("./foreign_key"):
if fk.get("target") in lu:
dot.append(
"%s:%s -> %s" % (name, field.get("id"), lu[fk.get("target")])
)
else:
log(
"No '%s' target for %s:%s\n"
% (fk.get("target"), name, field.get("id"))
)
for m2m in table.xpath("./attr"):
if m2m.get("key") != "dj_m2m_target":
continue
# break
for line in m2m.text.strip().split("\n"):
# dot.append('%s -> %s [label="%s"]' %
# (name, line.split()[1], line.split()[0]))
dot.append("%s -> %s" % (name, line.split()[1]))
dot.append("}\n")
dot = "\n".join(dot)
return dot
def main():
links_only = False
if "--links-only" in sys.argv:
links_only = True
sys.argv.remove("--links-only")
filename = sys.argv[1]
if len(sys.argv) > 2:
output = sys.argv[2]
else:
output = None
dotgraph(open(filename).read(), output=output, links_only=links_only)
if __name__ == "__main__":
main()
| tbnorth/dml | dml/dotgraph.py | dotgraph.py | py | 5,155 | python | en | code | 0 | github-code | 13 |
38020634878 | from AthenaCommon.Logging import logging
logConfigDigitization = logging.getLogger( 'ConfigDigitization' )
#check job configuration
from Digitization.DigiConfigCheckers import checkDetFlagConfiguration
checkDetFlagConfiguration()
#Pool input
from AthenaCommon.AppMgr import ServiceMgr
if not hasattr(ServiceMgr, 'EventSelector'):
import AthenaPoolCnvSvc.ReadAthenaPool
if hasattr(ServiceMgr, 'PoolSvc'):
ServiceMgr.PoolSvc.MaxFilesOpen = 0 # Never close Input Files
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if not athenaCommonFlags.DoFullChain:
ServiceMgr.EventSelector.InputCollections = athenaCommonFlags.PoolHitsInput()
#Settings the following attributes reduces the job size slightly
#ServiceMgr.AthenaPoolCnvSvc.PoolAttributes += [ "TREE_BRANCH_OFFSETTAB_LEN ='100'" ]
#ServiceMgr.AthenaPoolCnvSvc.PoolAttributes += [ "DEFAULT_BUFFERSIZE = '2048'" ]
#--------------------------------------------------------------
# Conditions Tag
#--------------------------------------------------------------
from Digitization.DigitizationFlags import digitizationFlags
if (digitizationFlags.IOVDbGlobalTag.statusOn and digitizationFlags.IOVDbGlobalTag.get_Value()!='default'):
IOVDbSvc=theApp.service('IOVDbSvc')
IOVDbSvc.GlobalTag=digitizationFlags.IOVDbGlobalTag.get_Value()
#--------------------------------------------------------------
# GeoModel
#--------------------------------------------------------------
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from AtlasGeoModel import SetupRecoGeometry
#--------------------------------------------------------------
# Magnetic field service
#--------------------------------------------------------------
try:
import MagFieldServices.SetupField
except:
#fall-back for 19.0.X releases.
include( "BFieldAth/BFieldAth_jobOptions.py" )
#--------------------------------------------------------------
# Pileup configuration
#--------------------------------------------------------------
from Digitization.DigiConfigCheckers import syncDetFlagsAndDigitizationJobProperties
syncDetFlagsAndDigitizationJobProperties()
#--------------------------------------------------------------
# Configure Run and Lumi Block and Pile-Up Lumi overriding
# (if required)
#--------------------------------------------------------------
if digitizationFlags.RunAndLumiOverrideList.statusOn:
if not(DetFlags.pileup.any_on()):
AthError( "This job will try to override pile-up luminosity configuration, but no pile-up will be set up!" )
include("Digitization/LumiBlockOverrides.py")
if digitizationFlags.dataRunNumber.statusOn:
logDigitization_flags.warning('digitizationFlags.RunAndLumiOverrideList has been set! digitizationFlags.dataRunNumber (set to %s) will be ignored. ', digitizationFlags.dataRunNumber.get_Value() )
else:
include("Digitization/RunNumberOverride.py")
#-----------------------------------------------------------
# Check Beam and Digitization jobproperties are synchronised
#-----------------------------------------------------------
from Digitization.DigiConfigCheckers import syncBeamAndDigitizationJobProperties
syncBeamAndDigitizationJobProperties()
digitizationFlags.lockMostFlags()
#--------------------------------------------------------------
# More Pileup configuration
#--------------------------------------------------------------
if DetFlags.pileup.any_on() or digitizationFlags.doXingByXingPileUp():
# protection for SteppingCache usage - currently incompatible with PileUpTools
if digitizationFlags.SignalPatternForSteppingCache.statusOn and digitizationFlags.doXingByXingPileUp():
raise RuntimeError("SteppingCache is incompatible with PileUpTools. Please switch off either digitizationFlags.SignalPatternForSteppingCache or digitizationFlags.doXingByXingPileUp.")
include( "Digitization/ConfigPileUpEventLoopMgr.py" )
if DetFlags.pileup.any_on():
logConfigDigitization.info("PILEUP CONFIGURATION:")
logConfigDigitization.info(" -----> Luminosity = %s cm^-2 s^-1", jobproperties.Beam.estimatedLuminosity())
logConfigDigitization.info(" -----> Bunch Spacing = %s ns", digitizationFlags.bunchSpacing.get_Value())
# in any case we need the PileUpMergeSvc for the digitize algos
if not hasattr(ServiceMgr, 'PileUpMergeSvc'):
from AthenaCommon import CfgGetter
ServiceMgr += CfgGetter.getService("PileUpMergeSvc")
#--------------------------------------------------------------
# Subdetector-specific configuration
#--------------------------------------------------------------
include( "Digitization/DetectorDigitization.py" )
#--------------------------------------------------------------
# Random Number Engine and Seeds
#--------------------------------------------------------------
# attach digi and pileup seeds to random number service configurable and print them out
from AthenaCommon.ConfigurableDb import getConfigurable
ServiceMgr += getConfigurable(digitizationFlags.rndmSvc.get_Value())()
digitizationFlags.rndmSeedList.addtoService()
digitizationFlags.rndmSeedList.printSeeds()
rndmSvc = getConfigurable(digitizationFlags.rndmSvc.get_Value())()
rndmSvc.OutputLevel = WARNING
if digitizationFlags.readSeedsFromFile.get_Value():
rndmSvc.Seeds=[]
rndmSvc.ReadFromFile=True
rndmSvc.FileToRead=digitizationFlags.rndmSeedInputFile.get_Value()
logConfigDigitization.info("Random seeds for Digitization will be read from the file %s",digitizationFlags.rndmSeedInputFile.get_Value())
# write out a summary of the time spent
from AthenaCommon.AppMgr import theAuditorSvc
from GaudiAud.GaudiAudConf import ChronoAuditor, MemStatAuditor
if not 'ChronoAuditor/ChronoAuditor' in theAuditorSvc.Auditors:
theAuditorSvc += ChronoAuditor()
if not 'MemStatAuditor/MemStatAuditor' in theAuditorSvc.Auditors:
theAuditorSvc += MemStatAuditor()
# LSFTimeLimi. Temporary disable
# include( "LSFTimeKeeper/LSFTimeKeeperOptions.py" )
# LSFTimeKeeperSvc = Service( "LSFTimeKeeperSvc" )
# LSFTimeKeeperSvc.OutputLevel=2; # remaining CPU at each event
| rushioda/PIXELVALID_athena | athena/Simulation/Digitization/share/ConfigDigitization.py | ConfigDigitization.py | py | 6,099 | python | en | code | 1 | github-code | 13 |
39255234691 |
ROCK = 0
PAPER = 1
SCISSORS = 3
type_score = [1, 2, 3]
LOST = 0
DRAW = 1
WON = 2
win_score = [0, 3, 6]
hands = {'A' : 0, 'B' : 1, 'C' : 2, 'X' : 0, 'Y' : 1, 'Z' : 2}
win_values = [-2, 1]
win_hands = {'A': 'Y', 'B': 'Z', 'C': 'X'}
lose_hands = {'A': 'Z', 'B': 'X', 'C': 'Y'}
def win(you, me):
if you == me:
return 3
if (me - you) in win_values:
return 6
return 0
with open('day_2\data.txt') as f:
line = f.readline()
score = 0
card = 0
while line:
cards = line.split()
print(cards[0])
if cards[1] == 'X':
cards[1] = lose_hands[cards[0]]
elif cards[1] == 'Y':
cards[1] = cards[0]
elif cards[1] == 'Z':
cards[1] = win_hands[cards[0]]
score += win(hands[cards[0]], hands[cards[1]])
score += type_score[hands[cards[1]]]
line = f.readline()
#print(f'{l.index(m)} - {m}')
print(f'total {score}') | orikam/advantcoding_2022 | day2/d2q1.py | d2q1.py | py | 948 | python | en | code | 0 | github-code | 13 |
11900178154 |
import numpy as np
import tensorflow as tf
import os
from dataloader import DataLoader
import utils
from Networks.imagenet_traintest import TrainTestHelper
import argparse
def train(dataloader, trainer, validator, batches, max_iteration, print_freq):
np.random.seed(1234)
tf.random.set_seed(1234)
trainstep = trainer.get_step()
valstep = validator.get_step()
train_dict = {"iteration":[], "loss": []}
for i in range(max_iteration):
batch_x, batch_y = dataloader.read_batch(batches, "train")
trainstep(batch_x, batch_y)
if i % print_freq == 0: # validation loss
batch_x, batch_y = dataloader.read_batch(batches, "val")
valstep(batch_x, batch_y)
train_dict["iteration"].append(i)
train_dict["loss"].append(float(validator.loss_logger.result()))
print("iteration {} - loss {}".format(i + 1, train_dict["loss"][-1]))
def get_imagenet_prediction(image, hot_vec, network, loss_func):
pred = network(image, training=False)
i = tf.math.argmax(pred[0])
loss = loss_func(hot_vec, pred)
return i, np.array(pred[0])[i], loss
def save_predicted_results(test_images, labels, network, paths, loss_func, title, output_path):
with open(os.path.join(output_path, "{}.txt".format(title)), 'w') as f:
correct_sum = 0
for i in range(len(test_images)):
pred, score, loss = get_imagenet_prediction(test_images[i][np.newaxis, :,:,:], labels[i], network, loss_func)
f.write("{} {} {} {}\n".format(paths[i], pred, score, loss))
if int(pred) == int(labels[i]):
correct_sum += 1
f.write("correctness {}\n".format(correct_sum/len(test_images)))
def get_args():
    """Build and parse the command-line arguments for a fine-tuning run."""
    cli = argparse.ArgumentParser(description='Process training arguments.')
    # Model / dataset shape.
    cli.add_argument('--nntype', default="PerceptualModel", help='The type of the network')
    cli.add_argument('--cls_num', type=int, default=1000, help='The number of classes in the dataset')
    cli.add_argument('--input_size', type=int, nargs=2, default=(224, 224))
    # Dataset locations (all mandatory).
    cli.add_argument('--train_path', type=str, required=True)
    cli.add_argument('--val_path', type=str, required=True)
    cli.add_argument('--test_path', type=str, required=True)
    cli.add_argument('--output_path', type=str, default=os.getcwd(), help='The path to keep the output')
    # Training hyper-parameters.
    cli.add_argument('--print_freq', '-pf', type=int, default=10)
    cli.add_argument('--lr', type=float, default=5e-5, help='learning rate')
    cli.add_argument('--batchs_num', '-bs', type=int, default=2, help='number of batches')
    cli.add_argument('--train_iterations', '-iter', type=int, default=800, help='The maximum iterations for learning')
    return cli.parse_args()
def main():
    """Entry point: build dataloader and network, evaluate, fine-tune, re-evaluate."""
    tf.keras.backend.set_floatx('float32')
    args = get_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    dataloader = DataLoader(args.train_path, args.val_path, args.test_path, args.cls_num, args.input_size,
                            name="dataloader", output_path=args.output_path)
    network = utils.get_network(args.nntype)
    # Freeze the first 19 layers; only the remaining layers receive gradients.
    network.freeze_layers(19)
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
    loss = tf.keras.losses.SparseCategoricalCrossentropy()
    trainer = TrainTestHelper(network, optimizer, loss, training=True)
    validator = TrainTestHelper(network, optimizer, loss, training=False)
    # Score the same 200 test images before and after training for comparison.
    test_images, labels = dataloader.read_batch(200, "test")
    save_predicted_results(test_images, labels, network, dataloader.paths_logger["test"], loss, "before_training", args.output_path)
    train(dataloader, trainer, validator, args.batchs_num, args.train_iterations, args.print_freq)
    save_predicted_results(test_images, labels, network, dataloader.paths_logger["test"], loss, "after_training", args.output_path)
if __name__ == "__main__":
    main()
| LotanLevy/ImageNetFineTuning | imagenet_fine_tuning.py | imagenet_fine_tuning.py | py | 3,972 | python | en | code | 0 | github-code | 13 |
21263885236 | from operator import attrgetter
class Business(object):
    """One business record: the chain it belongs to, its location, and a unique id."""
    def __init__(self, chain_name, location, id):
        self.chain_name = chain_name  # e.g. "Starbucks"
        self.location = location  # city name
        self.id = id  # unique business id; equal ids denote duplicate records
class Chain(object):
    """Aggregation result: a chain name and how many distinct businesses it has."""
    def __init__(self, chain_name, frequency):
        self.chain_name = chain_name
        self.frequency = frequency  # count of distinct business ids for this chain
# remove duplicate elements from the sorted list
def remove_duplicate(arr):
    """De-duplicate a list pre-sorted by ``id``, in place.

    Compacts each element carrying a new id towards the front with a write
    cursor and returns the de-duplicated prefix of the list.
    """
    if not arr or len(arr) <= 1:
        return arr
    write = 0
    for read in range(1, len(arr)):
        if arr[read].id != arr[write].id:
            write += 1
            arr[write] = arr[read]
    return arr[:write + 1]
def get_chain_freq(biz_list, location):
    """Sort-and-scan variant: count distinct businesses per chain at *location*.

    Mutates and de-duplicates *biz_list*, prints every record for debugging,
    and returns Chain objects sorted by ascending frequency then chain name.
    """
    if not biz_list or len(biz_list) == 0 or not location:
        return []
    # sort by id and then remove the duplicate elements from list
    biz_list.sort(key=attrgetter('id'))
    biz_list = remove_duplicate(biz_list)
    # sort the list by location and chain name
    biz_list.sort(key=attrgetter('location', 'chain_name'))
    # Debug output: every surviving record as (id, location, chain).
    for i in range(0, len(biz_list)):
        print("(" + str(biz_list[i].id) + ", " + biz_list[i].location + ", " + biz_list[i].chain_name + ")")
    # locate the starting and ending index of the chain that was located in the given location
    for i in range(0, len(biz_list)):
        if biz_list[i].location == location: break
    for j in range(i, len(biz_list)):
        if biz_list[j].location != location: break
    # corner case, deal the case that the last element happen to be a qualified chain
    j = j + 1 if j == len(biz_list) - 1 and biz_list[j].location == location else j
    # if the given location can not be found in current chain, return a empty list
    # NOTE(review): this relies on i == j == len-1 to signal "location absent";
    # verify the single-element and last-element edge cases behave as intended.
    if i == j == len(biz_list) - 1:
        return []
    # count the id frequency wrt to each chain name
    chain_list = []
    c_name, freq = biz_list[i].chain_name, 0
    for k in range(i, j):
        if c_name == biz_list[k].chain_name:
            freq += 1
        if c_name != biz_list[k].chain_name:
            # add the previous chain's stat into the chain list
            chain_list.append(Chain(c_name, freq))
            # reset the chain name and frequency for new chain
            c_name = biz_list[k].chain_name
            freq = 1
    # append the last qualified chain into the list
    chain_list.append(Chain(c_name, freq))
    # sort the list in ascending order of frequency and chain name
    chain_list.sort(key=attrgetter('frequency', 'chain_name'))
    return chain_list
def get_chain_freq_1(biz_list, location):
    """Hash-based variant: count distinct businesses per chain at *location*.

    Duplicate records (same id) are counted once. Returns Chain objects
    sorted by ascending frequency, ties broken by chain name.
    """
    if not biz_list or len(biz_list) == 0 or not location:
        return []
    # freq_by_chain maps chain name -> distinct-business count; seen_ids
    # guards against duplicate records for the same business.
    freq_by_chain = dict()
    seen_ids = set()
    for biz in biz_list:
        if biz.location != location:
            continue
        if biz.id in seen_ids:
            continue
        seen_ids.add(biz.id)
        if biz.chain_name in freq_by_chain:
            freq_by_chain[biz.chain_name] += 1
        else:
            freq_by_chain[biz.chain_name] = 1
    # Materialise the counts and order them for the caller.
    chains = [Chain(name, count) for name, count in freq_by_chain.items()]
    chains.sort(key=attrgetter('frequency', 'chain_name'))
    return chains
# location Austin, output whole food 2, Peets Coffe 1, Starbucks 1
# Demo fixture: ten businesses across three cities, including one exact
# duplicate record (id 103 appears twice as b3 and b7).
b1 = Business("Starbucks", "Seattle", 101)
b2 = Business("Peets Coffee", "San Francisco", 102)
b3 = Business("Amazon", "Austin", 103)
b4 = Business("Starbucks", "San Francisco", 104)
b5 = Business("Peets Coffee", "Austin", 105)
b6 = Business("Starbucks", "Austin", 106)
b7 = Business("Amazon", "Austin", 103)
b8 = Business("Amazon", "Austin", 107)
b9 = Business("Starbucks", "Austin", 108)
b10 = Business("Starbucks", "Austin", 109)
biz_list = [b1, b2, b3, b4, b5, b6, b7, b8, b9, b10]
# Print the raw fixture as (id, location, chain) tuples.
for i in range(0, len(biz_list)):
    print("(" + str(biz_list[i].id) + ", " + biz_list[i].location + ", " + biz_list[i].chain_name + ")")
print()
# Run the hash-based counter (the sort-based variant is kept for comparison).
arr1 = get_chain_freq_1(biz_list, "Austin")
# arr1 = get_chain_freq(biz_list, "Austin")
for i in range(0, len(arr1)):
    print(arr1[i].chain_name + ", " + str(arr1[i].frequency))
| sundaycat/Leetcode-Practice | legacy/Interview Preparation/BusinessChain.py | BusinessChain.py | py | 4,457 | python | en | code | 0 | github-code | 13 |
21094385541 | from ipdb import set_trace
from os import system
from pprint import pp
from helpers import term_wrap, star_line, center_string_stars
# ! BIG O NOTATION
# * TIME COMPLEXITY
class ConstantTime(): # O(1)
    """Examples whose operation count is fixed regardless of input size."""
    def first_func(self): # TOTAL OPS => O(6)
        """Six constant-time operations; prints the name and returns x + y."""
        x = 1 # O(1)
        y = 2 # O(1)
        name = "layne" # O(1)
        z = x + y # O(1)
        print(name) # O(1)
        return z # O(1)
    def with_list(self, lst): # O(4)
        """len() and indexing are O(1) on Python lists, so four ops total."""
        print(len(lst)) # O(1)
        print(lst[0]) # O(1)
        print(lst[1]) # O(1)
        print(lst[-1]) # O(1)
class LinearTime():
    """Examples whose operation count grows linearly with the input size."""
    def first_func(self, lst): # O(N)
        """Constant setup plus one pass over *lst*: O(4 + 3n) -> O(n)."""
        print(len(lst)) # O(1)
        print("len(lst)") # O(1)
        print("125") # O(1)
        print(len(lst)) # O(1)
        for el in lst:
            other = "hi" # O(N)
            print(el) # O(N)
            print(other) # O(N)
    # to calc the big o, we need to go line by line and add each big o
    # O(1 + 1 + 1 + 1 + n + n + n) => O(4 + 3n)
    # n = 1 => O(4 + 3) => O(7)
    # n = 10 => O(4 + 30) => O(34)
    # n = 100 => O(4 + 300) => O(304)
    # n = 100000000000000000 => O(4 + 300000000000000000) => O(3000000000000000004)
    # n = ♾ => O(1 + 3*♾) => Can drop the CONSTANT
    # => O(3 * ♾) => Can ALSO DROP THE COEFFICIENT of N
    # The resulting Big O is just O(N)
    def parallel(self, lst1): # O(n=len(lst1))
        """Two sequential (not nested) passes over one list: O(4n) -> O(n)."""
        x = 1 # O(1)
        y = 2 # O(1)
        z = 3 # O(1)
        for el1 in lst1:
            el1 + x # O(N)
            el1 + y # O(N)
        for el2 in lst1:
            el2 - z # O(N)
            el2 - y # O(N)
        return z # O(1)
    # O(3 + n + n + n + n + 1 ) => O(4 + 4n) => O(4n) => O(n)
    def parallel2(self, lst1, lst2): # n = len(lst1), m = len(lst2)
        """Sequential passes over two different lists: O(n + m)."""
        x = 1 # O(1)
        y = 2 # O(1)
        z = 3 # O(1)
        for el1 in lst1:
            el1 + x # O(N)
            el1 + y # O(N)
        for el2 in lst2:
            el2 - z # O(M)
            el2 - y # O(M)
        return z # O(1)
    # O(3 + n + n + m + m + 1 ) => O(4 + 2n + 2m) => O(2n + 2m) => O(n + m)
    def find_el(self, lst, el):
        """Linear search: returns "DONE" when *el* is found, None otherwise.

        Best case O(1) (first element), worst case O(n); prints a message
        for every non-matching element scanned before the match.
        """
        for x in lst:
            if x == el:
                print(x)
                return "DONE"
            else:
                print("still searching")
class QuadraticTime():
    """Examples with nested loops: operation count grows as n^2 (or n * m)."""
    def add_each_element(self, lst): # O(n^2)
        """Append el1 + el2 for every ordered pair of elements: n^2 sums."""
        # [1,2,3,4] => [2, 3, 4, 5, 3, ....]
        new_list = []
        for el1 in lst:
            for el2 in lst:
                new_list.append(el1 + el2) # O(n * n)
        print(len(lst))
        print(len(new_list))
    def print_each_element(self, matrix): # O(n^2)
        """Print every cell of a 2-D matrix, one row per output line."""
        for row in matrix:
            for x in row:
                print(f"{x}", end=" => ")
            print("")
    def show_diff_lists(self, lst1, lst2): # O(n * m)
        """Print every pairing of an element from lst1 with one from lst2."""
        for el1 in lst1:
            for el2 in lst2:
                print(f'{el1} : {el2}')
class ConstantSpace():
    """Space-complexity demo (note: the returned list itself is still O(n))."""
    def add_one(self, lst):
        """Return a new list with every element of *lst* incremented by one."""
        return list(map(lambda item: item + 1, lst))
class LinearSpace():
    """Space-complexity demo: builds an O(n) output list."""
    def add_one_linear(self, lst):
        """Return a new list containing each element of *lst* plus one."""
        return [item + 1 for item in lst]
class QuadraticSpace():
    """Space-complexity demo: produces an n-by-n structure, i.e. O(n^2) space."""
    def add_one_quad(self, lst):
        """For each element, emit a row of len(lst) copies of element + 1."""
        return [[item + 1] * len(lst) for item in lst]
if __name__ == "__main__":
    # Demo driver: clear the terminal, print a banner, then exercise the
    # examples. Most calls are left commented out for selective runs.
    system("clear")
    term_wrap("Big O Notation")
    constant = ConstantTime()
    linear = LinearTime()
    quadratic = QuadraticTime()
    const_space = ConstantSpace()
    # constant.with_list([1 for _ in range(100)])
    # linear.first_func([1 for _ in range(100)])
    # linear.find_el([1,2,3,4,5], 1)
    # linear.find_el([1,2,3,4,5], 5)
    # quadratic.add_each_element([1, 2, 3, 4])
    # quadratic.add_each_element([1 for _ in range(10000)])
    # 4x4 matrix used by the (commented) quadratic examples.
    m1 = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 1, 2, 3],
        [4, 5, 6, 7]
    ]
    # quadratic.print_each_element(m1)
    # quadratic.show_diff_lists([1, 2, 3], [4, 5, 6, 7])
    print(const_space.add_one([1, 2, 3, 4]))
    set_trace()  # drops into the ipdb debugger before the closing banner
    center_string_stars("BYE")
| rothberry/west-050123-live | 4-phase/06-big-o/lib/big_o.py | big_o.py | py | 4,333 | python | en | code | 0 | github-code | 13 |
25141396493 | import re
from flask import json
from tools.datetime_convertations import DateTime
from tools.for_db.work_with_booking_info import query_booking_info_by_id
from tools.for_db.work_with_links import add_link
from tools.for_db.work_with_slots import add_slot_and_get_id
# Shared module-level fixtures: a fixed slot window plus link-validity
# timestamps computed relative to "now" at import time.
start = '2021-10-07T15:00:56.273Z'
end = '2021-10-07T16:00:56.273Z'
dt_for_link = DateTime().utc_plus_delta(days=7)  # used as link valid_until below
end_interval = DateTime().utc_plus_delta(days=10)
def test_guest_calendar_post(app_for_test, test_admin, link_id):
    """POST booking: a valid slot books OK; bad link/slot/payload/email are rejected."""
    admin_id = test_admin.get_id()
    add_slot_and_get_id(start, end, admin_id)
    add_slot_and_get_id('2020-09-01T15:00:56.273Z', '2020-09-01T16:00:56.273Z', admin_id)
    add_slot_and_get_id(dt_for_link, end_interval, admin_id)
    add_link(link_id, admin_id, dt_for_link)
    # Happy path: book the first slot through a valid shareable link.
    res1 = app_for_test.post(f'/calendars/{link_id}/bookings/',
                             data=json.dumps(dict(guest_name='Name', guest_email='test@ma.c',
                                                  topic='Topic', start=start, end=end)),
                             content_type='application/json')
    # Nonexistent link id is rejected as unauthorized.
    res2 = app_for_test.post('/calendars/asdfga/bookings/',
                             data=json.dumps(dict(guest_name='Name', guest_email='test@ma.c',
                                                  topic='Topic', start=start, end=end)),
                             content_type='application/json')
    # Same slot again (already booked by res1) conflicts.
    res3 = app_for_test.post(f'/calendars/{link_id}/bookings/',
                             data=json.dumps(dict(guest_name='Name', guest_email='test@ma.c', start=start, end=end)),
                             content_type='application/json')
    # Missing guest_name is a bad request.
    res4 = app_for_test.post(f'/calendars/{link_id}/bookings/',
                             data=json.dumps(dict(guest_email='test@ma.c', start=start, end=end)),
                             content_type='application/json')
    # Malformed email address is a bad request.
    res5 = app_for_test.post(f'/calendars/{link_id}/bookings/',
                             data=json.dumps(dict(guest_name='Name', guest_email='test.c',
                                                  topic='Topic', start=start, end=end)),
                             content_type='application/json')
    assert res1.status == '200 OK'
    assert re.search(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}', res1.json['uuid'])
    assert res1.json['start'] == start
    assert res1.json['end'] == end
    assert res2.status == '401 UNAUTHORIZED'
    assert res3.status == '409 CONFLICT'
    assert res4.status == '400 BAD REQUEST'
    assert res5.status == '400 BAD REQUEST'
def test_guest_calendar_get_200(app_for_test, link_id):
    """GET via a valid link returns the calendar with its one free future slot."""
    res = app_for_test.get(f'/calendars/{link_id}')
    assert res.status == '200 OK'
    assert res.json == {'id': 1, 'slots': [{'id': 3, 'start_interval': dt_for_link, 'end_interval': end_interval,
                                            'booking_id': None}], 'valid_until': dt_for_link}
def test_guest_calendar_get_404(app_for_test):
    """GET with an unknown link id returns 404."""
    res = app_for_test.get('/calendars/link')
    assert res.status == '404 NOT FOUND'
def test_guest_calendar_get_401(app_for_test, test_admin):
    """GET via a link whose valid_until lies in the past is unauthorized."""
    add_link('abc', test_admin.get_id(), valid_until='2021-10-12T17:34:59.603Z')
    res = app_for_test.get('/calendars/abc')
    assert res.status == '401 UNAUTHORIZED'
def test_guest_calendar_delete_200(app_for_test, link_id):
    """DELETE an existing booking by uuid succeeds.

    NOTE: relies on booking id 1 created by the earlier POST test in this module.
    """
    booking_uuid = query_booking_info_by_id(1).uuid
    res = app_for_test.delete(f'/calendars/{link_id}/bookings/{booking_uuid}')
    assert res.status == '200 OK'
    assert res.json == {'detail': 'Successful request', 'status': 200}
def test_guest_calendar_delete_409(app_for_test, link_id):
    """DELETE with a value that is not a valid booking uuid yields a conflict."""
    res = app_for_test.delete(f'/calendars/{link_id}/bookings/1')
    assert res.status == '409 CONFLICT'
    assert res.json == {'detail': 'Unable to delete booking info', 'status': 409}
def test_guest_calendar_delete_404(app_for_test):
    """DELETE through an unknown link id returns 404."""
    res = app_for_test.delete('/calendars/wrong_link/bookings/1')
    assert res.status == '404 NOT FOUND'
    assert res.json == {'detail': 'Shareable link not found', 'status': 404}
def test_guest_calendar_delete_401(test_admin, app_for_test, link_id):
    """DELETE through a link that expired yesterday is unauthorized."""
    add_link('expired_link', test_admin.get_id(), DateTime().utc_plus_delta(days=-1))
    res = app_for_test.delete('/calendars/expired_link/bookings/1')
    assert res.status == '401 UNAUTHORIZED'
    assert res.json == {'detail': 'Unauthorized - link has expired', 'status': 401}
| meetingbook/meetingbook | backend/tests/test_guest_calendars.py | test_guest_calendars.py | py | 4,413 | python | en | code | 3 | github-code | 13 |
35224837284 | import cv2
import torch
from detectron2.engine import DefaultPredictor
from detectron2 import model_zoo
from detectron2.config import get_cfg
# Build the detectron2 config from the COCO Mask R-CNN R50-FPN baseline,
# running on CPU.
cfg = get_cfg()
cfg.MODEL.DEVICE = 'cpu'
cfg.merge_from_file(
    model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"),
)
cfg.DATALOADER.NUM_WORKERS = 4
# Pretrained COCO weights (superseded a few lines below by the fine-tuned
# checkpoint).
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml",
)
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
# Single-class model: only one category is detected.
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.MODEL.RETINANET.NUM_CLASSES = 1
cfg.TEST.EVAL_PERIOD = 600
# Fine-tuned weights; this assignment overrides the model-zoo URL above.
cfg.MODEL.WEIGHTS = "model_final.pth"
# Keep only detections scoring above 0.7.
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
predictor = DefaultPredictor(cfg)
def analyse(image, use_max_area=0):
    """Run damage detection on *image* and decide whether it is damaged.

    Args:
        image: image array of shape (H, W, ...) as read by cv2.
        use_max_area: when truthy / '1', score only the single largest
            detection instead of the sum of all detections. (The original
            code compared against the string '1' while defaulting to the
            int 0; both forms are now accepted.)

    Returns:
        (is_damaged, info): is_damaged is True when the damaged area exceeds
        2% of the image; info holds the damage percentage and the integer
        bounding boxes that contributed to it.
    """
    outputs = predictor(image)
    predictions = outputs['instances']
    boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
    # Fix: guard against missing/empty detections — the original crashed on
    # len(None) or on torch.div with plain-int zero damage.
    num_boxes = len(boxes) if boxes is not None else 0
    if num_boxes == 0:
        return False, {'damage': 0.0, 'boxes': []}
    max_damage_area = float('-inf')
    max_box = None
    total_damage_area = 0
    all_boxes = []
    for i in range(num_boxes):
        area = boxes[i].area()
        if area > max_damage_area:
            max_damage_area = area
            max_box = boxes[i]
        total_damage_area += area
        all_boxes += boxes[i].tensor.numpy().astype(int).tolist()
    # Fix: accept the historical string flag ('1') as well as a truthy int.
    if str(use_max_area) == '1':
        damage_area = max_damage_area
        all_boxes = max_box.tensor.numpy().astype(int).tolist()
    else:
        damage_area = total_damage_area
    image_area = image.shape[0] * image.shape[1]
    damage = (torch.div(damage_area, image_area) * 100).item()
    return damage > 2, {
        'damage': damage,
        'boxes': all_boxes,
    }
| vinaykudari/car-health-score | api/helper.py | helper.py | py | 1,672 | python | en | code | 0 | github-code | 13 |
9350585295 | #!/usr/bin/python3
from fontParts.world import *
import sys
# Open UFO
# Usage: kern_virama.py <ufo-path> <kern-amount>
ufo = sys.argv[1]
font = OpenFont(ufo)
print(f'Kern virama in {ufo}')
# Modify UFO
kern = int(sys.argv[2])
virama = font['virama']
# virama.leftMargin += kern
# Shift every outline point that sits left of the origin (x < 0) further
# left by `kern`; points at x >= 0 are untouched.
for contour in virama.contours:
    for point in contour.points:
        if point.x < 0:
            print(f'adjusting point at {point.x}, {point.y}')
            point.x -= kern
# for glyph in font:
#     if glyph.name.endswith('.base'):
#         print(f'{glyph.name}, {glyph.width}')
# Save UFO
# Mark the glyph data dirty, persist the UFO, and release it.
font.changed()
font.save()
font.close()
| nlci/knda-font-badami | tools/kern_virama.py | kern_virama.py | py | 573 | python | en | code | 3 | github-code | 13 |
72732857939 | from rivertopo.burning import burn_lines
from osgeo import gdal, ogr
import argparse
import logging
import numpy as np
# Entry point for use in setup.py
def main():
    """CLI entry point: burn line-feature Z values into a DEM raster.

    Reads the input DEM, rasterizes every line layer into an in-memory
    copy, keeps the per-cell minimum of burned vs. original values, and
    writes the result as a GeoTIFF.
    """
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('lines', type=str, help='linestring features with DEM-sampled Z')
    argument_parser.add_argument('input_raster', type=str, help='DEM input raster')
    argument_parser.add_argument('output_raster', type=str, help='DEM output raster with objects burned in')
    argument_parser.add_argument('--log-level', type=str)
    input_arguments = argument_parser.parse_args()
    # Fix: --log-level was parsed but never applied; configure logging so the
    # logging.info() progress messages below are actually emitted.
    logging.basicConfig(level=(input_arguments.log_level or 'INFO').upper())
    lines_path = input_arguments.lines
    input_raster_path = input_arguments.input_raster
    output_raster_path = input_arguments.output_raster
    input_raster_dataset = gdal.Open(input_raster_path)
    band = input_raster_dataset.GetRasterBand(1)
    band_array = band.ReadAsArray()
    lines_datasrc = ogr.Open(lines_path)
    # Create an intermediate, in-memory dataset that the lines will be burned
    # into. This is done in order to prevent writing a premature output raster
    # in case something goes wrong during the line rasterization.
    intermediate_driver = gdal.GetDriverByName("MEM")
    intermediate_raster_dataset = intermediate_driver.CreateCopy(
        "temp", # the dataset has to have a name
        input_raster_dataset,
    )
    # Burn the line layers into the temp raster
    for layer in lines_datasrc:
        burn_lines(intermediate_raster_dataset, layer)
        logging.info(f"burned layer {layer.GetName()} into temporary raster")
    int_band = intermediate_raster_dataset.GetRasterBand(1)
    int_band_array = int_band.ReadAsArray()
    # Keep the lower of (burned value, original DEM value) in every cell, so
    # burning can only lower the surface, never raise it.
    int_band_array = np.where(int_band_array > band_array, band_array, int_band_array)
    # Write the updated array back to the band
    int_band.WriteArray(int_band_array)
    # Save changes to the file
    intermediate_raster_dataset.FlushCache()
    # Line rasterization is now complete, copy the temp raster to output file
    output_driver = gdal.GetDriverByName("GTiff")
    intermediate_raster_dataset = output_driver.CreateCopy(
        output_raster_path,
        intermediate_raster_dataset,
    )
    logging.info("output raster written")
# Allows executing this module with "python -m"
if __name__ == '__main__':
    main()
14791914603 | import random
def create_deck():
    """Build the 52-card deck as rank+suit strings.

    Ranks run 2..10 then V, D, K, T; suits are C, P, B, X. The ordering
    (rank-major, suit-minor) matches the original nested loops.
    """
    suits = ('C', 'P', 'B', 'X')
    ranks = [str(number) for number in range(2, 11)] + ['V', 'D', 'K', 'T']
    return [rank + suit for rank in ranks for suit in suits]
def start(deck):
    """Deal two random cards out of *deck* (mutating it in place).

    Returns:
        (hand, deck): the two drawn cards and the same, now-smaller deck.
    """
    hand = []
    while len(hand) < 2:
        card = random.choice(deck)
        deck.remove(card)
        hand.append(card)
    return hand, deck
def nachalo():
    """Start a round: create a fresh deck and deal the opening hand.

    Fixes: the original assigned a dead ``hand = []`` and then discarded
    the dealt hand (implicit None return); callers now receive the result.

    Returns:
        (hand, deck): the two dealt cards and the remaining deck.
    """
    deck = create_deck()
    hand, deck = start(deck)  # deals two random cards, mutating deck
    return hand, deck
| Anakkobitskaya/Anak | jed.py | jed.py | py | 503 | python | en | code | 0 | github-code | 13 |
39403268082 | """Matcha repository - search and recommend"""
from datetime import date
from databases.interfaces import Record
from backend.models import models_enums, models_matcha, models_user
from backend.repositories import (BaseAsyncRepository, postgres_reconnect,
repo_interfaces)
from backend.settings import settings_base
class MatchaDatabaseRepository(BaseAsyncRepository, repo_interfaces.MatchaInterface):
    """Search users.

    Implements both paged search and recommendations on top of a single
    parameterised SQL query built by ``_make_query_entities`` and executed
    by ``_collect_rows``.
    """

    async def _collect_rows(
        self,
        array_of_queries: list[str],
        query_mods: dict,
        interests: list[str] | None,
        is_count: bool = False,
        excluded_users_query: str = "",
    ) -> list[dict] | int:
        """Return records from database.

        When ``is_count`` is True, wraps the query in COUNT(*) and returns an int.
        NOTE(review): this method mutates ``query_mods`` — the non-count path
        pops 'order_by'/'order_direction' (consumed into the SQL text), and the
        count path deletes 'limit'/'offset'. Callers that issue both calls rely
        on exactly this order (rows first, count second); confirm before reuse.
        """
        (
            coordinates_query,
            sexual_preferences_query,
            age_gap_query,
            fame_rating_gap_query,
        ) = array_of_queries
        # Inner query: profiles joined with locations, filtered by distance,
        # orientation, fame and age; outer query drops users already visited
        # (paired/blocked/reported/matched) by :user_id.
        query: str = f"""
        SELECT u.user_id as user_id, first_name, last_name, birthday, gender, sexual_orientation, biography,
            main_photo_name, fame_rating, last_online, interests, city, interests_common
        FROM (
            SELECT puser_id as user_id, first_name, last_name, birthday, gender, sexual_orientation, biography,
                main_photo_name, fame_rating, last_online, interests, pcity as city,
                cardinality(interests & ARRAY[{','.join(interests) if interests else ''}]::integer[])
                    as interests_common
            FROM
                (
                SELECT p.user_id as puser_id, p.city as pcity, *
                FROM profiles as p
                LEFT JOIN user_locations as l
                ON l.user_id = p.user_id
                ) as p_l
            WHERE (
                ({coordinates_query})
                AND ({sexual_preferences_query})
                AND {fame_rating_gap_query}
                AND {age_gap_query}
                AND puser_id != :user_id
                {excluded_users_query}
            )
        ) as u
        LEFT JOIN visits as v
        ON v.target_user_id = u.user_id and v.user_id = :user_id
        WHERE (is_paired is NULL AND is_blocked is NULL AND is_reported is NULL AND is_match is NULL)
        {f"ORDER BY {query_mods.pop('order_by')} {query_mods.pop('order_direction')} LIMIT :limit OFFSET :offset ;"
        if not is_count else ''}
        """
        if is_count:
            query = f"SELECT COUNT (*) FROM ({query}) as c;"
            del query_mods["limit"]
            del query_mods["offset"]
        result: list[Record] | Record = await self.database_connection.fetch_all(
            query,
            query_mods,
        )
        if is_count:
            return result[0]._mapping.get("count", 0)
        if not result:
            return []
        return [dict(row) for row in result]

    async def _collect_total_amount_of_rows(
        self, array_of_queries: list[str], query_mods: dict, interests: list[str] | None
    ) -> int:
        """Return amount of records (COUNT(*) variant of ``_collect_rows``)."""
        return await self._collect_rows(
            array_of_queries, query_mods, interests, is_count=True
        )

    @staticmethod
    def determine_sexual_preferences_for_user(
        user_profile: models_user.UserProfile,
    ) -> str:
        """Prepare search query for expected gender and preferences.

        Returns a SQL fragment restricting candidate rows by gender and
        sexual_orientation so the match is mutually compatible.
        """
        expected_preferences: list[str]
        if (
            user_profile.sexual_orientation
            == models_enums.SexualPreferencesEnum.HOMOSEXUAL
        ):
            # Same gender; candidate must be homosexual or bi.
            expected_preferences = [
                str(models_enums.SexualPreferencesEnum.HOMOSEXUAL.value),
                str(models_enums.SexualPreferencesEnum.BI.value),
            ]
            return f"sexual_orientation in ({','.join(expected_preferences)}) AND gender={user_profile.gender}"
        elif (
            user_profile.sexual_orientation
            == models_enums.SexualPreferencesEnum.HETEROSEXUAL
        ):
            # Opposite gender; candidate must be heterosexual or bi.
            expected_preferences = [
                str(models_enums.SexualPreferencesEnum.HETEROSEXUAL.value),
                str(models_enums.SexualPreferencesEnum.BI.value),
            ]
            return f"sexual_orientation in ({','.join(expected_preferences)}) AND gender={int(not user_profile.gender)}"
        else:
            # Bi user: exclude only incompatible homosexual candidates.
            return (
                f"(sexual_orientation != {models_enums.SexualPreferencesEnum.HOMOSEXUAL} "
                f"AND gender != {int(not user_profile.gender)}"
                ") OR ("
                f"sexual_orientation != {models_enums.SexualPreferencesEnum.HOMOSEXUAL} "
                f"AND gender != {user_profile.gender})"
            )

    def _make_query_entities(
        self,
        params: models_matcha.SearchQueryModel,
        order_direction: models_enums.SearchOrderEnum,
        order_by: str | None,
        offset: int,
        limit: int,
        user_profile: models_user.UserProfile,
        coordinates_query: str,
    ) -> (list[str], dict):
        """Make entities for query.

        Returns the list of WHERE-fragment strings consumed by
        ``_collect_rows`` plus the bind-parameter dict (``query_mods``).
        """
        query_mods: dict = {
            "user_id": params.user_id,
            "order_by": order_by if order_by else "user_id",
            "order_direction": order_direction.name,
            "limit": limit,
            "offset": offset,
        }
        age_gap_query: str = ""
        if params.age_gap:
            # Translate (min_years, max_years) into a birthday date window.
            min_age: str = date.strftime(
                date(
                    date.today().year - params.age_gap[0],
                    date.today().month,
                    date.today().day,
                ),
                "%Y-%m-%d",
            )
            max_age: str = date.strftime(
                date(
                    date.today().year - params.age_gap[1],
                    date.today().month,
                    date.today().day,
                ),
                "%Y-%m-%d",
            )
            age_gap_query = f"(birthday > '{max_age}' AND birthday < '{min_age}')"
        fame_rating_gap_query: str = ""
        if params.fame_rating_gap:
            query_mods["min_rating"] = params.fame_rating_gap[0]
            query_mods["max_rating"] = params.fame_rating_gap[1]
            fame_rating_gap_query = (
                "(fame_rating >:min_rating AND fame_rating <:max_rating)"
            )
        sexual_preferences_query: str = self.determine_sexual_preferences_for_user(
            user_profile
        )
        array_of_queries = [
            coordinates_query,
            sexual_preferences_query,
            age_gap_query,
            fame_rating_gap_query,
        ]
        return array_of_queries, query_mods

    @postgres_reconnect
    async def search_users(
        self,
        params: models_matcha.SearchQueryModel,
        order_direction: models_enums.SearchOrderEnum,
        order_by: str | None,
        offset: int,
        limit: int,
        user_profile: models_user.UserProfile,
        coordinates_query: str,
    ) -> models_matcha.SearchUsersModels:
        """Search users: return one result page plus the total match count.

        NOTE(review): the rows call must precede the count call — both share
        ``query_mods`` which ``_collect_rows`` mutates (see that method).
        """
        array_of_queries, query_mods = self._make_query_entities(
            params,
            order_direction,
            order_by,
            offset,
            limit,
            user_profile,
            coordinates_query,
        )
        records: list[dict] = await self._collect_rows(
            array_of_queries, query_mods, params.interests_id
        )
        amount: int = await self._collect_total_amount_of_rows(
            array_of_queries, query_mods, params.interests_id
        )
        return models_matcha.SearchUsersModels(users=records, amount=amount)

    @postgres_reconnect
    async def recommend_users(
        self,
        params: models_matcha.SearchQueryModel,
        user_profile: models_user.UserProfile,
        coordinates_query: str,
        order_direction: models_enums.SearchOrderEnum = models_enums.SearchOrderEnum.ASC,
        excluded_users: list[int] | None = None,
        order_by: str = "fame_rating, interests_common",
        limit: int = settings_base.limit_recommendations,
        offset: int = 0,
    ) -> list[models_user.UserProfile]:
        """Create new list of recommended users for user_id.

        Same filter pipeline as ``search_users`` but ordered by fame rating
        and shared-interest count, with an optional explicit exclusion list.
        """
        array_of_queries, query_mods = self._make_query_entities(
            params,
            order_direction,
            order_by,
            offset,
            limit,
            user_profile,
            coordinates_query,
        )
        excluded_users_query: str = ""
        if excluded_users:
            # Appended verbatim to the inner WHERE clause.
            excluded_users_query = (
                f" AND puser_id NOT IN ({','.join(map(str, excluded_users))})"
            )
        records: list[dict] = await self._collect_rows(
            array_of_queries,
            query_mods,
            params.interests_id,
            excluded_users_query=excluded_users_query,
        )
        return [models_user.UserProfile(**record) for record in records]
| LsHanaha/matcha | backend/repositories/repo_matcha.py | repo_matcha.py | py | 9,000 | python | en | code | 2 | github-code | 13 |
2968899230 | import pyglet.gl as GL
from numpy.random import randint
class Pipe():
def __init__(self, x, size, height):
"""Class that defines a pair of pipes, top and bottom."""
self.x, self.height, self.size = x, height, size
self.spacing = randint(low=2*self.size, high=4*self.size)
self.velocity = 4
def show(self):
"""Shows the pipe."""
self._draw_pipes(self.x, self.spacing, self.height, delta=self.size)
def updates(self):
"""Updates the x position of the pipe."""
self.x -= self.velocity
@staticmethod
def _draw_pipes(x, spacing, height, delta=80):
"""Draw the pair of pipes."""
GL.glPushMatrix()
GL.glColor4f(1., 1., 1., 1.)
GL.glBegin(GL.GL_QUADS)
GL.glVertex2f(x, 0)
GL.glVertex2f(x + delta, 0)
GL.glVertex2f(x + delta, height - 2 * spacing)
GL.glVertex2f(x, height - 2 * spacing)
GL.glEnd()
GL.glBegin(GL.GL_QUADS)
GL.glVertex2f(x, height)
GL.glVertex2f(x, height - spacing)
GL.glVertex2f(x + delta, height - spacing)
GL.glVertex2f(x + delta, height)
GL.glEnd()
GL.glPopMatrix() | israelcamp/GeneticAlgorithms | FlappyPacman/pipe.py | pipe.py | py | 1,204 | python | en | code | 0 | github-code | 13 |
689794044 | ########################################################
# Rename new names in MCWeightDict to old one
#
# If you want to use old names in I3MCWeightDict,
# add this module just before your hdfwriter (or rootwriter)
#
from icecube import icetray, dataclasses
import math
class fill_old_weights(icetray.I3ConditionalModule):
def __init__(self, ctx):
icetray.I3ConditionalModule.__init__(self, ctx)
self.AddParameter("WeightDictName","I3MCWeightDict name","I3MCWeightDict")
self.AddOutBox("OutBox")
def Configure(self):
self.wmapname = self.GetParameter("WeightDictName")
def DAQ(self, frame) :
# make copy of weight map
wmap = dataclasses.I3MapStringDouble(frame[self.wmapname])
wmap["InjectionSurfaceR"] = wmap["CylinderRadius"]
wmap["GeneratorVolume"] = math.pi * wmap["CylinderRadius"]**2 * wmap["CylinderHeight"]
wmap["TotalInteractionProbabilityWeight"] = wmap["TotalWeight"]
wmap["TotalInteractionProbability"] = wmap["InteractionWeight"]
wmap["TotalPropagationProbability"] = wmap["PropagationWeight"]
wmap["TotalCrosssectionCGS"] = wmap["TotalXsectionCGS"]
wmap["InteractionCrosssectionCGS"] = wmap["InteractionXsectionCGS"]
wmap["InjectionAreaNormCGS"] = wmap["InjectionAreaCGS"]
wmap["TotalDetectionLength"] = wmap["TrueActiveLengthBefore"] + wmap["TrueActiveLengthAfter"]
wmap["RangeInMeterWaterEquiv"] = wmap["RangeInMWE"]
frame.Delete(self.wmapname)
frame.Put(self.wmapname, wmap)
self.PushFrame(frame,"OutBox");
return True
| wardVD/IceSimV05 | src/neutrino-generator/resources/examples/fill_old_weights.py | fill_old_weights.py | py | 1,625 | python | en | code | 1 | github-code | 13 |
71685105619 | import math
class Item:
def __init__(self, image, x, y):
self.image = image
self.x = x
self.y = y
self.size = (image.get_height()+image.get_width())/2
def collides(self, other):
if math.sqrt((self.x-other.x)*(self.x-other.x)+ \
(self.y-other.y)*(self.y-other.y))<(self.size+other.size)/2:
return True
return False
| JoePrezioso/NeuroPi | neuropi/objects.py | objects.py | py | 394 | python | en | code | 12 | github-code | 13 |
10313430445 | import pandas as pd
customers = pd.read_csv("noahs-customers.csv")
products = pd.read_csv("noahs-products.csv")
# they bought the same thing at the same time, except diff colours
orders = pd.read_csv("noahs-orders.csv")
orders_items = pd.read_csv("noahs-orders_items.csv")
# only items in-store
orders = orders[orders['shipped'] == orders["ordered"]]
times = orders['ordered'].str.split(' ',expand=True)
orders['date'] = times[0]
orders['time'] = times[1]
orders = orders.drop(['ordered', 'shipped'], axis = 1)
items = pd.merge(orders, orders_items, on="orderid", how='inner')
items = pd.merge(items, products, on="sku", how="inner")
# only items with colours
items = items[items['desc'].str.endswith(')')]
# split out colours
colours = items['desc'].str.split(' \(',expand=True)
items['desc'] = colours[0]
items['colour'] = colours[1]
# find all of emily's orders
emily = customers[customers['phone'] == "914-868-0316"]
emilyitems = pd.merge(emily, items, on="customerid", how="inner")
# make things a little easier to read
emilyitems = emilyitems.drop(['name', 'address', 'citystatezip', 'birthdate', 'phone'], axis = 1)
for index, row in emilyitems.iterrows():
# find same date, same item, diff colour
guy = items[(items['date'] == row['date']) & (items['desc'] == row['desc']) & (items['colour'] != row['colour'])]
# lns = pd.concat(guy)
emilyitems = pd.concat([emilyitems, guy], ignore_index = True)
# remove unique rows
emilyitems = emilyitems[emilyitems.duplicated(subset=["date"], keep=False)].sort_values('date') # dates
emilyitems = emilyitems[emilyitems.duplicated(subset=["desc"], keep=False)].sort_values('date') # diff items
# there at around the same time is 2019/06/01, customer 8835
customers = customers[customers['customerid'] == 8835]
print(customers) # imagine moving in with someone on staten island | wolframalexa/HanukkahOfData | day7.py | day7.py | py | 1,850 | python | en | code | 1 | github-code | 13 |
24991415636 | # A complication by Ben
# Since this isn't timed, I'll put in comments.
import sys
l = [list(map(int, s.strip())) for s in sys.stdin] # the initial world
def step(old):
new = [[i + 1 for i in x] for x in old]
extinct = set() # extinction set--positions that have already flashed
nxt, fla = doflashes(new, extinct)
print(f'fla {fla} ext {extinct}')
# At this point fla should equal len(extinct)
return nxt, fla
def doflashes(world, extinct):
new = [row[:] for row in world]
flashes = 0
for ri, row in enumerate(new):
for ci, cell in enumerate(row):
if (ri, ci) in extinct:
continue # already flashed, ignore it
if cell > 9:
flashes += 1
new[ri][ci] = 0
extinct.add((ri, ci))
for x, y in ((ci, ri-1), (ci, ri+1), (ci-1, ri), (ci+1, ri), (ci-1, ri-1), (ci-1, ri+1), (ci+1, ri-1), (ci+1, ri+1)):
if 0 <= y < len(new) and 0 <= x < len(row) and (y, x) not in extinct:
new[y][x] += 1 # contribute to a neighbor
nworld, fla = doflashes(new, extinct) # recur
return nworld, flashes + fla
return new, flashes
def show(wld):
for row in wld:
for col in row:
print(str(col), end=' ')
print()
st = 0
world = [row[:] for row in l] # current world
primes = [] # primes encountered, in increasing order
# Simultaneous to stepping the world, compute a Sieve of Eratosthenes
def is_prime(x):
    """Incremental primality test that also grows the `primes` sieve.

    Relies on being called with every integer in increasing order (the
    driver loops below do exactly that), so `primes` always contains every
    prime smaller than x.  Appends x to `primes` (and prints it) when prime.
    """
    if x == 1:
        return False
    for p in primes:
        if x % p == 0:
            return False
        # No point testing primes for which x/p < 2: any proper divisor of x
        # is at most x//2.  (Fixed: the original condition `p//2 > x` could
        # never trigger, since every stored prime is smaller than x.)
        if p > x // 2:
            break
    primes.append(x)
    print(f'{x} prime')
    return True
# Phase 1: step until every octopus flashes simultaneously (grid all zeros),
# feeding each step number through is_prime so the sieve stays complete.
while True:
    if all(all(x == 0 for x in r) for r in world):
        print(f'syncd step {st}')
        break
    world, fla = step(world)
    st += 1
    is_prime(st) # Update the sieve, even though there's no sync
    print(f'step {st}')
    show(world)
# Hypothesis: after sync, the flash period is 10
syncst = st
# Phase 2: find the first step after sync that is prime and lands on the
# hypothesised 10-step flash period.
while True:
    st += 1
    # ordering is important: is_prime must be consulted for every number
    if is_prime(st) and (st - syncst) % 10 == 0:
        print(f'prime step {st}')
        break
| Grissess/aoc2021 | day11c.py | day11c.py | py | 2,296 | python | en | code | 0 | github-code | 13 |
19351970326 | #! /usr/bin/env python
import argparse
from collections import defaultdict
from tools import templates
from tools.experiment_parser import parse_all
from tools.table_generator import format_table
SEPARATE_EF = True
def kmer_to_read_coverage(c, k, read_length=100):
    """Convert k-mer coverage *c* to read coverage; None passes through as None."""
    if c is None:
        return None
    positions_per_read = read_length - k + 1
    return c * read_length / positions_per_read
def compute_average(table_lines, std_key_suffix='_std'):
    """Average the numeric columns of *table_lines*, grouped by key[1:].

    *table_lines* maps tuple keys to row dicts.  Rows whose keys share the
    same tail (everything after the first key element) are averaged column
    by column; non-numeric cells raise TypeError on arithmetic and are
    silently skipped.  For every averaged column an unbiased sample standard
    deviation is also stored under ``column + std_key_suffix`` (0 when the
    group has a single sample).  Returns a defaultdict of group -> column
    dict.
    """
    cnt = defaultdict(lambda: defaultdict(int))
    total = defaultdict(lambda: defaultdict(float))
    avg = defaultdict(lambda: defaultdict(float))
    sq_dev = defaultdict(lambda: defaultdict(float))
    # First pass: accumulate sums and counts per (group, column).
    for key, row in table_lines.items():
        group = key[1:]
        for col, cell in row.items():
            try:
                total[group][col] += cell
                cnt[group][col] += 1.0
            except TypeError:
                pass
    for group, cols in total.items():
        for col, s in cols.items():
            avg[group][col] = None if cnt[group][col] == 0 else s / cnt[group][col]
    # Second pass: accumulate squared deviations from the mean; columns whose
    # mean is unavailable raise TypeError here and are skipped as well.
    for key, row in table_lines.items():
        group = key[1:]
        for col, cell in row.items():
            try:
                sq_dev[group][col] += (cell - avg[group][col]) ** 2
            except TypeError:
                pass
    for group, cols in sq_dev.items():
        for col, dev in cols.items():
            if cnt[group][col] <= 1:
                avg[group][col + std_key_suffix] = 0
            else:
                avg[group][col + std_key_suffix] = (dev / (cnt[group][col] - 1)) ** 0.5
    return avg
def main(args):
    """Parse experiment outputs and print them as an html/csv/tex table."""
    table_lines = parse_all(args.path, args.filter, not args.no_error, legacy=args.legacy)
    # Column order for the per-sequence table.
    header = [
        'seq_name',
        'provided_coverage', 'provided_error_rate', 'provided_k',
        'coverage', 'error_rate',
        'genome_size',
        'q1', 'q2', 'q',
        'guessed_coverage', 'guessed_error_rate',
        'provided_loglikelihood', 'loglikelihood', 'guessed_loglikelihood',
    ]
    # Column order when rows are averaged (includes the *_std columns that
    # compute_average adds).
    header_avg = [
        'provided_coverage',
        'provided_error_rate',
        'provided_k',
        'coverage', 'coverage_std',
        'error_rate', 'error_rate_std',
        'genome_size', 'genome_size_std',
        'q1', 'q1_std',
        'q2', 'q2_std',
        'q', 'q_std',
        'guessed_coverage', 'guessed_coverage_std',
        'guessed_error_rate', 'guessed_error_rate_std',
        'provided_loglikelihood', 'provided_loglikelihood_std',
        'loglikelihood', 'loglikelihood_std',
        'guessed_loglikelihood', 'guessed_loglikelihood_std',
    ]
    # header = [
    #     'provided_coverage', 'provided_error_rate',
    #     'coverage', 'error_rate',
    # ]
    # Output format -> table template mapping.
    format_templates = {
        'html': templates.html,
        'csv': templates.csv,
        'tex': templates.tex,
    }
    # Only tex needs escaping (underscores are special in LaTeX).
    format_escape = {
        'tex': lambda x: x.replace('_', '\\_'),
    }
    # Human-readable column titles (columns missing here keep their raw name).
    titles = {
        'provided_coverage': 'Coverage',
        'provided_error_rate': 'Error Rate',
        'coverage': 'Est. Coverage',
        'coverage_std': 'Est. Coverage Std',
        'error_rate': 'Est. Error Rate',
        'error_rate_std': 'Est. Error Rate Std',
        'genome_size': 'Est. Genome Size',
        'genome_size_std': 'Est. Genome Size Std',
    }
    if args.average:
        table_lines = compute_average(table_lines)
        header = header_avg
    print(format_table(
        header,
        titles,
        sorted(
            list(table_lines.values()),
            key=lambda x: (
                x['provided_coverage'],
                x['provided_error_rate'],
                x['provided_k'],
                x.get('repeats', False),
                x['seq_name'],
            )
        ),
        template_file=format_templates[args.format],
        escape=format_escape.get(args.format, None),
    ))
if __name__ == '__main__':
    # Command-line entry point: parse the options and delegate to main().
    parser = argparse.ArgumentParser(description='Parse experiment output and generate table')
    parser.add_argument('path', help='Experiment')
    parser.add_argument('-f', '--format', default='html', help='Table format')
    parser.add_argument('-i', '--filter', default='*.out', help='Filter files')
    parser.add_argument('-a', '--average', action='store_true',
                        help='Compute average from all sequences')
    parser.add_argument('-ne', '--no-error', action='store_true', help='Error is unknown')
    parser.add_argument('--legacy', action='store_true', help='Run in legacy mode')
    args = parser.parse_args()
    main(args)
| mhozza/covest | tools/experiment_table.py | experiment_table.py | py | 4,557 | python | en | code | 5 | github-code | 13 |
9678999510 | #!/usr/bin/env python
# coding: utf-8
# In[39]:
import numpy as np
# import time as time
# In[40]:
def Rdecomp(MD, OD):
    """Cholesky-factor a symmetric positive-definite tridiagonal matrix.

    MD is the main diagonal (length n) and OD the off-diagonal (length n-1)
    of A = R^T R, where R is upper bidiagonal with main diagonal ``alpha``
    and super-diagonal ``beta``.  Prints both factors (behaviour preserved)
    and returns them.

    Fixed: the original computed alpha/beta but never returned them, so
    callers could not use the result.  Dead commented-out timing code removed.

    :returns: tuple (alpha, beta) as numpy arrays
    :raises: a math domain / invalid-value condition if the matrix is not
        positive definite (sqrt of a negative pivot)
    """
    alpha = np.ones(len(MD))
    beta = np.ones(len(OD))
    alpha[0] = MD[0] ** 0.5
    for i in range(len(OD)):
        beta[i] = OD[i] / alpha[i]
        # Standard tridiagonal Cholesky recurrence for the next pivot.
        alpha[i + 1] = (MD[i + 1] - beta[i] ** 2) ** 0.5
    print('alpha= ', alpha, '\n', 'beta= ', beta)
    return alpha, beta
# In[ ]:
# Interactively read the two diagonals until their sizes are consistent:
# the main diagonal must be exactly one element longer than the off-diagonal.
while True:
    MD=np.array([int(j) for j in input("Enter your positive symetirc definite main diagonal, for instance: 1,2,3,4,...: \n").split(',')])
    OD=np.array([int(j) for j in input("Enter your positive symetirc definite outer diagonal: \n").split(',')])
#     MD=np.ones(32000000)*32
#     OD=np.ones(31999999)*8
    if len(MD) == len(OD)+1:
        break
    else:
        print("Your given arrays are not valid. Check your input arrays again.")
try:
    Rdecomp(MD, OD)
# NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
# `except Exception` would be safer -- left unchanged here.
except:
    print("Given matrix is not posibble to be decomposed to R and R transposed")
| MobinaSedaghat/Exercises | Alliance/Assignment 1, Exercise 3, Part 7&8- Efficient Version.py | Assignment 1, Exercise 3, Part 7&8- Efficient Version.py | py | 1,032 | python | en | code | 0 | github-code | 13 |
35219274365 | import time
import numpy as np
import cupy as cp
from cupy.cuda import Device
from cupy.cuda.runtime import getDeviceCount
from ..common import _start, _finish
# 計測開始
def start(method_name: str = '', k: int = None) -> float:
    """Begin a timed run: report it via the shared _start hook and return a
    perf_counter timestamp to be passed to finish() later."""
    _start(method_name, k)
    return time.perf_counter()
# 計測終了
def finish(start_time: float, isConverged: bool, num_of_iter: int, final_residual: float, final_k: int = None) -> float:
    """End a timed run: compute the elapsed wall time since *start_time*,
    report the solver outcome via the shared _finish hook, and return the
    elapsed time in seconds."""
    elapsed_time = time.perf_counter() - start_time
    _finish(elapsed_time, isConverged, num_of_iter, final_residual, final_k)
    return elapsed_time
# パラメータの初期化
def init(b, x=None, maxiter=None) -> tuple:
    """Prepare the solver inputs and bookkeeping arrays on the GPU.

    :param b: right-hand-side vector (host array; copied to the device)
    :param x: optional initial guess; a numpy array is copied to the device,
        anything else yields a zero vector of the same size as b
    :param maxiter: iteration cap; defaults to the problem size N
    :returns: (b, x, maxiter, b_norm, N, residual, num_of_solution_updates)
        where residual and num_of_solution_updates have maxiter+1 slots
    """
    T = np.float64
    b = cp.array(b)
    b_norm = cp.linalg.norm(b)
    N = b.size
    if isinstance(x, np.ndarray):
        x = cp.array(x)
    else:
        x = cp.zeros(N, dtype=T)
    # Fixed: `maxiter == None` -> identity check (PEP 8).
    if maxiter is None:
        maxiter = N
    residual = cp.zeros(maxiter+1, T)
    # Fixed: `np.int` was removed in NumPy 1.24; the builtin int is the
    # documented replacement and produces the same default integer dtype.
    num_of_solution_updates = cp.zeros(maxiter+1, int)
    return b, x, maxiter, b_norm, N, residual, num_of_solution_updates
class MultiGpu(object):
    """Class-level helper that shards a matrix-vector product across all
    visible CUDA devices.

    Usage (all state is class-level, so there is exactly one active
    configuration per process): call init(), then alloc(A, b, T), then
    dot(A, x) repeatedly.  Rows of A are split evenly between GPUs; the
    last device (``end``) acts as the master holding the full input/output
    vectors.

    NOTE(review): the class attributes below are shared mutable state on the
    class object itself -- concurrent use from multiple solvers in one
    process would clash.  Confirm single-solver usage before reusing.
    """
    # numbers
    begin: int = 0
    end: int = 0
    num_of_gpu: int = 0
    # dimentinal size
    N: int = 0
    local_N: int = 0
    # matrix
    A: list = []
    # vector
    x: list = []
    y: list = []
    out: np.ndarray = None
    # gpu stream
    streams = None
    # Initialize every visible GPU: managed-memory allocator, one stream per
    # device, and peer-to-peer access between all device pairs.
    @classmethod
    def init(cls):
        cls.begin = 0
        cls.end = getDeviceCount() - 1
        cls.num_of_gpu = getDeviceCount()
        cls.streams = [None] * cls.num_of_gpu
        # init memory allocator
        for i in range(cls.num_of_gpu):
            Device(i).use()
            pool = cp.cuda.MemoryPool(cp.cuda.malloc_managed)
            cp.cuda.set_allocator(pool.malloc)
            cls.streams[i] = cp.cuda.Stream()
            # Enable P2P
            for j in range(cls.num_of_gpu):
                if i == j:
                    continue
                cp.cuda.runtime.deviceEnablePeerAccess(j)
    # Allocate the per-device matrix slices and work vectors.
    @classmethod
    def alloc(cls, A, b, T):
        # dimentional size
        cls.N = b.size
        cls.local_N = cls.N // cls.num_of_gpu
        # byte size
        cls.nbytes = b.nbytes
        cls.local_nbytes = b.nbytes // cls.num_of_gpu
        # init list
        cls.A = [None] * cls.num_of_gpu
        cls.x = [None] * cls.num_of_gpu
        cls.y = [None] * cls.num_of_gpu
        # allocate A, x, y
        for i in range(cls.num_of_gpu):
            Device(i).use()
            # divide A: dense rows go to cp.array, anything else is assumed
            # sparse and converted to CSR on the device
            if isinstance(A, np.ndarray):
                cls.A[i] = cp.array(A[i*cls.local_N:(i+1)*cls.local_N], T)
            else:
                from cupyx.scipy.sparse import csr_matrix
                cls.A[i] = csr_matrix(A[i*cls.local_N:(i+1)*cls.local_N])
            cls.x[i] = cp.zeros(cls.N, T)
            cls.y[i] = cp.zeros(cls.local_N, T)
        # allocate output vector
        cls.out = cp.zeros(cls.N, T)
    # matvec with multi-gpu
    @classmethod
    def dot(cls, A, x):
        # copy to workers: broadcast the full x from the master device
        for i in range(cls.num_of_gpu):
            Device(i).use()
            cp.cuda.runtime.memcpyPeerAsync(cls.x[i].data.ptr, i, x.data.ptr, cls.end, cls.nbytes, cls.streams[i].ptr)
            # dot
            cls.y[i] = cls.A[i].dot(cls.x[i])
        # copy to master: gather each partial result into its slice of out
        for i in range(cls.num_of_gpu):
            cp.cuda.runtime.memcpyPeerAsync(cls.out[i*cls.local_N].data.ptr, cls.end, cls.y[i].data.ptr, i, cls.local_nbytes, cls.streams[i].ptr)
        # sync
        for i in range(cls.num_of_gpu):
            cls.streams[i].synchronize()
        return cls.out
| 5enxia/parallel-krylov | v3/gpu/common.py | common.py | py | 3,653 | python | en | code | 1 | github-code | 13 |
10155745334 | from collections import defaultdict
import itertools
class determinizeAFND:
    """Build an NFA from a token/grammar file and determinize it into a DFA.

    The input file (hard-coded as ``tokens.txt``) may contain plain token
    lines and/or regular-grammar rules of the form ``<S> ::= a<A> | b``.
    ``automaton`` maps a state index to one transition target per symbol in
    ``symbols``; ``mapGramatic`` maps state names (suffixed ``f`` for final
    states) to indices.  After construction, ``determinize()`` resolves
    nondeterministic transitions into subset states, minimizes, and fills
    the remaining empty transitions with an error/sink state.

    Two fixes against the original:
      * SetAlphabetSymbols registered single-character terminals with
        ``ord(self.RecurrentLine)`` (the whole split line -> TypeError)
        instead of the character itself.
      * HandlingLines called the token branch without parentheses, making
        it a no-op expression.
    """
    def __init__(self, pTokens):
        self.NameFile = 'tokens.txt' #ptokens
        self.states = 0
        self.done = False
        self.automaton = defaultdict(list)
        self.mapGramatic = {}
        self.symbols = list()
        self.StatesRemoved = {}
        self.RecurrentLine = None
        self.FinalStates = list()
        self.BuildingAFND()
        self.determinize()
    def determinize(self):
        """Resolve subset states until no transition is nondeterministic."""
        keys = self.getListOfKeysAutomoton()
        for indexKey in keys:
            state = self.GetValueKey(indexKey)
            initState = state
            if self.IsNotStateRule(state):
                # Subset state such as "1,2": union the member transitions.
                state = state.split(",")
                state = [(position.strip()) for position in state]
                data= []
                for indexSymbol in range(len(self.symbols)):
                    newValue = list()
                    for value in state:
                        firstTest = self.GetValueKey(int(value))
                        secondTest = self.GetValueKey(indexKey)
                        if self.TestTypeState(firstTest,secondTest):
                            # A member state is final -> promote the subset
                            # state itself to final (rename with "f" suffix).
                            copyValue = self.mapGramatic[initState]
                            self.mapGramatic[initState+"f"] = copyValue
                            self.mapGramatic.pop(initState)
                        if (self.IsNotAutomatonEmpty(int(value),indexSymbol)):
                            if(self.automaton[int(value)][indexSymbol] not in newValue):
                                newValue.append(self.automaton[int(value)][indexSymbol])
                    if (len(newValue) == 1):
                        self.automaton[indexKey][indexSymbol] = newValue[0]
                        data.append((str(chr(self.symbols[indexSymbol]))+":"+str(newValue[0])))
                    if (len(newValue) >= 2 ) :
                        self.SearchStatesEqualsAndAddState(indexKey,indexSymbol,newValue)
                        newState = [str(position) for position in newValue]
                        newState = ','.join(newState)
            else:
                # Ordinary rule state: replace every list-valued transition
                # (nondeterminism) with a subset state.
                for(index,value) in enumerate(self.automaton[indexKey]):
                    if self.IsHaveInderminism(value):
                        self.SearchStatesEqualsAndAddState(indexKey,index,value)
                        newState = [str(position) for position in value]
                        newState = ','.join(newState)
            if self.IsKeysInc(keys):
                # New subset states were created -- append them so this loop
                # also processes them.
                copyValue = set(self.automaton.keys()) - set(keys)
                [keys.append(newKey) for newKey in copyValue]
        self.Minimize()
        self.FillAllTable()
        self.done = True
    def FillAllTable(self):
        """Route every empty transition to a fresh self-looping sink state."""
        self.mapGramatic["Xf"] = self.states
        for key in self.automaton.keys():
            for index in range(len(self.automaton[key])):
                if self.IsNotAutomatonEmpty(key,index) == False:
                    self.automaton[key][index] = self.states
        self.automaton[self.states] = [self.states] * len(self.symbols)
        self.IncNumberOfStates()
    def Minimize(self):
        """Drop states unreachable from 0 and states that reach no final state."""
        copyAutomaton = self.automaton.copy()
        visited = self.DFS(0,copyAutomaton)
        diff = list (set(copyAutomaton.keys()) - visited )
        if len(diff) > 0:
            self.RemoveStates(diff)
        states = []
        for value in self.mapGramatic.values():
            test = self.GetValueKey(value)
            if self.isStateFinal(test):
                states.append(self.mapGramatic[test])
        self.FinalStates[:] = states
        for key in list(self.automaton):
            if('TEf' in self.mapGramatic):
                if(self.mapGramatic['TEf'] == key) :
                    continue
            visited = self.DFS(key,copyAutomaton)
            visitedYet = True
            for state in states:
                if( state in list(visited)):
                    visitedYet = False
            if visitedYet:
                self.RemoveStates([key])
    def StateVisited(self, pStates,pNodes):
        # True when none of pStates appears in pNodes.
        for state in pStates:
            if( state in list(pNodes)):
                return False
        return True
    def RemoveStates(self,pStates):
        """Drop the given state indices, remembering them in StatesRemoved."""
        for index in pStates:
            removed = self.GetValueKey(int(index))
            self.StatesRemoved[removed] = self.mapGramatic[removed]
            self.mapGramatic.pop(removed)
            self.automaton.pop(index)
    def DFS(self, init, pAutomaton ):
        """Iterative depth-first search; returns the set of reachable states."""
        visited = set()
        stack = [init]
        while stack:
            state = stack.pop()
            if(state not in visited):
                visited.add(state)
                stack.extend(set(pAutomaton[state]) - visited)
                # Empty transitions ('') are not states -- purge them.
                if('' in stack):
                    count = stack.count('')
                    for i in range(int(count)):
                        stack.pop(stack.index(''))
        return visited
    def UpdateKeys(self, pkeys,pcopyValue):
        return [pkeys.append(newKey) for newKey in pcopyValue]
    def IsKeysInc(self, pkeys):
        return (len(self.automaton.keys()) > len(pkeys))
    def TestTypeState(self, pFirstTest,pSecondTest):
        # Member state is final while the subset state is not yet marked so.
        return ( self.isStateFinal(pFirstTest) and ( "f" not in pSecondTest ))
    def SearchStatesEqualsAndAddState(self, pIndexKey, pIndex, pvalue):
        """Register (or reuse) the subset state for *pvalue* and rewire the
        transition automaton[pIndexKey][pIndex] to it."""
        permutatioValues = self.getListOfCombinations(pvalue)
        addState = True
        for value in permutatioValues:
            state = str(value)
            state = state[1:-1] ##remove '()'
            if (state in self.mapGramatic) or (state+"f" in self.mapGramatic):
                addState = False
        pvalue = str(pvalue)
        pvalue = pvalue[1:-1]
        if(pvalue+"f" in self.mapGramatic):
            pvalue = pvalue + "f"
        if addState:
            self.mapGramatic[pvalue] = self.states
            self.automaton[self.states] = ['']*len(self.symbols)
            self.IncNumberOfStates()
        self.automaton[pIndexKey][pIndex] = self.mapGramatic[pvalue]
    def getListOfCombinations(self, pvalue):
        return list(itertools.permutations(pvalue))
    def IsHaveInderminism(self,pvalue):
        # A list-valued transition means several NFA targets for one symbol.
        return (type(pvalue) is list)
    def IsNotStateRule(self,pState):
        # Subset states start with a digit; grammar states with a letter.
        return (pState[0].isalpha() is not True)
    def GetValueKey(self,pindex):
        """Reverse lookup: state index -> state name (None if absent)."""
        for key, value in self.mapGramatic.items():
            if value == pindex:
                return key
    def getListOfKeysAutomoton(self):
        return list(self.automaton.keys())
    def BuildingAFND(self):
        """Two passes over the file: collect the alphabet, then the NFA."""
        self.FillTableOfSymbol()
        self.HandlingTypesInInput()
    def HandlingTypesInInput(self):
        for self.RecurrentLine in open(self.NameFile,'r'):
            self.ReadGrammar() if self.isGrammar() else self.ReadToken()
    def ReadToken(self):
        """Add one linear chain of states for a plain token line."""
        self.RemoveAndSplitToken()
        if self.HaveStateYet('Sf'):
            SymbolThatGetNameOfRule = 'Sf'
        else:
            SymbolThatGetNameOfRule = 'S'
        for i in range(0,len(self.RecurrentLine)):
            if self.isEmptyStates():
                self.setState(SymbolThatGetNameOfRule)
            if self.isOneSizeToken(i):
                state = 'T' + str(self.states) + 'f'
            else:
                state = 'T' + str(self.states)
            if self.HaveNoStateYet(state):
                self.setState(state)
            self.isSymbolDeterministic(self.RecurrentLine[i],state,SymbolThatGetNameOfRule)
            SymbolThatGetNameOfRule = state
    def isOneSizeToken(self,pI):
        return (pI == (len(self.RecurrentLine) - 1))
    def isEmptyStates(self):
        return (self.states == 0)
    def ReadGrammar(self):
        """Add the transitions described by one grammar rule line."""
        self.RemoveAndSplitGrammar()
        position = self.findPosition(0)
        if position.get('x_one') < 0 or position.get('x_two') < 0:
            return None
        state = self.getState(0,position)
        if self.HaveNoStateYet(state):
            self.setState(state)
        SymbolThatGetNameOfRule = state
        for i in range(1,len(self.RecurrentLine)):
            if self.isNotRuleEmpty(i):
                position = self.findPosition(i)
                state = self.getState(i,position)
                if position.get('x_one') < 0 or position.get('x_two') < 0: # |a| É TERMINAL
                    state = 'TEf'
                    if self.HaveNoStateYet(state):
                        self.setState(state)
                    self.isSymbolDeterministic(self.RecurrentLine[i],state,SymbolThatGetNameOfRule)
                else:
                    if self.HaveStateYet(state + "f"):
                        state = state + "f"
                    if self.HaveNoStateYet(state):
                        self.setState(state)
                    self.isSymbolDeterministic(self.getProduction(self.RecurrentLine[i],position),state,SymbolThatGetNameOfRule)
    def isSymbolDeterministic(self,pSymbol,pState,pSymbolThatGetNameOfRule):
        """Record source --pSymbol--> pState, accumulating a list when the
        slot is already occupied (nondeterminism to be resolved later)."""
        idxState = self.mapGramatic[pState]
        idxSymbolThatGetNameOfRule = self.mapGramatic[pSymbolThatGetNameOfRule]
        idxSymbol = self.symbols.index(ord(pSymbol))
        if(self.IsNotAutomatonEmpty(idxSymbolThatGetNameOfRule,idxSymbol)):
            value = self.getValueAutomaton(idxSymbolThatGetNameOfRule,idxSymbol)
            if idxState not in value:
                value.append(self.mapGramatic[pState])
            if len(value) > len([self.automaton[idxSymbolThatGetNameOfRule][idxSymbol]]):
                self.automaton[idxSymbolThatGetNameOfRule][idxSymbol] = value
        else:
            self.automaton[idxSymbolThatGetNameOfRule][idxSymbol] = self.mapGramatic[pState]
    def IsNotAutomatonEmpty(self,pIdxSymbolThatGetNameOfRule,pIdxSymbol):
        return (self.automaton[pIdxSymbolThatGetNameOfRule][pIdxSymbol] != '')
    def getValueAutomaton(self,pIdxSymbolThatGetNameOfRule,pIdxSymbol):
        # Normalize the transition slot to a list.
        if(type(self.automaton[pIdxSymbolThatGetNameOfRule][pIdxSymbol]) is list):
            return self.automaton[pIdxSymbolThatGetNameOfRule][pIdxSymbol]
        else:
            return [self.automaton[pIdxSymbolThatGetNameOfRule][pIdxSymbol]]
    def getState(self,pI,pPosition):
        # Non-terminal name between '<' and '>', plus "f" when the rule is final.
        return self.RecurrentLine[pI][pPosition.get('x_one')+1:pPosition.get('x_two')] + self.getTypeGrammar()
    def isNotRuleEmpty(self,pI):
        return (self.RecurrentLine[pI] != ' ' and self.RecurrentLine[pI] != '')
    def setState(self,pState):
        """Register a state name, assigning it a fresh index and empty row."""
        if 'S' == pState:
            self.mapGramatic[pState] = 0
            self.automaton[0] = ['']*len(self.symbols)
            self.IncNumberOfStates()
        elif self.isStateFinal(pState) and self.HaveStateYet(pState[0:-1]):
            # Promote an existing non-final state instead of duplicating it.
            self.mapGramatic[pState] = self.mapGramatic[pState[0:-1]] ##SERA?
            self.mapGramatic.pop(pState[0:-1])
        else:
            self.mapGramatic[pState] = self.states
            self.automaton[self.states] = ['']*len(self.symbols)
            self.IncNumberOfStates()
    def isStateFinal(self,pState):
        return ('f' in pState)
    def HaveStateYet(self,pState):
        return (pState in self.mapGramatic.keys())
    def HaveNoStateYet(self,pState):
        return (pState not in self.mapGramatic.keys())
    def IncNumberOfStates(self):
        self.states += 1
    def getTypeGrammar(self):
        if self.IsGrammarFinal():
            return "f"
        else:
            return ""
    def IsGrammarFinal(self):
        # An empty alternative (epsilon production) marks the rule as final.
        return ('' in self.RecurrentLine or ' ' in self.RecurrentLine)
    def FillTableOfSymbol(self):
        for self.RecurrentLine in open(self.NameFile,'r'):
            self.SetAlphabetSymbols()
    def HandlingLines(self):
        # BUG FIX: the token branch was missing its call parentheses, so it
        # evaluated the bound method without ever calling it.
        self.RemoveAndSplitGrammar() if self.isGrammar() else self.RemoveAndSplitToken()
    def isGrammar(self):
        return "::=" in self.RecurrentLine
    def RemoveAndSplitGrammar(self):
        # '::=' becomes ':|' so that split('|') separates head and productions.
        self.RecurrentLine = self.RecurrentLine.replace(':=', '|')
        self.RecurrentLine = self.RecurrentLine.split('|')
        self.RecurrentLine = [(rule.strip()) for rule in self.RecurrentLine]
    def RemoveAndSplitToken(self):
        self.RecurrentLine = self.RecurrentLine.strip()
    def findPosition(self,pCaracter):
        """Positions of '<' (x_one) and '>' (x_two) in production pCaracter;
        -1 when absent."""
        return {'x_one': self.RecurrentLine[pCaracter].find("<"), 'x_two': self.RecurrentLine[pCaracter].find('>') }
    def SetAlphabetSymbols(self):
        """Collect the terminal alphabet (as ord values) from one input line."""
        if self.isGrammar():
            self.RemoveAndSplitGrammar()
            for Icaracter in range(1,len(self.RecurrentLine)):
                position = self.findPosition(Icaracter)
                if position.get('x_one')>= 0 and position.get('x_two') >= 0:
                    symbol = self.getProduction(self.RecurrentLine[Icaracter],position)
                    if self.HaveNoSymbolYet(symbol):
                        self.symbols.append(ord(symbol))
                elif(len(self.RecurrentLine[Icaracter])==1):
                    if self.HaveNoSymbolYet(self.RecurrentLine[Icaracter]):
                        # BUG FIX: was ord(self.RecurrentLine) -- ord() of the
                        # whole split line raised TypeError; register the
                        # single terminal character instead.
                        self.symbols.append(ord(self.RecurrentLine[Icaracter]))
        else:
            self.RemoveAndSplitToken()
            for Icaracter in range(0,len(self.RecurrentLine)):
                if self.HaveNoSymbolYet(self.RecurrentLine[Icaracter]) and self.RecurrentLine[Icaracter] != " " :
                    self.symbols.append(ord(self.RecurrentLine[Icaracter]))
    def HaveNoSymbolYet(self,pSymbol):
        return ord(pSymbol) not in self.symbols
    def getProduction(self, Pprod,Pposition):
        """Terminal part of a production: the prefix before '<', or (for a
        leading non-terminal) the remainder after '>' minus the last char."""
        if Pposition.get('x_one') > 0:
            return Pprod[0:Pposition.get('x_one')]
        else:
            return Pprod[Pposition.get('x_two') + 1: -1]
def main():
    # Building + determinization happen entirely inside the constructor;
    # the argument is ignored (the class reads the hard-coded 'tokens.txt').
    afnd = determinizeAFND('tokens.txt')
if __name__ == '__main__':
    main()
| Ivairpuerari/Compiladores | Afnd.py | Afnd.py | py | 14,129 | python | en | code | 0 | github-code | 13 |
11097993345 | year_tax = int(input())
tennis_racquets = int(input())  # NOTE(review): read but never used below -- confirm against the problem statement
sneaker_pairs = int(input())
sneakers = year_tax / 6
# NOTE(review): tracksuit is computed from the *count* of sneaker pairs rather
# than a price -- looks like a possible variable mix-up; verify the spec.
tracksuit = sneaker_pairs * 0.80
basketball = tracksuit * 1/4
accessories = basketball * 1/5
total = year_tax + sneakers + tracksuit + basketball + accessories
price_djokovic = total / 8  # Djokovic pays one eighth of the total
sponsor = total * 7/8  # sponsors cover the remaining seven eighths
print(f"Price to be paid by Djokovic {price_djokovic}")
print(f"Price to be paid by sponsors {sponsor}")
| tanchevtony/SoftUni_Python_basic | More exercises/exam 9-10 march 2019/01 basketball equipment.py | 01 basketball equipment.py | py | 425 | python | en | code | 0 | github-code | 13 |
30138442802 | from tests.base import ApiDBTestCase
from zou.app.utils import fields
from zou.app.models.project import Project
class ProjectTestCase(ApiDBTestCase):
    """CRUD and query tests for the /data/projects REST endpoints.

    setUp seeds three open projects; individual tests exercise listing,
    retrieval, creation, update, deletion (only allowed for closed
    projects) and lookup by name.
    """
    def setUp(self):
        super(ProjectTestCase, self).setUp()
        self.generate_fixture_project_status()
        self.generate_fixture_project()
        self.generate_fixture_project("Agent 327")
        self.generate_fixture_project("Big Buck Bunny")
        self.open_status_id = str(self.open_status.id)
    def test_get_projects(self):
        projects = self.get("data/projects")
        self.assertEqual(len(projects), 3)
    def test_get_project(self):
        # Retrieval by id must match the listing entry; unknown ids are 404.
        project = self.get_first("data/projects?relations=true")
        project_again = self.get("data/projects/%s" % project["id"])
        project["project_status_name"] = "Open"
        self.assertEqual(project, project_again)
        self.get_404("data/projects/%s" % fields.gen_uuid())
    def test_create_project(self):
        # A new project defaults to the open status.
        data = {
            "name": "Cosmos Landromat 2",
            "description": "Video game trailer.",
        }
        self.project = self.post("data/projects", data)
        self.assertIsNotNone(self.project["id"])
        self.assertEqual(
            self.project["project_status_id"], str(self.open_status_id)
        )
        projects = self.get("data/projects")
        self.assertEqual(len(projects), 4)
    def test_update_project(self):
        project = self.get_first("data/projects")
        data = {"name": "Cosmos Landromat 3"}
        self.put("data/projects/%s" % project["id"], data)
        project_again = self.get("data/projects/%s" % project["id"])
        self.assertEqual(data["name"], project_again["name"])
        self.put_404("data/projects/%s" % fields.gen_uuid(), data)
    def test_delete_project(self):
        # Deleting an open project is rejected (400); closed projects delete.
        projects = self.get("data/projects")
        self.assertEqual(len(projects), 3)
        project = projects[0]
        self.delete("data/projects/%s" % project["id"], 400)
        self.generate_fixture_project_closed_status()
        self.generate_fixture_project_closed()
        self.delete("data/projects/%s" % self.project_closed.id)
        self.assertIsNone(Project.get(self.project_closed.id))
    def test_project_status(self):
        # An explicitly supplied project_status_id must be preserved.
        data = {"name": "stalled", "color": "#FFFFFF"}
        self.open_status = self.post("data/project-status", data)
        data = {"name": "close", "color": "#000000"}
        self.close_status = self.post("data/project-status", data)
        data = {
            "name": "Cosmos Landromat 2",
            "description": "Video game trailer.",
            "project_status_id": self.open_status["id"],
        }
        self.project = self.post("data/projects", data)
        self.assertIsNotNone(self.project["id"])
        project_again = self.get("data/projects/%s" % self.project["id"])
        self.assertEqual(
            project_again["project_status_id"], self.open_status["id"]
        )
    def test_get_project_by_name(self):
        # Name lookup is case-insensitive on the server side.
        project_before = self.get("data/projects")[1]
        project = self.get_first(
            "data/projects?name=%s" % project_before["name"].lower()
        )
        self.assertEqual(project["id"], project_before["id"])
| cgwire/zou | tests/models/test_project.py | test_project.py | py | 3,215 | python | en | code | 152 | github-code | 13 |
72545240659 | # create by fanfan on 2018/8/15 0015
import os
from glob import glob
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import math
from tensorflow.contrib.layers import conv2d as conv2d_1
from tensorflow.contrib.layers import conv2d_transpose
from tensorflow.contrib.layers import fully_connected
from tensorflow.contrib.layers import batch_norm
from tensorflow.examples.tutorials.mnist import input_data
from GAN.cartoon import config
from GAN.cartoon import utils
from GAN.cartoon.ops import lrelu,conv2d,deconv2d,conv_cond_concat,linear,concat
import time
def conv_out_size_same(size, stride):
    """Output size of a 'same'-padded strided convolution: ceil(size / stride)."""
    quotient = float(size) / float(stride)
    return int(math.ceil(quotient))
class DCGAN(object):
def __init__(self,sess,input_height=108,input_width=108,crop=True,batch_size=200,sample_num=200,output_height=64,output_width=64,
y_dim=None,z_dim=100,gf_dim=64,df_dim=64,gfc_dim=1024,dfc_dim=1024,c_dim=3,dataset_name='default',input_fname_pattern='*.jpg',
checkoutpoint_dir=None,sample_dir=None,data_dir='./data'):
'''
:param sess: tensorflow运行session
:param input_height: 输入图片的高度
:param input_width: 输入图片的宽度
:param crop: 是否需要剪裁
:param batch_size: 一次训练的训练个数
:param sample_num: 生成样本的数目
:param output_height: 输出图片高度
:param output_width: 输出图片宽度
:param y_dim: 如果是mnist,则是10,否则是None
:param z_dim: 随机的变量的维书,默认100
:param gf_dim: 生成器G第一层卷积的fillters数目
:param df_dim: 判别器D第一层卷积的fillters数目
:param gfc_dim: 生成器全连接层的维度
:param dfc_dim: 判别器全连接层的维度
:param c_dim: 图片的颜色,默认3,如果是灰度图,则为1,
:param dataset_name: 数据集名称,mnist或者自定义
:param input_fname_pattern: 输入图片名字的正则匹配
:param checkoutpoint_dir: 模型保存位置
:param sample_dir: 生成的样例图片位置
:param data_dir: 数据集的根目录
'''
self.sess = sess
self.crop = crop
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.y_dim = y_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.d_bn1 = utils.batch_norm(scope='d_bn1')
self.d_bn2 = utils.batch_norm(scope='d_bn2')
if not self.y_dim:
self.d_bn3 = utils.batch_norm(scope='d_bn3')
self.g_bn0 = utils.batch_norm(scope='g_bn0')
self.g_bn1 = utils.batch_norm(scope='g_bn1')
self.g_bn2 = utils.batch_norm(scope='g_bn2')
if not self.y_dim:
self.g_bn3 = utils.batch_norm(scope='g_bn3')
self.dataset_name = dataset_name
self.input_fname_pattern = input_fname_pattern
self.checkpoint_dir = checkoutpoint_dir
self.data_dir = data_dir
if self.dataset_name == 'mnist':
self.data_x ,self.data_y = self.load_mnist()
self.c_dim = self.data_x[0].shape[-1]
else:
data_path = os.path.join(self.data_dir,self.dataset_name,self.input_fname_pattern)
self.data = glob(data_path)
if len(self.data) == 0:
raise Exception("[!] No data found in '" + data_path + "'")
np.random.shuffle(self.data)
imreadImg = imread(self.data[0])
if len(imreadImg.shape) >= 3:
self.c_dim = imread(self.data[0]).shape[-1]
else:
self.c_dim = 1
self.grayscale = (self.c_dim == 1)
self.build_model()
def build_model(self):
if self.y_dim:
self.y = tf.placeholder(tf.float32,[self.batch_size,self.y_dim],name='y')
else:
self.y = None
if self.crop:
image_dims = [self.output_height,self.output_width,self.c_dim]
else:
image_dims = [self.input_height,self.input_width,self.c_dim]
self.inputs = tf.placeholder(tf.float32,[self.batch_size] + image_dims,name='real_images')
self.z = tf.placeholder(tf.float32,[None,self.z_dim],name='z')
self.z_summary = tf.summary.histogram('z',self.z)
self.G = self.generator(self.z,self.y)
self.D,self.D_logits = self.discriminator(self.inputs,self.y,reuse=False)
self.sampler_data = self.sampler(self.z,self.y)
self.D_,self.D_logits_ = self.discriminator(self.G,self.y,reuse=True)
self.d_summary = tf.summary.histogram('d',self.D)
self.d__summary = tf.summary.histogram('d_',self.D_)
self.G_summary = tf.summary.image('G',self.G)
self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,labels=tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,labels=tf.zeros_like(self.D_)))
self.g_loss =tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,labels=tf.ones_like(self.D_)))
self.d_loss_real_summary = tf.summary.scalar('d_loss_real',self.d_loss_real)
self.d_loss_fake_summary = tf.summary.scalar('d_loss_fake',self.d_loss_fake)
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss_summary = tf.summary.scalar('g_loss',self.g_loss)
self.d_loss_summary = tf.summary.scalar('d_loss',self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
def sampler(self,z,y=None):
with tf.variable_scope('generator') as scope:
scope.reuse_variables()
if not self.y_dim:
s_h,s_w = self.output_height,self.output_width
s_h2,s_w2 = conv_out_size_same(s_h,2),conv_out_size_same(s_w,2)
s_h4,s_w4 = conv_out_size_same(s_h2,2),conv_out_size_same(s_w2,2)
s_h8,s_w8 = conv_out_size_same(s_h4,2),conv_out_size_same(s_w4,2)
s_h16,s_w16 = conv_out_size_same(s_h8,2),conv_out_size_same(s_w8,2)
h0 = fully_connected(z,self.gf_dim*8*s_h16*s_w16,scope='g_h0_linear',activation_fn=None)
h0 = tf.reshape(h0,[-1,s_h16,s_w16,self.gf_dim *8])
btn_h0 = batch_norm(h0,is_training=False,scope='g_btn_h0')
h0 = tf.nn.relu(btn_h0)
h1 = conv2d_transpose(h0,self.gf_dim*4,kernel_size=5,stride=2,scope='g_h1')
btn_h1 = batch_norm(h1,is_training=False,scope='g_btn_h1')
h1 = tf.nn.relu(btn_h1)
h2 = conv2d_transpose(h1,self.gf_dim*2,kernel_size=5,stride=2,scope='g_h2')
btn_h2 = batch_norm(h2,is_training=False,scope='g_btn_h2')
h2 = tf.nn.relu(btn_h2)
h3 = conv2d_transpose(h2,self.gf_dim,kernel_size=5,stride=2,scope='g_h3')
btn_h3 = batch_norm(h3,is_training=False,scope='g_btn_h3')
h3 = tf.nn.relu(btn_h3)
h4 = conv2d_transpose(h3,self.c_dim,kernel_size=5,stride=2,scope='g_h4')
return tf.nn.tanh(h4)
else:
s_h,s_w = self.output_height,self.output_width
s_h2,s_h4 = int(s_h/2),int(s_h/4)
s_w2,s_w4 = int(s_w/2),int(s_w/4)
yb = tf.reshape(y,[self.batch_size,1,1,self.y_dim])
z = tf.concat([z,y],1)
h0 = fully_connected(z,self.gfc_dim,activation_fn=None,scope='g_h0_linear')
btn_h0 = batch_norm(h0,is_training=False,scope='g_btn_h0')
h0 = tf.nn.relu(btn_h0)
h0 = tf.concat([h0,y],1)
h1 = fully_connected(h0,self.gf_dim *2*s_h4*s_w4,activation_fn=None,scope='g_h1_linear')
btn_h1 = batch_norm(h1,is_training=False,scope='g_btn_h1')
h1 = tf.nn.relu(btn_h1)
h1 = tf.reshape(h1,[self.batch_size,s_h4,s_w4,self.gf_dim*2])
x_shapes = h1.get_shape()
y_shapes = yb.get_shape()
h1 = tf.concat([h1,yb* tf.ones([x_shapes[0],x_shapes[1],x_shapes[2],y_shapes[3]])],3)
h2 = conv2d_transpose(h1,self.gf_dim*2,kernel_size=5,stride=2,scope='g_h2')
btn_h2 = batch_norm(h2,is_training=False,scope="g_btn_h2")
h2 = tf.nn.relu(btn_h2)
x_shapes = h2.get_shape()
h2 = tf.concat([h2,yb*tf.ones([x_shapes[0],x_shapes[1],x_shapes[2],y_shapes[3]])],3)
h3 = conv2d_transpose(h2,self.c_dim,kernel_size=5,stride=2,scope='g_h3')
return tf.nn.sigmoid(h3)
def load_data(self):
data_path = os.path.join(self.data_dir,self.dataset_name)
return x,y_vec
@property
def model_dir(self):
return '{}_{}_{}_{}'.format(self.dataset_name,self.batch_size,self.output_height,self.output_width)
def save_mode(self,checkpoint_dir,step):
model_name = 'DCGAN.model'
checkpoint_dir = os.path.join(checkpoint_dir,self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,os.path.join(checkpoint_dir,model_name),global_step=step)
def restore_mode(self,checkpoint_dir,step):
checkpoint_dir = os.path.join(checkpoint_dir,self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess,os.path.join(checkpoint_dir,ckpt_name))
return True
else:
print("[*] Failed to find a checkpoint")
return False
def discriminator(self, image, y=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
if not self.y_dim:
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')
return tf.nn.sigmoid(h4), h4
else:
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
x_shapes = image.get_shape()
y_shapes = yb.get_shape()
x = tf.concat([image, yb * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
h0 = conv2d_1(x, self.c_dim + self.y_dim, kernel_size=5, stride=2, scope='d_h0_conv',activation_fn=None)
h0 = tf.nn.leaky_relu(h0)
x_shapes = h0.get_shape()
h0 = tf.concat([h0, yb * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
h1 = conv2d_1(h0, self.df_dim + self.y_dim, kernel_size=5, stride=2, scope='d_h1_conv',activation_fn=None)
btn_h1 = batch_norm(h1, scope='d_btn_h1')
h1 = tf.nn.leaky_relu(btn_h1)
h1 = tf.reshape(h1, [self.batch_size, -1])
h1 = tf.concat([h1, y], 1)
h2 = fully_connected(h1, self.dfc_dim, activation_fn=None, scope='d_h2_linear')
btn_h2 = batch_norm(h2, scope='d_btn_h2')
h2 = tf.nn.leaky_relu(btn_h2)
h2 = tf.concat([h2, y], 1)
h3 = fully_connected(h2, 1, activation_fn=None, scope='d_h3_linear')
return tf.nn.sigmoid(h3), h3
def generator(self, z, y=None):
with tf.variable_scope("generator") as scope:
if not self.y_dim:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(
z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(
self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
h0 = tf.nn.relu(self.g_bn0(self.h0))
self.h1, self.h1_w, self.h1_b = deconv2d(
h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4], name='g_h1', with_w=True)
h1 = tf.nn.relu(self.g_bn1(self.h1))
h2, self.h2_w, self.h2_b = deconv2d(
h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2], name='g_h2', with_w=True)
h2 = tf.nn.relu(self.g_bn2(h2))
h3, self.h3_w, self.h3_b = deconv2d(
h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1], name='g_h3', with_w=True)
h3 = tf.nn.relu(self.g_bn3(h3))
h4, self.h4_w, self.h4_b = deconv2d(
h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)
return tf.nn.tanh(h4)
else:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
s_w4, s_w4 = int(s_w / 2), int(s_w / 4)
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
z = tf.concat([z, y], 1)
h0 = fully_connected(z, self.gfc_dim, activation_fn=None, scope='g_h0_linear')
btn_h0 = self.g_bn0(h0)
h0 = tf.nn.relu(btn_h0)
h0 = tf.concat([h0, y], 1)
h1 = fully_connected(h0, self.gf_dim * 2 * s_h4 * s_w4, activation_fn=None, scope='g_h1_linear')
btn_h1 = self.g_bn1(h1)
h1 = tf.nn.relu(btn_h1)
h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])
x_shapes = h1.get_shape()
y_shapes = yb.get_shape()
h1 = tf.concat([h1, yb * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
h2 = conv2d_transpose(h1, self.gf_dim * 2, kernel_size=5, stride=2, scope='g_h2', activation_fn=None)
btn_h2 = self.g_bn2(h2)
h2 = tf.nn.relu(btn_h2)
x_shapes = h2.get_shape()
h2 = tf.concat([h2, yb * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
h3 = conv2d_transpose(h2, self.c_dim, kernel_size=5, stride=2, scope='g_h3', activation_fn=None)
return tf.nn.sigmoid(h3)
    def train(self):
        """Run the DCGAN training loop.

        NOTE(review): ``config`` is not a parameter and not ``self.config``;
        it must be a module-level global — the canonical DCGAN code this is
        derived from had ``def train(self, config)``. Confirm before use.
        """
        d_optim = tf.train.AdamOptimizer(config.learning_rate) \
            .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate) \
            .minimize(self.g_loss, var_list=self.g_vars)
        try:
            tf.global_variables_initializer().run()
        except:
            # Fallback for very old TF versions lacking the modern initializer.
            tf.initialize_all_variables().run()
        # Fixed z / inputs used for periodic sample images.
        sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim))
        if config.dataset == 'mnist':
            sample_inputs = self.data_x[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            sample_files = self.data[0:self.sample_num]
            sample = [
                get_image(sample_file,
                          input_height=self.input_height,
                          input_width=self.input_width,
                          resize_height=self.output_height,
                          resize_width=self.output_width,
                          crop=self.crop,
                          grayscale=self.grayscale) for sample_file in sample_files]
            if (self.grayscale):
                # Add the trailing channel axis for grayscale images.
                sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]
            else:
                sample_inputs = np.array(sample).astype(np.float32)
        counter = 1
        start_time = time.time()
        could_load = self.restore_mode(self.checkpoint_dir,step=1)
        if could_load:
            # NOTE(review): `checkpoint_counter` is not defined anywhere in
            # this method (restore_mode returns only a bool), so a successful
            # restore raises NameError here. Confirm and fix.
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        for epoch in range(config.epoch):
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data_x), config.train_size) // config.batch_size
            else:
                # Re-glob and reshuffle the image list every epoch.
                self.data = glob(os.path.join(
                    config.data_dir, config.dataset, self.input_fname_pattern))
                np.random.shuffle(self.data)
                batch_idxs = min(len(self.data), config.train_size) // config.batch_size
            for idx in range(0, int(batch_idxs)):
                if config.dataset == 'mnist':
                    batch_images = self.data_x[idx * config.batch_size:(idx + 1) * config.batch_size]
                    batch_labels = self.data_y[idx * config.batch_size:(idx + 1) * config.batch_size]
                else:
                    batch_files = self.data[idx * config.batch_size:(idx + 1) * config.batch_size]
                    batch = [
                        get_image(batch_file,
                                  input_height=self.input_height,
                                  input_width=self.input_width,
                                  resize_height=self.output_height,
                                  resize_width=self.output_width,
                                  crop=self.crop,
                                  grayscale=self.grayscale) for batch_file in batch_files]
                    if self.grayscale:
                        batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
                    else:
                        batch_images = np.array(batch).astype(np.float32)
                batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim]) \
                    .astype(np.float32)
                if config.dataset == 'mnist':
                    # Update D network
                    _ = self.sess.run([d_optim],
                                      feed_dict={
                                          self.inputs: batch_images,
                                          self.z: batch_z,
                                          self.y: batch_labels,
                                      })
                    # Update G network
                    _ = self.sess.run([g_optim],
                                      feed_dict={
                                          self.z: batch_z,
                                          self.y: batch_labels,
                                      })
                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _ = self.sess.run([g_optim],
                                      feed_dict={self.z: batch_z, self.y: batch_labels})
                    errD_fake = self.d_loss_fake.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                    errD_real = self.d_loss_real.eval({
                        self.inputs: batch_images,
                        self.y: batch_labels
                    })
                    errG = self.g_loss.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                else:
                    # Update D network
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={self.inputs: batch_images, self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)
                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)
                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)
                    errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                    errD_real = self.d_loss_real.eval({self.inputs: batch_images})
                    errG = self.g_loss.eval({self.z: batch_z})
                counter += 1
                print("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
                      % (epoch, config.epoch, idx, batch_idxs,
                         time.time() - start_time, errD_fake + errD_real, errG))
                if np.mod(counter, 100) == 1:
                    # Periodically render a grid of samples from the fixed z.
                    if config.dataset == 'mnist':
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_z,
                                self.inputs: sample_inputs,
                                self.y: sample_labels,
                            }
                        )
                        save_images(samples, image_manifold_size(samples.shape[0]),
                                    './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
                        print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
                    else:
                        try:
                            samples, d_loss, g_loss = self.sess.run(
                                [self.sampler, self.d_loss, self.g_loss],
                                feed_dict={
                                    self.z: sample_z,
                                    self.inputs: sample_inputs,
                                },
                            )
                            save_images(samples, image_manifold_size(samples.shape[0]),
                                        './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
                            print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
                        except:
                            # NOTE(review): bare except hides real failures in
                            # sampling/saving; narrow this when touched next.
                            print("one pic error!...")
                if np.mod(counter, 500) == 2:
                    self.save_mode(config.checkpoint_dir, counter)
    def train1(self):
        """MNIST-only variant of the training loop with TF summaries.

        NOTE(review): like ``train``, this reads a module-level ``config``;
        non-mnist branches are all ``pass`` so only the mnist path works.
        """
        d_optimizer = tf.train.AdamOptimizer(config.learning_rate).minimize(self.d_loss,var_list=self.d_vars)
        g_optimizer = tf.train.AdamOptimizer(config.learning_rate).minimize(self.g_loss,var_list=self.g_vars)
        tf.global_variables_initializer().run()
        # Merge generator- and discriminator-side summaries separately.
        self.g_summary_run = tf.summary.merge([self.z_summary,self.d__summary,self.G_summary,self.d_loss_fake_summary,self.g_loss_summary])
        self.d_summary_run = tf.summary.merge([self.z_summary,self.d_summary,self.d_loss_real_summary,self.d_loss_summary])
        self.summary_writer = tf.summary.FileWriter('./logs',self.sess.graph)
        # Fixed z / inputs used for the periodic sampler run.
        sample_z = np.random.uniform(-1,1,size=(self.sample_num,self.z_dim))
        if config.dataset == 'mnist':
            sample_inputs = self.data_x[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            pass
        count = 1
        start_time = time.time()
        could_load = self.restore_mode(self.checkpoint_dir,count)
        if could_load:
            print("load success")
        else:
            print("load failed..")
        for epoch in range(config.epoch):
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data_x),config.train_size)// config.batch_size
            else:
                batch_idxs = 0
            for idx in range(0,int(batch_idxs)):
                if config.dataset == 'mnist':
                    batch_images = self.data_x[idx * config.batch_size:(idx+1)*config.batch_size]
                    batch_labels = self.data_y[idx * config.batch_size:(idx+1)*config.batch_size]
                else:
                    pass
                batch_z = np.random.uniform(-1,1,[config.batch_size, self.z_dim]).astype(np.float32)
                if config.dataset == 'mnist':
                    # Update the discriminator network.
                    _,summary_str = self.sess.run([d_optimizer,self.d_summary_run],feed_dict={
                        self.inputs:batch_images,
                        self.z:batch_z,
                        self.y:batch_labels,
                    })
                    self.summary_writer.add_summary(summary_str,count)
                    # Update the generator network (run twice, per DCGAN lore,
                    # so d_loss does not collapse to zero).
                    _,summary_str = self.sess.run([g_optimizer,self.g_summary_run],feed_dict={
                        self.z:batch_z,
                        self.y:batch_labels
                    })
                    self.summary_writer.add_summary(summary_str,count)
                    _, summary_str = self.sess.run([g_optimizer, self.g_summary_run], feed_dict={
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                    self.summary_writer.add_summary(summary_str, count)
                    errD_fake = self.d_loss_fake.eval({
                        self.z:batch_z,
                        self.y:batch_labels
                    })
                    errD_real = self.d_loss_real.eval({
                        self.inputs:batch_images,
                        self.y:batch_labels
                    })
                    errG = self.g_loss.eval({
                        self.z:batch_z,
                        self.y:batch_labels
                    })
                else:
                    pass
                count += 1
                print("Epoch:[%2d/%2d] [%4d/%4d] time: %4.4f,d_loss:%.8f,g_loss:%.8f" % (epoch,config.epoch,idx,batch_idxs,time.time() - start_time,errD_fake + errD_real,errG))
                if count % 10 == 0:
                    if config.dataset == "mnist":
                        # Sampler run; results are currently unused (no save).
                        samples,d_loss,g_loss = self.sess.run([self.sampler_data,self.d_loss,self.g_loss],feed_dict={
                            self.z:sample_z,
                            self.inputs:sample_inputs,
                            self.y:sample_labels,
                        })
                if count % 50 == 0:
                    self.save_mode(config.checkpoint_dir, count)
| fanfanfeng/nlp_research | GAN/cartoon/dcgan1.py | dcgan1.py | py | 27,183 | python | en | code | 8 | github-code | 13 |
import os, stat
#-------------#
# Import Vars #
#-------------#
Import('*')
#---------#
# Sources #
#---------#
# Walk the data tree, skipping .svn directories and editor backups (*~),
# and schedule every remaining file for installation under
# $data_directory, mirroring the on-disk layout.
src = []
for root, dirs, files in os.walk("."):
    if root.find('.svn') == -1:
        for file in [f for f in files if not f.endswith('~')]:
            src.append(os.path.join(root, file))
            install = env.Install(Dir(env.subst('$data_directory/'+root)), os.path.join(root, file))
#---------------------------------#
# Distribute to src_dir & bin_dir #
#---------------------------------#
#dist_files = ['SConscript'] + src
#env.Distribute (src_dir, dist_files)
#env.Distribute (bin_dir, src)
#Export(['env', 'src_dir', 'bin_dir'])
# NOTE(review): `install` is rebound every loop iteration above, so only
# the LAST Install node appears to be attached to the 'install' alias —
# earlier data files may not be installed by `scons install`. Consider
# collecting the nodes in a list and aliasing them all. Also, `stat` is
# imported but never used.
env.Alias('install', install)
# Install locale
import glob
# Build .mo catalogues from every locale/<lang>.po and install them under
# $locale_directory/<lang>/LC_MESSAGES/vdrift.mo.
src = glob.glob('locale/*')
po_files = [os.path.basename(po) for po in src if po.endswith('.po')]
languages = [po[0:-3] for po in po_files]
for lang in languages:
    mo_tgt_file = lang + '/LC_MESSAGES/vdrift.mo'
    mo_file = 'locale/' + mo_tgt_file
    po_file = 'locale/' + lang + ".po"
    env.MoBuild(mo_file, po_file)
    install = env.InstallAs(os.path.join(env.subst('$locale_directory'), mo_tgt_file), mo_file)
    env.Alias("install", install)
| vtereshkov/vdrift-data-short | data/SConscript | SConscript | 1,159 | python | en | code | 0 | github-code | 13 | |
import json
import logging
import time
import flask
import flask_cors
import numpy as np
import podsearch
import scann
import transformers
# Module-level logger and the lazily-initialized search closure
# (populated by the first call to load_search_fn()).
log = logging.getLogger(__name__)
search_fn = None
def load_search_fn():
    """Build the module-global ``search_fn`` closure.

    Loads the tokenizer, transformer model, ScaNN searcher and the
    encodings metadata from sibling directories of ``podsearch.BASE_DIR``.
    Idempotent: returns immediately when the closure already exists.
    Loading is slow, hence the explicit /wakeup endpoint that triggers it.

    (Removed two dead locals from the original: an unused
    ``load_search_fn_in_progress`` flag and an unused ``results`` list.)
    """
    global search_fn
    if search_fn is not None:
        return
    start = time.time()

    def _path(asset): return f'{podsearch.BASE_DIR}/../{asset}'

    tokenizer = transformers.AutoTokenizer.from_pretrained(_path('tokenizer'))
    model = transformers.AutoModel.from_pretrained(_path('model')).eval()
    searcher = scann.scann_ops_pybind.load_searcher(_path('searcher'))
    with open(_path('encodings_metadata.json'), 'rt') as fileobj:
        encodings_metadata = json.load(fileobj)

    def _search_fn(query):
        # Preprocess + encode the query, L2-normalize, then look up the
        # approximate nearest neighbours in the ScaNN index.
        query = podsearch.utils.preprocess(query)
        encoding = podsearch.utils.encode([query], tokenizer, model)
        encoding = encoding.squeeze().numpy()
        encoding /= np.linalg.norm(encoding)
        neighbors, scores = searcher.search(encoding)
        return dict(
            query=query,
            results=[encodings_metadata[i] for i in neighbors],
            scores=scores.tolist(),
        )

    search_fn = _search_fn
    log.info(f'finished loading search_fn in {time.time()-start} sec')
def create_server():
    """Build the Flask app (CORS enabled) with three GET endpoints.

    The handlers read/write the module-global ``search_fn``; Flask
    registers each endpoint under the handler function's name, so the
    inner function names are part of the routing contract.
    """
    server = flask.Flask(__name__)
    @server.get('/wakeup')
    def wakeup():
        # Eagerly load the (slow) model/searcher, then report readiness.
        global search_fn
        load_search_fn()
        return {'awake': search_fn is not None}
    @server.get('/awake')
    def awake():
        # Pure readiness probe; does not trigger loading.
        global search_fn
        return {'awake': search_fn is not None}
    @server.get('/search')
    def search():
        # NOTE(review): calling /search before /wakeup leaves search_fn as
        # None and raises TypeError here — confirm whether a 503 is wanted.
        global search_fn
        query = flask.request.args.get('query', '')
        return search_fn(query)
    flask_cors.CORS(server)
    return server
# Local development entry point (Flask debug server on all interfaces).
if __name__ == '__main__':
    create_server().run(debug=True, host='0.0.0.0', port=8080)
#
| joepatmckenna/podsearch | py/podsearch/services/search_v1.py | search_v1.py | py | 1,931 | python | en | code | 0 | github-code | 13 |
import csv
import logging
from dataclasses import dataclass
from pathlib import Path
from PIL import Image
from tqdm import tqdm
# Log sanity-check findings to a file (truncated on every run).
logging.basicConfig(filename='ViVQA_sanity_check.log',
                    filemode='w',
                    format='%(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
@dataclass
class Meta:
    # NOTE(review): none of these attributes carry type annotations, so
    # @dataclass generates no fields/__init__ parameters for them — they
    # act as plain class-level constants shared by every instance.
    img_root_dir = Path('/mnt/disks/nlpvnhub/dinhanhx/')
    # where COCO train2017 and val2017 dirs are located
    csv_dir = Path('/mnt/disks/nlpvnhub/dinhanhx/ViVQA-main')
    # where github.com/kh4nh12/ViVQA is cloned
    img_train_dir = img_root_dir.joinpath('train2017')
    img_val_dir = img_root_dir.joinpath('val2017')
    train_file = csv_dir.joinpath('train.csv')
    test_file = csv_dir.joinpath('test.csv')
class DataUnpacker:
    """Joins ViVQA CSV annotations with their COCO train2017/val2017 images."""

    def __init__(self, meta: "Meta", sanity_check=False) -> None:
        self.meta = meta
        if sanity_check:
            self.run_sanity_check()

    def get_item(self, index: int = 0, target='train_file'):
        """Return ``(csv_row_dict, image_path)`` for row *index* of *target*.

        Parameters
        ----------
        index : int, optional
            Row index within the CSV file, by default 0.
        target : str, optional
            Attribute name on ``self.meta``: 'train_file' or 'test_file'.

        Returns
        -------
        ``(dict, Path)`` on success; ``(None, None)`` when the row does not
        exist or its image is missing from both image directories.
        """
        target = self.meta.__getattribute__(target)
        meta_dir_list = [self.meta.img_train_dir, self.meta.img_val_dir]
        with open(target) as target_file:
            reader = csv.DictReader(target_file)
            for i, line in enumerate(reader):
                if i == index:
                    for img_dir in meta_dir_list:
                        # COCO file names are the 12-digit zero-padded image id.
                        img_file = img_dir.joinpath(str(line['img_id']).zfill(12) + '.jpg')
                        if img_file.is_file():
                            return line, img_file
                    break  # row found but image missing; no need to keep scanning
        return (None, None)

    def run_sanity_check(self):
        """Check that CSV files, image directories, and per-row images exist.

        Bug fix: the existence check originally iterated
        ``meta_dir_list + meta_dir_list`` (the directories twice), so the
        CSV files themselves were never checked.
        """
        meta_file_list = [self.meta.train_file, self.meta.test_file]
        meta_dir_list = [self.meta.img_train_dir, self.meta.img_val_dir]
        for p in meta_file_list + meta_dir_list:
            if not p.exists():
                logger.warning(f'{p} does not exist')
        for target in meta_file_list:
            with open(target) as target_file:
                reader = csv.DictReader(target_file)
                for line in tqdm(reader):
                    file_found = False
                    for img_dir in meta_dir_list:
                        img_file = img_dir.joinpath(str(line['img_id']).zfill(12) + '.jpg')
                        if img_file.is_file():
                            file_found = True
                            if img_dir != self.meta.img_train_dir:
                                # Image lives in val2017 rather than train2017.
                                logger.info(f'{line} @ {target} has {img_file}')
                    if not file_found:
                        logger.warning(f'{line} @ {target} has no image')

    def get_image_list(self):
        """Return the unique image paths referenced by the *test* CSV."""
        meta_file_list = [self.meta.test_file]
        meta_dir_list = [self.meta.img_train_dir, self.meta.img_val_dir]
        image_list = set()
        for target in meta_file_list:
            with open(target) as target_file:
                reader = csv.DictReader(target_file)
                for line in tqdm(reader):
                    for img_dir in meta_dir_list:
                        img_file = img_dir.joinpath(str(line['img_id']).zfill(12) + '.jpg')
                        if img_file.is_file():
                            image_list.add(img_file)
        return list(image_list)
if '__main__' == __name__:
    # Report count and min/mean/max resolution statistics over the unique
    # images referenced by the test split.
    meta = Meta()
    data_unpacker = DataUnpacker(meta)
    image_set = data_unpacker.get_image_list()
    l = len(image_set)
    h, w = 0, 0
    min_h, min_w = Image.open(image_set[0]).height, Image.open(image_set[0]).width
    max_h, max_w = 0, 0
    for img_path in tqdm(image_set):
        img = Image.open(img_path)
        h += img.height
        w += img.width
        # "min"/"max" are ranked by pixel area (h*w), not per dimension.
        if img.height * img.width <= min_h * min_w:
            min_h, min_w = img.height, img.width
        if img.height * img.width >= max_h * max_w:
            max_h, max_w = img.height, img.width
    print(f'Number of image-text pairs: {l}')
    print(f'Average H W: {h/l} {w/l}')
    print(f'Min H W: {min_h} {min_w}')
    print(f'Max H W: {max_h} {max_w}')
| dinhanhx/VL-datasets | vivqa_data.py | vivqa_data.py | py | 4,461 | python | en | code | 1 | github-code | 13 |
import torch
# Prefer the first CUDA device when available; fall back to CPU.
DEFAULT_DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
from durbango.nb_utils import is_iterable
def num_parameters(module, only_trainable: bool = False) -> int:
    """Return the number of parameters in *module*.

    When ``only_trainable`` is True, only parameters with
    ``requires_grad`` set are counted.
    """
    if only_trainable:
        selected = (p for p in module.parameters() if p.requires_grad)
    else:
        selected = module.parameters()
    return sum(p.numel() for p in selected)
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def print_shape(obj):
    """Recursively replace tensors with their shapes inside nested containers."""
    if torch.is_tensor(obj):
        return obj.shape
    if isinstance(obj, dict):
        return {key: print_shape(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [print_shape(item) for item in obj]
    return obj
def avg_checkpoints(sds):
    """Average a list of state dicts parameter-wise.

    Bug fix: the original called ``torch.mean`` on a Python list of
    tensors, which raises TypeError. Tensors are now stacked along a new
    leading dim and averaged over it. Keys are taken from the first dict;
    all dicts are assumed to share keys and per-key shapes.
    """
    new_sd = {}
    for k in sds[0].keys():
        new_sd[k] = torch.stack([sd[k] for sd in sds]).mean(dim=0)
    return new_sd
def get_shapes(x):
    """Recursively map shape-bearing objects to shape tuples.

    Dicts/iterables are traversed; anything without a ``.shape`` that is
    not iterable maps to None.
    """
    if hasattr(x, 'shape'):
        return tuple(x.shape)
    if isinstance(x, dict):
        return {key: get_shapes(value) for key, value in x.items()}
    if is_iterable(x):
        return [get_shapes(value) for value in x]
    return None
def get_tensor_shapes_and_pointers(x):
    """Recursively map tensors to ``(shape, data_ptr)`` pairs.

    Bug fix: the original's dict/iterable branches recursed into
    ``get_shapes`` instead of this function, silently dropping the data
    pointers for every nested tensor.
    """
    if isinstance(x, torch.Tensor):
        return (x.shape, x.data_ptr())
    if isinstance(x, dict):
        return {k: get_tensor_shapes_and_pointers(v) for k, v in x.items()}
    if is_iterable(x):
        return [get_tensor_shapes_and_pointers(v) for v in x]
    return None
import sys
def sizeof_fmt(num, suffix='B'):
    ''' by Fred Cirera, https://stackoverflow.com/a/1094933/1870254, modified'''
    value = num
    for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
        if abs(value) < 1024.0:
            return "%3.1f %s%s" % (value, unit, suffix)
        value /= 1024.0
    # Past zebibytes: everything else is reported in YiB.
    return "%.1f %s%s" % (value, 'Yi', suffix)
def local_sizeof():
    # NOTE(review): `locals()` here is evaluated inside this function (in
    # fact inside the generator expression's own frame), not the caller's
    # scope, so there is nothing meaningful to report and this prints
    # nothing useful. The snippet this is adapted from iterates the
    # caller's variables (e.g. globals().items()); confirm intent.
    for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),
                             key= lambda x: -x[1])[:10]:
        print("{:>30}: {:>8}".format(name, sizeof_fmt(size)))
import gc, inspect
def find_names(obj):
    """Best-effort: return the variable names currently bound to *obj*.

    Walks the call stack touching ``f_locals`` (which forces CPython to
    sync each frame's fast locals into its dict), then scans the garbage
    collector's referrers of *obj* for dicts that map a name to it.
    CPython-specific and heuristic — namespaces that are not plain dicts
    are not found.
    """
    frame = inspect.currentframe()
    for frame in iter(lambda: frame.f_back, None):
        frame.f_locals
    obj_names = []
    for referrer in gc.get_referrers(obj):
        if isinstance(referrer, dict):
            for k, v in referrer.items():
                if v is obj:
                    obj_names.append(k)
    return obj_names
import gc
from .nb_utils import tqdm_nice
from collections import defaultdict
import pandas as pd
def print_tensor_sizes(ignore_names = ('obj', 'weight', 'bias')):
    """Tabulate every live tensor (by storage pointer) with its bound names.

    Scans all GC-tracked objects, deduplicates tensors by ``data_ptr`` and
    returns a DataFrame sorted by element count. Names in *ignore_names*
    (generic ones like 'weight'/'bias') are filtered out.
    """
    results = []
    seen_ptrs = set()
    for obj in tqdm_nice(gc.get_objects()):
        try:
            # assert-as-filter: non-tensors raise AssertionError and are
            # skipped by the except below.
            assert isinstance(obj, torch.Tensor)
            ptr = obj.data_ptr()
            if ptr in seen_ptrs: continue
            seen_ptrs.add(ptr)
            names = [x for x in find_names(obj) if x not in ignore_names]
            for name in names:
                results.append((name, obj.numel(), ptr, obj.dtype, obj.device))
        except AssertionError:
            pass
    colnames = ['varname', 'numel', 'data_ptr', 'data_type', 'device']
    return pd.DataFrame(results, columns=colnames).sort_values('numel', ascending=False)
def same_storage(x, y):
    """Return True when *x* and *y* share underlying storage.

    Compares the per-element data pointers of both tensors; overlap is
    declared when one pointer set is contained in the other.

    >>> x = torch.arange(10); y = x[1::2]
    >>> same_storage(x, y)
    True
    >>> same_storage(x, y.clone())
    False
    """
    ptrs_x = {e.data_ptr() for e in x.view(-1)}
    ptrs_y = {e.data_ptr() for e in y.view(-1)}
    return ptrs_x.issubset(ptrs_y) or ptrs_y.issubset(ptrs_x)
def compare_state_dict(dct_a, dct_b):
    """Return a DataFrame of keys whose tensors differ between two state dicts.

    Keys present in only one dict are compared against a zeros(3) sentinel,
    so they always show up as deltas. Columns: key, numel_a, numel_b.
    """
    SENTINEL = torch.zeros(3)
    deltas = []
    for key in tqdm_nice(set(dct_a) | set(dct_b)):
        val_a = dct_a.get(key, SENTINEL)
        val_b = dct_b.get(key, SENTINEL)
        identical = val_a.shape == val_b.shape and torch.eq(val_a, val_b).all()
        if identical:
            continue
        deltas.append((key, val_a.numel(), val_b.numel()))
    return pd.DataFrame(deltas, columns=['key', 'numel_a', 'numel_b'])
def log_tensor(msg, x):
    """Print a one-line summary of tensor *x*: shape, min, max, small slice."""
    squeezed = x.squeeze()
    if squeezed.ndim == 2:
        slice = x[:3, :4]
    elif squeezed.ndim == 3:
        slice = x[:, 0, :6]
    else:
        slice = x[:5]
    print(f"{msg}: shape: {x.shape} min: {x.min(): .4f} max: {x.max(): .4f} slice: {slice}")
from .nb_utils import remove_prefix
def convert_pl_to_hf(pl_ckpt_path, hf_model, save_path):
    """Load a PyTorch-Lightning checkpoint into *hf_model* and save it.

    Strips the Lightning ``model.`` prefix from every state-dict key,
    loads non-strictly (unexpected keys tolerated, missing keys fatal via
    assert), then writes the model with ``save_pretrained``.
    """
    state_dict = {remove_prefix(k, 'model.'): v for k, v in
                  torch.load(pl_ckpt_path, map_location='cpu')['state_dict'].items()}
    missing, unexpected = hf_model.load_state_dict(state_dict, strict=False)
    assert not missing, f'missing keys: {missing}'
    hf_model.save_pretrained(save_path)
def get_src_lens(tok, examples):
    """Per-example token counts (excluding padding) under tokenizer *tok*."""
    src_lens = tok(examples, padding='longest', truncation=True, return_tensors='pt').input_ids.ne(tok.pad_token_id).sum(1)
    return src_lens
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters.

    Bug fix: the original used ``np.prod(p.size())`` but ``numpy`` is not
    imported in this module, so every call raised NameError. Counting via
    ``Tensor.numel()`` needs no extra dependency.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_parameters(model):
    """Return the total number of parameters in *model*.

    Bug fix: the original used ``np.prod(p.size())`` but ``numpy`` is not
    imported in this module, so every call raised NameError.
    """
    return sum(p.numel() for p in model.parameters())
| sshleifer/durbango | durbango/torch_utils.py | torch_utils.py | py | 5,385 | python | en | code | 3 | github-code | 13 |
import networkx as nx
from cdlib import algorithms, evaluation, NodeClustering, TemporalClustering
# Number of Louvain communities kept per snapshot for temporal clustering.
communities_number = 5
# Minimum Jaccard match score for two communities in consecutive
# snapshots to be treated as the same community over time.
score_lower_limit = 0.5
def get_communities(network):
    """Run weighted Louvain community detection (resolution 1.0) via cdlib."""
    communities = algorithms.louvain(network, weight='weight', resolution=1.)
    return communities
def get_average_degree(network):
    """Mean node degree of *network*, printed and returned as a '0.00' string."""
    degree_total = sum(deg for _, deg in nx.degree(network))
    average_degree = "{:.2f}".format(degree_total / float(network.number_of_nodes()))
    print('Average Degree:', average_degree)
    return average_degree
def get_average_weight(network):
    """Mean edge weight of *network*, printed and returned as a '0.00' string."""
    weights = nx.get_edge_attributes(network, 'weight').values()
    average_weight = "{:.2f}".format(sum(weights) / float(network.number_of_edges()))
    print('Average Weight:', average_weight)
    return average_weight
def get_average_clustering(network):
    """Mean clustering coefficient, printed and returned as a '0.00' string."""
    coefficient = nx.average_clustering(network)
    average_clustering = "{:.2f}".format(coefficient)
    print('Average Clustering:', average_clustering)
    return average_clustering
def get_modularity(network,communities):
    """Newman-Girvan modularity of *communities*, returned as a '0.00' string."""
    modularity = evaluation.newman_girvan_modularity(network,communities)
    print('Modularidade:',modularity)
    # NOTE(review): index [2] presumably selects the score field of cdlib's
    # FitnessResult — confirm against the installed cdlib version.
    return "{:.2f}".format(modularity[2])
def temporalClustering(network_timeline):
    """Build a cdlib TemporalClustering from a list of graph snapshots.

    For every snapshot, the first ``communities_number`` Louvain
    communities are kept, wrapped in NodeClustering objects, and
    consecutive snapshots are matched with a two-sided Jaccard similarity
    so communities can be tracked through time.
    """
    communities_files = []
    for observation,network in enumerate(network_timeline):
        communities_files.append([]) # append a bucket for this observation
        communities_list = list(get_communities(network).communities[0:communities_number])
        for community in communities_list:
            communities_files[observation].append(community) # append this observation's communities
    node_clustering = {}
    for observation in range(len(network_timeline)):
        communities_obs = []
        for community in range(len(communities_files[observation])):
            communities_obs.append(communities_files[observation][community])
        # graph=None: cdlib does not need the graph for matching below.
        node_clustering[observation] = NodeClustering(communities_obs, graph=None, method_name="")
    tc = TemporalClustering()
    for observation in node_clustering:
        tc.add_clustering(node_clustering[observation],observation)
    jaccard = lambda x, y: len(set(x) & set(y)) / len(set(x) | set(y))
    matching = tc.community_matching(jaccard,two_sided=True)
    tc.add_matching(matching)
    return tc
def colorizeTemporalClustering(network_timeline,tc):
    """Assign one color per matched community chain and color graph nodes.

    Matches scoring at least ``score_lower_limit`` inherit the color of
    their predecessor community; every other node stays grey. Colors are
    written as a 'color' node attribute on each snapshot graph in place.
    """
    colors_dict = {}
    timestamps_number = len(network_timeline)
    colors_list = ['red','orange','yellow','darkgreen','lime','cyan','lightblue','blue','darkblue','lightpink','pink','lightpurple','purple']
    color_count = 0
    explicit_match = tc.get_explicit_community_match()
    # color the dict
    for tuple in explicit_match:  # NOTE(review): shadows the builtin `tuple`
        t_c1 = tuple[0]
        t_c2 = tuple[1]
        score = tuple[2]
        if score >= score_lower_limit:
            if t_c1 in colors_dict.keys():
                colors_dict[t_c2] = (colors_dict[t_c1])
            else:
                # NOTE(review): color_count is unbounded — more than
                # len(colors_list) distinct chains raises IndexError.
                colors_dict[t_c1] = (colors_list[color_count])
                colors_dict[t_c2] = (colors_list[color_count])
                color_count+=1
    print('Match: ',explicit_match)
    print('Cores: ',colors_dict)
    # color the nodes
    # start all nodes as grey
    for i in range(timestamps_number):
        nx.set_node_attributes(network_timeline[i],'grey',name="color")
    for t_c in colors_dict:
        color = colors_dict[t_c]
        # Temporal community ids look like '<t>_<c>'; recover the snapshot.
        t = int(t_c.split('_')[0])
        for node in tc.get_community(t_c):
            nx.set_node_attributes(network_timeline[t],{node: color},name="color")
import pandas as pd
import numpy as np
from settings import perch_config,rsna_config,chestray_config,label_var,path_var,label_sep
import json
import os
class Dataset:
    """Image-classification dataset backed by train/validation CSV files.

    Each CSV must contain a ``path_var`` column (image paths) and a
    ``label_var`` column; multilabel targets are '|'-separated strings.
    """

    def __init__(self, train_csv, test_csv=None, multilabel=False):
        self.train_csv = train_csv
        self.test_csv = test_csv
        self.train = pd.read_csv(train_csv)
        self.multilabel = multilabel
        if test_csv is not None:
            self.test = pd.read_csv(test_csv)

    @staticmethod
    def _split_multilabel(series):
        # '|'-separated label strings -> float matrix, one row per sample.
        labs = np.array(series)
        labs = list(map(lambda x: x.split("|"), labs))
        return np.array(labs, dtype="float")

    def load_data(self, labels=True, include_test=True):
        """Return image paths (and optionally label arrays) for train/test.

        Test-side values are None when no ``test_csv`` was supplied.

        Bug fix: the original guarded every test-set expression on
        ``self.train_csv is None`` — which is never true, since train_csv
        is required — so calling this without a test CSV raised
        AttributeError, and the multilabel branch then crashed mapping
        over ``None`` labels even when the guard fired.
        """
        path_train = self.train[path_var].values
        path_test = None if self.test_csv is None else self.test[path_var].values
        if labels:
            labs_train = self.train[label_var]
            labs_test = None if self.test_csv is None else self.test[label_var]
            if self.multilabel:
                labs_train = self._split_multilabel(labs_train)
                if labs_test is not None:
                    labs_test = self._split_multilabel(labs_test)
            else:
                labs_train = pd.get_dummies(labs_train).values
                if labs_test is not None:
                    labs_test = pd.get_dummies(labs_test).values
        if include_test:
            if labels:
                return path_train, labs_train, path_test, labs_test
            else:
                return path_train, path_test
        else:
            if labels:
                return path_train, labs_train
            return path_train
    # def load_weights(self,include_test=True):
    #     with open(os.path.join(perch_config.image_path, perch_config.class_weights),'r') as f:
    #         weights=json.load(f)
    #     return weights
# self=Dataset(rsna_config.train_csv,rsna_config.validation_csv)
# self=Dataset(perch_config.train_csv,perch_config.validation_csv)
# self=Dataset(train_csv=chestray_config.train_csv,test_csv= chestray_config.validation_csv,multilabel=True)
#rsna=Dataset(rsna_config.train_csv,rsna_config.validation_csv)
# NOTE(review): these module-level instantiations read several CSVs at
# import time, so importing this module is slow and fails if the data
# files are absent — consider moving them behind a function or a
# __main__ guard.
perch=Dataset(perch_config.train_csv,perch_config.validation_csv)
chestray=Dataset(train_csv=chestray_config.train_csv,test_csv= chestray_config.validation_csv,multilabel=True)
perch.train_long=pd.read_csv(os.path.join(perch_config.image_path, "Assessors_train.csv"))
perch.test_long=pd.read_csv(os.path.join(perch_config.image_path, "Assessors_test.csv"))
from model.contact import New_contact
import re
class ContactHelper:
    def __init__(self, app):
        # Keep a reference to the fixture wrapper that owns the WebDriver.
        self.app = app
    def open_home_page(self):
        """Navigate to the contact list unless it is already displayed."""
        wd = self.app.wd
        # Already home when the URL matches and the 'add' button is present.
        if not (wd.current_url == "http://localhost/addressbook/" and len(wd.find_elements_by_name("add")) > 0):
            wd.find_element_by_link_text("home").click()
    def add_new_contact(self, new_contact):
        """Create *new_contact* via the 'add new' form and return home."""
        wd = self.app.wd
        self.open_home_page()
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(new_contact)
        wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
        self.open_home_page()
        # Invalidate the cached contact list after any mutation.
        self.contact_cache = None
def fill_new_form_for_add_new_contact(self, new_contact):
wd = self.app.wd
self.fill_contact_form(new_contact)
    def delete_contact(self):
        """Delete the first contact in the list."""
        self.delete_contact_by_index(0)
    def delete_contact_by_index(self, index):
        """Select the contact at *index*, delete it, and accept the alert."""
        wd = self.app.wd
        self.select_contact_by_index(index)
        # delete contact
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        # submit delete contact
        wd.switch_to_alert().accept()
        self.open_home_page()
        self.contact_cache = None
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[id].click()
    def select_contact_by_id(self, contact_id):
        """Tick the checkbox of the contact whose row id is *contact_id*."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[id='%s']" % contact_id).click()
    def select_contact(self):
        """Tick the checkbox of the first contact in the list."""
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()
def modify_contact(self, new_contact):
self.modify_contact_by_index(0)
    def modify_contact_by_index(self, index, new_contact):
        """Open the edit form of the contact at *index* and apply *new_contact*."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        # click button for edit contact
        wd.find_elements_by_xpath("(//img[@alt='Edit'])")[index].click()
        # fill contact form
        self.fill_contact_form(new_contact)
        # submit modification
        wd.find_element_by_name("update").click()
        wd.find_element_by_link_text("home page").click()
        self.contact_cache = None
    def modify_contact_by_id(self, new_contact, id):
        """Open the edit form of contact *id* and apply *new_contact*.

        NOTE(review): the parameter order (new_contact, id) is the reverse
        of ``modify_contact_by_index(index, new_contact)`` — easy to call
        wrong; consider aligning when callers allow.
        """
        wd = self.app.wd
        self.open_home_page()
        self.open_contact_to_edit_by_id(id)
        # fill contact form
        self.fill_contact_form(new_contact)
        # submit modification
        wd.find_element_by_name("update").click()
        wd.find_element_by_link_text("home page").click()
        self.contact_cache = None
    def fill_contact_form(self, new_contact):
        """Type every non-None field of *new_contact* into the open form.

        NOTE(review): the "email" form field is written twice — first from
        ``new_contact.email`` and then from ``new_contact.email1`` — so
        when both are set, ``email1`` wins. Confirm which attribute the
        model treats as canonical.
        """
        wd = self.app.wd
        self.change_field_contact("firstname", new_contact.firstname)
        self.change_field_contact("middlename", new_contact.middlename)
        self.change_field_contact("lastname", new_contact.lastname)
        self.change_field_contact("nickname", new_contact.nickname)
        self.change_field_contact("title", new_contact.title)
        self.change_field_contact("company", new_contact.company)
        self.change_field_contact("address", new_contact.address)
        self.change_field_contact("home", new_contact.homephone)
        self.change_field_contact("mobile", new_contact.mobilephone)
        self.change_field_contact("work", new_contact.workphone)
        self.change_field_contact("fax", new_contact.fax)
        self.change_field_contact("email", new_contact.email)
        self.change_field_contact("email", new_contact.email1)
        self.change_field_contact("email2", new_contact.email2)
        self.change_field_contact("email3", new_contact.email3)
        self.change_field_contact("address2", new_contact.address2)
        self.change_field_contact("phone2", new_contact.secondary_phone)
        self.change_field_contact("notes", new_contact.notes)
def change_field_contact(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
    def count(self):
        """Return the number of contacts shown on the home page."""
        wd = self.app.wd
        self.open_home_page()
        return len(wd.find_elements_by_name("selected[]"))
    # Class-level cache of the last scraped contact list; set to None by
    # every mutating operation so the next read re-scrapes the page.
    contact_cache = None
    def get_contact_list(self):
        """Scrape (or return the cached) list of New_contact models."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.app.open_home_page()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                # Fixed column layout of the addressbook table.
                cells = row.find_elements_by_tag_name("td")
                lastname = cells[1].text
                firstname = cells[2].text
                address = cells[3].text
                all_email = cells[4].text
                all_phones = cells[5].text
                id = row.find_element_by_name("selected[]").get_attribute("value")
                self.contact_cache.append(
                    New_contact(firstname=firstname, lastname=lastname, id=id, address=address,
                                all_phones_from_home_page=all_phones, all_email=all_email))
        # Return a copy so callers cannot mutate the cache.
        return list(self.contact_cache)
    def open_contact_to_edit_by_index(self, index):
        """Open the edit page of the contact at row *index* (pencil column 7)."""
        wd = self.app.wd
        self.app.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()
    def open_contact_to_edit_by_id(self, contact_id):
        """Select contact *contact_id*, then follow its edit.php link."""
        wd = self.app.wd
        self.select_contact_by_id(contact_id)
        wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % contact_id).click()
    def open_contact_view_by_index(self, index):
        """Open the read-only view page of the contact at row *index* (column 6)."""
        wd = self.app.wd
        self.app.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()
    def get_contact_info_from_edit_page(self, index):
        # Read all editable fields of one contact straight from the edit
        # form inputs and return them as a New_contact object.
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        homephone = wd.find_element_by_name("home").get_attribute("value")
        workphone = wd.find_element_by_name("work").get_attribute("value")
        mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
        secondary_phone = wd.find_element_by_name("phone2").get_attribute("value")
        email1 = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        return New_contact(firstname=firstname, lastname=lastname, address=address, id=id, homephone=homephone,
                           workphone=workphone,
                           mobilephone=mobilephone,
                           secondary_phone=secondary_phone, email1=email1, email2=email2, email3=email3)
    def get_contact_from_view_page(self, index):
        # Parse the phone numbers from the free-text details page, where
        # the app prefixes them "H:", "M:", "W:" and "P:".
        # NOTE(review): re.search returns None when a prefix is absent,
        # which would raise AttributeError here — assumes all four exist.
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        homephone = re.search("H: (.*)", text).group(1)
        mobilephone = re.search("M: (.*)", text).group(1)
        workphone = re.search("W: (.*)", text).group(1)
        secondary_phone = re.search("P: (.*)", text).group(1)
        return New_contact(homephone=homephone, workphone=workphone, mobilephone=mobilephone,
                           secondary_phone=secondary_phone)
    def delete_contact_from_group(self, contact, group):
        # Filter the home page by the group, tick the contact and press
        # "remove" to detach it from that group.
        wd = self.app.wd
        self.open_home_page()
        wd.find_element_by_name("group").click()
        wd.find_element_by_xpath("//option[@value='%s']" % group.id).click()
        self.select_contact_by_id(contact.id)
        wd.find_element_by_name("remove").click()
        self.open_home_page()
    def add_contact_to_group(self, contact, group):
        # Tick the contact, pick the target group in the "to_group"
        # selector (second matching option on the page) and press "add".
        wd = self.app.wd
        self.open_home_page()
        wd.find_element_by_name("to_group").click()
        wd.find_element_by_xpath("(//option[@value='%s'])[2]" % group.id).click()
        self.select_contact_by_id(contact.id)
        wd.find_element_by_name("add").click()
        self.open_home_page()
def clear(self, s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(self, contact):
print([contact.homephone, contact.mobilephone, contact.workphone, contact.secondary_phone])
return "\n".join(filter(lambda x: x != "",
map(lambda x: self.clear(x), filter(lambda x: x is not None,
[contact.homephone, contact.mobilephone,
contact.workphone, contact.secondary_phone]))))
def merge_emails_like_on_home_page(self, contact):
return "\n".join(filter(lambda x: x != "",
filter(lambda x: x is not None,
[contact.email1, contact.email2, contact.email3])))
| TheMastere/PDT_training_b14 | fixture/contact.py | contact.py | py | 9,362 | python | en | code | 0 | github-code | 13 |
41633657269 | """
This file is part of nand2tetris, as taught in The Hebrew University, and
was written by Aviv Yaish. It is an extension to the specifications given
[here](https://www.nand2tetris.org) (Shimon Schocken and Noam Nisan, 2017),
as allowed by the Creative Common Attribution-NonCommercial-ShareAlike 3.0
Unported [License](https://creativecommons.org/licenses/by-nc-sa/3.0/).
"""
import xml.etree.ElementTree as ET
from jack_tokenizer import JackTokenizer
import consts
class CompilationEngine:
    """Gets input from a JackTokenizer and emits its parsed structure into an
    output stream.

    The parse tree is built as an xml.etree.ElementTree: `self.stack` holds
    the chain of currently-open elements (innermost last), and each
    compile_* method consumes the tokens of one grammar rule, appending
    child elements to stack[-1].

    Token-advance convention: every compile_* call assumes the tokenizer is
    already positioned ON the first token of its rule, and most leave the
    tokenizer positioned on the first token AFTER the rule.
    """

    def __init__(self, input_file):
        """
        Creates a new compilation engine with the given input. The
        next routine called must be compile_class().

        :param input_file: the Jack source input handed to JackTokenizer.
        """
        self.tokenizer = JackTokenizer(input_file)
        # Root element of the parse tree; set by compile_class().
        self.root: ET.Element = None
        # Stack of open XML elements; stack[-1] is the current parent.
        self.stack: list[ET.Element] = []

    def compile_class(self):
        """Compiles a complete class:
        'class' className '{' classVarDec* subroutineDec* '}'."""
        if self.tokenizer.has_more_tokens():
            self.tokenizer.advance()
        el = ET.Element("class")
        self.stack.append(el)
        self.root = el
        self.compile_keyword()      # 'class'
        self.tokenizer.advance()
        self.compile_identifier()   # className
        self.tokenizer.advance()
        self.compile_symbol()       # '{'
        self.tokenizer.advance()
        while (self.tokenizer.keyword() in ["STATIC", "FIELD"]):
            self.compile_class_var_dec()
        while (self.tokenizer.keyword() in ["FUNCTION", "CONSTRUCTOR", "METHOD"]):
            self.compile_subroutine()
        self.compile_symbol()       # '}'
        self.pop_from_stack()
        return self.root

    def compile_class_var_dec(self):
        """Compiles a static declaration or a field declaration:
        ('static' | 'field') type varName (',' varName)* ';'."""
        el = self.create_child_to_open_tag("classVarDec")
        self.stack.append(el)
        self.compile_keyword()      # 'static' | 'field'
        self.tokenizer.advance()
        self.compile_tvar_name()    # type varName (',' varName)* ';'
        self.pop_from_stack()

    def compile_subroutine(self):
        """
        Compiles a complete method, function, or constructor.
        You can assume that classes with constructors have at least one field,
        you will understand why this is necessary in project 11.
        """
        el = self.create_child_to_open_tag("subroutineDec")
        self.stack.append(el)
        self.compile_keyword()      # 'constructor' | 'function' | 'method'
        self.tokenizer.advance()
        # Return type: either a keyword ('void', 'int', ...) or a class name.
        if (self.tokenizer.token_type() == consts.KEYWORD):
            self.compile_keyword()
        elif (self.tokenizer.token_type() == consts.IDENTIFIER):
            self.compile_identifier()
        self.tokenizer.advance()
        self.compile_identifier()   # subroutineName
        self.tokenizer.advance()
        self.compile_symbol()       # '('
        self.tokenizer.advance()
        self.compile_parameter_list()
        self.compile_symbol()       # ')'
        self.tokenizer.advance()
        el = self.create_child_to_open_tag("subroutineBody")
        self.stack.append(el)
        self.compile_symbol()       # '{'
        self.tokenizer.advance()
        while self.tokenizer.keyword() == "VAR":
            self.compile_var_dec()
        self.compile_statements()
        self.compile_symbol()       # '}'
        self.pop_from_stack()       # close subroutineBody
        self.pop_from_stack()       # close subroutineDec
        self.tokenizer.advance()

    def compile_parameter_list(self):
        """Compiles a (possibly empty) parameter list, not including the
        enclosing "()".
        """
        el = self.create_child_to_open_tag("parameterList")
        self.stack.append(el)
        # The list ends at the ')' symbol, which the caller consumes.
        while self.tokenizer.token_type() != consts.SYMBOL:
            # Parameter type: keyword for primitives, identifier for classes.
            if self.tokenizer.token_type() == consts.KEYWORD:
                self.compile_keyword()
            elif self.tokenizer.token_type() == consts.IDENTIFIER:
                self.compile_identifier()
            self.tokenizer.advance()
            self.compile_identifier()   # parameter name
            self.tokenizer.advance()
            if self.tokenizer.symbol() == consts.COMMA:
                self.compile_symbol()
                self.tokenizer.advance()
        self.pop_from_stack()

    def pop_from_stack(self):
        # Close the innermost open element. Childless elements get
        # indentation-only text so they serialize as an open/close pair
        # on separate lines instead of a collapsed <tag />.
        if len(self.stack[-1]) == 0:
            self.stack[-1].text = "\n" + " " * (len(self.stack)-1)
        self.stack.pop()

    def compile_var_dec(self):
        """Compiles a var declaration: 'var' type varName (',' varName)* ';'."""
        el = self.create_child_to_open_tag("varDec")
        self.stack.append(el)
        self.compile_keyword()      # 'var'
        self.tokenizer.advance()
        self.compile_tvar_name()    # type varName (',' varName)* ';'
        self.pop_from_stack()

    def compile_statements(self):
        """Compiles a sequence of statements, not including the enclosing
        "{}".
        """
        el = self.create_child_to_open_tag("statements")
        self.stack.append(el)
        # Dispatch on the statement keyword until a non-keyword (i.e. '}').
        while self.tokenizer.token_type() == consts.KEYWORD:
            if self.tokenizer.keyword() == "LET":
                self.compile_let()
            elif self.tokenizer.keyword() == "IF":
                self.compile_if()
            elif self.tokenizer.keyword() == "WHILE":
                self.compile_while()
            elif self.tokenizer.keyword() == "DO":
                self.compile_do()
            elif self.tokenizer.keyword() == "RETURN":
                self.compile_return()
        self.pop_from_stack()

    def compile_do(self):
        """Compiles a do statement:
        'do' subroutineName|(className|varName '.' subroutineName)
        '(' expressionList ')' ';'."""
        el = self.create_child_to_open_tag("doStatement")
        self.stack.append(el)
        self.compile_keyword()      # 'do'
        self.tokenizer.advance()
        self.compile_identifier()   # subroutineName or class/var name
        self.tokenizer.advance()
        # Qualified call: name '.' subroutineName
        if self.tokenizer.symbol() == ".":
            self.compile_symbol()
            self.tokenizer.advance()
            self.compile_identifier()
            self.tokenizer.advance()
        self.compile_symbol()       # '('
        self.tokenizer.advance()
        self.compile_expression_list()
        self.compile_symbol()       # ')'
        self.tokenizer.advance()
        self.compile_symbol()       # ';'
        self.pop_from_stack()
        self.tokenizer.advance()

    def compile_let(self):
        """Compiles a let statement:
        'let' varName ('[' expression ']')? '=' expression ';'."""
        el = self.create_child_to_open_tag("letStatement")
        self.stack.append(el)
        self.compile_keyword()      # 'let'
        self.tokenizer.advance()
        self.compile_identifier()   # varName
        self.tokenizer.advance()
        # Optional array subscript on the left-hand side.
        if self.tokenizer.symbol() == "[":
            self._compile_arr_proc()
        self.compile_symbol()       # '='
        self.tokenizer.advance()
        self.compile_expression()
        self.compile_symbol()       # ';'
        self.pop_from_stack()
        self.tokenizer.advance()

    def _compile_arr_proc(self):
        # '[' expression ']' — the subscript of an array access.
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_expression()
        self.compile_symbol()
        self.tokenizer.advance()

    def compile_while(self):
        """Compiles a while statement:
        'while' '(' expression ')' '{' statements '}'."""
        el = self.create_child_to_open_tag("whileStatement")
        self.stack.append(el)
        self.compile_keyword()      # 'while'
        self.tokenizer.advance()
        self.compile_symbol()       # '('
        self.tokenizer.advance()
        self.compile_expression()
        self.compile_symbol()       # ')'
        self.tokenizer.advance()
        self.compile_symbol()       # '{'
        self.tokenizer.advance()
        self.compile_statements()
        self.compile_symbol()       # '}'
        self.pop_from_stack()
        self.tokenizer.advance()

    def compile_return(self):
        """Compiles a return statement: 'return' expression? ';'."""
        el = self.create_child_to_open_tag("returnStatement")
        self.stack.append(el)
        self.compile_keyword()      # 'return'
        self.tokenizer.advance()
        # Optional return expression (absent when next token is ';').
        if self.tokenizer.symbol() != ";" and self.tokenizer.token_type() != consts.SYMBOL:
            self.compile_expression()
        self.compile_symbol()       # ';'
        self.pop_from_stack()
        self.tokenizer.advance()

    def compile_if(self):
        """Compiles a if statement, possibly with a trailing else clause."""
        el = self.create_child_to_open_tag("ifStatement")
        self.stack.append(el)
        self.compile_keyword()      # 'if'
        self.tokenizer.advance()
        self.compile_symbol()       # '('
        self.tokenizer.advance()
        self.compile_expression()
        self.compile_symbol()       # ')'
        self.tokenizer.advance()
        self.compile_symbol()       # '{'
        self.tokenizer.advance()
        self.compile_statements()
        self.compile_symbol()       # '}'
        self.tokenizer.advance()
        # Optional 'else' '{' statements '}' clause.
        if self.tokenizer.keyword() == 'ELSE' and self.tokenizer.token_type() == consts.KEYWORD:
            self._compile_kw_in_if()
        self.pop_from_stack()

    def _compile_kw_in_if(self):
        # 'else' '{' statements '}'
        self.compile_keyword()
        self.tokenizer.advance()
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_statements()
        self.compile_symbol()
        self.tokenizer.advance()

    def compile_expression(self):
        """Compiles an expression: term (op term)*."""
        el = self.create_child_to_open_tag("expression")
        self.stack.append(el)
        self.compile_term()
        while self.tokenizer.token_type() == consts.SYMBOL and self.tokenizer.symbol() in consts.OPERATORS:
            self.compile_symbol()   # binary operator
            self.tokenizer.advance()
            self.compile_term()
        self.pop_from_stack()

    def compile_term(self):
        """Compiles a term.
        This routine is faced with a slight difficulty when
        trying to decide between some of the alternative parsing rules.
        Specifically, if the current token is an identifier, the routing must
        distinguish between a variable, an array entry, and a subroutine call.
        A single look-ahead token, which may be one of "[", "(", or "." suffices
        to distinguish between the three possibilities. Any other token is not
        part of this term and should not be advanced over.
        """
        should_advance = True
        el = self.create_child_to_open_tag("term")
        self.stack.append(el)
        t = self.tokenizer.token_type()
        if t == consts.INT_CONST:
            self.compile_int_const()
        elif t == consts.STRING_CONST:
            self.compile_string_const()
        elif t == consts.KEYWORD:
            # keyword constant: true | false | null | this
            self.compile_keyword()
        elif t == consts.IDENTIFIER:
            self.compile_identifier()
            self.tokenizer.advance()   # look-ahead to disambiguate
            if self.tokenizer.symbol() == "[":
                should_advance = True
                self._compile_bracket_in_term()    # array entry
            elif self.tokenizer.symbol() == ".":
                should_advance = True
                self._compile_dot_in_term()        # qualified call
            elif self.tokenizer.symbol() == "(":
                should_advance = True
                self._compile_paran_in_term()      # direct call
            else:
                # Plain variable: already advanced past it, don't again.
                should_advance = False
        elif self.tokenizer.symbol() == "(":
            self._compile_paran_in_term_v2()       # '(' expression ')'
        elif self.tokenizer.symbol() in ["~","-"]:
            # Unary operator followed by a term.
            self.compile_symbol()
            self.tokenizer.advance()
            self.compile_term()
            should_advance = False
        if should_advance:
            self.tokenizer.advance()
        self.pop_from_stack()

    def _compile_paran_in_term_v2(self):
        # '(' expression ')' — a parenthesized sub-expression.
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_expression()
        self.compile_symbol()

    def _compile_paran_in_term(self):
        # '(' expressionList ')' — arguments of a subroutine call.
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_expression_list()
        self.compile_symbol()

    def _compile_bracket_in_term(self):
        # '[' expression ']' — array subscript inside a term.
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_expression()
        self.compile_symbol()

    def _compile_dot_in_term(self):
        # '.' subroutineName '(' expressionList ')' — qualified call.
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_identifier()
        self.tokenizer.advance()
        self.compile_symbol()
        self.tokenizer.advance()
        self.compile_expression_list()
        self.compile_symbol()

    def compile_expression_list(self):
        """Compiles a (possibly empty) comma-separated list of expressions."""
        el = self.create_child_to_open_tag("expressionList")
        self.stack.append(el)
        # Empty list iff the next token is the closing ')'.
        if self.tokenizer.symbol() != ")" and self.tokenizer.token_type() != consts.SYMBOL:
            self.compile_expression()
            while self.tokenizer.symbol() == consts.COMMA and self.tokenizer.token_type() == consts.SYMBOL:
                self.compile_symbol()
                self.tokenizer.advance()
                self.compile_expression()
        # A first argument that itself starts with '(' is a symbol token,
        # so it is handled by this second branch.
        if self.tokenizer.symbol() =="(":
            self.compile_expression()
            while self.tokenizer.symbol() == consts.COMMA and self.tokenizer.token_type() == consts.SYMBOL:
                self.compile_symbol()
                self.tokenizer.advance()
                self.compile_expression()
        self.pop_from_stack()

    def compile_tvar_name(self):
        # type varName (',' varName)* ';' — shared tail of classVarDec
        # and varDec.
        if self.tokenizer.token_type() == consts.KEYWORD:
            self.compile_keyword()      # primitive type
        elif self.tokenizer.token_type() == consts.IDENTIFIER:
            self.compile_identifier()   # class type
        self.tokenizer.advance()
        self.compile_identifier()       # first varName
        self.tokenizer.advance()
        while self.tokenizer.symbol() == consts.COMMA:
            self.compile_symbol()
            self.tokenizer.advance()
            self.compile_identifier()   # additional varName
            self.tokenizer.advance()
        self.compile_symbol()           # ';'
        self.tokenizer.advance()

    def _spaced_st(self, st: str):
        # Terminal text is padded with spaces: "<keyword> let </keyword>".
        return " " + st + " "

    def create_child_to_open_tag(self, name: str):
        # Append a new child element to the innermost open element.
        return ET.SubElement(self.stack[-1], (name))

    def compile_int_const(self):
        # Emit an integerConstant terminal for the current token.
        el = self.create_child_to_open_tag(consts.INT_CONST_STR)
        el.text = self._spaced_st(self.tokenizer.identifier())

    def compile_string_const(self):
        # Emit a stringConstant terminal for the current token.
        el = self.create_child_to_open_tag(consts.STR_CONST_STR)
        el.text = self._spaced_st(self.tokenizer.identifier())

    def compile_identifier(self):
        # Emit an identifier terminal for the current token.
        el = self.create_child_to_open_tag(consts.IDENTIFIER_STR)
        el.text = self._spaced_st(self.tokenizer.identifier())

    def compile_keyword(self):
        # Emit a keyword terminal; tokenizer reports keywords upper-case,
        # the XML output wants them lower-case.
        el = self.create_child_to_open_tag(consts.KEYWORD_STR)
        el.text = self._spaced_st(self.tokenizer.keyword().lower())

    def compile_symbol(self):
        # Emit a symbol terminal for the current token.
        sym = self.tokenizer.symbol()
        el = self.create_child_to_open_tag(consts.SYMBOL_STR)
        el.text = self._spaced_st(sym)
| xrahoo/nand2tetris-python | 10/compilation_engine.py | compilation_engine.py | py | 15,493 | python | en | code | 6 | github-code | 13 |
34785463338 | from rct229.rulesets.ashrae9012019.data.schema_enums import schema_enums
from rct229.utils.jsonpath_utils import find_all, find_one
from rct229.utils.utility_functions import find_exactly_one_hvac_system
EXTERNAL_FLUID_SOURCE = schema_enums["ExternalFluidSourceOptions"]
def is_hvac_sys_fluid_loop_purchased_heating(rmi_b, hvac_b_id):
    """Check whether the HVAC system's heating loop is purchased heating.

    Returns True if the hot-water loop of the heating system attached to
    the given HVAC system is served by an external purchased heating
    source (hot water or steam), False otherwise.

    Parameters
    ----------
    rmi_b : json
        RMD at RuleSetModelInstance level
    hvac_b_id : str
        The HVAC system ID.

    Returns
    -------
    bool
        True: the fluid loop associated with the heating system associated
        with the HVAC system is attached to an external purchased heating loop
        False: otherwise
    """
    # IDs of loops fed by external hot-water or steam sources.
    purchased_loop_ids = find_all(
        f'$.external_fluid_sources[*][?(@.type="{EXTERNAL_FLUID_SOURCE.HOT_WATER}"), ?(@.type="{EXTERNAL_FLUID_SOURCE.STEAM}")].loop',
        rmi_b,
    )
    hvac_sys = find_exactly_one_hvac_system(rmi_b, hvac_b_id)
    # The heating system's hot-water loop (None when absent) must be one
    # of the purchased-heating loops.
    heating_loop = find_one("heating_system.hot_water_loop", hvac_sys)
    return heating_loop in purchased_loop_ids
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/ruleset_functions/baseline_systems/baseline_hvac_sub_functions/is_hvac_sys_fluid_loop_purchased_heating.py | is_hvac_sys_fluid_loop_purchased_heating.py | py | 1,640 | python | en | code | 6 | github-code | 13 |
17079770464 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class Courses_course(models.Model):
    # Odoo model for internal training courses that employees register for.
    _name = 'courses.course'
    _description = 'Courses'

    name = fields.Char('Title', required=True)
    professor = fields.Char('Professor', required=True)
    # Price per registered employee (see computed `total` below).
    price = fields.Float('Price', required=True)
    date_ini = fields.Date('Start Date', required=True)
    number_employees = fields.Integer('Number of Registered Employees')
    duration = fields.Char('Duration', required=True)
    certificate = fields.Boolean('Offers Certificate')
    contents = fields.Text('Contents')
    # Stored computed field: price * number_employees.
    total = fields.Float('Total Price', compute="_total", store=True)

    @api.depends('price','number_employees')
    def _total(self):
        # Recomputed automatically when price or the head count changes.
        for r in self:
            r.total = r.number_employees*r.price
| jdolz/Courses_app_Odoo | models/courses_app.py | courses_app.py | py | 807 | python | en | code | 0 | github-code | 13 |
8097424777 | import cv2
def create_dir(_dir) -> str:
    """
    Create directory (including parents) if it doesn't exist.

    Args:
        _dir: str path of the directory to create.

    Returns:
        The same path, so the call can be chained.
    """
    import os

    # exist_ok avoids the check-then-create race of the previous
    # `if not exists: makedirs` pattern under concurrent callers.
    os.makedirs(_dir, exist_ok=True)
    return _dir
def create_video_writer(video_path, output_path, fps=None) -> cv2.VideoWriter:
    """
    Create a VideoWriter mirroring the input video's geometry.

    Args:
        video_path: path of the source video (used for name, size and fps)
        output_path: directory to place the output file in (created if needed)
        fps: output frame rate; defaults to the source video's fps

    Returns:
        An opened cv2.VideoWriter writing MP4 ("mp4v") to
        <output_path>/<basename of video_path>.
    """
    from pathlib import Path

    save_dir = create_dir(output_path)
    save_path = str(Path(save_dir) / Path(video_path).name)

    # Fix: the original opened the capture twice (once just for fps) and
    # never released it, leaking the file handle/decoder.
    cap = cv2.VideoCapture(video_path)
    if fps is None:
        fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    return cv2.VideoWriter(save_path, fourcc, fps, (width, height))
27710475620 | #!/usr/bin/env python
# Author: Guillaume VIDOT
#
# This file is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import re
import argparse
import numpy as np
import logging
import pickle
from learner.attack_gradient_descent_learner import AttackGradientDescentLearner
from learner.early_stopping_learner import EarlyStoppingLearner
from sklearn.metrics import zero_one_loss
from core.metrics import Metrics
from core.attack import Attack
from models.models import Module
from h5py import File
import torch
import random
###############################################################################
def get_dict_arg(arg):
    """Parse a "key=value,key=value" option string into a dict.

    Each value is evaluated as a Python expression (so "lr=0.1,k=2"
    yields a float and an int, and 'name="x"' yields a string).

    Fix: the original exec'd a dict literal assembled by string
    concatenation, which broke on keys containing quotes and was an
    injection hazard; values are now evaluated individually and keys
    kept as plain strings.

    Args:
        arg: the raw option string, or None for no options.

    Returns:
        dict mapping each key to its evaluated value.
    """
    if arg is None:
        return {}
    # Flatten "k1=v1,k2=v2" into [k1, v1, k2, v2], as before.
    tokens = re.split('=|,', arg)
    dict_arg = {}
    for i in range(0, len(tokens), 2):
        # NOTE: eval of command-line input — acceptable for a trusted
        # research script, never for untrusted callers.
        dict_arg[tokens[i]] = eval(tokens[i + 1])
    return dict_arg
###############################################################################
if __name__ == "__main__":
    ###########################################################################
    # Logging: INFO level, no newline terminator so progress prints inline.
    logging.basicConfig(level=logging.INFO)
    # logging.getLogger().disabled = True
    logging.StreamHandler.terminator = ""
    # Fix every RNG seed (python, numpy, torch CPU/GPU) for reproducibility.
    SEED = 0
    random.seed(SEED)
    np.random.seed(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.cuda.manual_seed_all(SEED)
    torch.manual_seed(SEED)
    arg_parser = argparse.ArgumentParser(description='')
    # Add argument parser
    # ----------------------------------------------------------------------- #
    # Dataset name and train/validation split boundaries (row indices).
    arg_parser.add_argument(
        "data", metavar="data", type=str,
        help="data")
    arg_parser.add_argument(
        "--train-start", metavar="train-start", default=None, type=int,
        help="train-start")
    arg_parser.add_argument(
        "--train-end", metavar="train-end", default=None, type=int,
        help="train-start")
    arg_parser.add_argument(
        "--val-start", metavar="val-start", default=None, type=int,
        help="val-start")
    arg_parser.add_argument(
        "--val-end", metavar="val-end", default=None, type=int,
        help="valid-end")
    # ----------------------------------------------------------------------- #
    # Model name and its "k=v,..." options (parsed by get_dict_arg).
    arg_parser.add_argument(
        "model", metavar="model",
        type=str, help="model")
    arg_parser.add_argument(
        "--opt-model", metavar="opt-model", default=None,
        type=str, help="opt-model")
    # ----------------------------------------------------------------------- #
    # Training metric (loss) and its options.
    arg_parser.add_argument(
        "--metric", metavar="metric", default="gibbs",
        type=str, help="metric")
    arg_parser.add_argument(
        "--opt-metric", metavar="opt-metric", default=None,
        type=str, help="opt-metric")
    # ----------------------------------------------------------------------- #
    # Optimizer selection and its options.
    arg_parser.add_argument(
        "--optimizer", metavar="optimizer", default="adam",
        type=str, help="optimizer")
    arg_parser.add_argument(
        "--opt-optimizer", metavar="opt-optimizer", default=None,
        type=str, help="opt-optimizer")
    # ----------------------------------------------------------------------- #
    # Adversarial attack used during training and its options.
    arg_parser.add_argument(
        "--attack", metavar="attack", default="nothing",
        type=str, help="attack")
    arg_parser.add_argument(
        "--opt-attack", metavar="opt-attack", default=None,
        type=str, help="opt-attack")
    # ----------------------------------------------------------------------- #
    # Epoch counts: total epochs and early-stopping validation interval.
    arg_parser.add_argument(
        "--epoch", metavar="epoch", default=10, type=int,
        help="epoch")
    arg_parser.add_argument(
        "--val-epoch", metavar="val-epoch", default=0, type=int,
        help="valid")
    # ----------------------------------------------------------------------- #
    arg_parser.add_argument(
        "--lr", metavar="lr", default=0.00005, type=float,
        help="lr")
    arg_parser.add_argument(
        "--batch-size", metavar="batch-size", default=64, type=int,
        help="batch-size")
    # ----------------------------------------------------------------------- #
    # Optional checkpoint names under data/model/ to load from / save to.
    arg_parser.add_argument(
        "--load", metavar="load", default=None, type=str,
        help="load")
    arg_parser.add_argument(
        "--save", metavar="save", default=None, type=str,
        help="save")
    # ----------------------------------------------------------------------- #
    # Retrieve argument
    arg_list = arg_parser.parse_args()
    train_start = arg_list.train_start
    train_end = arg_list.train_end
    val_start = arg_list.val_start
    val_end = arg_list.val_end
    model = arg_list.model
    opt_model = arg_list.opt_model
    metric = arg_list.metric
    opt_metric = arg_list.opt_metric
    optimizer_name = arg_list.optimizer
    opt_optimizer = arg_list.opt_optimizer
    attack = arg_list.attack
    opt_attack = arg_list.opt_attack
    # An empty option string means "no attack options".
    if(opt_attack == ""):
        opt_attack = None
    epoch = arg_list.epoch
    val_epoch = arg_list.val_epoch
    lr = arg_list.lr
    batch_size = arg_list.batch_size
    # ----------------------------------------------------------------------- #
    # Load the dataset
    data = File("data/"+arg_list.data+".h5", "r")
    x_train = np.array(data["x_train"])
    y_train = np.array(data["y_train"])
    x_val = None
    y_val = None
    # Split the data into training and validation set
    # (each boundary may be None, meaning "from the start" / "to the end").
    if(val_start is None and val_end is not None):
        x_val = x_train[:val_end, :]
        y_val = y_train[:val_end]
    elif(val_start is not None and val_end is not None):
        x_val = x_train[val_start:val_end, :]
        y_val = y_train[val_start:val_end]
    elif(val_start is not None and val_end is None):
        x_val = x_train[val_start:, :]
        y_val = y_train[val_start:]
    if(train_start is None and train_end is not None):
        x_train = x_train[:train_end, :]
        y_train = y_train[:train_end]
    elif(train_start is not None and train_end is not None):
        x_train = x_train[train_start:train_end, :]
        y_train = y_train[train_start:train_end]
    elif(train_start is not None and train_end is None):
        x_train = x_train[train_start:, :]
        y_train = y_train[train_start:]
    # Use the GPU when available, otherwise fall back to CPU.
    device = "cuda"
    new_device = torch.device('cpu')
    if(torch.cuda.is_available() and device != "cpu"):
        new_device = torch.device(device)
    device = new_device
    # Build model and metric from their CLI option strings.
    model_kwargs = get_dict_arg(opt_model)
    model = Module(model, device, **model_kwargs)
    model.to(new_device)
    metric_kwargs = get_dict_arg(opt_metric)
    metric_kwargs.update({"name": metric, "model": model})
    metric = Metrics(**metric_kwargs)
    optimizer_kwargs = get_dict_arg(opt_optimizer)
    if optimizer_name == "adam":
        optim = torch.optim.Adam(model.parameters(), lr=lr)
    # If the metric has its own learnable parameter, rebuild the optimizer
    # over model parameters plus that parameter.
    if(metric.param is not None):
        param_list = list(model.parameters())
        param_list.append(metric.param)
        param_list = torch.nn.ParameterList(param_list)
        if optimizer_name == "adam":
            optim = torch.optim.Adam(param_list, lr=lr)
    # Initialize the attack
    metric_attack = Metrics("attackgibbs", model)
    attack_kwargs = get_dict_arg(opt_attack)
    attack = Attack(attack, model, device, metric_attack.fit, **attack_kwargs)
    # Initialize the model
    criteria = Metrics("attackgibbs", model)
    learner = AttackGradientDescentLearner(
        model, metric.fit, zero_one_loss, attack, optim, device,
        batch_size=batch_size, epoch=epoch)
    # Wrap in early stopping when a validation interval is requested;
    # the criterion depends on whether a validation set exists.
    if(val_epoch > 0 and x_val is not None and y_val is not None):
        learner = EarlyStoppingLearner(
            learner, criteria.fit, val_epoch=val_epoch)
    if(val_epoch > 0 and x_val is None and y_val is None):
        learner = EarlyStoppingLearner(
            learner, metric.fit, val_epoch=val_epoch)
    # Optionally warm-start from a checkpoint; metric state only restored
    # when the checkpoint was trained with the same metric.
    if(arg_list.load is not None):
        load_dict = pickle.load(open("data/model/"+arg_list.load, "rb"))
        learner.load(load_dict["model_param"], beginning=True)
        if(load_dict["arg_list"].metric == metric):
            metric.load(load_dict["metric_param"])
    # Train; without a validation set, validate on the training set.
    if(val_epoch > 0 and x_val is not None and y_val is not None):
        learner = learner.fit(x_train, y_train, x_val, y_val)
    else:
        learner = learner.fit(x_train, y_train, x_train, y_train)
    # Save the model parameters
    if(arg_list.save is not None):
        pickle.dump({
            "model_param": learner.save(),
            "metric_param": metric.save(),
            "arg_list": arg_list
        }, open("data/model/"+arg_list.save, "wb"))
        pickle.dump(learner.list_loss, open("data/loss/"+arg_list.save, "wb"))
###############################################################################
| paulviallard/NeurIPS21-PB-Robustness | learn_model.py | learn_model.py | py | 8,922 | python | en | code | 4 | github-code | 13 |
5608168526 | from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from django.template import loader
from Three.models import Student, Grade
def index(request):
    # Render the app landing page by loading the template explicitly.
    three_index = loader.get_template("three_index.html")
    context = {
        "student_name": "TOM",
    }
    # (translated) No IDE auto-completion hints when calling render() below.
    return HttpResponse(three_index.render(context=context))
def get_grade(request):
    # Demo of a forward relation lookup: student -> grade.
    student = Student.objects.get(pk=4)
    # (translated) Fetched one student's record; below we follow the
    # s_grage foreign key to get the related Grade object.
    grade = student.s_grage
    return HttpResponse("grade:%s" % grade.g_name)
def get_stu(request):
    # Demo of a reverse relation lookup: grade -> all its students.
    grade = Grade.objects.get(g_name="linux")
    # (translated) Reverse query for all students in this grade.
    students = grade.student_set.all()
    for i in students:
        print(i.s_name,i.s_grage.g_name)
    # return render(request,"get_stu.html")
    return HttpResponse("GET IT!")
# Console dice-betting game (all user-facing strings are in Russian).
import random
from random import randint, sample
import datetime

# Greeting ("Welcome to the casino") and session start time.
print('Добро пожаловать в казино')
start = datetime.datetime.now()
# Ask for the initial bankroll; non-numeric input prints an error message
# (NOTE(review): `cash` stays unbound in that case and the loop below
# would raise NameError — confirm intended behavior).
try:
    cash = int(input('Внесите вашу ставку '))
    print(f'Вы внесли - {cash}')
    if cash <= 0:
        print('Не пытайтесь меня обмануть вводите нормальные деньги!')
except:
    print('Произошла ошбика системы!')
# cash = 1000
# Main game loop: play until the bankroll reaches exactly zero.
while cash != 0:
    try:
        # Ask for the bet; reject bets larger than the bankroll.
        bet = int(input(f'Какую сумму из {cash} желаете поставить! '))
        if bet>cash:
            print('У вас не хватает денег!')
            continue
        # Both sides roll two six-sided dice.
        person = [randint(1, 6), randint(1,6)]
        computer = [randint(1, 6), randint(1,6)]
    except:
        print('Вводите только число')
        continue
    # Higher dice total wins the bet; equal totals are a draw.
    if sum(person) > sum(computer):
        cash += bet
        print(f'Вы выйграли у вас денег {cash}')
    elif sum(person) < sum(computer):
        cash -= bet
        print(f'Вы проиграли у вас денег {cash}')
    else:
        print('Ничья!!!')
# Report total time spent in the game.
endgame = datetime.datetime.now() - start
print(f'Вы провели в игре - {endgame}')
#
# lst = ['apple', 'banana', 'orange', 'peach', 'strawberry']
# print(sample(lst, 2))
# print(randint(100, 300))
75056464016 | # Filename: q6_determine_prime.py
# Author: Jason Hong
# Created: 20130222
# Modified: 20130222
# Description: Program to determine whether an integer is a prime number
from math import *
def is_prime(n):
    """Return True if n is a prime number, False otherwise.

    Fix: the original reported 0, 1 and negative numbers as prime;
    primes are defined only for integers greater than 1.
    """
    if n < 2:
        return False
    # Trial division up to sqrt(n) suffices: any factor above sqrt(n)
    # pairs with one below it.
    for d in range(2, int(sqrt(n)) + 1):
        if n % d == 0:
            return False
    return True
# Print primes 10 per row; stop after just over 100 rows.
a = 0  # current candidate number
b = 0  # primes printed on the current row
r = 0 #number of rows printed so far
while a >= 0:
    if is_prime(a) == True:
        print(int(a), end = " ")
        b = b + 1
        # Start a new row after every 10 primes.
        if b == 10:
            print(end = "\n")
            b = 0
            r = r + 1
            # Stop once more than 100 rows have been produced.
            if r > 100:
                break
    a = a + 1
| xJINC/cpy5python | practical03/q6_determine_prime.py | q6_determine_prime.py | py | 593 | python | en | code | 0 | github-code | 13 |
42223805611 | import datetime
import logging
# Expected input layout, e.g. "31.12.2020 11:59PM".
# NOTE: %H is a 24-hour field, so the trailing %p is matched but ignored.
DATE_FORMAT = '%d.%m.%Y %H:%M%p'

logger = logging.getLogger()


def parse_date(date: str) -> datetime.datetime:
    """Parse a DATE_FORMAT string into a datetime.

    Args:
        date: the string to parse, or None.

    Returns:
        The parsed datetime, or None when the input is None.

    Raises:
        ValueError: if the string does not match DATE_FORMAT.
    """
    if date is None:
        return None
    # Fix: the original reassigned the `date` parameter with a bogus
    # `date: datetime` annotation (datetime is a module, not a type here);
    # use a separate local and lazy %-formatting for the debug log.
    parsed = datetime.datetime.strptime(date, DATE_FORMAT)
    logger.debug("parsed date:%s", parsed)
    return parsed
| alonastik/esmi | esmi/utils.py | utils.py | py | 318 | python | en | code | null | github-code | 13 |
41634416046 | """
negative_paren
--------------
Given a file as input, treat each '(' as 1, and each ')' as -1. Print out
position when the running sum becomes negative.
Day 1 of the 2015 Advent of Code game!
"""
from __future__ import print_function
import os
import sys
import argparse
def find_neg_paren(paren_str):
    """
    Follows the Advent of Code day 1 parameters to return a number based on
    open/closed parentheses.

    I.e.:
        ( = 1
        ) = -1
    All other characters are ignored.

    The number returned is the **position** (1-based — not the index!) at
    which the running total first becomes negative.

    Raises:
        ValueError: if the running sum never becomes negative.
    """
    count = 0
    for (i, c) in enumerate(paren_str):
        # Fix: use '==' rather than 'is' — identity comparison against a
        # string literal is implementation-dependent and raises
        # SyntaxWarning on modern CPython.
        if c == '(':
            count += 1
        elif c == ')':
            count -= 1
        if count < 0:
            return i + 1
    raise ValueError('Sum never becomes negative!')
def parse_args():
    """Build and parse this script's command-line arguments."""
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument(
        "-f", "--input-file",
        dest="in_file",
        default="",
        help="Input parentheses file",
    )
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    if args.in_file:
        try:
            # Read the puzzle input and print the 1-based position where
            # the running sum first goes negative.
            parens = open(args.in_file).read()
            print(find_neg_paren(parens))
            sys.exit(0)
        except ValueError as e:
            # Sum never went negative: report and exit non-zero.
            print(e)
            sys.exit(1)
    else:
        print("No input file!")
        sys.exit(1)
6489541743 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 10:46:13 2021
This python module holds basic functions needed for data preprocessing of
meteorological measurements from LoggerNet (in the form of .dat files ).
The main utilities are to calculate downwelling longwave radiation from
measured body temperature, to extract and redefine column names and
to performq quality control of the measured values.
@author: Julia Kukulies, see GitHub for history
"""
import numpy as np
import pandas as pd
from pathlib import Path
from shutil import copyfile
from datetime import date, datetime, timedelta
import pytz
import warnings
# import plotting module and functions for preprocessing
from plotting import bridge_plot, roof_plot, roof_table, bridge_table
############################### Define functions ##############################
def load_logger_data(f):
    """Top-level function to load a table from a logger output file.

    f - filename (or any buffer pandas can read)
    """
    # Row 1 of the .dat file is the header; the two rows after it hold
    # units/aggregation info rather than measurements, so drop them.
    # LoggerNet writes "NAN" for missing values — blank those out.
    table = (
        pd.read_table(f, sep=",", header=1, low_memory=False)
        .iloc[2:, :]
        .replace("NAN", "", regex=True)
    )
    # Parsed timestamp column used downstream for filtering by date.
    table["dtime"] = pd.to_datetime(table.TIMESTAMP)
    return table
def make_csv(table, pset, year, month):
    """Write one month of processed logger data to a CSV file.

    Inputs:
        table - as output from load_logger_data
        pset  - settings dict with 'csv_output_dir', 'station' ("roof" or
                "bridge"), 'interval' ("10" or "5"); 'do_QC' is consumed
                by get_data
        year & month as int
    Output:
        Path of the CSV file that was written.
    """
    output_dir = pset["csv_output_dir"]
    station = pset["station"]
    interval = pset["interval"]
    # Zero-pad the month so the file names sort chronologically.
    MONTH = str(month).zfill(2)
    YEAR = str(year)
    # Restrict to the requested month before processing.
    data = table[(table.dtime.dt.year == year) & (table.dtime.dt.month == month)]
    # Extract/rename columns (and optionally quality-control) for the station.
    df_ = get_data(data, pset)
    # Saving monthly .csv file
    OUTPUT10 = (
        "gvc_" + station + "_" + interval + "mindata_" + YEAR + "_" + MONTH + ".csv"
    )
    # save locally
    source = Path(output_dir, OUTPUT10)
    df_.to_csv(
        source,
        index=False,
        float_format="%.5g",
        sep=",",
        encoding="utf-8",
        na_rep="",
        header=df_.columns,
    )
    return source
def make_plot(table, pset):
    """Create a plot for the last 4 days, copy to web directory"""
    # create plot of last 4 days and table image of last measurement
    # and send to RCG server for display on webpage
    end = datetime.today()
    # take one more hour to plot if it is summer time
    # NOTE(review): both sides localize the same naive timestamp, and
    # tz_localize keeps the wall-clock hour unchanged, so the two .hour
    # values appear to always be equal (i.e. the +1h branch always runs).
    # Confirm the intended DST detection.
    if (
        pd.Timestamp(datetime.today()).tz_localize(tz=pytz.FixedOffset(60)).hour
        == pd.Timestamp(datetime.today()).tz_localize("CET").hour
    ):
        today = datetime.today() + timedelta(hours=1)
    else:
        today = datetime.today()
    start = today - timedelta(days=4)
    # data = table[(table.dtime.dt.year == year) & (table.dtime.dt.month == month)]
    # Keep only the last four days of measurements.
    data = table[(table.dtime > start) & (table.dtime <= end)]
    # Call processing functions
    df_ = get_data(data, pset)
    df_["dtime"] = pd.to_datetime(df_.TIMESTAMP)
    df_["TIMESTAMP"] = pd.to_datetime(df_.TIMESTAMP)
    # convert time to CEST
    df_["TIMESTAMP"] = (
        df_["TIMESTAMP"]
        .dt.tz_localize(tz=pytz.FixedOffset(60))
        .dt.tz_convert("Europe/Stockholm")
    )
    # extract last four days
    # mask = (df_roof["dtime"] > start) & (df_roof["dtime"] <= end)
    # roof = df_roof.loc[mask]
    # Render English and Swedish plot + table images for the matching
    # station and publish each one to the web directory.
    if pset["station"] == "roof":
        LOCAL_NAME = roof_plot(df_, pset["plot_output_dir"])
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "GVC_plot.png")
        LOCAL_NAME = roof_plot(df_, pset["plot_output_dir"], swedish=True)
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "GVC_plot_sv.png")
        LOCAL_NAME = roof_table(df_, pset["plot_output_dir"])
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "GVCtable_plot.png")
        LOCAL_NAME = roof_table(df_, pset["plot_output_dir"], swedish=True)
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "GVCtable_plot_sv.png")
    if pset["station"] == "bridge":
        LOCAL_NAME = bridge_plot(df_, pset["plot_output_dir"])
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "Bridge_plot.png")
        LOCAL_NAME = bridge_plot(df_, pset["plot_output_dir"], swedish=True)
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "Bridge_plot_sv.png")
        LOCAL_NAME = bridge_table(df_, pset["plot_output_dir"])
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "Bridgetable_plot.png")
        LOCAL_NAME = bridge_table(df_, pset["plot_output_dir"], swedish=True)
        copyfile(LOCAL_NAME, pset["plot_web_dir"] / "Bridgetable_plot_sv.png")
def get_radiation(data):
    """Compute downwelling longwave radiation from the sensor's body
    temperature via the Stefan-Boltzmann law, corrected with the measured
    thermopile signal."""
    sigma = 5.670374419 * 10 ** (-8)  # Stefan-Boltzmann constant
    body_temp = pd.to_numeric(data.temp_L_K_Avg.values)
    signal = pd.to_numeric(data.L_sig_Avg.values)
    return sigma * body_temp ** 4 + signal
def get_data(data, pset):
    """Extract the usable columns from raw logger data into a clean frame.

    Input:
        data - pandas dataframe with data from the logger file; must have
               RECORD, TIMESTAMP, dtime and the station's measurement columns
        pset - settings dict; 'station' selects the column set ("roof"
               includes the radiation columns) and 'do_QC' toggles QC flags
    Returns:
        df - pandas dataframe with extracted data under the official names
    """
    station = pset["station"]
    if station == "roof":
        # Raw logger column names ...
        oldcolumns = [
            "Wd_avg_Avg",
            "Ws_min_Avg",
            "Ws_avg_Avg",
            "Ws_max_Avg",
            "Ta_Avg",
            "RH_Avg",
            "P_Avg",
            "Ri_intens_Avg",
            "Hd_intens_Avg",
            "SPN1_Total_Avg",
            "SPN1_diff_Avg",
            "temp_L_K_Avg",
            "L_sig_Avg",
        ]
        # ... and the official names they are published under.
        newcolumns = [
            "wd",
            "ws_min",
            "ws",
            "ws_max",
            "Ta",
            "RH",
            "P",
            "Rain",
            "Hail",
            "K_down_SPN1",
            "K_diff_SPN1",
            "L_down",
            "K_down_Knz",
        ]
    else:
        oldcolumns = [
            "Wd_avg_Avg",
            "Ws_min_Avg",
            "Ws_avg_Avg",
            "Ws_max_Avg",
            "Ta_Avg",
            "RH_Avg",
            "P_Avg",
            "Ri_intens_Avg",
            "Hd_intens_Avg",
        ]
        newcolumns = ["wd", "ws_min", "ws", "ws_max", "Ta", "RH", "P", "Rain", "Hail"]
    # Get columns for Code, Year, DOY, HHMM
    df = pd.DataFrame()
    df["Code"] = data.RECORD.values.astype(str)
    df["Year"] = data.dtime.dt.year.values.astype(str)
    df["DOY"] = data.dtime.dt.dayofyear.values.astype(str)
    # time formatting: build a zero-padded HHMM string, e.g. 7:05 -> "0705"
    hours = data.dtime.dt.hour.values.astype(str)
    minutes = data.dtime.dt.minute.values.astype(str)
    for i, h in enumerate(hours):
        if int(h) < 10:
            hours[i] = "0" + h
    for i, m in enumerate(minutes):
        if int(m) < 10:
            minutes[i] = "0" + m
    df["hours"] = hours
    df["minutes"] = minutes
    df["HHMM"] = df["hours"] + df["minutes"]
    df["TIMESTAMP"] = data.TIMESTAMP.values.astype(str)
    df = df.drop(columns=["hours", "minutes"])
    # Get columns for meteorological data
    for i, col in enumerate(oldcolumns):
        newname = newcolumns[i]
        newcol = pd.to_numeric(data[col].values)
        df[newname] = newcol
    if pset["do_QC"]:
        with warnings.catch_warnings():
            # suppress warnings for this specific function
            warnings.simplefilter("ignore")
            df = quality_control(df)
    if station == "roof":
        # calculate downwelling longwave-radiation with Stefan Boltzmann law
        L_down = get_radiation(data)
        # replace body temperature with L_down
        df["L_down"] = L_down
        # Remove the raw thermopile signal and leave an empty column for the
        # old radiation data.
        # BUGFIX: this previously assigned to "K_down_KnZ" (capital Z), which
        # created an extra column and left the raw signal in "K_down_Knz".
        df["K_down_Knz"] = ""
    return df
def quality_control(df):
    """This function performs a quality check on the meteorological measurements and
    adds quality flags for the main meteorological variables.

    Input: pandas dataframe containing meteorological data (assumed to use
           the default ascending RangeIndex produced by get_data)
    Returns: pandas dataframe with the following flags:
            0 = passed all controls
            1 = not in plausible range, 2 = inconsistency
            3 = too big jumps
            4 = dead band (too small changes over time)
    """
    # Initialise every flag column to 0 ("passed"); the checks below
    # overwrite individual rows.
    # relative humidity (%)
    df["RH_QC"] = 0
    # pressure (hPa)
    df["P_QC"] = 0
    # mean air temperature (degC)
    df["Ta_QC"] = 0
    # mean wind speed (m/s)
    df["ws_QC"] = 0
    # min wind speed (m/s)
    df["ws_max_QC"] = 0
    # max wind speed (m/s)
    df["ws_min_QC"] = 0
    # mean wind direction (deg)
    df["wd_QC"] = 0
    # quality checks for each value
    for i in df.index.values:
        rh = df[df.index == i].RH.values[0]
        pr = df[df.index == i].P.values[0]
        ta = float(df[df.index == i].Ta.values[0])
        wd = df[df.index == i].wd.values[0]
        ws = df[df.index == i].ws.values[0]
        ws_min = df[df.index == i].ws_min.values[0]
        ws_max = df[df.index == i].ws_max.values[0]
        # plausible ranges
        if rh < 0 or rh > 100:
            df.loc[i, "RH_QC"] = 1
        if pr < 500 or pr > 1100:
            df.loc[i, "P_QC"] = 1
        if wd < 0 or wd > 360:
            df.loc[i, "wd_QC"] = 1
        if ws < 0 or ws > 75:
            df.loc[i, "ws_QC"] = 1
        if ws_min < 0 or ws_min > 75:
            df.loc[i, "ws_min_QC"] = 1
        if ws_max < 0 or ws_max > 75:
            df.loc[i, "ws_max_QC"] = 1
        if ta < -50 or ta > 50:
            df.loc[i, "Ta_QC"] = 1
        # internal inconsistencies
        if wd == 0 and ws > 0:
            df.loc[i, "wd_QC"] = 2
        if ws == 0 and wd > 0:
            df.loc[i, "wd_QC"] = 2
        # comparison with surrounding values (interior rows only)
        if i > 2 and i < np.shape(df.index.values)[0] - 1:
            p = i - 1  # index previous value
            rh_p = df[df.index == p].RH.values[0]
            pr_p = df[df.index == p].P.values[0]
            ta_p = float(df[df.index == p].Ta.values[0])
            wd_p = df[df.index == p].wd.values[0]
            ws_p = df[df.index == p].ws.values[0]
            n = i + 1  # index next value
            rh_n = df[df.index == n].RH.values[0]
            pr_n = df[df.index == n].P.values[0]
            ta_n = float(df[df.index == n].Ta.values[0])
            wd_n = df[df.index == n].wd.values[0]
            ws_n = df[df.index == n].ws.values[0]
            # set maximum variance to check time consistency (detect big jumps in data)
            # define limits
            lim_T = 3  # Cdeg
            lim_RH = 15  # %
            lim_P = 2  # hpa
            lim_ws = 20  # m/s
            lim_irr = 800  # W/m^2
            if np.absolute(ta - ta_p) >= lim_T:
                df.loc[i, "Ta_QC"] = 3
            if np.absolute(rh - rh_p) >= lim_RH:
                df.loc[i, "RH_QC"] = 3
            if np.absolute(pr - pr_p) >= lim_P:
                df.loc[i, "P_QC"] = 3
            if np.absolute(ws - ws_p) >= lim_ws:
                df.loc[i, "ws_QC"] = 3
            # check instantanous values (ts) with standard deviation for each variable
            ts = np.absolute(wd - wd_p) + np.absolute(wd - wd_n)
            # get std of last 12 hours
            # NOTE(review): .loc start (i-1) is greater than stop (i-6*12), so on
            # an ascending index this selects an EMPTY range; nanstd then yields
            # NaN (hence the suppressed warnings upstream) and the 4-sigma tests
            # below never fire. Presumably df.loc[i - 6 * 12 : i - 1, ...] was
            # intended -- confirm.
            std_wd = np.nanstd(df.loc[i - 1 : i - 6 * 12, "wd"].values)
            if ts > 4 * std_wd:
                df.loc[i, "wd_QC"] = 3
            ts = np.absolute(ws - ws_p) + np.absolute(ws - ws_n)
            std_ws = np.nanstd(df.loc[i - 1 : i - 6 * 12, "ws"].values)
            if ts > 4 * std_ws:
                df.loc[i, "ws_QC"] = 3
            ts = np.absolute(pr - pr_p) + np.absolute(pr - pr_n)
            std_pr = np.nanstd(df.loc[i - 1 : i - 6 * 12, "P"].values)
            if ts > 4 * std_pr:
                df.loc[i, "P_QC"] = 3
            ts = np.absolute(rh - rh_p) + np.absolute(rh - rh_n)
            std_rh = np.nanstd(df.loc[i - 1 : i - 6 * 12, "RH"].values)
            if ts > 4 * std_rh:
                df.loc[i, "RH_QC"] = 3
            ts = np.absolute(ta - ta_p) + np.absolute(ta - ta_n)
            std_ta = np.nanstd(df.loc[i - 1 : i - 6 * 12, "Ta"].values)
            if ts > 4 * std_ta:
                df.loc[i, "Ta_QC"] = 3
            # persistence test (minimum required variability of instaneous value during two hours)
            # NOTE(review): same reversed-slice issue as above -- these means are
            # computed over an empty range (NaN), so the comparisons are always
            # False and the persistence flags never trigger. Confirm intent.
            avg_wd = np.nanmean(df.loc[i - 1 : i - 12, "wd"].values)
            if np.absolute(wd - avg_wd) < 10:
                df.loc[i, "wd_QC"] = 4
            avg_ws = np.nanmean(df.loc[i - 1 : i - 12, "ws"].values)
            if np.absolute(ws - avg_ws) < 0.1:
                df.loc[i, "ws_QC"] = 4
            avg_pr = np.nanmean(df.loc[i - 1 : i - 12, "P"].values)
            if np.absolute(pr - avg_pr) < 0.1:
                df.loc[i, "P_QC"] = 4
            avg_rh = np.nanmean(df.loc[i - 1 : i - 12, "RH"].values)
            if np.absolute(rh - avg_rh) < 0.1:
                df.loc[i, "RH_QC"] = 4
            avg_ta = np.nanmean(df.loc[i - 1 : i - 12, "Ta"].values)
            if np.absolute(ta - avg_ta) < 0.1:
                df.loc[i, "Ta_QC"] = 4
            # minimum standard deviation of last 12 hours to detect blocking of sensor ("dead band")
            # NOTE(review): NaN comparisons are False here too (see above), and the
            # assignments also target reversed (empty) slices.
            if std_ta < 0.1:
                df.loc[i - 1 : i - 6 * 12, "Ta_QC"] = 4
            if std_pr < 0.1:
                df.loc[i - 1 : i - 6 * 12, "P_QC"] = 4
            if std_ws < 0.5:
                df.loc[i - 1 : i - 6 * 12, "ws_QC"] = 4
            if std_wd < 10:
                df.loc[i - 1 : i - 6 * 12, "wd_QC"] = 4
            if std_rh < 1:
                df.loc[i - 1 : i - 6 * 12, "RH_QC"] = 4
    return df
| geovetarcentrum/climate-stations | python/utils.py | utils.py | py | 14,276 | python | en | code | 1 | github-code | 13 |
# Create Error Types
class DuplicateError(Exception):
    """Raised when inserting a node whose id already exists in the tree."""
class NodeDoesNotExist(Exception):
    """Raised when an operation targets a node that is not in the tree."""
class IDsDoNotMatch(Exception):
    """Raised when two ids that must be equal differ."""
class EmptyTreeError(Exception):
    """Raised when an operation requires a non-empty tree."""
class Node:
    """
    A node in a Binary Tree.

    Once initiated, the node remains empty until a node is inserted.
    A node is inserted/sorted with its ID: data objects must expose an
    integer ``id`` attribute (``print_tree`` additionally uses ``name``
    and ``nickname``).
    You can insert, delete, replace, or find a node in the binary tree.
    You can print the entire binary tree from left to right with print_tree().
    """

    def __init__(self):
        """Initiate the tree. Tree is empty until a node is inserted."""
        self.data = None    # payload object (must have an ``id``)
        self.right = None   # subtree with larger ids
        self.left = None    # subtree with smaller ids
        self.parent = None  # back-reference, set by insert()
        self._add_aliases()

    def _add_aliases(self):
        """
        NOT TO BE USED (other than in the __init__ function)!
        Separate function that adds aliases to functions.
        """
        self.dist_to_farthest = self.depth
        self.delete = self.remove
        self.search = self.find

    def find_node(self, id):
        """
        Find a node in the tree. Returns the Node or None if not found.

        Parameters:
        -id: The ID of the node you want to find.
        """
        if self.data is None:
            return None
        if id > self.data.id:
            # BUGFIX: recurse with find_node (not find) so a Node object is
            # returned at every level, matching this method's contract.
            return self.right.find_node(id) if self.right is not None else None
        if id < self.data.id:
            return self.left.find_node(id) if self.left is not None else None
        return self

    def find(self, id):
        """Same as find_node() but returns the stored data (or None)."""
        # BUGFIX: guard against a missing node instead of dereferencing None.
        node = self.find_node(id)
        return node.data if node is not None else None

    def replace(self, oldNode, newNode):
        """
        Replace a node in the tree.

        Parameters:
        -oldNode: The id (or node object) of the node you want to replace.
        -newNode: The new node (data object) you want to replace the old node with.

        oldNode and newNode MUST have the same id!
        """
        if type(oldNode) != int:
            try:
                oldNode = oldNode.id
            except AttributeError:
                raise TypeError("Old node must be an int or an object with an 'id' variable.")
        if oldNode != newNode.id:
            raise IDsDoNotMatch(f"Cannot replace node. Old node and replacement must have the same ID. Use delete() and insert() to add/delete IDs.")
        if self.find(oldNode) is None:
            raise NodeDoesNotExist(f"Cannot replace node. Node {oldNode} does not exist. Use insert() to add new nodes")
        if oldNode < self.data.id:
            self.left.replace(oldNode, newNode)
        elif oldNode > self.data.id:
            self.right.replace(oldNode, newNode)
        else:
            self.data = newNode

    def remove(self, node):
        """
        Remove a node from the tree.

        Parameters:
        -node: The node you want to remove (id or data object)

        Returns the (possibly new) root of this subtree so recursive
        callers can re-attach it.
        """
        if type(node) != int:
            try:
                node = node.id
            except AttributeError:
                raise TypeError("Node must be an int or an object with an 'id' variable.")
        if self.find(node) is None:
            raise NodeDoesNotExist(f"Cannot delete node. Node {node} does not exist. Use insert() to add new nodes")
        if node < self.data.id:
            self.left = self.left.delete(node)
        elif node > self.data.id:
            self.right = self.right.delete(node)
        else:
            # Zero or one child: splice the (possibly None) child in.
            if self.right is None:
                return self.left
            if self.left is None:
                return self.right
            # Two children: copy the in-order successor (left-most node of
            # the right subtree) into this node, then delete it over there.
            temp_node = self.right
            minimum_data = temp_node.data
            while temp_node.left is not None:
                temp_node = temp_node.left
                minimum_data = temp_node.data
            self.data = minimum_data
            self.right = self.right.delete(self.data)
        return self

    def insert(self, newData):
        """
        Insert a node into the tree.

        Parameters:
        -newData: The new node (data object) to insert.

        The ID of newData CANNOT already be in the tree.
        """
        if self.find(newData.id) is not None:
            # Raise a duplicate error
            raise DuplicateError(f"There is already a node with the ID of {newData.id}")
        if self.data:
            if newData.id < self.data.id:
                if self.left is None:
                    self.left = Node()
                    self.left.insert(newData)
                    self.left.parent = self
                else:
                    self.left.insert(newData)
            elif newData.id > self.data.id:
                if self.right is None:
                    self.right = Node()
                    self.right.insert(newData)
                    self.right.parent = self
                else:
                    self.right.insert(newData)
        else:
            self.data = newData

    def length(self):
        """Returns length of tree (int)."""
        if self.data is None:
            return 0
        leftCounter = self.left.length() if self.left is not None else 0
        rightCounter = self.right.length() if self.right is not None else 0
        return leftCounter + 1 + rightCounter

    def depth(self):
        """
        Calculates the distance to the farthest node.

        Returns the distance and the farthest node's data. Leaves report
        distance 0, so the count increases by one per level.
        """
        distance = 1
        if self.right is not None:
            rightDist, rightData = self.right.dist_to_farthest()
        else:
            rightDist = None
        if self.left is not None:
            leftDist, leftData = self.left.dist_to_farthest()
        else:
            leftDist = None
        if rightDist is None and leftDist is None:
            return 0, self.data  # leaf: start counting up from 0
        if rightDist is None:
            return distance + leftDist, leftData
        if leftDist is None:
            return distance + rightDist, rightData
        if rightDist > leftDist:
            return distance + rightDist, rightData
        return distance + leftDist, leftData

    def display(self):
        """Pretty-print up to the first six levels of the tree as a pyramid.

        Only supports ids that are at most two characters wide.
        """
        if self.data is None:
            print("Empty Tree")
            return
        # Breadth-first walk that records the id (or a blank placeholder)
        # for every slot of a complete tree, level by level.
        queue = []
        tree = {}
        level = 0
        queue.append(self)
        allNotNone = True
        while allNotNone and level < 6:
            tree[level] = []
            for i in range(2**level):
                parentNode = queue.pop(0)
                if parentNode is None:
                    tree[level].append(" ")
                    queue.append(None)
                    queue.append(None)
                else:
                    tree[level].append(f"{parentNode.data.id}")
                    queue.append(parentNode.left)
                    queue.append(parentNode.right)
            if all(item is None for item in queue):
                allNotNone = False
            level += 1
        # Width of the widest id determines the spacing scheme below.
        largestNumLength = 0
        for i in range(len(tree)):
            for x in range(len(tree[i])):
                tempLength = len(tree[i][x])
                if tempLength > largestNumLength:
                    largestNumLength = tempLength
        # Gap sizes between siblings per level (widest gap at the root).
        spacing = [1, 3, 7, 15, 31, 63, 124, 255, 511, 1023]
        if largestNumLength == 1:
            space = " "
            spacingMulti = 2
        elif largestNumLength == 2:
            spacingMulti = 1
            space = " "
        else:
            print("Cannot print tree. IDs are more than two characters wide.")
            return
        # Right-justify each cell and append its trailing gap.
        n = len(tree) - 1
        for i in range(len(tree)):
            for v in range(len(tree[i])):
                tree[i][v] = tree[i][v].rjust(largestNumLength)
                for z in range(spacing[n]):
                    tree[i][v] += space
            n -= 1
        # Assemble the pyramid: each level gets a leading indent and
        # newlines proportional to its height.
        output = ""
        n = len(tree)
        for i in range(len(tree)):
            levelOut = "".join(tree[i])
            for z in range(n - 1):
                levelOut += "\n"
            for z in range((2**n) // spacingMulti):
                levelOut = " " + levelOut
            output += levelOut
            n -= 1
        print(output)
        dist, node = self.dist_to_farthest()
        if dist > 5:
            print("WARNING: ONLY PRINTED LEVELS 0-5")

    def print_tree(self):
        """Prints out the tree from left to right."""
        if self.data is None:
            return print("Empty Tree")
        if self.left is not None:
            self.left.print_tree()
        print(f"---ID: {self.data.id} | NAME: {self.data.name} | NICKNAME: {self.data.nickname}")
        if self.right is not None:
            self.right.print_tree()
class BinaryTree(Node):
    """Convenience wrapper around Node that tracks an explicit root.

    Delegates every operation to self.root when the tree is non-empty.
    NOTE(review): inherits from Node but never calls Node.__init__, so the
    data/left/right/parent attributes are absent on the wrapper itself --
    only the delegating methods below should be used on this object.
    """

    def __init__(self):
        self.root = None
        self._add_aliases()

    def find_node(self, id):
        # Returns None implicitly when the tree is empty.
        if self.root is not None:
            return self.root.find_node(id)

    def find(self, id):
        """Same as find_node() but returns data instead of node itself."""
        # NOTE(review): despite the docstring this returns the result of
        # find_node() unchanged (a Node, not its data) -- confirm intent.
        return self.find_node(id)

    def insert(self, newData):
        if self.root is not None:
            self.root.insert(newData)
        else:
            # First insert: create the root node lazily.
            self.root = Node()
            self.root.insert(newData)

    def remove(self, node):
        # NOTE(review): Node.remove returns the new subtree root, which is
        # discarded here; deleting the root node itself may leave a stale
        # self.root -- confirm.
        if self.root is not None:
            self.root.remove(node)
        else:
            raise EmptyTreeError("Tree is empty. There is nothing to remove.")

    def replace(self, oldData, newData):
        if self.root is not None:
            self.root.replace(oldData, newData)
        else:
            raise EmptyTreeError("Tree is empty. There is nothing to replace.")

    def length(self):
        # Returns None (not 0) for an empty tree.
        if self.root is not None:
            return self.root.length()

    def depth(self):
        if self.root is not None:
            return self.root.depth()

    def display(self):
        if self.root is not None:
            self.root.display()

    def print_tree(self):
        if self.root is not None:
            self.root.print_tree()
12329900278 | #
# @lc app=leetcode.cn id=71 lang=python
#
# [71] 简化路径
#
# @lc code=start
class Solution(object):
    def simplifyPath(self, path):
        """
        :type path: str
        :rtype: str

        Canonicalize a Unix-style path with a stack of path segments:
        empty segments and "." are skipped, ".." pops one level.
        """
        stack = []
        for segment in path.split("/"):
            if segment == "" or segment == ".":
                continue
            if segment == "..":
                if stack:
                    stack.pop()
            else:
                stack.append(segment)
        return "/" + "/".join(stack)
# @lc code=end
| Llunch4w/leetcode-cn | 71.简化路径.py | 71.简化路径.py | py | 607 | python | en | code | 0 | github-code | 13 |
# context checking
# "r" will put return at a new line, and reversed otherwise
# custom command execution
# "rs" will result in re.search plus inserting "import" at the beginning
# call from snippet palette
# NOTE(review): demo snippet only -- `set` shadows the builtin type and
# `_process` is not defined in this file; presumably both are provided by
# the snippet expansion context.
for element in set:
  _process(element)
133402702 | import math
def floyd(n: int) -> int:
    """Return a non-trivial factor of n, or n itself when none is found.

    Tries trial division by small primes first, then Pollard's rho with
    Floyd cycle detection using f(x) = x^2 + 1 for a range of seeds.
    """
    def _trial_division(m: int) -> int:
        # First small prime dividing m, or m itself when none does.
        for p in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47):
            if m % p == 0:
                return p
        return m

    def _gcd(a: int, b: int) -> int:
        while b != 0:
            a, b = b, a % b
        return a

    small = _trial_division(n)
    if small != n:
        return small
    for seed in range(2, int(math.sqrt(n)) + 1):
        tortoise = seed
        hare = seed
        while True:
            # Tortoise advances one step, hare two steps of f(x) = x^2 + 1.
            tortoise = (pow(tortoise, 2) + 1) % n
            hare = pow(((pow(hare, 2) + 1) % n), 2) + 1
            hare = hare % n
            if tortoise == hare:
                break
            d = _gcd(abs(hare - tortoise), n)
            if d != 1:
                return d
    return n
def factorize(p: int) -> list:
    """Return the prime factorization of p as an ascending list of primes."""
    factors = []
    divisor = 2
    while pow(divisor, 2) <= p:
        if p % divisor == 0:
            p //= divisor
            factors.append(divisor)
        else:
            # After 2, only odd candidates need testing.
            divisor += 1 if divisor == 2 else 2
    if p > 1:
        factors.append(p)
    return factors
74667626897 | import streamlit as st
import pandas as pd
# Column configuration for st.data_editor: widget type, help text and the
# allowed values for each editable column of the outage grid.
formatacao_colunas={
    "operadora": st.column_config.SelectboxColumn(
        "Operadora",
        help="Operadora: 0=Nenhuma 1=Claro 2=BR-Digital",
        width="small",
        required=True,
        default="0",
        options=[
            "0",
            "1",
            "2"
        ]
    ),
    "predio": st.column_config.SelectboxColumn(
        "Prédio",
        help="Selecione o prédio",
        width="small",
        options=[
            "CTA01",
            "CTA03",
            "CTA05",
            "CTA06",
            "CTA09"
        ]
    ),
    # Start/end timestamps edited with second precision (step=1).
    "inicio": st.column_config.DatetimeColumn(
        "Inicio",
        format="YYYY-MM-DD HH:mm:ss",
        step=1
    ),
    "fim": st.column_config.DatetimeColumn(
        "Fim",
        format="YYYY-MM-DD HH:mm:ss",
        step=1
    ),
    "chamado": st.column_config.TextColumn(
        "Chamado",
        help="Numero do chamado da operadora"
    ),
    "incidente": st.column_config.CheckboxColumn(
        "Incidente?",
        help="Trata-se de um incidente?",
        default=False,
    )
}
# Cache the CSV in session state so user edits survive Streamlit reruns.
if 'df' not in st.session_state:
    st.session_state['df'] = pd.read_csv(
        'manut_prog.csv',
        sep=";",
        dtype={'operadora': object},
        parse_dates=['inicio','fim']
    )

######################################## INICIO #######################################
st.header('Desligamentos programados / incidentes')

# Editable grid backed by the session-state dataframe; rows may be added
# or removed (num_rows="dynamic").
df = st.data_editor(
    st.session_state['df'],
    column_config=formatacao_colunas,
    num_rows="dynamic",
    hide_index=True
)

bt_salvar = st.button('Salvar alterações')

if bt_salvar:
    # Persist the edited grid back to the semicolon-separated CSV.
    df.to_csv('manut_prog.csv', sep=";", index=False)
def merge_sort(numbers_list):
    """Sort a list of numbers with recursive merge sort.

    Returns a new sorted list when the input has more than one element;
    lists of length 0 or 1 are returned unchanged (same object), matching
    the original contract. The merge is stable (ties keep left-half order).
    """
    if len(numbers_list) <= 1:
        return numbers_list
    mid = len(numbers_list) // 2
    left = merge_sort(numbers_list[:mid])
    right = merge_sort(numbers_list[mid:])
    # Merge with explicit indices instead of the previous iterator scheme,
    # which relied on a bare `except:` catching StopIteration for control
    # flow (and would also have swallowed unrelated exceptions).
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of these is empty; extend with whatever remains of the other.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Ad-hoc benchmark: sort one million random integers and print the elapsed
# wall-clock time.
# NOTE(review): this runs at import time (no __main__ guard), so merely
# importing the module triggers the full benchmark.
import random, time

numbers = [random.randint(1, 1000000) for _ in range(0,1000000)]
t0 = time.time()
merge_sort(numbers)
print(time.time() - t0)
35834259559 | import numpy as np
import matplotlib.pyplot as plt
from genome_plot import CircosObject
import argparse
def parse_nodes(nodes_fn):
    """Parse a tab-separated nodes file into (name, length, color) tuples.

    Each line holds: name <tab> length [<tab> color]; the color defaults
    to None when the third field is absent.
    """
    nodes = []
    # Context manager closes the file (the previous version leaked the
    # handle returned by open()).
    with open(nodes_fn, "r") as handle:
        for line in handle:
            fields = line.strip().split("\t")
            name = fields[0]
            length = float(fields[1])
            color = fields[2] if len(fields) > 2 else None
            nodes.append((name, length, color))
    return nodes
def parse_edges(edges_fn):
    """Parse a tab-separated edges file.

    Each line holds: node1 <tab> position1 <tab> node2 <tab> position2
    <tab> color <tab> weight; positions are converted to float.
    """
    edges = []
    # Context manager closes the file (the previous version leaked the
    # handle returned by open()).
    with open(edges_fn, "r") as handle:
        for line in handle:
            fields = line.strip().split("\t")
            node1 = fields[0]
            position1 = float(fields[1])
            node2 = fields[2]
            position2 = float(fields[3])
            color = fields[4]
            weight = fields[5]
            edges.append((node1, position1, node2, position2, color, weight))
    return edges
def main(nodes_fn, edges_fn, out_fn, radius):
    """Build a circos plot from node/edge files and save it to out_fn."""
    nodes = parse_nodes(nodes_fn)
    edges = parse_edges(edges_fn)
    # CircosObject (from genome_plot) owns the matplotlib figure.
    c = CircosObject(nodes, edges, radius)
    c.draw()
    # Transparent background so the figure can be embedded anywhere.
    c.fig.savefig(out_fn, transparent=True)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and hand off to main().
    parser=argparse.ArgumentParser()
    parser.add_argument('--nodes', help='nodes file name', required=True)
    parser.add_argument('--edges', help='edges file name', required=True)
    parser.add_argument('--o', help='output file name', required=True)
    parser.add_argument('--radius', help='radius to use', type=int, default=10)
    args=parser.parse_args()
    nodes_fn = args.nodes
    edges_fn = args.edges
    out_fn = args.o
    radius = args.radius
    main(nodes_fn, edges_fn, out_fn, radius)
20182534822 | import random
from pygame import image, Color
from random import randint
from math import sqrt
moveimage = image.load('images/move_map.png')
dotimage = image.load('images/dot_map.png')
# (x, y), (indices the ghosts may move to next) -> waypoints on the map
# between which the ghosts travel.
# BUGFIX: single-exit entries previously used plain parentheses -- e.g.
# (24) -- which is an int, not a tuple; len()/indexing on them would raise
# TypeError (they only survived via special-casing of indices 29/56/57 in
# the movement code). They are now proper one-tuples.
map_point=[((35,100),(1,6)),
           ((130,100),(0,2,7)),
           ((270,100),(1,9)),
           ((330,100),(10,4)),
           ((470,100),(3,5,12)),
           ((570,100),(4,13)),
           ((35,180),(0,7,14)),
           ((130,180),(6,1,8,15)),
           ((210,180),(7,9,16)),
           ((270,180),(2,8,10)),
           ((330,180),(9,11,3)),
           ((390,180),(10,12,19)),
           ((470,180),(11,13,20,4)),
           ((570,180),(5,12,21)),
           ((35,260),(6,15)),
           ((130,260),(7,14,27)),
           ((210,240),(8,17)),
           ((270,240),(16,23)),
           ((330,240),(19,25)),
           ((390,240),(18,11)),
           ((470,260),(12,21,31)),
           ((570,260),(20,13)),
           ((210,300),(28,23)),
           ((270,300),(22,24,17)),
           ((300,300),(23,25)),
           ((330,300),(24,26,18)),
           ((390,300),(25,30)),
           ((130,360),(15,28,35,56)),
           ((210,360),(27,22,32)),
           ((300,360),(24,)),
           ((390,360),(26,31,33)),
           ((470,360),(20,30,36,57)),
           ((210,420),(28,33,39)),
           ((390,420),(30,32,42)),
           ((35,460),(35,44)),
           ((130,460),(34,27,38)),
           ((470,460),(31,37,43)),
           ((570,460),(36,51)),
           ((130,480),(35,39,45)),
           ((210,480),(38,32,40)),
           ((270,480),(39,47)),
           ((330,480),(42,48)),
           ((390,480),(41,43)),
           ((470,480),(36,42,50)),
           ((35,565),(34,45,52)),
           ((130,565),(38,44,46)),
           ((220,565),(45,47,53)),
           # NOTE(review): entry 47 lists itself (47) as a neighbour --
           # probably a typo in the map data; confirm against the board.
           ((270,565),(46,47,40)),
           ((330,565),(47,49,41)),
           ((385,565),(48,50,54)),
           ((470,565),(49,43,51)),
           ((570,565),(37,50,55)),
           ((35,620),(44,53)),
           ((220,620),(46,52,54)),
           ((385,620),(49,53,55)),
           ((570,620),(51,54)),
           # Indices 56/57 are the left/right tunnel mouths.
           ((0, 360),(27,)),
           ((580,360),(31,))]
# Pacman movement
def check_move_point(pacman):
    """Return True if pacman may take its next 3-px step.

    Reads pacman.keys_active for the requested direction, teleports pacman
    horizontally when leaving the screen through a tunnel, and otherwise
    checks the walkability mask image (a black pixel means walkable).
    """
    move_x, move_y = 0, 0
    if pacman.keys_active['right']:
        move_x = 3
    elif pacman.keys_active['left']:
        move_x = -3
    elif pacman.keys_active['up']:
        move_y = -3
    elif pacman.keys_active['down']:
        move_y = 3
    # Horizontal wrap-around (left/right tunnel).
    if pacman.x+move_x < 0:
        pacman.x = 585
        return True
    elif pacman.x+move_x+pacman.width/2 > 600:
        pacman.x = 0
        return True
    # Non-black mask pixel = wall. The y-60 offset presumably compensates
    # for a 60 px HUD band above the maze -- TODO confirm.
    if moveimage.get_at((int(pacman.x+move_x), int(pacman.y+move_y-60))) != Color('black'):
        return False
    return True
# Ghost movement
def get_possible_directions(ghost):
    """Pick a random next waypoint for the ghost.

    Special cases: index 29 is a dead end (always back to 24); 56/57 are
    the tunnel mouths, which teleport the ghost to the opposite side.
    Otherwise a random neighbour of the current waypoint is chosen,
    re-rolling to avoid immediately backtracking.
    Returns (index, (x, y)) of the chosen waypoint.
    """
    last_index = ghost.new_point_index
    index_move = -1
    if last_index == 29:
        index_move = 24
    elif last_index == 56:
        # Left tunnel mouth: teleport to the right-hand entrance.
        ghost.x = 580
        ghost.y = 360
        index_move = 31
    elif last_index == 57:
        # Right tunnel mouth: teleport to the left-hand entrance.
        ghost.x = 0
        ghost.y = 360
        index_move = 27
    else:
        for i, index in enumerate(map_point):
            if last_index == i:
                index_move = index[1][randint(0, len(index[1])-1)]
                # NOTE(review): if a waypoint's only neighbour were the
                # ghost's previous point this loop would never terminate --
                # presumably the map data guarantees >= 2 options here.
                while index_move == ghost.last_point_index:
                    index_move = index[1][randint(0, len(index[1]) - 1)]
    #print(map_point[index_move][0])
    return index_move, map_point[index_move][0]
def get_possible_directions_near_pacman(ghost, pacman_pos):
    """Pick the ghost's next waypoint, preferring the neighbour closest to pacman.

    Mirrors get_possible_directions() for the special indices 29/56/57;
    otherwise chooses the neighbour within 150 px that minimises the
    distance to pacman_pos, falling back to a random neighbour when none
    is close enough. Returns (index, (x, y)).
    """
    last_index = ghost.new_point_index
    #print("Last index: ", last_index)
    index_move = -1
    if last_index == 29:
        index_move = 24
    elif last_index == 56:
        # Tunnel: teleport to the right-hand entrance.
        ghost.x = 580
        ghost.y = 360
        index_move = 31
    elif last_index == 57:
        # Tunnel: teleport to the left-hand entrance.
        ghost.x = 0
        ghost.y = 360
        index_move = 27
    else:
        for i, index in enumerate(map_point):
            if last_index == i:
                # Only neighbours closer than 150 px to pacman are candidates.
                distance_checer = 150
                # NOTE(review): the inner loop reuses the name `i`, shadowing
                # the enumerate variable -- harmless here but fragile.
                for i in range(0, len(index[1])):
                    distance = sqrt(pow(map_point[index[1][i]][0][0] - pacman_pos[0], 2) + pow(map_point[index[1][i]][0][1] - pacman_pos[1], 2))
                    if int(distance) < distance_checer:
                        index_move = index[1][i]
                        distance_checer = int(distance)
    # No neighbour close to pacman: fall back to random wandering.
    if index_move == -1:
        direction = get_possible_directions(ghost)
        index_move = direction[0]
    return index_move, map_point[index_move][0]
def serch_direction(ghost):
    """Find the map waypoint nearest to the given (x, y) position.

    Returns (index, (x, y)) of the closest entry in map_point; distances
    are compared as truncated ints and ties keep the lower index.
    """
    closest = -1
    shortest = 1500  # larger than any possible on-screen distance
    for idx in range(len(map_point)):
        point_x, point_y = map_point[idx][0]
        dist = int(sqrt(pow(point_x - ghost[0], 2) + pow(point_y - ghost[1], 2)))
        if dist < shortest:
            closest = idx
            shortest = dist
    #print(closest)
    return closest, map_point[closest][0]
def get_distans(ghost):
    """Return the pixel distance between the ghost's last and next waypoints.

    Waypoint pairs share one coordinate (the map is axis-aligned), so the
    non-zero axis difference is the travel distance.
    (Removed the unused local `distans` from the original.)
    """
    last_point_x = map_point[ghost.last_point_index][0][0]
    last_point_y = map_point[ghost.last_point_index][0][1]
    new_point_x = map_point[ghost.new_point_index][0][0]
    new_point_y = map_point[ghost.new_point_index][0][1]
    if abs(last_point_x-new_point_x) > 0:
        return int(abs(last_point_x-new_point_x))
    return int(abs(last_point_y - new_point_y))
#new_position = get_possible_directions()
#print(new_position[0])
#print(new_position[1])
# Dot (coin) placement
def check_dot_point(x, y):
    """Classify the dot-map pixel at (x, y).

    Returns 1 for a blue pixel, 2 for a green pixel, 0 otherwise.
    Presumably blue marks regular dots and green the power dots --
    confirm against images/dot_map.png.
    """
    point = int(x), int(y)
    if dotimage.get_at(point) == Color('blue'):
        return 1
    elif dotimage.get_at(point) == Color('green'):
        return 2
    else:
        return 0
24724731536 | from os import system
import alminer
import pandas as pd
from astroquery.alma import Alma
from astropy.io import fits
import numpy as np
import os
# Below license is for ALminer since we have modified some code from there
"""
MIT License
Copyright (c) 2021 Aida Ahmadi , Alvaro Hacar
Permission is hereby granted , free of charge , to any person obtaining a copy
of this software and associated documentation files (the " Software ") , to
deal in the Software without restriction , including without limitation the rights
to use , copy , modify , merge , publish , distribute , sublicense , and/or sell
copies of the Software , and to permit persons to whom the Software is
furnished to do so , subject to the following conditions :
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software .
THE SOFTWARE IS PROVIDED "AS IS" , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER
LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM
, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE .
"""
"""Modified version of alminer.download_data. Here, the download randomly
selects 500 files to download and download these. The code for this can
be found on line 167."""
##############################################
# Libraries
##############################################
from constants_copy import band_names, band_color, band_min_freq, band_max_freq, \
CO_line_names, CO_line_freq, CO_line_ha, CO_line_label, VALID_KEYWORDS_STR, \
NEW_COLUMNS, COLUMN_TYPES
from pyvo.dal import tap
from astroquery.alma import Alma
from matplotlib.ticker import FormatStrFormatter, NullFormatter
import matplotlib.pyplot as plt
from astropy import constants as const
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import name_resolve
from astropy.coordinates import get_icrs_coordinates
from astropy.coordinates import Angle
import os
import re
import pandas as pd
import numpy as np
import random
# Print full numpy arrays (no "..." truncation) when query results are dumped.
np.set_printoptions(threshold=np.inf)
def _format_bytes(size):
"""Convert the size of the dota to be downloaded in human-readable format."""
power = 1000
n = 0
power_labels = {0: 'B', 1: 'KB', 2: 'MB',
3: 'GB', 4: 'TB', 5: 'PB', 6: 'EB'}
while size > power:
size /= power
n += 1
return size, power_labels[n]
def download_data_mod(observations, fitsonly=False, dryrun=False, print_urls=False, filename_must_include='',
                      location='./data', archive_mirror='ESO', n_fits=500):
    """
    Download a random subset of ALMA data from the archive to the local machine.

    Parameters
    ----------
    observations : pandas.DataFrame
        This is likely the output of e.g. 'conesearch', 'target', 'catalog', & 'keysearch' functions.
    fitsonly : bool, optional
        (Default value = False)
        Download individual fits files only (fitsonly=True). This option will not download the raw data
        (e.g. 'asdm' files), weblogs, or README files.
    dryrun : bool, optional
        (Default value = False)
        Allow the user to do a test run to check the size and number of files to download without actually
        downloading the data (dryrun=True). To download the data, set dryrun=False.
    print_urls : bool, optional
        (Default value = False)
        Write the list of urls to be downloaded from the archive to the terminal.
    filename_must_include : list of str, optional
        (Default value = '')
        A list of strings the user wants to be contained in the url filename. This is useful to restrict the
        download further, for example, to data that have been primary beam corrected ('.pbcor') or that have
        the science target or calibrators (by including their names). In most recent cycles, the science
        target can be filtered out with the flag '_sci' or its ALMA target name.
    location : str, optional
        (Default value = ./data)
        directory where the downloaded data should be placed.
    archive_mirror : str, optional
        (Default value = 'ESO')
        The archive service to use. Options are:
        'ESO' for Europe (https://almascience.eso.org),
        'NRAO' for North America (https://almascience.nrao.edu), or
        'NAOJ' for East Asia (https://almascience.nao.ac.jp)
    n_fits : int, optional
        (Default value = 500)
        Maximum number of randomly selected files to download. Capped at the
        number of files actually available.
    """
    print("================================")
    # astroquery handles the actual transfer
    myAlma = Alma()
    default_location = './data'
    myAlma.cache_location = default_location
    # Resolve the mirror URL; any unrecognized value falls back to ESO.
    if archive_mirror == 'NRAO':
        mirror = "https://almascience.nrao.edu"
    elif archive_mirror == 'NAOJ':
        mirror = "https://almascience.nao.ac.jp"
    else:
        mirror = "https://almascience.eso.org"
    myAlma.archive_url = mirror
    # catch the case where the DataFrame is empty (observations is None / not a DataFrame).
    try:
        if any(observations['data_rights'] == 'Proprietary'):
            print("Warning: some of the data you are trying to download are still in the proprietary period and are "
                  "not publicly available yet.")
            observations = observations[observations['data_rights'] == 'Public']
        uids_list = observations['member_ous_uid'].unique()
        # when len(uids_list) == 0, the DataFrame included only proprietary data
        # and it was just emptied by the filter above
        if len(uids_list) == 0:
            print("len(uids_list)==0")
            print("No data to download. Check the input DataFrame. It is likely that your query results include only "
                  "proprietary data which cannot be freely downloaded.")
            return
    # this is the case where the query had no results to begin with.
    except TypeError:
        print("type error")
        print("No data to download. Check the input DataFrame.")
        return
    # change download location if specified by user, else the location will be a folder called 'data'
    # in the current working directory
    if location != default_location:
        if os.path.isdir(location):
            myAlma.cache_location = location
        else:
            print("{} is not a directory. The download location will be set to {}".format(
                location, default_location))
            myAlma.cache_location = default_location
    elif (location == default_location) and not os.path.isdir(location):  # create the 'data' subdirectory
        os.makedirs(default_location)
    if fitsonly:
        data_table = myAlma.get_data_info(uids_list, expand_tarfiles=True)
        # keep only rows whose 'access_url' ends in ".fits" and contains every
        # string supplied in 'filename_must_include'
        dl_table = data_table[[i for i, v in enumerate(data_table['access_url']) if v.endswith(".fits") and
                               all(i in v for i in filename_must_include)]]
    else:
        data_table = myAlma.get_data_info(uids_list, expand_tarfiles=False)
        # keep only rows whose 'access_url' contains every string supplied in
        # 'filename_must_include'
        dl_table = data_table[[i for i, v in enumerate(data_table['access_url']) if
                               all(i in v for i in filename_must_include)]]
    dl_df = dl_table.to_pandas()
    # Drop rows without a download URL BEFORE sampling, so every sampled row is
    # actually downloadable (the original sampled first and could pick empties).
    dl_df = dl_df.loc[dl_df.access_url != '']
    # Randomly pick at most n_fits files. Capping with min() avoids the
    # pandas ValueError raised by sample(n) when n exceeds the row count.
    dl_df = dl_df.sample(min(n_fits, len(dl_df)))
    dl_link_list = list(dl_df['access_url'].unique())
    # keep track of the download size and number of files to download
    dl_size = dl_df['content_length'].sum()
    dl_files = len(dl_df['access_url'].unique())
    dl_uid_list = list(dl_df['ID'].unique())
    if dryrun:
        print("This is a dryrun. To begin download, set dryrun=False.")
        print("================================")
    else:
        print("Starting download. Please wait...")
        print("================================")
        try:
            myAlma.download_files(dl_link_list, cache=True)
        except ValueError as e:
            print(e)
    if dl_files > 0:
        print("Download location = {}".format(myAlma.cache_location))
        print("Total number of Member OUSs to download = {}".format(len(dl_uid_list)))
        print("Selected Member OUSs: {}".format(dl_uid_list))
        print("Number of files to download = {}".format(dl_files))
        dl_size_fmt, dl_format = _format_bytes(dl_size)
        print("Needed disk space = {:.1f} {}".format(dl_size_fmt, dl_format))
        if print_urls:
            print("File URLs to download = {}".format("\n".join(dl_link_list)))
    else:
        print("Nothing to download.")
        print("Note: often only a subset of the observations (e.g. the representative window) is ingested into "
              "the archive. In such cases, you may need to download the raw dataset, reproduce the calibrated "
              "measurement set, and image the observations of interest. It is also possible to request calibrated "
              "measurement sets through a Helpdesk ticket to the European ARC "
              "(see https://almascience.eso.org/local-news/requesting-calibrated-measurement-sets-in-europe).")
    print("--------------------------------")
# From here on there's download, we only want to download random amount of X declared when calling function
| nkatshiba/Alma-bachelor-project | alma-classifier/alma_classifier/data_acquisition/alminer_mod.py | alminer_mod.py | py | 10,072 | python | en | code | 0 | github-code | 13 |
14275574486 | import bpy
import bmesh
import math
from . import object_manager
from . import settings_manager
def set_normals_to_outside(context, objects, only_recalculate_if_flagged = True):
    '''
    Set normals of objects so that they point outside of the mesh
    (convex direction).

    Set normals is an issue with planes, since it can invert normals.
    Do not recalculate normals if:
    - object has a dimension that is close to zero (i.e. flat plane)
    Make sure normals are set BEFORE joining objects, since joining
    changes bounding box!

    :param context: Blender context used for mode/selection bookkeeping.
    :param objects: iterable of objects whose mesh normals are recalculated.
    :param only_recalculate_if_flagged: when True, only objects carrying a
        truthy custom property 'recalculate_normals' are processed.
    '''
    # Remember the current mode and selection so they can be restored at the end.
    orig_mode = settings_manager.get_mode(context)
    selection_data = settings_manager.get_selection_data(context)
    for object in objects:
        # Continue if this object should not be included in normal recalculation
        if only_recalculate_if_flagged:
            if not object.get('recalculate_normals'):
                continue
        # Select object (operators below act on the current selection)
        bpy.ops.object.mode_set(mode = 'OBJECT')
        object_manager.select_objects(context, 'REPLACE', [object], True)
        # Select all elements in edit mode
        bpy.ops.object.mode_set(mode = 'EDIT')
        bpy.ops.mesh.select_all(action = 'SELECT')
        print("set normal to outside on object " + object.name)
        # Recalculate normals pointing outwards (inside=False).
        bpy.ops.mesh.normals_make_consistent(inside=False)
    # Restore mode, selection and active object
    bpy.ops.object.mode_set(mode = orig_mode)
    settings_manager.restore_selected_objects(context, selection_data)
def apply_custom_split_normal(context, objects):
    '''
    Apply custom split normal data to each object and enable auto smooth.

    :param context: Blender context; its active object is restored afterwards.
    :param objects: objects to add custom split-normal data to.
    '''
    selected_objects = context.selected_objects
    active_object = context.active_object
    # Apply custom split normals per object (operator acts on the active object)
    for object in objects:
        context.view_layer.objects.active = object
        bpy.ops.mesh.customdata_custom_splitnormals_add()
        object.data.use_auto_smooth = True
    # Restore selection and active object
    # NOTE(review): the original selection captured in selected_objects is not
    # used here -- the restore re-selects *objects* instead; confirm intent.
    object_manager.select_objects(context, 'REPLACE', objects)
    context.view_layer.objects.active = active_object
def handle_uv_naming_before_joining_objects(context, objects):
    '''
    Rename UV maps if needed for the join process.
    Make sure all objects' active UV has the same name so that it is not
    lost during the join process.

    :param context: unused, kept for signature compatibility with callers.
    :param objects: objects whose data.uv_layers are inspected/renamed.
    '''
    # Pass 1: decide whether renaming is necessary. The reference name is the
    # active-render UV of the first object; renaming is needed when any other
    # object's active-render UV has a different name.
    uv_renaming_needed = False
    main_uv_name = ""
    for object in objects:
        # Reset per object: the original carried the previous object's value
        # over, and raised NameError when the first object had no UV layers.
        active_uv_name = None
        for uv in object.data.uv_layers:
            if uv.active_render == True:
                active_uv_name = uv.name
                if object == objects[0]:
                    main_uv_name = uv.name
        if active_uv_name is not None and active_uv_name != main_uv_name:
            uv_renaming_needed = True
    # Pass 2: unify the active-render UV name across all objects.
    if uv_renaming_needed:
        for object in objects:
            for uv in object.data.uv_layers:
                if uv.active_render == True:
                    uv.name = "active uv"
def get_meshes_list_from_objects(objects, exclude_objects=()):
    """Return the mesh datablocks (obj.data) of all MESH-type objects.

    :param objects: iterable of Blender objects to inspect.
    :param exclude_objects: objects to skip. Defaults to an immutable tuple:
        the original used a mutable default list, a classic Python pitfall
        (the same list object is shared between calls).
    :return: list of mesh datablocks in input order.
    """
    return [obj.data
            for obj in objects
            if obj.type == "MESH" and obj not in exclude_objects]
def delete_meshes(context, meshes):
    """Best-effort removal of mesh datablocks from the blend file.

    Meshes that cannot be removed (e.g. already deleted) are skipped silently.

    :param context: unused, kept for signature compatibility with callers.
    :param meshes: iterable of mesh datablocks to remove via bpy.data.
    """
    for mesh in meshes:
        try:
            bpy.data.meshes.remove(mesh)
        except Exception:
            # Narrowed from a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit. Still deliberately best-effort.
            pass
def set_sharp_edges_from_auto_smooth(context, objects, set_max_smooth_angle = False):
    '''
    Set hard edges on each objects mesh data based on settings in normal auto smooth
    Also force auto smooth with max angle setting
    Note: I found that it was much better to use split custom normal instead
    so this function is not used any longer. Keeping it just in case

    :param context: unused, kept for call-site compatibility.
    :param objects: single object or list of objects to process.
    :param set_max_smooth_angle: when True, force use_auto_smooth on with the
        maximum angle (~pi) after marking edges.
    '''
    # Ensure objects is list (a single object may be passed)
    if not type(objects) == list:
        objects = [objects]
    for object in objects:
        if object.data.use_auto_smooth == True:
            auto_smooth_angle = object.data.auto_smooth_angle
        else:
            # Set to max auto_smooth_angle (~pi radians, i.e. smooth everything)
            auto_smooth_angle = 3.14159
        # Edit edge smoothness through a temporary bmesh copy of the mesh.
        # NOTE(review): the bmesh is never freed with bm.free(); Blender will
        # garbage-collect it eventually, but freeing explicitly is recommended.
        bm = bmesh.new()
        bm.from_mesh(object.data)
        bm.edges.ensure_lookup_table()
        for edge in bm.edges:
            # Already sharp: leave as is.
            if not edge.smooth:
                continue
            # Non-manifold edges (border/多-face) are always marked sharp.
            if not edge.is_manifold:
                edge.smooth = False
                continue
            # Angle between the two adjacent faces, in radians.
            angle = edge.calc_face_angle()
            if angle > auto_smooth_angle:
                edge.smooth = False
            else:
                edge.smooth = True
        # Write bmesh to data
        bm.to_mesh(object.data)
        # Force auto smooth with max angle setting
        if set_max_smooth_angle:
            object.data.use_auto_smooth = True
            object.data.auto_smooth_angle = 3.14159
def create_cage_from_objects(context, objects, fatten_amount, cage_name = "cage_object"):
    '''
    Create a cage mesh object by joining duplicates of
    multiple objects. Each vertex is moved in it's positive
    normal direction by the "fatten" value

    :param context: Blender context; selection is restored before returning.
    :param objects: objects to duplicate and merge into the cage.
    :param fatten_amount: distance each vertex is pushed along its normal.
    :param cage_name: name given to the resulting joined object.
    :return: the joined cage object (the active object after the join).
    '''
    orig_selection = context.selected_objects
    object_manager.select_objects(context, 'REPLACE', objects, True)
    # Work on duplicates so the source objects are untouched.
    bpy.ops.object.duplicate()
    cage_objects = context.selected_objects
    # Fatten: push all vertices outward along their normals.
    # NOTE(review): in Blender, context.mode in mesh edit mode is
    # 'EDIT_MESH', not 'EDIT', so this condition may always be True -- confirm.
    if not context.mode == 'EDIT':
        bpy.ops.object.editmode_toggle()
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.transform.shrink_fatten(value = fatten_amount)
    bpy.ops.object.editmode_toggle()
    # Join objects into a single cage mesh
    bpy.ops.object.join()
    context.active_object.name = cage_name
    joined_cage_object = context.active_object
    # Restore selection
    object_manager.select_objects(context, 'REPLACE', orig_selection)
    # Return cage object
    return joined_cage_object
| Tilapiatsu/blender-custom_config | scripts/addon_library/local/BystedtsBlenderBaker/mesh_manager.py | mesh_manager.py | py | 5,907 | python | en | code | 5 | github-code | 13 |
42904322000 | import cv2
from object_detector import *
import numpy as np
import pyrebase
# Firebase web-app credentials for the realtime database.
# NOTE(review): hard-coding an API key in source is a security risk; load it
# from the environment instead. The dict contains both "databaseURL" and a
# misspelled "databseURL"; pyrebase reads "databaseURL" -- the misspelled
# duplicate is kept only to leave the config payload unchanged.
config = {
    "apiKey": "AIzaSyC89FK4pNLaftno-VAKpCPJVQxIKDi7ung",
    "authDomain": "pythondbtest-8bff7.firebaseapp.com",
    "databaseURL": "https://pythondbtest-8bff7-default-rtdb.firebaseio.com",
    "databseURL": "https://pythondbtest-8bff7-default-rtdb.firebaseio.com/",
    "projectId": "pythondbtest-8bff7",
    "storageBucket": "pythondbtest-8bff7.appspot.com",
    "messagingSenderId": "1099219286383",
    "appId": "1:1099219286383:web:7d5a5da0de3b573e05d09a",
    "measurementId": "G-XG4CMK1W94"
}
firebase = pyrebase.initialize_app(config)
database = firebase.database()

# Aruco detector: a 5x5 marker of known physical size in the image provides
# the pixel-to-centimetre scale.
parameters = cv2.aruco.DetectorParameters_create()
aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_5X5_50)

# Detector for objects on a homogeneous background (project-local class).
detector = HomogeneousBgDetector()

img = cv2.imread("sampleImg10.jpg")

# Locate the Aruco marker.
corners, _, _ = cv2.aruco.detectMarkers(img, aruco_dict, parameters=parameters)

weights = []
lengths = []

# Outline the marker on the annotated output image.
int_corners = np.int0(corners)
cv2.polylines(img, int_corners, True, (0, 255, 0), 5)

# The marker's real perimeter is 20 cm, so this ratio is pixels per cm.
aruco_perimeter = cv2.arcLength(corners[0], True)
pixel_cm_ratio = aruco_perimeter / 20

contours = detector.detect_objects(img)
total_weight = 0
print(contours)

# Measure and annotate every detected object.
for cnt in contours:
    # Minimum-area (rotated) bounding rectangle of the contour.
    rect = cv2.minAreaRect(cnt)
    (x, y), (l, b), angle = rect

    # Convert the rectangle sides from pixels to centimetres.
    object_length = l / pixel_cm_ratio
    object_breadth = b / pixel_cm_ratio

    # Draw the rectangle, its centre, and the measurements on the image.
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)
    cv2.polylines(img, [box], True, (255, 0, 0), 2)
    cv2.putText(img, "Length {} cm".format(round(max(object_length,object_breadth), 2)), (int(x - 100), int(y - 20)), cv2.FONT_HERSHEY_PLAIN, 2, (100, 200, 0), 2)
    cv2.putText(img, "Breadth {} cm".format(round(min(object_length,object_breadth), 2)), (int(x - 100), int(y + 15)), cv2.FONT_HERSHEY_PLAIN, 2, (100, 200, 0), 2)

    # Store the longer side as the length so it matches the "Length"
    # annotation (the original stored the unrotated side, which could
    # actually be the breadth depending on the rectangle's orientation).
    lengths.append(max(object_length, object_breadth))

    # Empirical length->weight model: w = 0.0203 * L^3 (grams).
    weight = 0.0203 * (max(object_length, object_breadth) ** 3)
    weights.append(weight)
    cv2.putText(img, "Weight {} g".format(round(weight, 2)), (int(x - 50), int(y - 50)), cv2.FONT_HERSHEY_PLAIN, 2,(100, 200, 0), 2)

cv2.imwrite('pic2.jpg', img)
print(lengths)

# Push every measurement to the database and accumulate the total weight.
# BUGFIX: the original iterated range(0, len(weights) - 1), silently dropping
# the last object from both the database and the total.
for i in range(len(weights)):
    data1 = {
        "length": lengths[i],
        "weight": weights[i]
    }
    database.push(data1)
    total_weight = total_weight + weights[i]
print(weights)

data2 = {
    "weight": total_weight
}
database.push(data2)

cv2.imshow("Image", img)
cv2.waitKey(0)
37882382440 | from collections import defaultdict, deque
# Shorthand input helpers: one int per line / a whole line of ints.
read = lambda: int(input())
readline = lambda: list(map(int, input().split()))
# Board cell value marking an apple.
APPLE = 1
# Turn schedule: rotates[t] is "D" or "L" when a turn happens after second t;
# defaultdict(str) yields "" (no turn) for every unscheduled second.
rotates = defaultdict(str)
N = read()  # board is N x N
K = read()  # number of apples
# 1-based board: row 0 / column 0 are unused padding.
board = [[0] * (N + 1) for _ in range(N + 1)]
for _ in range(K):
    r, c = readline()
    board[r][c] = APPLE
L = read()  # number of scheduled turns
for _ in range(L):
    r, c = input().split()
    r = int(r)
    rotates[r] = c
# Snake body as a deque of (row, col); head at index 0, starting at (1, 1).
python = deque([(1, 1)])
# Direction vectors cycled by turns: right, down, left, up.
DIR = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check_hit_wall(r, c):
    """Return True when (r, c) lies outside the 1..N x 1..N board."""
    return not (1 <= r <= N and 1 <= c <= N)
def solution():
    """Simulate the snake on the global board; return the second at which
    the snake first crashes into a wall or its own body."""
    cur_dir = 0   # index into DIR; the snake starts moving right
    time = 0
    while True:
        # Candidate next head position.
        dr, dc = python[0][0] + DIR[cur_dir][0], python[0][1] + DIR[cur_dir][1]
        # Crash: outside the board, or the new head lands on the body.
        # NOTE(review): the body check runs before the tail is vacated, so
        # moving into the current tail cell counts as a crash here -- confirm
        # against the problem statement.
        if check_hit_wall(dr, dc) or (len(python) > 1 and (dr, dc) in list(python)[1:]):
            return time + 1
        if not check_hit_wall(dr, dc) and board[dr][dc] == APPLE:
            # Apple eaten: grow (keep tail) and clear the cell.
            python.appendleft((dr, dc))
            board[dr][dc] = 0
        else:
            # Plain move: drop the tail, add the new head.
            v = python.pop()
            python.appendleft((dr, dc))
        time += 1
        # Apply a scheduled turn at the end of this second.
        if rotates[time] == "D":
            cur_dir = (cur_dir + 1) % 4   # clockwise
        elif rotates[time] == "L":
            cur_dir = (cur_dir - 1) % 4   # counter-clockwise

# Input is consumed at import time above; run the simulation and print the answer.
print(solution())
69899904657 | #!/usr/local/bin/python3
#coding: utf-8
#extrac
##################################################################################################################################################################
# Created on 21 de Julho de 2021
#
# Projeto base: Banco Braavos
# Repositorio: Origem
# Author: Maycon Cypriano Batestin
#
##################################################################################################################################################################
##################################################################################################################################################################
#imports
import json
import csv
from faker import Faker
import random
from datetime import date, datetime
#setup
# ---------------------------------------------------------------------------
# Simulation setup
# ---------------------------------------------------------------------------
faker = Faker()
val = int(input("Enter the number of records you want to generate: "))

# Root folder for the generated JSON files (one subfolder per entity).
# NOTE(review): hard-coded absolute Windows path; make configurable if the
# script must run anywhere else.
BASE_DIR = "C:/Users/Bates/Documents/Repositorios/SPARK/IronBankBraavos/source"

# Value pools shared by several fields. Hoisted so the identical literal lists
# are not re-typed per field; the duplicated entries ("Unknown", "Uninformed",
# "salary bill", "Mother Rhoyne") are preserved, as they bias the random draw.
CULTURES = ["Northmen", "Braavosi", "Free Folk", "Andalos", "Dothraki",
            "Roinar", "iron men", "Valyrian", "ghiscari", "First Men",
            "Westeros", "Unknown", "Uninformed"]

CONTINENTS = [
    {"Westeros": ("North of the Wall ", "The North ", "The Riverlands",
                  "The Vale", "The Iron Islands", "The Westerlands",
                  "The Reach", "The Crownlands", "The Stormlands", "Dorne")},
    {"Essos": ("Free Cities", "Slave's Bay", "Dothraki Sea", "Red Waste",
               "Valyrian peninsula", "Other")},
    {"Sothoryos": ("Naath", "Isle of Tears", "Basilisk Point")},
]

CITIES = ["Braavos", "Lorath", "Lys", "Magisters", "Myr", "Norvos", "Pentos",
          "Qohor", "Tyrosh", "Volantis", "Unknown", "Uninformed", "Asshai",
          "Astapor", "New Ghis", "Asabhad", "Bayasabhad", "Carcosa",
          "Cities of the Bloodless Men", "City of the Winged Men", "Ebonhead",
          "Elyria", "Faros", "Gulltown", "Hesh", "Ib Nor", "Ib Sar", "Jinqi",
          "K'Dath", "Kayakayanaya", "King's Landing", "Kosrak", "Lannisport",
          "Leng Ma", "Leng Yi", "Lhazosh", "Lotus Point", "Mantarys",
          "Meereen", "Oldtown", "Qarth", "Vaes Dothrak", "White Harbor",
          "Yunkai", "Winterfell", "Unknown", "Uninformed"]

RELIGIONS = ["other", "Uninformed", "old gods", "Faith of the Seven",
             "Drowned God", "Many-Faced God", "Dothraki Religion",
             "Gardens of Gelenei"]

GODS = ["other", "Uninformed", "Sun", "Moon", "Moonsingers",
        "Fountain of the Drunken God", "R'hllor", "Great Other",
        "Mother Rhoyne", "Aquan the Red Bull", "Bakkalon", "Black Goat",
        "Great Shepherd", "Hooded Wayfarer", "horse god", "Lady of Spears",
        "Lion of Night", "Merling King", "Moon-Pale Maiden", "Pattern",
        "Semosh and Selloso", "Silent God", "Stone Cow of Faros",
        "Father of Waters", "Weeping Lady of Lys", "Pantera",
        "Yndros of the Twilight", "Saagael", "Maiden-Made-of-Light",
        "Cult of Starry Wisdom", "Moon Mother", "Mother Rhoyne"]

TRANSACTION_KINDS = ["Dragons", "brothel", "golden company", "Marketplace",
                     "witchcraft", "horses", "wolfs", "Loan to Finance War",
                     "Tribute to the Gods", "immaculate army", "slaves",
                     "ships", "swords", "royal forgiveness", "hill wine",
                     "dormant wine", "winter clothing", "fishing vessel",
                     "Iron Island Men's Fleet", "theater", "dothraki army",
                     "savage army", "night patrol army", "Royal Guard army",
                     "army of the dead", "Giants Army", "horn of Joramun"]

ACCOUNT_TYPES = ["checking account", "savings account", "salary bill",
                 "salary bill", "dragon account",
                 "account for the long winter"]

INVESTOR_PROFILES = ["Reliable", "Vicious", "Risky", "Crazy", "Temporary"]


def _write_json(subdir, prefix, num, payload):
    """Serialize *payload* to <BASE_DIR>/<subdir>/<prefix>_<num>.json."""
    with open(f"{BASE_DIR}/{subdir}/{prefix}_{num}.json", "w", encoding="utf-8") as out:
        json.dump(payload, out, indent=True, separators=(',', ':'), ensure_ascii=False)


def _pick_continent():
    """Pick a continent and one of its regions; returns (continent, region)."""
    chosen = random.choice(CONTINENTS)
    # Each dict holds exactly one continent -> regions entry.
    continent, regions = next(iter(chosen.items()))
    return continent, random.choice(regions)


print("Starting the simulation...")
for num in range(1, val + 1):
    # ----- identity -------------------------------------------------------
    female_name = faker.first_name_female()
    male_name = faker.first_name_male()
    # Pick a gender together with a matching first name.
    gend_name_dict = random.choice([{"Female": female_name}, {"Male": male_name}])
    first_name = list(gend_name_dict.values())[0]
    gender = list(gend_name_dict.keys())[0]
    lastname = faker.last_name()
    fullname = f"{first_name} {lastname}"
    title = f"{faker.prefix()} {first_name} of house {lastname}"
    slogan = faker.sentence(nb_words=random.randint(1, 10), variable_nb_words=False)
    id_client = faker.bban()
    itin = faker.ssn()
    birth_date = f"{faker.date_of_birth()}"  # renamed: original shadowed datetime.date
    religion = random.choice(RELIGIONS).upper()
    gods = random.choice(GODS).upper()

    # Partition date shared by every document of this client.
    future = faker.future_date()
    yearmonthday = f"{future.strftime('%Y%m%d')}"

    # ----- geography ------------------------------------------------------
    cultureBorn = random.choice(CULTURES)
    cultureLiving = random.choice(CULTURES)
    born_continent, born_region = _pick_continent()
    bornCity = random.choice(CITIES)
    currentCity = random.choice(CITIES)
    postalcode = f"{faker.postcode()}"
    street = faker.street_name()
    number_street = f"{faker.building_number()}"

    # ----- money ----------------------------------------------------------
    # Signed amounts rendered as strings, e.g. "+1234.0" / "-98765.0".
    valor_positivo = f"+{float(random.randint(1, 999999))}"
    valor_negativo = f"-{float(random.randint(1, 999999))}"
    valorliquido = random.choice([valor_positivo, valor_negativo])  # net income
    descricao_metodo_renda_eleita = f"{faker.bban()[:2]}"
    valor_bruto_renda_eleita = random.choice(
        [f"+{float(random.randint(1, 999999))}", f"-{float(random.randint(1, 999999))}"]
    )  # gross income

    card_number = f"{faker.credit_card_number()}"
    status_bloqueio_conta = random.choice(["activated", "not activated"])

    # Credit limit: total, and what is left after a random used slice.
    v0 = float(random.randint(1, 999999))
    v1 = float(random.randint(1, 99999))
    valor_limite_total = f"{v0}"
    valor_limite = v0 - v1
    valor_limite_disponivel = f"{valor_limite}"
    total_limit_used = f"{v0 - valor_limite}"
    descricao_produto = faker.credit_card_provider()
    security_card = f"{faker.credit_card_security_code()}"
    expire_card_number = f"{faker.credit_card_expire()}"

    # ----- transactions ---------------------------------------------------
    transaction_code = f"{faker.bban()}"
    future_transaction = f"{faker.future_date()}"
    time_transaction = f"{faker.time()}"
    describle_transaction = random.choice(TRANSACTION_KINDS)
    original_transaction_amount = f"{float(random.randint(1, 999999))}"
    number = random.randint(1, 9)
    number_installments_assign = f"{number}"
    current_installment = f"{number - 1}"
    currency = f"{faker.currency_code()} of Braavos"

    # ----- account --------------------------------------------------------
    account_type = random.choice(ACCOUNT_TYPES)
    investor_profile = random.choice(INVESTOR_PROFILES)
    bban_count = faker.bban()
    aba = f"0{random.randint(11111111, 99999999)}"
    v01 = float(random.randint(1, 9999999))
    v11 = float(random.randint(1, 999999))
    total_account_value = f"{v01}"
    current_account_total_value = f"{v01 - v11}"
    total_amount_carried_over = f"{v11}"
    account_status = random.choice(["activated", "not activated"])

    # ----- write one JSON document per entity -----------------------------
    _write_json("client", "cl", num, {
        "id_client": id_client,
        "itin": itin,
        "name": fullname,
        "family": lastname,
        "house": lastname,
        "words": slogan,
        "title": title,
        "gender": gender,
        "date_birth": birth_date,
        "faith": religion,
        "god_to_pray": gods,
        "yearmonthday": yearmonthday,
        "_yearmonthday": yearmonthday
    })
    _write_json("address", "ad", num, {
        "id_client": id_client,
        "postalcode": postalcode,
        "street": street,
        "number": number_street,
        "birth_culture": cultureBorn,
        "current_culture": cultureLiving,
        "country_of_birth": born_continent,
        "birth_region": born_region,
        "city_of_birth": bornCity,
        "currency_city": currentCity,
        "yearmonthday": yearmonthday,
        "_yearmonthday": yearmonthday
    })
    _write_json("account", "ac", num, {
        "id_client": id_client,
        "yearmonthday": yearmonthday,
        "_yearmonthday": yearmonthday,
        "bank": "Braavos",
        "account_type": account_type,
        "investor_profile": investor_profile,
        "account_status": account_status,
        "bban_count": bban_count,
        "aba": aba
    })
    _write_json("accountcard", "accard", num, {
        "id_client": id_client,
        "yearmonthday": yearmonthday,
        "_yearmonthday": yearmonthday,
        "status_account_blocked": status_bloqueio_conta,
        "card_number": card_number,
        "transaction_code": transaction_code,
        "date_transaction": future_transaction,
        "time_transaction": time_transaction,
        "describle_transaction": describle_transaction,
        "original_transaction_amount": original_transaction_amount,
        "number_installments_assign": number_installments_assign,
        "current_installment": current_installment,
        "currency": currency
    })
    _write_json("card", "card", num, {
        "id_client": id_client,
        "yearmonthday": yearmonthday,
        "_yearmonthday": yearmonthday,
        # BUGFIX: the original wrote status_bloqueio_conta here (copy-paste
        # from the accountcard document); the key clearly expects the number.
        "card_number": card_number,
        "total_limit_value": valor_limite_total,
        "limit_value_available": valor_limite_disponivel,
        "total_limit_used": total_limit_used,
        "description_card": descricao_produto,
        "security_card": security_card,
        "expire_card_number": expire_card_number
    })
    _write_json("rent", "rent", num, {
        "id_client": id_client,
        "yearmonthday": yearmonthday,
        "_yearmonthday": yearmonthday,
        "rent_income_value": valorliquido,
        # NOTE(review): this key carries the *gross value*, not a method
        # description (descricao_metodo_renda_eleita is computed but never
        # emitted); kept as-is to preserve the output schema -- confirm intent.
        "description_chosen_income_method": valor_bruto_renda_eleita,
        "total_account_value": total_account_value,
        "current_account_total_value": current_account_total_value,
        "total_amount_carried_over": total_amount_carried_over
    })
31466468712 | # Given a binary tree
# struct TreeLinkNode {
# TreeLinkNode *left;
# TreeLinkNode *right;
# TreeLinkNode *next;
# }
# Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
# Initially, all next pointers are set to NULL.
# Note:
# You may only use constant extra space.
# You may assume that it is a perfect binary tree (ie, all leaves are at the same level, and every parent has two children).
# For example,/
# Given the following perfect binary tree,
# 1
# / \
# 2 3
# / \ / \
# 4 5 6 7
# After calling your function, the tree should look like:
# 1 -> NULL
# / \
# 2 -> 3 -> NULL
# / \ / \
# 4->5->6->7 -> NULL
class Node(object):
    """Binary-tree node with a level-order `next` sibling pointer (LC 116)."""

    def __init__(self, data):
        """Create a leaf node holding *data* with all pointers unset."""
        self.data = data
        self.left = None
        self.right = None
        # BUGFIX: `next` was never initialized, so connect() crashed with
        # AttributeError the first time it read a node's sibling pointer.
        self.next = None
def connect(node):
    """LC 116: wire each node's `next` pointer to its right-hand neighbour
    on the same level of a perfect binary tree (the last node of each level
    keeps next=None). Constant extra space: levels are walked via the
    leftmost node.
    """
    # Stop before the leaf level: the original looped `while node`, reached
    # the leaves where curr.right is None, and crashed on curr.right.next.
    while node and node.left:
        curr = node
        while curr:
            curr.left.next = curr.right              # siblings, same parent
            nxt = getattr(curr, "next", None)        # tolerate nodes lacking a next attr
            if nxt:
                curr.right.next = nxt.left           # cousins, adjacent parents
            curr = nxt                               # walk across the level
        node = node.left                             # descend one level
if __name__ == "__main__":
head = Node(20)
head.left = Node(10)
head.left.left = Node(5)
head.left.right = Node(15)
head.right = Node(30)
head.right.left = Node(25)
head.right.right = Node(35)
| han8909227/leetcode | tree/sibling_pointer_lc116.py | sibling_pointer_lc116.py | py | 1,569 | python | en | code | 3 | github-code | 13 |
39481178645 | from functools import reduce
def filemap(filename, func=int, sep='\n'):
    """Read *filename*, strip surrounding whitespace, split the contents on
    *sep*, and apply *func* to every piece.

    With the default arguments this returns a list containing one integer
    per row of the file.
    """
    with open(filename, 'r') as handle:
        raw = handle.read().strip()
    return [func(token) for token in raw.split(sep)]
# Cardinal directions as (row, col) grid offsets. The original built this via
# a reduce/map pipeline; the result is exactly this literal mapping, in the
# same E, S, W, N insertion order.
DIRECTIONS = {
    'E': (0, 1),
    'S': (1, 0),
    'W': (0, -1),
    'N': (-1, 0),
}
def init_diags():
    """Extend DIRECTIONS in place with the four diagonals ('NW', 'NE', 'SW',
    'SE') by summing the offsets of their cardinal components.

    Safe to call repeatedly: it only reads the cardinal entries and
    overwrites the diagonal keys with the same values.
    """
    # (Removed an unused local: the original captured list(DIRECTIONS.keys())
    # into `dirs` and never used it.)
    for a in ('N', 'S'):
        for b in ('W', 'E'):
            A = DIRECTIONS[a]
            B = DIRECTIONS[b]
            DIRECTIONS[a + b] = (A[0] + B[0], A[1] + B[1])
def initialize():
    """One-time module setup: populate the diagonal direction vectors."""
    init_diags()
def main():
    """Script entry point: initialise the direction table and dump it."""
    initialize()
    print(DIRECTIONS)

# Run main() when executed as a script; when imported, still populate the
# diagonals (re-running init_diags just overwrites the same diagonal entries).
if __name__ == '__main__':
    main()
else:
    initialize()
| kordaniel/AoC | 2020/helpers.py | helpers.py | py | 1,064 | python | en | code | 0 | github-code | 13 |
24682455594 | import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection as modsel
from sklearn import linear_model
import matplotlib.pyplot as plt
plt.style.use('ggplot')

# Load the Boston housing dataset and fit an ordinary least-squares model.
# NOTE(review): load_boston() is deprecated/removed in modern scikit-learn;
# this script targets an older sklearn version.
boston = datasets.load_boston()
linreg = linear_model.LinearRegression()

# Hold out 10% of the samples for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = modsel.train_test_split(
    boston.data, boston.target, test_size=0.1, random_state=42
)
linreg.fit(X_train, y_train)

# In-sample (training-set) mean squared error.
print(f'mean_squared_error = {metrics.mean_squared_error(y_train, linreg.predict(X_train))}')

y_pred = linreg.predict(X_test)

# Figure 1: ground truth vs. prediction per test sample.
plt.figure(figsize=(10, 6))
plt.plot(y_test, linewidth=3, label='ground truth')
plt.plot(y_pred, linewidth=3, label='predicted')
plt.xlabel('test data points')  # fixed typo: was 'test datat points'
plt.ylabel('target value')
plt.legend()  # the labels above were defined but never displayed

# Figure 2: predicted vs. actual scatter with the ideal y=x reference line.
plt.figure(2)
plt.plot(y_test, y_pred, 'o')
plt.plot([-10, 60], [-10, 60], 'k--')
plt.axis([-10, 60, -10, 60])
plt.xlabel('ground truth')
plt.ylabel('predicted')
plt.show()
74436839698 | from luxcena_neo import NeoBehaviour, FloatVariable, IntegerVariable, ColorVariable, BooleanVariable
from time import perf_counter
class Main(NeoBehaviour):
    """Strobe effect for an LED strip.

    Flashes the whole strip "strobe count" times, with "delay" seconds
    between on/off toggles, then stays dark for "pause" seconds before the
    next burst. Variables are exposed for the Luxcena UI via declare().
    NOTE(review): self.var lookups are presumably populated by NeoBehaviour
    from the declare() calls -- confirm against the framework docs.
    """

    def declare_variables(self):
        # User-tunable parameters: toggle delay (s), inter-burst pause (s),
        # flashes per burst, and the flash colour.
        self.declare(FloatVariable("delay", 0.07, min_val=0.000001, max_val=0.5, step=0.000001))
        self.declare(IntegerVariable("pause", 20, min_val=1, max_val=60))
        self.declare(IntegerVariable("strobe count", 10, min_val=1, max_val=100))
        self.declare(ColorVariable("color", "#fafafa"))

    def on_start(self):
        # strobe_enabled: currently inside a burst; strobe_on: lights are lit.
        self.strobe_enabled = False
        self.strobe_on = False
        # strobe_c counts toggles within the burst (2 per flash: on + off).
        self.strobe_c = 0
        # Timestamp of the last state change (monotonic clock).
        self.last_inst = perf_counter()

    def each_tick(self):
        # Wait "delay" between toggles during a burst, "pause" between bursts.
        if (perf_counter() - self.last_inst) > (self.var["delay"].value if self.strobe_enabled else self.var["pause"].value):
            if not self.strobe_enabled: self.strobe_enabled = True
            self.strobe_c += 1
            # Alternate between the configured colour and all-off (0).
            set_all(self.var["color"].value if self.strobe_on else 0)
            self.strobe_on = not self.strobe_on
            # Burst complete after strobe-count on/off pairs: go dark and rest.
            if self.strobe_c >= (self.var["strobe count"].value * 2):
                self.strobe_c = 0
                self.strobe_enabled = False
                self.strobe_on = False
                set_all(0)
            self.last_inst = perf_counter()
def set_all(*color):
    # Paint every pixel with the given color component(s) and push the frame;
    # `strip` is a global injected by the Luxcena-Neo runtime.
    for i in range(strip.num_pixels()):
        strip.set_pixel_color(i, *color)
    strip.show()
| JakobST1n/Luxcena-Neo | NeoRuntime/builtin/strobe/script.py | script.py | py | 1,418 | python | en | code | 0 | github-code | 13 |
18610843143 | import setuptools
# Read the README so the package index can display it as the long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="lorem_ipsum_auth",
    version="1.0",
    author="Adrian Dolha",
    packages=[],
    # NOTE(review): "eyahoo.com" looks like a typo for "yahoo.com" — confirm.
    author_email="adriandolha@eyahoo.com",
    description="Lorem Ipsum Demo App Auth",
    long_description=long_description,
    # Without this, PyPI renders the Markdown README as plain text.
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
| adriandolha/cloud-demo | lorem-ipsum/lorem-ipsum-authentication/setup.py | setup.py | py | 494 | python | en | code | 0 | github-code | 13 |
14799395488 | from fastapi import FastAPI
import uvicorn
from endpoints import sql, create_er_diagram, get_schema
from fastapi.middleware.cors import CORSMiddleware
# SQLite query service: wires up CORS and the three route groups.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    # NOTE(review): browsers reject credentialed CORS requests when the
    # allowed origin is the wildcard "*" — confirm whether
    # allow_credentials=True is actually required here.
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Route groups: raw SQL execution, ER-diagram generation, schema inspection.
app.include_router(sql.router)
app.include_router(create_er_diagram.router)
app.include_router(get_schema.router)
if __name__ == "__main__":
    # Dev server; reload=True restarts on code changes.
    uvicorn.run("main:app", host="0.0.0.0", port=8080, reload=True)
| Forwall100/queryquest | backend/sqlite_query_service/main.py | main.py | py | 526 | python | en | code | 0 | github-code | 13 |
32270420943 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 11:08:14 2022
@author: sarak
"""
def main():
    """Print each Monty Python member with his birth date, column-aligned."""
    first_names = ['Graham', 'Eric', 'Terry', 'Terry', 'John', 'Michael']
    last_names = ['Chapman', 'Idle', 'Gilliam', 'Jones', 'Cleese', 'Palin']
    birth_dates = ['8 January 1941', '29 March 1943', '22 November 1940',
                   '1 February 1942', '27 October 1939', '5 May 1943']
    # zip walks the three parallel lists in lockstep.
    for first, last, born_on in zip(first_names, last_names, birth_dates):
        print(f"{first:10} {last:10} was born on {born_on}")
if __name__ == '__main__':
    main()
# Graham Chapman was born on 8 January 1941
# Eric Idle was born on 29 March 1943
# Terry Gilliam was born on 22 November 1940
# Terry Jones was born on 1 February 1942
# John Cleese was born on 27 October 1939
# Michael Palin was born on 5 May 1943 | sara-kassani/1000_Python_example | 11_functional_programming/21_zip.py | 21_zip.py | py | 932 | python | en | code | 1 | github-code | 13 |
22991386993 | """
ID: vip_1001
LANG: PYTHON3
TASK: milk
"""
def left(string):
    # Integer parsed from everything before the first space.
    cut = string.find(" ")
    return int(string[:cut])
def right(string):
    # Integer parsed from everything after the first space.
    cut = string.find(" ")
    return int(string[cut + 1:])
def takeFirst(elem):
    # Sort key: order farmer records by their first field (price per unit).
    return elem[0]
# Greedy solver for USACO "Mixing Milk".
# milk.in : line 1 is "<target units> <number of farmers>"; each later line
#           is "<price per unit> <units available>" for one farmer.
# milk.out: minimum total cost of acquiring the target amount.
# Fixes vs. the original: file handles are closed deterministically (milk.out
# previously relied on interpreter shutdown to flush) and the stray debug
# prints were removed; the greedy logic is unchanged.
with open("milk.in", "r") as infile:
    lines = infile.readlines()
header = lines[0].strip()
target = left(header)
numoffarmers = right(header)
with open("milk.out", "w") as outputfile:
    if target == 0 or numoffarmers == 0:
        # Nothing to buy, or nobody selling: cost is zero.
        outputfile.write("0\n")
    else:
        # One [price, amount] record per farmer, sorted cheapest first.
        offers = [[left(lines[i]), right(lines[i])] for i in range(1, numoffarmers + 1)]
        offers.sort(key=takeFirst)
        bought = 0
        moneyused = 0
        i = 0
        while bought < target:
            # Buy this farmer's entire stock; any overshoot is refunded below.
            moneyused += offers[i][0] * offers[i][1]
            bought += offers[i][1]
            i += 1
        # The last farmer may have been over-bought; refund the surplus at
        # that farmer's unit price.
        surplus = bought - target
        moneyused -= surplus * offers[i - 1][0]
        outputfile.write(str(moneyused) + "\n")
74564326738 | """
_ResultSet_
A class to read in a SQLAlchemy result proxy and hold the data, such that the
SQLAlchemy result sets (aka cursors) can be closed. Make this class look as much
like the SQLAlchemy class to minimise the impact of adding this class.
"""
from builtins import object
import threading
class ResultSet(object):
    """In-memory snapshot of one or more SQLAlchemy result proxies.

    Mimics the small subset of the SQLAlchemy cursor API that callers use
    (close/fetchone/fetchall) so the real result sets can be closed early.
    """

    def __init__(self):
        self.data = []  # accumulated result rows
        self.keys = []  # column names taken from the first row seen

    def close(self):
        """No-op; the data is already detached from the database."""
        return

    def fetchone(self):
        """Return the first buffered row, or [] when there are none."""
        if self.data:
            return self.data[0]
        return []

    def fetchall(self):
        """Return every buffered row."""
        return self.data

    def add(self, resultproxy):
        """Append all rows of *resultproxy*; closed or row-less proxies are
        ignored. (An unused, deprecated threading.currentThread() call was
        removed here — it was dead code.)"""
        if resultproxy.closed or not resultproxy.returns_rows:
            return
        for r in resultproxy:
            if not self.keys:
                # do not modernize next line.
                # r is a `sqlalchemy.engine.result.RowProxy`, not a `dict`
                self.keys.extend(r.keys())
            self.data.append(r)
| dmwm/WMCore | src/python/WMCore/Database/ResultSet.py | ResultSet.py | py | 1,078 | python | en | code | 44 | github-code | 13 |
38033121966 | from django.contrib import admin
from django.urls import path ,include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt import views as jwt_views
from cab_g import views
# URL table for the cabinet-management API. Route names are used by
# reverse()/frontend lookups, so the inconsistent casings below
# ('get-Invoice', 'update-Patient', 'delete-Patient') are documented rather
# than renamed — changing them would break existing name-based lookups.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Authentication (JWT) and registration.
    path('api/token/', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
    path('hello/', views.HelloView.as_view(), name='hello'),
    path('api/register/', views.RegisterView.as_view(), name='auth_register'),
    # Create endpoints.
    path('api/create-doctor/',views.CreateDoctorView.as_view(),name='create-doctor'),
    path('api/create-cabinet/',views.CreateCabinetView.as_view(),name='create-cabinet'),
    path('api/create-specialite/',views.createSpecialiteView.as_view(),name='create-specialite'),
    path('api/create-patient/',views.CreatePatientView.as_view(),name='create-patient'),
    path('api/create-acte-demander/',views.CreateActeDemanderView.as_view(),name='create-acte-demander'),
    path('api/create-acte-fait/',views.CreateActeFaitView.as_view(),name='create-acte-fait'),
    path('api/create-medicament/',views.CreateMedicamentView.as_view(),name='create-medicament'),
    path('api/create-appointment/',views.CreateAppointmentView.as_view(),name='create-appointment'),
    path('api/create-ordonnance/',views.CreateOrdonnanceView.as_view(),name='create-ordonnance'),
    path('api/create-invoice/',views.CreateInvoiceView.as_view(),name='create-invoice'),
    # Read endpoints.
    path('api/get-patient/',views.GetPatientView.as_view(),name='get-patient'),
    path('api/get-cabinet/',views.GetCabinetView.as_view(),name='get-cabinet'),
    path('api/get-specialite/',views.GetSpecialitetView.as_view(),name='get-specialite'),
    path('api/get-acte-demander/',views.GetActeDemandertView.as_view(),name='get-acte-demander'),
    path('api/get-acte-fait/',views.GetActeFaitView.as_view(),name='get-acte-fait'),
    path('api/get-medicament/',views.GetMedicamentView.as_view(),name='get-medicament'),
    path('api/get-appointment/',views.GetAppointmentView.as_view(),name='get-appointment'),
    path('api/get-ordonnance/',views.GetOrdonnanceView.as_view(),name='get-ordonnance'),
    path('api/get-invoice/',views.GetInvoiceView.as_view(),name='get-Invoice'),
    # Update endpoints (keyed by integer primary key).
    #path('api/update-user/<int:id>/',views.UserUpdateView.as_view(),name='update-user'),
    path('api/update-doctor/<int:id>/',views.DoctorUpdateView.as_view(),name='update-doctor'),
    path('api/update-cabinet/<int:id>/',views.CabinetUpdateView.as_view(),name='update-cabinet'),
    path('api/update-patient/<int:id>/',views.PatientUpdateView.as_view(),name='update-Patient'),
    path('api/update-acte-demander/<int:id>/',views.ActeDemanderUpdateView.as_view(),name='update-acte-demander'),
    path('api/update-acte-fait/<int:id>/',views.ActeFaitUpdateView.as_view(),name='update-acte-fait'),
    path('api/update-medicament/<int:id>/',views.MedicamentUpdateView.as_view(),name='update-medicament'),
    path('api/update-appointment/<int:id>/',views.AppointmentUpdateView.as_view(),name='update-appointment'),
    path('api/update-ordonnance/<int:id>/',views.OrdonnanceUpdateView.as_view(),name='update-ordonnance'),
    path('api/update-invoice/<int:id>/',views.InvoiceUpdateView.as_view(),name='update-invoice'),
    # Delete endpoints (keyed by integer primary key).
    path('api/delete-specialite/<int:id>/',views.SpecialiteDeleteView.as_view(),name='delete-specialite'),
    path('api/delete-doctor/<int:id>/',views.DoctorDeleteView.as_view(),name='delete-doctor'),
    path('api/delete-cabinet/<int:id>/',views.CabinetDeleteView.as_view(),name='delete-cabinet'),
    path('api/delete-patient/<int:id>/',views.PatientDeleteView.as_view(),name='delete-Patient'),
    path('api/delete-acte-demander/<int:id>/',views.ActeDemanderDeleteView.as_view(),name='delete-acte-demander'),
    path('api/delete-acte-fait/<int:id>/',views.ActeFaitDeleteView.as_view(),name='delete-acte-fait'),
    path('api/delete-medicament/<int:id>/',views.MedicamentDeleteView.as_view(),name='delete-medicament'),
    path('api/delete-appointment/<int:id>/',views.AppointmentDeleteView.as_view(),name='delete-appointment'),
    path('api/delete-ordonnance/<int:id>/',views.OrdonnanceDeleteView.as_view(),name='delete-ordonnance'),
    path('api/delete-invoice/<int:id>/',views.InvoiceDeleteView.as_view(),name='delete-invoice'),
    # Assistant CRUD and user helpers.
    path('api/get-assistant/',views.AssistantGetView.as_view(),name='get-assistant'),
    path('api/create-assistant/',views.CreateAssistantView.as_view(),name='create-assistant'),
    path('api/update-assistant/<int:id>/',views.AssistantUpdateView.as_view(),name='update-assistant'),
    path('api/delete-assistant/<int:id>/',views.AssistantDeleteView.as_view(),name='delete-assistant'),
    path('api/get-doctor/',views.AllDoctorsView.as_view(),name='get-doctor'),
    path('api/get-user/',views.AllUsersView.as_view(),name='get-user'),
    path('api/get-connected-user/',views.GetUserView.as_view(),name='get-connected-user'),
    path('do',views.DoTest.as_view(),name='r'),# just i wel check if its works
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
5489289452 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def plot_mean_val_comparisons(dict1, dict2, name1, name2, error_bar='std'):
    '''
    Plots a bar graph comparing the mean absolute difference of two
    segmentation sources relative to a gold-standard source.

    inputs:
        dict1: dict mapping region label -> stats tuple for source 1, where
               entry [0] is the mean, [1] the std, and [2] the (lo, hi) 95% CI
        dict2: same as dict1 but for segmentation source 2
        name1: name of segmentation source 1 (legend label)
        name2: name of segmentation source 2 (legend label)
        error_bar: 'std' for standard-deviation bars, 'ci' for 95% CI bars
    '''
    if error_bar not in ('std', 'ci'):
        # Previously an unknown value fell through and crashed later with a
        # NameError on `error1`; fail fast with a clear message instead.
        raise ValueError("error_bar must be 'std' or 'ci', got %r" % (error_bar,))
    # Region keys used to pull values out of the dictionaries.
    labels = ['all', 'deep', 'superficial',
              'L', 'M', 'DL', 'SL', 'DM', 'SM',
              'LA', 'LC', 'LP', 'MA', 'MC', 'MP',
              'DLA', 'DLC', 'DLP', 'DMA', 'DMC', 'DMP',
              'SLA', 'SLC', 'SLP', 'SMA', 'SMC', 'SMP']
    values1 = [dict1[i] for i in labels]
    values2 = [dict2[i] for i in labels]
    # Re-labeled for display only: 'deep'/'superficial' shortened to 'D'/'S'.
    labels = ['all', 'D', 'S',
              'L', 'M', 'DL', 'SL', 'DM', 'SM',
              'LA', 'LC', 'LP', 'MA', 'MC', 'MP',
              'DLA', 'DLC', 'DLP', 'DMA', 'DMC', 'DMP',
              'SLA', 'SLC', 'SLP', 'SMA', 'SMC', 'SMP']
    means1 = [np.round(i[0], 2) for i in values1]
    means2 = [np.round(i[0], 2) for i in values2]
    if error_bar == 'std':
        error1 = [np.round(i[1], 2) for i in values1]
        error2 = [np.round(i[1], 2) for i in values2]
    else:  # 'ci': asymmetric (below, above) distances from the mean
        error1 = np.array([[i[0] - i[2][0], i[2][1] - i[0]] for i in values1]).T
        error2 = np.array([[i[0] - i[2][0], i[2][1] - i[0]] for i in values2]).T
    x = np.arange(len(labels))  # the label locations
    width = 0.4                 # the width of the bars
    fig, ax = plt.subplots()
    ax.bar(x - width/2, means1, width, yerr=error1, label=name1)
    ax.bar(x + width/2, means2, width, yerr=error2, label=name2)
    # Axis labels, title and custom x-axis tick labels.
    if error_bar == 'std':
        ax.set_ylabel('Mean absolute difference (± StD)\n(ms)', size=30)
    else:
        ax.set_ylabel('Mean absolute difference (± 95% CI)\n(ms)', size=30)
    ax.set_xlabel('Cartilage region', size=30)
    ax.set_title('Mean absolute difference in average T2 value for each region relative to Reader 1', size=35)
    ax.set_xticks(x)
    ax.set_xticklabels([l[0:5] for l in labels], size=30)
    # `Tick.label` was removed in matplotlib 3.8; tick_params is equivalent.
    ax.tick_params(axis='y', labelsize=30)
    ax.legend(prop={'size': 30})
    fig.set_size_inches(35, 9, forward=True)
    fig.tight_layout()
    plt.savefig("bar_graph.png", format="png", dpi=1200, orientation='landscape')
    plt.show()
7947190875 | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1521485842.978504
_enable_loop = True
_template_filename = '/Users/hollyholland/PycharmProjects/FOMO1/FOMO/catalog/templates/index.html'
_template_uri = 'index.html'
_source_encoding = 'utf-8'
import django_mako_plus
import django_mako_plus
_exports = ['content_top', 'content']
# This module is auto-generated by Mako from index.html (see
# _template_filename above); hand edits are lost on regeneration.
def _mako_get_namespace(context, name):
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    pass
def _mako_inherit(template, context):
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'app_base.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
maxPages = context.get('maxPages', UNDEFINED)
def content_top():
return render_content_top(context._locals(__M_locals))
def content():
return render_content(context._locals(__M_locals))
selected = context.get('selected', UNDEFINED)
__M_writer = context.writer()
__M_writer('\n')
__M_writer('\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_top'):
context['self'].content_top(**pageargs)
__M_writer('\n\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content_top(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def content_top():
return render_content_top(context)
selected = context.get('selected', UNDEFINED)
__M_writer = context.writer()
__M_writer('\n <h1 class="text-center">\n')
if selected is None:
__M_writer(' All Categories\n')
else:
__M_writer(' ')
__M_writer(str(selected.name))
__M_writer('\n')
__M_writer(' </h1>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
maxPages = context.get('maxPages', UNDEFINED)
def content():
return render_content(context)
__M_writer = context.writer()
__M_writer('\n <div class="pagination">\n <div class="row text-right"><a id="getLastPage"><i class="fa fa-arrow-left"></i></a> <span id=\'currentPage\'>1</span> of <span id=\'maxPages\'>')
__M_writer(str(maxPages))
__M_writer('</span> <a id="getNextPage"><i class="fa fa-arrow-right"></i></a></div>\n <div class="wrapper">\n <ul id="products">\n\n </ul>\n </div>\n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "/Users/hollyholland/PycharmProjects/FOMO1/FOMO/catalog/templates/index.html", "uri": "index.html", "source_encoding": "utf-8", "line_map": {"29": 0, "40": 1, "41": 2, "46": 12, "51": 24, "57": 4, "64": 4, "65": 6, "66": 7, "67": 8, "68": 9, "69": 9, "70": 9, "71": 11, "77": 15, "84": 15, "85": 17, "86": 17, "92": 86}}
__M_END_METADATA
"""
| hollyh95/FOMO_sprint0 | catalog/templates/.cached_templates/index.html.py | index.html.py | py | 3,691 | python | en | code | 0 | github-code | 13 |
73474892499 | #
#____Results__availabe___are:
#
# Nodal outputs
# coo = [1,2,3] x, y, z
# disp = [1,2,3] dx, dy, dz
# vel = [1,2,3] vx, vy, vz
# Element outputs (mesh,entity)
# crss = [1,2,...,n] where n=12,18,32 for bcc/fcc,hcp,bct respectively
# defrate = [1,2,3,4,5,6] tensor
# defrate_eq = [1]
# defrate_pl = [1,2,3,4,5,6]
# defrate_pl_eq = [1]
# elt_vol = [1]
# ori = [1,..,n] where n= 3 if rod of euler or 4 if quat or axis angle
# slip = [1,2,...,n] where n=12,18,32 for bcc/fcc,hcp,bct respectively
# sliprate = [1,2,...,n] where n=12,18,32 for bcc/fcc,hcp,bct respectively
# spinrate = [1,2,3] skew symetric plastic spin rate tensor
# strain = [1,2,3,4,5,6]
# strain_eq = [1]
# strain_el = [1,2,3,4,5,6]
# strain_el_eq = [1]
# strain_pl = [1,2,3,4,5,6]
# strain_pl_eq = [1]
# stress = [1,2,3,4,5,6]
# stress_eq = [1]
# velgrad = [1,2,3,4,5,6,7,8,9] full velocity gradient tensor
# work = [1]
# work_pl = [1]
# workrate = [1]
# workrate_pl = [1]
#
# FCC slip-system labels, "(plane)[direction]" in TeX math mode, indexed the
# same way as the 12-entry `slip`/`sliprate` element outputs listed above.
slip_systems = ["$(01-1)[111]$",
                "$(10-1)[111]$",
                "$(1-10)[111]$",
                "$(011)[11-1]$",
                "$(101)[11-1]$",
                "$(1-10)[11-1]$",
                "$(011)[1-11]$",
                "$(10-1)[1-11]$",
                "$(110)[1-11]$",
                "$(01-1)[1-1-1]$",
                "$(101)[1-1-1]$",
                "$(110)[1-1-1]$"]
########
########
########
import os
from ezmethods import *
import matplotlib.pyplot as plt
import math
plt.rcParams.update({'font.size': 45})
#plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'DejaVu Serif'
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams["figure.subplot.left"] = 0.05
plt.rcParams["figure.subplot.bottom"] = 0.08
plt.rcParams["figure.subplot.right"] = 0.99
plt.rcParams["figure.subplot.top"] = 0.95
plt.rcParams["figure.subplot.wspace"] = 0.21
plt.rcParams["figure.subplot.hspace"] = 0.44
plt.rcParams['figure.figsize'] = 30,20
import numpy as np
import shutil
import time
import pandas as pd
import time
start = time.perf_counter()
#sim.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
#sim_iso= fepx_sim("name",path=home+"/1_uniaxial")
#sample_num = int(int(val[2])/10)
sample_num= 2000
start = 0
bin_size = 10
# NOTE(review): `max` shadows the builtin; rename (e.g. max_slip) when next
# touching this section.
max = 0.00004
bins=np.arange(0,max,max/bin_size)
domains = [["Cube","CUB"], ["Elongated","ELONG"]]
#ids = np.arange(start,start+sample_num,1)
home ="/run/user/1001/gvfs/sftp:host=schmid.eng.ua.edu/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/"
aps_home ="/home/etmengiste/jobs/aps/"
# Last assignment wins: the active data root is this local mirror.
home="/media/etmengiste/acmelabpc2_2TB/DATA/jobs/aps/spring_2023/slip_study_rerun/"
simulations = os.listdir(home)
simulations.sort()
bulk=False
slips = ["2","4","6"]
aniso = ["125","150","175","200","400"]
sets = ["solid","dotted","dashdot",(0, (3, 5, 1, 5, 1, 5)),(0, (3, 1, 1, 1, 1, 1))]
an = ["Iso.", "1.25", "1.50", "1.75", "2.00", "4.00"]
#name = "DOM_"+domains[0][1]+"_ISO"
#package_oris(home+"isotropic/"+domains[0][0]+".sim/", name=name)
# Read the element list of grain 300 from the precomputed grain->elts file.
file = open(home+"common_files/Cube.stelt").readlines()
grain_id = 300
file_grain_ids = open(home+"common_files/Cube_grain_elts")
elts = file_grain_ids.readlines()[grain_id].split(", ")
elts = [int(i) for i in elts]
file_grain_ids.close()
print(elts[0:5])
# NOTE(review): this unconditional exit makes everything below this point in
# the module unreachable scratch code (later sections are each gated by their
# own exit(0) as well).
exit(0)
#name = "DOM_"+domains[1][1]+"_ISO"
#package_oris(home+"isotropic/"+domains[1][0]+".sim/", name=name)
for sim_name in simulations[0:5]:
if sim_name=="common_files":
print("common_files")
break
for dom in domains:
path = sim_name+"/"+dom[0]
sim= fepx_sim("sim",path=home+path)
sim.post_process()
num_elts = int(sim.sim['**general'].split()[2])
step = sim.sim['**step']
#nums= np.arange(1457,1473,1) 373
#nums= np.arange(261,414,1)
nums= elts
#pprint(elts)
ori = sim.get_output("ori",step="0",res="elts",ids=nums)
ori10 = sim.get_output("ori",step="10",res="elts",ids=nums)
ori20 = sim.get_output("ori",step="20",res="elts",ids=nums)
ori28 = sim.get_output("ori",step="28",res="elts",ids=nums)
file= open(home+path+"oris0.txt","w")
file10= open(home+path+"oris10.txt","w")
file20= open(home+path+"oris20.txt","w")
file28= open(home+path+"oris28.txt","w")
for i in ori[0]:
var=""
var10=""
var20=""
var28=""
for j in range(3):
var+= str(ori[0][i][j])+" "
var10+= str(ori10[0][i][j])+" "
var20+= str(ori20[0][i][j])+" "
var28+= str(ori28[0][i][j])+" "
#print(var)
file.write(var+"\n")
file10.write(var10+"\n")
file20.write(var20+"\n")
file28.write(var28+"\n")
file.close()
file10.close()
file20.close()
file28.close()
os.system("pwd")
os.chdir(home)
os.system("./common_files/elt_spred.sh")
exit(0)
for i in [0,25,50]:
plot_yield_stress(i)
plot_eff_strain(i)
#
#sim.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
#sim_iso= fepx_sim("name",path=home+"/1_uniaxial")
#sample_num = int(int(val[2])/10)
sample_num= 2000
start = 0
bin_size = 10
max = 0.00004
bins=np.arange(0,max,max/bin_size)
domains = [["Cube","CUB"], ["Elongated","ELONG"]]
#ids = np.arange(start,start+sample_num,1)
home ="/run/user/1001/gvfs/sftp:host=schmid.eng.ua.edu/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/"
simulations = os.listdir(home)
simulations.sort()
bulk=False
debug = False
P = [ calculate_schmid(CUB_111[i],CUB_110[i]) for i in range(12)]
#pprint(P)
altered = [0, 1, 2, 3]
unaltered = [4, 5, 6, 7, 8, 9, 10, 11]
def calc_eff_pl_str(sim,domain,under=""):
    """Split the volume-averaged effective plastic strain of one simulation
    into contributions from altered vs. unaltered slip systems.

    Parameters
    ----------
    sim : str
        Simulation directory name under `home`.
    domain : str
        Domain sub-directory ("Cube" or "Elongated").
    under : str, optional
        Separator in the element-volume output name ("" -> "eltvol",
        "_" -> "elt_vol"), matching how the .sim was post-processed.

    Writes `<home>/<sim>/<domain>_eff_pl_str.csv` (one row per element) and
    prints the two volume-averaged totals.
    """
    out_name = home+sim+"/"+domain+"_eff_pl_str.csv"
    sim= fepx_sim(sim,path=home+sim+"/"+domain)
    sim.post_process()
    num_elts = int(sim.sim['**general'].split()[2])
    step = sim.sim['**step']
    slip = sim.get_output("slip",step="28",res="elts",ids="all")
    elt_vol = sim.get_output("elt"+under+"vol",step="0",res="elts",ids="all")
    v_tot = sum(elt_vol[1]["0"])
    del elt_vol
    elt_vol_final = sim.get_output("elt"+under+"vol",step=step,res="elts",ids="all")
    mat_par = sim.material_parameters["g_0"]
    del sim
    # Baseline slip-system strength comes from the isotropic reference run.
    # (A duplicated baseline computation and unused locals were removed.)
    sim_iso= fepx_sim("name",path=home+"isotropic/"+domain)
    sim_iso.post_process()
    baseline = float(sim_iso.material_parameters["g_0"][0].split("d")[0])
    del sim_iso
    # Slip systems whose initial strength was raised above the baseline.
    strength = [float(i.split("d")[0]) for i in mat_par]
    altered = [index for index, val in enumerate(strength) if val > baseline]
    avg_eff_pl_str_alt = []
    avg_eff_pl_str_unalt = []
    print("***---***")
    print(altered)
    print(baseline)
    print("***---***")
    # The CSV handle is now closed deterministically (it previously leaked).
    with open(out_name, "w") as file:
        values = "elt_vol, tot_vol, vol_frac, eff_pl_alt, eff_pl_unalt, vol_eff_pl_alt, vol_eff_pl_unalt"
        file.write(values+"\n")
        for el in range(num_elts):
            # Accumulate the resolved plastic strain tensor per group
            # (Schmid tensor P[i] weighted by the slip-system shear).
            total_altered = 0
            total_unaltered = 0
            for i in range(12):
                schmid_val = P[i]
                shear_val = slip[0][str(el)][i]
                if i in altered:
                    total_altered += schmid_val*shear_val
                else:
                    total_unaltered += schmid_val*shear_val
            eff_pl_str_alt = math.sqrt((2/3)*inner_prod(total_altered,total_altered))
            eff_pl_str_unalt = math.sqrt((2/3)*inner_prod(total_unaltered,total_unaltered))
            v_el = elt_vol_final[0][str(el)][0]
            v_frac = v_el/v_tot
            avg_eff_pl_str_alt.append(eff_pl_str_alt*v_frac)
            avg_eff_pl_str_unalt.append(eff_pl_str_unalt*v_frac)
            if debug:
                print("el vol", v_el)
                print("tot vol", v_tot)
                print("vol_frac", v_frac)
                print("\n Effective plastic altered   :", eff_pl_str_alt)
                # label fixed: this value is the UNaltered contribution
                print("\n Effective plastic unaltered :", eff_pl_str_unalt)
                print("\n Vol avg effective plastic altered   :", avg_eff_pl_str_alt[el])
                print("\n Vol avg effective plastic unaltered :", avg_eff_pl_str_unalt[el])
            values = str(v_el)+"," + str(v_tot)+","+ str(v_frac)+","+ str(eff_pl_str_alt)+ ","+ str(eff_pl_str_unalt)+ ","+ str(avg_eff_pl_str_alt[el])+ ","+ str(avg_eff_pl_str_unalt[el])
            file.write(values+"\n")
    print("\n__")
    print(sum(avg_eff_pl_str_alt))
    print(sum(avg_eff_pl_str_unalt))
sim="isotropic"
domain="Cube"
#calc_eff_pl_str(sim,domain,under="_")
domain="Elongated"
#calc_eff_pl_str(sim,domain,under="_")
for i in simulations:
if i=="common_files":
print("common_files")
break
for dom in domains:
calc_eff_pl_str(i,dom[0])
#pprint(elt_vol[1]["0"],max=100)
#slip_vs_aniso(i,"Cube", slip_systems,debug=True, save_plot=False,df=dataframe)
exit(0)
file_grain_ids = open(home+"common_files/Cube_grain_elts","w")
elts = []
grain_elts=[]
values = np.arange(0,2000,1)
for val in values:
for i in file:
vals= i.split()
if vals[1] == str(val):
#print(vals[0])
elts.append(int(vals[0])-1)
#print(elts)
print(val,"----")
grain_elts.append(elts)
file_grain_ids.write(str(elts)[1:-1]+"\n")
#print(len(elts))
#print(grain_elts)
dataframe.to_csv("/home/etmengiste/jobs/aps/images/eff_pl_strain_values.csv")
#dataframe= pd.read_csv("/home/etmengiste/jobs/aps/images/eff_pl_strain_values.csv")
ani = [float(i)/100 for i in dataframe["Aniso"]]
for j in range(3):
fig= plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
print(0+25*j,24+25*j)
plot_eff_strain(j,ax1,"Cube",ani,dataframe)
plot_eff_strain(j,ax2,"Elongated",ani,dataframe)
plt.savefig("/home/etmengiste/jobs/aps/images/eff_pl_strain"+slips[j]+"_"+str(i))
exit(0)
#home="/media/etmengiste/acmelabpc2_2TB/DATA/jobs/aps/spring_2023/slip_study_rerun/"
#home="/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/"
#home="/Users/ezramengiste/Documents/neper_fepx_gui/the_sims"
home_full ="/run/user/1001/gvfs/sftp:host=schmid.eng.ua.edu/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/"
home ="/run/user/1001/gvfs/sftp:host=schmid.eng.ua.edu/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/"
# non first g_0
#home="/run/user/1001/gvfs/sftp:host=schmid.eng.ua.edu/media/schmid_2tb_1/etmengiste/files/nonfirst_g_0/slip_study_rerun/"
ist= "/run/user/1001/gvfs/sftp:host=schmid.eng.ua.edu/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/isotropic"
home="/media/etmengiste/acmelabpc2_2TB/DATA/jobs/aps/spring_2023/slip_study_rerun/"
home="/media/schmid_2tb_1/etmengiste/files/slip_study_rerun/"
simulations = os.listdir(home)
simulations.sort()
#sim.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
sim_iso= fepx_sim("name",path=home+"isotropic/Cube")
#sim_iso.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
sample_num = 2000
start = 0
ids = np.arange(start,start+sample_num,1)
slip_iso = [normalize(sim_iso.get_output("slip",step="28",res="elsets",ids=ids)[str(i)],absolute=True) for i in ids]
slips_iso = {}
baseline = float(sim_iso.material_parameters["g_0"][0].split("d")[0])
del sim_iso
fig = plt.figure()
axies = []
for i in range(len(slip_iso[0])):
slips =[]
ax= fig.add_subplot(2,6,i+1)
for id in ids:
slips.append(slip_iso[id][i])
slips_iso[str(i)] = slips
ax.hist(slips,bins=10,color="blue",edgecolor="red",alpha=0.2)
axies.append(ax)
for sim in ["071", "072", "073", "074","075"]:
sim= fepx_sim(sim,path=home+sim+"/Cube")
#sim.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
slip = [normalize(sim.get_output("slip",step="28",res="elsets",ids=ids)[str(i)],absolute=True) for i in ids]
strength = [ float(i.split("d")[0]) for i in sim.material_parameters["g_0"]]
altered = [index for index,val in enumerate(strength) if val>baseline]
for i in range(len(slip_iso[0])):
color = "k"
if i in altered:
color="red"
slips =[]
ax= axies[i]
ax.clear()
for id in ids:
slips.append(slip_iso[id][i])
slips_iso[str(i)] = slips
ax.hist(slips,bins=20,color=color ,edgecolor="k", alpha= 0.2)
ax.set_ylim([0,sample_num*.8])
ax.set_xlim([0,0.15])
plt.tight_layout()
plt.subplots_adjust(left=0.13, right=0.995,top=0.99, bottom=0.1, wspace=0.035)
plt.savefig("figure"+sim.name)
del sim
exit(0)
for i in range(len(slip_iso[0])):
slips =[]
ax= fig.add_subplot(2,7,i+1)
for id in ids:
slips.append(slip_iso[id][i])
if i == 0:
stress_fake_1 = [ 0.3*float(i) for i in stress_iso[id]]
plotting_space([stress_fake_1,stress_iso[id]],axis=ax_stress)
slips_iso[str(i)] = slips
#print(slips)
slip_fake_1 = [ abs(float(i-0.002)) for i in slips]
ax.hist(slip_fake_1,bins=bin_size,color="red",edgecolor="k",alpha=0.2)
ax.hist(slips,bins=bin_size,color="blue",edgecolor="k",alpha=0.2)
ax.set_title(slip_systems[i])
ax.set_ylim([0,int(sample_num)])
axies.append(ax)
plt.tight_layout()
plt.subplots_adjust(left=0.062, bottom=0.08, right=0.9,top=0.948, wspace=0.162,hspace=.126)
plt.show()
exit(0)
for sim in ["046","050","075"]:
sim= fepx_sim("name",path=home+sim+"/Cube")
#sim.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
slip = [normalize(sim.get_output("slip",step="28",res="elsets",ids=ids)[str(i)],absolute=True) for i in ids]
strength = [ float(i.split("d")[0]) for i in sim.material_parameters["g_0"]]
altered = [index for index,val in enumerate(strength) if val>baseline]
for i in range(len(slip_iso[0])):
color = "k"
if i in altered:
color="red"
slips =[]
ax= axies[i]
for id in ids:
slips.append(slip_iso[id][i])
slips_iso[str(i)] = slips
ax.hist(slips,bins=20,color=color ,edgecolor="k", alpha= 0.2)
ax.set_ylim([0,sample_num])
plt.tight_layout()
plt.subplots_adjust(left=0.13, right=0.995,top=0.99, bottom=0.1, wspace=0.035)
plt.savefig("figure")
exit(0)
baseline = float(sim_iso.material_parameters["g_0"][0].split("d")[0])
sims = ["070","071","072","072","074", "075"]
slips_list = {}
for sim in sims:
slips_list[sim] = slips
elt_num = len(ids)
del sim_iso
for simulation in sims:
slips = slips_list[simulation]
sim= fepx_sim("name",path=home+simulation+"/Cube")
#sim.post_process(options ="neper -S . -reselset slip,crss,stress,sliprate")
slip = [normalize(sim.get_output("slip",step="28",res="elsets",ids=ids)[str(i)],absolute=True) for i in ids]
strength = [ float(i.split("d")[0]) for i in sim.material_parameters["g_0"]]
altered = [index for index,val in enumerate(strength) if val>baseline]
for j,val in enumerate(slip):
ax= fig.add_subplot(2,6,j+1)
slips[str(j)]=val
color="k"
if j in altered:
color="red"
ax= fig.add_subplot(2,6,j+1)
ax.hist(slips[str(j)], bins=20 ,color=color,edgecolor=color, alpha=0.5)
ax.set_ylim([0,1500])
del sim
#stress = sim.get_output("stress",step="28",res="elsets",ids=ids)
#crss = sim.get_output("crss",step="28",res="elsets",ids=ids)
#pprint(slip,max=100)
#print(len(array_of_ids))
plt.tight_layout()
plt.subplots_adjust(left=0.13, right=0.995,top=0.99, bottom=0.1, wspace=0.035)
plt.show()
plt.savefig("figure")
exit(0)
array_of_ids = []
y = np.arange(6)
y2 = np.arange(12)
for index,i in enumerate(ids):
if slips[index]> right and slips[index]<=left:
#print(i,stress[str(i)])
axs[1].bar(y, stress[str(i)],color="k",edgecolor="k",alpha=0.003)
axs[2].bar(y2, slip[str(i)],color="k",edgecolor="k",alpha=0.003)
axs[3].hist(crss[str(i)],color="k",edgecolor="k",alpha=0.003)
array_of_ids.append(i)
stress = results["stress"]
strain = results["strain"]
| EMengiste/data_reduction_scripts | python/objects.py | objects.py | py | 18,141 | python | en | code | 0 | github-code | 13 |
12103897129 | from django.contrib import admin
from .models import Araba
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display= ['arabaismi','ilantarihi']
search_fields=['arabaismi', 'ozellikler','ilantarihi']
class Meta:
model = Araba
admin.site.register(Araba, PostAdmin)
| muzafferkadir/Django_Car_Dealer_Site | araba/admin.py | admin.py | py | 311 | python | en | code | 0 | github-code | 13 |
41490352899 | import configparser
import pandas as pd
def open_audit(audit_file):
return pd.read_csv(audit_file)
class ReadLogs:
def __init__(self):
# Read from config.ini to import our Audit log file, user and commands to alert on
config = configparser.ConfigParser()
config.read("/app/config.ini")
# Set our audit file
audit_file = config.get("auditfile", "file")
# Set our user we want to alert on
self.username = config.get("behaviours", "username")
# Set the list of suspicious commands
self.cmds = config.get("behaviours", "cmds")
# Set our pandas output settings
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# Open Audit file with pandas to create a dataframe
self.df = open_audit(audit_file)
def suspicious_commands(self):
# Return a dataframe based on username and suspicious commands
return self.df[(self.df['proctitle'].str.match(self.cmds)== True)
& (self.df['username'] == self.username)]
def root_events(self, data_frame=None):
# Validate we have a dataframe
if data_frame is not None:
# Return our provided dataframe that only contains events with the auid of our username and the euid of root
return data_frame[(data_frame['auid'] == 1000) & (data_frame['euid'] == 0)]
else:
# Return our main dataframe that only contains events with the auid of our username and the euid of root
return self.df[(self.df['auid'] == 1000) & (self.df['euid'] == 0)]
| racerman300zx/audit_log | modules/auditreader.py | auditreader.py | py | 1,730 | python | en | code | 0 | github-code | 13 |
40166133253 | import main
def menu():
print("Welcome to Nim Game\n")
print("\n")
print("****************************\n")
print(" Welcome to Menu \n")
print(" Please choose a Option \n")
print("****************************\n")
print(" 1) MinMax \n")
print(" 2) Tree alpha-bea \n")
print(" 3) Exit \n")
print("Rules:\n"
"1) There are 15 sticks in 3 arrows.(See image)\n")
print("****************************\n")
def menuOption():
menu()
option = input("Option: ")
print ("\n")
return option
def optionValidated(option):
if(option != "1" and option != "2" and option != "3"):
print("Please, give a Valid Option\n")
option2 = menuOption
return option2
if(option == "1" or option == "2" or option == "3"):
return option
def startGame():
flag = True
while flag != False:
option = menuOption()
optionValidate = optionValidated(option)
if optionValidate == "1":
#main.Tablero().Mostrar()
game = main.Game(15, "H")
main.main_play(game)
root = main.Node(game.initial)
newroot = main.makeTreeMinimax(root,game)
#main.representTree(newroot, game)
if optionValidate == "2":
game = main.Game(15,"H")
main.main_play(game)
root = main.Node(game.initial)
newroot = main.makeTreeAplhaBeta(root,game)
if optionValidate == "3":
flag = False
if __name__ == '__main__':
startGame()
| lancal/TallerIA | TallerIA.py | TallerIA.py | py | 1,644 | python | en | code | 0 | github-code | 13 |
37313694815 | import openpyxl
import os
wb = openpyxl.Workbook()
wb.get_sheet_names()
sheet = wb.get_sheet_by_name('Sheet')
sheet['A1'].value
sheet['A2'] = 'Hello'
os.chdir('c:\\Users\\All\\Documents')
wb.save('example.xlsx') | MagsMagnoli/automate-boring-stuff-python | 5_excel_spreadsheets.py | 5_excel_spreadsheets.py | py | 213 | python | en | code | 0 | github-code | 13 |
18375678051 | import os
from dotenv import load_dotenv
import datetime
import csv
import openai
def openai_request():
# Prompt the user for text input
prompt = input("Enter your prompt: ")
keyword = input("Enter your keyword: ")
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a whimsical storyteller."},
{"role": "user", "content": prompt}
],
max_tokens=200
)
# Print the API response
text = response['choices'][0]['message']['content'].strip().replace("\"", "")
return [prompt, keyword, text]
def data_to_files(text_path, prompts_path, data):
current_date = datetime.datetime.now().strftime("%m_%d_%y")
prompt = data[0]
keyword = data[1]
text = data[2]
text_row_data = [text, "true"]
prompt_row_data = [current_date + "_" + keyword, prompt]
# Open the CSV file in append mode
with open(text_path, mode="a", newline="\n") as text_file:
# Create a CSV writer object
csv_writer = csv.writer(text_file)
# Write the new row to the CSV file
csv_writer.writerow(text_row_data)
# Cleanup by closing the CSV file
text_file.close()
# Open the CSV file in append mode
with open(prompts_path, mode="a", newline="\n") as prompts_file:
# Create a CSV writer object
csv_writer = csv.writer(prompts_file)
# Write the new row to the CSV file
csv_writer.writerow(prompt_row_data)
# Cleanup by closing the CSV file
prompts_file.close()
if __name__ == "__main__":
load_dotenv()
data = openai_request()
text_path = "data/text.csv"
prompts_path = "data/prompts.csv"
data_to_files(text_path, prompts_path, data) | ctuguinay/AutomaticTiktok | openai_api.py | openai_api.py | py | 1,842 | python | en | code | 0 | github-code | 13 |
35327057288 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------- #
# --------------- Importations --------------- #
# ------------------------------------------------------------------- #
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
from torchvision import datasets, models, transforms
import time
import os
import copy
from Generation_image_et_generateur import *
import json
with open('parametres_ResNet18.json') as f:
data = json.load(f)
# ------------------------------------------------------------------- #
# --------------- Paramètres --------------- #
# ------------------------------------------------------------------- #
## Hyper-parametres récupérés d'un fichier JSON
pretrained = bool(data['pretrained'])
nb_epoch = data['nb_epoch']
learning_rate = data['learning_rate']
step_size= data['step_size']
gamma= data['gamma']
train_size = data['train_size']
val_size = data['val_size']
batch_size = data['batch_size']
nombre_classe = data['nombre_classe']
# Machine = CPU ou GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# ------------------------------------------------------------------- #
# --------------- fonctions --------------- #
# ------------------------------------------------------------------- #
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
# Iterate over epoch
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
size = train_size
else:
model.eval() # Set model to evaluate mode
size = val_size
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for images, labels in generateur_image(size, batch_size):
images = images.to(device)
labels = labels.to(device)
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(images)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * images.size(0)
running_corrects += torch.sum(preds == labels)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / size
epoch_acc = running_corrects.double() / size
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
writer.add_scalar('accuracy', epoch_acc, epoch)
writer.add_scalar('loss', epoch_loss, epoch)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, best_acc
# ------------------------------------------------------------------- #
# --------------- Programme --------------- #
# ------------------------------------------------------------------- #
model = models.resnet18(pretrained=pretrained) # Choix du modèle
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, nombre_classe) # Redimensionnement de la derniere couche
model = model.to(device)
# Choix du critère
criterion = nn.CrossEntropyLoss()
# Choix de l'optimiseur
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# Choix du scheduler
step_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
## Tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter("runs")
# Entrainement
model, b_acc = train_model(model, criterion, optimizer, step_lr_scheduler, num_epochs=nb_epoch)
writer.close() | Timbar84/Projet_Spiruline_2A | pytorch_spiruline.py | pytorch_spiruline.py | py | 4,998 | python | en | code | 0 | github-code | 13 |
16084642622 | """
Author: Missy Shi
Course: math 458
Date: 04/23/2020
Project: A3 - 1
Description:
Write a Python function which, given n,
returns a list of all the primes less than n.
There should be 25 primes less than 100, for instance.
Task:
How many prime numbers are there which are less than 367400?
"""
import math
def is_prime(n: int) -> bool:
""" Check if a integer is prime,
If it is, return True, else return False """
if n < 2:
# print(f"{n} is not a prime number")
return False
else:
for i in range(2, n):
if (n % i) == 0:
# print(f"{n} is not a prime number")
# print(f"{n} divides {i} = {n//i}")
return False
return True
def q1():
""" Find primes less than given number n """
# n = int(input('Input an integer: '))
n = 367400
pl = []
for i in range(1, n):
if is_prime(i) is True:
pl.append(i)
count = len(pl)
print(f"{count} prime numbers less than {n}")
# print(pl)
return
def main():
"""main program runner"""
q1()
if __name__ == '__main__':
main() | missystem/math-crypto | prime.py | prime.py | py | 1,158 | python | en | code | 0 | github-code | 13 |
38917916659 | """
Titanic prediction script
"""
import sys
from prediction import train, predict
if __name__ == "__main__":
test = "data/" + sys.argv[1]
model = sys.argv[2]
if model not in {"glm", "rf", "gb"}:
raise ValueError("Not valid option for model")
model = train("data/train.csv", model)
predict(test, model)
| MenciusChin/Kaggle | titanic/titanic.py | titanic.py | py | 336 | python | en | code | 1 | github-code | 13 |
36509398264 | #%%
import numpy
import tensorflow as tf
import matplotlib.pyplot as plt
# Initializes arrays of values for the training session
celsius = numpy.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit = numpy.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
# Initializes a simple neural network with 3 dense layers, 2 of them with 3 neurons
# the first with one input and the last with one output
hiddenLayer01 = tf.keras.layers.Dense(units=3, input_shape=[1])
hiddenLayer02 = tf.keras.layers.Dense(units=3)
outputLayer = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([hiddenLayer01, hiddenLayer02, outputLayer])
# Sets the learning rate for the model
model.compile(
optimizer = tf.keras.optimizers.Adam(0.00001),
loss = tf.keras.losses.MeanSquaredError()
)
print('\nFinished initializing data.')
# Trains the model for 1000 epochs
print ('\nStarting model training...')
history = model.fit(celsius, fahrenheit, epochs=200000, verbose=False)
print('Finished training!')
# Tries to guess a value that wasn't given in the examples with the learnt info
testResult = model.predict([97])
print('\nLets try to guess the temperature of 97 degrees celsius:')
print('The predicted value for 97°C is: ', testResult, '°F, the right value is 206.6°C')
# Displays the used weights and biases for the model
# print('\nThe used weights and biases for the model are:')
# print(model.get_weights())
# Displays a graph of the loss function
# print('\nDisplaying a graph of the loss function...')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss vs. Epoch')
plt.plot(history.history['loss'])
# %%
| miquel-gb/neural-network-tests | neural_network_temp_improved.py | neural_network_temp_improved.py | py | 1,622 | python | en | code | 1 | github-code | 13 |
33038211606 | import sys
from pprint import pprint
sys.stdin =open("input.txt","r",encoding='UTF8')
#encoding= UTF 8 필수
T = 10
for test_case in range(1, T + 1):
t = int(input())
cnt = 0
str2 = input()
str1 = input()
A=len(str1)-len(str2)
for i in range(A+1):
cnt2 = 0
B=str1[i]
C=str2[0]
if B == C:
for j in range(len(str2)):
D=str1[i + j]
F=str2[j]
if D == F:
cnt2 += 1
if cnt2 == len(str2):
cnt += 1
print(f"#{t} {cnt}")
| Seobway23/Laptop | Algorithm/february_class/0209/string array.py | string array.py | py | 587 | python | en | code | 0 | github-code | 13 |
19737829302 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from mafengwoSpider.items import MafengwospiderItem, SpotItem
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
def getinfo(item):
if isinstance(item, MafengwospiderItem):
sql = "INSERT INTO mdd(mdd_id, province, mdd_name, mdd_href)VALUES(%s, %s, %s, %s)"
params = (item['mddid'], item['name'], item['cityname'], item['href'])
return sql, params
elif isinstance(item, SpotItem):
sql = "INSERT INTO scenic_spots(mdd_id, mdd_name, spot_name, spot_href) VALUES (%s, %s, %s, %s)"
params = (item['mddid'], item['cityname'], item['spotname'], item['spothref'])
return sql, params
else:
sql = "INSERT INTO spot_comments(spot_name, comment_user, comment_text) VALUES (%s, %s, %s)"
params = (str(item['spot_name']), item['comment_user'], item['comment_text'])
return sql, params
class MafengwospiderPipeline(object):
def __init__(self):
self.connect = pymysql.connect(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
port=settings['MYSQL_PORT'],
user=settings['MYSQL_USER'],
passwd=settings['MYSQL_PASSWD'],
charset='utf8',
use_unicode= True)
self.cursor = self.connect.cursor();
def process_item(self, item, spider):
sql, params = getinfo(item)
try:
self.cursor.execute(sql, params)
self.connect.commit()
except Exception as e:
# 事务回滚
self.connect.rollback()
print('except:', e.message)
def close_spider(self, spider):
self.cursor.close()
self.connect.close()
| tianzhencong/scrapy_with_mafengwo | mafengwoSpider/mafengwoSpider/pipelines.py | pipelines.py | py | 1,915 | python | en | code | 1 | github-code | 13 |
2726148255 | # Simple Measure Program Execution Time 1.2
# Original Code by Udacity (https://www.udacity.com/blog/2021/09/create-a-timer-in-python-step-by-step-guide.html)
import time
our_list = list(range(1000000))
element = 898989
start = time.time()
for el in our_list:
if el == element:
break
end = time.time()
print(end - start) | adrhmdlz/Python-timer | timer4.py | timer4.py | py | 338 | python | en | code | 0 | github-code | 13 |
35185132781 | import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
from csv import reader
import pandas as pd
import csv
import re
from urllib.request import urlopen
url = ""
counter = 0
with open('topmillion.csv', newline='') as csvfile:
with open('scraped_robots.txt', 'w') as file:
data = csv.DictReader(csvfile)
print("|--------------------------|")
print("| ~~~~~ Scrape Time! ~~~~~ |")
print("|--------------------------|")
for row in data:
if counter == 10:
break
url = "https://www." + row['URL'] + '/robots.txt'
try:
page = urlopen(url)
#print("COUNTER" + str(counter))
html = page.read().decode("utf-8")
print(html)
pattern = "/"
match_results = re.search(pattern, html, re.IGNORECASE)
title = match_results.group()
title = re.sub("<.*?>", "", title) # Remove HTML tags
file.write("~"*30 + "\r\n")
file.write(url + "\r\n")
file.write(html + "\r\n")
print("Writing: " + html)
file.write("~"*30)
counter += 1
except Exception as inst:
print("Error opening: " + url)
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
print(inst)
counter += 1
| Destroyer7s/robot-skimmer | scraper.py | scraper.py | py | 1,631 | python | en | code | 0 | github-code | 13 |
5339577405 | import streamlit as st
import pandas as pd
import joblib
def app():
st.title("Zomato Restaurant Rating Prediction")
# Load the train.csv file
train_df = pd.read_csv("artifacts/train.csv")
# Get the unique values from the location column
unique_locations = train_df["location"].unique()
# Create a selectbox for the location field
location = st.selectbox("What is the location of this restaurant?", unique_locations)
st.write("You selected:", location)
# Get the unique values from the rest_type column
unique_rest_types = train_df["rest_type"].unique()
# Create a selectbox for the rest_type field
rest_type = st.selectbox("What is the type of restaurant?", unique_rest_types)
st.write("You selected:", rest_type)
# Get the unique values from the cuisines column
unique_cuisines = train_df["cuisines"].unique()
# Create a selectbox for the cuisines field
cuisines = st.selectbox("What cuisines does this restaurant serve?", unique_cuisines)
st.write("You selected:", cuisines)
# Create a text input for the cost of two field
cost = st.text_input("What is the estimated cost for two people?")
st.write("You entered:", cost)
# Create a text input for the votes field
votes = st.text_input("How many votes has this restaurant received?")
st.write("You entered:", votes)
# Create a selectbox for online_order
online_order = st.selectbox("Does the restaurant accept online orders?", ("Yes", "No"))
st.write("You selected:", online_order)
# Create a selectbox for book_table
book_table = st.selectbox("Does the restaurant have an option to book a table?", ("Yes", "No"))
st.write("You selected:", book_table)
if st.button("Submit"):
# Load the model
model = joblib.load("artifacts/model.joblib")
# Convert 'cost' and 'votes' to numerical data types
cost = float(cost)
votes = int(votes)
# Convert 'online_order' and 'book_table' to boolean values
online_order = online_order == "Yes"
book_table = book_table == "Yes"
features = {
"location": [location],
"rest_type": [rest_type],
"cuisines": [cuisines],
"cost": [cost],
"votes": [votes],
"online_order": [online_order],
"book_table": [book_table]
}
# Predict the rating
rating = model.predict(features)[0]
# Display the predicted rating
st.write("Predicted rating:", rating)
if __name__ == "__main__":
app()
| Nimish3011/Restaurant-Rating-Prediction | static/App.py | App.py | py | 2,595 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.