seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# Feature-name groups used to assemble candidate feature sets for the model.
# (Cleaned: dataset-dump metadata was fused into the first line of this record.)
text_based = ["perspective_score", "identity_attack",
              "sentiment",
              "Please", "Please_start", "HASHEDGE",
              "Indirect_(btw)",
              "Hedges",
              "Factuality", "Deference", "Gratitude", "Apologizing",
              "1st_person_pl.", "1st_person", "1st_person_start",
              "2nd_person", "2nd_person_start",
              "Indirect_(greeting)", "Direct_question", "Direct_start",
              "HASPOSITIVE", "HASNEGATIVE", "SUBJUNCTIVE", "INDICATIVE",
              ]
# Review-log features: the "G" dataset additionally logs review_time.
G_logs_based = ["rounds", "shepherd_time", "review_time"]
OSS_logs_based = ["rounds", "shepherd_time"]
length = ["length"]


def get_feature_set(dat):
    """Return the three candidate feature groups for a dataset.

    :param dat: dataset tag; ``"G"`` selects the G log features,
        anything else selects the OSS log features.
    :return: ``[text_based, logs_based, text_based + logs_based]``
    """
    logs_based = G_logs_based if dat == "G" else OSS_logs_based
    return [
        text_based,
        logs_based,
        text_based + logs_based,
    ]
| null | main/get_feature_set.py | get_feature_set.py | py | 922 | python | en | code | null | code-starcoder2 | 51 |
106691790 | with open('file_sample.txt', 'rb') as f:
    # File is opened in BINARY mode, so every read yields bytes, not str.
    lines = [x.strip() for x in f.readlines()]
count = 0
for line in lines:
    tmp = line.strip().lower()
    # bytes.replace requires bytes arguments (b'...') because tmp is bytes.
    words = tmp.replace(b'line',b'Line')
    print(words)
    # BUG DEMO: iterating a bytes object yields one int per byte, not words.
    for word in words:
        # An int can never compare equal to the str 'line', so this branch
        # never fires and count stays 0 -- this is the point of the demo.
        if(word == 'line'):
            count= count+1
print('Count: '+str(count))
# The count won't be correct if you are reading the file in binary mode
# Decoding bytes -> str first lets ordinary string operations behave normally.
text_in_binary = b'Sky is blue.Roses are red'.decode('utf-8')
print(type(text_in_binary))
replaced_text = text_in_binary.replace('red','blue')
print(replaced_text) | null | byte_like_error/bytelikeerror_split.py | bytelikeerror_split.py | py | 550 | python | en | code | null | code-starcoder2 | 51 |
514071216 | #!/usr/bin/env python
import json
from pprint import pprint
from robot_localization.srv import SetPose
from geometry_msgs.msg import PoseWithCovarianceStamped
from mavros_msgs.srv import StreamRate
from std_msgs.msg import Bool
import sys
import rospy
import importlib
class TaskPlanner:
    """Load a named plan from a JSON master-plan file and run its tasks in order.

    Command line: task_planner <plans_file.json> <tasks_dir> <plan_name>
    """

    NODE_NAME = 'task_planner'

    # REFACTOR THIS
    # Result codes a task's run() is expected to return.
    CONTINUE = 1
    FINISHED = 2

    def __init__(self):
        rospy.init_node(self.NODE_NAME, log_level=rospy.INFO)
        plans_filename = sys.argv[1]
        tasks_path = sys.argv[2]
        self.plan_name = sys.argv[3]
        # Task modules are imported by name, so their directory must be on sys.path.
        sys.path.append(tasks_path)
        with open(plans_filename) as plans_file:
            self.masterplan = json.load(plans_file)
        self.init_tasks(self.masterplan)
        # Fix: init_plan previously returned None (it only set self.tasks_plan),
        # which made self.plan always None.  It now returns the ordered task list.
        self.plan = self.init_plan(self.masterplan, self.plan_name)
        self.disable_x = rospy.Publisher('/global_x/pid_enable', Bool, queue_size=10)
        self.disable_y = rospy.Publisher('/global_y/pid_enable', Bool, queue_size=10)
        self.disable_z = rospy.Publisher('/global_z/pid_enable', Bool, queue_size=10)
        self.disable_roll = rospy.Publisher('/global_roll/pid_enable', Bool, queue_size=10)
        self.disable_pitch = rospy.Publisher('/global_pitch/pid_enable', Bool, queue_size=10)
        self.disable_yaw = rospy.Publisher('/global_yaw/pid_enable', Bool, queue_size=10)

    def init_tasks(self, masterplan):
        """Instantiate every task class listed in the master plan."""
        self.tasks = []
        for task_info in masterplan['tasks']:
            rospy.loginfo('Initializing task ' + task_info['name'])
            task = getattr(importlib.import_module(task_info['modulename']), task_info['classname'])()
            self.tasks.append(task)

    def init_plan(self, masterplan, plan_name):
        """Resolve plan_name to its ordered list of task objects.

        Sets self.tasks_plan and returns the same list.  Raises if the plan
        name is not present in the master plan.

        Fix: previously returned None and stored a lazy ``map`` object that
        could only be iterated once; the list is now materialized.
        """
        target_plan = None
        for plan in masterplan['plans']:
            if plan['name'] == plan_name:
                target_plan = plan
                break
        if target_plan is None:
            raise Exception('Plan ' + plan_name + ' not found')
        task_names = target_plan['tasks']
        self.tasks_plan = [self._get_task_from_name(name) for name in task_names]
        return self.tasks_plan

    def _get_task_from_name(self, name):
        """Return the first initialized task whose .name matches.

        Raises IndexError if no task with that name exists.
        """
        rospy.loginfo('Getting task for name ' + name)
        return list(filter(lambda task: task.name == name, self.tasks))[0]

    def run(self):
        """Execute the plan: reset state estimation, then run each task to completion."""
        rospy.wait_for_service('/set_pose')
        sp = rospy.ServiceProxy('/set_pose', SetPose)
        zero_pose = PoseWithCovarianceStamped()
        zero_pose.pose.pose.orientation.w = 1
        #sp(zero_pose)
        rospy.wait_for_service('/mavros/set_stream_rate')
        ssr = rospy.ServiceProxy('/mavros/set_stream_rate', StreamRate)
        ssr(0, 15, 1)
        rate = rospy.Rate(15)
        for task in self.tasks_plan:
            rospy.loginfo('Starting task: ' + task.name)
            task.pre_run_base()
            task.pre_run()
            while not rospy.is_shutdown():
                result = task.run()
                if result == self.CONTINUE:
                    pass
                elif result == self.FINISHED:
                    break
                rate.sleep()
        self.disable_pid()

    def disable_pid(self):
        """Publish False to every global PID enable topic, disabling the controllers."""
        self.disable_x.publish(False)
        self.disable_y.publish(False)
        self.disable_z.publish(False)
        self.disable_roll.publish(False)
        self.disable_pitch.publish(False)
        self.disable_yaw.publish(False)
if __name__ == '__main__':
TaskPlanner().run()
| null | catkin_ws/src/task_planning/scripts/task_planner.py | task_planner.py | py | 3,511 | python | en | code | null | code-starcoder2 | 51 |
625845206 | #!/usr/bin/env python
# ******************************* PLEASE DO NOT MODIFY *******************************
import os

# Each section label in testCases.txt maps to (runner sub-command,
# expected comma-separated argument count, arity text for the error message).
TEST_FUNCTIONS = {
    "STRLEN:": ("strlen", 1, "a single argument"),
    "STRCMP:": ("strcmp", 2, "two arguments"),
    "STRTRUNC:": ("strtrunc", 2, "two arguments"),
    "STRREV:": ("strrev", 3, "three arguments"),
    "PALINDROME:": ("palindrome", 1, "a single argument"),
    "STRFIND:": ("strfind", 2, "two arguments"),
}

# Fix: currFunction was previously unbound (NameError) if the first
# non-blank line of testCases.txt was not a "TEST CASES FOR ..." header.
currFunction = None

with open('testCases.txt') as fp:
    for line in fp:
        if line.isspace():
            continue
        if line.startswith("TEST CASES FOR"):
            # Section header: remember which function the following lines test.
            currFunction = line.split()[-1]
            print("<----------------------------------------------------------------->")
            print("TESTING " + currFunction)
        else:
            parsedLine = line.split(",")
            if currFunction in TEST_FUNCTIONS:
                command, arity, arity_text = TEST_FUNCTIONS[currFunction]
                if len(parsedLine) != arity:
                    print("Error: " + command + " takes " + arity_text)
                else:
                    os.system("./pa3-runner " + command + " " + " ".join(parsedLine))
            else:
                print("Error: unrecognized function")
# NOTE(review): the original indentation of this closing separator was lost in
# extraction; it is assumed to print once after all sections -- confirm.
print("<----------------------------------------------------------------->")
| null | ucsd-cse30/pa/pa3-jams-master/runTests.py | runTests.py | py | 2,441 | python | en | code | null | code-starcoder2 | 51 |
442610606 | """
Pisano Period
In number theory, the nth Pisano period, written π(n), is the period with which
the sequence of Fibonacci numbers taken modulo n repeats.
https://en.wikipedia.org/wiki/Pisano_period
"""
import tortoise_and_hare2 as th
def fib_seq(n):
    """Return the first n Fibonacci numbers, starting from 1: 1, 1, 2, 3, 5, ...

    Fix: the loop previously ran ``range(2, n - 1)`` and returned only n - 2
    terms even though the docstring promised a sequence up to n.  Non-positive
    n now returns an empty list.
    """
    if n <= 0:
        return []
    nums = [0, 1]
    for _ in range(2, n + 1):
        nums.append(nums[-1] + nums[-2])
    return nums[1:]  # Omit the leading 0
def pisano(m, seq):
    """Reduce every term of a Fibonacci sequence modulo ``m``.

    The reduced sequence is eventually periodic; its period is the
    Pisano period pi(m).
    """
    return [term % m for term in seq]
# Demo: detect the Pisano period of F mod 4 using Floyd's cycle-finding
# ("tortoise and hare") algorithm from the local tortoise_and_hare2 module.
pisano_seq = pisano(4, fib_seq(20))
print(fib_seq(20))
print(pisano_seq)
# tortoise_and_hare is expected to return (cycle start index, cycle length).
start, length = th.tortoise_and_hare(pisano_seq)
print("Start: {}, Length: {}".format(start, length))
| null | algorithms/cycle_detection/pisano_period.py | pisano_period.py | py | 762 | python | en | code | null | code-starcoder2 | 51 |
330718015 | import glob
import requests
import json
import ExperimentBoiler
import geoDonorMinimiser
import geoBiosampleMinimiser
import urlparse
import sys
from time import sleep
# HTTP headers for the ENCODE REST API; all requests/responses are JSON.
HEADERS = {'accept': 'application/json'}
GET_HEADERS = {'accept': 'application/json'}
POST_HEADERS = {'accept': 'application/json',
                'Content-Type': 'application/json'}
# Target server: production by default, test server kept for convenience.
#SERVER = "https://test.encodedcc.org/"
SERVER = "https://www.encodeproject.org/"
def encoded_get(url, keypair=None, frame='object', return_response=False):
    """GET a JSON resource from an ENCODE server, retrying transient failures.

    Ensures the query string carries format=json, frame=<frame> and limit=all
    unless already supplied by the caller.  Connection/SSL errors are retried
    up to 10 times with a growing sleep.  Returns the parsed JSON body, or the
    raw requests.Response when return_response is True.

    NOTE(review): if every retry fails, the loop falls through and the
    function implicitly returns None -- callers must tolerate that.
    NOTE(review): this file is Python 2 (urlparse module, print >> below).
    """
    url_obj = urlparse.urlsplit(url)
    new_url_list = list(url_obj)
    query = urlparse.parse_qs(url_obj.query)
    # Append any defaults missing from the query component (index 3).
    if 'format' not in query:
        new_url_list[3] += "&format=json"
    if 'frame' not in query:
        new_url_list[3] += "&frame=%s" % (frame)
    if 'limit' not in query:
        new_url_list[3] += "&limit=all"
    # If the original query was empty, the string now starts with a stray '&'.
    if new_url_list[3].startswith('&'):
        new_url_list[3] = new_url_list[3].replace('&', '', 1)
    get_url = urlparse.urlunsplit(new_url_list)
    max_retries = 10
    max_sleep = 10
    while max_retries:
        try:
            if keypair:
                response = requests.get(get_url,
                                        auth=keypair,
                                        headers=GET_HEADERS)
            else:
                response = requests.get(get_url, headers=GET_HEADERS)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError) as e:
            # Python 2 syntax: log the error to stderr, back off, retry.
            print >> sys.stderr, e
            sleep(max_sleep - max_retries)
            max_retries -= 1
            continue
        else:
            if return_response:
                return response
            else:
                return response.json()
def getKeyPair(path_to_key_pair_file, server_name):
    """Read the JSON keypair file and return (key, secret) for server_name.

    Fix: the file handle was previously left open if json parsing raised;
    the context manager guarantees it is closed.

    :raises KeyError: if server_name has no entry in the file.
    """
    with open(path_to_key_pair_file, 'r') as keysf:
        keys = json.load(keysf)
    key_dict = keys[server_name]
    return (key_dict['key'], key_dict['secret'])
def extract_biosamples(exp):
    """Collect unique accessions of released biosamples reachable from a
    released experiment through its released replicates and libraries."""
    if exp['status'] != 'released':
        return []
    if 'replicates' not in exp or len(exp['replicates']) == 0:
        return []
    accessions = set()
    for rep in exp['replicates']:
        # Mirror the original short-circuit: only inspect nested objects
        # once the enclosing object is known to be released.
        if rep['status'] != 'released':
            continue
        library = rep['library']
        if library['status'] != 'released':
            continue
        biosample = library['biosample']
        if biosample['status'] == 'released':
            accessions.add(biosample['accession'])
    return list(accessions)
def extract_controls(exp):
    """Return unique accessions of the experiments listed in possible_controls."""
    if 'possible_controls' not in exp or len(exp['possible_controls']) == 0:
        return []
    return list({control['accession'] for control in exp['possible_controls']})
def extract_donors(biosamples_list):
    """Return unique accessions of released donors attached to released biosamples."""
    accessions = set()
    for biosample in biosamples_list:
        # Same short-circuit order as before: status, presence, donor status.
        if biosample['status'] != 'released':
            continue
        if 'donor' not in biosample:
            continue
        donor = biosample['donor']
        if donor['status'] == 'released':
            accessions.add(donor['accession'])
    return list(accessions)
# --- Script: report which GEO-submitted experiments belong to ENCODE3 ---
keypair = getKeyPair('keypairs.json', 'test')
AUTHID = keypair[0]
AUTHPW = keypair[1]
# phase 1 - collect all experiments submitted so far.
# The accession is encoded in the filename as <accession>_*.json.
submittedExperiments = set()
for filename in glob.glob('../experiments/*.json'):
    submittedExperiments.add(filename.split('/')[2].split('_')[0])
e3 =0
other =0
m = 0  # progress counter
f_e3 = open('e3_submitted_to_geo.tsv', "w")
x = open('not_e3_submitted_to_geo.tsv', "w")
for experiment in submittedExperiments:
    # Fetch the fully embedded experiment object from the ENCODE portal.
    URL = SERVER + experiment + "/?frame=embedded&format=json"
    response = requests.get(URL, auth=(AUTHID, AUTHPW), headers=HEADERS)
    experiment_o = response.json()
    # Split by award RFA: ENCODE3 vs everything else, one TSV file each.
    if experiment_o['award']['rfa']=='ENCODE3':
        e3 += 1
        f_e3.write(experiment + "\t" + str(experiment_o['dbxrefs']) + '\t' +experiment_o['award']['rfa'] + '\n')
    else:
        other += 1
        x.write(experiment + "\t" + str(experiment_o['dbxrefs']) + '\t' + experiment_o['award']['rfa']+ '\n')
    m += 1
    if m % 10 == 0:
        print ('processed ' + str(m))
print ('E3 = ' + str(e3) + ' other = ' + str(other))
f_e3.close()
x.close()
| null | src/report_script.py | report_script.py | py | 4,239 | python | en | code | null | code-starcoder2 | 51 |
125082187 | #! usr/bin/env python3
#encoding: utf-8
import functools
def log(text=None):
    """Decorator that prints the decorated function's name before each call.

    Supported usages:
        @log          -- prints "func()"
        @log('msg')   -- prints "msg func()"
        @log()        -- prints "func()"  (fix: this form previously crashed,
                         because the inner decorator ended up wrapping None)

    The decorated function's return value is passed through unchanged.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if isinstance(text, str):
                print('%s %s()' % (text, func.__name__))
            else:
                print('%s()' % func.__name__)
            return func(*args, **kw)
        return wrapper
    # Bare @log: `text` is actually the function being decorated.
    if callable(text):
        return decorator(text)
    # @log(...) / @log(): return the real decorator.
    return decorator
# Demo of the bare @log usage: prints "time()" before the body runs.
@log
def time():
    """Print a fixed date string.

    NOTE(review): the name shadows the stdlib ``time`` module name.
    """
    print('2017-06-21')
if __name__=='__main__':
    time()
| null | decorator.py | decorator.py | py | 627 | python | en | code | null | code-starcoder2 | 51 |
564388264 | from pytorch_metric_learning import losses, miners, trainers
import numpy as np
import pandas as pd
from torchvision import datasets, models, transforms
import torch.nn as nn
import torch.optim
import logging
from torch.utils.data import Dataset
from PIL import Image
from cub2011 import Cub2011
from mobilenet import mobilenet_v2
logging.getLogger().setLevel(logging.INFO)
# This is a basic multilayer perceptron
# This code is from https://github.com/KevinMusgrave/powerful_benchmarker
# This is a basic multilayer perceptron
# This code is from https://github.com/KevinMusgrave/powerful_benchmarker
class MLP(nn.Module):
    """Stack of Linear layers, each (except possibly the last) preceded by ReLU.

    layer_sizes[0] is the input dimension, layer_sizes[-1] the output
    dimension.  When final_relu is True a ReLU also precedes the last Linear.
    """

    def __init__(self, layer_sizes, final_relu=False):
        super().__init__()
        sizes = [int(s) for s in layer_sizes]
        num_layers = len(sizes) - 1
        # Index below which a ReLU is inserted before the Linear layer.
        last_relu_idx = num_layers if final_relu else num_layers - 1
        modules = []
        for idx, (in_dim, out_dim) in enumerate(zip(sizes, sizes[1:])):
            if idx < last_relu_idx:
                modules.append(nn.ReLU(inplace=True))
            modules.append(nn.Linear(in_dim, out_dim))
        self.net = nn.Sequential(*modules)
        # Expose the final Linear layer, as downstream code expects.
        self.last_linear = self.net[-1]

    def forward(self, x):
        return self.net(x)
# This is for replacing the last layer of a pretrained network.
# This code is from https://github.com/KevinMusgrave/powerful_benchmarker
# This is for replacing the last layer of a pretrained network.
# This code is from https://github.com/KevinMusgrave/powerful_benchmarker
class Identity(nn.Module):
    """Pass-through module: returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # No transformation -- used to strip a pretrained net's head.
        return x
class Normalize(nn.Module):
    """Apply 1-D batch normalization over ``num_feat`` features."""

    def __init__(self, num_feat):
        super().__init__()
        # Learnable batch-norm layer (attribute name kept for state_dict compat).
        self.bn1 = nn.BatchNorm1d(num_feat)

    def forward(self, x):
        return self.bn1(x)
#####################
### tambahan ########
#####################
class StandfordProducts(Dataset):
    """Stanford Online Products dataset: (transformed image, class_id) pairs.

    Expects ``<root>/Info_Files/Ebay_{train,test}.txt`` (space-delimited, with
    ``path`` and ``class_id`` columns) and images under ``<root>/<image_path>/``.
    """

    def __init__(self, root, image_path, transform, train=True):
        if train:
            info_path = '/Info_Files/Ebay_train.txt'
        else:
            info_path = '/Info_Files/Ebay_test.txt'
        files = pd.read_csv(root + info_path, header=0, delimiter=' ',
                            usecols=['path', 'class_id'])[['path', 'class_id']]
        # Fix: orient='records' (plural); 'record' is a deprecated alias.
        self.data = files.to_dict(orient='records')
        # Fix: root is now stored on the instance; __getitem__ previously read
        # a module-level global of the same name.
        self.root = root
        self.image_path = image_path
        self.transform = transform

    def __getitem__(self, index):
        image = Image.open(self.root + '/' + self.image_path + '/' + self.data[index]['path'])
        # Some product photos are grayscale/CMYK; force 3-channel RGB.
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return self.transform(image), self.data[index]['class_id']

    def __len__(self):
        return len(self.data)
class CustomerToShop(Dataset):
    """DeepFashion Consumer-to-Shop dataset: (transformed image, item_id) pairs.

    Expects ``<root>/Eval/list_eval_partition_new.txt`` (tab-delimited after a
    one-line count header) with image_path, item_id and evaluation_status
    columns; images live under ``<root>/``.
    """

    def __init__(self, root, transform, train=True):
        files = pd.read_csv(root + '/Eval/list_eval_partition_new.txt', header=0,
                            delimiter='\t', skiprows=1)[['image_path', 'item_id', 'evaluation_status']]
        if train:
            str_query = "evaluation_status == 'train'"
        else:
            str_query = "evaluation_status == 'test' "  # or evaluation_status == 'val'
        # Fix: orient='records' (plural); 'record' is a deprecated alias.
        self.data = files.query(str_query).to_dict(orient='records')
        # Fix: root is now stored on the instance; __getitem__ previously read
        # a module-level global of the same name.
        self.root = root
        for dt in self.data:
            # Fix: ids look like 'id_00000010'.  int() parses past leading
            # zeros; the old str.strip('0') also removed TRAILING zeros,
            # turning id 10 into 1.
            dt['item_id'] = int(dt['item_id'][3:])
        self.transform = transform

    def __getitem__(self, index):
        image = Image.open(self.root + '/' + self.data[index]['image_path'])
        # Force 3-channel RGB for non-RGB source images.
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return self.transform(image), self.data[index]['item_id']

    def __len__(self):
        return len(self.data)
#class DatasetConfig:
# source_path=''
# image_path=''
#
#
#def getOnlineProducts(conf, train=True) :
# #read text flie
# if train :
# #files = pd.read_table(conf.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ',usecols=['path','class_id'])
# files = pd.read_csv(conf.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ',usecols=['path','class_id'])[['path','class_id']]
#
# else:
# files = pd.read_table(conf.source_path+'/Info_Files/Ebay_test.txt', header=0, delimiter=' ', usecols=['path','class_id'])
# #print("training files :\n {0}".format(training_set['path'][0]))
# #print("test files :\n {0}".format(test_files))
## with open(conf.source_path+'/Info_Files/Ebay_train.txt',newline='') as csvfile:
## training_set = csv.DictReader(csvfile)
## for row in training_set:
## print("training dict :\n {0}".format(row))
##
# #training_set = training_files['path']['class_id'][:]
# return files.values.tolist()
# record_keeper is a useful package for logging data during training and testing
# You can use the trainers and testers without record_keeper.
# But if you'd like to install it, then do pip install record_keeper
# See more info about it here https://github.com/KevinMusgrave/record_keeper
# Optional logging support: record_keeper + TensorBoard are used only when the
# record_keeper package is installed; otherwise training runs without logging.
try:
    import os
    import errno
    import record_keeper as record_keeper_package
    from torch.utils.tensorboard import SummaryWriter

    def makedir_if_not_there(dir_name):
        """Create dir_name, silently ignoring 'already exists'."""
        try:
            os.makedirs(dir_name)
        except OSError as e:
            # Only EEXIST is acceptable; re-raise anything else.
            if e.errno != errno.EEXIST:
                raise

    pkl_folder = "dml_dist_margin_logs"
    tensorboard_folder = "dml_dist_margin_tensorboard"
    makedir_if_not_there(pkl_folder)
    makedir_if_not_there(tensorboard_folder)
    pickler_and_csver = record_keeper_package.PicklerAndCSVer(pkl_folder)
    tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)
    record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer, pickler_and_csver, ["record_these", "learnable_param_names"])
except ModuleNotFoundError:
    # Trainers/testers below accept record_keeper=None.
    record_keeper = None
##############################
########## Training ##########
##############################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#print(type(device))
# Set trunk model and replace the softmax layer with an identity function
#trunk = models.resnet50(pretrained=True)
#trunk = torch.hub.load('pytorch/vision:v0.5.0', 'mobilenet_v2', pretrained=True)
# Local mobilenet_v2 variant; its classifier head is bypassed downstream.
trunk = mobilenet_v2(pretrained=True)
#print(trunk.last_channel)
#trunk = torch.load('online_product_trunk.pth')
trunk_output_size = trunk.last_channel
#trunk.fc = Identity()
#trunk = torch.hub.load('pytorch/vision:v0.5.0', 'mobilenet_v2', pretrained=True)
#trunk_output_size = trunk.fc.in_features
#trunk.fc = Identity()
#trunk.fc = Normalize(trunk_output_size)
#trunk = torch.nn.DataParallel(trunk.to(device))
trunk = trunk.to(device)
# Set embedder model. This takes in the output of the trunk and outputs 64 dimensional embeddings
#embedder = torch.nn.DataParallel(MLP([trunk_output_size, 64]).to(device))
# NOTE(review): the comment above says 64 dims, but the embedding size is 512.
embedder = MLP([trunk_output_size, 512]).to(device)
#embedder = torch.nn.Linear(trunk_output_size,512).to(device)
#embedder = torch.load('online_product_embedder.pth')
# Set optimizers
trunk_optimizer = torch.optim.Adam(trunk.parameters(), lr=0.00001, weight_decay=0.00005)
embedder_optimizer = torch.optim.Adam(embedder.parameters(), lr=0.00001, weight_decay=0.00005)
# Set the image transform
'''
img_transform = transforms.Compose([transforms.Resize(256),
                                    transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=227),
                                    transforms.RandomHorizontalFlip(0.5),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
'''
# Train: random crop + flip; Test: deterministic resize + center crop.
img_transform_train = transforms.Compose([transforms.RandomResizedCrop(size=227),
                                    transforms.RandomHorizontalFlip(0.5),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
img_transform_test = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(227),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Set the datasets
#train_dataset = datasets.CIFAR100(root="CIFAR100_Dataset", train=True, transform=img_transform, download=True)
#val_dataset = datasets.CIFAR100(root="CIFAR100_Dataset", train=False, transform=img_transform, download=True)
#print(train_dataset)
#print(type(train_dataset))
#train_dataset = getOnlineProducts(conf, train=True)
#val_dataset = getOnlineProducts(conf,train=False)
#
#
# Dataset paths are machine-specific; adjust when reproducing.
root = '/home/m405305/Deep-Metric-Learning-Baselines/Datasets/online_products'
image_path = 'images'
train_dataset = StandfordProducts(root,image_path,transform=img_transform_train,train=True)
val_dataset = StandfordProducts(root,image_path,transform=img_transform_test,train=False)
#root = '/home/m405305/dataset'
#train_dataset = Cub2011(root,transform=img_transform_train,train=True,download=False)
#val_dataset = Cub2011(root,transform=img_transform_test,train=False,download=False)
'''
root = '/home/m405305/Deep-Metric-Learning-Baselines/Datasets/cust-shop'
image_path = 'images'
train_dataset = CustomerToShop(root,transform=img_transform_train,train=True)
val_dataset = CustomerToShop(root,transform=img_transform_test,train=False)
'''
#print (type(val_dataset.__getitem__(10)))
# Set the loss function
loss = losses.TripletMarginLoss(margin=0.01)
#loss = losses.MarginLoss(margin=0.01,nu=1.2,beta=0)
#loss = losses.ContrastiveLoss()
# Set the mining function
#miner = miners.MultiSimilarityMiner(epsilon=0.1)
#miner = miners.DistanceWeightedMiner(cutoff=0, nonzero_loss_cutoff=0.5)
miner = miners.TripletMarginMiner(margin=0.01,type_of_triplets='semihard')
# Set other training parameters
batch_size = 40
num_epochs = 1
iterations_per_epoch = 10
# Package the above stuff into dictionaries.
# NOTE(review): this rebinds `models`, shadowing the torchvision.models import.
models = {"trunk": trunk, "embedder": embedder}
optimizers = {"trunk_optimizer": trunk_optimizer, "embedder_optimizer": embedder_optimizer}
loss_funcs = {"metric_loss": loss}
mining_funcs = {"post_gradient_miner": miner}
trainer = trainers.MetricLossOnly(models,
                                  optimizers,
                                  batch_size,
                                  loss_funcs,
                                  mining_funcs,
                                  iterations_per_epoch,
                                  train_dataset,
                                  record_keeper=record_keeper)
trainer.train(num_epochs=num_epochs)
#torch.save(trainer.models['trunk'],'online_product_trunk.pth')
#torch.save(trainer.models['embedder'],'online_product_embedder.pth')
#############################
########## Testing ##########
#############################
# The testing module requires faiss and scikit-learn
# So if you don't have these, then this import will break
from pytorch_metric_learning import testers
#tester = testers.GlobalEmbeddingSpaceTester(reference_set="compared_to_sets_combined", record_keeper=record_keeper)
tester = testers.GlobalEmbeddingSpaceTester(record_keeper=record_keeper)
dataset_dict = {"train": train_dataset, "val": val_dataset}
epoch = 1
tester.test(dataset_dict, epoch, trunk, embedder)
if record_keeper is not None:
    record_keeper.pickler_and_csver.save_records()
| null | example_MetricLossOnly.py | example_MetricLossOnly.py | py | 12,910 | python | en | code | null | code-starcoder2 | 51 |
458758221 | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,too-many-public-methods,invalid-name,protected-access,no-self-use
"""
ListView pagination tests.
"""
import math
from common.peewee_model import SystemPlatform
from manager.base import InvalidArgumentException
from manager.list_view import ListView
from .vuln_testcase import FlaskTestCase
# Sortable columns: query-parameter name -> peewee field.
SORTABLE = {
    'inventory_id': SystemPlatform.inventory_id,
    'vmaas_json': SystemPlatform.vmaas_json,
    'last_evaluation': SystemPlatform.last_evaluation
}
# No filterable columns are exercised by these tests.
FILTERABLE = {}
# Minimal query object; NoQueryListView never actually executes it.
QUERY = (SystemPlatform.select(SystemPlatform.inventory_id))
URI = 'http://localhost:6666/api/v1/vulnerability/systems'
TOTAL_ITEMS = 127
LIMIT = 5
# Baseline pagination arguments shared by the tests below.
LIST_ARGS = {
    'page': 4,
    'page_size': 5,
    'pages': 66,
    'opt_out': 'foo',
    'limit': LIMIT,
    'offset': 15,
    'total_items': TOTAL_ITEMS
}
# Query-string filters expected to propagate into the pagination links.
QUERY_ARGS = {
    'cvss_from': '2001-01-01', 'cvss_to': '2020-01-01',
    'show_all': True, 'opt_out': True,
    'status_id': 3,
    'inventory_id': 'INV-ID-0001'
}
class NoQueryListView(ListView):
    """Pseudo-view used to test the basic math/param-processing of ListView and links"""
    def __init__(self, query, sortable_columns, filterable_columns, list_args, query_args, uri, total):
        # total_items must be set before ListView.__init__ runs _apply_args.
        self.total_items = total
        super(NoQueryListView, self).__init__(query, sortable_columns, filterable_columns, list_args, query_args, uri)
    def _apply_args(self, args):
        # Intercept so we can ignore the query
        self.active_filter = 'foo'
        self.active_sort = 'bar'
        self.page = args["page"]
        self.page_size = args["page_size"]
        self.limit = args["limit"]
        self.offset = args["offset"]
        # Ceil-divide items into pages; an empty result set still counts as one page.
        pages = math.ceil(self.total_items / self.page_size)
        self.pages = pages if pages > 0 else 1
        if self.page > self.pages:
            raise InvalidArgumentException("Requested page out of range: %s" % self.page)
        if self.offset > self.total_items:
            raise InvalidArgumentException("Requested starting offset out of range: %s" % self.offset)
class TestLinks(FlaskTestCase):
    """Exercise ListView pagination offset math and link generation.

    Offset helpers take (offset, limit, total); link helpers read the view's
    own state set up from LIST_ARGS/QUERY_ARGS.
    """
    def test_first(self):
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        # First page always starts at offset 0 regardless of current offset.
        assert view._get_first(0, LIMIT, TOTAL_ITEMS) == 0
        assert view._get_first(2, LIMIT, TOTAL_ITEMS) == 0
    def test_previous(self):
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        # Previous offset is current minus limit, clamped at 0.
        assert view._get_previous(0, LIMIT, TOTAL_ITEMS) == 0
        assert view._get_previous(20, LIMIT, TOTAL_ITEMS) == 15
        assert view._get_previous(120, LIMIT, TOTAL_ITEMS) == 115
        assert view._get_previous(15, LIMIT, TOTAL_ITEMS) == 10
        assert view._get_previous(2, LIMIT, TOTAL_ITEMS) == 0
    def test_next(self):
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        # Next offset advances by limit (snapping to the page grid).
        assert view._get_next(0, LIMIT, TOTAL_ITEMS) == 5
        assert view._get_next(20, LIMIT, TOTAL_ITEMS) == 25
        assert view._get_next(120, LIMIT, TOTAL_ITEMS) == 125
        assert view._get_next(16, LIMIT, TOTAL_ITEMS) == 20
        assert view._get_next(2, LIMIT, TOTAL_ITEMS) == 5
    def test_last(self):
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        # Last offset is the start of the final (possibly partial) page.
        assert view._get_last(5, 5, TOTAL_ITEMS)
        assert view._get_last(0, 3, 1) == 0
        assert view._get_last(0, 3, 3) == 0
        assert view._get_last(0, 3, 5) == 3
        assert view._get_last(0, 3, 6) == 3
        assert view._get_last(0, 3, 7) == 6
    def test_first_link(self):
        LOCAL_LIST_ARGS = LIST_ARGS.copy()
        LOCAL_LIST_ARGS['offset'] = 0
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'limit=%s' % (LIMIT) in view._get_first_link()
        assert 'offset=0' in view._get_first_link()
        # NOTE(review): the asserts below duplicate test_previous; they look
        # like a copy-paste remnant rather than first-link behavior.
        assert view._get_previous(0, LIMIT, TOTAL_ITEMS) == 0
        assert view._get_previous(20, LIMIT, TOTAL_ITEMS) == 15
        assert view._get_previous(120, LIMIT, TOTAL_ITEMS) == 115
        assert view._get_previous(15, LIMIT, TOTAL_ITEMS) == 10
        assert view._get_previous(2, LIMIT, TOTAL_ITEMS) == 0
    def test_prev_link(self):
        LOCAL_LIST_ARGS = LIST_ARGS.copy()
        LOCAL_LIST_ARGS['offset'] = 0
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        # No previous page from offset 0.
        assert view._get_previous_link() is None
        LOCAL_LIST_ARGS['offset'] = 20
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=15' in view._get_previous_link()
        LOCAL_LIST_ARGS['offset'] = 120
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=115' in view._get_previous_link()
        LOCAL_LIST_ARGS['offset'] = 15
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=10' in view._get_previous_link()
        LOCAL_LIST_ARGS['offset'] = 2
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=0' in view._get_previous_link()
    def test_next_link(self):
        LOCAL_LIST_ARGS = LIST_ARGS.copy()
        LOCAL_LIST_ARGS['offset'] = 0
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=5' in view._get_next_link()
        LOCAL_LIST_ARGS['offset'] = 20
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=25' in view._get_next_link()
        LOCAL_LIST_ARGS['offset'] = 120
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=125' in view._get_next_link()
        LOCAL_LIST_ARGS['offset'] = 16
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=20' in view._get_next_link()
        LOCAL_LIST_ARGS['offset'] = 2
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LOCAL_LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        assert 'offset=5' in view._get_next_link()
    def test_last_link(self):
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        # 127 items / page size 5 -> final page begins at offset 125.
        assert 'offset=125' in view._get_last_link()
        args = LIST_ARGS.copy()
        args['page'] = 0
        args['page_size'] = 3
        args['offset'] = 0
        args['limit'] = 3
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 1)
        assert 'offset=0' in view._get_last_link()
        args['total_items'] = 3
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 3)
        assert 'offset=0' in view._get_last_link()
        args['total_items'] = 5
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 5)
        assert 'offset=3' in view._get_last_link()
        args['total_items'] = 6
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 6)
        assert 'offset=3' in view._get_last_link()
        args['total_items'] = 7
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, args, QUERY_ARGS, URI, 7)
        assert 'offset=6' in view._get_last_link()
    def test_links_stanza(self):
        # The links dict must agree with the individual link helpers.
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        links = view.get_pagination_links()
        assert links['first'] == view._get_first_link()
        assert links['next'] == view._get_next_link()
        assert links['previous'] == view._get_previous_link()
        assert links['last'] == view._get_last_link()
    def test_links_filters(self):
        # Every query-arg filter must be carried through into the links.
        view = NoQueryListView(QUERY, SORTABLE, FILTERABLE, LIST_ARGS, QUERY_ARGS, URI, TOTAL_ITEMS)
        last_link = view._get_last_link()
        assert 'cvss_from=2001-01-01' in last_link
        assert 'cvss_to=2020-01-01' in last_link
        assert 'show_all=True' in last_link
        assert 'opt_out=True' in last_link
        assert 'status_id=3' in last_link
        assert 'inventory_id=INV-ID-0001' in last_link
        args = QUERY_ARGS.copy()
        del args['show_all']
        # Removed filters must not appear in the generated links.
        view = NoQueryListView(QUERY, SORTABLE, args, LIST_ARGS, args, URI, TOTAL_ITEMS)
        last_link = view._get_last_link()
        assert 'show_all=True' not in last_link
| null | tests/manager_tests/test_links.py | test_links.py | py | 8,667 | python | en | code | null | code-starcoder2 | 51 |
318137463 | """This module implements a two-stage HMAX-like model.
This module implements a multi-scale analysis by applying single-scale Gabors to
a scale pyramid of the input image. This is similar to the configuration used by
Mutch & Lowe (2008).
"""
# Copyright (c) 2011 Mick Thomure
# All rights reserved.
#
# Please see the file COPYING in this distribution for usage terms.
from scipy.ndimage.interpolation import zoom
from glimpse.models.misc import BaseState, Whiten
from glimpse.models.viz2.model import Model as Viz2Model
from glimpse.models.viz2.model import Layer
from glimpse.util import kernel
from .params import Params
class State(BaseState):
  """A container for the :class:`Model` state."""
  # No extra fields; all behavior is inherited from BaseState.
  pass
class Model(Viz2Model):
  """Create a 2-part, HMAX-like hierarchy of S+C layers."""
  # Class attributes below override the Viz2Model defaults for this variant.
  #: The datatype associated with layer descriptors for this model.
  LayerClass = Layer
  #: The parameters type associated with this model.
  ParamClass = Params
  #: The datatype associated with network states for this model.
  StateClass = State
  @property
  def s1_kernel_shape(self):
    """The expected shape of the S1 kernels array, including band structure.

    :rtype: tuple of int
    """
    p = self.params
    # Band structure: (orientations, phases, kernel height, kernel width).
    return p.s1_num_orientations, p.s1_num_phases, p.s1_kwidth, p.s1_kwidth
@property
def s1_kernels(self):
"""The set of S1 kernels, which is generated if not set.
:returns: S1 kernels indexed by orientation, and phase.
:rtype: 4D ndarray of float
"""
# if kernels array is empty, then generate it using current model parameters
if self._s1_kernels == None:
p = self.params
self._s1_kernels = kernel.MakeGaborKernels(
kwidth = p.s1_kwidth,
num_orientations = p.s1_num_orientations,
num_phases = p.s1_num_phases, shift_orientations = True,
scale_norm = self.s1_kernels_are_normed)
return self._s1_kernels
def BuildS1FromRetina(self, retina):
"""Apply S1 processing to some existing retinal layer data.
.. note::
This method pools over phase, so the output has only scale and
orientation bands.
:param retina: Result of retinal layer processing.
:type retina: 2D ndarray of float
:return: S1 maps indexed by scale and orientation.
:rtype: list of 3D ndarray of float
"""
# Create scale pyramid of retinal map
p = self.params
retina_scales = [ zoom(retina, 1 / p.scale_factor ** scale)
for scale in range(p.num_scales) ]
# Reshape kernel array to be 3-D: index, 1, y, x
s1_kernels = self.s1_kernels.reshape((-1, 1, p.s1_kwidth, p.s1_kwidth))
s1s = []
backend_op = getattr(self.backend, p.s1_operation)
for scale in range(p.num_scales):
# Reshape retina to be 3D array
retina = retina_scales[scale]
retina_ = retina.reshape((1,) + retina.shape)
s1_ = backend_op(retina_, s1_kernels, bias = p.s1_bias, beta = p.s1_beta,
scaling = p.s1_sampling)
# Reshape S1 to be 4D array
s1 = s1_.reshape((p.s1_num_orientations, p.s1_num_phases) + \
s1_.shape[-2:])
# Pool over phase.
s1 = s1.max(1)
# Append 3D array to list
s1s.append(s1)
return s1s
def BuildC1FromS1(self, s1s):
"""Compute the C1 layer activity from multi-scale S1 activity.
:param s1s: S1 maps indexed by scale.
:type s1s: list of 3D ndarray of float, or 4D ndarray of float
:returns: C1 maps indexed by scale and orientation.
:rtype: list of 3D ndarray of float
"""
p = self.params
c1s = [ self.backend.LocalMax(s1, kwidth = p.c1_kwidth,
scaling = p.c1_sampling) for s1 in s1s ]
if p.c1_whiten:
# Whiten each scale independently, modifying values in-place.
map(Whiten, c1s)
return c1s
def BuildS2FromC1(self, c1s):
"""Compute the S2 layer activity from multi-scale C1 activity.
:param c1s: C1 maps indexed by scale and orientation.
:type c1s: 4D ndarray of float, or list of 3D ndarray of float
:returns: S2 maps indexed by scale and prototype.
:rtype: list of 3D ndarray of float
"""
if self.s2_kernels == None or len(self.s2_kernels[0]) == 0:
raise Exception("Need S2 kernels to compute S2 layer activity, but none "
"were specified.")
kernels = self.s2_kernels[0]
if len(c1s) == 0:
return []
p = self.params
s2s = []
backend_op = getattr(self.backend, p.s2_operation)
for scale in range(p.num_scales):
c1 = c1s[scale]
s2 = backend_op(c1, kernels, bias = p.s2_bias, beta = p.s2_beta,
scaling = p.s2_sampling)
# Append 3D array to list.
s2s.append(s2)
return s2s
# Add (circular) Model reference to State class.
State.ModelClass = Model
| null | glimpse/models/ml/model.py | model.py | py | 4,757 | python | en | code | null | code-starcoder2 | 51 |
63604089 | from common import *
import autograd.numpy as np
import matplotlib.pyplot as plt
import autograd.numpy.random as rng
from autograd.numpy.random import multivariate_normal as rmvn
from autograd.numpy.linalg import cholesky, solve
from autograd.scipy.linalg import cholesky as chol
from autograd.scipy.linalg import solve_triangular as solve_tri
import cov
# Perform inference in the 0-mean GP specified by the covariance function fcov
# and observation noise s2n.
#
# Inputs:
# X - observation inputs. (N)
# y - observation outputs. (N)
# fcov - (stationary) covariance function.
# s2n - observation noise.
#
# Outputs:
# posterior - function which accepts new inputs and computes functions to
# compute the posterior distribution at these points.
# lml - function to compute the log marginal likelihood of the data log p(y | X).
#
def infer(X, y, fcov, s2n):
# Compute suff. stats for posterior prediction. Follows conventions from
# page 19 of GPforML (Rasmussen and Williams).
N = y.shape[0]
Kxx = fcov(X) + s2n * np.eye(N)
L = chol(Kxx, lower=True)
alpha = solve_tri(L, solve_tri(L, y, lower=True), lower=True, trans='T')
# Define function to make posterior predictions at new data.
def posterior(Xs):
Ks_diag, Ksx, Kss = fcov(Xs, diag=True), fcov(Xs, Z=X), fcov(Xs)
# Return function to compute posterior means.
def mu():
return np.dot(Ksx, alpha)
# Return function to compute posterior marginal variances.
def s2():
Ns = Xs.shape[0]
s2out = np.empty(Ns)
for j in range(Ns):
v = solve_tri(L, Ksx[j], lower=True)
s2out[j] = np.dot(v, v)
return Ks_diag - s2out
# Return the full posterior covariance.
def Sigma():
B = solve_tri(L, Ksx.T, lower=True)
return Kss - np.dot(B.T, B)
return mu, s2, Sigma
# Compute the log marginal likelihood of the data.
def lml():
return -0.5*(N*log2pi() + 2*np.sum(np.log(np.diag(L))) + np.dot(y, alpha))
# Functions to compute posterior predictive and log marginal likelihood.
return posterior, lml
def main():
# Define the covariance function.
print('Define covariance function.')
pars = {'l2h' : np.log(np.exp(1.0) - 1.0), 's2h' : np.log(np.exp(1.0) - 1.0)}
fcov = cov.factory(cov.eq, pars)
# Generate some data.
print('Generate toy data.')
rng.seed(15485863)
lb, ub, N, s2n = 0.0, 10.0, 250, 1e-1
X1 = rng.uniform(low=lb, high=ub / 3, size=N / 2)
X2 = rng.uniform(low=ub * 2.0 / 3.0, high=ub, size=N / 2)
X = rng.permutation(np.hstack([X1, X2]))
X = rng.uniform(low=lb, high=ub, size=N)
X = np.linspace(lb, ub, N)
y = rmvn(np.zeros(N), fcov(X, X) + s2n * np.eye(N))
posterior = infer(X, y, fcov, s2n)
Ns, delta = 500, 5.0
Xs = np.linspace(lb - delta, ub + delta, Ns)
mu, s2, Sigma = posterior(Xs)
muX, sX = mu(), np.sqrt(s2())
plt.plot(Xs, muX, 'b', Xs, muX + 2 * sX, 'b--', Xs, muX - 2 * sX, 'b--',\
X, y, 'rx')
plt.figure()
plt.imshow(np.log(Sigma() + 1e-3))
plt.colorbar()
plt.show()
if __name__ == '__main__':
main()
| null | exp/circgp/gpexact.py | gpexact.py | py | 3,065 | python | en | code | null | code-starcoder2 | 51 |
529240850 | import sys
sys.path.append('C:\E\mysoft\python-workSpace\pythons\test-dash2')
import pandas as pd
import pymysql
from sshtunnel import SSHTunnelForwarder
from sqlalchemy import create_engine
from pyecharts.charts import Bar
from example.commons import Faker
from pyecharts import options as opts
from pyecharts.charts import Page, Pie, Gauge, Line
from pyecharts.globals import ThemeType
import pyecharts.commons.utils as results
# 连接线上db_itouzi主库
def db_itz_conn():
db_itz_conn = pymysql.connect(host='172.16.3.127', port=3306, user='chenlianqing',
passwd='zEtwv4qaxs4mMox', db='db_itouzi', charset='utf8')
return db_itz_conn
# 连接ecshop主库 rm-2zes9s7zvt5z2il509o.mysql.rds.aliyuncs.com,修改为从库
def db_ecshop_conn():
db_ecshop_conn = pymysql.connect(
host='huanhuan103', port=3306,
user='yanan', passwd='qosH3$)!.s', db='ec_shop',
charset='utf8')
return db_ecshop_conn
#链接db_clq数据库
def db_clq_conn():
db_clq_conn = pymysql.connect(host='172.16.3.127', port=3306, user='chenlianqing',passwd='zEtwv4qaxs4mMox', db='db_clq', charset='utf8')
return db_clq_conn
#链接线下统计库的ecshop
def ol_new_shop_conn():
conn = pymysql.connect(host='39.107.136.209',port=3306,user='root',passwd='df@#88%nQWE',db='ecshop',charset='utf8')
print('ol_ecshop_conn connected via SSH')
return conn
# 获取数据库中的数据
# 目标值
target = 80000000
# 本月累计销售额
def month_gmv():
# conn = localconn.db_ecshop_conn()
conn = db_ecshop_conn()
gmv_sql = """SELECT sum(money_paid+surplus) AS 'goods_amount' FROM itz_order_info
WHERE ((pay_status=2 AND order_type in (0,2,3,4)) OR (pay_status = 1
AND order_type = 1
AND order_id IN (SELECT DISTINCT order_id
FROM itz_order_instalment
WHERE pay_status = 2)))
AND add_time>=unix_timestamp(concat(date_format(LAST_DAY(now()),'%Y-%m-'),'01'))
AND add_time<=unix_timestamp(LAST_DAY(now()));"""
gmv = pd.read_sql(gmv_sql, conn, index_col=None)
values = [round(gmv['goods_amount'][0] / target, 2), round(1 - gmv['goods_amount'][0] / target, 2)]
# conn = localconn.db_ecshop_conn()
conn = db_ecshop_conn()
user_sql = """SELECT DISTINCT user_id FROM itz_order_info
WHERE add_time>=unix_timestamp(concat(date_format(LAST_DAY(now()),'%Y-%m-'),'01'))
AND add_time<=unix_timestamp(LAST_DAY(now()))
AND ((pay_status=2 AND order_type in (0,2,3,4)) OR (pay_status = 1
AND order_type = 1
AND order_id IN (SELECT DISTINCT order_id
FROM itz_order_instalment
WHERE pay_status = 2)));"""
user = pd.read_sql(user_sql,conn,index_col=None)
user_list = user['user_id'].tolist()
# conn = localconn.ol_new_shop_conn()
conn = ol_new_shop_conn()
user_tag_sql = """SELECT
b2c_userid,
xingbie,
age,
province,
capital,
all_debt_money
FROM itz_hh_user_spark
WHERE b2c_userid in {};""".format(tuple(user_list))
user_tag = pd.read_sql(user_tag_sql,conn,index_col=None)
user_tag['b2c_userid'] = user_tag['b2c_userid'].astype('int')
user_tag['age'] = user_tag['age'].fillna(0)
user_tag['age'] = user_tag['age'].astype('int')
bins = [0,20,30,40,50,60,user_tag.age.max()]
# labels = ["0-20","20-30","30-40","40-50","50-60","60-100"]
user_tag['age_region'] = pd.cut(user_tag['age'],bins=bins,right=True)
user_tag['age_region'] = user_tag['age_region'].astype("str")
aa = user_tag.groupby('xingbie',as_index=False).agg({'b2c_userid':'count'})
bb = user_tag.groupby('age_region',as_index=False).agg({'b2c_userid':'count'})
# print("na $$$$$$: ",user_tag.loc[user_tag["age"].isna()]['age'])
# print("null ***** ",user_tag.loc[user_tag["age"].isnull()]['age'])
# print("user_tag$$$$$$$$$: ",user_tag['b2c_userid'].count())
# print("bbbbbbbbbbbb: ",bb['b2c_userid'].sum())
cc = user_tag.groupby('province',as_index=False).agg({'b2c_userid':'count'})
bins = [0,100,1000,10000,50000,100000,500000,1000000,user_tag.capital.max()+1]
user_tag['capital_region'] = pd.cut(user_tag['capital'],bins=bins,right=False)
user_tag['capital_region'] = user_tag['capital_region'].astype("str")
dd = user_tag.groupby('capital_region',as_index=False).agg({'b2c_userid':'count'})
return values,user_tag,aa,bb,cc,dd
# 打开数据库连接
def getConnect():
db = pymysql.connect("39.107.136.209:3306",
"root", "df@#88%nQWE@", "ecshop",charset="utf8mb4")
return db
# 将结果保存到huanhuan101:ecshop
def getHuanhuanEcshop():
# 创建对应的执行引擎
result = create_engine(
"mysql+pymysql://root:df@#88%nQWE@39.107.136.209:3306/ecshop?charset=utf8mb4",
echo=False, pool_pre_ping=True)
return result
# 设置颜色bar
color_function_bar = """
function (params) {
return '#07CDFF';
}
"""
bar = Bar(init_opts=opts.InitOpts(width="630px", height="450px",theme=ThemeType.CHALK)) # width="850px", height="650px"
bar.add_xaxis(month_gmv()[3]["age_region"].tolist())
bar.add_yaxis("不同年龄段购买人数", month_gmv()[3]["b2c_userid"].tolist(),
itemstyle_opts=opts.ItemStyleOpts(color=results.JsCode(color_function_bar)))
bar.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-30)),
title_opts=opts.TitleOpts(title="不同年龄段购买人数")
# title_opts=opts.TitleOpts(title="Bar-旋转X轴标签", subtitle="解决标签名字过长的问题"),
)
# 仪表盘
def gauge_base() -> Gauge:
c = (
# Gauge(init_opts=opts.InitOpts(width="850px", height="650px"))
Gauge(init_opts=opts.InitOpts(width="630px", height="450px",theme=ThemeType.CHALK))
.add("", [("完成率", int(month_gmv()[0][0] * 100))])
.set_global_opts(title_opts=opts.TitleOpts(title="当月目标完成率"))
)
return c
# 饼图
def pie_base() -> Pie:
color_function = """
function (params) {
return '#07CDFF';
}
"""
c = (
# Pie(init_opts=opts.InitOpts(width="850px", height="650px"))
Pie(init_opts=opts.InitOpts(width="630px", height="450px",theme=ThemeType.CHALK))
# .add("", [list(z) for z in zip(Faker.choose(), Faker.values())])
.add("", [list(z) for z in zip(['男','女'], month_gmv()[2]["b2c_userid"])],) #
.set_colors(["#6055FC","#01FFEA"]) # 设置饼状图的颜色
.set_global_opts(title_opts=opts.TitleOpts(title="近一个月男女购买比例"))
# .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c} {d}%"))
.set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c} {d}%"))
)
return c
###########################################
def pie_rich_label22() -> Pie:
c = (
Pie(init_opts=opts.InitOpts(width="630px", height="450px",theme=ThemeType.CHALK))
.add(
"",
# [list(z) for z in zip(Faker.choose(), Faker.values())],
[list(z) for z in zip(['男','女'], month_gmv()[2]["b2c_userid"])],
# radius=["40%", "55%"],
radius=["50%", "65%"],
label_opts=opts.LabelOpts(
# position="outside",
# formatter="{a|{a}}{abg|}\n{hr|}\n {b|{b}: }{c} {per|{d}%} ",
formatter="{b}: {c} {d}%", # b 名称, c 数量, d 百分比
# background_color="#eee",
# border_color="#aaa",
# border_width=1,
# border_radius=4,
# rich={
# "a": {"color": "#999", "lineHeight": 22, "align": "center"},
# "abg": {
# "backgroundColor": "#e3e3e3",
# "width": "100%",
# "align": "right",
# "height": 22,
# "borderRadius": [4, 4, 0, 0],
# },
# "hr": {
# "borderColor": "#aaa",
# "width": "100%",
# "borderWidth": 0.5,
# "height": 0,
# },
# "b": {"fontSize": 16, "lineHeight": 33},
# "per": {
# "color": "#eee",
# "backgroundColor": "#334455",
# "padding": [2, 4],
# "borderRadius": 2,
# },
# },
),
)
.set_colors(["#6055FC", "#01FFEA"]) # 设置饼状图的颜色
.set_global_opts(title_opts=opts.TitleOpts(title="近一个月男女购买比例"))
)
return c
#############################################
# 折线图
def line_markpoint() -> Line:
c = (
Line(init_opts=opts.InitOpts(width="630px", height="450px",theme=ThemeType.CHALK)) # width="850px", height="650px"
# .add_xaxis(Faker.choose())
.add_xaxis(month_gmv()[5]["capital_region"].tolist())
.add_yaxis(
"近一个月不同待还金额区间购买人数分布",
# Faker.values(),
month_gmv()[5]["b2c_userid"].tolist(),
markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_="min")]),
)
# .add_yaxis(
# "商家B",
# Faker.values(),
# markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_="max")]),
# )
# .set_global_opts(title_opts=opts.TitleOpts(title="Line-MarkPoint"))
# 设置旋转的x坐标轴
.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15)),
title_opts=opts.TitleOpts(title="待还金额区间购买人数"),
# title_opts=opts.TitleOpts(title="Bar-旋转X轴标签", subtitle="解决标签名字过长的问题"),
)
.set_colors(["#07CDFF"])
)
return c
# bar.render()
# 柱状图
# style="width:1100px; height:700px
def bar_base() -> Bar:
color_function = """
function (params) {
return '#07CDFF';
}
"""
c = ( # 1300px 1260px
Bar(init_opts=opts.InitOpts(width="1260px", height="650px",theme=ThemeType.CHALK)) # ,theme=ThemeType.DARK
# .add_xaxis(Faker.choose())
.add_xaxis(month_gmv()[4]["province"].tolist())
# .add_yaxis("商家A", Faker.values())
.add_yaxis("近一个月内不同城市购买人数分布", month_gmv()[4]["b2c_userid"].tolist(),
itemstyle_opts=opts.ItemStyleOpts(color=results.JsCode(color_function)))
# .add_yaxis("商家B", Faker.values())
# .set_global_opts(title_opts=opts.TitleOpts(title="Bar-基本示例", subtitle="我是副标题"))
.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-30)),
title_opts=opts.TitleOpts(title="不同城市购买人数分布"),
# title_opts=opts.TitleOpts(title="Bar-旋转X轴标签", subtitle="解决标签名字过长的问题"),
)
# .set_series_opts(
# label_opts=opts.LabelOpts(is_show=False),
# markline_opts=opts.MarkLineOpts(
# data=[
# opts.MarkLineItem(type_="min", name="最小值"),
# opts.MarkLineItem(type_="max", name="最大值"),
# opts.MarkLineItem(type_="average", name="平均值"),
# # opts.MarkLineItem(value_index = [200,400,600,800,1000]),
# ],
#
# ),
# # 设置线的类型: 实体线
# linestyle_opts=opts.LineStyleOpts(type_="solid")
# )
)
return c
page = Page(layout=Page.SimplePageLayout)
# 需要自行调整每个 chart 的 height/width,显示效果在不同的显示器上可能不同
page.add(gauge_base(),pie_rich_label22(),bar, line_markpoint(), bar_base()) # ,pie_base()
# page.add(bar_base())
# page.render()
if __name__ == '__main__':
# app.run_server(8080,debug=True)
# page.render() C:\E\mysoft\python-workSpace\pythons\djang1\templates
page.render("C:/E/mysoft/python-workSpace/pythons/djang1/templates/result.html")
# print("$$$$$$$: ",)
# tuples = month_gmv()
# print("values00000: ",tuples[0])
# print("user_tag11111: ",tuples[1])
# print("aa22222: ",tuples[2])
# print("bb33333: ",tuples[3])
# print("cc44444: ",tuples[4])
# print("dd55555: ",tuples[5])
#
# print("type$$$$$$$$$$: ",type(month_gmv()[0][0] * 100))
print("==============start===========")
# print(month_gmv()[4]["province"])
# print(month_gmv()[4]["b2c_userid"])
# print(month_gmv()[3]["age_region"].tolist())
# print(month_gmv()[5]["capital_region"].tolist())
| null | manager/pyecharts_results.py | pyecharts_results.py | py | 12,969 | python | en | code | null | code-starcoder2 | 51 |
96202310 | import json
import pickle
from argparse import ArgumentParser
from pathlib import Path
from typing import Dict, Tuple
import pandas as pd
import numpy as np
from pandas import DataFrame
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
def rmse(a, b):
return np.sqrt(mean_squared_error(a, b))
def load_model(model_file: Path) -> GradientBoostingRegressor:
model: GradientBoostingRegressor = pickle.loads(model_file.read_bytes())
return model
def load_df(folder: Path) -> DataFrame:
"""Load prepared data into dataframe from folder
Args:
folder (Path): folder containing data.csv
Returns:
DataFrame: prepared dataframe
"""
return pd.read_csv(folder/"data.csv")
def load_data_for_model(original_data: DataFrame) -> Tuple[DataFrame, DataFrame]:
"""Load model-specific version of data (with addition type transformations etc.)
Args:
original_data (DataFrame): original dataframe
Returns:
Tuple[DataFrame, DataFrame]: (X, y) dataframes ready for model.fit()
"""
columns_to_drop = original_data.columns[
# sklearn GradientBoostingRegressor does not handle strings
original_data.columns.str.contains("_name")
]
original_data = original_data.drop(columns=columns_to_drop)
X = original_data.drop(columns=["item_cnt_month"])
y = original_data["item_cnt_month"]
return X, y
def load_test_range(test_folder: Path):
return pd.read_csv(test_folder/"test.csv", index_col=["shop_id", "item_id"])
def extend_target_df(test_range: DataFrame, val_df: DataFrame, prediction: np.ndarray) -> DataFrame:
target_df = test_range.join(val_df.set_index(["shop_id", "item_id"]).assign(prediction=prediction)).assign(
date_block_num=(24 + 9),
item_cnt_month=lambda df: df.item_cnt_month.fillna(0),
prediction=lambda df: df.prediction.fillna(0),
date_year=2015,
date_month=9,
)
return target_df
def evaluate_model(model_file: Path, val_folder: Path, test_folder: Path) -> Dict:
"""Evaluate model
Args:
model_file (Path): path to mode pickle file
Returns:
Dict: metrics
"""
model = load_model(model_file)
val_df = load_df(val_folder)
X_val, _ = load_data_for_model(val_df)
prediction = model.predict(X_val)
test_range = load_test_range(test_folder)
extended = extend_target_df(test_range, val_df, prediction)
return {
"rmse": rmse(extended.item_cnt_month, extended.prediction)
}
def write_metrics(metrics: Dict, file: Path):
file.write_text(json.dumps(metrics))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("val_folder", type=Path)
parser.add_argument("test_folder", type=Path)
parser.add_argument("--model_file", type=Path, default=Path("model.pkl"))
parser.add_argument("--metrics_file", type=Path,
default=Path("metrics.json"))
args = parser.parse_args()
metrics = evaluate_model(model_file=args.model_file,
val_folder=args.val_folder,
test_folder=args.test_folder)
write_metrics(metrics=metrics, file=args.metrics_file) | null | code/src/evaluate.py | evaluate.py | py | 3,225 | python | en | code | null | code-starcoder2 | 51 |
323630674 | import collections
from typing import Deque
import re #정규표현식 불러오기
class Solution:
def isPalindrome(self, s: str) -> bool:
strs = []
for char in s:
if char.isalnum(): # isalnum(): 영문자, 숫자 여부 판별하여 False, True 변환
strs.append(char.lower()) # 모든 문자 소문자 변환하여 str에 입력
print('문자 처리: ', strs)
# 팰린드롬 여부 판별
while len(strs) > 1: # strs의 길이가 1 이상이면 반복
# pop(0): 맨 앞의 값, pop(): 맨 뒤의 값을 가져옴
if strs.pop(0) != strs.pop():
return False
def isPalindrome1(self, s: str) -> bool:
# 자료형 데크로 선언
strs: Deque = collections.deque() # 데크 생성
print('\n데크 생성: ', strs)
for char in s:
if char.isalnum():
strs.append(char.lower())
print('문자 처리: ', strs)
while len(strs) > 1:
if strs.popleft() != strs.pop(): # 데크의 popleft()는 O(1), 리스트의 pop(0)이 O(n)
return False
return True
def isPalindrome2(self, s: str) -> bool:
s = s.lower()
# 정규식으로 불필요한 문자 필터링: re.sub(''정규표현식', 대상 문자열, 치환 문자)
s = re.sub('[^a-z0-9]', '', s) #s 중, 알파벳과 숫자가 아닌 것을 ''로 바꿔라
print('\n문자 처리: ', s)
return s == s[::-1] # 슬라이싱 [::-1]: 배열 뒤집기 | null | python_algorithm/python_algorithm_06/Array/isPalindrome.py | isPalindrome.py | py | 1,593 | python | en | code | null | code-starcoder2 | 51 |
139529264 | from ..models import Measurement
def get_measurements():
queryset = Measurement.objects.all().order_by('-dateTime')[:10]
return (queryset)
def create_measurement(form):
measurement = form.save()
measurement.save()
return ()
def create_measurement_object(variable_id, value, unit, place):
measurement = Measurement()
measurement.variable = variable_id
measurement.value = value
measurement.unit = unit
measurement.place = place
measurement.save()
return () | null | measurements/logic/logic_measurements.py | logic_measurements.py | py | 506 | python | en | code | null | code-starcoder2 | 51 |
581952654 | #!/usr/bin/python3
import sys # stdout enumerate
from itertools import * # chain from_iterable product
from math import * # sqrt floor ceil gcd
from copy import copy, deepcopy
from collections import * # Counter defaultdict deque
from queue import Queue
from heapq import heappush, heappop, heapify
from operator import * # itemgetter
from functools import reduce
from string import ascii_lowercase, ascii_uppercase
from bisect import bisect_right
gi = lambda: int(input())
gis = lambda: list(map(int, input().split()))
gs = lambda: input()
skiplast = lambda x: range(len(x)-1)
is_even = lambda x: x%2 == 0
inf = float('inf')
def main():
n, k, q = gis()
teams = defaultdict(list)
sums = defaultdict(int)
for i in range(n):
si, ti = gis()
teams[ti].append(si)
sums[ti] += si
for team in teams.values():
team.sort()
dp = defaultdict(dict)
for _ in range(q):
typ, *rest = gis()
if typ == 1:
p, x = rest
teams[x].insert(bisect_right(teams[x], p), p)
else:
x, y = rest
i = len(teams[x])-1
j = len(teams[y])-1
x_sum = sums[x]
y_sum = sums[y]
x_attack = True
while True:
if x_attack:
if x_sum >= y_sum:
break
y_sum -= teams[x][i]
if y_sum < 0:
break
for k in range(j+1, j+teams[x][i]+1):
y_sum -= teams[y][k]
else:
if y_sum >= x_sum:
break
x_sum -= teams[y][j]
if x_sum < 0:
break
for k in range(i+1, i+teams[y][j]+1):
x_sum -= teams[x][k]
x_attack = not x_attack
print(x if x_attack else x)
main()
| null | algorithms/greedy/fighting_pits.py | fighting_pits.py | py | 2,013 | python | en | code | null | code-starcoder2 | 51 |
290709786 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to interface with the Crystallography Open
Database. If you use data from the COD, please cite the following works (as
stipulated by the COD developers)::
Merkys, A., Vaitkus, A., Butkus, J., Okulič-Kazarinas, M., Kairys, V. &
Gražulis, S. (2016) "COD::CIF::Parser: an error-correcting CIF parser for
the Perl language". Journal of Applied Crystallography 49.
Gražulis, S., Merkys, A., Vaitkus, A. & Okulič-Kazarinas, M. (2015)
"Computing stoichiometric molecular composition from crystal structures".
Journal of Applied Crystallography 48, 85-91.
Gražulis, S., Daškevič, A., Merkys, A., Chateigner, D., Lutterotti, L.,
Quirós, M., Serebryanaya, N. R., Moeck, P., Downs, R. T. & LeBail, A.
(2012) "Crystallography Open Database (COD): an open-access collection of
crystal structures and platform for world-wide collaboration". Nucleic
Acids Research 40, D420-D427.
Grazulis, S., Chateigner, D., Downs, R. T., Yokochi, A. T., Quiros, M.,
Lutterotti, L., Manakova, E., Butkus, J., Moeck, P. & Le Bail, A. (2009)
"Crystallography Open Database – an open-access collection of crystal
structures". J. Appl. Cryst. 42, 726-729.
Downs, R. T. & Hall-Wallace, M. (2003) "The American Mineralogist Crystal
Structure Database". American Mineralogist 88, 247-250.
"""
import requests
import subprocess
from monty.dev import requires
from monty.os.path import which
import re
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.util.string import formula_double_format
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
class COD:
"""
An interface to the Crystallography Open Database.
"""
def __init__(self):
pass
def query(self, sql):
r = subprocess.check_output(["mysql", "-u", "cod_reader", "-h",
"www.crystallography.net", "-e",
sql, "cod"])
return r.decode("utf-8")
@requires(which("mysql"), "mysql must be installed to use this query.")
def get_cod_ids(self, formula):
"""
Queries the COD for all cod ids associated with a formula. Requires
mysql executable to be in the path.
Args:
formula (str): Formula.
Returns:
List of cod ids.
"""
# TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!
# Standardize formula to the version used by COD.
sql = 'select file from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
cod_ids = []
for l in text:
m = re.search(r"(\d+)", l)
if m:
cod_ids.append(int(m.group(1)))
return cod_ids
def get_structure_by_id(self, cod_id, **kwargs):
"""
Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A Structure.
"""
r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
return Structure.from_str(r.text, fmt="cif", **kwargs)
@requires(which("mysql"), "mysql must be installed to use this query.")
def get_structure_by_formula(self, formula, **kwargs):
"""
Queries the COD for structures by formula. Requires mysql executable to
be in the path.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A list of dict of the format
[{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}]
"""
structures = []
sql = 'select file, sg from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
text.pop(0)
for l in text:
if l.strip():
cod_id, sg = l.split("\t")
r = requests.get("http://www.crystallography.net/cod/%s.cif"
% cod_id.strip())
try:
s = Structure.from_str(r.text, fmt="cif", **kwargs)
structures.append({"structure": s, "cod_id": int(cod_id),
"sg": sg})
except Exception:
import warnings
warnings.warn("\nStructure.from_str failed while parsing CIF file:\n%s" % r.text)
raise
return structures
| null | pymatgen/ext/cod.py | cod.py | py | 5,029 | python | en | code | null | code-starcoder2 | 51 |
86064595 | # coding=UTF-8
#%matplotlib inline
import visa
import time
import datetime
import numpy as np
N=50
ppsvalue=np.array([5.50, 5.00, 4.5, 3.60, 3.30, 3.00, 2.70, 2.20])
rm = visa.ResourceManager()
pps=rm.open_resource('GPIB0::6::INSTR')
cnter= rm.open_resource('GPIB0::3::INSTR')
print(pps.query('*MODEL?'))
print(cnter.query('*IDN?'))
pps.write ('OVSET1 9.00; OVP 1; OCP 1; ISET1 1.00')
pps.write('VSET1 3.00;OUT1 1')
del cnter.timeout
time.sleep(5)
for ppsv in ppsvalue:
filename ='file'+time.strftime("%m%d%H%M%S", time.localtime())+'.txt'
pps.write('VSET1 '+str(ppsv)+';OUT1 1')
time.sleep(1)
filetemp = open (filename, mode='a')
filetemp.write("VDD="+str(ppsv)+"\n")
filetemp.close
#print ('VDD='+str(ppsv))
fcnt=0
for fcnt in range(N):
meafre = cnter.query("FETCH:FREQ?")
filetemp = open (filename, mode='a')
filetemp.write(str (float(meafre))+"\n")
filetemp.close
print ('VDD=' + str(ppsv) + str((float(meafre)-1)))
#print (str ((float(meafre)-1)))
print ('finished')
| null | array_test.py | array_test.py | py | 1,009 | python | en | code | null | code-starcoder2 | 51 |
511044401 | import datetime
import matplotlib.pyplot as plt
data = []
x = []
y = []
with open('forplot') as file:
for i in file.readlines():
splitted = i.split()
datestr = splitted[0]+' '+splitted[1]
date = datetime.datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S.%f') # 2020-02-25 12:29:46.040
data.append((date,int(splitted[2])))
data.sort(key=lambda x: x[0])
x = [i[0] for i in data]
a = 0
for i in data:
a += i[1]
y.append(a/128)
(fig, ax) = plt.subplots(1, 1)
ax.plot(x, y)
for n, label in enumerate(ax.xaxis.get_ticklabels()):
if n % 2 != 0:
label.set_visible(False)
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
plt.ylabel('Kbit')
plt.savefig('plot.png')
| null | lab2/plot.py | plot.py | py | 717 | python | en | code | null | code-starcoder2 | 51 |
355372996 | # -*- coding: utf-8 -*-
import pandas as pd
data = pd.read_csv('../Dataset/datalog.csv')
col_list = ['학점', '토익', '토스', 'OPIC', '외국어', '해외경험', '인턴', '수상경력']
# ["Index","학점", "토익", "토스", "OPIC", "외국어", "자격증", "해외경험", "인턴", "수상경력","봉사","합격여부"]
# Index,학점,토익,토스,OPIC,외국어,자격증,해외경험,인턴,수상경력,봉사,합격여부
# index,grades,toeic,tos,opic,foreign_lang,certificate,foreign_exp,intern,prize,volunteer,label
grades_list = []
toeic_list = []
tos_list = []
opic_list = []
foreignl_list = []
certificate_list = list(data['자격증'])
foreigne_list = []
intern_list = []
prize_list = []
volunteer_list = list(data['봉사'])
label = list(data['합격여부'])
all_save = []
for x in col_list:
if x == '학점':
for y in data[x]:
if y >= 3.0:
grades_list.append(1)
else:
grades_list.append(0)
elif x == '토익':
for y in data[x]:
if y >= 600:
toeic_list.append(1)
else:
toeic_list.append(0)
elif x == '토스':
for y in data[x]:
if y:
tos_list.append(1)
else:
tos_list.append(0)
elif x == 'OPIC':
for y in data[x]:
if y:
opic_list.append(1)
else:
opic_list.append(0)
elif x == '외국어':
for y in data[x]:
if y:
foreignl_list.append(1)
else:
foreignl_list.append(0)
elif x == '해외경험':
for y in data[x]:
if y:
foreigne_list.append(1)
else:
foreigne_list.append(0)
elif x == '인턴':
for y in data[x]:
if y:
intern_list.append(1)
else:
intern_list.append(0)
elif x == '수상경력':
for y in data[x]:
if y:
prize_list.append(1)
else:
prize_list.append(0)
all_save.append(grades_list)
all_save.append(toeic_list)
all_save.append(tos_list)
all_save.append(opic_list)
all_save.append(foreignl_list)
all_save.append(certificate_list)
all_save.append(foreigne_list)
all_save.append(intern_list)
all_save.append(prize_list)
all_save.append(volunteer_list)
all_save.append(label)
re_data = pd.DataFrame(all_save)
re_data = re_data.transpose()
re_data.to_csv('../Dataset/optmiz_data.csv') | null | Pretreatment/pretreatment.py | pretreatment.py | py | 2,581 | python | en | code | null | code-starcoder2 | 51 |
204443424 | import re
import json
import glob
for filpath in glob.glob('LTETrace************'):
with open(filpath, 'r') as ltefile:
ignore = {'Zeit', '>>>>>>>>>>>>>>>>>>>>>>>>', '!GSTATUS: ', '!LTEINFO:', '--', '2017'}
onestring = {'EMM', 'RRC', 'IMS', 'SINR', 'InterFreq', 'LTE CA state', 'GSM', 'WCDMA', 'CDMA 1x'}
data = {}
myLTE = {}
cont = []
a = 0
for line in ltefile:
if line.strip():
if any(item in line for item in ignore):
continue
elif any(item in line for item in onestring):
line = line.strip().split(':')
item = [i.strip().replace(' \t', '-') for i in line]
data[item[0]] = item[1]
elif line.startswith('LTE Pegel'):
# cont.append[0]
time = next(ltefile)
try:
time = next(ltefile).replace(' ', 'T').replace('\n', '')
data['time utc'] = time
except ValueError as e:
continue
elif line.startswith('Serving'):
fields = line[8:].replace('\n', '').split(' ')
fields = [item for item in fields if item]
values = next(ltefile)
values = values.replace('\n', '').split(' ')
values = [item for item in values if item]
d = dict(zip(fields, values))
data['Serving'] = d
# print(values)
elif line.startswith('IntraFreq'):
fields = line[10:].split()
values = []
temp = []
for i in range(0, 100):
myline = next(ltefile)
if not myline.isspace():
myvalues = myline.replace('\n', '').split(' ')
myvalues = [item for item in myvalues if item]
temp.append(myvalues)
else:
break
values = list(zip(*temp))
values = [list(item) for item in values]
d = dict(zip(fields, values))
data['IntraFreq'] = d
elif line.startswith('CDMA HRPD:'):
data['CMDA HRPD'] = line[10:].strip()
myLTE[a] = data
a = a + 1
data = {}
elif 'PCC' in line:
field = line[:12]
item = re.findall(r'[-+]?\d+(?:\.\d+)?', line)
data[field] = {}
data[field]['value'] = item[0]
data[field]['RSRP (dBm)'] = item[1]
else:
if line.startswith('System mode'):
line = line.strip('\n').strip().split('\t')
else:
line = line.strip().split(' ')
for item in line:
item = item.split(':')
item = [i.strip() for i in item]
field = item[0]
value = item[1]
data[field] = value
# keylist = myLTE.keys()
# keylist.sort()
# for key in keylist:
# print "%s: %s" % (key, myLTE[key])
jsonData = json.dumps(myLTE) # Save Python dictionary as JSON File
with open('JSONLTEData.json', 'a') as f:
f.write(jsonData + '\n')
print ("Text file containing LTE Measurements parsed into JSON File.") | null | LTE_converter.py | LTE_converter.py | py | 3,809 | python | en | code | null | code-starcoder2 | 51 |
571468285 | from starlette.routing import Router, Route
from starlette.requests import Request
from starlette.authentication import requires
from omo.views import template_env, template
from omo.db import database
from omo.middlewares import COOKIES_SESSION_TOKEN_KEY
@requires('authenticated', redirect='login')
async def my_account(request: Request):
    """Render the logged-in member's account page.

    Looks up the member row matching the session token cookie and, when a
    row is found, exposes it to the template as ``member_details``.
    """
    account_page = template_env.get_template('my-account.html')
    session_token = request.cookies[COOKIES_SESSION_TOKEN_KEY]
    query = 'SELECT id, first_name, last_name, email FROM member WHERE token = :token'
    row = await database.fetch_one(query=query, values={'token': session_token})
    context = {'request': request}
    if row:
        # The template displays first and last name as one string.
        context['member_details'] = {
            'id': row['id'],
            'first_name': f"{row['first_name']} {row['last_name']}",
            'email': row['email'],
        }
    return template.TemplateResponse(account_page, context=context)
# Router for the member-account pages; mounted by the main application.
accounts_router = Router(routes=[
    Route('/my_account/', endpoint=my_account, methods=['GET'])
])
| null | omo/routes/accounts.py | accounts.py | py | 1,263 | python | en | code | null | code-starcoder2 | 51 |
277593907 | #Code to run a quantum random number generator on a real quantum device.
from qiskit import QuantumCircuit, IBMQ, execute

# Authenticate with the IBM Quantum service for this session.
IBMQ.enable_account("YOUR_API_TOKEN")
provider = IBMQ.get_provider(hub='ibm-q')

# One classical bit per qubit so every measurement can be recorded.
n_qubits = 3
circuit = QuantumCircuit(n_qubits, n_qubits)

# A Hadamard on each qubit creates an equal superposition, so each
# measured bit behaves like a fair coin flip.
circuit.h(range(n_qubits))
circuit.measure(range(n_qubits), range(n_qubits))

# Run a single shot on real hardware and print the measured bitstring.
backend = provider.get_backend('ibmq_belem')
job = execute(circuit, backend, shots=1)
print(job.result().get_counts())
| null | quantum_coins.py | quantum_coins.py | py | 726 | python | en | code | null | code-starcoder2 | 51 |
382494602 | from typing import Any, Dict, Iterable, cast
from openslides_backend.action.actions.meeting.shared_meeting import (
meeting_projector_default_replacements,
)
from tests.system.action.base import BaseActionTestCase
class MeetingCreateActionTest(BaseActionTestCase):
    """System tests for the ``meeting.create`` action."""

    def basic_test(self, datapart: Dict[str, Any]) -> Dict[str, Any]:
        """Create a meeting with the given extra payload and return its model."""
        self.create_model("committee/1", {"name": "test_committee", "member_ids": [2]})
        self.create_model("group/1")
        self.create_model("user/2")
        response = self.request(
            "meeting.create",
            {
                "name": "test_name",
                "committee_id": 1,
                "welcome_title": "test_wel_title",
                **datapart,
            },
        )
        self.assert_status_code(response, 200)
        return self.get_model("meeting/1")

    def test_create_simple(self) -> None:
        """Creating a meeting sets up groups, workflow, projector and countdowns."""
        meeting = self.basic_test(dict())
        self.assertCountEqual(
            cast(Iterable[Any], meeting.get("default_projector_$_id")),
            meeting_projector_default_replacements,
        )
        self.assert_model_exists(
            "meeting/1",
            {
                "name": "test_name",
                "committee_id": 1,
                "group_ids": [2, 3, 4, 5, 6],
                "default_group_id": 2,
                "admin_group_id": 3,
                "motion_workflow_ids": [1],
                "motions_default_workflow_id": 1,
                "motions_default_amendment_workflow_id": 1,
                "motions_default_statute_amendment_workflow_id": 1,
                "motion_state_ids": [1, 2, 3, 4],
                "user_ids": [1],
                "list_of_speakers_countdown_id": 1,
                "poll_countdown_id": 2,
            },
        )
        self.assert_model_exists("group/2", {"name": "Default"})
        self.assert_model_exists("group/3", {"name": "Admin", "user_ids": [1]})
        self.assert_model_exists("group/4", {"name": "Delegates"})
        self.assert_model_exists("group/5", {"name": "Staff"})
        self.assert_model_exists("group/6", {"name": "Committees"})
        self.assert_model_exists(
            "motion_workflow/1",
            {
                "name": "Simple Workflow",
                "meeting_id": 1,
                "default_workflow_meeting_id": 1,
                "default_amendment_workflow_meeting_id": 1,
                "default_statute_amendment_workflow_meeting_id": 1,
                "state_ids": [1, 2, 3, 4],
                "first_state_id": 1,
            },
        )
        self.assert_model_exists(
            "motion_state/1", {"name": "submitted", "next_state_ids": [2, 3, 4]}
        )
        self.assert_model_exists(
            "motion_state/2",
            {
                "name": "accepted",
                "previous_state_ids": [1],
                "meeting_id": 1,
                "workflow_id": 1,
            },
        )
        self.assert_model_exists(
            "motion_state/3", {"name": "rejected", "previous_state_ids": [1]}
        )
        self.assert_model_exists(
            "motion_state/4", {"name": "not_decided", "previous_state_ids": [1]}
        )
        projector1 = self.get_model("projector/1")
        self.assertCountEqual(
            cast(Iterable[Any], projector1.get("used_as_default_$_in_meeting_id")),
            meeting_projector_default_replacements,
        )
        # BUGFIX: the original passed ``{...}.update({...})`` here, and
        # dict.update() returns None, so only the model's existence was
        # asserted. Merge the expected fields into one dict instead.
        self.assert_model_exists(
            "projector/1",
            {
                "name": "Default projector",
                "meeting_id": 1,
                "used_as_reference_projector_meeting_id": 1,
                **{
                    f"used_as_default_${name}_in_meeting_id": 1
                    for name in meeting_projector_default_replacements
                },
            },
        )
        self.assert_model_exists(
            "user/1",
            {
                "group_$1_ids": [3],  # meeting/1 and group 3
                "group_$_ids": ["1"],  # only meeting/1 values
            },
        )
        self.assert_model_exists(
            "projector_countdown/1",
            {
                "title": "List of speakers countdown",
                "meeting_id": 1,
                "used_as_list_of_speaker_countdown_meeting_id": 1,
                "default_time": 60,
                "countdown_time": 60,
            },
        )
        self.assert_model_exists(
            "projector_countdown/2",
            {
                "title": "Voting countdown",
                "meeting_id": 1,
                "used_as_poll_countdown_meeting_id": 1,
                "default_time": 60,
                "countdown_time": 60,
            },
        )

    def test_check_action_data_fields(self) -> None:
        """Optional payload fields are stored verbatim on the meeting."""
        meeting = self.basic_test(
            {
                "welcome_text": "htXiSgbj",
                "description": "RRfnzxHA",
                "location": "LSFHPTgE",
                "start_time": 1608120653,
                "end_time": 1608121653,
                "url_name": "JWdYZqDX",
                "enable_anonymous": False,
                "guest_ids": [2],
            }
        )
        assert meeting.get("welcome_text") == "htXiSgbj"
        assert meeting.get("description") == "RRfnzxHA"
        assert meeting.get("location") == "LSFHPTgE"
        assert meeting.get("start_time") == 1608120653
        assert meeting.get("end_time") == 1608121653
        assert meeting.get("url_name") == "JWdYZqDX"
        assert meeting.get("enable_anonymous") is False
        assert meeting.get("guest_ids") == [2]
        assert meeting.get("user_ids") == [1, 2]
        user_2 = self.get_model("user/2")
        assert user_2.get("guest_meeting_ids") == [1]

    def test_guest_ids_error(self) -> None:
        """Guests that are not committee members or managers are rejected."""
        self.create_model("committee/1", {"name": "test_committee", "member_ids": [2]})
        self.create_model("user/2")
        self.create_model("user/3")
        response = self.request(
            "meeting.create",
            {
                "name": "test_name",
                "committee_id": 1,
                "welcome_title": "test_wel_title",
                "guest_ids": [2, 3],
            },
        )
        self.assert_status_code(response, 400)
        self.assertIn(
            "Guest-ids {3} are not part of committee-member or manager_ids.",
            response.json["message"],
        )
| null | tests/system/action/meeting/test_create.py | test_create.py | py | 6,420 | python | en | code | null | code-starcoder2 | 51 |
253042468 | from flask import Flask, request, abort
from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import *
from engine.currencySearch import currencySearch
from engine.AQI import AQImonitor
from engine.gamma import gammamonitor
from engine.OWM import OWMLonLatsearch
from engine.SpotifyScrap import scrapSpotify
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Google Sheets is this bot's datastore: one worksheet tracks each user's
# registration status, another holds their profile/contact details.
scope=['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('好幫手.json',scope)
client = gspread.authorize(creds)
LineBotSheet = client.open('好幫手')
userStatusSheet = LineBotSheet.worksheet('userStatus')
userInfoSheet = LineBotSheet.worksheet('userInfo')
app = Flask(__name__)
# LINE channel access token.
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables and rotate the exposed token/secret.
line_bot_api = LineBotApi('zT/x0Dp81QA2Wp781ummtpycl3OxZk0M65BPz8SoCF1H6N93cSR50LMu8beeZ5jj9iM3C2hRBBk/4meraFGsJawJa3foM4c7tTf7tDTtudwlcDIFVyfHVhJIM67FyrOrVMgoe5J1X8dFf2m2X9P6fwdB04t89/1O/w1cDnyilFU=')
# LINE channel secret.
handler = WebhookHandler('e4fdbb0acac692e6c47353219f9657ea')
# Listen for POST callbacks from the LINE platform on /callback.
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the signature and dispatch events.

    Returns 'OK' on success; aborts with 400 when the signature header is
    missing or does not match the request body.
    """
    # A request without the signature header is not a valid LINE callback.
    # Previously this indexed request.headers directly, so a missing header
    # raised KeyError and produced an HTTP 500 instead of a clean 400.
    signature = request.headers.get('X-Line-Signature')
    if signature is None:
        abort(400)
    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    try:
        # Dispatches to the @handler.add(...) handlers defined below.
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'OK'
@app.route("/web")
def showWeb():
return '<h1>Hello Every one</h1>'
# Text-message handler.
# The reply is chosen from the user's registration status stored in column 2
# of the userStatus worksheet:
#   ''         -> unknown user: push the registration form, mark as '註冊中'
#   '註冊中'   -> registering: confirm once their row exists in userInfoSheet
#   '已註冊'   -> registered: dispatch on the message text (menu commands)
#   '天氣查詢' -> waiting for a location: offer to cancel the weather query
# The final TextSendMessage/TemplateSendMessage built in a branch is sent
# once at the end via reply_message().
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    userSend = event.message.text
    userID = event.source.user_id
    try:
        # Locate the user's row in the status sheet.
        cell = userStatusSheet.find(userID)
        userRow = cell.row
        userCol = cell.col
        status = userStatusSheet.cell(cell.row,2).value
    except:
        # gspread raises when the id is absent: create a row and treat the
        # user as unregistered. NOTE(review): the bare except also swallows
        # transient API errors.
        userStatusSheet.append_row([userID])
        cell = userStatusSheet.find(userID)
        userRow = cell.row
        userCol = cell.col
        status = ''
    if status == '':
        # Text prompt: not registered yet, copy the code below into the form.
        message = TextSendMessage(text='你尚未註冊,請填資料,\n請複製以下的註册碼來填寫資料')
        line_bot_api.push_message(userID,message)
        # Push the user id so it can be pasted into the registration form.
        message = TextSendMessage(text=userID)
        line_bot_api.push_message(userID,message)
        # Confirm template: open the registration form / press done.
        message = TemplateSendMessage(
            alt_text='註冊表單',
            template=ConfirmTemplate(
                text='請選擇【填寫表單】來註冊,完成後請點擊【完成】按鈕',
                actions=[
                    URIAction(
                        label='填寫表單',
                        uri='line://app/1609239460-ZEJqMXl0'
                    ),
                    MessageAction(
                        label='完成',
                        text='完成'
                    )
                ]
            )
        )
        userStatusSheet.update_cell(userRow, 2, '註冊中')
    elif status == '註冊中':
        try:
            # Registration is complete once the info sheet has their row.
            infoCell = userInfoSheet.find(userID)
            userStatusSheet.update_cell(userRow, 2, '已註冊')
            message = TextSendMessage(text='Hi,{}您好,已註冊成功'.format(userInfoSheet.cell(infoCell.row,3).value))
        except:
            # Not in the info sheet yet: re-send the registration prompt
            # (same sequence as the unregistered branch above).
            message = TextSendMessage(text='你尚未註冊,請填資料,\n請複製以下的註册碼來填寫資料')
            line_bot_api.push_message(userID,message)
            # Push the user id for the form.
            message = TextSendMessage(text=userID)
            line_bot_api.push_message(userID,message)
            # Confirm template: open the registration form / press done.
            message = TemplateSendMessage(
                alt_text='註冊表單',
                template=ConfirmTemplate(
                    text='請選擇【填寫表單】來註冊,完成後請點擊【完成】按鈕',
                    actions=[
                        URIAction(
                            label='填寫表單',
                            uri='line://app/1609239460-ZEJqMXl0'
                        ),
                        MessageAction(
                            label='完成',
                            text='完成'
                        )
                    ]
                )
            )
            userStatusSheet.update_cell(userRow, 2, '註冊中')
    elif status == '已註冊':
        # Registered users: dispatch on the exact message text.
        if userSend == '你好':
            # Greet by the name stored in column 3 of the info sheet.
            infoCell = userInfoSheet.find(userID)
            userName = userInfoSheet.cell(infoCell.row,3).value
            message = TextSendMessage(text='Hello, ' + userName)
        elif userSend == '天氣':
            # Switch into weather-query mode and ask for coordinates.
            userStatusSheet.update_cell(userRow, 2, '天氣查詢')
            message = TextSendMessage(text='請傳送你的座標,請按下列的+號選項')
        elif userSend in ['CNY', 'THB', 'SEK', 'USD', 'IDR', 'AUD', 'NZD', 'PHP', 'MYR', 'GBP', 'ZAR', 'CHF', 'VND', 'EUR', 'KRW', 'SGD', 'JPY', 'CAD', 'HKD']:
            # Three-letter currency codes: reply with the exchange rate.
            message = TextSendMessage(text=currencySearch(userSend))
        elif userSend == 'SOS':
            # Emergency button menu: doctor / family / police / edit contacts.
            message = TemplateSendMessage(
                alt_text='這是個按鈕選單',
                template=ButtonsTemplate(
                    thumbnail_image_url='https://i.imgur.com/Fpusd5M.png',
                    title='這是您的選單按鈕',
                    text='請選擇以下的項目,另有貨幣查詢功能,需輸入貨幣代碼3位大寫英文',
                    actions=[
                        MessageAction(
                            label='醫生',
                            text='醫生'
                        ),
                        MessageAction(
                            label='家人',
                            text='家人'
                        ),
                        MessageAction(
                            label='報警',
                            text='112'
                        ),
                        URIAction(
                            label='修改連絡資料',
                            uri='https://forms.gle/J8UL7uPCJabMuWvV6'
                        )
                    ]
                )
            )
        elif userSend == '氣候':
            # Weather menu: query another place or open the local warning site.
            message = TemplateSendMessage(
                alt_text='這是個按鈕選單',
                template=ButtonsTemplate(
                    thumbnail_image_url='https://i.imgur.com/iKYedf6.png',
                    title='天氣查詢',
                    text='請選擇地點',
                    actions=[
                        MessageAction(
                            label='查詢其他地方',
                            text='天氣'
                        ),
                        URIAction(
                            label='你所在位置',
                            uri='https://watch.ncdr.nat.gov.tw/townwarn/'
                        )
                    ]
                )
            )
        elif userSend in ['spotify','音樂','music']:
            # Image carousel of currently popular tracks scraped from Spotify.
            columnReply,textReply = scrapSpotify()
            message = TemplateSendMessage(
                alt_text=textReply,
                template=ImageCarouselTemplate(
                    columns=columnReply
                )
            )
        elif userSend == '便當店':
            # Columns 4-7 of the info sheet hold the user's saved contacts
            # (lunch shop, plumber, doctor, family).
            infoCell = userInfoSheet.find(userID)
            message = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,4).value))
        elif userSend == '醫生':
            infoCell = userInfoSheet.find(userID)
            message = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,6).value))
        elif userSend == '家人':
            infoCell = userInfoSheet.find(userID)
            message = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,7).value))
        elif userSend == '水電行':
            infoCell = userInfoSheet.find(userID)
            message = TextSendMessage(text='{}'.format(userInfoSheet.cell(infoCell.row,5).value))
        else:
            # Fallback: echo the user's text back.
            message = TextSendMessage(text=userSend)
    elif status == '天氣查詢':
        # Waiting for a location: any text offers to resend or cancel, and
        # the status returns to registered.
        message = TemplateSendMessage(
            alt_text='是否取消查詢',
            template=ConfirmTemplate(
                text='是否取消查詢?',
                actions=[
                    URIAction(
                        label='傳送位置資訊',
                        uri='line://nv/location'
                    ),
                    MessageAction(
                        label='取消查詢',
                        text='取消'
                    )
                ]
            )
        )
        userStatusSheet.update_cell(userRow, 2, '已註冊')
    line_bot_api.reply_message(event.reply_token, message)
# Location-message handler: in weather-query mode, answer with weather,
# air-quality and radiation reports for the sent coordinates; unregistered
# users get the registration prompt; anyone else gets a short quip.
@handler.add(MessageEvent, message=LocationMessage)
def handle_message(event):
    userID = event.source.user_id
    try:
        # Locate the user's row in the status sheet (see text handler above).
        cell = userStatusSheet.find(userID)
        userRow = cell.row
        userCol = cell.col
        status = userStatusSheet.cell(cell.row,2).value
    except:
        # Unknown user: create a status row and treat as unregistered.
        userStatusSheet.append_row([userID])
        cell = userStatusSheet.find(userID)
        userRow = cell.row
        userCol = cell.col
        status = ''
    if status == '天氣查詢':
        userAddress = event.message.address
        userLat = event.message.latitude
        userLon = event.message.longitude
        # Query the three monitoring services for the sent coordinates.
        weatherResult = OWMLonLatsearch(userLon,userLat)
        AQIResult = AQImonitor(userLon,userLat)
        gammaResult = gammamonitor(userLon,userLat)
        # Leave weather-query mode before replying.
        userStatusSheet.update_cell(userRow, 2, '已註冊')
        message = TextSendMessage(text='🌤天氣狀況:\n{}\n🚩空氣品質:\n{}\n\n🌌輻射值:\n{}'.format(weatherResult,AQIResult,gammaResult))
    elif status == '':
        # Same registration-prompt sequence as in the text handler.
        message = TextSendMessage(text='你尚未註冊,請填基本資料!\n請複製以下註冊碼來填寫表單')
        line_bot_api.push_message(userID,message)
        # Push the user id so it can be pasted into the form.
        message = TextSendMessage(text=userID)
        line_bot_api.push_message(userID,message)
        # Confirm template: open the registration form / press done.
        message = TemplateSendMessage(
            alt_text='註冊表單',
            template=ConfirmTemplate(
                text='請選擇[填寫表單]來註冊, 完成後請點擊[完成]按鈕',
                actions=[
                    URIAction(
                        label='填寫表單',
                        uri='line://app/1609239460-ZEJqMXl0'
                    ),
                    MessageAction(
                        label='填寫完成',
                        text='完成'
                    )
                ]
            )
        )
        userStatusSheet.update_cell(userRow, 2, '註冊中')
    else:
        # Registered but not in weather mode: a location is unexpected.
        message = TextSendMessage(text='傳地址幹嘛?')
    line_bot_api.reply_message(event.reply_token, message)
@handler.add(MessageEvent, message=StickerMessage)
def handle_message(event):
    """Reply to any sticker with a fixed "I can't read stickers" note."""
    reply = TextSendMessage(text='我看不懂貼圖')
    line_bot_api.reply_message(event.reply_token, reply)
import os  # NOTE(review): mid-file import; conventionally belongs at the top
if __name__ == "__main__":
    # Heroku-style deployment: bind to the port the platform provides,
    # defaulting to 5000 for local runs.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| null | app.py | app.py | py | 9,282 | python | en | code | null | code-starcoder2 | 51 |
262289428 | # import libraries
import urllib.request
from bs4 import BeautifulSoup
from selenium import webdriver
import json
from pymongo import MongoClient
import sys
import time
# Debug output goes to a UTF-8 file because product names may contain
# non-ASCII characters a default console encoding cannot print.
sys.stdout = open('file', 'w', encoding="utf-8")

url = "https://www.nike.com/w/new-shoes-3n82yzy7ok"

# Run the Firefox webdriver from an executable path of your choice.
driver = webdriver.Firefox()
line_items = []
try:
    # Load the listing page and scroll to the bottom so lazily-loaded
    # product cards are added to the DOM before scraping.
    driver.get(url)
    driver.maximize_window()
    time.sleep(5)  # give the page time to render before scrolling
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
    # Each of these divs is one product card.
    aTagsInLi = driver.find_elements_by_xpath("//div[@class='product-card css-1ikfoht css-z5nr6i css-11ziap1 css-zk7jxt css-dpr2cn product-grid__card ']")
    for a in aTagsInLi:
        print("here")
        img = a.find_element_by_tag_name('img')    # product image element
        siteDiv = a.find_element_by_tag_name('a')  # link to the product page
        name = img.get_attribute('alt')
        image_url = img.get_attribute('src')
        site = siteDiv.get_attribute('href')
        category = a.find_element_by_class_name('product-card__subtitle').text
        # Derive gender from the category text.
        if "Men" in category:
            gender = "Male"
        elif "Women" in category:
            gender = "Female"
        elif "Kid" in category or "Baby" in category or "Toddler" in category:
            gender = "Kid"
        else:
            gender = "Unisex"
        # Document to store for this shoe.
        myjson3 = {
            'name': name,
            'image_url': image_url,
            'site': site,
            'gender': gender,
            'category': category,
            'brand' : 'Nike'
        }
        print(myjson3)
        line_items.append(myjson3)
finally:
    # BUGFIX: the original never closed the browser, leaking a Firefox
    # process on every run (including failed ones).
    driver.quit()

# Connect to MongoDB and replace the previous scrape with the new one.
# NOTE(review): the connection string embeds live credentials in source;
# move them to an environment variable and rotate the password.
client = MongoClient("mongodb+srv://rjain9:Ilikepie16%21@cluster0-wgm3y.mongodb.net/test?retryWrites=true&w=majority")
db = client["Shoes"]
mycol = db["nike"]
# Clear the existing collection.
mycol.delete_many({})
# BUGFIX: insert_many raises InvalidOperation on an empty list, so only
# insert when the scrape actually found products.
if line_items:
    mycol.insert_many(line_items)
| null | scraper.py | scraper.py | py | 2,187 | python | en | code | null | code-starcoder2 | 51 |
334827427 | import urllib3, json, requests, keyboards
from setting import bot_token, chat_id_service, rest_link_product, rest_link_store, rest_link_stock
import telebot
from telebot import types
import barcode
import time, datetime, schedule
from configparser import ConfigParser
import os
from os import path
from mysql.connector import MySQLConnection, Error
from multiprocessing import Process, freeze_support
#from service import transliterate
# Silence urllib3 TLS warnings produced by the HTTP requests below.
urllib3.disable_warnings()
bot = telebot.TeleBot(bot_token)
# config.ini with DB credentials is expected to live next to this module.
dirpath = os.path.dirname(__file__)
conffile = os.path.join(dirpath, 'config.ini')
# Read the database configuration file.
def read_db_config(filename=None, section='mysql'):
    """Read database connection settings from an INI file.

    Parameters:
        filename: path to the INI file; defaults to the module-level
            ``conffile`` (config.ini next to this module). The default is
            now resolved at call time — the original evaluated ``conffile``
            once, when the function was defined.
        section: section name to read (default 'mysql').

    Returns:
        dict mapping option names to values from the requested section.

    Raises:
        Exception: if the section is missing from the file.
    """
    if filename is None:
        filename = conffile
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('{0} not found in the {1} file'.format(section, filename))
    # parser.items() yields (option, value) pairs for the section.
    return dict(parser.items(section))
# /start: greet returning users, prompt new ones to register.
@bot.message_handler(commands=['start'])
def start_message(message):
    """Handle the /start command.

    Looks the sender up in the users table and shows either the main menu
    (known user) or the registration keyboard (unknown user).
    """
    conn = MySQLConnection(**read_db_config())
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM users WHERE chat_id= %s", [message.from_user.id])
    known_user = cursor.fetchone()
    if known_user:
        bot.send_message(message.chat.id, 'С возвращением!', reply_markup=keyboards.keyboard1)
    else:
        bot.send_message(message.chat.id, 'Вы впервые здесь. Для продолжения нажмите кнопку "Зарегистрироваться"', reply_markup=keyboards.NewUser)
    cursor.close()
    conn.close()
# User registration: triggered when the user shares their contact card.
@bot.message_handler(content_types=['contact'])
def add_user(message):
    """Insert the shared contact into the users table if not yet present.

    NOTE(review): when the contact is already registered, the handler does
    nothing and sends no reply at all.
    """
    db_config = read_db_config()
    conn = MySQLConnection(**db_config)
    cursor = conn.cursor()
    sql = ("SELECT * FROM users WHERE chat_id= %s")
    cursor.execute(sql, [(message.contact.user_id)])
    user = cursor.fetchone()
    cursor.close()
    conn.close()
    if not user:
        # Row values in table-column order.
        newdata = (message.contact.user_id,
                   message.contact.first_name,
                   message.contact.last_name,
                   message.contact.phone_number,
                   datetime.datetime.now()
                   )
        # A second, separate connection is opened for the insert.
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        cursor.executemany("INSERT INTO users (chat_id, first_name, last_name, phone_number,datetime) VALUES (%s,%s,%s,%s,%s)",
                           (newdata,))
        conn.commit()
        cursor.close()
        conn.close()
        bot.send_message(message.chat.id, 'Приятно познакомиться, можете пользоваться сервисом', reply_markup=keyboards.keyboard1)
# Text-message handler: dispatches the main-menu commands
# ('поиск' = search, 'локация' = location, 'назад' = back,
# 'выбрать город ...' = choose city).
@bot.message_handler(content_types=['text'])
def send_text(message):
    """Route menu commands typed by the user to the matching action."""
    if message.text.lower() == 'поиск':
        # Show the search help / inline-search entry point.
        products(message.chat.id)
    elif message.text.lower() == 'локация':
        # Show the location keyboard with the user's saved city (or '???').
        city = get_user_city(message.chat.id)
        if city:
            usercity=city
        else:
            usercity='???'
        citykeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=1)
        #citykeyboard.add(types.KeyboardButton(text='Выбрать город ('+usercity+')'),
        citykeyboard.add(types.KeyboardButton(text='Выбрать город ('+usercity+')'),
                         types.KeyboardButton(text='Обновить координаты', request_location=True))
        citykeyboard.add(types.KeyboardButton(text='Назад'))
        bot.send_message(message.chat.id, 'Чтобы увидеть товар в ближайших аптеках, выберите город и обновите координаты', reply_markup=citykeyboard)
    elif message.text.lower() == 'назад':
        bot.send_message(message.chat.id, 'Главное меню', reply_markup=keyboards.keyboard1)
    elif message.text.lower().find('выбрать город') == 0:
        # Build an inline keyboard of all known cities; selecting one fires
        # a 'mycity:<name>' callback handled by callback_inline().
        try:
            db_config = read_db_config()
            conn = MySQLConnection(**db_config)
            cursor = conn.cursor()
            cursor.execute('select city from store s group by city order by city')
            citys = cursor.fetchall()
            markup = types.InlineKeyboardMarkup()
            for city in citys:
                name = city[0]
                switch_button = types.InlineKeyboardButton(text=name, callback_data='mycity:'+name)
                markup.add(switch_button)
            cursor.close()
            conn.close()
            bot.send_message(message.chat.id, "Выберите ваш город", reply_markup=markup)
            #bot.send_message(message.chat.id, 'Главное меню', reply_markup=keyboards.keyboard1)
            #bot.send_message(message.chat.id, todos['name'] + chr(10) + chr(10) + 'Цена: ' + todos['price'] + ' тенге')
        except requests.exceptions.ConnectionError:
            # Tell the user the price service is down...
            bot.send_message(message.chat.id, 'Отсутствует связь с сервисом цен')
            # ...and notify the service/admin chat about the outage.
            bot.send_message(chat_id_service, 'Внимание! Проблема с доступом к сервису цен')
# Location messages: store the user's coordinates.
@bot.message_handler(content_types=['location'])
def send_location(message):
    """Persist the sender's latitude/longitude and confirm the update.

    The stored coordinates are later used to rank pharmacies by distance.
    """
    # BUGFIX: removed a leftover debug ``print(message)`` that dumped the
    # entire incoming message (including personal data) to stdout.
    newdata = (
        message.location.latitude,
        message.location.longitude,
        message.from_user.id
    )
    conn = MySQLConnection(**read_db_config())
    cursor = conn.cursor()
    cursor.executemany("UPDATE users SET latitude = %s, longitude = %s WHERE chat_id = %s",
                       (newdata,))
    conn.commit()
    cursor.close()
    conn.close()
    bot.send_message(message.chat.id, 'Ваши координаты обновлены')
# Photo messages: try to read a product barcode from the picture.
@bot.message_handler(content_types=['photo'])
def sent_barcode(message):
    """Download the sent photo and run barcode recognition on it.

    NOTE(review): message.photo[2] assumes Telegram provided at least three
    photo sizes — confirm; smaller photos would raise IndexError.
    NOTE(review): on successful recognition the code is only printed, no
    reply is sent to the user — presumably barcode.read_barcode() performs
    the follow-up itself; verify.
    """
    raw = message.photo[2].file_id
    file_info = bot.get_file(raw)
    # Direct download URL for the photo on Telegram's file API.
    downloaded_file = 'https://api.telegram.org/file/bot' + bot_token + '/' + file_info.file_path
    bcode = barcode.read_barcode(downloaded_file,message.chat.id)
    print(str(bcode))
    # read_barcode returns the string 'No' on failure, bytes on success.
    if bcode == 'No':
        bot.send_message(message.chat.id, 'Не удалось распознать код. Попробуйте еще раз')
    else:
        print(bcode.decode())
# Build inline-search results (fires once the query is at least 2 chars).
@bot.inline_handler(func=lambda query: len(query.query) >= 2)
def query_text(query):
    """Answer an inline query with up to 5 matching products.

    Results are paginated via the inline-query offset (5 per page). SQL2
    filters by the user's saved city and aggregates the price range across
    that city's pharmacies; SQL is an older, unused variant kept for
    reference. Each result carries buttons whose callback data
    ('prlist:', 'mylist:', 'locallist:') is handled by callback_inline().
    """
    offset = int(query.offset) if query.offset else 0
    try:
        # Unused legacy query (city passed explicitly instead of joined).
        SQL = """\
        select t.nommodif, t.name, t.producer, t.photo, t.city, case when %s='' then 0 ELSE t.price end price
        FROM (SELECT p1.nommodif, p1.name, p1.producer, p1.photo, p3.city, p2.price FROM product p1
        inner join stock p2 on p2.company = p1.company and p2.product_id = p1.nommodif
        inner join store p3 on p3.company = p2.company and p3.name = p2.store
        WHERE lower(concat(p1.name,COALESCE(p1.search_key,''))) LIKE lower(%s)
        group by p1.nommodif, p1.name, p1.producer, p1.photo, p3.city, p2.price) t
        WHERE (t.city = %s or %s='') LIMIT 5 OFFSET %s
        """
        # Active query: match by name/producer/search_key in the user's city
        # and show either a single price or a min-max range.
        SQL2 = """\
        SELECT p1.nommodif, p1.name, p1.producer, p1.photo, p3.city,
        case when min(p2.price) <> max(p2.price) then
        CONCAT(min(p2.price),' - ',max(p2.price))
        else
        CONCAT(min(p2.price))
        end
        price FROM product p1
        inner join users u on u.chat_id = %s
        inner join stock p2 on p2.company = p1.company and p2.product_id = p1.nommodif
        inner join store p3 on p3.company = p2.company and p3.name = p2.store and p3.city = u.city
        WHERE lower(concat(p1.name,p1.producer,COALESCE(p1.search_key,''))) LIKE lower(%s)
        group by p1.nommodif, p1.name, p1.producer, p1.photo, p3.city
        LIMIT 5 OFFSET %s
        """
        #cursor.execute(SQL, (usercity,'%'+query.query+'%',usercity,usercity,offset,))
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        cursor.execute(SQL2, (query.from_user.id, '%' + query.query + '%', offset,))
        products = cursor.fetchall()
        results = []
        try:
            # A full page of 5 means there may be more: advance the offset.
            m_next_offset = str(offset + 5) if len(products) == 5 else None
            if products:
                for product in products:
                    try:
                        # Row layout: 0=id, 1=name, 2=producer, 3=photo URL,
                        # 4=city, 5=price (or range).
                        markup = types.InlineKeyboardMarkup()
                        markup.add(types.InlineKeyboardButton(text=u'\U0001F4CC Добавить в список', callback_data='prlist:' + str(product[0])),
                                   types.InlineKeyboardButton(text='Мой список', callback_data='mylist:'),)
                        markup.add(types.InlineKeyboardButton(text=u'\U0001F30D Искать по списку в аптеках', callback_data='locallist:'),)
                        #types.InlineKeyboardButton(text=u'\U0001F30D Найти аптеку', callback_data='local:'+str(product[0])),
                        #types.InlineKeyboardButton(text=u'\U0001F30D', callback_data='locallist:'),
                        markup.add(types.InlineKeyboardButton(text=u'\U0001F50D Продолжить поиск', switch_inline_query_current_chat=""),)
                        items = types.InlineQueryResultArticle(
                            id=product[0], title=product[1],
                            description="Производитель: "+product[2]+"\nЦена: "+str(product[5])+" тенге",
                            input_message_content=types.InputTextMessageContent(
                                message_text='*'+product[1]+'* [.](' + product[3] + ') \n'+product[2]+'\nЦена: '+str(product[5])+' тенге',
                                parse_mode='markdown',
                                disable_web_page_preview=False,
                            ),
                            reply_markup=markup,
                            thumb_url=product[3], thumb_width=100, thumb_height=100
                        )
                        results.append(items)
                    except Exception as e:
                        # Skip a malformed row but keep building the page.
                        print(e)
                cursor.close()
                conn.close()
                bot.answer_inline_query(query.id, results, next_offset=m_next_offset if m_next_offset else "", cache_time=86400)
                #bot.answer_inline_query(query.id, results, next_offset=m_next_offset if m_next_offset else "")
            else:
                # No matches: return a single "nothing found" placeholder.
                markup = types.InlineKeyboardMarkup()
                markup.add(
                    types.InlineKeyboardButton(text=u'\U0001F50D Продолжить поиск', switch_inline_query_current_chat=""),
                )
                items = types.InlineQueryResultArticle(
                    id='1000', title='Ничего не найдено',
                    description="Попробуйте изменить запрос...",
                    input_message_content=types.InputTextMessageContent(
                        message_text="По вашему запросу ничего не найдено. Попробуйте изменить запрос...",
                        parse_mode='markdown',
                        disable_web_page_preview=True,
                    ),
                    reply_markup=markup,
                    thumb_url='https://ru.seaicons.com/wp-content/uploads/2017/02/Cute-Ball-Stop-icon.png',
                    thumb_width=100, thumb_height=100
                )
                results.append(items)
                bot.answer_inline_query(query.id, results)
            # Record the search term for analytics.
            add_logs(query.from_user.id, 'search', query.query)
        except Exception as e:
            # NOTE(review): broad excepts only print — failures are silent
            # to the user.
            print(e)
    except Exception as e:
        print(e)
# Callback-query dispatcher: routes button presses by the prefix of
# call.data ('mycity:', 'mylist:', 'clearlist:', 'refresh:', 'locallist:',
# 'locallist_one:', 'prlist:').
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    """Dispatch inline-keyboard callbacks from chat and inline mode."""
    # Callback originated from a chat with the bot.
    if call.message:
        #print(call)
        if call.data.find('mycity:') == 0:
            # Save the chosen city and re-show the location keyboard.
            db_config = read_db_config()
            conn = MySQLConnection(**db_config)
            cursor = conn.cursor()
            cursor.execute('UPDATE users SET city = %s WHERE chat_id = %s', (call.data.replace('mycity:',''),call.from_user.id))
            conn.commit()
            cursor.close()
            conn.close()
            #cursor.close()
            #cnx.close()
            usercity = call.data.replace('mycity:','')
            citykeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=1)
            citykeyboard.add(types.KeyboardButton(text='Выбрать город ('+usercity+')'),
                             types.KeyboardButton(text='Обновить координаты', request_location=True))
            citykeyboard.add(types.KeyboardButton(text='Назад'))
            bot.send_message(call.from_user.id,
                             'Ваш город: '+usercity,
                             reply_markup=citykeyboard)
        if call.data.find('mylist:') == 0:
            get_search_list(call.from_user.id)
        if call.data.find('clearlist:') == 0:
            # Clear the user's product list.
            db_config = read_db_config()
            conn = MySQLConnection(**db_config)
            cursor = conn.cursor()
            cursor.execute('DELETE FROM user_product_list WHERE chat_id = %s', [(call.from_user.id)])
            conn.commit()
            cursor.close()
            conn.close()
            markup = types.InlineKeyboardMarkup()
            markup.add(
                types.InlineKeyboardButton(text=u'\U0001F50D Продолжить поиск', switch_inline_query_current_chat=""), )
            bot.send_message(call.from_user.id,
                             'Ваш список товаров удален.', reply_markup=markup)
        if call.data.find('refresh:') == 0:
            # Service-only command: re-import pharmacy data.
            # NOTE(review): import_product/import_store/import_stock are not
            # defined in this file — confirm they are imported elsewhere.
            import_product()
            import_store()
            import_stock()
        if call.data.find('locallist:') == 0:
            search_list(call.from_user.id)
        if call.data.find('locallist_one:') == 0:
            search_list_one(call.from_user.id)
        if call.data.find('prlist:') == 0:
            add_list(call.from_user.id, call.data.replace('prlist:',''), call.id)
    # Callback originated from inline mode (no chat message object).
    elif call.inline_message_id:
        if call.data.find('prlist:') == 0:
            add_list(call.from_user.id, call.data.replace('prlist:',''), call.id)
        elif call.data.find('locallist:') == 0:
            get_search_list(call.from_user.id)
            search_list(call.from_user.id)
        elif call.data.find('mylist:') == 0:
            get_search_list(call.from_user.id)
def products(user_id):
    """Send the "how it works" help text with the search entry buttons.

    The service account additionally gets a data-refresh button.
    """
    markup = types.InlineKeyboardMarkup()
    markup.add(types.InlineKeyboardButton(text=u'\U0001F4CC' + ' Мой список', callback_data='mylist:'),)
    markup.add(types.InlineKeyboardButton(text=u'\U0001F50D' + ' Поиск товаров', switch_inline_query_current_chat=""),)
    # Service-only command.
    if user_id == chat_id_service:
        markup.add(
            types.InlineKeyboardButton(text='Обновить данные', callback_data='refresh:'))
    bot.send_message(user_id, "КАК ЭТО РАБОТАЕТ:\n\n"
                              "1. В пункте [Локация] выберите город и обновите координаты (если Вы еще этого не сделали)\n\n"
                              "2. Нажмите [\U0001F50DПоиск], наберите боту часть наименования, например '@goAptoBot анальгин' или просто отправьте боту \U0001F4CE ФОТО ШТРИХ-КОДА с упаковки товара\n\n"
                              "3. Найдите один или несколько товаров и добавьте их в список \U0001F4CC \n\n"
                              "4. Нажмите [\U0001F30D Искать по списку в аптеках] - бот сообщит о цене и найдет ближайшие к вам аптеки, в которых есть товар из списка",
                     parse_mode='HTML', reply_markup=markup)
def add_logs(user_id, metod, value):
    """Append one analytics row: timestamp, user id, action name, payload."""
    conn = MySQLConnection(**read_db_config())
    cursor = conn.cursor()
    row = (datetime.datetime.now(), int(user_id), metod, value)
    cursor.executemany(
        "INSERT INTO logs (datetime,chat_id,metod,value) VALUES (%s,%s,%s,%s)",
        [row],
    )
    conn.commit()
    cursor.close()
    conn.close()
def add_list(user_id, in_data, call_id):
    """Add a product to the user's search list, log it, and pop an alert."""
    conn = MySQLConnection(**read_db_config())
    cursor = conn.cursor()
    row = (int(user_id), str(in_data))
    cursor.executemany(
        "INSERT INTO user_product_list (chat_id, product_id) VALUES (%s,%s)",
        [row],
    )
    conn.commit()
    cursor.close()
    conn.close()
    add_logs(int(user_id), 'product', str(in_data))
    bot.answer_callback_query(call_id, show_alert=True, text="Товар добавлен в список")
# Look up the city saved for a user.
def get_user_city(in_user_id):
    """Return the user's stored city, or '' when the user has no row."""
    conn = MySQLConnection(**read_db_config())
    cursor = conn.cursor()
    cursor.execute("SELECT city FROM users WHERE chat_id = %s", [in_user_id])
    row = cursor.fetchone()
    cursor.close()
    conn.close()
    return row[0] if row else ''
# Show the user's current product search list.
def get_search_list(user_id):
    """Send the user's saved product list with clear/search/continue buttons.

    NOTE(review): the except branch is reached on DB/API errors, not on an
    empty list — an empty list sends just the header. The 'Список пустой...'
    message therefore reports errors as "empty list".
    """
    try:
        product_list = 'СПИСОК ДЛЯ ПОИСКА:\n\n'
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        sql = (
            "SELECT p2.name, p2.producer FROM user_product_list p1, product p2 WHERE p2.nommodif = p1.product_id AND p1.chat_id = %s group by p2.name, p2.producer order by p2.name")
        cursor.execute(sql, [(user_id)])
        products = cursor.fetchall()
        # One "*name*\nproducer" paragraph per product (markdown bold).
        for product in products:
            product_list = product_list + '*' + product[0] + '*' + '\n' + product[1] + '\n' + '\n'
        markup = types.InlineKeyboardMarkup()
        markup.add(types.InlineKeyboardButton(text=u'\U0001F5D1 Очистить список', callback_data='clearlist:'),)
        markup.add(types.InlineKeyboardButton(text=u'\U0001F30D Искать по списку в аптеках', callback_data='locallist:'),)
        markup.add(types.InlineKeyboardButton(text=u'\U0001F50D Продолжить поиск', switch_inline_query_current_chat=""),)
        bot.send_message(user_id,
                         product_list,
                         parse_mode='markdown',
                         reply_markup=markup, )
        cursor.close()
        conn.close()
    except Exception as e:
        print(e)
        bot.send_message(user_id,
                         'Список пустой...')
# Search pharmacies that stock the user's whole product list.
def search_list(user_id):
    """Find up to 3 nearest pharmacies that carry every product in the list.

    Sends each pharmacy as a map venue (with distance computed by the SQL
    function get_way) plus a text card with opening hours and phone; then
    offers a per-product fallback search via the 'locallist_one:' button.
    """
    # Buttons offered after the results.
    markup = types.InlineKeyboardMarkup()
    markup.add(types.InlineKeyboardButton(text=u'\U0001F30D Искать каждый товар отдельно', callback_data='locallist_one:'),)
    markup.add(types.InlineKeyboardButton(text=u'\U0001F50D Продолжить поиск', switch_inline_query_current_chat=""), )
    # First make sure the list is not empty.
    db_config = read_db_config()
    conn = MySQLConnection(**db_config)
    cursor = conn.cursor()
    SQL = 'select count(distinct(product_id)) from user_product_list where chat_id = %s'
    cursor.execute(SQL, (user_id,))
    products = cursor.fetchone()
    if products[0]==0:
        bot.send_message(user_id,
                         'Сначала добавьте товары в список для поиска')
        cursor.close()
        conn.close()
    else:
        # Pharmacies in the user's city whose stock covers the entire list
        # (HAVING count == list size), nearest first, top 3.
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        SQL = """\
        SELECT s.name, s.address, s.mode, s.phone, s.latitude ,s.longitude, t.way FROM (
        SELECT count(p2.product_id) kol, p1.name, get_way(p1.latitude ,p1.longitude,u.latitude,u.longitude) way FROM users u
        inner join store p1 on p1.city = u.city
        inner join stock p2 on p2.company = p1.company and p1.name = p2.store
        WHERE u.chat_id = %s and p2.product_id in (select distinct(product_id) from user_product_list where chat_id = %s)
        group by p1.name, p1.latitude ,p1.longitude,u.latitude,u.longitude having count(p2.product_id)=(select count(distinct(product_id)) from user_product_list where chat_id = %s)
        ) t
        inner join store s on s.name = t.name
        order by t.way asc
        LIMIT 3
        """
        cursor.execute(SQL, (user_id, user_id, user_id,))
        stores = cursor.fetchall()
        for store in stores:
            try:
                # Venue pin: coordinates, "name (distance m)", address.
                bot.send_venue(user_id,
                               store[4],
                               store[5],
                               store[0] + ' (' + str(store[6]) + ' м.)',
                               store[1]
                               )
                bot.send_message(user_id,
                                 store[2] + '\n' + 'Тел: ' + store[3] + '\nЕсть все по списку',
                                 parse_mode='markdown', )
            except Exception as e:
                print(e)
        cursor.close()
        conn.close()
        bot.send_message(user_id,
                         'Если вас не устроили эти аптеки, вы можете поискать отдельно каждый товар из списка в ближайших аптеках',
                         parse_mode='markdown',
                         reply_markup=markup, )
def search_list_one(user_id):
    """For every product on the user's list, find the nearest pharmacy that
    stocks it and send that pharmacy's location, price and contacts.
    """
    # Inline keyboard.
    markup = types.InlineKeyboardMarkup()
    markup.add(
        types.InlineKeyboardButton(text=u'\U0001F30D Искать каждый товар отдельно', callback_data='locallist_one:'),
    )
    # Check that the list contains at least one product.
    db_config = read_db_config()
    conn = MySQLConnection(**db_config)
    cursor = conn.cursor()
    SQL = 'select count(distinct(product_id)) from user_product_list where chat_id = %s'
    cursor.execute(SQL, (user_id,))
    products = cursor.fetchone()
    if products[0]==0:
        bot.send_message(user_id,
                         'Сначала добавьте товары в список для поиска')
        cursor.close()
        conn.close()
    else:
        # Look for the closest stocking pharmacy per listed product.
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        # Subquery t: per product, the minimal distance (get_way) among
        # stocking pharmacies in the user's city; outer query resolves the
        # pharmacy at that minimal distance.
        SQL = """\
        select r.name, r.producer, p3.name, p3.address, p3.mode, p3.latitude, p3.longitude, p3.phone, t.way, t.price from user_product_list p
        inner join product r on r.nommodif = p.product_id
        inner join users u on u.chat_id = p.chat_id
        inner join store p3 on p3.city = u.city and r.company = p3.company
        inner join
        (
        select distinct(pl.product_id) product_id, p2.price, min(get_way(p3.latitude ,p3.longitude,u.latitude,u.longitude)) way from user_product_list pl
        inner join users u on u.chat_id = pl.chat_id
        inner join stock p2 on p2.product_id = pl.product_id
        inner join store p3 on p3.company = p2.company and p3.name = p2.store and p3.city = u.city
        where pl.chat_id = %s
        group by pl.product_id, p2.price
        ) t
        where p.chat_id = %s
        and get_way(p3.latitude ,p3.longitude,u.latitude,u.longitude)=t.way and r.nommodif = t.product_id
        group by r.name, r.producer, p3.name, p3.address, p3.mode, p3.latitude, p3.longitude, p3.phone, t.way, t.price
        """
        cursor.execute(SQL, (user_id, user_id, ))
        stores = cursor.fetchall()
        for store in stores:
            try:
                # Map pin, then product/price/pharmacy details.
                bot.send_venue(user_id,
                               store[5],
                               store[6],
                               store[2] + ' (' + str(store[8]) + ' м.)',
                               store[3]
                               )
                bot.send_message(user_id,
                                 '*'+store[0]+'*\n'+store[1]+'\n'+'Цена: '+str(store[9])+' тенге\n\n'+
                                 store[4] + '\n' + 'Тел: ' + store[7] ,
                                 parse_mode='markdown', )
            except Exception as e:
                print(e)
        cursor.close()
        conn.close()
def import_data():
    """Run a full catalog refresh: products, then stores, then stock levels."""
    for import_step in (import_product, import_store, import_stock):
        import_step()
def import_product():
    """Re-import the product catalog from the ЦВЕТНАЯ REST endpoint.

    Existing ЦВЕТНАЯ rows are deleted and the downloaded items bulk-inserted;
    progress or failure is reported to the service chat.  NOTE(review):
    pagination (the disabled triple-quoted block below) is not active here,
    so only the first page of products is imported — unlike import_stock,
    which does follow pagination.
    """
    try:
        response = requests.get(rest_link_product, verify=False)
        if response.status_code == 404:
            # BUG FIX: the message read 'Не оступен' (typo); spelling now
            # matches the equivalent message in import_store.
            bot.send_message(chat_id_service, 'Не доступен сервер ЦВЕТНАЯ')
        else:
            todos = json.loads(response.text)
            indata = []
            db_config = read_db_config()
            conn = MySQLConnection(**db_config)
            cursor = conn.cursor()
            # Full refresh: wipe this supplier's rows before re-inserting.
            cursor.execute("DELETE FROM product WHERE company='ЦВЕТНАЯ'")
            for row in todos['items']:
                indata.append((
                    'ЦВЕТНАЯ',
                    row['nommodif'],
                    row['modif_name'],
                    row['producer'],
                    row['barcode'],
                    row['photo'],
                    row['skey'],
                ))
            '''
            try:
                while todos['next']['$ref']:
                    newlink = todos['next']['$ref']
                    print(newlink)
                    response = requests.get(newlink, verify=False)
                    todos = json.loads(response.text)
                    for row in todos['items']:
                        indata.append((
                            'ЦВЕТНАЯ',
                            row['nommodif'],
                            row['modif_name'],
                            row['producer'],
                            row['barcode']
                        ))
            '''
            cursor.executemany("INSERT INTO product (company,nommodif,name,producer,barcode,photo,search_key) VALUES (%s,%s,%s,%s,%s,%s,%s)",
                               indata)
            conn.commit()
            cursor.close()
            conn.close()
            bot.send_message(chat_id_service, 'Справочник товаров обновлен')
    except requests.exceptions.ConnectionError:
        # Notify the service chat that the price service is unreachable.
        bot.send_message(chat_id_service, 'Внимание! Проблема с доступом к сервису цен')
def import_store():
    """Re-import the pharmacy (store) directory from the ЦВЕТНАЯ REST
    endpoint: delete existing ЦВЕТНАЯ rows and bulk-insert the download.
    Progress or failure is reported to the service chat.
    """
    # Import the pharmacy directory.
    try:
        response = requests.get(rest_link_store, verify=False)
        if response.status_code == 404:
            bot.send_message(chat_id_service, 'Не доступен сервер ЦВЕТНАЯ')
        else:
            todos = json.loads(response.text)
            indata = []
            db_config = read_db_config()
            conn = MySQLConnection(**db_config)
            cursor = conn.cursor()
            # Full refresh: wipe this supplier's rows before re-inserting.
            cursor.execute("DELETE FROM store WHERE company='ЦВЕТНАЯ'")
            for row in todos['items']:
                indata.append((
                    row['company'],
                    row['store'],
                    row['city'],
                    row['address'],
                    row['lon'],
                    row['lat'],
                    row['phone'],
                    row['resh']
                ))
            cursor.executemany(
                "INSERT INTO store (company,name,city,address,longitude,latitude,phone,mode) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
                indata)
            conn.commit()
            cursor.close()
            conn.close()
            bot.send_message(chat_id_service, 'Справочник аптек обновлен')
            #cursor.close()
            #cnx.close()
    except requests.exceptions.ConnectionError:
        # Notify the service chat that the price service is unreachable.
        bot.send_message(chat_id_service, 'Внимание! Проблема с доступом к сервису цен')
def import_stock():
    """Re-import stock levels (quantities and prices) from the ЦВЕТНАЯ REST
    endpoint, following `todos['next']['$ref']` pagination until the payload
    stops providing a next page (the resulting KeyError is swallowed by the
    inner try/except, ending the loop).

    Existing ЦВЕТНАЯ rows are deleted first, then all pages are
    bulk-inserted.  Progress or failure is reported to the service chat.
    """
    try:
        response = requests.get(rest_link_stock, verify=False)
        if response.status_code == 404:
            # BUG FIX: the message read 'Не оступен' (typo); spelling now
            # matches the equivalent message in import_store.
            bot.send_message(chat_id_service, 'Не доступен сервер ЦВЕТНАЯ')
        else:
            todos = json.loads(response.text)
            indata = []
            db_config = read_db_config()
            conn = MySQLConnection(**db_config)
            cursor = conn.cursor()
            # Full refresh: wipe this supplier's stock rows first.
            cursor.execute("DELETE FROM stock WHERE company='ЦВЕТНАЯ'")
            for row in todos['items']:
                indata.append((
                    'ЦВЕТНАЯ',
                    row['store'],
                    row['nommodif'],
                    row['restfact'],
                    row['price']
                ))
            try:
                # Follow pagination links for the remaining pages.
                while todos['next']['$ref']:
                    newlink = todos['next']['$ref']
                    print(newlink)
                    response = requests.get(newlink, verify=False)
                    todos = json.loads(response.text)
                    for row in todos['items']:
                        indata.append((
                            'ЦВЕТНАЯ',
                            row['store'],
                            row['nommodif'],
                            row['restfact'],
                            row['price']
                        ))
            except Exception as e:
                print(e)
            cursor.executemany("INSERT INTO stock (company,store,product_id,qnt,price) VALUES (%s,%s,%s,%s,%s)",
                               indata)
            conn.commit()
            cursor.close()
            conn.close()
            bot.send_message(chat_id_service, 'Остатки обновлены')
    except requests.exceptions.ConnectionError:
        # Notify the service chat that the price service is unreachable.
        bot.send_message(chat_id_service, 'Внимание! Проблема с доступом к сервису цен')
# Hook up the recurring-import scheduler (currently disabled).
#schedule.every().day.at("05:00").do(job)
#schedule.every().hour.do(import_data)
"""
schedule.every(10).minutes.do(import_data)
# это функция проверки на запуск импорта
def check_import_data():
    while True:
        schedule.run_pending()
        time.sleep(60)
# а теперь запускаем проверку в отдельном потоке
if __name__ == '__main__':
    freeze_support()
    p1 = Process(target=check_import_data, args=())
    p1.start()
"""
# Main loop: keep long-polling Telegram forever, restarting after errors.
while True:
    try:
        bot.polling(none_stop=True)
    except Exception as e:
        print(e)
        # Retry after 15 seconds if the Telegram server is unreachable.
        time.sleep(15)
| null | main.py | main.py | py | 33,479 | python | en | code | null | code-starcoder2 | 51 |
122696024 | """
Project 1 - Degree distributions for graphs
Part of Algorithmic Thinking (Part 1) on Coursera (coursera.org)
"""
# Example directed graphs from the assignment, as adjacency dictionaries:
# each key is a node, mapped to the set of nodes it has an edge TO.
EX_GRAPH0 = {
    0: set([1, 2]),
    1: set([]),
    2: set([])
}
EX_GRAPH1 = {
    0: set([1, 4, 5]),
    1: set([2, 6]),
    2: set([3]),
    3: set([0]),
    4: set([1]),
    5: set([2]),
    6: set([])
}
EX_GRAPH2 = {
    0: set([1, 4, 5,]),
    1: set([2, 6]),
    2: set([3, 7,]),
    3: set([7]),
    4: set([1]),
    5: set([2]),
    6: set([]),
    7: set([3]),
    8: set([1, 2]),
    9: set([0, 3, 4, 5, 6, 7])
}
def make_complete_graph(num_nodes):
    """
    Returns a complete directed graph for the number of nodes requested
    :param num_nodes: Number of nodes for which graph is requested
    :return: Graph as an adjacency dictionary; {} for non-int or <= 0 input
    """
    # `type(...) is not int` (rather than isinstance) deliberately rejects
    # bools, matching the original validation.
    if type(num_nodes) is not int or num_nodes <= 0:
        return {}
    # Every node has an edge to every other node; no self-loops.
    return {node: set(range(num_nodes)) - {node} for node in range(num_nodes)}
def compute_in_degrees(digraph):
    """
    Computes the in degree for all nodes in a graph
    :param digraph: directed graph as a dict node -> set of out-neighbors
    :return: Dictionary with all nodes of graph and associated in degree
    """
    # Single pass over the edge set (O(V + E)) instead of the original
    # O(V^2) scan of every (node, node) pair.
    in_degree = dict.fromkeys(digraph, 0)
    for neighbors in digraph.values():
        for head in neighbors:
            # Ignore edges pointing at nodes that are not graph keys,
            # matching the original behavior.
            if head in in_degree:
                in_degree[head] += 1
    return in_degree
def in_degree_distribution(digraph):
    """
    Computes the unnormalized in-degree distribution of a directed graph
    :param digraph: Graph for which in degree distribution is to be computed
    :return: dict mapping an in-degree value to the number of nodes with it
    """
    # BUG FIX: dict.has_key() was removed in Python 3 and raised
    # AttributeError here; dict.get with a default is the portable idiom.
    distribution = {}
    for degree in compute_in_degrees(digraph).values():
        distribution[degree] = distribution.get(degree, 0) + 1
    return distribution
| null | problems/coursera/1-graph_degree/graph_degree.py | graph_degree.py | py | 2,148 | python | en | code | null | code-starcoder2 | 51 |
55379322 | # coding: utf-8
import requests
import polling
import asyncio
import logging
from aiohttp import ClientSession
from time import sleep
class ShutterManager:
    """HTTP client for a BleBox shutterBox-style roller-shutter controller.

    The movement commands (`up`, `down`, `stop`, `position`, `tilt`) are
    asynchronous (they delegate to `_send_command`); `current_position`
    (default) and `is_in_position` are blocking.
    """
    def __init__(self, address):
        # address: host (optionally host:port) of the shutter controller.
        self.address = address
        self.logger = logging.getLogger('blebox.ShutterManager')
    def __repr__(self):
        return self.address
    def up(self, *args):
        """Start opening the shutter; returns an awaitable."""
        url = 'http://{}/s/u'.format(self.address)
        return self._send_command(url)
    def down(self, *args):
        """Start closing the shutter; returns an awaitable."""
        url = 'http://{}/s/d'.format(self.address)
        return self._send_command(url)
    def stop(self, *args):
        """Stop any movement; returns an awaitable."""
        url = 'http://{}/s/s'.format(self.address)
        return self._send_command(url)
    def position(self, position):
        """Move to an absolute position; returns an awaitable."""
        url = 'http://{}/s/p/{}'.format(self.address, position)
        return self._send_command(url)
    def current_position(self, do_async=False):
        """Return the shutter position reported by /api/shutter/state.

        NOTE(review): with do_async=True this returns the coroutine from
        _send_command, whose result is a (json, status) tuple rather than
        the bare position — confirm callers expect that.
        """
        url = 'http://{}/api/shutter/state'.format(self.address)
        if do_async == True:
            return self._send_command(url)
        else:
            result = requests.get(url)
            return result.json()['currentPos']['position']
    def is_in_position(self, position):
        """Poll (1 s steps, up to 600 s) until the shutter reaches `position`.

        Blocking; returns the poll result, or False on timeout.
        """
        try:
            return polling.poll(lambda: self.current_position() == position, step=1, timeout=600)
        except polling.TimeoutException:
            return False
    async def tilt(self, *args):
        """Fully close, then open for args[0] seconds (default 0.8) and stop."""
        if not args:
            time = 0.8
        else:
            time = args[0]
        await self.down()
        # Blocking wait until fully closed (position 100).
        self.is_in_position(100)
        await self.up()
        await asyncio.sleep(time)
        await self.stop()
    async def _send_command(self, url):
        """GET `url`; return (decoded JSON body, HTTP status).

        NOTE(review): if response.json() raises, the exception is logged but
        `json` stays unbound and the return raises NameError — confirm.
        """
        async with ClientSession() as session:
            async with session.get(url) as response:
                try:
                    json = await response.json()
                except Exception as e:
                    self.logger.exception(e)
                return json, response.status
| null | blebox/shutter.py | shutter.py | py | 1,955 | python | en | code | null | code-starcoder2 | 51 |
312064231 | from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras import backend as keras
import numpy as np
import os
import glob
import cv2
def merge_and_save():
    """Overlay label / prediction masks onto the test images.

    For every result image i (except known-bad cases 54 and 72), the blue
    channel of the test image is replaced by the ground-truth mask
    (saved as merged/<i>.jpg) and by the predicted mask
    (saved as merged/<i>_p.jpg).
    """
    imgtype = "jpg"
    train = glob.glob("results/*."+imgtype)
    for i in range(len(train)):
        # BUG FIX: was `i is not 54 and i is not 72` — `is` compares object
        # identity, which only coincidentally works for small CPython ints
        # (SyntaxWarning since Python 3.8).  Compare by value instead.
        if i != 54 and i != 72:
            img_t = load_img("test/"+str(i)+"."+imgtype)
            img_l = load_img("test/"+str(i)+"_l."+imgtype)
            img_p = load_img("results/"+str(i)+"."+imgtype)
            # Ground-truth overlay: label mask into the blue channel.
            x_t = img_to_array(img_t)
            x_l = img_to_array(img_l)
            x_t[:,:,2] = x_l[:,:,0]
            img_tmp = array_to_img(x_t)
            img_tmp.save("merged/"+str(i)+"."+imgtype)
            # Prediction overlay: predicted mask into the blue channel.
            x_tp = img_to_array(img_t)
            x_p = img_to_array(img_p)
            x_tp[:,:,2] = x_p[:,:,0]
            img_tmp = array_to_img(x_tp)
            img_tmp.save("merged/"+str(i)+"_p."+imgtype)
        '''
        x_l = img_to_array(img_l)
        x_p = img_to_array(img_p)
        tmp = np.asarray(x_p).astype(np.bool)
        img_tmp = array_to_img(tmp)
        img_tmp.save("bool/"+str(i)+"."+imgtype)
        tmp = np.asarray(x_l).astype(np.bool)
        img_tmp = array_to_img(tmp)
        img_tmp.save("bool/"+str(i)+"_t."+imgtype)
        '''
def dice_coef(gt, seg):
    """Dice similarity coefficient between two binary masks.

    Inputs are coerced to boolean arrays; returns
    2 * |gt AND seg| / (|gt| + |seg|).

    BUG FIX: the original used `np.bool`, an alias deprecated in NumPy 1.20
    and removed in 1.24; the builtin `bool` dtype is the supported spelling.
    Division by zero still occurs when both masks are empty (behavior kept).
    """
    gt = np.asarray(gt, dtype=bool)
    seg = np.asarray(seg, dtype=bool)
    intersection = np.logical_and(gt, seg)
    return intersection.sum() * 2.0 / (np.sum(seg) + np.sum(gt))
def calculate_dice():
    """Print the Dice score for every prediction/ground-truth pair and the
    mean score over the processed pairs.

    Indices 54 and 72 are skipped (same known-bad cases as merge_and_save).
    """
    imgtype = "jpg"
    train = glob.glob("results/*."+imgtype)
    dice_sum = 0
    processed = 0
    for i in range(len(train)):
        # BUG FIX: was `i is not 54 and i is not 72` — identity comparison
        # on int literals (SyntaxWarning since Python 3.8); compare by value.
        if i != 54 and i != 72:
            img_l = load_img("test/"+str(i)+"_l."+imgtype)
            img_p = load_img("results/"+str(i)+"."+imgtype)
            x_l = img_to_array(img_l)
            x_p = img_to_array(img_p)
            dice = dice_coef(x_l, x_p)
            dice_sum += dice
            processed += 1
            print(i)
            print(dice)
    # BUG FIX: the mean was divided by len(train) even though two indices
    # are skipped, silently deflating the average; divide by the number of
    # pairs actually scored.
    print(dice_sum / processed if processed else 0.0)
if __name__ == "__main__":
    # Build the overlay images, then report Dice scores.
    merge_and_save()
    calculate_dice()
430322468 | import SimpleITK as sitk
import numpy as np
from scipy.spatial.transform import Rotation as R
from dltk.io.preprocessing import whitening
"""
img: simpleitk input image
angle: radian angle to rotate around the z axis
size: voxel size for resampled data
"""
def rotate_image(img, angle, size=[64, 64, 64], is_label=False):
    """Resample `img` rotated by `angle` radians about the z axis.

    img: SimpleITK input image.
    angle: rotation angle in radians.
    size: output voxel grid (default 64^3).  NOTE(review): mutable default
          argument, but it is never mutated here.
    is_label: nearest-neighbour interpolation + uint8 output so label values
              are not blended; otherwise B-spline + float32.

    Returns a numpy array; intensity volumes are whitened, labels are not.
    """
    rotation_center = (0, 0, 0)
    # Rigid transform built from the quaternion of the requested z rotation.
    rotation = sitk.VersorTransform(R.from_euler('Z', angle).as_quat(), rotation_center)
    rigid_versor = sitk.VersorRigid3DTransform()
    rigid_versor.SetRotation(rotation.GetVersor())
    rigid_versor.SetCenter(rotation_center)
    # Output geometry covering the rotated bounds of the input.
    out_origin, out_size, out_spacing = get_output_parameters(img, rigid_versor, size)
    resample_filter = sitk.ResampleImageFilter()
    resample_filter.SetTransform(rigid_versor)
    if is_label:
        resample_filter.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resample_filter.SetInterpolator(sitk.sitkBSpline)
    resample_filter.SetSize(size)
    resample_filter.SetOutputOrigin(out_origin)
    resample_filter.SetOutputSpacing(out_spacing)
    resample_filter.SetOutputDirection(img.GetDirection())
    if is_label:
        resample_filter.SetOutputPixelType(sitk.sitkUInt8)
    else:
        resample_filter.SetOutputPixelType(sitk.sitkFloat32)
    resample_filter.SetDefaultPixelValue(0.0)
    output_img = resample_filter.Execute(img)
    if is_label:
        return sitk.GetArrayFromImage(output_img)
    else:
        return whitening(sitk.GetArrayFromImage(output_img))
"""
img: simpleitk input image
axes: 1 for no flip, -1 for a flip of array of (int, 3)
size: voxel size for resampled data
"""
def flip_image(img, axes=[1, -1, 1], size=[64, 64, 64], is_label=False):
    """Resample `img` mirrored along the axes whose entry in `axes` is -1.

    axes: per-axis factor (1 = keep, -1 = flip), applied by overwriting the
          transform's 3x3 matrix with a diagonal.
    size / is_label: as in rotate_image.

    NOTE(review): out_origin is computed from get_output_parameters but the
    ORIGINAL image origin is handed to the resampler — confirm intended.
    """
    out_origin, out_size, out_spacing = get_output_parameters(img, sitk.Transform(3, sitk.sitkIdentity), size)
    rotation_center = (0, 0, 0)
    # Identity rotation; the flip is injected via SetMatrix below.
    rotation = sitk.VersorTransform(np.array([0., 0., 0., 1.]), rotation_center)
    rigid_versor = sitk.VersorRigid3DTransform()
    rigid_versor.SetRotation(rotation.GetVersor())
    rigid_versor.SetCenter(rotation_center)
    rigid_versor.SetMatrix([axes[0], 0, 0, 0, axes[1], 0, 0, 0, axes[2]])
    resample_filter = sitk.ResampleImageFilter()
    resample_filter.SetTransform(rigid_versor)
    if is_label:
        resample_filter.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resample_filter.SetInterpolator(sitk.sitkBSpline)
    resample_filter.SetSize(size)
    resample_filter.SetOutputOrigin(img.GetOrigin())
    resample_filter.SetOutputSpacing(out_spacing)
    resample_filter.SetOutputDirection(img.GetDirection())
    if is_label:
        resample_filter.SetOutputPixelType(sitk.sitkUInt8)
    else:
        resample_filter.SetOutputPixelType(sitk.sitkFloat32)
    resample_filter.SetDefaultPixelValue(0.0)
    output_img = resample_filter.Execute(img)
    if is_label:
        return sitk.GetArrayFromImage(output_img)
    else:
        return whitening(sitk.GetArrayFromImage(output_img))
"""
given an image and a transform, provide the transformed bounds
returns: output_origin, size and spacing based on a given transform
output_origin : the origin of the image given a transform
output_spacing: the spacing given the size input set at 64 voxels as a default.
output_size : the size given the input image spacing
"""
def get_output_parameters(image, transform, size=[64, 64, 64]):
    """Compute output geometry for resampling `image` under `transform`.

    The 8 corners of the image bounds are pushed through the INVERSE
    transform; the axis-aligned box around them gives:
      output_origin  - minimum corner of the transformed bounding box
      output_size    - voxel count at the input spacing (rounded)
      output_spacing - spacing needed to cover the box with `size` voxels
    """
    # origin and maximum of the transformed image.
    x0, y0, z0 = image.GetOrigin()
    x1, y1, z1 = image.TransformIndexToPhysicalPoint(image.GetSize())
    trans_pts = []
    for x in (x0, x1):
        for y in (y0, y1):
            for z in (z0, z1):
                trans_pt = transform.GetInverse().TransformPoint((x, y, z))
                trans_pts.append(trans_pt)
    min_arr = np.array(trans_pts).min(axis=0)
    max_arr = np.array(trans_pts).max(axis=0)
    output_origin = min_arr
    output_size = np.round(((max_arr - min_arr) / image.GetSpacing())).astype(int)
    output_spacing = ((max_arr - min_arr) / size).astype(float)
    # print(output_size)
    return output_origin, output_size.tolist(), output_spacing.tolist()
"""
Pre-process and augment data (if defined)
Returns a list of all pre-processed/augmented data volumes as tuples
"""
def preprocess(volume_list, augment_data=False):
    """Load every (bmode, pd, label) case and optionally append augmentations.

    volume_list: iterable of (bmode_path, pd_path, label_path) tuples.
    Returns a flat list of (bmode, pd, label) numpy-volume tuples.
    """
    processed = []
    for paths in volume_list:
        processed.append(load_volumes(paths))
        if augment_data:
            processed.extend(augment(paths))
    return processed
"""
Augments tuple of volumes (BMode, PD, Label) and returns a list of all augmented volumes (as tuples)
TO-DO: Currently just returns an array of the same volumes as a tuple array but should insert logic here
NOTE: It should not return the input volumes in the return array since its already added to the full volume list
ONLY append the augmentations
"""
def augment(volume_tuple):
    """Generate augmented (bmode, pd, label) numpy volumes for one case.

    Produces ten z-axis rotations (np.linspace(-pi/18, pi/18, 11) with the
    zero angle removed) plus three axis flips.  The unaugmented volumes are
    NOT included in the returned list (the caller appends them separately).

    NOTE(review): the legacy comments below mention +/-20 and +/-40 degree
    sweeps, which do not match the current linspace (+/-10 degrees) —
    confirm which protocol is intended.
    """
    bmode, pd, label = sitk.ReadImage(volume_tuple[0], sitk.sitkFloat32), sitk.ReadImage(volume_tuple[1], sitk.sitkFloat32), sitk.ReadImage(volume_tuple[2], sitk.sitkUInt8)
    augmented_tuples = []
    size = [64, 64, 64]
    # initial go - -20 to +20 degrees (5 deg increment) no zero
    # angles = array([-0.34906585, -0.26179939, -0.17453293, -0.08726646, 0.08726646, 0.17453293, 0.26179939, 0.34906585])
    # now with more angles (-40 + 40) in 4 degree increments... to get to ~27 we augmented with prior
    angles = np.linspace(-np.pi / 18, np.pi / 18, 11)
    angles = angles[angles != 0]
    for rad in angles:
        augmented_tuples.append((rotate_image(bmode, rad, size), rotate_image(pd, rad, size),
                                 rotate_image(label, rad, size, True)))
    axes_flip = [-1, 1, 1], [1, -1, 1], [-1, -1, 1]
    for a in axes_flip:
        augmented_tuples.append(
            (flip_image(bmode, a, size), flip_image(pd, a, size), flip_image(label, a, size, True)))
    return augmented_tuples
def load_volumes(volume_tuple):
    """Read one (bmode, pd, label) case from disk.

    Returns (whitened bmode, whitened pd, raw label) numpy volumes.
    """
    bmode_vol = sitk.GetArrayFromImage(sitk.ReadImage(volume_tuple[0], sitk.sitkFloat32))
    pd_vol = sitk.GetArrayFromImage(sitk.ReadImage(volume_tuple[1], sitk.sitkFloat32))
    label_vol = sitk.GetArrayFromImage(sitk.ReadImage(volume_tuple[2], sitk.sitkUInt8))
    return whitening(bmode_vol), whitening(pd_vol), label_vol
| null | preprocess.py | preprocess.py | py | 6,419 | python | en | code | null | code-starcoder2 | 50 |
439960465 | #### My Solution Using Hashtable ####
class FindElements:
    """Recover a binary tree whose node values were all contaminated to -1.

    The tree is decontaminated on construction (root gets 0; a node with
    value v has children 2*v+1 and 2*v+2) and every recovered value is kept
    in a hash table so `find` is O(1).
    """
    def __init__(self, root: "TreeNode"):
        # value -> node for every node in the recovered tree
        self.hash_table = dict()
        self.decontaminate(root, 0)
    def decontaminate(self, root, value):
        """Restore `root.val` to `value` and recurse into both subtrees."""
        if root is None:
            return
        root.val = value
        self.hash_table[value] = root
        self.decontaminate(root.left, 2 * value + 1)
        self.decontaminate(root.right, 2 * value + 2)
    def find(self, target: int) -> bool:
        """Return True iff `target` appears in the recovered tree."""
        return target in self.hash_table
    def __del__(self):
        # BUG FIX: the original signature was __del__(slef) while the body
        # used `self`, raising NameError during finalization; it also tried
        # to delete a nonexistent attribute `self.root`.
        del self.hash_table
| null | 1261_Find_Elements_in_a_Contaminated_Binary_Tree.py | 1261_Find_Elements_in_a_Contaminated_Binary_Tree.py | py | 760 | python | en | code | null | code-starcoder2 | 50 |
328176617 | # test for convolution
from conv import *
import time
if 'DEF_CONV' not in globals():
from transfer.conv import *
def test_matrix():
    """Smoke-test Matrix construction, printing and addition, then time the
    addition of two 1000x1000 matrices with ~0.1% of entries zeroed out.
    """
    x = np.array([[0.09, 0.0, 0.5], [0.2, 0.3, 0.08]])
    m1 = Matrix(x)
    print(m1)
    x = np.array([[-0.09, 0.3, 0.07], [0.03, -0.3, 0.1]])
    m2 = Matrix(x)
    print(m2)
    m_add = m1 + m2
    print(m_add)
    # check speed
    a = np.random.uniform(low=-1.0, high=1.0, size=[1000, 1000])
    b = np.random.uniform(low=-1.0, high=1.0, size=[1000, 1000])
    cond = abs(a) > 0.999
    a[cond] = 0
    cond = abs(b) > 0.999
    b[cond] = 0
    m1 = Matrix(a)
    m2 = Matrix(b)
    # numpy function:
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; time.perf_counter() is the documented replacement for interval
    # timing.
    start_t = time.perf_counter()
    ######## COUNT IN ########
    m_add = m1 + m2
    end_t = time.perf_counter()
    print("%s seconds." % (end_t - start_t))
    ######## COUNT OUT #######
    #print(m_add)
def main():
    # Entry point: run the Matrix smoke/benchmark test.
    test_matrix()
main()
316273474 | """
given a string, return longest palindrome of the string assuming you can reorder all the letters
"""
def longest_palindrome(s):
    """Return a longest palindrome formed by reordering the letters of `s`.

    Every pair of a letter contributes one copy to each half; at most one
    letter with an odd count sits in the middle.  Runs in O(len(s)).

    BUG FIX: the original used dict.iteritems() and xrange(), both of which
    were removed in Python 3.
    """
    letter_count = {}
    for char in s:
        letter_count[char] = letter_count.get(char, 0) + 1
    half = []        # one copy per pair; mirrored around the center
    odd_chars = []   # letters with an odd count (center candidates)
    for char, count in letter_count.items():
        half.extend([char] * (count // 2))
        if count % 2 == 1:
            odd_chars.append(char)
    center = odd_chars[0] if odd_chars else ""
    return "".join(half) + center + "".join(reversed(half))
139188028 | import sys
import socket
def packet_capture_socket():
    """Create a promiscuous raw IP socket bound to the local interface.

    Windows-only: SIO_RCVALL / RCVALL_ON are Winsock ioctls, and creating a
    raw socket requires administrator privileges.
    """
    # the public network interface
    HOST = socket.gethostbyname(socket.gethostname())
    # sniff traffic through all ports
    PORT = 0
    # create a new socket instance, requires administrator privileges
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
    s.bind((HOST, PORT))
    # include IP headers
    s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    # receive all packets
    s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
    return s
if __name__ == '__main__':
    s = packet_capture_socket()
    # sniff network traffic
    while True:
        # Up to 4096 bytes per datagram; the sender address is discarded.
        data, _ = s.recvfrom(4096)
        if not data:
            print('received nothing')
            sys.exit()
        print(data)
        print('\r\n')
    # NOTE(review): unreachable — the loop only exits via sys.exit(), which
    # skips this close (the OS reclaims the socket on process exit).
    s.close()
| null | data-scripts/network_sniffer.py | network_sniffer.py | py | 847 | python | en | code | null | code-starcoder2 | 51 |
107860284 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 01:15:02 2019
@author: chaztikov
"""
import os;import numpy as np;import pandas as pd
import os,sys,re,subprocess
import pandas as pd
import numpy as np
import scipy
import scipy.integrate
from scipy.spatial import KDTree
from scipy.interpolate import BSpline
from scipy.interpolate import splrep, splder, sproot,make_interp_spline
import scipy.sparse.linalg as spla
import matplotlib.pyplot as plt
# import seaborn as sns
import sklearn.decomposition
from sklearn.decomposition import PCA
cwd = os.getcwd()
# Location of the raw PIV traces.  NOTE(review): fnames is immediately
# overridden to a single hard-coded file — remove the override to process
# every file in the directory.
dname = '/home/chaztikov/git/aorta_piv_data/data/original/'
fnames = os.listdir(dname)
fnames = ['OpenAreaPerimountWaterbpm60.txt']
# Per-file pipeline: load the (time, signal) trace, detect cardiac cycles,
# stack them into a rectangular (time x cycle) array, plot diagnostics.
# for ifname0,fname0 in enumerate(fnames[:-2]):
for ifname0,fname0 in enumerate(fnames):
    fname = dname+fname0
    # Load as CSV; fall back to whitespace-separated text.
    try:
        df = pd.read_csv(fname)
        print(df.columns)
        print(df.shape)
        xx = df.values[:,0]
        yy = df.values[:,1]
    except Exception:
        df = np.loadtxt(fname)
        xx = df[:,0]
        yy = df[:,1]
    # Peak-detection parameters.  NOTE(review): phi0 is a hard-coded sample
    # offset specific to this recording — confirm before reusing.
    HR=1
    npeaks = 13
    phi0 = 14150-1
    # phi0 = 0
    phi0 = int(phi0)
    ntau = int(5)
    #ntau = 10
    ntau = int(ntau)
    tau= int(60/HR)
    plt.figure()
    plt.plot(xx,yy,'b')
    plt.grid()
    plt.xlabel('Time')
    plt.ylabel('Raw Signal')
    plt.title(fname0)
    plt.savefig('raw_'+str(ifname0)+'.png')
    plt.show()
    # Discard the leading phi0 samples.
    try:
        xx = df.values[phi0:, 0]
        yy = df.values[phi0:, 1]
    except Exception:
        xx = df[phi0:,0]
        yy = df[phi0:,1]
    dyy = np.diff(yy)
    nbins = np.sqrt(yy.shape[0] * 1 ).astype(int)
    inz = np.where(yy>0)[0]
    idnz = np.where(np.abs(dyy)>0)[0]
    dyynz = dyy[idnz]
    dyynz = dyy
    pdc = np.percentile(np.abs(dyynz),99.9)
    iddc = np.where(dyynz>pdc )
    # Largest jumps in the derivative mark cycle boundaries: a jump from a
    # nonzero value ends a cycle; a jump from ~zero starts one.
    peaks = np.sort(np.abs(dyy))[::-1][:2*npeaks]
    ipeaks = np.argsort(np.abs(dyy))[::-1][:2*npeaks]
    #ipeaks = np.argsort(np.abs(dyy))[::-1][:npeaks]
    iipeaks = np.where(yy[ipeaks]>1e-6)[0]
    inzpeaks = ipeaks[iipeaks]+1
    inzpeaks = np.sort(inzpeaks)
    #these are endpoints of interval
    #pair these with the start points of signal intervals, marked by izpeaks
    iizpeaks = np.where( np.isclose(yy[ipeaks], 0) )[0]
    izpeaks = ipeaks[iizpeaks]
    izpeaks = np.sort(izpeaks)
    #cycles and lengths
    # Trim every cycle to the shortest detected cycle length so the cycles
    # stack into a rectangular (time x cycle) array.
    icycle = np.array(list(zip(izpeaks,inzpeaks)))
    minclen=np.min(np.diff(icycle,1))
    maxclen=np.max(np.diff(icycle,1))
    padclen = maxclen-np.diff(icycle,1)[:,0]
    padclen = minclen-np.diff(icycle,1)[:,0]
    icycle[:,1]+=padclen
    times = np.vstack([xx[c[0]:c[1]] for c in icycle]).T
    times -= times[0]
    # times = xx[icycle][:,0][:,None] - xx[icycle]
    output = np.stack([yy[c[0]:c[1]] for c in icycle]).T
    plt.figure()
    plt.plot(xx,yy,'b')
    plt.plot(xx[icycle],yy[icycle],'r.')
    plt.grid()
    plt.xlabel('Time')
    plt.ylabel('Truncated Raw Signal')
    plt.title(fname0)
    plt.savefig('truncraw_'+str(ifname0)+'.png')
    plt.show()
    p1,p2=0,100
    p1,p2=np.percentile(yy[inz],p1),np.percentile(yy[inz],p2)
    plt.figure()
    plt.hist(yy[inz],bins=nbins,normed=True)
    plt.xlim(p1,p2)
    plt.grid()
    plt.ylabel('pmf')
    plt.xlabel('output')
    plt.title('Raw, Nonzero Signal Histogram')
    plt.savefig('histnz_'+str(ifname0)+'.png')
    plt.show()
    # Cycle-average waveform and fluctuations about it.
    mean = output.mean(axis=1)
    centered = output-mean[:,None]
    plt.figure()
    plt.plot(times,mean,'k-',lw=8,alpha=0.8,label='mean')
    plt.plot(times,output,'b.',ms=2,alpha=0.4)
    plt.grid()
    plt.xlabel('time')
    plt.ylabel('output')
    plt.title('Signal Cycles as Samples')
    plt.savefig('mean_'+str(ifname0)+'.png')
    plt.show()
    plt.figure()
    #plt.plot(times,mean,'k-',lw=4,label='mean')
    plt.plot(times,centered ,'.',ms=1,alpha=0.4)
    plt.grid()
    plt.xlabel('time')
    plt.ylabel('output')
    plt.title('Signal (Centered by Sample Mean)')
    plt.savefig('centered_'+str(ifname0)+'.png')
    plt.show()
    # Treat each cycle as one sample (rows) for dimension reduction.
    X = output.copy().T
    #X = centered.copy().T
    nr = X.shape[0]
    # Choose the dimension-reduction model; only 'pca' is exercised here.
    dimreductiontype='pca'
    from sklearn.decomposition import PCA,KernelPCA,FactorAnalysis
    if(dimreductiontype=='pca'):
        pca = PCA(n_components = nr ,whiten=True)#min(df.shape))
    elif(dimreductiontype=='kpca'):
        pca = KernelPCA(n_components=min(df.shape))
    elif(dimreductiontype=='fa'):
        pca = FactorAnalysis(n_components=min(df.shape))
    Z = pca.fit_transform(X)
    # Model diagnostics (attributes vary by model; failures are ignored).
    try:
        print("pca.n_components ", pca.n_components)
        print("pca.n_features_ ", pca.n_features_)
        print("pca.n_samples_ ", pca.n_samples_)
        print('pca.noise_variance_ ', pca.noise_variance_)
    except Exception:
        1;
    try:
        ax,fig=plt.subplots(1,1)
        plt.plot(pca.explained_variance_ratio_,'-o',ms=4)
        plt.grid()
        plt.title('Variance Explained (Percent) by Component')
        plt.xlabel('Principal Component')
        plt.ylabel('Variance Explained')
        plt.grid()
    #    plt.legend(ilabel)
        plt.savefig(cwd+"/"+str(ifname0)+'_'+dimreductiontype+"_"+"explained_variance_ratio_"+".png")
        plt.show()
    except Exception:
        1;
    # Disabled: FactorAnalysis noise-variance band experiments.
    #pca = FactorAnalysis(n_components=min(df.shape))
    #Z = pca.fit_transform(X)
    #plt.plot(times[:,0],favar)
    #plt.title('Variance Explained (Percent) by Component')
    #plt.xlabel('Principal Component')
    #plt.ylabel('Variance Explained')
    #plt.grid()
    #plt.savefig(cwd+"/"+str(ifname0)+'_'+dimreductiontype+"_"+"explained_variance_ratio_"+".png")
    #plt.show()
    #
    #pca = FactorAnalysis(n_components=min(df.shape))
    #Z = pca.fit_transform(X)
    #favar = pca.noise_variance_
    #favar = np.sqrt(favar)
    #scale_factor = 8
    #plt.figure()
    #
    #plt.plot(times[:,0],X.T ,'b.',ms=1)
    #plt.plot(times[:,0],Xm[0] ,'k-',lw=6,alpha=0.4)
    #plt.plot(times[:,0],Xm[0] + scale_factor * favar[:],'g-')
    #plt.plot(times[:,0],Xm[0] - scale_factor * favar[:],'r-')
    #plt.title('Variance Explained (Percent) by Component')
    #plt.xlabel('Principal Component')
    #plt.ylabel('Variance Explained')
    #plt.grid()
    #plt.savefig(cwd+"/"+str(ifname0)+'_'+dimreductiontype+"_"+"bands_"+".png")
    #plt.show()
    #
try:
for iy in range(0,nr):
# ax,fig=plt.subplots(1,1)
x = times
y = pca.components_[iy]
plt.figure()
plt.plot(x,y,'o',ms=4)
# for ic, vc in enumerate((iclass)):
# plt.plot(x[vc],y[vc],icolor[ic]+'o',label=ilabel[ic])
plt.grid(which='both')
plt.xlabel('Time')
plt.ylabel('Principal Mode '+str(iy))
plt.savefig(cwd+"/"+str(ifname0)+'_'+dimreductiontype+"_"+"pm"+str(ix)+"pm"+str(iy)+".png")
plt.show()
except Exception:
1;
    # Plot the fitted model's mean waveform.
    try:
        plt.figure()
        plt.plot(times,pca.mean_)
        plt.grid()
        plt.xlabel('Time')
        plt.ylabel('Signal Mean')
        plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
        plt.show()
    except Exception:
        1;
def reconstruction_error(pca,Z,X,pnorm=2,ax=0):
Xr = pca.inverse_transform(Z)
resid = Xr-X
if(pnorm=='avg'):
abserr = resid.mean(axis=0)
relerr = abserr / pca.mean_
else:
abserr = np.linalg.norm(resid,ord=pnorm,axis=ax)
norm = np.linalg.norm(X,ord=pnorm,axis=ax)
relerr = abserr/norm
return Xr.T, abserr, relerr
    # Reconstruct the signal from the reduced representation and plot
    # absolute / relative reconstruction errors over cycle time.
    #recon,abserr, relerr = reconstruction_error(pca,Z,X, pnorm='avg')
    recon,abserr, relerr = reconstruction_error(pca,Z,X, pnorm=2)
    try:
        plt.figure()
        plt.plot(times[:,0],mean,'k-',lw=8,alpha=0.9,label='mean')
        plt.plot(times[:,0],recon[:,0],'r.',ms=1,alpha=0.8,label='reconstruction')
        plt.plot(times,recon,'r.',ms=1,alpha=0.2)#,label='reconstruction')
        plt.grid()
        plt.legend()
        plt.xlabel('Time')
        plt.ylabel('Approximate Reconstruction of Signal')
        plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
        plt.show()
    except Exception:
        1;
    #Xr = pca.inverse_transform(pca.transform(mean[None,:]))[0];plt.plot(Xr-mean)
    try:
        plt.figure()
        plt.plot(times,relerr,'r.')
        plt.grid()
        plt.xlabel('Time')
        plt.ylabel('Signal Reconstruction Error')
        plt.title('Relative Signal Reconstruction Error')
        plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
        plt.show()
    except Exception:
        1;
    try:
        plt.figure()
    #    plt.plot(times,pca.mean_,label='Mean')
        plt.plot(times,abserr,'r.',label='Absolute Error')
        plt.grid()
        plt.xlabel('Time')
        plt.ylabel('Signal Reconstruction Error')
        plt.title('Absolute Signal Reconstruction Error')
        plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
        plt.show()
    except Exception:
        1;
    # Sample mean across cycles, centered fluctuations, and histograms.
    tt = times[:,0]
    Xm = X.mean(axis=0)
    Xm = Xm[None,:]
    Xc = X-Xm
    plt.figure()
    plt.plot(times,Xm[0],'k-',ms=1)
    plt.plot(times,X.T,'.',ms=1)
    plt.grid()
    plt.xlabel('Time')
    plt.ylabel('Signal')
    plt.title('Signal and Mean')
    plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
    plt.show()
    plt.figure()
    plt.plot(times[:,0], Xc.T)
    plt.grid()
    plt.xlabel('Time')
    plt.ylabel('Fluctuation in Signal about Mean')
    plt.title('Fluctuation in Signal about Mean')
    plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
    plt.show()
    plt.figure()
    plt.hist(Xc.flatten(),Xc.shape[0],normed=True)
    plt.grid()
    plt.ylabel('PMF')
    plt.xlabel('Fluctuation in Signal about Mean')
    plt.title('Fluctuation in Signal about Mean')
    plt.savefig(cwd+"/"+dimreductiontype+'_'+fname0+'.png')
    plt.show()
    # Disabled: truncated-SVD reconstruction experiments.
#    U,S,V = np.linalg.svd(Xc,full_matrices=False)
#    
#    plt.plot(S,'-o')
#    explained_variance = np.cumsum(S)/np.sum(S,axis=0)
#    plt.plot(explained_variance,'-o')
#    plt.show()
    #tol = 0.3
    #itrunc = np.where(explained_variance>tol)[0].min()
    #
    #for itrunc in range(S.shape[0], S.shape[0]-1,-1):
    #    S[itrunc:]*=0
    #    Xrc = U.dot(np.diag(S).dot(V))
    #    
    #    error = Xc-Xrc
    #    terror = np.mean(Xrc-Xc,axis=0)
    #    serror = np.linalg.norm(Xrc-Xc,axis=1,ord=2)
    #    nbins = np.sqrt(2 * serror.shape[0]).astype(int)
    ##    print( terror)
    ##    print('itrunc', itrunc, '' ,' Signal Variance ', Xc.var() - Xrc.var() , ' Signal Fraction ', 1 - Xrc.var() / Xc.var() )
    #    xstdev = np.sqrt(np.var(error,axis=1))
    #    xtimevariation = np.sqrt(np.var(error,axis=0))
    #    print('itrunc', itrunc, '' ,' Signal Time Variation ', xtimevariation,'Sample StDev ', xstdev )#, ' SNR ', np.sqrt( Xc.var() / Xrc.var() - 1 ) )
    #
    #    plt.figure()
    #    plt.plot(tt,terror,'.')
    #    plt.show()
    #
    #    plt.figure()
    #    plt.plot(times, error.T,'.',ms=2,alpha=0.2)
    #    plt.plot(tt, error[0],'.',ms=2,alpha=0.2)
    #    plt.show()
    #
    #    plt.figure()
    #    plt.hist(serror,bins=nbins)
    #    plt.show()
    #
| null | leaflet_flutter_data/ex1/aorta_data.py | aorta_data.py | py | 11,467 | python | en | code | null | code-starcoder2 | 51 |
85595004 | import csv
import statistics
import math_functions.stock_functions as stock_functions
import math_functions.math_functions as math_functions
# Read adjusted-close prices from a CSV export, derive daily returns, and
# print summary statistics (mean daily return, population stdev, Sharpe).
file_path = 'D:/finance_data/data_test.csv'

data = []           # rows of [date, close, daily_return]
daily_returns = []

with open(file_path, newline='') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    first_row = True
    for row in spamreader:
        if first_row:
            # Skip the header row.
            first_row = False
        else:
            # Column 6 is taken as the (adjusted) close -- TODO confirm
            # against the actual CSV layout.
            data.append([row[0], float(row[6])])

# The export lists newest rows first; reverse into chronological order.
data.reverse()
# The first trading day has no previous close, so its return is defined as 0.
data[0].append(0)

for index in range(1, len(data)):
    previous_day_close = data[index - 1][1]
    current_day_close = data[index][1]
    daily_return = stock_functions.total_return_fast(previous_day_close, current_day_close)
    data[index].append(daily_return)
    daily_returns.append(daily_return)

# NOTE: index 1 mirrors the original script (it skips data[0]) -- verify
# whether the very first chronological close should be used instead.
first_close = data[1][1]
last_close = data[len(data) - 1][1]

print(daily_returns)
# Bug fix: the total return was previously computed from hard-coded prices
# (386.27, 403.27) even though first_close/last_close were already derived
# from the data above.
print('Total Return: ' + str(stock_functions.total_return_fast(first_close, last_close)))
print('Daily Returns: ' + str(math_functions.get_average_of_list(daily_returns)))
print('Population Standard Deviation: ' + str(math_functions.get_population_standard_deviation_of_list(daily_returns)))
print('Sharpe Ratio: ' + str(stock_functions.get_sharpe_ratio_fast(daily_returns)))
| null | Generic_Finance_Predictor_OLD/learning_tutorials_and_testing/computational_investing/data_manipulation_demo.py | data_manipulation_demo.py | py | 1,468 | python | en | code | null | code-starcoder2 | 51 |
113401861 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
#A HistoryGraph Immutable Object
import uuid
from .changetype import *
from . import fields
from operator import itemgetter
import hashlib
import six
class ImmutableObject(object):
    """Base class for HistoryGraph immutable objects.

    Field definitions are declared as class attributes (``fields.Field``
    instances).  All values are fixed in ``__init__``; any later attribute
    assignment is rejected.  Instead of a UUID, an immutable object is
    identified by the SHA-256 hash of its sorted field values plus the hash
    of its predecessor (``_prevhash``).
    """

    is_singleton = False

    def __init__(self, **kwargs):
        """Initialise the object from *kwargs*; it can never change afterwards."""
        self.insetup = True  # flag that lets __setattr__ accept writes
        self._field = dict()
        variables = [a for a in dir(self.__class__) if not a.startswith('__') and not callable(getattr(self.__class__, a))]
        for k in variables:
            var = getattr(self.__class__, k)
            self._field[k] = var
            # Immutable objects don't allow references to other objects;
            # just use a FieldText as a key.
            assert not isinstance(var, fields.Collection)
            if isinstance(var, fields.Field):
                setattr(self, k, var.create_instance(self, k))
                if k in kwargs:
                    setattr(self, k, kwargs[k])
        self._prevhash = kwargs['_prevhash'] if '_prevhash' in kwargs else ''
        self.insetup = False

    def __setattr__(self, name, value):
        """Reject attribute assignment once setup has finished."""
        if name == "insetup":
            super(ImmutableObject, self).__setattr__(name, value)
            return
        if not self.insetup:
            # Attempting to change an immutable object.  NOTE: ``assert`` is
            # stripped under ``python -O``; the ``return`` below then makes
            # the write a silent no-op instead of an error.
            assert False
            return
        super(ImmutableObject, self).__setattr__(name, value)

    def get_hash(self):
        """Return the SHA-256 content hash identifying this object.

        Immutable objects don't have UUIDs; the hash covers the (name, value)
        pairs sorted by field name, followed by the predecessor hash.
        """
        s = sorted([(k, str(getattr(self, k))) for (k, v) in six.iteritems(self._field)], key=itemgetter(0)) + [('_prevhash', str(self._prevhash))]
        return hashlib.sha256(str(s).encode('utf-8')).hexdigest()

    def as_dict(self):
        """Return a dict (fields + _prevhash + classname + hash) suitable for transport."""
        ret = dict()
        for k in self._field:
            ret[k] = getattr(self, k)
        ret["_prevhash"] = self._prevhash
        ret["classname"] = self.__class__.__name__
        ret["hash"] = self.get_hash()
        return ret

    def get_is_deleted(self):
        """Immutable objects are never deleted."""
        return False
| null | historygraph/immutableobject.py | immutableobject.py | py | 2,190 | python | en | code | null | code-starcoder2 | 51 |
18697883473 |
# Shipping cost calculator: read each item's price from the user, then
# report the list of prices and their total.
items = []
numitems = int(input('Enter the number of items you will be calculating'))
for _ in range(numitems):
    # Prompt for one price per item and collect it.
    items.append(int(input('please enter how much each item is (from start to finish)')))
# Sum the collected prices (the original kept a running total by hand and
# also contained a stray `total = int`, which bound the *type*, not a value).
total = sum(items)
print(items)
print('Your total shipping cost is', total)
#print(all_items[1][0]) | johnbuttigieg/Code | Week 3 Workshop/shippingCalc.py | shippingCalc.py | py | 486 | python | en | code | 0 | github-code | 13 |
74272001937 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from datetime import datetime
from wikiSpider.items import Article
from string import whitespace
'''
这个示例类应该替换成你的新管线组件代码。
在前面的几节中,你已经收集了两个原始格 式的字段,
而这些可能需要进行额外的数据处理:
lastUpdated(一个表示日期的、格式糟 糕的字符串对象)和
text(一个混乱的由字符串片段组成的数组)。
'''
class WikispiderPipeline:
    """Clean up raw Article fields collected by the spider.

    ``lastUpdated`` arrives as the Wikipedia footer string
    ``"this page was last edited on <d> <Month> <Y>, at <H>:<M>."`` and is
    parsed into a ``datetime``; ``text`` arrives as a list of string
    fragments and is joined into one string with pure-whitespace fragments
    dropped.
    """

    def process_item(self, article, spider):
        """Normalise one scraped article item and return it."""
        # Strip the boilerplate prefix, then parse the remaining date text.
        # NOTE(review): the match is lower-case ("this page was last edited
        # on"); if the crawled footer keeps its original capital "T" nothing
        # is removed and strptime below raises -- confirm the spider
        # lower-cases this field.  (Removed an unused `dataStr` local.)
        cleaned = article['lastUpdated'].replace("this page was last edited on", "").strip()
        article['lastUpdated'] = datetime.strptime(cleaned, '%d %B %Y, at %H:%M.')
        # Drop fragments made purely of whitespace characters (substring
        # test against string.whitespace, as before), then concatenate.
        article['text'] = "".join(line for line in article['text'] if line not in whitespace)
        return article
| danyow-cheung/data-analysis-etc | Python网络爬虫权威指南/wikiSpider/wikiSpider/pipelines.py | pipelines.py | py | 1,260 | python | en | code | 0 | github-code | 13 |
21599376655 | from PyQt4 import QtGui
from PyQt4 import QtOpenGL
from PyQt4 import QtCore
import numpy as np
from OpenGL.GL import *
import mathutils as mth
import time
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import CNST.techs as techs
import CNST.clGEOOBJ as clGEOOBJ
from CNST.draw import getmv # TODO go from draaw to techs
from CNST.draw import drawinbuf, sph, drawpic,newpic,multiget,multiset
from PIL import Image,ImageOps
class GLWidget(QtOpenGL.QGLWidget):
    """PyQt4/OpenGL viewport for displaying, transforming and picking 3D objects.

    Maintains a model-view matrix driven by mouse rotate/pan/zoom, OpenGL
    display lists for UI overlays (axes, ruler, picked-point spheres,
    measurement lines, screen cross, plane, grid, on-screen text), and a
    ``selection`` of (object id, plane id) pairs resolved via off-screen
    colour-buffer picking (``drawinbuf``).
    """
    def __init__(self, parent=None):
        super(GLWidget, self).__init__(parent)
        # Viewport clear colour.
        self.color = QtGui.QColor.fromCmykF(0.4, 0.21, 0.0, 0.0)
        # Per-frame mouse rotation deltas, consumed (and reset) by upmat().
        self.rotx, self.roty = 0, 0
        self.mvMatrix = np.identity(4)
        # Per-frame scale factor and pan delta, consumed by upmat().
        self.sc, self.tr = 1, (0, 0)
        self.objects = []
        self.invisiblelist = []  # indices into self.objects that are hidden
        self.revmouse = 0, 0  # mouse position with y flipped (GL convention)
        self.selection = []  # list of (object id, plane id) pairs
        self.mode = "pick0"
        self.edgemode = 'on'
        # Display-list ids and coordinate lists backing the UI overlays.
        self.axlist = 0
        self.rulerlist=0
        self.linecdlist=[]
        self.sphcdlist=[]
        self.crosscdlist=[]
        self.crosslist=0
        self.planesize=1000
        self.draftpoint = (0, 0, 0)  # last picked 3D point (object space)
        self.setMouseTracking(True)
        # Signals observed by the surrounding application.
        self.ObjSelected = techs.Signal()
        self.RulerChange = techs.Signal()
        self.AngleChange = techs.Signal()
        self.key=None  # currently held modifier key ('ctrl' / 'alt')
        self.scalefree=1  # accumulated zoom factor
        self.font = QtGui.QFont()
        self.font.setPointSize(14)
        self.fontscale = QtGui.QFont()
        self.fontscale.setPointSize(12)
        self.textlist=[]
        self.textconsole = []  # rolling on-screen console: [text, age] entries
        self.timer=False
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
    def addobj(self, obj):
        """Add *obj* to the scene and refresh the view."""
        self.objects.append(obj)
        self.upmat()
    def addtmpobj(self,obj):
        """Add a temporary (semi-transparent, opacity 0.6) object."""
        #obj.setcol((*obj.defcol[:3],.8))
        obj.setopacity(.6)
        self.objects.append(obj)
    def cleartmpobjs(self):
        """Remove every object previously added via addtmpobj (opacity 0.6)."""
        for obj in reversed(self.objects):
            if obj.getopa()==.6:
                self.objects.remove(obj)
                #del(obj)
    def sphinit(self,r=5,col=(0,0,1)):
        """(Re)build the display list of picked-point marker spheres."""
        self.sphlist=glGenLists(1)
        glNewList(self.sphlist, GL_COMPILE)
        # Scale the radius down by the current zoom so spheres keep screen size.
        r = r / self.scalefree
        if self.sphcdlist:
            glColor3f(*col)
            for cd in self.sphcdlist:
                glPushMatrix()
                #glLoadIdentity()
                glTranslate(*cd)
                quad = gluNewQuadric()
                gluSphere(quad, r, 4, 4)
                glPopMatrix()
        glEndList()
    def axisinit(self):
        """Build the display list drawing the RGB x/y/z axes at the origin."""
        p0, p1, p2, p3 = (0, 0, 0), (100, 0, 0), (0, 100, 0), (0, 0, 100)
        self.axlist = glGenLists(1)
        glNewList(self.axlist, GL_COMPILE)
        thickness = GLfloat(4)
        glLineWidth(thickness)
        glBegin(GL_LINES)
        glColor3fv((1, 0, 0))
        glVertex3fv(p0)
        glVertex3fv(p1)
        glColor3fv((0, 1, 0))
        glVertex3fv(p0)
        glVertex3fv(p2)
        glColor3fv((0, 0, 1))
        glVertex3fv(p0)
        glVertex3fv(p3)
        glEnd()
        glEndList()
    def rulerinit(self):
        """Build the display list for the two-tone screen-space ruler bar."""
        w,h = self.wi,self.he
        yofs = 10
        p0,p1,p2 = (0,yofs-h/2,10000),(w/4,yofs-1*h/2,10000),(w/2-5,yofs-h/2,10000)
        self.rulerlist = glGenLists(1)
        glNewList(self.rulerlist, GL_COMPILE)
        thickness = GLfloat(10)
        glLineWidth(thickness)
        glBegin(GL_LINES)
        glColor3fv((0,0,0))
        glVertex3fv(p0)
        glVertex3fv(p1)
        glColor3fv((1,1,1))
        glVertex3fv(p1)
        glVertex3fv(p2)
        glEnd()
        glEndList()
    def drawaxis(self):
        """Draw the axes under the current model-view matrix, zoom-compensated."""
        t = 1/self.scalefree
        glDisable(GL_LIGHTING)
        glPushMatrix()
        glMultMatrixf(self.mvMatrix)
        glScalef(t,t,t)
        glCallList(self.axlist)
        glPopMatrix()
        glEnable(GL_LIGHTING)
    def minimumSizeHint(self):
        """Qt layout hint: minimum widget size."""
        return QtCore.QSize(50, 50)
    def sizeHint(self):
        """Qt layout hint: preferred widget size."""
        return QtCore.QSize(400, 400)
    def initializeGL(self):
        """One-time GL setup: display lists, blending, lighting, materials."""
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        self.qglClearColor(self.color)
        # Pre-build all overlay display lists.
        self.lineinit()
        self.axisinit()
        self.sphinit()
        self.planecdinit()
        self.planeinit()
        self.gridcdinit()
        self.gridinit()
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)
        #glEnable(GL_CULL_FACE)
        glEnable(GL_COLOR_MATERIAL)
        glEnable(GL_DEPTH_TEST)
        # glLightfv(GL_LIGHT0, GL_POSITION, (-.3, .6, 1))
        mat_specular = GLfloat_4(1.0, 1.0, 1.0, 1.0)
        mat_shininess = GLfloat(80)
        # light_position[] = {1.0, 1.0, 1.0, 0.0};
        glShadeModel(GL_SMOOTH)
        glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular)
        glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess)
        glLightfv(GL_LIGHT0, GL_POSITION, (-1,1,1))
        cAmbientLight = GLfloat_4(0.4, 0.4, 0.4, .5)
        glLightfv(GL_LIGHT0, GL_AMBIENT, cAmbientLight)
        cDiffuseLight = GLfloat_4(1,1,1,.01)
        glLightfv(GL_LIGHT0, GL_DIFFUSE, cDiffuseLight)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_NORMALIZE)
    def paintGL(self):
        """Render one frame: overlays first, then objects back-to-front by opacity."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.drawruler()
        self.drawaxis()
        self.drawsph()
        self.drawline()
        self.drawcross()
        self.drawtext()
        self.drawtextscale()
        glLoadIdentity()
        # Sort objects by opacity so transparent ones are drawn last.
        opacitylist = [(i,obj,obj.getopa()) for i,obj in enumerate(self.objects)]
        sortedopalist = sorted(opacitylist,key = lambda t:t[2])
        sortedobj = [(p[0],p[1]) for p in reversed(sortedopalist)]
        for i,object in sortedobj:
            if i not in self.invisiblelist:
                # Highlight any selected planes belonging to this object.
                for objid, planeid in self.selection:
                    object.showplane(planeid, objid)
                object.show()
        # self.drawplane()
        # self.drawgrid()
    def resizeGL(self, width, height):
        """Handle viewport resize: rebuild size-dependent lists, FBO/PBOs and projection."""
        self.wi = width
        self.he = height
        self.rulerinit()
        self.crossinit()
        self.FBO = techs.fbufinit(self.wi, self.he)
        self.PBOS,self.pbosize = techs.pbosinit(self.wi, self.he)
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Orthographic projection centred on the widget.
        glOrtho(-width / 2, width / 2, -height / 2, height / 2, -10000,10000)#-15000, 15000)
        glMatrixMode(GL_MODELVIEW)
    def mousePressEvent(self, event):
        """Remember the press position (used to distinguish clicks from drags)."""
        self.lastPos = event.pos() # onPress and onRelease produces same events!?
        self.pos = (event.x(), event.y())
        # self.sph = (self.pos[0] - self.wi / 2, self.he / 2 - self.pos[1], 0)
    def getpic(self):
        """Render every object off-screen and return (colour buffers, depth buffers, w, h)."""
        clrarr=[]
        deparr = []
        for obj in self.objects:
            objclr,objdep = drawpic(obj,self.FBO,self.wi,self.he)
#            objclrnp = np.frombuffer(objclr,np.uint8,count=self.wi*self.he*4)
#            objclrnp = objclrnp.reshape((self.wi, self.he, 4))
            clrarr.append(objclr)
            deparr.append(objdep)
        return clrarr,deparr,self.wi,self.he
    # NOTE(review): redundant class-level import; PIL is already imported at
    # module scope.
    from PIL import Image
    def modpic(self,ind,obj):
        """Re-render *obj* through PBO *ind* and return the RGBA pixels as a (w, h, 4) array."""
        data = newpic(obj, self.PBOS[ind], self.wi, self.he,self.pbosize,ind)
        objclrnp = np.frombuffer(data,np.uint8,count=self.wi*self.he*4)
        data = objclrnp.reshape((self.wi, self.he, 4))
#        imgc = Image.frombytes("RGBA", (self.wi, self.he), data)
#        #imgc = ImageOps.flip(imgc)
#        imgc.save('RESULTS\\PBOTEST'+str(ind)+'.png', 'PNG')
        return data
    def writepic(self,ind,obj):
        """Render *obj* into PBO 0 (the *ind* argument is overridden)."""
        ind=0
        multiset(obj, self.PBOS[ind], self.wi, self.he)
    def readpic(self,ind):
        """Read PBO 0 back and return the pixels as a vertically flipped (h, w, 4) array."""
        #ind=0
        data = multiget(self.PBOS[0], self.pbosize)
        objclrnp = np.frombuffer(data,np.uint8,count=self.wi*self.he*4)
#        data = objclrnp.reshape((self.he,self.wi, 4))
#        data = np.flipud(data)
#        img = Image.fromarray(data, 'RGBA')
#        img.save('RESULTS\\obj'+str(ind)+'.png', 'PNG')
        return np.flipud(objclrnp.reshape((self.he,self.wi, 4)))#data
    def mouseReleaseEvent(self, event):
        """On a click (no drag), pick the object/plane under the cursor per self.mode."""
        if (event.x(), event.y()) == self.pos:
            # Resolve (object id, plane id) via off-screen picking; 255 = no hit.
            objid, planeid = drawinbuf(self.objects, self.FBO, self.revmouse,self.invisiblelist)
            pair = objid, planeid
            if objid!=255:
                if self.mode == "pickmany":
                    # Toggle membership of the picked pair in the selection.
                    self.ObjSelected.register(pair)
                    if pair not in self.selection:
                        self.selection.append(pair)
                        self.addtoconsole('Added to selection:'+str(len(self.selection))+' elements')
                    else:
                        self.selection.remove(pair)
                        self.addtoconsole('Removed from selection:' + str(len(self.selection)) + ' elements')
                elif self.mode == "pickone":
                    # Single selection; also resolve the 3D intersection point.
                    if pair not in self.selection:
                        self.selection = [pair]
                        self.getint(*pair,self.pos)
                    else:
                        self.selection.remove(pair)
                elif self.mode == "pickwhole": # TODO oh this is ugly
                    # Select every face of the picked object, or clear.
                    try:
                        if self.selection == []:
                            self.selection = [[objid, plid + 1] for plid in range(len(self.objects[0].faces))]
                        elif self.selection[0][0] != objid:
                            self.selection = [[objid, plid + 1] for plid in range(len(self.objects[0].faces))]
                        else:
                            self.selection = []
                    except:
                        self.selection = []
                elif self.mode == "pick0":
                    pass
        # Reset per-frame transform deltas and redraw.
        self.rotx, self.roty = 0, 0
        self.tr = 0, 0
        self.upmat()
    def wheelEvent(self, event):
        """Zoom in/out by 5% per wheel notch."""
        if event.delta() > 0:
            self.sc = 1.05
        else:
            self.sc = 0.95
        self.scalefree *= self.sc
        #print(self.scalefree,self.he/self.scalefree)
        #self.textlist=[[str(round(self.scalefree,2)),0]]
        #self.addtoconsole('test'+str(self.scalefree))
        self.upmat()
    def mouseMoveEvent(self, event):
        """Rotate (LMB drag), pan (RMB drag), or paint-select while Ctrl/Alt is held."""
        self.revmouse = (event.pos()).x(), self.he - (event.pos()).y()
        if event.buttons() == QtCore.Qt.LeftButton:
            # Left drag: accumulate rotation deltas and report view angles.
            self.rotx = event.x() - self.lastPos.x()
            self.roty = event.y() - self.lastPos.y()
            self.lastPos = event.pos()
            self.upmat()
            los = (0,0,1,0)
            multlos = np.matmul(self.mvMatrix, los)[:3]
            ang1 = techs.getangle((0,1,0),multlos)
#            print(ang1,np.cos(ang1*np.pi/180))
            ang2 = techs.getangle(los[:3],multlos*np.sin(ang1*np.pi/180)),
#            print(*ang2)
            self.AngleChange.register((ang1,*ang2))
        elif event.buttons() == QtCore.Qt.RightButton:
            # Right drag: pan the view.
            dx = event.x() - self.lastRPos.x()
            dy = event.y() - self.lastRPos.y()
            k = 1 # TODO get rid of k after all
            self.tr = k * dx, k * dy
            self.lastRPos = event.pos()
            self.upmat()
        else:
            self.lastRPos = event.pos()
        if self.key and self.mode=='pickmany':
            # Hover with Ctrl adds, with Alt removes, faces under the cursor.
            objid, planeid = drawinbuf(self.objects, self.FBO, self.revmouse, self.invisiblelist)
            pair = objid, planeid
            self.ObjSelected.register(pair)
            if objid!=255:
                if self.key=='ctrl':
                    #print('ctrl')
                    if pair not in self.selection:
                        self.selection.append(pair)
                        self.addtoconsole('Added to selection:' + str(len(self.selection)) + ' elements')
                elif self.key == 'alt':
                    try:
                        self.selection.remove(pair)
                        self.addtoconsole('Removed from selection:' + str(len(self.selection)) + ' elements')
                    except:
                        pass
            self.upmat()
            #self.upmat()
    def keyPressEvent(self, event):
        """Track which modifier key (Ctrl/Alt) is currently held."""
        if event.key() == QtCore.Qt.Key_Control:
            self.key = 'ctrl'
        elif event.key() == QtCore.Qt.Key_Alt:
            self.key = 'alt'
        else:
            self.key=None
    def keyReleaseEvent(self, event):
        """Clear the held-modifier flag."""
        self.key=None
    def upmat(self):
        """Fold the pending scale/pan/rotation deltas into mvMatrix and redraw."""
        self.mvMatrix = getmv(self.sc, self.tr, self.rotx, self.roty, self.mvMatrix)
        for object in self.objects:
            object.update(self.mvMatrix)
        self.updateGL()
        self.sc = 1
    def addinvisible(self, components):
        """Hide the given components (looked up by id); unknown ids are ignored."""
        try:
            for comp in components:
                index = self.getobjbyid(comp.getid())
                self.invisiblelist.append(index)
        except:
            pass
        self.upmat()
    def delinvisible(self, components):
        """Un-hide the given components; unknown ids are ignored."""
        try:
            for comp in components:
                index = self.getobjbyid(comp.getid())
                self.invisiblelist.remove(index)
        except:
            pass
        self.upmat()
    def dropselection(self):
        """Clear the selection and redraw."""
        self.selection = []
        self.upmat()
    def setselection(self, pair):
        """Replace the selection with the single (object id, plane id) *pair*."""
        self.selection = [pair]
        self.upmat()
    def getobjbyid(self, objid):
        """Return the index in self.objects of the object with id *objid* (None if absent)."""
        for i, object in enumerate(self.objects):
            if objid == object.getid():
                return i
    def getint(self, objid, planeid, pos):
        '''sooo the thing is:
        1. gotta transpose MV if you tranlate things
        2. w=0/1 is important
        3. what is going on: getting plane(point and normal), go to world space coordinates
        then intersect plane with ray from mouse pick
        then go back to object space cd
        '''
        object = self.objects[self.getobjbyid(objid)]
        face = object.faces[planeid - 1]
        org = object.points[face[0] - 1]
        norm = object.getnormaltoface(planeid)
        #norm = object.normals[planeid-1]
        # Convert the widget-space click to centred screen coordinates.
        px, py = pos
        px = px - self.wi/2
        py = self.he / 2 - py
        m = np.transpose(self.mvMatrix)
        # Transform the plane's point (w=1) and normal (w=0) to world space.
        org = np.matmul(m, (*org, 1))[:3]
        norm = np.matmul(m, (*norm, 0))[:3]
        # Intersect the view ray through the click with the face's plane.
        line_a = mth.Vector((px, py, -1200))
        line_b = mth.Vector((px, py, 1200))
        ci = mth.geometry.intersect_line_plane(line_a, line_b, org, norm)
        # Map the intersection back to object space.
        m = np.linalg.inv(m)
        ci = np.matmul(m,(*ci,1))[:3]
        self.draftpoint = ci
        #self.sphcdlist=[ci]
        self.ObjSelected.register(((objid, planeid),ci))
        return list(ci)
    def dropsphs(self):
        """Remove all picked-point marker spheres and redraw."""
        self.sphcdlist=[]
        self.upmat()
    def drawsph(self):
        """Draw the marker spheres under the current model-view matrix."""
        #t = 1 / self.scalefree
        glPushMatrix()
        glMultMatrixf(self.mvMatrix)
        glCallList(self.sphlist)
        glPopMatrix()
    def edgemodeswitch(self):
        """Propagate the first object's edge-display flag to all objects and toggle them."""
        flag = self.objects[0].fedge
        for obj in self.objects:
            obj.fedge = flag
            obj.edgeswitch()
        self.upmat()
    def drawruler(self):
        """Draw the screen-space ruler overlay (lighting off)."""
        glDisable(GL_LIGHTING)
        glPushMatrix()
        glCallList(self.rulerlist)
        glPopMatrix()
        glEnable(GL_LIGHTING)
    def lineinit(self,thick=10):
        """(Re)build the display list for the red measurement lines in linecdlist."""
        self.linelist = glGenLists(1)
        glNewList(self.linelist, GL_COMPILE)
        if self.linecdlist:
            for line in self.linecdlist:
                p1,p2=line
                glPushMatrix()
                thickness = GLfloat(thick)
                glLineWidth(thickness)
                glBegin(GL_LINES)
                glColor3fv((1, 0, 0))
                glVertex3fv(p1)
                glVertex3fv(p2)
                glEnd()
                glPopMatrix()
        glEndList()
    def droplines(self):
        """Clear all measurement lines and redraw."""
        self.linecdlist=[]
        self.lineinit()
        self.upmat()
    def drawline(self):
        """Draw the measurement lines under the current model-view matrix."""
        glPushMatrix()
        glMultMatrixf(self.mvMatrix)
        glCallList(self.linelist)
        glPopMatrix()
    def drawcross(self):
        """Draw the screen-space crosshair (not affected by mvMatrix)."""
        glPushMatrix()
        #glMultMatrixf(self.mvMatrix)
        glCallList(self.crosslist)
        glPopMatrix()
    def dropcross(self):
        """Clear the crosshair and redraw."""
        self.crosscdlist=[]
        self.crossinit()
        self.upmat()
    def crossinit(self):
        """(Re)build the display list for the crosshair lines in crosscdlist."""
        self.crosslist = glGenLists(1)
        glNewList(self.crosslist, GL_COMPILE)
        if self.crosscdlist:
            for line in self.crosscdlist:
                p1, p2 = line
                glPushMatrix()
                thickness = GLfloat(1)
                glLineWidth(thickness)
                glBegin(GL_LINES)
                glColor3fv((.1, 0.5, 1))
                glVertex3fv(p1)
                glVertex3fv(p2)
                glEnd()
                glPopMatrix()
        glEndList()
    def crosscdinit(self):
        """Compute full-width/full-height crosshair line endpoints for the current size."""
        p1 = [-self.wi / 2, 0, 10000]
        p2 = [self.wi / 2, 0, 10000]
        p3 = [0, -self.he / 2, 10000]
        p4 = [0, self.he / 2, 10000]
        self.crosscdlist = [[p1, p2], [p3, p4]]
    def planeinit(self):
        """(Re)build the display list for the translucent reference plane(s)."""
        self.planelist = glGenLists(1)
        glNewList(self.planelist, GL_COMPILE)
        if self.planecdlist:
            for plane in self.planecdlist:
                p1,p2,p3,p4 = plane
                glPushMatrix()
                #thickness = GLfloat(5)
                #glLineWidth(thickness)
                glBegin(GL_POLYGON)
                colp = 0.8
                glColor4fv((colp, colp, colp, .1))
                glVertex3fv(p1)
                #glColor4fv((colp, 0, 0, .1))
                glVertex3fv(p2)
                #glColor4fv((colp, colp, 0, .1))
                glVertex3fv(p3)
                #glColor4fv((0, colp, 0, .1))
                glVertex3fv(p4)
                glEnd()
                glPopMatrix()
        glEndList()
    def planecdinit(self):
        """Compute corner coordinates for the reference plane (XZ quad of planesize)."""
        #l = 1000
        l = self.planesize
        p0 = [0,0,0]
        px = [l,0,0]
        py = [0,l,0]
        pz = [0,0,l]
        pxy = [l,l,0]
        pzx = [l,0,l]
        pzy = [0,l,l]
        self.planecdlist = [[p0,px,pzx,pz]]#[p0,px,pxy,py],[p0,py,pzy,pz]]
    def dropplane(self):
        """Clear the reference plane and redraw."""
        self.planecdlist=[]
        self.planeinit()
        self.upmat()
    def drawplane(self):
        """Draw the reference plane (culling disabled so both sides are visible)."""
        glDisable(GL_CULL_FACE)
        glPushMatrix()
        glMultMatrixf(self.mvMatrix)
        glCallList(self.planelist)
        glPopMatrix()
        glEnable(GL_CULL_FACE)
    def gridinit(self):
        """(Re)build the display list for the grid lines in gridcdlist."""
        self.gridlist = glGenLists(1)
        glNewList(self.gridlist, GL_COMPILE)
        if self.gridcdlist:
            for line in self.gridcdlist:
                p1, p2 = line
                glPushMatrix()
                thickness = GLfloat(4)
                glLineWidth(thickness)
                glBegin(GL_LINES)
                colp = 0.0
                glColor4fv((colp, colp, colp, .1))
                glVertex3fv(p1)
                glVertex3fv(p2)
                glEnd()
                glPopMatrix()
        glEndList()
    def gridcdinit(self):
        """Compute line endpoints for a 40x40 grid over the XZ reference plane."""
        #l = 1000
        l = self.planesize
        nx,ny = 40,40
        dx,dy = l/nx,l/ny
        lines=[]
        jj=0  # NOTE(review): unused
        for i in range(nx+1):
            p1=[i*dx,0,0]
            p2=[i*dx,0,l]
            lines.append([p1, p2])
        for j in range(ny+1):
            p1 = [0,0, j*dy]
            p2 = [l,0, j*dy]
            lines.append([p1, p2])
        self.gridcdlist = lines
    def dropgrid(self):
        """Clear the grid and redraw."""
        self.gridcdlist = []
        self.gridinit()
        self.upmat()
    def drawgrid(self):
        """Draw the grid under the current model-view matrix."""
        glPushMatrix()
        glMultMatrixf(self.mvMatrix)
        glCallList(self.gridlist)
        glPopMatrix()
    def drawtext(self):
        """Render the on-screen console lines with increasing offset/opacity."""
        off,a=0,0.25
        for s,p in self.textconsole:
            #self.textgen(s,p)
            self.texttoconsole(s,off,a)
            a*=2
            off+=30
    def droptext(self):
        """Clear the text list and redraw."""
        self.strlist=[]
        self.upmat()
    def textgen(self,s='',pos=(0,0)):
        """Render string *s* at *pos* in the default font."""
        #s = 'Hello'
        glColor3f(.8,.8,1)
        #pos = -self.wi/2,-self.he/2
        self.renderText(*pos, 0, s, self.font)
    def drawtextscale(self):
        """Render the numeric labels under the ruler, scaled by the current zoom."""
        w, h = self.wi, self.he
        yofs = 20
        points = (0, yofs - h / 2), (w / 4-10, yofs - h / 2), (1 * w / 2-60, yofs- h / 2)
        pointst = w/4,w/2
        glColor3f(.2, .2, 0)
        self.renderText(*points[0], 0, '0', self.fontscale)
        for p,pt in zip(points[1:],pointst):
            self.renderText(*p, 0, str(round(pt/self.scalefree,1)), self.fontscale)
    def texttoconsole(self,s,offset=0,a=1):
        """Render console string *s* near the bottom-left corner with alpha *a*."""
        pos = (-self.wi / 2)*.95, (-self.he / 2)*.95+offset
        glColor4f(.2, .2, 0, a)
        self.renderText(*pos, 0, s, self.font)
    def addtoconsole(self,s):
        """Append *s* to the rolling console, keeping at most 3 entries."""
        self.textconsole.append([s,0])
        if len(self.textconsole)>3:
            self.textconsole.pop(0)
    # --- Canonical view buttons: reset zoom and set mvMatrix to a fixed rotation ---
    def act_btn_front(self):
        """Front view: identity model-view matrix."""
        self.mvMatrix=np.identity(4)
        self.scalefree = 1
    def act_btn_right(self):
        """Right view: -90 degrees about the y axis."""
        self.mvMatrix = np.identity(4)
        self.scalefree = 1
        glPushMatrix()
        glLoadIdentity()
        glRotatef(-90, 0, 1, 0)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        #mv = np.transpose(mv)
        glPopMatrix()
        self.mvMatrix = mv
    def act_btn_left(self):
        """Left view: +90 degrees about the y axis."""
        self.mvMatrix = np.identity(4)
        self.scalefree = 1
        glPushMatrix()
        glLoadIdentity()
        glRotatef(90, 0, 1, 0)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        #mv = np.transpose(mv)
        glPopMatrix()
        self.mvMatrix = mv
    def act_btn_back(self):
        """Back view: 180 degrees about the y axis."""
        self.mvMatrix = np.identity(4)
        self.scalefree = 1
        glPushMatrix()
        glLoadIdentity()
        glRotatef(180, 0, 1, 0)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        #mv = np.transpose(mv)
        glPopMatrix()
        self.mvMatrix = mv
    def act_btn_top(self):
        """Top view: +90 degrees about the x axis."""
        self.mvMatrix = np.identity(4)
        self.scalefree = 1
        glPushMatrix()
        glLoadIdentity()
        glRotatef(90, 1, 0, 0)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        #mv = np.transpose(mv)
        glPopMatrix()
        self.mvMatrix = mv
    def act_btn_bottom(self):
        """Bottom view: -90 degrees about the x axis."""
        self.mvMatrix = np.identity(4)
        self.scalefree = 1
        glPushMatrix()
        glLoadIdentity()
        glRotatef(-90, 1, 0, 0)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        #mv = np.transpose(mv)
        glPopMatrix()
        self.mvMatrix = mv
    def dropui(self):
        """Clear every UI overlay (plane, lines, cross, grid, text)."""
        self.dropplane()
        self.droplines()
        self.dropcross()
        self.dropgrid()
        self.droptext()
    def rot(self,anglex=1,angley=1):
        """Apply fixed-angle rotations about y then x to mvMatrix (no redraw)."""
        glPushMatrix()
        glLoadIdentity()
        glRotatef(anglex, 0, 1, 0)
        glMultMatrixf(self.mvMatrix)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        glLoadIdentity()
        glRotatef(angley, 1, 0, 0)
        glMultMatrixf(mv)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        glPopMatrix()
        self.mvMatrix = mv
        for object in self.objects:
            object.update(self.mvMatrix)
        # self.updateGL()
        # if axis == 'x':
        #     self.rotx = anglex
        #     self.roty=0
        #     self.upmat()
        # elif axis == 'y':
        #     self.roty=angley
        #     self.rotx=0
        #     self.upmat()
        # elif axis=='xy':
        #     self.rotx = anglex
        #     self.mvMatrix = getmv(self.sc, self.tr, self.rotx, 0, self.mvMatrix)
        #     self.rotx=0
        #     self.roty = angley
        #     self.upmat()
    def rotp(self,angle,axis):
        """Rotate mvMatrix by *angle* degrees about the arbitrary *axis* and redraw."""
        glPushMatrix()
        glLoadIdentity()
        glRotatef(angle, *axis)
        glMultMatrixf(self.mvMatrix)
        mv = glGetDoublev(GL_MODELVIEW_MATRIX)
        glPopMatrix()
        self.mvMatrix = mv
        for object in self.objects:
            object.update(self.mvMatrix)
        self.updateGL()
| bakeryproducts/ConstructorM4 | glwidget.py | glwidget.py | py | 23,768 | python | en | code | 2 | github-code | 13 |
32417429548 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 18 13:09:36 2017
@author: Ryan McMahon
"""
import pickle
import re
import pandas as pd
from utils import fightinwords
#########################
### 0) LEMMAS
#########################

# 0.0a) Load the lemma DTM build object from disk
with open("D:/cong_text/robust/DTMs/unilem_dtmbuildobj.pkl", "rb") as f:
    dtmbuild = pickle.load(f)

# 0.0b) Keep only the feature (vocabulary) list ...
FEATS = dtmbuild.DTM_features

# 0.0c) ... and drop the heavyweight build object to free memory
del dtmbuild

# 0.1) Partition the features into word classes by their POS-tag suffixes
NNPs = [feat for feat in FEATS if re.search('\_NNP', feat) is not None]
NNs = [feat for feat in FEATS if re.search('\_(NNS?$|PRP)', feat) is not None]  # includes pronouns
PRPs = [feat for feat in FEATS if re.search('\_PRP', feat) is not None]
JJs = [feat for feat in FEATS if re.search('\_JJ$', feat) is not None]

#########################
### 1) COUNTS
#########################

# 1.0) Load the per-party, per-topic word counts
counts = pd.read_csv("D:/cong_text/robust/DTMs/unilem_partytopiccounts.csv", encoding='utf-8')

# 1.1) Attach the vocabulary as a 'word' column
counts['word'] = FEATS

# 1.2a) Prior columns sit at every 4th index, in two interleaved runs
PCOLS = sorted(list(range(2, 182, 4)) + list(range(3, 183, 4)))
PCOLS = [counts.columns[i] for i in PCOLS]

# 1.2b) Smooth the priors: add 0.01 to any prior column that contains a zero
for col in PCOLS:
    if counts[col].min() == 0:
        counts[col] += 0.01

#########################
### 2) MOST PARTISAN (uninformative Dirichlet)
#########################

# 2.0) Fit the fightin'-words model on all topics pooled together
fw0 = fightinwords(words=counts.word,
                   counts1=counts.gopcounts0,
                   counts2=counts.demcounts0,
                   priors1=counts.goppriors0,
                   priors2=counts.dempriors0)

# 2.1) Pronouns, most partisan (highest zeta) first
fw0_prp = fw0.loc[fw0.word.isin(PRPs), :]
fw0_prp = fw0_prp.sort_values(by='zeta', ascending=False)

# 2.2) Nouns (excluding proper nouns; the NNs pattern also admits pronouns)
fw0_nns = fw0.loc[fw0.word.isin(NNs), :]
fw0_nns = fw0_nns.sort_values(by='zeta', ascending=False)
| rymc9384/PartyOfSpeech | 06-robustness/01-unigrams/03-partisan_unigram_lemmas.py | 03-partisan_unigram_lemmas.py | py | 1,985 | python | en | code | 0 | github-code | 13 |
69796288018 | import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# ---- Model parameters (module level so the ODE right-hand side reads them) ----

# copy number
c = 0
# cooperativity of repressor binding
n = 1.0
# transcription rates
amRcas9 = 1.0
asgRNA = 1.0
aGmax = 1.0
aGmin = 0.0001
# degradation rates
ysgRNA = 0.1
ymRcas9 = 0.2
ycas9 = 0.2
yR = 0.1
ymRG = 0.2
yG = 0.2
# translation rates
bcas9 = 4.0
bG = 1.0
# on and off rates of repressor binding to the promoter
g = 1.0
kBindOn = 1.0
kBindOff = 0.000001
kRon = 1
kRoff = 0.1
# kDn = kRoff / kRon


def model(z, t):
    """Right-hand side of the CRISPRi gene-circuit ODE system.

    ``z`` packs the eight state variables in the order
    (mRcas9, cas9, sgRNA, R, PG, PGR, mG, G); ``t`` is unused (the system
    is autonomous) but required by ``scipy.integrate.odeint``.
    Returns the eight derivatives in the same order.
    """
    # Unpack the state vector in one step rather than indexing element-wise.
    mRcas9, cas9, sgRNA, R, PG, PGR, mG, G = z
    # NOTE(review): despite the cooperativity constant ``n``, the repressor
    # enters the promoter-binding terms linearly (Rn = R, not R**n) --
    # confirm this is intended for n == 1.
    Rn = R

    dmRcas9dt = c * amRcas9 - ymRcas9 * mRcas9
    dcas9dt = mRcas9 * bcas9 - ycas9 * cas9 - kBindOn * cas9 * sgRNA + kBindOff * R
    dsgRNAdt = c * asgRNA - ysgRNA * sgRNA - kBindOn * cas9 * sgRNA + kBindOff * R
    dRdt = kBindOn * cas9 * sgRNA - yR * R - kBindOff * R
    dPGdt = kRoff * PGR - kRon * Rn * PG + n * yR * PGR
    dPGRdt = kRon * Rn * PG - kRoff * PGR - n * yR * PGR
    dmGdt = aGmax * PG + aGmin * PGR - ymRG * mG
    dGdt = bG * mG - yG * G

    return [dmRcas9dt, dcas9dt, dsgRNAdt, dRdt, dPGdt, dPGRdt, dmGdt, dGdt]
# Sweep the plasmid copy number from 100 to 190 and integrate the circuit for
# each value; ``z`` keeps only the trajectory of the final sweep value.
z = []
for i in range(100, 200, 10):
    c = i  # rebinds the module-level copy number that model() reads
    t = np.linspace(0, 20, 1000)
    # Initial state: everything 0 except the free promoter pool PG = c.
    z0 = [0, 0, 0, 0, c, 0, 0, 0]
    z = odeint(model, z0, t)
    # Report the final-time value of state 7 (G) for this copy number.
    print(c,z[:, 7][-1])
# Plot promoter occupancy (PG, PGR = state columns 4 and 5) from the last run.
# plt.plot(t,z[:,0],'b-',label='mRcas9')
# plt.plot(t,z[:,1],'r-',label='cas9')
# plt.plot(t,z[:,2],'g-',label='sgRNA')
# plt.plot(t,z[:,3],'b-.',label='R')
plt.plot(t,z[:,4],'r-.',label='PG')
plt.plot(t,z[:,5],'g-.',label='PGR')
# plt.plot(t,z[:,5],'b--',label='mG')
# plt.plot(t,z[:,5],'r--',label='G')
plt.ylabel('concentration')
plt.xlabel('time')
plt.legend(loc='best')
plt.show() | igem-thessaloniki/model | CAS9/model.py | model.py | py | 1,755 | python | en | code | 0 | github-code | 13 |
20296192704 | """
desitarget.cmx.cmx_targetmask
=============================
This looks more like a script than an actual module.
"""
from desiutil.bitmask import BitMask
from desitarget.targetmask import load_mask_bits
# Build the commissioning (cmx) target and observation bitmasks from the
# shared bit definitions.
_bitdefs = load_mask_bits("cmx")
try:
    cmx_mask = BitMask('cmx_mask', _bitdefs)
    cmx_obsmask = BitMask('cmx_obsmask', _bitdefs)
except TypeError:
    # Fall back to bare placeholder objects when the definitions cannot be
    # turned into BitMasks -- presumably so the module still imports (e.g.
    # during documentation builds); TODO confirm.
    cmx_mask = object()
    cmx_obsmask = object()
| desihub/desitarget | py/desitarget/cmx/cmx_targetmask.py | cmx_targetmask.py | py | 412 | python | en | code | 17 | github-code | 13 |
74638339536 | # Quick sort - Hoare partition scheme
# 피벗은 가장 첫 번째 값으로 설정한다.
def quick_sort(array, start, end):
    """Sort ``array[start:end + 1]`` in place (Hoare-style partition).

    The first element of the range serves as the pivot.
    """
    # Ranges with fewer than two elements are already sorted.
    if start >= end:
        return

    pivot = start      # pivot index: first element of the range
    lo = start + 1
    hi = end

    while lo <= hi:
        # Advance ``lo`` while it points at values not larger than the pivot.
        while lo <= end and array[lo] <= array[pivot]:
            lo += 1
        # Retreat ``hi`` while it points at values not smaller than the pivot.
        while hi > start and array[hi] >= array[pivot]:
            hi -= 1
        if lo > hi:
            # Pointers crossed: move the pivot into its final position.
            array[hi], array[pivot] = array[pivot], array[hi]
        else:
            # Swap the out-of-place pair and keep scanning.
            array[lo], array[hi] = array[hi], array[lo]

    # Recurse on the sub-ranges to either side of the placed pivot.
    quick_sort(array, start, hi - 1)
    quick_sort(array, hi + 1, end)


array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
quick_sort(array, 0, len(array) - 1)
print(array)
| codehikerstudy/interview-question | MrKeeplearning/algorithm/src/quick_sort_hoare.py | quick_sort_hoare.py | py | 1,243 | python | ko | code | 0 | github-code | 13 |
327182231 | import pybio
import os
import sys
import pickle
# Module-level intern table: one canonical object per distinct string value.
cache_data = {}

def cache_string(string):
    """Intern *string*: return the cached (shared) instance for equal values.

    Repeatedly-loaded equal strings (e.g. GTF attribute values) then share a
    single object, saving memory and making ``is`` comparisons cheap.
    """
    # setdefault stores the string on first sight and returns the cached
    # instance thereafter; this replaces the original get()/``== None``
    # dance with a single lookup and the idiomatic identity semantics.
    return cache_data.setdefault(string, string)
class Gtf():
    """In-memory GTF annotation: parses a GTF file into ``pybio`` Gene objects."""

    def __init__(self, filename):
        """Read *filename* (tab-separated GTF) and index features by gene_id."""
        self.genes = {}
        self.filename = filename
        f = pybio.data.TabReader(filename)
        while f.readline():
            chr = f.r[0]
            gene_type = f.r[2]  # GTF column 3: feature type (exon, CDS, ...)
            start = int(f.r[3])
            stop = int(f.r[4])
            strand = f.r[6]
            # Parse the trailing `key "value"; ...` attribute column.
            attrs = {}
            for att in f.r[-1].split(";"):
                att = att.replace("\"", "").lstrip(" ").split(" ")
                attrs[att[0]] = " ".join(att[1:])
            if attrs.get("gene_id", None) is None:
                continue
            # Reuse the gene if already seen, otherwise create it.
            gene = self.genes.get(attrs["gene_id"], pybio.data.Gene(attrs["gene_id"], chr, strand, attrs=attrs))
            feature = pybio.data.GeneFeature(start, stop, gene_type, gene)
            gene.add_feature(feature)
            self.genes[gene.id] = gene

    def get_genes(self, chr, pos):
        """Return the set of gene ids whose exons cover *pos* on *chr*.

        NOTE(review): relies on ``self.bin_size`` and ``self.pindex`` being
        attached externally (they are not set in __init__) -- confirm the
        caller builds the positional index first.
        """
        # Integer floor division keeps the bin key an int under Python 3
        # (plain ``/`` would produce a float and always miss the index).
        bin = pos // self.bin_size
        candidate_genes = self.pindex.get(chr, {}).get(bin, [])
        position_genes = set()
        for gene_id in candidate_genes:
            for feature in self.genes[gene_id].features:
                if feature.type != "exon":
                    continue
                if feature.start <= pos <= feature.stop:
                    position_genes.add(gene_id)
        return position_genes

    def write_gff3(self, filename):
        """Export the genes to *filename* in a minimal GFF3 layout."""
        # Context manager guarantees the file is closed; items() replaces
        # the Python 2-only iteritems().
        with open(filename, "wt") as f:
            for gene_id, gene in self.genes.items():
                row = [gene.chr, "ap", "gene", gene.start, gene.stop, "", gene.strand, ".", "ID=%s;Name=%s" % (gene_id, gene_id)]  # gene record
                f.write("\t".join(str(x) for x in row) + "\n")
                row = [gene.chr, "ap", "mRNA", gene.start, gene.stop, "", gene.strand, ".", "ID=%s.t1;Parent=%s" % (gene_id, gene_id)]  # mRNA record
                f.write("\t".join(str(x) for x in row) + "\n")
                for exon_index, feature in enumerate(gene.features):
                    if feature.type not in ["exon", "CDS"]:
                        continue
                    row = [gene.chr, "ap", "CDS", feature.start, feature.stop, "", gene.strand, ".", "ID=%s.t1.cds;Parent=%s.t1" % (gene_id, gene_id)]  # CDS record
                    f.write("\t".join(str(x) for x in row) + "\n")
                    row = [gene.chr, "ap", "exon", feature.start, feature.stop, "", gene.strand, ".", "ID=%s.t1.exon%s;Parent=%s.t1" % (gene_id, exon_index+1, gene_id)]  # exon record
                    f.write("\t".join(str(x) for x in row) + "\n")
                f.write("\n")
| grexor/pybio | pybio/data/Gtf.py | Gtf.py | py | 2,777 | python | en | code | 7 | github-code | 13 |
22929309866 | #from dataclasses import dataclass
from typing import List
#@dataclass
#class year:# Klasse zum Speichern von Daten eines Jahres.
# year:int
# months:List[float]
class year:
    """Container for one calendar year's worth of monthly values."""

    year: int
    months: List[float]

    def __init__(self, year, months):
        # Bind both attributes with a single tuple assignment.
        self.year, self.months = year, months
#@dataclass
#class station:# Klasse zum Speichern und verwalten von Daten bezülich einer Station.
# station_id:int
# years:List[year]
class station:
    """A measuring station: an identifier plus its per-year records."""

    station_id: int
    years: List[year]

    def __init__(self, station_id, years):
        # Bind both attributes with a single tuple assignment.
        self.station_id, self.years = station_id, years
def read_data(path):
    """Parse the station CSV at *path* into a dict of ``station`` objects.

    Each line is ``station_id,year,m1,...``; a header line whose first
    column is the literal ``Station`` is skipped.  Returns a dict mapping
    the station-id string to its ``station`` object (with one ``year``
    entry per data line).
    """
    stations = {}
    with open(path) as f:
        for line in f:
            # Strip the newline and split the CSV row into fields.
            data = line.replace("\n", "").split(",")
            if data[0] != "Station":  # skip the header row
                # Month values are parsed as floats into a year record.
                y = year(data[1], [float(a) for a in data[2:]])
                if data[0] in stations:
                    # Known station: append this year's record.
                    stations[data[0]].years.append(y)
                else:
                    # First sighting: create the station with this year.
                    stations[data[0]] = station(data[0], [y])
    return stations
class realStation:
    """Monthly/seasonal/annual climatology and anomaly pipeline for one station.

    Values are integer hundredths of a degree; -9999 marks missing data.
    Call build() to run steps 1-7, then write_data() to dump a cleaned CSV
    (degrees, two decimals, "NaN" for missing).
    """

    def __init__(self, s: "station"):
        self.mon_avg = [[] for i in range(12)]    # Jan..Dec normals (filled by step1)
        self.season_avg = [0 for i in range(4)]   # DJF/MAM/JJA/SON normals (step2)
        self.ann_avg = 0                          # annual normal (step3)
        self.dmon = None                          # monthly anomalies (step4)
        self.dann = None                          # annual anomalies (step6)
        self.id = int(s.station_id)
        self.data = s.years
        # A climatology needs a reasonably long record.
        self.is_acceptable = len(self.data) >= 20
        self.data.sort(key=lambda a: int(a.year))
        # Prepend the previous year's December so that index 0 of every year
        # is "previous Dec" and indices 1..12 are Jan..Dec (needed for DJF).
        self.data[0].months = [-9999] + self.data[0].months
        for i in range(1, len(self.data)):
            self.data[i].months = self.data[i - 1].months[-1:] + self.data[i].months

    def step1(self):
        """Compute per-month normals: the mean over all non-missing years."""
        mon_count = [0 for i in range(12)]
        if self.is_acceptable:
            for current_year in self.data:
                for i in range(len(self.mon_avg)):
                    # months[i+1] skips the prepended previous-December slot.
                    if current_year.months[i + 1] != -9999:
                        self.mon_avg[i].append(current_year.months[i + 1])
                        mon_count[i] += 1
            for i in range(len(self.mon_avg)):
                self.mon_avg[i] = sum(self.mon_avg[i]) / mon_count[i]
            print(f"{self.id}: step1 finished")
        else:
            print(f"{self.id}: step1 failed")

    def step2(self):
        """Average the monthly normals into the four seasonal normals."""
        seas1 = self.mon_avg[0:2] + self.mon_avg[-1:]   # DJF: Jan, Feb + Dec
        seas2 = self.mon_avg[2:5]                       # MAM
        seas3 = self.mon_avg[5:8]                       # JJA
        seas4 = self.mon_avg[8:11]                      # SON
        self.season_avg = [sum(seas1) / len(seas1), sum(seas2) / len(seas2), sum(seas3) / len(seas3), sum(seas4) / len(seas4)]
        print(f"{self.id}: step2 finished")

    def step3(self):
        """Average the seasonal normals into the annual normal."""
        self.ann_avg = sum(self.season_avg) / 4
        print(f"{self.id}: step3 finished")

    def step4(self):
        """Convert raw monthly values to anomalies (value minus monthly normal)."""
        # NOTE(review): list.copy() is shallow, so the year objects (and their
        # months) inside self.data are mutated here too.  Nothing downstream
        # reads self.data's months again, but confirm before reusing them.
        self.dmon = self.data.copy()
        for year in self.dmon:
            for i in range(len(year.months)):
                if not year.months[i] == -9999:
                    # (11 + i) % 12 maps slot 0 (previous Dec) to mon_avg[11].
                    year.months[i] = year.months[i] - self.mon_avg[(11 + i) % 12]
        print(f"{self.id}: step4 finished")

    def step5(self):
        """Average the monthly anomalies into per-year seasonal anomalies.

        A season needs at least two non-missing months; otherwise it is -9999.
        """
        self.dseas = [[[] for i in range(4)] for year in self.dmon]
        for i in range(len(self.dmon)):
            cyear = self.dmon[i]
            # j runs over prev-Dec..Nov; j // 3 buckets them into DJF..SON.
            for j in range(len(cyear.months) - 1):
                if not cyear.months[j] == -9999:
                    self.dseas[i][j // 3].append(cyear.months[j])
            for g in range(4):
                if len(self.dseas[i][g]) > 1:
                    self.dseas[i][g] = sum(self.dseas[i][g]) / len(self.dseas[i][g])
                else:
                    self.dseas[i][g] = -9999
        print(f"{self.id}: step5 finished")

    def step6(self):
        """Average seasonal anomalies into annual anomalies (>= 3 seasons needed)."""
        self.dann = [-9999 for i in range(len(self.dseas))]
        for i in range(len(self.dseas)):
            cy = self.dseas[i]
            cy = list(filter(lambda x: not x == -9999, cy))
            if len(cy) > 2:
                self.dann[i] = sum(cy) / len(cy)
            else:
                self.dann[i] = -9999
        print(f"{self.id}: step6 finished")

    def step7(self):
        """Add the normals back onto the anomalies to get absolute values."""
        self.seas = [[0 for i in range(4)] for i in range(len(self.dseas))]
        self.ann = [-9999 for i in range(len(self.dann))]
        for year in range(len(self.dseas)):
            for season in range(4):
                if self.dseas[year][season] == -9999:
                    self.seas[year][season] = -9999
                else:
                    self.seas[year][season] = self.season_avg[season] + self.dseas[year][season]
            if self.dann[year] == -9999:
                self.ann[year] = -9999
            else:
                self.ann[year] = self.ann_avg + self.dann[year]
        print(f"{self.id}: step7 finished")

    def build(self):
        """Run steps 1-7 in order; skip stations with too short a record."""
        if self.is_acceptable:
            self.step1()
            self.step2()
            self.step3()
            self.step4()
            self.step5()
            self.step6()
            self.step7()
        else:
            print(f"Bad Station ignored:{self.id}")
        print("---------------------------------------------------------------------")

    def write_data(self):
        """Write year,DJF,MAM,JJA,SON,ANN rows to Data/<id>.csv."""
        with open("Data/" + str(self.id) + ".csv", "w+") as f:
            f.write("year,DJF,MAM,JJA,SON,ANN\n")  # header
            for index, year in enumerate(self.data):
                # bugfix: the JJA column previously repeated the SON value
                # (self.seas[index][3] was used for both columns).
                seas = [round(self.seas[index][0] / 100, 2),
                        round(self.seas[index][1] / 100, 2),
                        round(self.seas[index][2] / 100, 2),
                        round(self.seas[index][3] / 100, 2),
                        round(self.ann[index] / 100, 2)]
                for i in range(5):
                    if seas[i] == -99.99:  # -9999/100: the missing-value sentinel
                        seas[i] = "NaN"
                f.write(f"{year.year},{seas[0]},{seas[1]},{seas[2]},{seas[3]},{seas[4]}\n")
34895565229 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 7 19:46:02 2019
@author: yaoweili
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
import tensorflow as tf
'''
This file contains functions as follow:
| functions | Usage |
| multicolored_lines | plot deconvolution values as color on original signal |
| colorline | plot deconvolution values as color on original signal |
| make_segments | plot deconvolution values as color on original signal |
| zscore | preprocessing functions |
| unpooling | unpooling function, keep the pooling position |
| ... and others as zero values |
'''
#--------------------------plotting function--------------------------------
def multicolored_lines(x,y,z,layer,img_id,model_id):
    """Plot y(x) as a line whose color encodes the deconvolution values z.

    layer=None titles the figure as the raw/example plot, otherwise the layer
    index is used.  img_id is unused in the current code path; model_id only
    appears in the commented-out title variant.  Returns the figure.
    """
#    fig, ax = plt.subplots(figsize=[20,2])
    fig=plt.figure(figsize=[60,2])
    # Symmetric color scale around zero so positive/negative values balance.
    norm=plt.Normalize(-z.max(), z.max())
    # NOTE(review): the hard-coded 1800:3500 window must match the length of
    # x/y supplied by the caller -- confirm before reusing elsewhere.
    lc = colorline(x, y,z[1800:3500], cmap='coolwarm',norm=norm)
#    lc = colorline(x,y,z, cmap='coolwarm',norm=norm)
    plt.colorbar(lc)
    if layer==None:
#        plt.title('1-layer ConvNet-model-{}'.format(model_id))
        plt.title('Example of Noise beat')
    else:
        plt.title('layer'+str(layer))
    plt.xlim(x.min(), x.max())
    plt.ylim(y.min(), y.max())
#    plt.show()
    return fig
def colorline(x, y, z, cmap, norm,
        linewidth=3, alpha=1.0):
    """Add a LineCollection to the current axes, coloring segments by z.

    Returns the LineCollection so the caller can attach a colorbar to it.
    """
    z = np.asarray(z)
    segments = make_segments(x, y)
    lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
                              linewidth=linewidth, alpha=alpha)
    # Attach to whatever axes are current; the caller owns the figure.
    ax = plt.gca()
    ax.add_collection(lc)
    return lc
def make_segments(x, y):
    """
    Create list of line segments from x and y coordinates, in the correct format
    for LineCollection: an array of the form numlines x (points per line) x 2 (x
    and y) array
    """
    pts = np.array([x, y]).T.reshape(-1, 1, 2)
    # Pair each point with its successor: segment i is (point i, point i+1).
    return np.concatenate([pts[:-1], pts[1:]], axis=1)
#--------------------------preprocessing function--------------------------------
def zscore(data):
    """Center *data* on its mean and scale by the (axis-0) std when nonzero."""
    mean = np.mean(data)
    std = np.std(data, axis=0)
    if std != 0:
        return (data - mean) / std
    return data - mean
#--------------------------unpooling function--------------------------------
def unpooling(relu,index,pool):
    """Invert a pooling step: scatter pooled values back to recorded positions.

    relu:  tensor with the pre-pool shape (b, h, w, c); only its shape is used.
    index: flat scatter-target indices (presumably argmax positions from a
           max-pool -- confirm with the caller).
    pool:  pooled values; reshaped to 4-D if it arrives as (b, h, w).
    Returns the scattered tensor reshaped to (1, h, -1); non-pooled positions
    are left as zeros.
    """
    b, h, w, c = relu.shape.as_list()
    shape=tf.constant([b * h * w * c], dtype=tf.int64)
    try:
        b2, h2, w2=pool.shape.as_list()
        pool=tf.reshape(pool,[b2,h2,w2,1])
    except:
        # pool was already 4-D; the 3-value unpack above failed.
        print('no need to reshape')
    # Scatter each pooled value to its flat target position; everything else
    # in the (b*h*w*c)-long buffer stays zero.
    unpool_flattened = tf.scatter_nd(tf.reshape(index[:,:,:,:], [-1,1]), tf.reshape(pool[:,:,:,:], [-1]), shape)
    unpool=tf.reshape(unpool_flattened,[1,h,-1])
    return unpool
| geekleahhh/1-D-DeconvNet-for-ECG-signals | functions.py | functions.py | py | 3,025 | python | en | code | 0 | github-code | 13 |
class Solution:
    def topKFrequent(self, words, k):
        """Return the k most frequent words, most frequent first; ties are
        broken alphabetically.

        :type words: List[str]
        :type k: int
        :rtype: List[str]
        """
        from collections import Counter
        import heapq
        # (-count, word) orders by frequency descending, then alphabetically
        # ascending -- exactly the required tie-break.
        heap = [(-count, word) for word, count in Counter(words).items()]
        heapq.heapify(heap)  # O(n) instead of n pushes
        return [heapq.heappop(heap)[1] for _ in range(k)]
8114603022 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""a converter from AI0 feature to AJ1 feature"""
# The implementation is very incomplete and very very ugly.
import sys, re
from collections import namedtuple
from enum import Enum
class GsubFragmentType(Enum):
    """Token kinds produced by preprocess_gsub_line for one GSUB rule token."""
    UNKNOWN = 0
    CID = 1  # a glyph reference written as "\<number>"
    FROMBY = 2  # the keywords "from" / "by"
    OTHER = 0xbeef  # any other token (class names, keywords, ...)
GsubFragment = namedtuple('GsubFragment', ["val", "typ"])
def all_comments(lines):
    """Return True if every line is blank or a '#' comment (no real content).

    An empty sequence counts as "all comments", matching the previous loop
    which fell through to True.
    """
    return all(
        stripped == "" or stripped.startswith("#")
        for stripped in (line.strip() for line in lines)
    )
def preprocess_class_def(line):
    """Replace whitespace inside a '[...]' glyph-class literal with '#'.

    This keeps a class definition together when later stages tokenize on
    whitespace.  Because ``.*`` is greedy and the closing bracket optional,
    a match runs from the first '[' to the end of the line.  Lines without
    '[' are returned unchanged.
    """
    # Note: the former "if not iterator" early-return was dead code --
    # re.finditer always returns a (truthy) iterator object.
    newline = ""
    e = 0
    for m in re.finditer(r"\[.*\]?", line):
        newline += line[e:m.start()] + re.sub(r"\s+", "#", m.group())
        e = m.end()
    return newline + line[e:]
def preprocess_gsub_line(line, mapf):
    """Parse one GSUB rule line into GsubFragment tokens, remapping CIDs.

    Returns None when the rule references a CID that has no entry in *mapf*
    (i.e. a glyph absent from the new font), so the caller can drop the rule.
    """
    # Strip the leading "sub..." keyword and the trailing ";..." part.
    body = re.sub(r"\s*;.*$", "", re.sub(r"^\s*sub\S*\s+", "", line))
    tokens = []
    for token in re.split(r"\s+", body):
        if token in ("from", "by"):
            tokens.append(GsubFragment(val=token, typ=GsubFragmentType.FROMBY))
        elif token.startswith("\\"):
            cid = int(token[1:])
            if cid not in mapf:
                return None
            tokens.append(GsubFragment(val=mapf[cid], typ=GsubFragmentType.CID))
        else:
            tokens.append(GsubFragment(val=token, typ=GsubFragmentType.OTHER))
    return tokens
class LookupProc(object):
    """Collects and converts the body of a `lookup <tag> {...}` block.

    Substitution rules get their CIDs remapped; rules that reference CIDs
    missing from the new font are dropped.  end() re-emits the block unless
    nothing but comments survived.
    """
    def __init__(self, tag, mapf):
        self.tag = tag    # lookup name
        self.mapf = mapf  # old-CID -> new-CID mapping
        self.lines = []   # converted body lines
    def valid(self):
        # Only lookups that kept at least one line are registered by Proc.
        return True if self.lines else False
    def start(self):
        pass
    def end(self):
        # Drop the whole lookup if only comments survived conversion.
        if all_comments(self.lines):
            return
        print("lookup %s {" % (self.tag))
        for line in self.lines:
            print(line)
        print("} %s;" % (self.tag))
    def line(self, line):
        if re.search(r"^\s*sub", line):
            parsed_line = preprocess_gsub_line(line, self.mapf)
            # None means the rule used an unmapped CID -> drop it.
            if parsed_line:
                newline = " substitute"
                for fragm in parsed_line:
                    if fragm.typ == GsubFragmentType.CID:
                        newline += " \\%d" % (fragm.val)
                    elif fragm.typ == GsubFragmentType.FROMBY or fragm.typ == GsubFragmentType.OTHER:
                        newline += " %s" % (fragm.val)
                newline += ";"
                self.lines.append(newline)
        else:
            self.lines.append(line)
##############################
class ClassProc(object):
    """Collects the members of a glyph class (`@name = [...]`), remapping CIDs.

    Members whose CID has no mapping are silently dropped.  When the class
    appears inside a feature block, the rendered definition is handed back to
    the enclosing FeatureProc (via Proc.class_end) instead of printed here.
    """
    def __init__(self, tag, mapf, inside_feature=False):
        self.tag = tag                        # class name, e.g. "@foo"
        self.mapf = mapf                      # old-CID -> new-CID mapping
        self.inside_feature = inside_feature  # defined inside a feature block?
        self.cids = []                        # remapped member CIDs
        self.cls_def = ""                     # rendered definition line
    def valid(self):
        return True if self.cids else False
    def start(self):
        pass
    def end(self):
        if self.cids:
            self.cls_def = " {} = [{}];".format(self.tag, " ".join(["\\%d" % (cid) for cid in self.cids]))
            if not self.inside_feature:
                print(self.cls_def)
    def line(self, line):
        # Each whitespace-separated "\<cid>" token becomes a member if mapped.
        for fragm in re.split(r"\s+", line):
            if fragm[0:1] == "\\":
                cid = int(fragm[1:])
                if cid in self.mapf:
                    self.cids.append(self.mapf[cid])
##############################
class TableProc(object):
    """Default handler for a `table <tag> {...}` block: pass lines through."""
    def __init__(self, tag, mapf):
        self.tag = tag
        self.mapf = mapf
    def start(self):
        print("table %s {" % (self.tag))
    def end(self):
        print("} %s;" % (self.tag))
    def line(self, line):
        print(line)
class HheaProc(TableProc):
    """hhea handler: pins Ascender/Descender to the AJ1 values (880 / -120)."""

    def __init__(self, mapf):
        super().__init__("hhea", mapf)

    def line(self, line):
        if "Ascender" in line:
            line = re.sub(r"Ascender\s+([-\d]+)", "Ascender 880", line)
        elif "Descender" in line:
            line = re.sub(r"Descender\s+([-\d]+)", "Descender -120", line)
        print(line)
class VmtxProc(TableProc):
    """vmtx handler: remaps the CID in per-glyph vertical-metrics entries.

    Entries whose CID has no mapping in the new font are dropped entirely.
    """

    def __init__(self, mapf):
        super().__init__("vmtx", mapf)

    def line(self, line):
        m = re.search(r"Vert\S+\s+\\(\d+)", line)
        if m is None:
            print(line)
            return
        cid = int(m.group(1))
        if cid in self.mapf:
            print(re.sub(r"\\\d+", r"\\%d" % (self.mapf[cid]), line))
        # unmapped CIDs: the whole entry is intentionally omitted
class OS2Proc(TableProc):
    """OS/2 handler: pins winAscent/winDescent to the AJ1 values (880 / 120)."""

    def __init__(self, mapf):
        super().__init__("OS/2", mapf)

    def line(self, line):
        if "winAscent" in line:
            line = re.sub(r"winAscent\s+([-\d]+)", "winAscent 880", line)
        elif "winDescent" in line:
            line = re.sub(r"winDescent\s+([-\d]+)", "winDescent 120", line)
        print(line)
##############################
class FeatureProc(object):
    """Default handler for a `feature <tag> {...}` block: pass lines through."""
    def __init__(self, tag, mapf, lookups=None):
        self.tag = tag        # feature tag, e.g. "liga"
        self.mapf = mapf      # old-CID -> new-CID mapping
        self.lookups = lookups  # names of lookups that survived conversion
    def start(self):
        print("feature %s {" % (self.tag))
    def end(self):
        print("} %s;" % (self.tag))
    def line(self, line):
        print(line)
class GeneralGsubProc(FeatureProc):
    """Feature handler for GSUB features (ccmp, liga, vert, ...).

    Buffers lines so the whole feature can be suppressed when only comments
    survive.  `lookup X;` references are kept only if lookup X itself
    survived conversion; substitution rules get their CIDs remapped.
    """
    def __init__(self, tag, mapf, lookups):
        super().__init__(tag, mapf, lookups)
        self.lines = []
    def start(self):
        pass
    def end(self):
        if all_comments(self.lines):
            return
        print("feature %s {" % (self.tag))
        for line in self.lines:
            print(line)
        print("} %s;" % (self.tag))
    def line(self, line):
        m = re.search(r"^\s*lookup\s+(\S+)\s*;", line)
        if m:
            lookup = m.group(1)
            # Keep the reference only if the lookup was emitted.
            if lookup in self.lookups:
                self.lines.append(line)
            return
        if re.search(r"^\s*sub", line):
            parsed_line = preprocess_gsub_line(line, self.mapf)
            # None: the rule used an unmapped CID -> drop it.
            if parsed_line:
                newline = " substitute"
                for fragm in parsed_line:
                    if fragm.typ == GsubFragmentType.CID:
                        newline += " \\%d" % (fragm.val)
                    elif fragm.typ == GsubFragmentType.FROMBY or fragm.typ == GsubFragmentType.OTHER:
                        newline += " %s" % (fragm.val)
                newline += ";"
                self.lines.append(newline)
            return
        self.lines.append(line)
# XXX: very ugly and complicated ...
class LoclProc(FeatureProc):
    """Feature handler for 'locl': buffers script/language headers.

    A `script`/`language` statement is only emitted if at least one real
    (non-comment) rule follows it before the next header -- otherwise the
    empty section is dropped.  tmp_script/tmp_lang hold the pending headers
    and tmp_gsublines the rules collected since the last header.
    """
    def __init__(self, tag, mapf, lookups):
        super().__init__(tag, mapf, lookups)
        self.tmp_script = None
        self.tmp_lang = None
        self.tmp_gsublines = []
        self.lines = []
    def start(self):
        pass
    def end(self):
        # Flush the last pending section before deciding whether to print.
        if not all_comments(self.tmp_gsublines):
            if self.tmp_script:
                self.lines.append(self.tmp_script)
            if self.tmp_lang:
                self.lines.append(self.tmp_lang)
            self.lines.extend(self.tmp_gsublines)
        if all_comments(self.lines):
            return
        print("feature %s {" % (self.tag))
        for line in self.lines:
            print(line)
        print("} %s;" % (self.tag))
    def line(self, line):
        if re.search(r"^\s*script", line):
            if all_comments(self.tmp_gsublines):
                # first comments
                if not self.tmp_script and not self.tmp_lang:
                    self.lines.extend(self.tmp_gsublines)
            else:
                # The previous section produced real rules: flush it.
                if self.tmp_script:
                    self.lines.append(self.tmp_script)
                if self.tmp_lang:
                    self.lines.append(self.tmp_lang)
                self.lines.extend(self.tmp_gsublines)
            self.tmp_script = line
            self.tmp_lang = None
            self.tmp_gsublines = []
            return
        if re.search(r"^\s*language", line):
            if not all_comments(self.tmp_gsublines):
                if self.tmp_script:
                    self.lines.append(self.tmp_script)
                if self.tmp_lang:
                    self.lines.append(self.tmp_lang)
                self.lines.extend(self.tmp_gsublines)
            self.tmp_script = None
            self.tmp_lang = line
            self.tmp_gsublines = []
            return
        m = re.search(r"^\s*lookup\s+(\S+)\s*;", line)
        if m:
            lookup = m.group(1)
            # Keep the reference only if the lookup survived conversion.
            if lookup in self.lookups:
                self.tmp_gsublines.append(line)
            return
        if re.search(r"^\s*sub", line):
            parsed_line = preprocess_gsub_line(line, self.mapf)
            # None: the rule used an unmapped CID -> drop it.
            if parsed_line:
                newline = " substitute"
                for fragm in parsed_line:
                    if fragm.typ == GsubFragmentType.CID:
                        newline += " \\%d" % (fragm.val)
                    elif fragm.typ == GsubFragmentType.FROMBY or fragm.typ == GsubFragmentType.OTHER:
                        newline += " %s" % (fragm.val)
                newline += ";"
                self.tmp_gsublines.append(newline)
            return
        self.tmp_gsublines.append(line)
class PaltVpalHaltVhalProc(FeatureProc):
    """Feature handler for palt/vpal/halt/vhal single-glyph positioning.

    The rule's CID is remapped; rules whose CID has no mapping in the new
    font are dropped entirely.
    """

    def __init__(self, tag, mapf):
        super().__init__(tag, mapf)

    def line(self, line):
        m = re.search(r"pos\S*\s+\\(\d+)", line)
        if m is None:
            print(line)
            return
        cid = int(m.group(1))
        if cid in self.mapf:
            print(re.sub(r"\\\d+", r"\\%d" % (self.mapf[cid]), line))
        # unmapped CIDs: the rule is intentionally omitted
class KernVkrnProc(FeatureProc):
    """Feature handler for kern/vkrn pair positioning.

    Each pair rule is re-tokenized: "@class" tokens must name a class that
    survived conversion and "\\cid" tokens must be mappable -- otherwise the
    whole rule is dropped (the bare `return`s below).  The feature itself is
    suppressed when only comments remain.
    """
    def __init__(self, tag, mapf, classes):
        super().__init__(tag, mapf)
        self.classes = classes  # names of classes that survived conversion
        self.lines = []
    def start(self):
        pass
    def end(self):
        if all_comments(self.lines):
            return
        print("feature %s {" % (self.tag))
        for line in self.lines:
            print(line)
        print("} %s;" % (self.tag))
    def line(self, line):
        m = re.search(r"^(.*pos\S*)\s+(.*)\s*;", line)
        if m:
            declaration = m.group(1)
            pairs_value = m.group(2).strip()
            latter_half_fragments = []
            for fragm in re.split(r"\s+", pairs_value):
                if fragm[0:1] == "@":
                    if fragm not in self.classes:
                        return  # drop rules that use a vanished class
                    latter_half_fragments.append(fragm)
                elif fragm[0:1] == "\\":
                    cid = int(fragm[1:])
                    if cid not in self.mapf:
                        return  # drop rules that use an unmapped CID
                    latter_half_fragments.append("\\%d" % (self.mapf[cid]))
                else:
                    latter_half_fragments.append(fragm)
            self.lines.append("{} {};".format(declaration, " ".join(latter_half_fragments)))
        else:
            self.lines.append(line)
##############################
class Proc(object):
    """Dispatcher that owns the per-construct sub-processors.

    FeatureConverter feeds it start/line/end events for lookups, classes,
    tables and features; Proc instantiates the right handler via the
    factories below and records which lookups/classes survived conversion
    so later features can filter their references.
    """
    def __init__(self, mapf):
        self.mapf = mapf       # old-CID -> new-CID mapping
        self.lookups = set()   # names of lookups that survived conversion
        self.classes = set()   # names of classes that survived conversion
        self.cur_look = None
        self.cur_cls = None
        self.cur_tbl = None
        self.cur_fea = None
    def line(self, line):
        # Lines outside any construct are passed through unchanged.
        print(line)
    ###
    def lookup_start(self, tag):
        self.cur_look = Proc.lookup_factory(tag, self.mapf)
        self.cur_look.start()
    def lookup_end(self):
        self.cur_look.end()
        if self.cur_look.valid() and self.cur_look.tag not in self.lookups:
            self.lookups.add(self.cur_look.tag)
        self.cur_look = None
    def lookup_line(self, line):
        self.cur_look.line(line)
    ###
    def class_start(self, tag):
        self.cur_cls = Proc.class_factory(tag, self.mapf, True if self.cur_fea else False)
        self.cur_cls.start()
    def class_end(self):
        self.cur_cls.end()
        if self.cur_cls.valid():
            if self.cur_cls.tag not in self.classes:
                self.classes.add(self.cur_cls.tag)
            # XXX: ugly...
            # A class defined inside a feature is forwarded to that feature's
            # buffer instead of being printed directly.
            if self.cur_fea:
                self.cur_fea.line(self.cur_cls.cls_def)
        self.cur_cls = None
    def class_line(self, line):
        self.cur_cls.line(line)
    ###
    def table_start(self, tag):
        self.cur_tbl = Proc.table_factory(tag, self.mapf)
        self.cur_tbl.start()
    def table_end(self):
        self.cur_tbl.end()
        self.cur_tbl = None
    def table_line(self, line):
        self.cur_tbl.line(line)
    ###
    def feature_start(self, tag):
        self.cur_fea = Proc.fearure_factory(tag, self.mapf, self.lookups, self.classes)
        self.cur_fea.start()
    def feature_end(self):
        self.cur_fea.end()
        self.cur_fea = None
    def feature_line(self, line):
        self.cur_fea.line(line)
    #####
    @staticmethod
    def lookup_factory(tag, mapf):
        return LookupProc(tag, mapf)
    @staticmethod
    def class_factory(tag, mapf, inside_feature):
        return ClassProc(tag, mapf, inside_feature)
    @staticmethod
    def table_factory(tag, mapf):
        # Tables with AJ1-specific fixups get dedicated handlers.
        if tag == "hhea":
            return HheaProc(mapf)
        elif tag == "vmtx":
            return VmtxProc(mapf)
        elif tag == "OS/2":
            return OS2Proc(mapf)
        else:
            return TableProc(tag, mapf)
    @staticmethod
    def fearure_factory(tag, mapf, lookups, classes):
        # NOTE: "fearure" is a long-standing typo kept for the internal caller.
        if tag in ["palt", "vpal", "halt", "vhal"]:
            return PaltVpalHaltVhalProc(tag, mapf)
        elif tag in ["kern", "vkrn"]:
            return KernVkrnProc(tag, mapf, classes)
        elif tag in ["ccmp", "hist", "liga", "dlig", "fwid",
                     "hwid", "pwid", "jp78", "jp83", "jp90",
                     "nlck", "vert", "vrt2"]:
            return GeneralGsubProc(tag, mapf, lookups)
        elif tag == "locl":
            return LoclProc(tag, mapf, lookups)
        else:
            return FeatureProc(tag, mapf, lookups)
##################################################
class FeatureConverter(object):
    """Line-oriented driver: walks the .fea file and routes each line.

    sys.argv[1] is the feature file, sys.argv[2] the "new old" CID map file.
    Lookup handling is evaluated first because lookups nest inside features.
    """
    def __init__(self):
        self.fea = sys.argv[1]
        self.mapf = FeatureConverter.readMapFile(sys.argv[2])
        # Names of the construct currently being parsed (None when outside).
        self.cur_tbl = None
        self.cur_fea = None
        self.cur_look = None
        self.cur_cls = None
    def run(self):
        self._walk_through_fea()
    @staticmethod
    def readMapFile(map_f):
        """Parse "<new> <old>" CID pairs; first mapping for an old CID wins."""
        map_ = {}
        with open(map_f) as f:
            for line in f.readlines():
                m = re.search(r"(\d+)\s+(\d+)", line)
                if m:
                    cid_to = int(m.group(1))
                    cid_from = int(m.group(2))
                    if cid_from not in map_:
                        map_[cid_from] = cid_to
        return map_
    def _walk_through_fea(self):
        proc = Proc(self.mapf)
        with open(self.fea) as f:
            for line in [l.rstrip() for l in f.readlines()]:
                self._line_proc(line, proc)
    def _line_proc(self, line, proc):
        # evaluate lookup case first because it is defined inside feature definition.
        if self._lookup_proc(line, proc):
            pass
        elif self._class_proc(line, proc):
            pass
        elif self._table_proc(line, proc):
            pass
        elif self._feature_proc(line, proc):
            pass
        else:
            proc.line(line)
    def _lookup_proc(self, line, proc):
        """Handle `lookup X { ... } X;` blocks; True if the line was consumed."""
        m = re.search(r"^\s*lookup\s+(\S+)\s*{", line)
        if m:
            self.cur_look = m.group(1)
            proc.lookup_start(self.cur_look)
            return True
        if self.cur_look:
            if re.search(r"^\s*}\s*%s\s*;" % (self.cur_look), line):
                proc.lookup_end()
                self.cur_look = None
                return True
            proc.lookup_line(line)
            return True
        return False
    def _class_proc(self, line, proc):
        """Handle `@name = [ ... ];` definitions, possibly spanning lines."""
        m = re.search(r"^\s*(@[a-zA-Z0-9_]+)\s*=\s*\[(.*)", line)
        if m:
            self.cur_cls = m.group(1)
            latter_half = m.group(2)
            latter_half = re.sub(r"#.*", "", latter_half).replace(";", "").strip()
            proc.class_start(self.cur_cls)
            if latter_half != "":
                # Anything before a closing ']' on this line is member data.
                a = latter_half.split("]")
                cls_line = a[0]
                if cls_line != "":
                    proc.class_line(cls_line)
                if len(a) > 1:
                    proc.class_end()
                    self.cur_cls = None
            return True
        # NOTE(review): this checks cur_look, but continuation lines of a
        # multi-line class can only reach here when cur_look is None (lookup
        # lines are consumed earlier) -- it looks like cur_cls was intended,
        # so multi-line class bodies fall through to proc.line(); verify.
        if self.cur_look:
            line = re.sub(r"#.*", "", line).replace(";", "").strip()
            a = line.split("]")
            cls_line = a[0]
            if cls_line != "":
                proc.class_line(cls_line)
            if len(a) > 1:
                proc.class_end()
                self.cur_cls = None
            return True
        return False
    def _table_proc(self, line, proc):
        """Handle `table X { ... } X;` blocks; True if the line was consumed."""
        m = re.search(r"^\s*table\s+(\S+)\s*{", line)
        if m:
            self.cur_tbl = m.group(1)
            proc.table_start(self.cur_tbl)
            return True
        if self.cur_tbl:
            if re.search(r"^\s*}\s*%s\s*;" % (self.cur_tbl), line):
                proc.table_end()
                self.cur_tbl = None
                return True
            proc.table_line(line)
            return True
        return False
    def _feature_proc(self, line, proc):
        """Handle `feature X { ... } X;` blocks; True if the line was consumed."""
        m = re.search(r"^\s*feature\s+(\S+)\s*{", line)
        if m:
            self.cur_fea = m.group(1)
            proc.feature_start(self.cur_fea)
            return True
        if self.cur_fea:
            if re.search(r"^\s*}\s*%s\s*;" % (self.cur_fea), line):
                proc.feature_end()
                self.cur_fea = None
                return True
            proc.feature_line(line)
            return True
        return False
################################################################################
ver = sys.version_info
# The converter relies on Python 3 semantics (f-strings, Enum); warn only.
if ver.major < 3:
    print("I may not work... :(")
# FeatureConverter reads sys.argv[1] (.fea file) and sys.argv[2] (CID map).
conv = FeatureConverter()
conv.run()
| derwind/fontUtils | ai0_to_aj1/mk_features.py | mk_features.py | py | 18,230 | python | en | code | 1 | github-code | 13 |
class Solution:
    def arrayStringsAreEqual(self, word1: List[str], word2: List[str]) -> bool:
        """Return True iff the concatenations of word1 and word2 are equal.

        The two-pointer walk is replaced by joining each array once -- same
        O(n + m) time, far simpler, and immune to pointer bookkeeping bugs.
        """
        return "".join(word1) == "".join(word2)
| AshwinRachha/LeetCode-Solutions | 1662-check-if-two-string-arrays-are-equivalent/1662-check-if-two-string-arrays-are-equivalent.py | 1662-check-if-two-string-arrays-are-equivalent.py | py | 862 | python | en | code | 0 | github-code | 13 |
import boto3
import time

# Start an AWS Glue job and poll until it reaches a terminal state.
glue = boto3.client('glue')

table_name = 'my_table'
job_name = f'job_for_{table_name}'

print(f"Starting Glue job: {job_name}")
# bugfix: the start_job_run response was discarded, so the polling loop
# crashed with a NameError the first time it read response['JobRunId'].
response = glue.start_job_run(JobName=job_name)
run_id = response['JobRunId']

status = 'STARTING'
while status in ['STARTING', 'RUNNING']:
    time.sleep(10)  # Glue state transitions are slow; poll every 10s
    response = glue.get_job_run(JobName=job_name, RunId=run_id)
    status = response['JobRun']['JobRunState']
    print(f"Job status: {status}")

if status == 'SUCCEEDED':
    print("Glue job completed successfully!")
else:
    print(f"Glue job failed with status: {status}")
| abhi1094/cdk-sample-projects | start_glue_job.py | start_glue_job.py | py | 567 | python | en | code | 0 | github-code | 13 |
#Load dataset: the classic 150-sample iris set bundled with scikit-learn.
from sklearn import datasets
iris = datasets.load_iris()
print(iris['feature_names'])
print(iris['target_names'])
print(iris['data'][0])
print(iris['target'][0])
#split data into train and test data
#currently taking only 3 records for testing, one for each
#flower type located at rows 0, 50 and 100 of the dataset
test_idx=[0,50,100]
import numpy as np
#training data: everything except the three held-out rows
train_target = np.delete(iris['target'], test_idx)
train_data = np.delete(iris['data'], test_idx, axis=0)
#testing data: exactly the three held-out rows
test_target = iris['target'][test_idx]
test_data = iris['data'][test_idx]
#Training: fit a decision tree on the remaining 147 samples
from sklearn import tree
clfr = tree.DecisionTreeClassifier()
clfr.fit(train_data, train_target)
#print the expected result
print(test_target)
#print actual results (should match the expected labels above)
print(clfr.predict(test_data))
73662990736 | import xgboost as xgb
import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter
from alphaml.utils.constants import *
from alphaml.engine.components.models.base_model import BaseRegressionModel
class XGBoostRegressor(BaseRegressionModel):
    """XGBoost gradient-boosted-tree regressor wrapped for the alpha-ml API.

    Hyperparameters mirror get_hyperparameter_search_space(); training uses
    the native xgb.train API with objective 'reg:linear' and RMSE metric.
    """
    def __init__(self, n_estimators, eta, min_child_weight, max_depth, subsample, gamma, colsample_bytree,
                 alpha, lambda_t, scale_pos_weight, random_state=None):
        self.n_estimators = n_estimators
        self.eta = eta
        self.min_child_weight = min_child_weight
        self.max_depth = max_depth
        self.subsample = subsample
        self.gamma = gamma
        self.colsample_bytree = colsample_bytree
        self.alpha = alpha          # L1 regularization weight
        self.lambda_t = lambda_t    # L2 regularization weight ('lambda' is reserved)
        self.scale_pos_weight = scale_pos_weight
        self.n_jobs = -1
        self.random_state = random_state
        self.estimator = None       # set by fit()
        self.time_limit = None

    def fit(self, X, Y):
        """Train a booster on (X, Y) and return self."""
        # n_estimators arrives as a float from the config space (see below).
        self.n_estimators = int(self.n_estimators)
        dmtrain = xgb.DMatrix(X, label=Y)
        # NOTE(review): num_cls counts distinct target values -- a leftover
        # from the classifier twin; it is never used for regression.
        self.num_cls = len(set(Y))
        parameters = dict()
        parameters['eta'] = self.eta
        parameters['min_child_weight'] = self.min_child_weight
        parameters['max_depth'] = self.max_depth
        parameters['subsample'] = self.subsample
        parameters['gamma'] = self.gamma
        parameters['colsample_bytree'] = self.colsample_bytree
        parameters['alpha'] = self.alpha
        parameters['lambda'] = self.lambda_t
        parameters['scale_pos_weight'] = self.scale_pos_weight
        parameters['objective'] = 'reg:linear'
        parameters['eval_metric'] = 'rmse'
        parameters['tree_method'] = 'hist'
        parameters['booster'] = 'gbtree'
        parameters['nthread'] = self.n_jobs
        # NOTE(review): 'silent' is deprecated in newer xgboost releases --
        # confirm the pinned version still accepts it.
        parameters['silent'] = 1
        watchlist = [(dmtrain, 'train')]

        self.estimator = xgb.train(parameters, dmtrain, self.n_estimators, watchlist, verbose_eval=0)
        self.objective = parameters['objective']
        return self

    def predict(self, X):
        """Predict targets for X; fit() must have been called first."""
        if self.estimator is None:
            raise NotImplementedError
        dm = xgb.DMatrix(X, label=None)
        pred = self.estimator.predict(dm)
        return np.array(pred)

    @staticmethod
    def get_properties(dataset_properties=None):
        """Describe model capabilities for the alpha-ml component registry."""
        return {'shortname': 'XGBoost',
                'name': 'XGradient Boosting Regressor',
                'handles_regression': True,
                'handles_classification': False,
                'handles_multiclass': False,
                'handles_multilabel': False,
                'is_deterministic': True,
                'input': (DENSE, SPARSE, UNSIGNED_DATA),
                'output': (PREDICTIONS,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """Build the ConfigSpace searched by the optimizer."""
        cs = ConfigurationSpace()
        # n_estimators is a quantized float here; fit() casts it to int.
        n_estimators = UniformFloatHyperparameter("n_estimators", 50, 500, default_value=200, q=20)
        eta = UniformFloatHyperparameter("eta", 0.025, 0.3, default_value=0.3, q=0.025)
        min_child_weight = UniformIntegerHyperparameter("min_child_weight", 1, 10, default_value=1)
        max_depth = UniformIntegerHyperparameter("max_depth", 2, 10, default_value=6)
        subsample = UniformFloatHyperparameter("subsample", 0.5, 1, default_value=1, q=0.05)
        gamma = UniformFloatHyperparameter("gamma", 0, 1, default_value=0, q=0.1)
        colsample_bytree = UniformFloatHyperparameter("colsample_bytree", 0.5, 1, default_value=1., q=0.05)
        alpha = UniformFloatHyperparameter("alpha", 1e-10, 10, log=True,default_value=1e-10)
        lambda_t = UniformFloatHyperparameter("lambda_t", 1e-10, 10,log=True, default_value=1e-10)
        scale_pos_weight = CategoricalHyperparameter("scale_pos_weight", [0.01, 0.1, 1., 10, 100], default_value=1.)
        cs.add_hyperparameters(
            [n_estimators, eta, min_child_weight, max_depth, subsample, gamma, colsample_bytree, alpha, lambda_t,
             scale_pos_weight])
        return cs
| dingdian110/alpha-ml | alphaml/engine/components/models/regression/xgboost.py | xgboost.py | py | 4,152 | python | en | code | 1 | github-code | 13 |
41224922726 | import torch
import os
from utils.utilGeneral import *
import random
def my_is_NAN(input_list: list):
    """Return True if any tensor in *input_list* contains a NaN value.

    Uses torch.isnan directly instead of probing min/max through numpy --
    checks every element and needs no numpy round-trip (the old code relied
    on np being star-imported from utilGeneral).
    """
    for ts in input_list:
        if torch.isnan(ts).any().item():
            return True
    return False
def my_soft_max_2(input: torch.tensor):
    """Numerically stable row-wise softmax (dim=1) via log-sum-exp."""
    log_norm = torch.logsumexp(input, 1).unsqueeze(1).expand(-1, input.size(1))
    return torch.exp(input - log_norm)
def my_shift_right(input: torch.tensor):
    """Rotate *input* one step along dim 0 (last row becomes the first).

    torch.roll expresses the slice-and-concat of the old code in one op.
    """
    return torch.roll(input, shifts=1, dims=0)
def my_reverse_tensor(input: torch.tensor):
    """Reverse *input* along dim 0.

    torch.flip replaces the hand-built index tensor; the old index was
    created on the default device, which index_select rejects when the
    input lives on a different device.
    """
    return torch.flip(input, dims=[0])
def my_save_checkpoint(ckpt_file, model):
    # Save model weights; errors are logged (not raised) so a failed save
    # never aborts a long training run.
    print('Saving Checkpoint', ckpt_file)
    try:
        torch.save(model.state_dict(), ckpt_file)
    except Exception as err:
        print('Fail to save checkpoint', ckpt_file)
        print('Error:', err)
def my_load_checkpoint(ckpt_file, model):
    # Load weights into `model` in place; unlike my_save_checkpoint this
    # propagates any I/O or state-dict mismatch errors to the caller.
    print('Loading Checkpoint', ckpt_file)
    state_dict = torch.load(ckpt_file)
    model.load_state_dict(state_dict)
def my_decay_lr(optimizer, epoch, init_lr, decay_rate):
    """Exponentially decay the learning rate by epoch, floored at 1e-4."""
    lr = max(init_lr * ((1 - decay_rate) ** epoch), 0.0001)
    print('Learning Rate is setted as:', lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
def my_set_lr(optimizer, lr_input):
    """Set every param group's learning rate to *lr_input* and return the optimizer."""
    print('Learning Rate is setted as:', lr_input)
    for group in optimizer.param_groups:
        group['lr'] = lr_input
    return optimizer
def my_clip(model, max_norm=5.0):
    # Clip the global gradient norm of all model parameters to max_norm.
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
class Environment:
    """
    Per-run storage layout:
    model_dir/
        checkpoints/
        - e{e}.s{s}.ckpt
        evaluation/
        - output.txt
        prediction/
        - e{e}.dev.txt
        - e{e}.test.txt
        src/
        *.py
    Creating an Environment also pins the GPU, seeds all RNGs with 2 and
    sets the default tensor type to CUDA floats.
    """
    def __init__(self, model_dir, cuda):
        assert not os.path.exists(model_dir), f'目录已存在 {model_dir}'
        model_dir = os.path.realpath(model_dir)
        self.model_dir = model_dir # run root directory
        self.ckpt_dir = os.path.join(model_dir, 'checkpoints') # checkpoint directory
        self.eval_dir = os.path.join(model_dir, 'evaluation') # evaluation results
        # NOTE(review): "prediciton" is misspelled, but save_prediction uses
        # the same constant, so paths stay consistent on disk.
        self.pred_dir = os.path.join(model_dir, 'prediciton') # prediction results
        self.src_dir = os.path.join(model_dir, 'src') # snapshot of the sources used
        self.eval_file = os.path.join(self.eval_dir, "output.txt")
        os.mkdir(self.model_dir)
        os.mkdir(self.ckpt_dir)
        os.mkdir(self.eval_dir)
        os.mkdir(self.pred_dir)
        os.mkdir(self.src_dir)
        my_write_file(self.eval_file, "")
        self.copy_src()
        print(model_dir)
        os.environ['CUDA_VISIBLE_DEVICES'] = str(cuda)
        # Fixed seeds (2) for torch, numpy, CUDA and random -> reproducible runs.
        torch.manual_seed(2)
        np.random.seed(2)
        torch.cuda.manual_seed_all(2)
        random.seed(2)
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    def copy_src(self):
        """
        Snapshot the project's *.py files (cwd and cwd/utils) into src/.
        :return:
        """
        proj_dir = os.getcwd()
        src_files = os.path.join(proj_dir, '*.py')
        cmd = f'cp {src_files} {self.src_dir}'
        print('Copy Source Code:', cmd)
        os.system(cmd)
        #src_dir = os.path.dirname(os.path.realpath(__file__))
        src_dir = os.path.join(proj_dir, "utils")
        src_files = os.path.join(src_dir, '*.py')
        cmd = f'cp {src_files} {self.src_dir}'
        print('Copy Source Code:', cmd)
        os.system(cmd)
    def save_checkpoint(self, epoch: int, model):
        """
        Save a checkpoint as checkpoints/e{epoch}.ckpt (failures are logged).
        """
        ckpt_file = os.path.join(self.ckpt_dir, f'e{epoch}.ckpt')
        try:
            torch.save(model.state_dict(), ckpt_file)
            print('Checkpoint:', ckpt_file)
        except Exception as err:
            print('Failed to save checkpoint', ckpt_file)
            print('Error:', err)
    def save_prediction(self, epoch: int, test_lines: list):
        """
        Save inference output as prediction/e{epoch}.test.txt (one line each).
        """
        test_lines = [l + '\n' for l in test_lines]
        test_file = os.path.join(self.pred_dir, f'e{epoch}.test.txt')
        try:
            with open(test_file, 'w', encoding='utf-8') as fp:
                fp.writelines(test_lines)
            print('Prediction saved')
        except Exception as err:
            print('Failed to save prediction', test_file)
            print('Error:', err)
    def save_print(self, content):
        # Append evaluation output to evaluation/output.txt.
        my_write_file_append(self.eval_file, content)
        print("Output saved...")
| xxin1984/x-parser | utils/utilTorch.py | utilTorch.py | py | 5,051 | python | en | code | 4 | github-code | 13 |
20349643090 | import logging
import sys
import asyncio
from kademlia.Node import Node
# This script is used to launch non-interactive nodes. They can only
# bootstrap, and can't be issued commands. They are created by the
# simulation.sh script to help analyze network behaviour
def prompt():
    """Print the interactive command menu."""
    menu = ("'set <key (str)> <value (str)>' to store data\n"
            "'get <value (str)>' to retrieve data\n"
            "'inspect' to view this node's state\n"
            "'quit' to leave\n")
    print(menu)
################################################################################
async def do_get(node, key):
    """Look up *key* through *node* and print a human-readable result.

    Expects node.get to return a tuple whose first element is a success flag
    and second the value (a str on a clean hit).
    """
    result = await node.get(key)
    if result and isinstance(result[1], str):
        print(f"Found {key}:{result[1]} on the Kademlia network.")
    elif result[0]:
        # Lookup "succeeded" but returned something other than a string.
        # bugfix: this message was missing its f-prefix, so "{key}" was
        # printed literally instead of the requested key.
        # NOTE(review): a falsy non-indexable result (e.g. None) would raise
        # here -- confirm node.get always returns a tuple.
        print(f"Failed to find {key} on the Kademlia network: Found:\n"
              + str(result[1:]))
    else:
        print(f"No such value for {key} on the Kademlia network.")
async def do_set(node, key, value):
    """Store key/value through *node* and report whether it succeeded."""
    stored = await node.put(key, value)
    if stored:
        print(f"Stored {key}:{value} on the Kademlia network.")
    else:
        print(f"Failed to store {key}:{value} on the Kademlia network.")
async def do_ping(node, ip, port):
    """PING ip:port via *node* and report whether a PONG came back."""
    result = await node.ping(ip, int(port))
    if not result[0]:
        print(f"No response received from {ip}:{port}")
    else:
        print(f"Received PONG from {result[1]}.")
################################################################################
def handle_input(node):
    """Read one command line from stdin and dispatch it against *node*.

    RPC commands (get/set/ping) are scheduled as asyncio tasks on the current
    loop; 'quit' escapes by raising KeyboardInterrupt, which the main loop
    catches to shut the node down.
    """
    prompt()
    tokens = sys.stdin.readline().rstrip().split(" ")
    cmd = tokens[0].rstrip()
    print(f"Attempting to run {cmd}...")
    try:
        if cmd == "quit":
            raise KeyboardInterrupt
        if cmd == "inspect":
            print(f"Data for this node: {node.data}")
            print(f"Routing table for {node.me}")
            print(str(node.table))
        elif cmd == "get":
            asyncio.create_task(do_get(node, tokens[1]))
        elif cmd == "set":
            asyncio.create_task(do_set(node, tokens[1], tokens[2]))
        elif cmd == "ping":
            asyncio.create_task(do_ping(node, tokens[1], tokens[2]))
        else:
            print(f"{cmd} is not a valid command. Try again.")
    except IndexError:
        # Commands with missing arguments land here.
        print("Invalid command. Try again.")
################################################################################
# Command-line parsing: 2 args start a fresh network; 4 args also bootstrap
# against an existing node; anything else prints usage and exits.
if len(sys.argv) == 3:
    my_ip = sys.argv[1]
    my_port = sys.argv[2]
    boot_ip = None
    boot_port = None
    print(f"Launching new Kademlia network on {my_ip}:{my_port}")
elif len(sys.argv) == 5:
    my_ip = sys.argv[1]
    my_port = sys.argv[2]
    boot_ip = sys.argv[3]
    boot_port = sys.argv[4]
    print(f"Launching new Kademlia node on {my_ip}:{my_port}"\
          f" with bootstrapping node {boot_ip}:{boot_port}")
else:
    print(f"Usage: python3 {sys.argv[0]} <Node IP> <Node port> "\
          f"[<bootstrap IP>] [<bootstrap port>]")
    exit(1)
# Route kademlia/rpcudp/asyncio logging through one stderr handler at INFO.
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log = logging.getLogger('kademlia')
log.setLevel(logging.INFO)
log.addHandler(handler)
logrpc = logging.getLogger('rpcudp')
logrpc.setLevel(logging.INFO)
logrpc.addHandler(handler)
logasyncio = logging.getLogger('asyncio')
logasyncio.setLevel(logging.INFO)
logasyncio.addHandler(handler)
loop = asyncio.get_event_loop()
loop.set_debug(True)
# Create Kademlia node
node = Node(my_ip, my_port)
print("This process stores and retrieves strings on a"\
      " distributed hash table based off of the Kademlia protocol.")
loop.run_until_complete(node.listen())
if boot_ip is not None and boot_port is not None:
    print("Performing bootstrapping...")
    loop.run_until_complete(node.bootstrap(boot_ip, boot_port))
prompt()
# React to stdin commands from the same event loop that serves the DHT.
loop.add_reader(sys.stdin, handle_input, node)
try:
    loop.run_forever()
except KeyboardInterrupt:
    # Raised by the 'quit' command (or Ctrl-C): stop the node cleanly.
    print("\nQuitting!")
    node.stop()
    loop.stop()
    exit(0)
| rowan-maclachlan/cmpt-434-proj | kad.py | kad.py | py | 4,039 | python | en | code | 0 | github-code | 13 |
73151538256 | #!/usr/bin/env python
"""Create a csv matrix of distances between shapefile geometry objects.
Requirements: fiona, shapely
Written by: Taylor Denouden
Date: November 25, 2015
"""
from __future__ import print_function

import os
import sys
from multiprocessing import Pool, cpu_count

import fiona
from shapely.geometry import shape
def extract_ids(input_file):
    """Return the feature id of every record in *input_file* (a shapefile)."""
    with fiona.open(input_file, 'r') as source:
        return [feature['id'] for feature in source]
def calc_dists(args):
    """Calculate distances between one feature and every feature in the file.

    ``args`` is a pair ``((i, i_shp), shps)`` where ``i`` is a feature id,
    ``i_shp`` is its shapely geometry and ``shps`` is the full list of
    ``(id, geometry)`` pairs.  Returns one distance per entry of ``shps``:
    -1 for features with a lower id (the matrix is symmetric, so the lower
    triangle is skipped) and -2 when the distance computation failed (the
    error text is written to logs/<id>.txt).
    """
    i, i_shp = args[0]
    shps = args[1]
    result = []
    # Calculate distances and store in result
    for (j, j_shp) in shps:
        if int(j) < int(i):
            # Lower triangle: covered by the symmetric pair, skip the work.
            dist = -1
        else:
            try:
                dist = i_shp.distance(j_shp)
            except Exception as e:
                # The original called sys.path.join (sys.path is a list), put
                # the file mode inside the join arguments, and concatenated
                # the exception object with a str — all three raised.
                os.makedirs("logs", exist_ok=True)
                with open(os.path.join("logs", i + ".txt"), "w+") as logfile:
                    logfile.write(str(e) + "\n")
                dist = -2
        result.append(dist)
    return result
def main():
    """Main execution thread.

    Builds an all-pairs distance matrix for the features of one shapefile
    using a process pool, then writes it to test.csv (upper triangle only:
    -1 marks the skipped lower-triangle cells).
    """
    # infile = "./data/random_points/test_polys.shp"
    infile = "./data/low_water_final/low_water.shp"
    ids = extract_ids(infile)
    shapes = []
    # Get all shapefiles in memory as shapely shapes
    with fiona.open(infile) as source:
        source = list(source)
        shapes = [(i, shape(source[int(i)]['geometry'])) for i in ids]
    # Calculate each the distance from each id to ids using a process pool
    print("Calculating distances")
    pool = Pool(processes=cpu_count(), maxtasksperchild=5)
    data = pool.map(calc_dists, [(i, shapes) for i in shapes], chunksize=50)
    # Write the data to a new csv file: header row of ids, then one row per id.
    with open("test.csv", "w") as outfile:
        # Write header of output file
        print("Writing Header")
        outfile.write("NODE,")
        outfile.write(",".join(ids))
        outfile.write("\n")
        # Write rows
        print("Writing Rows")
        for i in ids:
            outfile.write(i + ",")
            outfile.write(",".join([str(j) for j in data[int(i)]]))
            outfile.write("\n")
if __name__ == "__main__":
    main()
| tayden/Island_MST | shp_to_csv_distances.py | shp_to_csv_distances.py | py | 2,530 | python | en | code | 0 | github-code | 13 |
41182052000 | import numpy as np
import theano
import os
from dml import *
from dml.knearest import *
import common
from common import *
import random
DIR_SPECIES = 'datas/fishes_species'
classFolders = [dirName for dirName, e, files in os.walk(DIR_SPECIES) if len(dirName) > 2 + len(DIR_SPECIES)]
IMG_SHAPE = (50, 50)
IMG_COLOR_SHAPE = (3,) + IMG_SHAPE
# Test Datas
common.NB_CLASSES = 3
common.CLASS_FOLD = ["01", "03", "04", "08", "09", "10"]
common.CLASS_NAME = ["First fish(C1)", "Black fish(C2)", "Clown fish(C3)", "C4", "C5", "C6"]
common.preprocess = ImagePreprocess(newShape=IMG_SHAPE, grayscale=False)
FILE_NAME = "sia_mc_fishes_" + str(random.randint(1, 1e4))
FILE_NAME = "sia_mc_fishes_test"
FILE_NAME_LOAD = "sia_mc_fishes_test"
print("Using FILE_NAME =", FILE_NAME)
def transformDatas(dataset, nnet):
dataset[0] = nnet.runBatch(dataset[0])
K_NEIGHBORS = 1
def main():
    """Train (or load) a siamese embedding network on the fish dataset, then
    classify the embedded test set with a k-nearest-neighbour classifier."""
    # debugOn()
    quickTest = False
    fromFile = False
    if fromFile:
        network = Network.loadFrom("fishes/saves/"+FILE_NAME_LOAD+".json")
    else:
        # Fresh siamese net: dense 400 -> 20 embedding with tanh activations.
        network = SiameseNNet([
            InputLayer(IMG_COLOR_SHAPE),
            Dense(400),
            Activation(tanh),
            Dense(20),
            Activation(tanh),
        ],
            defaultLoss=l2cost,
            # dataProvider=RandomSiameseDataProvider
        )
    # network.setChecker(OneClassChecker())
    network.build()
    if not fromFile:
        network.saveTo("fishes/saves/"+FILE_NAME+".json")
    print("=> Network built!")
    print("Read datas...")
    validationDatas = getDataSet("validation")
    if quickTest:
        # Quick mode reuses validation data everywhere to skip the slow reads.
        testDatas, trainingDatas = validationDatas, validationDatas
    else:
        testDatas = getDataSet("test")
        trainingDatas = readDatasFrom(classFolders, list(range(len(classFolders))), labelBinRow=False)
    if fromFile:
        network.loadParameters("fishes/saves/"+FILE_NAME_LOAD+".mat")
    else:
        print("Start training")
        network.train(
            trainingDatas,
            nbEpochs = 10,
            batchSize = 40,
            algo = MomentumGradient(0.0004),
            # algo = GradientAlgo(0.004),
            monitors = StdOutputMonitor([
                # ("validation", validationDatas),
                # ("test", testDatas),
            ]),
            regul = 0.0#1
        )
        network.saveParameters("fishes/saves/"+FILE_NAME+".mat")
    def mean(l):
        # Recursive mean: averages nested lists level by level; None on empty.
        if len(l) == 0:
            return None
        if isinstance(l[0], list):
            return mean([mean(el) for el in l])
        return sum(l)/len(l)
    # Sanity check: mean squared embedding distance for (presumably) same-class
    # pairs (i, i+1) vs all cross pairs — TODO confirm the dataset ordering
    # actually makes consecutive indices same-class.
    ids = list(range(0, 2000, 20))
    r1 = network.runBatch(DirectDataFlow(trainingDatas).getDatas(ids)[0])[0]
    r2 = network.runBatch(DirectDataFlow(trainingDatas).getDatas([i+1 for i in ids])[0])[0]
    print("Dist same class", mean([sum([(a-b)**2 for a, b in zip(r1[i], r2[i])]) for i in range(len(r1))]))
    print("Dist diff class", mean([[sum([(a-b)**2 for a, b in zip(r1[i], r2[j])]) for j in range(i+1, len(r2))]for i in range(len(r1)-1)]))
    # Embed validation/test sets in place, then classify with k-NN.
    transformDatas(validationDatas, network)
    if not quickTest:
        transformDatas(testDatas, network)
    # transformDatas(trainingDatas, network)
    classifier = KNearestClassifier(nbClasses=common.NB_CLASSES, k=K_NEIGHBORS, datas=validationDatas)
    print("Predict datas with K =", K_NEIGHBORS)
    # printMetricResults(classifier.evalDataset(validationDatas), "validation")
    printMetricResults(classifier.evalDataset(testDatas), "test")
if __name__ == '__main__':
    main()
41113879331 | #
# tokenize a file with spacy tokenizer -> so that we don't have to do it on the fly
# -------------------------------
#
# usage:
# python matchmaker/preprocessing/tokenize_files.py --in-file <path> --out-file <path> --reader-type <labeled_tuple or triple>
import argparse
import os
import sys
sys.path.append(os.getcwd())
from tqdm import tqdm
from matchmaker.dataloaders.ir_labeled_tuple_loader import *
from matchmaker.dataloaders.ir_tuple_loader import *
from matchmaker.dataloaders.ir_triple_loader import *
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders import Embedding
from allennlp.common import Params, Tqdm
Tqdm.default_mininterval = 1
#
# config
#
# CLI: input/output paths, the reader format, and (for labeled_single only)
# whether to echo the source id alongside the tokenized target.
parser = argparse.ArgumentParser()
parser.add_argument('--out-file', action='store', dest='out_file',
                    help='output file', required=True)
parser.add_argument('--in-file', action='store', dest='in_file',
                    help='input file', required=True)
parser.add_argument('--reader-type', action='store', dest='reader_type',
                    help='labeled_tuple or triple or labeled_single', required=True)
parser.add_argument('--output-type', action='store', dest='output_type',
                    help='same or text_only (only used for labeled_single)', required=False)
args = parser.parse_args()
#
# load data (tokenize) & write out lines
# -------------------------------
#
# Pick the dataset reader matching the input format; each one tokenizes with
# spaCy eagerly so downstream training does not have to.
if args.reader_type=="labeled_tuple":
    loader = IrLabeledTupleDatasetReader(lazy=True,tokenizer=WordTokenizer()) # explicit spacy tokenize
elif args.reader_type=="labeled_single":
    loader = IrTupleDatasetReader(lazy=True,target_tokenizer=WordTokenizer()) # explicit spacy tokenize
elif args.reader_type=="triple":
    loader = IrTripleDatasetReader(lazy=True,tokenizer=WordTokenizer()) # explicit spacy tokenize
else:
    raise Exception("wrong reader_type:" + args.reader_type)
# Stream instances and write one tab-separated line per instance.
with open(args.out_file,"w",encoding="utf8") as out_file:
    instances = loader.read(args.in_file)
    for i in tqdm(instances):
        if args.reader_type=="labeled_tuple":
            # query_id, doc_id, query_sequence, doc_sequence
            out_file.write("\t".join([
                str(i["query_id"].label),
                str(i["doc_id"].label),
                " ".join(t.text for t in i["query_tokens"]),
                " ".join(t.text for t in i["doc_tokens"])])+"\n")
        elif args.reader_type=="triple":
            # query_sequence, doc_pos_sequence, doc_neg_sequence
            out_file.write("\t".join([
                " ".join(t.text for t in i["query_tokens"].tokens),
                " ".join(t.text for t in i["doc_pos_tokens"].tokens),
                " ".join(t.text for t in i["doc_neg_tokens"].tokens)])+"\n")
        elif args.reader_type=="labeled_single":
            # source, target
            if args.output_type == "same":
                # NOTE(review): tokens[0] is a Token object; concatenating it
                # with a str likely raises TypeError — presumably .text was
                # intended here. Confirm against the allennlp Token type.
                out_file.write(i["source_tokens"].tokens[0] +"\t"+" ".join(t.text for t in i["target_tokens"].tokens)+"\n")
            else:
                out_file.write(" ".join(t.text.lower() for t in i["target_tokens"].tokens)+"\n")
| sebastian-hofstaetter/sigir19-neural-ir | matchmaker/preprocessing/tokenize_files.py | tokenize_files.py | py | 3,115 | python | en | code | 45 | github-code | 13 |
24661551694 | # import get_string from cs50 library
from cs50 import get_string
# define main function
def main():
    """Ask for a passage, grade it with the Coleman-Liau index, and print it."""
    text = get_string("Text: ")
    # Tallies the index needs; a non-empty text has at least one word,
    # hence the initial word count of 1.
    results = {"letter_count": 0, "word_count": 1, "sentence_count": 0}
    count(text, results)
    grade = cl_index(results)
    # Clamp the reported grade to the readable range used by CS50.
    if grade < 1:
        print("Before Grade 1\n")
    elif grade >= 16:
        print("Grade 16+\n")
    else:
        print(f"Grade {grade}\n")
# define count function
def count(text, results):
    """Tally letters, spaces (word separators) and sentence enders of *text*
    into the *results* dict in place."""
    for ch in text:
        if "A" <= ch <= "Z" or "a" <= ch <= "z":
            results["letter_count"] += 1
        if ch == " ":
            results["word_count"] += 1
        if ch in ".!?":
            results["sentence_count"] += 1
# define coleman-liau index function
def cl_index(results):
    """Return the rounded Coleman-Liau index for the given tallies."""
    # The index works on letters and sentences per 100 words.
    letters_p100 = results["letter_count"] / results["word_count"] * 100
    sentences_p100 = results["sentence_count"] / results["word_count"] * 100
    return round((0.0588 * letters_p100) - (0.296 * sentences_p100) - 15.8)
if __name__ == "__main__":
main() | juliankohr/CS50x | 07_week_06_python/07_sentimental-readability/readability.py | readability.py | py | 1,565 | python | en | code | 0 | github-code | 13 |
# Word-frequency script: read a line count, then that many lines of words, and
# print every word whose frequency equals the *second highest* frequency seen.
d = {}
for i in range(int(input())):
    s = input().split()
    # NOTE(review): the loop variable `i` is reused here, shadowing the line
    # index above — harmless as written, but worth renaming.
    for i in s:
        if i not in d: d[i] = 1
        else: d[i] += 1
# Order words by descending frequency.
sort_d = {k: v for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True)}
# NOTE(review): `max` shadows the builtin of the same name.
max = 0
second = 0
# Highest frequency = first value in the sorted dict.
for k, v in sort_d.items():
    max = v
    break
# First frequency strictly below the highest; stays 0 when all words tie,
# in which case nothing is printed below.
for k, v in sort_d.items():
    if v < max:
        second = v
        break
for k, v in sort_d.items():
    if v == second:
        print(k, end=' ')
20411004489 | #!/usr/bin/env python3
import csv
import glob
import os
import re
# Log basenames look like: log_<nodes>x<procs>_f<freq>_replay<replay>_r<rep>_0.log
_filename_re = re.compile(r'log_([0-9]+)x([0-9]+)_f([0-9]+)_replay([0-9]+)_r([0-9]+)_0[.]log')
def parse_basename(filename):
    """Split a replay-log basename into its five numeric fields (as strings)."""
    match = _filename_re.match(filename)
    assert match is not None
    return match.groups()
# Matches the resilience log line that reports the checkpoint replay duration.
_replay_re = re.compile(r'^\[[0-9]+ - [0-9a-f]+\] +[0-9.]+ \{3\}\{resilience\}: Checkpoint replay finished in ([0-9.]+) seconds$', re.MULTILINE)
def parse_content(path):
    """Return a 1-tuple with the replay time found in the log at *path*,
    or ('ERROR',) when no replay line is present."""
    with open(path, 'r') as f:
        text = f.read()
    found = _replay_re.search(text)
    if found is None:
        return ('ERROR',)
    return (found.group(1),)
def main():
    """Collect all replay logs under checkpoint/, parse their filename fields
    and replay time, and print a tab-separated table on stdout."""
    paths = glob.glob('checkpoint/*_replay*_0.log')
    # One row per log: (dir, nodes, procs, freq, replay, rep, replay_time).
    content = [(os.path.dirname(path),) + parse_basename(os.path.basename(path)) + parse_content(path) for path in paths]
    content.sort(key=lambda row: (row[0], int(row[1]), int(row[2]), int(row[3]), int(row[4]), int(row[5])))
    import sys
    # with open(out_filename, 'w') as f:
    out = csv.writer(sys.stdout, dialect='excel-tab') # f)
    out.writerow(['system', 'nodes', 'procs_per_node', 'freq', 'replay', 'rep', 'replay_time'])
    out.writerows(content)
if __name__ == '__main__':
    main()
| StanfordLegion/resilience | experiment/parse_replay.py | parse_replay.py | py | 1,254 | python | en | code | 0 | github-code | 13 |
32688005882 | import botocore
def new_boto_exception(exception_constructor):
    """
    Get a new boto3 exception of the specified type with a mock message.

    The constructed exception renders as:

    >>> 'An error occurred (MockError) when calling the MockOperation operation: mock message'

    Works with any boto-style constructor, e.g.:

    >>> import botocore
    >>> new_boto_exception(botocore.exceptions.ClientError)
    >>>
    >>> import boto3
    >>> ddb_client = boto3.client('dynamodb')
    >>> new_boto_exception(ddb_client.exceptions.ConditionalCheckFailedException)

    Typical unit-test usage is to assign the result to a mock's side_effect
    so the code under test sees a realistic client error.
    """
    mock_error_response = {
        'Error': {
            'Code': 'MockError',
            'Message': 'mock message'
        }
    }
    return exception_constructor(
        operation_name='MockOperation',
        error_response=mock_error_response
    )
| aws/aws-gamekit-unreal | AwsGameKit/Resources/cloudResources/functionsTests/helpers/boto3/mock_responses/exceptions.py | exceptions.py | py | 1,369 | python | en | code | 68 | github-code | 13 |
31732074040 | __author__ = 'Indra Gunawan'
from ladon.compat import PORTABLE_STRING
from ladon.ladonizer import ladonize
import math
import re
import collections
from ladon.types.ladontype import LadonType
# Cross-request accumulator state for LogCron.count: results from successive
# calls are merged into temp3/tempc.  NOTE(review): `tempoftimec` is declared
# here and in the method's global statement but never assigned — it looks like
# a typo for the local `tempoftempc` used inside count(); confirm.
temp3 = []
tempc = []
tempoftemp = []
tempoftimec = []
hit = 0
flag = 0
nama_server = "DWI_SERVER"
class LogCron(object):
    """Ladon SOAP service that aggregates cron-log command frequencies.

    count() parses one syslog-style cron file per call and merges the command
    counts into module-level accumulators (temp3/tempc), so repeated calls
    build a combined tally across servers.  The merged table is written to the
    file named by nama_server and returned as a string.
    """
    @ladonize(str, rtype=str)
    def count(self, ofile):
        # Holds module-level accumulator state across calls; see the globals
        # defined at module scope.  NOTE(review): `folder_hasil_computasi` is
        # never defined anywhere (only used in commented-out code), and
        # `tempoftimec` is likely a typo for the local `tempoftempc` below.
        global folder_hasil_computasi, flag, temp3, hit, tempc, tempoftemp, tempoftimec
        temp1 = []
        temp_count = []
        # folder_log="Log/"
        # ofile = folder_log + ofile
        # Strip the syslog prefix (date/host/pid...) and keep only the command
        # text, which is the 9th whitespace-delimited field of each line.
        buka = open(ofile)
        for i, line in enumerate(buka):
            lol = re.split("\W+", line, 8)
            temp1.append('(' + lol[8])
        # f = open(folder_hasil_computasi + "cron-copy.txt", 'wb')
        # NOTE(review): the file is opened in binary mode but writelines() is
        # given str items — on Python 3 this raises TypeError; confirm whether
        # this module still targets Python 2 or the mode should be 'w'.
        f = open("cron-copy.txt", 'wb')
        f.writelines(temp1)
        buka.close()
        temp2 = []
        temp_count = []
        # Re-read the intermediate file and count identical command lines.
        # with open(folder_hasil_computasi + "cron-copy.txt") as infile:
        with open("cron-copy.txt") as infile:
            counts = collections.Counter(l.strip() for l in infile)
            for line, count in counts.most_common():
                temp2.append(line)
                temp_count.append(count)
                # return line, count
        infile.close()
        f.close()
        # tempoftemp.append([temp2, temp_count])
        # buka2 = open(ofile)
        '''
        fmt = '%-8s%-20s%s'
        print(fmt % ('', 'Frequent','Command'))
        fole = open("server1.txt", 'a')
        for i, (name, grade) in enumerate(zip(temp_count,temp2)):
            #print(fmt % (i, name, grade))
            data3 = fmt % (i, name, grade)
            #print data3
            fole.write(data3+"\n")
        buka2.close()
        '''
        # First call seeds the accumulators; later calls stash this file's
        # counts for the merge step below.
        if hit == 0:
            temp3 = temp2
            tempc = temp_count
            hit = hit + 1
        else:
            tempoftemp = temp2
            tempoftempc = temp_count
            hit = hit + 1
        # lola = temp + " "
        # lolu = lola + str(temp_count)
        # return lolu
        # print tempoftemp
        iter1 = 0
        iter2 = 0
        # Merge the new counts into the accumulated table: add counts for
        # commands already seen, append unseen commands.
        if hit > 1:
            lentemp = len(tempoftemp)
            lentemp3 = len(temp3)
            # print nyonyo
            cek = 0
            for i in range(lentemp):
                for j in range(lentemp3):
                    cek += 1
                    if tempoftemp[i] == temp3[j]:
                        tempc[j] += tempoftempc[i]
                        cek = -10;
                    if cek == lentemp3 - 1:
                        temp3.append(tempoftemp[i])
                        tempc.append(tempoftempc[i])
                        cek = 0
        #p = Page()
        #p.content = [None]*100
        # Dump the merged (count, command) table to the server-named file.
        buka2 = open(ofile)
        fmt = '%-8s%-20s%s'
        # print(fmt % ('', 'Frequent','Command'))
        fole = open(nama_server, 'w')
        # fole = open(folder_hasil_computasi + "server1.txt", 'w')
        for i, (name, grade) in enumerate(zip(tempc, temp3)):
            # print(fmt % (i, name, grade))
            data3 = fmt % (i, name, grade)
            #p.content.append(tempc[i])
            # print data3
            fole.write(data3 + "\n")
        buka2.close()
        fole.close()
        # Return the counts and commands concatenated as plain strings.
        coba = str(tempc)
        coba2 = str(temp3)
        coba3 = coba + coba2
        #print tempc
        return coba3
| ardinusawan/Sistem_Terdistribusi | Web-Service/SOAP/serverLadon.py | serverLadon.py | py | 3,365 | python | en | code | 0 | github-code | 13 |
72123485457 | # Date : 2016.08.05
# Author : yqtao
# https://github.com/yqtaowhu
class Solution:
    def strStr(self, source, target):
        """Return the index of the first occurrence of *target* in *source*.

        Returns -1 when either argument is None or when *target* does not
        occur; an empty *target* matches at index 0.  This is exactly the
        contract of str.find, which replaces the original hand-rolled
        character-by-character scan.
        """
        if source is None or target is None:
            return -1
        return source.find(target)
20971471376 | import numpy as np
import biosppy.signals as bsig
# Samples per second for each supported headset, used by transform() below.
DEVICE_SAMPLING_RATE = {'muse': 256, # is this right? is it 220 Hz (see documentation)?
                        }
def get_channels(signal, channels, device='muse'):
    """
    Returns a signal with only the desired channels.

    Arguments:
        signal: a signal of shape [n_samples, n_channels]
        channels: an array of the str names of the desired channels. returned in this order.
        device: str name of the device.

    Returns:
        numpy array of shape [n_samples, n_desired_channels] (the original
        docstring said [n_channels, ...], which did not match the code).
        Includes only the selected channels in the order given.

    Raises:
        ValueError: for an unrecognized device.  The original fell through
        and raised UnboundLocalError on `return_signal` instead.
    """
    # check device; each device has its own ch_ind dictionary corresponding to its available channels
    if device != 'muse':
        raise ValueError(f"unknown device: {device!r}")
    ch_ind_muse = {'TP9': 0, 'AF7': 1, 'AF8': 2, 'TP10': 3}
    return np.array([signal[:, ch_ind_muse[ch]] for ch in channels]).T
def transform(buffer, epoch_len, channels, device='muse', filter_=False, filter_kwargs={}):
    """Cut the newest *epoch_len* samples out of *buffer*, keep only the
    requested channels, and optionally band-pass filter the epoch.

    Arguments:
        buffer: latest stream data, shape [n_samples, n_channels]
        epoch_len: number of trailing samples the predictor expects
        channels: channel names to keep (see get_channels)
        device: device name, used for the channel map and sampling rate
        filter_: whether to run biosppy's filter_signal over the epoch
        filter_kwargs: overrides for the default filter settings (an order-8
            Butterworth band-pass between 2 Hz and 40 Hz)
    """
    # Trailing epoch_len samples, restricted to the selected channels.
    epoch = np.array(buffer[-epoch_len:, :])
    epoch = get_channels(epoch, channels, device)
    if not filter_:
        return epoch
    # Default band-pass settings, overridable per call.
    settings = {'sampling_rate': DEVICE_SAMPLING_RATE[device],
                'ftype': 'butter',
                'band': 'bandpass',
                'frequency': (2, 40),
                'order': 8}
    settings.update(filter_kwargs)
    # biosppy filters along the last axis, hence the transposes.
    filtered, _, _ = bsig.tools.filter_signal(signal=epoch.T, **settings)
    return filtered.T
def softmax_predict(input_, predictor, thresh=0.5):
    """Binarize a softmax prediction: classes at or above *thresh* become 1.

    Arguments:
        input_: the input taken by the predictor
        predictor: callable returning a softmax vector for input_
        thresh: minimum probability counted as a positive class
    """
    probabilities = np.array(predictor(input_))
    return (probabilities >= thresh).astype(int)
def encode_ohe_prediction(prediction):
    '''Returns the index of the positive class in a one-hot encoded prediction.'''
    positives = np.where(np.array(prediction) == 1)[0]
    return positives[0]
def decode_prediction(prediction, decode_dict):
    '''Translates a numeric prediction into a readable label via *decode_dict*.'''
    label = decode_dict[prediction]
    return label
| lukasbauer3091/alpha-light | streamStaffCode/classification_tools.py | classification_tools.py | py | 3,448 | python | en | code | 1 | github-code | 13 |
35654419418 | import cv2
from banknote import note_colors
from standalone.homography import find_match
# Shared state between the capture loop and the homography callbacks.
# NOTE(review): image_final is never assigned a frame before callback_founded
# draws on it, so cv2.polylines would receive None on the first hit — confirm.
image_final = None
notes_list = []
current_note = None
# One SIFT extractor reused for every template.
sift = cv2.xfeatures2d.SIFT_create()
def compute_homography(image, template_path, callback, debug=False):
    """Repeatedly match the template at *template_path* inside *image*.

    Each match's corner points are passed to *callback* and the matched region
    is masked out of the working image so the next iteration can find another
    instance of the same template.  Returns the list of all matched point sets.
    """
    points_list = []
    #img_final = cv2.imread(image_path, 1) # Displayed image
    img1 = cv2.imread(template_path, 0) # Matching templates
    img2 = image # Image to compute
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    ended = False
    while not ended:
        points = find_match(img1, kp1, des1, img2, debug)
        if points is not None:
            points_list.append(points) # Memorise points for other external usages
            img2 = cv2.fillPoly(img2, points, 255) # Fill to mask and compute next search with same template
            callback(points) # Main callback to notify found and return points
        else:
            ended = True
    return points_list #img_final
# Fouded callback from homography
def callback_founded(points):
global image_final
# Draw Contour on image
image_final = cv2.polylines(image_final, points, True, 255, 3, cv2.LINE_AA) # Contour
#cv2.imshow("Homography", image_final)
cv2.waitKey(1)
# Note program
notes_list.append(current_note)
print("->Founded a " + str(current_note.value) + " note.")
def do_homography(frame):
    """Search *frame* for every known banknote template and print the total
    value of all notes recognized so far (notes_list accumulates across calls)."""
    for color, note in note_colors.items():
        for image_note_path in note.sides:
            # Record which note the callback should credit for matches.
            global current_note
            current_note = note
            compute_homography(frame, image_note_path, callback_founded, debug=False)
    print("The image show a sum of : " + str(sum(note.value for note in notes_list)))
    cv2.waitKey(0)
# Grab frames from the default webcam and run note detection on each until
# the user presses 'q'.
cap = cv2.VideoCapture(0)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    do_homography(frame)
    #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
8105742062 | # Background Subtraction has several use cases in everyday life,
# It is being used for object segmentation, security enhancement,
# tracking, counting the number of visitors, number of vehicles in traffic etc.
# It is able to learn and identify the foreground mask.
# The popular Background subtraction algorithms are:
# BackgroundSubtractorMOG : It is a gaussian mixture based background segmentation algorithm.
# BackgroundSubtractorMOG2: It uses the same concept but the major advantage that it provides is in terms of stablity even when there is change in luminosity and better identification capablity of shadows in the frames.
# Geometric multigrid: It makes uses of statiistical method and per pixel bayesin segmentation algorithm.
# Python code for Background subtraction using OpenCV
import numpy as np
import cv2
# Stream the default webcam through a MOG2 background subtractor and show the
# foreground mask beside the raw frame; Esc (key code 27) quits.
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while(1):
    ret, frame = cap.read()
    # Update the background model with this frame and get the foreground mask.
    fgmask = fgbg.apply(frame)
    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame',frame )
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
27213148412 | '''
创建一个有10个数字的列表,先输出此列表,然后输出其中的偶数元素
'''
import random
List=[random.randint(1,50) for i in range(20)]
print(List)
for i in List:
if(i%2==0):
print(i,end=' ') | xiao-ying19/zzh | exe/exe_1.11/if_else.py | if_else.py | py | 230 | python | zh | code | 0 | github-code | 13 |
27995863179 | from datetime import date
import json
from flask_jwt_extended import get_jwt_identity
from models.transaction import Transaction
from dao import account_dao
from dao import budget_dao
from flask import Blueprint, jsonify, request
from flask_jwt_extended import jwt_required
account_blueprint = Blueprint('account', __name__,)
@account_blueprint.route('/account/<id>', methods=['GET'])
@jwt_required()
def getAccount(id):
    """Return the account for *id*; only the authenticated owner may read it."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # Same body as before, but with a real 401 status: the original
        # returned HTTP 200 for unauthorized requests.
        return jsonify({401: "Unauthorized."}), 401
    return jsonify(account_dao.getAccount(id))
@account_blueprint.route('/account/<id>/income/<year>/<month>', methods=['GET'])
@jwt_required()
def getAmountEarned(id, year, month):
    """Return the income earned by account *id* in the given month."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    return jsonify(account_dao.getAmountEarned(id, month, year))
@account_blueprint.route('/account/<id>/balance/<year>/<month>', methods=['GET'])
@jwt_required()
def getRemainingBalance(id, year, month):
    """Return the remaining budget balance for account *id* in the given month."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    return jsonify(budget_dao.getRemainingBalance(id, month, year))
@account_blueprint.route('/account/<id>/summary/<year>/<month>', methods=['GET'])
@jwt_required()
def getTotalSpentByCategory(id, year, month):
    """Return per-category spending totals for account *id* in the given month."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    return jsonify(account_dao.getTotalSpentByCategory(id, month, year))
@account_blueprint.route('/account/<id>/transactions/', methods=['GET'])
@jwt_required()
def getTransactionHistory(id):
    """Return the full transaction history for account *id*."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    return jsonify(account_dao.getTransactionHistory(id))
@account_blueprint.route('/account/<id>/transaction/', methods=['POST'])
@jwt_required()
def addTransaction(id):
    """Create a transaction for account *id* from the JSON request body."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    payload = json.loads(request.data)
    transaction = JSONToTransaction(payload)
    return jsonify(account_dao.addTransaction(transaction))
@account_blueprint.route('/account/<id>/transaction/', methods=['DELETE'])
@jwt_required()
def deleteTransaction(id):
    """Delete the transaction described by the JSON request body."""
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    payload = json.loads(request.data)
    transaction = JSONToTransaction(payload)
    return jsonify(account_dao.deleteTransaction(transaction))
@account_blueprint.route('/account/<id>/archive/', methods=["POST"])
@jwt_required()
def archiveAccount(id):
    """Archive the account and re-seed its active budgets for the new period.

    NOTE(review): the account archived is payload["username"], not *id* — the
    JWT check only compares numeric ids, so confirm the username is validated
    against the authenticated user somewhere upstream.
    """
    user_id = get_jwt_identity()
    if int(id) != int(user_id):
        # 401 status added: the original returned HTTP 200 for unauthorized.
        return jsonify({401: "Unauthorized."}), 401
    payload = json.loads(request.data)
    activeBudgets = budget_dao.getActiveBudgets(payload["username"])
    # Hoisted: the date string was formatted once per budget item before.
    today_str = date.today().strftime("%Y-%m-%d")
    account_dao.archiveAccount(payload["username"], today_str)
    # Carry every still-active budget line over into the new period.
    for budgetItem in activeBudgets:
        transaction = Transaction()
        transaction.owner = budgetItem["owner"]
        transaction.category = budgetItem["category"]
        transaction.date = today_str
        transaction.amount = budgetItem["amount"]
        budget_dao.addBudget(transaction)
    return jsonify("Account archived.")
def JSONToTransaction(json):
    """Build a Transaction from the request payload's 'transaction' object.

    (The parameter name `json` shadows the stdlib module inside this function;
    kept for interface compatibility.)
    """
    data = json['transaction']
    transaction = Transaction()
    transaction.id = data['id']
    transaction.owner = data["owner"]
    transaction.amount = data["amount"]
    transaction.archived = data["archived"]
    transaction.date = data["date"]
    transaction.category = data["category"]
    transaction.account = "main"
    return transaction
| mason-wolf/penny-budget | api/account.py | account.py | py | 4,061 | python | en | code | 0 | github-code | 13 |
17042879894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.VcpUniqueInfo import VcpUniqueInfo
class AlipayMarketingVoucherBatchqueryModel(object):
    def __init__(self):
        # Auto-generated Alipay SDK model: every query field defaults to None
        # and is exposed through the property pairs defined below.
        self._biz_codes = None
        self._create_end_time = None
        self._create_start_time = None
        self._freeze_codes = None
        self._page_num = None
        self._page_size = None
        self._product_codes = None
        self._sort_type = None
        self._status_list = None
        self._template_extend_info = None
        self._template_ids = None
        self._user_info = None
        self._voucher_extend_info = None
    # Auto-generated accessors.  List-typed setters copy the incoming list
    # element by element; scalar setters assign the value unchanged.
    @property
    def biz_codes(self):
        return self._biz_codes
    @biz_codes.setter
    def biz_codes(self, value):
        # Non-list values are silently ignored (generated-code convention).
        if isinstance(value, list):
            self._biz_codes = list()
            for i in value:
                self._biz_codes.append(i)
    @property
    def create_end_time(self):
        return self._create_end_time
    @create_end_time.setter
    def create_end_time(self, value):
        self._create_end_time = value
    @property
    def create_start_time(self):
        return self._create_start_time
    @create_start_time.setter
    def create_start_time(self, value):
        self._create_start_time = value
    @property
    def freeze_codes(self):
        return self._freeze_codes
    @freeze_codes.setter
    def freeze_codes(self, value):
        if isinstance(value, list):
            self._freeze_codes = list()
            for i in value:
                self._freeze_codes.append(i)
    @property
    def page_num(self):
        return self._page_num
    @page_num.setter
    def page_num(self, value):
        self._page_num = value
    @property
    def page_size(self):
        return self._page_size
    @page_size.setter
    def page_size(self, value):
        self._page_size = value
    @property
    def product_codes(self):
        return self._product_codes
    @product_codes.setter
    def product_codes(self, value):
        if isinstance(value, list):
            self._product_codes = list()
            for i in value:
                self._product_codes.append(i)
    @property
    def sort_type(self):
        return self._sort_type
    @sort_type.setter
    def sort_type(self, value):
        self._sort_type = value
    @property
    def status_list(self):
        return self._status_list
    @status_list.setter
    def status_list(self, value):
        if isinstance(value, list):
            self._status_list = list()
            for i in value:
                self._status_list.append(i)
    @property
    def template_extend_info(self):
        return self._template_extend_info
    @template_extend_info.setter
    def template_extend_info(self, value):
        self._template_extend_info = value
    @property
    def template_ids(self):
        return self._template_ids
    @template_ids.setter
    def template_ids(self, value):
        if isinstance(value, list):
            self._template_ids = list()
            for i in value:
                self._template_ids.append(i)
    @property
    def user_info(self):
        return self._user_info
    @user_info.setter
    def user_info(self, value):
        # Accepts either a ready VcpUniqueInfo or a dict to be converted.
        if isinstance(value, VcpUniqueInfo):
            self._user_info = value
        else:
            self._user_info = VcpUniqueInfo.from_alipay_dict(value)
    @property
    def voucher_extend_info(self):
        return self._voucher_extend_info
    @voucher_extend_info.setter
    def voucher_extend_info(self, value):
        self._voucher_extend_info = value
def to_alipay_dict(self):
params = dict()
if self.biz_codes:
if isinstance(self.biz_codes, list):
for i in range(0, len(self.biz_codes)):
element = self.biz_codes[i]
if hasattr(element, 'to_alipay_dict'):
self.biz_codes[i] = element.to_alipay_dict()
if hasattr(self.biz_codes, 'to_alipay_dict'):
params['biz_codes'] = self.biz_codes.to_alipay_dict()
else:
params['biz_codes'] = self.biz_codes
if self.create_end_time:
if hasattr(self.create_end_time, 'to_alipay_dict'):
params['create_end_time'] = self.create_end_time.to_alipay_dict()
else:
params['create_end_time'] = self.create_end_time
if self.create_start_time:
if hasattr(self.create_start_time, 'to_alipay_dict'):
params['create_start_time'] = self.create_start_time.to_alipay_dict()
else:
params['create_start_time'] = self.create_start_time
if self.freeze_codes:
if isinstance(self.freeze_codes, list):
for i in range(0, len(self.freeze_codes)):
element = self.freeze_codes[i]
if hasattr(element, 'to_alipay_dict'):
self.freeze_codes[i] = element.to_alipay_dict()
if hasattr(self.freeze_codes, 'to_alipay_dict'):
params['freeze_codes'] = self.freeze_codes.to_alipay_dict()
else:
params['freeze_codes'] = self.freeze_codes
if self.page_num:
if hasattr(self.page_num, 'to_alipay_dict'):
params['page_num'] = self.page_num.to_alipay_dict()
else:
params['page_num'] = self.page_num
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.product_codes:
if isinstance(self.product_codes, list):
for i in range(0, len(self.product_codes)):
element = self.product_codes[i]
if hasattr(element, 'to_alipay_dict'):
self.product_codes[i] = element.to_alipay_dict()
if hasattr(self.product_codes, 'to_alipay_dict'):
params['product_codes'] = self.product_codes.to_alipay_dict()
else:
params['product_codes'] = self.product_codes
if self.sort_type:
if hasattr(self.sort_type, 'to_alipay_dict'):
params['sort_type'] = self.sort_type.to_alipay_dict()
else:
params['sort_type'] = self.sort_type
if self.status_list:
if isinstance(self.status_list, list):
for i in range(0, len(self.status_list)):
element = self.status_list[i]
if hasattr(element, 'to_alipay_dict'):
self.status_list[i] = element.to_alipay_dict()
if hasattr(self.status_list, 'to_alipay_dict'):
params['status_list'] = self.status_list.to_alipay_dict()
else:
params['status_list'] = self.status_list
if self.template_extend_info:
if hasattr(self.template_extend_info, 'to_alipay_dict'):
params['template_extend_info'] = self.template_extend_info.to_alipay_dict()
else:
params['template_extend_info'] = self.template_extend_info
if self.template_ids:
if isinstance(self.template_ids, list):
for i in range(0, len(self.template_ids)):
element = self.template_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.template_ids[i] = element.to_alipay_dict()
if hasattr(self.template_ids, 'to_alipay_dict'):
params['template_ids'] = self.template_ids.to_alipay_dict()
else:
params['template_ids'] = self.template_ids
if self.user_info:
if hasattr(self.user_info, 'to_alipay_dict'):
params['user_info'] = self.user_info.to_alipay_dict()
else:
params['user_info'] = self.user_info
if self.voucher_extend_info:
if hasattr(self.voucher_extend_info, 'to_alipay_dict'):
params['voucher_extend_info'] = self.voucher_extend_info.to_alipay_dict()
else:
params['voucher_extend_info'] = self.voucher_extend_info
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingVoucherBatchqueryModel()
if 'biz_codes' in d:
o.biz_codes = d['biz_codes']
if 'create_end_time' in d:
o.create_end_time = d['create_end_time']
if 'create_start_time' in d:
o.create_start_time = d['create_start_time']
if 'freeze_codes' in d:
o.freeze_codes = d['freeze_codes']
if 'page_num' in d:
o.page_num = d['page_num']
if 'page_size' in d:
o.page_size = d['page_size']
if 'product_codes' in d:
o.product_codes = d['product_codes']
if 'sort_type' in d:
o.sort_type = d['sort_type']
if 'status_list' in d:
o.status_list = d['status_list']
if 'template_extend_info' in d:
o.template_extend_info = d['template_extend_info']
if 'template_ids' in d:
o.template_ids = d['template_ids']
if 'user_info' in d:
o.user_info = d['user_info']
if 'voucher_extend_info' in d:
o.voucher_extend_info = d['voucher_extend_info']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayMarketingVoucherBatchqueryModel.py | AlipayMarketingVoucherBatchqueryModel.py | py | 9,627 | python | en | code | 241 | github-code | 13 |
72922352977 | from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField, ImageField
from shops.models import Shop, create_slug
from comments.serializers import CommentSerializer
from comments.models import Comment
from products.serializers import ProductSerializer
from products.models import Product
class Base64ImageField(ImageField):
    """
    A Django REST framework field for handling image-uploads through raw post data.
    It uses base64 for encoding and decoding the contents of the file.
    Heavily based on
    https://github.com/tomchristie/django-rest-framework/pull/1268
    Updated for Django REST framework 3.
    """
    def to_internal_value(self, data):
        # Local imports keep the module importable without these deps loaded
        # at import time (original style, preserved).
        from django.core.files.base import ContentFile
        import base64
        import six
        import uuid
        # Check if this is a base64 string
        if isinstance(data, six.string_types):
            # Check if the base64 string is in the "data:" format
            if 'data:' in data and ';base64,' in data:
                # Break out the header from the base64 content
                header, data = data.split(';base64,')
            # Try to decode the file. Return validation error if it fails.
            try:
                decoded_file = base64.b64decode(data)
            except TypeError:
                self.fail('invalid_image')
            # Generate file name:
            file_name = str(uuid.uuid4())[:12] # 12 characters are more than enough.
            # Get the file name extension:
            file_extension = self.get_file_extension(file_name, decoded_file)
            complete_file_name = "%s.%s" % (file_name, file_extension, )
            # Wrap the decoded bytes so DRF's ImageField validation can run on it.
            data = ContentFile(decoded_file, name=complete_file_name)
        return super(Base64ImageField, self).to_internal_value(data)
    def get_file_extension(self, file_name, decoded_file):
        """Sniff the image type from the decoded bytes; normalize jpeg -> jpg."""
        # NOTE(review): imghdr was removed in Python 3.13 — confirm the runtime
        # version before upgrading.
        import imghdr
        extension = imghdr.what(file_name, decoded_file)
        extension = "jpg" if extension == "jpeg" else extension
        return extension
class ShopListSerializer(ModelSerializer):
    """Compact shop representation for list endpoints (title, image, link)."""
    url = HyperlinkedIdentityField(
        view_name='shops-api:detail',
        lookup_field='slug'
    )
    image = SerializerMethodField()

    class Meta:
        model = Shop
        fields = [
            'title',
            'image',
            'url',
            'slug'
        ]

    def get_image(self, obj):
        """Return the shop image URL, or None when no file is attached."""
        try:
            return obj.image.url
        except (AttributeError, ValueError):
            # Django's FieldFile raises ValueError when no file is associated.
            # The bare `except:` this replaces also hid unrelated errors.
            return None
class ShopDetailSerializer(ModelSerializer):
    """Full shop representation, including inline comments and products."""
    url = HyperlinkedIdentityField(
        view_name='shops-api:detail',
        lookup_field='slug'
    )
    user = SerializerMethodField()
    image = SerializerMethodField()
    comments = SerializerMethodField()
    products = SerializerMethodField()

    class Meta:
        model = Shop
        fields = [
            'id',
            'user',
            'title',
            'slug',
            'description',
            'image',
            'url',
            'comments',
            'products'
        ]

    def get_user(self, obj):
        """Owner's username as a plain string."""
        return str(obj.user.username)

    def get_image(self, obj):
        """Return the shop image URL, or None when no file is attached."""
        try:
            return obj.image.url
        except (AttributeError, ValueError):
            # Django's FieldFile raises ValueError when no file is associated.
            # The bare `except:` this replaces also hid unrelated errors.
            return None

    def get_comments(self, obj):
        """All comments attached to this shop, serialized inline."""
        qs = Comment.objects.filter(shop=obj.id)
        return CommentSerializer(qs, many=True).data

    def get_products(self, obj):
        """All products attached to this shop, serialized inline."""
        qs = Product.objects.filter(shop=obj.id)
        return ProductSerializer(qs, many=True).data
class ShopCreateUpdateSerializer(ModelSerializer):
    """Create/update serializer; accepts base64-encoded images.

    ``slug`` and ``id`` are read-only: they are generated by the model and
    echoed back in the response.
    """
    image = Base64ImageField(
        max_length=None, use_url=True,
    )

    class Meta:
        model = Shop
        fields = [
            'title',
            'description',
            'image',
            'slug',
            'id'
        ]
        extra_kwargs = {"slug": {"read_only": True},
                        "id": {"read_only": True}}

    def create(self, validated_data):
        """Create a Shop for the requesting user and echo back slug and id."""
        shop_obj = Shop(
            title=validated_data['title'],
            description=validated_data['description'],
            image=validated_data['image'],
            user=validated_data['user']
        )
        shop_obj.save()
        # Surface the model-generated identifiers to the API response.
        validated_data['slug'] = shop_obj.slug
        validated_data['id'] = shop_obj.id
        return validated_data
10722506816 | import sys
from os import path, makedirs
from shutil import rmtree
from charmhelpers.core import hookenv
from hashlib import sha256
from shell import shell
from nginxlib import get_app_path
def download_archive():
    """Download the latest Wordpress tarball, verify its sha256 checksum and
    unpack it into the nginx vhost application path.

    On a checksum mismatch the unit is set to 'blocked' and the hook exits.
    """
    # Get the nginx vhost application path
    app_path = get_app_path()
    config = hookenv.config()
    # Best-effort cleanup of a previous download ('|| true' ignores a missing file).
    shell('rm /tmp/wordpress.tgz || true')
    cmd = ('wget -q -O /tmp/wordpress.tgz '
           'http://wordpress.org/latest.tar.gz')
    hookenv.log("Downloading Wordpress: {}".format(cmd))
    shell(cmd)
    # Verify the archive against the checksum from the charm config.
    with open('/tmp/wordpress.tgz', 'rb') as fp:
        dl_byte = sha256(fp.read())
    if dl_byte.hexdigest() != config['checksum']:
        hookenv.status_set(
            'blocked',
            'Downloaded Wordpress checksums do not match, '
            'possibly because of a new stable release. '
            'Check wordpress.org!')
        sys.exit(0)
    # Recreate the application directory from scratch before extracting.
    if path.isdir(app_path):
        rmtree(app_path)
    makedirs(app_path)
    cmd = ('tar -xf /tmp/wordpress.tgz --strip-components=1 -C {}'.format(
        app_path
    ))
    hookenv.log("Extracting Wordpress: {}".format(cmd))
    shell(cmd)
| adam-stokes/juju-charm-wordpress-hhvm | lib/wordpresslib.py | wordpresslib.py | py | 1,182 | python | en | code | 0 | github-code | 13 |
71446779217 | import speech_recognition as sr
import moviepy.editor as mp
from pathlib import Path
import os
def google_transfer(wavFilePath):
    """Transcribe ``<wavFilePath>.wav`` with the Google Web Speech API (zh)
    and write all candidate transcripts to ``<wavFilePath>.txt``.

    Best-effort: any failure (missing file, network error, empty/odd API
    response) is reported and swallowed so a batch run can continue.
    """
    try:
        recognizer = sr.Recognizer()
        audio = sr.AudioFile(wavFilePath + '.wav')
        with audio as source:
            audio_data = recognizer.record(source)
        # show_all=True returns the raw response dict with every alternative.
        result = recognizer.recognize_google(audio_data, language='zh', show_all=True)
        transcripts = result['alternative']
        print(transcripts)
        # exporting the result
        with open(wavFilePath + '.txt', mode='w') as out_file:
            out_file.write("Recognized Speech:")
            out_file.write("\n")
            for item in transcripts:
                out_file.write("\n")
                out_file.write(item['transcript'])
                out_file.write("\n")
            print("ready!")
    except Exception as exc:
        # Was a bare `except:` that hid the cause (and caught SystemExit too);
        # keep the best-effort behaviour but surface what went wrong.
        print('An exception occurred', exc)
if __name__ == '__main__':
    # Walk exactly one level of sub-directories under the audio root and
    # transcribe every .wav file found there.
    d = "C:\\Users\\wuxig\\PycharmProjects\\TelegramBot\\audios"
    for entry in os.listdir(d):
        sub_dir = os.path.join(d, entry)
        for candidate in os.listdir(sub_dir):
            stem, extension = os.path.splitext(candidate)
            if extension == '.wav':
                # google_transfer appends the '.wav'/'.txt' suffixes itself.
                target = os.path.join(sub_dir, stem)
                print(target)
                google_transfer(target)
| davidyuan666/CaseAudioParser | speechRecongize.py | speechRecongize.py | py | 1,327 | python | en | code | 0 | github-code | 13 |
25943030030 | from Individuo import *
import numpy as np
import math
import random
class IndividuoReal(Individuo):
    """GA individual with a real-valued (float) chromosome.

    Genes live in [min_bound, max_bound]; every crossover/mutation result is
    clamped back into that interval.
    """

    def __init__(self, tam, minB, maxB, fitFunc, funcResultado):
        self.min_bound = minB
        self.max_bound = maxB
        self.cod = "REAL"
        self.cromossomo = self.init_cromossomo(tam)
        self.fitFunc = fitFunc
        self.funcResultado = funcResultado
        self.fit = None

    def _clamp(self, value):
        # Keep a gene inside [min_bound, max_bound].
        if value < self.min_bound:
            return self.min_bound
        if value > self.max_bound:
            return self.max_bound
        return value

    def init_cromossomo(self, tamCrom):
        """Return a chromosome of tamCrom genes sampled uniformly from the bounds."""
        return np.random.RandomState().uniform(self.min_bound, self.max_bound, size=tamCrom)

    def fitness(self):
        return self.fitFunc(self.cromossomo)

    def crossover(self, i2, tipo):
        """Dispatch to the crossover named by tipo ('unif' | 'blx' | 'aritm')."""
        if tipo == "unif":
            return self.crossoverUniformA(i2)
        elif tipo == "blx":
            return self.crossoverBLX(i2)
        elif tipo == "aritm":
            return self.crossoverAritm(i2)
        else:
            raise Exception("Crossover [", tipo, "] indefinido")

    def crossoverUniformA(self, i2):
        """Uniform crossover: each gene pair is swapped with probability 0.5.

        Returns a list with the two resulting chromosomes (numpy arrays).
        """
        crom1 = []
        crom2 = []
        for g1, g2 in zip(self.cromossomo, i2.cromossomo):
            if np.random.random() < 0.5:
                crom1.append(g1)
                crom2.append(g2)
            else:
                crom1.append(g2)
                crom2.append(g1)
        return [np.array(crom1), np.array(crom2)]

    def crossoverBLX(self, i2):
        """BLX-alpha crossover.

        Each child gene is drawn uniformly from
        [min(g1, g2) - a*d, max(g1, g2) + a*d], with d = |g1 - g2|, then
        clamped to the bounds.
        """
        a = 0.5  # alpha parameter in [0, 1]; 0.5 is the usual default
        crom1 = []
        crom2 = []
        for g1, g2 in zip(self.cromossomo, i2.cromossomo):
            di = abs(g1 - g2)
            minB = min(g1, g2) - a * di
            maxB = max(g1, g2) + a * di
            # Bug fix: the first gene of child 2 used to be drawn from
            # uniform(maxB, maxB), i.e. it was always exactly maxB.
            crom1.append(self._clamp(np.random.uniform(minB, maxB)))
            crom2.append(self._clamp(np.random.uniform(minB, maxB)))
        return [np.array(crom1), np.array(crom2)]

    def crossoverAritm(self, i2):
        """Arithmetic crossover: children are complementary convex combinations."""
        a = 0.5  # mixing parameter in [0, 1]
        crom1 = []
        crom2 = []
        for g1, g2 in zip(self.cromossomo, i2.cromossomo):
            crom1.append(self._clamp(a * g1 + (1.0 - a) * g2))
            crom2.append(self._clamp((1.0 - a) * g1 + a * g2))
        return [np.array(crom1), np.array(crom2)]

    def mutacao(self, tx, tipo):
        """Dispatch to the mutation named by tipo ('gauss' | 'delta').

        tx is the independent per-gene mutation probability.
        """
        if tipo == "gauss":
            self.mutacaoGaussiana(tx)
        elif tipo == "delta":
            self.mutacaoDelta(tx)
        else:
            raise Exception("Mutacao[", tipo, "] indefinida")

    def mutacaoGaussiana(self, tx):
        """Gaussian mutation via Box-Muller, centred on the current gene value."""
        std = 0.3  # 0.1 was too small in practice
        for i in range(len(self.cromossomo)):
            if np.random.random() < tx:
                mean = self.cromossomo[i]
                x1 = random.random()
                x2 = random.random()
                if x1 == 0.0:
                    x1 = 1.0  # avoid log(0)
                if x2 == 0.0:
                    x2 = 1.0
                y1 = math.sqrt(-2.0 * math.log(x1)) * math.cos(2.0 * math.pi * x2)
                self.cromossomo[i] = self._clamp(y1 * std + mean)

    def mutacaoDelta(self, tx):
        """Delta mutation: add a uniform step of up to one tenth of the bound
        range to the gene, then clamp."""
        for i in range(len(self.cromossomo)):
            if np.random.random() < tx:
                step = np.random.uniform(self.min_bound, self.max_bound) / 10.0
                self.cromossomo[i] = self._clamp(self.cromossomo[i] + step)
9373485455 | from __future__ import annotations
from ipaddress import IPv4Address, IPv4Network
from cloudshell.cp.core.cancellation_manager import CancellationContextManager
from cloudshell.cp.core.rollback import RollbackCommand, RollbackCommandsManager
from cloudshell.cp.core.utils.name_generator import NameGenerator
from cloudshell.cp.openstack.exceptions import PrivateIpIsNotInMgmtNetwork
from cloudshell.cp.openstack.models import OSNovaImgDeployApp
from cloudshell.cp.openstack.os_api.api import OsApi
from cloudshell.cp.openstack.os_api.models import Instance, Network, Port
from cloudshell.cp.openstack.resource_config import OSResourceConfig
from cloudshell.cp.openstack.utils.instance_helpers import get_mgmt_iface_name
from cloudshell.cp.openstack.utils.udev import get_udev_rules
generate_name = NameGenerator()
class CreateInstanceCommand(RollbackCommand):
    """Rollback-aware command that boots a Nova instance for a deploy app."""
    def __init__(
        self,
        rollback_manager: RollbackCommandsManager,
        cancellation_manager: CancellationContextManager,
        os_api: OsApi,
        deploy_app: OSNovaImgDeployApp,
        resource_conf: OSResourceConfig,
        *args,
        **kwargs,
    ):
        super().__init__(rollback_manager, cancellation_manager, *args, **kwargs)
        self._api = os_api
        self._deploy_app = deploy_app
        self._resource_conf = resource_conf
        # Set once the instance is created, so rollback() knows what to remove.
        self._instance = None
    def _execute(self, *args, **kwargs) -> Instance:
        """Create the instance on the management network and return it."""
        name = generate_name(self._deploy_app.app_name)
        image = self._api.Image.get(self._deploy_app.image_id)
        flavor = self._api.Flavor.find_first(self._deploy_app.instance_flavor)
        mgmt_net = self._api.Network.get(self._resource_conf.os_mgmt_net_id)
        port = None
        if self._deploy_app.private_ip:
            # A requested static IP needs a pre-created port on the mgmt network.
            port = self._get_port_for_private_ip(mgmt_net)
        instance = self._api.Instance.create(
            name,
            image,
            flavor,
            network=mgmt_net,
            port=port,
            availability_zone=self._deploy_app.availability_zone,
            affinity_group_id=self._deploy_app.affinity_group_id,
            user_data=self._prepare_user_data(),
            cancellation_manager=self._cancellation_manager,
        )
        self._instance = instance
        self._set_mgmt_iface_name(instance)
        return instance
    def rollback(self):
        # Only remove what _execute actually managed to create.
        if isinstance(self._instance, Instance):
            self._instance.remove()
    def _prepare_user_data(self) -> str:
        """Combine the app's user-data with auto-generated udev rules."""
        user_data = ""
        if self._deploy_app.user_data:
            user_data = self._deploy_app.user_data
        if self._deploy_app.auto_udev:
            if user_data:
                user_data += "\n"
            user_data += get_udev_rules()
        return user_data
    @staticmethod
    def _set_mgmt_iface_name(inst: Instance) -> None:
        # The freshly booted instance is expected to have exactly one interface.
        ifaces = list(inst.interfaces)
        assert len(ifaces) == 1
        mgmt_iface = ifaces[0]
        mgmt_iface.port.name = get_mgmt_iface_name(inst)
    def _get_port_for_private_ip(self, mgmt_net: Network) -> Port:
        """Create a port with the requested fixed IP on the matching mgmt subnet.

        Raises PrivateIpIsNotInMgmtNetwork when no subnet CIDR contains the IP.
        """
        ip_str = self._deploy_app.private_ip
        ip = IPv4Address(ip_str)
        for subnet in mgmt_net.subnets:
            if ip in IPv4Network(subnet.cidr):
                break
        else:
            raise PrivateIpIsNotInMgmtNetwork(ip_str, mgmt_net)
        return self._api.Port.create(
            "", mgmt_net, fixed_ip=ip_str, fixed_ip_subnet=subnet
        )
| QualiSystems/cloudshell-cp-openstack | cloudshell/cp/openstack/os_api/commands/create_instance.py | create_instance.py | py | 3,442 | python | en | code | 0 | github-code | 13 |
qnt = 0
lista = list()
# Read numbers until the user answers 'N'; only unique values are stored.
# (The original nested `while` never asked to stop after a duplicate, and
# its `break` only left the inner loop, so the program could never end.)
while True:
    num = int(input('digite um número: '))
    if num not in lista:
        lista.append(num)
        qnt += 1
    escolha = str(input('deseja continuar?[S/N] ')).upper()
    if escolha == 'N':
        break
print('você digitou {} elementos'.format(qnt))
lista.sort(reverse=True)
print('os valores em ordem decrescente são {}'.format(lista))
if 5 in lista:
    print('o número 5 faz parte da lista')
else:
    print('o número 5 não faz parte da lista')
| henrique340/pythonProject4 | desafio 81.py | desafio 81.py | py | 497 | python | pt | code | 0 | github-code | 13 |
4937465551 | from itertools import combinations
import random
jugadores = ["Dani", "David", "Enano", "Cocinera", "Alexis", "Gafas", "Mauricio", "Jaimito"]

# Fixture list and per-player standings.
partidos = []
posiciones = {jugador: {"Puntos": 0, "PG": 0, "PE": 0, "PP": 0, "GF": 0, "GC": 0} for jugador in jugadores}

# Round-robin schedule (circle method): keep player 0 fixed, rotate the rest
# after each round.
num_jugadores = len(jugadores)
num_partidos_por_jornada = num_jugadores // 2

if num_jugadores % 2 == 1:
    # With an odd player count add a dummy opponent; facing it means a bye.
    jugadores.append("Descansa")

for i in range(num_jugadores - 1):
    for j in range(num_partidos_por_jornada):
        partidos.append((jugadores[j], jugadores[num_jugadores - 1 - j]))
    jugadores.insert(1, jugadores.pop())
# (The old per-round `jornadas` structure was built but never used, so it was
# removed.)

# Scores are typed in by the user for every fixture.  Using enumerate fixes
# the numbering bug of partidos.index(), which returned the first occurrence
# when two players met more than once.
for numero, partido in enumerate(partidos, 1):
    local, visitante = partido
    goles_local = int(input("Ingrese los goles del equipo local en el partido {}: {} vs {}: ".format(numero, local, visitante)))
    goles_visitante = int(input("Ingrese los goles del equipo visitante en el partido {}: {} vs {}: ".format(numero, local, visitante)))
    posiciones[local]["GF"] += goles_local
    posiciones[visitante]["GF"] += goles_visitante
    posiciones[local]["GC"] += goles_visitante
    posiciones[visitante]["GC"] += goles_local
    if goles_local > goles_visitante:
        posiciones[local]["Puntos"] += 3
        posiciones[local]["PG"] += 1
        posiciones[visitante]["PP"] += 1
    elif goles_local < goles_visitante:
        posiciones[visitante]["Puntos"] += 3
        posiciones[visitante]["PG"] += 1
        posiciones[local]["PP"] += 1
    else:
        posiciones[local]["Puntos"] += 1
        posiciones[visitante]["Puntos"] += 1
        posiciones[local]["PE"] += 1
        posiciones[visitante]["PE"] += 1

# Sort by points, then goal difference.  (An earlier points-only sort was
# immediately overwritten, i.e. dead code — removed.)
posiciones_ordenadas = sorted(posiciones.items(), key=lambda x: (x[1]["Puntos"], x[1]["GF"]-x[1]["GC"]), reverse=True)

print("Tabla de posiciones:\n")
print("{:<10s}{:<5s}{:<5s}{:<5s}{:<5s}{:<5s}{:<5s}".format("Jugador", "Pts", "PG", "PE", "PP", "GF", "GC"))
for jugador, stats in posiciones_ordenadas:
    print("{:<10s}{:<5d}{:<5d}{:<5d}{:<5d}{:<5d}{:<5d}".format(jugador, stats["Puntos"], stats["PG"], stats["PE"], stats["PP"], stats["GF"], stats["GC"]))
| mauricioatm20/Python | resultados y clasificacion.py | resultados y clasificacion.py | py | 3,178 | python | es | code | 0 | github-code | 13 |
21263758894 | from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
mysql = MySQLConnector(app,'mydb')
app.secret_key = 'Brandon'
@app.route('/')
def index():
    """Render the landing page with every stored e-mail address."""
    # Seed session defaults so the template can always read both keys.
    if 'email' not in session:
        session['email'] = ''
    if 'valid' not in session:
        session['valid'] = 'invalid'
    query = "SELECT * FROM Emails"
    emails = mysql.query_db(query)
    return render_template("index.html", emails=emails)
@app.route('/validate', methods=['POST'])
def em():
    # Validate the posted address; persist it only when it matches the pattern.
    submitted = request.form['email']
    pattern = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
    if re.search(pattern, submitted):
        session['email'] = submitted
        session['valid'] = 'valid'
        create()
    else:
        session['valid'] = 'invalid'
    return redirect('/')
def create():
    # Persist the validated address with a server-side creation timestamp.
    insert_query = "INSERT INTO Emails (emails, created_at) VALUES (:emails, NOW())"
    insert_data = {'emails': request.form['email']}
    mysql.query_db(insert_query, insert_data)
app.run(debug=True) | bwal91/Brandon | Python(completed)/myEnvironments/flask_mysql/Email/server.py | server.py | py | 975 | python | en | code | 0 | github-code | 13 |
15219767172 | #!/usr/bin/python
import random
import string
def main():
    """Write three files of 10 random lowercase letters each, echo their
    contents, then print two random ints in [1, 42] and their product."""
    letters = string.ascii_lowercase
    file_names = ["file1.txt", "file2.txt", "file3.txt"]
    # Generate and write the random contents; `with` guarantees the handles
    # are closed (the original leaked them and iterated a *set* of file
    # objects, so the read-back order was nondeterministic).
    for name in file_names:
        content = "".join(random.choice(letters) for _ in range(10))
        with open(name, "w") as f:
            f.write("%s\n" % content)
    # Read the files back and print their contents without the newline,
    # in a deterministic order this time.
    for name in file_names:
        with open(name, "r") as f:
            print(f.read().replace("\n", ""))
    # Two random ints in [1, 42] and their product.
    random_int_1 = random.randint(1, 42)
    random_int_2 = random.randint(1, 42)
    print(random_int_1)
    print(random_int_2)
    print(random_int_1 * random_int_2)
main()
| solorzao/CS344-Operating-Systems | ProgramPy-PythonExploration/mypython.py | mypython.py | py | 1,675 | python | en | code | 0 | github-code | 13 |
18129373492 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 17:11:49 2019
@author: HP
"""
import re
_KEYWORDS = ["class", "method", "function", "constructor", "int", "boolean",
"char", "void", "var", "static", "field", "let", "do", "if",
"else", "while", "return", "true", "false", "null", "this"]
_SYMBOLS = ["{", "}", "[", "]", "(", ")", ".", ",", ";", "+", "-", "*", "/",
"&", "|", "<", ">", "=", "~"]
symbol_set={'<':"<", '>':">", '\'':""", '&':"&"}
def _is_keyword(word):
    """Return True if *word* is a Jack language keyword."""
    return word in _KEYWORDS
def _is_symbol(symbol):
    """Return True if *symbol* is one of the single-character Jack symbols."""
    return symbol in _SYMBOLS
def _is_string(word):
string_regex = re.compile('^\".*\"$')
return not not string_regex.match(word)
def _is_int(word):
int_regex = re.compile('^\d+$')
return not not int_regex.match(word)
def _is_identifier(word):
identifier_regex = re.compile('^\w+$')
return not not identifier_regex.match(word)
def _get_token(word):
    """Classify *word* and return a (token-type, token-text) pair.

    Symbols that clash with XML markup are replaced by their entity escapes.
    Implicitly returns None when no token class matches.
    """
    if _is_keyword(word):
        return "keyword", word
    if _is_symbol(word):
        # Escape XML-reserved characters before they reach the T.xml file.
        return "symbol", symbol_set.get(word, word)
    if _is_string(word):
        return "stringConstant", word
    if _is_int(word):
        return "integerConstant", word
    if _is_identifier(word):
        return "identifier", word
def _slice_command(line):
stripped_line = line.strip()
if not stripped_line:
return ''
is_comment = stripped_line[0] == '*' or stripped_line[0:2] in ['//', '/*']
if is_comment:
return ''
without_comments = line.split('//')[0]
identifier_regex = '\w+'
integer_regex = '\d+'
string_regex = '\".*\"'
keyword_regex = ('class|method|function|constructor|int|boolean|char|void|'
'var|static|field|let|do|if|else|while|return|true|false|'
'null|this')
symbol_regex = '{|}|\[|\]|\(|\)|\.|,|;|\+|-|\*|\/|&|\||<|>|=|~'
composed_regex = r'({}|{}|{}|{}|{})'.format(identifier_regex,
integer_regex,
string_regex,
keyword_regex,
symbol_regex)
return re.finditer(composed_regex, without_comments)
class Tokenizer:
    """Tokenize a .jack source file and write the tokens to ``<name>T.xml``.

    The token list is also kept in ``self.tokens`` as [type, text] pairs
    (string constants remain quoted there; quotes are stripped only in the
    XML output).
    """
    def __init__(self, filepath):
        self.file = open(filepath, 'r')
        self.xmlfile = open(filepath[:-5] + "T.xml", 'w')
        self.tokens = []
        self.xmlfile.write("<tokens>\n")
        for syntax in self.file:
            command = _slice_command(syntax)
            if not command:
                continue
            for word in command:
                word = word.group().strip()
                if not word:
                    continue
                _type, _token = _get_token(word)
                self.tokens.append([_type, _token])
                # Quotes are stripped from string constants for the XML only.
                _token = _token.replace("\"", "")
                self.xmlfile.write("<{}> {} </{}>\n".format(_type, _token, _type))
        self.xmlfile.write("</tokens>")
        self.xmlfile.close()
        # Bug fix: the source file handle was never closed.
        self.file.close()
| naveenls/nand2tetris | Tokenizer.py | Tokenizer.py | py | 3,219 | python | en | code | 0 | github-code | 13 |
17041298174 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundTransCollectSinglemoneytokenCreateModel(object):
    """Request model for alipay.fund.trans.collect.singlemoneytoken.create.

    Plain data holder: every field defaults to None and is serialized by
    :meth:`to_alipay_dict` / rebuilt by :meth:`from_alipay_dict`.
    """

    # Attribute names double as the wire keys of the request payload.
    _FIELDS = (
        'biz_context', 'collect_mode', 'expire_date', 'ext_info',
        'out_biz_no', 'out_channel', 'pay_amount', 'pay_memo',
        'pay_mode', 'payee_user_id',
    )

    def __init__(self):
        self._biz_context = None
        self._collect_mode = None
        self._expire_date = None
        self._ext_info = None
        self._out_biz_no = None
        self._out_channel = None
        self._pay_amount = None
        self._pay_memo = None
        self._pay_mode = None
        self._payee_user_id = None

    @property
    def biz_context(self):
        return self._biz_context
    @biz_context.setter
    def biz_context(self, value):
        self._biz_context = value
    @property
    def collect_mode(self):
        return self._collect_mode
    @collect_mode.setter
    def collect_mode(self, value):
        self._collect_mode = value
    @property
    def expire_date(self):
        return self._expire_date
    @expire_date.setter
    def expire_date(self, value):
        self._expire_date = value
    @property
    def ext_info(self):
        return self._ext_info
    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def out_channel(self):
        return self._out_channel
    @out_channel.setter
    def out_channel(self, value):
        self._out_channel = value
    @property
    def pay_amount(self):
        return self._pay_amount
    @pay_amount.setter
    def pay_amount(self, value):
        self._pay_amount = value
    @property
    def pay_memo(self):
        return self._pay_memo
    @pay_memo.setter
    def pay_memo(self, value):
        self._pay_memo = value
    @property
    def pay_mode(self):
        return self._pay_mode
    @pay_mode.setter
    def pay_mode(self, value):
        self._pay_mode = value
    @property
    def payee_user_id(self):
        return self._payee_user_id
    @payee_user_id.setter
    def payee_user_id(self, value):
        self._payee_user_id = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict.

        Falsy fields (None, '', 0) are omitted, matching the generated SDK
        convention; values exposing ``to_alipay_dict`` are converted.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Inverse of to_alipay_dict; returns None for a falsy input dict."""
        if not d:
            return None
        o = AlipayFundTransCollectSinglemoneytokenCreateModel()
        for name in AlipayFundTransCollectSinglemoneytokenCreateModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
print("this file is deprecated")
# Bug fix: a bare `exit` expression is a no-op, so the deprecated code below
# still executed.  Raise SystemExit explicitly to actually stop the script.
raise SystemExit(0)
import argparse as parse
import numpy as np
import plotly.graph_objects as go
import os
import permittivitycalc as pc
import src.plot_layout as plot_layout
import src.agent as agent
import scipy.signal as signal
import datetime
time=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
file_name = '/Volumes/tianjie 1/code/research/panglin_prj/data/test_2023_3_27/SL.s2p'
for _airline in ['VAL']:
for _l in range(1,10):
net,airline = agent.get_airline(file_name,L=_l,airline=_airline,density=2.1,net_f_unit='ghz')
_dir = f'result/SL_{time}/{_airline}/{_l}'
os.system('mkdir -p '+_dir)
x_ = net.f/1e9
fig = agent.draw_common(y=[net.s_db[:,0,0],net.s_db[:,0,1],net.s_db[:,1,0],net.s_db[:,1,1]],x=[x_,x_,x_,x_],name_list=['S11','S12','S21','S22'],x_title='Frequency(GHz)',y_title='S11(db)')
fig.write_image(f'{_dir}/S.png')
fig2 = agent.draw_common(y=[net.s_deg[:,0,0],net.s_deg[:,0,1],net.s_deg[:,1,0],net.s_deg[:,1,1]],x=[x_,x_,x_,x_],name_list=['S11','S12','S21','S22'],x_title='Frequency(GHz)',y_title='Phase(deg)',color=plot_layout.color['jianbian'][0])
fig2.write_image(f'{_dir}/S_phase.png')
ans = airline.avg_dielec
ans = signal.savgol_filter(ans, 101, 3)
fig3 = agent.draw_common(y=[ans],x=[x_],name_list=['Primitivity'],x_title='Frequency(GHz)',y_title='Permitivity',show_legend=False)
fig3.write_image(f'{_dir}/pri.png')
ans = airline.avg_mu_real
ans = signal.savgol_filter(ans, 101, 3)
fig4 = agent.draw_common(y=[ans],x=[x_],name_list=['Permitivity'],x_title='Frequency(GHz)',y_title='Permeability_Real',show_legend=False)
fig4.write_image(f'{_dir}/per_real.png')
ans = airline.avg_mu_imag
ans = signal.savgol_filter(ans, 101, 3)
fig5 = agent.draw_common(y=[ans],x=[x_],name_list=['Permitivity'],x_title='Frequency(GHz)',y_title='Permeability_Imag',show_legend=False)
fig5.write_image(f'{_dir}/per_imag.png')
#denoise additive noise in time domain
# https://stackoverflow.com/questions/20618804/how-to-smooth-a-curve-in-the-right-way
# signal filtering scipy.signal
#explain the flow of permittivity calculation3
# 1. read the s2p file
# 2. calculate the s-parameters
# 3. calculate the permittivity and permeability
# 4. calculate the average permittivity and permeability
# 5. plot the s-parameters
# 6. plot the permittivity and permeability
# 7. plot the average permittivity and permeability
| zueskalare/panglin_prj | .trash/outpt.py | outpt.py | py | 2,557 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
# @Time : 2023/9/26 16:19
# @Author : nanji
# @Site :
# @File : testHandWriteDigit.py
# @Software: PyCharm
# @Comment : 3. Performance metrics -- logistic regression + handwritten digit classification
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Load the 8x8 digit images, standardise the features, stratified 70/30 split.
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, shuffle=True, stratify=y)
# Multi-class classification with logistic regression
from sklearn.linear_model import LogisticRegression
model_lg = LogisticRegression()  # logistic regression model instance
model_lg.fit(X_train, y_train)  # fit on the training samples
y_test_pred = model_lg.predict(X_test)  # predict on the held-out test samples
# Compute individual performance metrics
from sklearn import metrics
# test_accuracy=metrics.accuracy_score(y_test,y_test_pred)
# print(test_accuracy)
# print('0'*100)
# macro=metrics.precision_score(y_test,y_test_pred,average="macro")
# print(macro)
# print('1'*100)
# micro=metrics.recall_score(y_test,y_test_pred,average="micro")
# print(micro)
# print('2'*100)
# f1_weight=metrics.f1_score(y_test,y_test_pred,average='micro')
# print(f1_weight)
# print('3'*100)
# macro=metrics.f1_score(y_test,y_test_pred,average='macro')
# print(macro)
# print('4'*100)
# f1_weighted=metrics.f1_score(y_test,y_test_pred,average='weighted')
# print(f1_weighted)
# print('5'*100)
# fbeta=metrics.fbeta_score(y_test,y_test_pred,average='macro',beta=1)
# print(fbeta)
# Plot the confusion matrix
import matplotlib.pyplot as plt
# fig,ax=plt.subplots(figsize=(10,8))
# target_names=[] # class display names
# for i in range(10):
# target_names.append('n'+str(i))
# plot_confusion_matrix(model_lg,X_test,y_test,display_labels=target_names,cmap=plt.cm.Reds,ax=ax)
# plt.show()
# print('-'*100)
# print(metrics.confusion_matrix(y_test, y_test_pred))
# print('-'*100)
# print(metrics.classification_report(y_test, y_test_pred, target_names=target_names))
# print('4'*100)
# prfs=metrics.precision_recall_fscore_support(y_test,y_test_pred,beta=1,average=None)
# print(prfs)
# Recompute the classification-report numbers by hand from the confusion matrix.
cm = metrics.confusion_matrix(y_test, y_test_pred)
print(cm)
import numpy as np
print('0' * 100)
# Per-class precision: diagonal over column sums (predicted counts).
precision = np.diag(cm) / np.sum(cm, axis=0)
print(precision)
# Per-class recall: diagonal over row sums (true counts).
recall = np.diag(cm) / np.sum(cm, axis=1)
print('1' * 100)
print(recall)
f1_score = 2 * recall * precision / (recall + precision)
print(f1_score)
support = np.sum(cm, axis=1)  # per-class support (sample counts)
print('2' * 100)
print(support)
print('3' * 100)
print(np.sum(support))  # total number of samples
print('4' * 100)
accuracy = np.sum(np.diag(cm)) / np.sum(cm)  # accuracy
print(accuracy)
# Macro precision, macro recall, macro-F1
macro_avg = [precision.mean(), recall.mean(),
             2 * precision.mean() * recall.mean() / (precision.mean() + recall.mean())]
# Weighted precision, weighted recall, weighted-F1
support_all = np.sum(support)
weight = support / support_all
weight_avg = [np.sum(weight * precision), np.sum(weight * recall),
              (np.sum(weight * f1_score))]
import pandas as pd
metrics1=pd.DataFrame(np.array([precision,recall,f1_score,support]).T,
                      columns=['precision','recall','f1_score','support'])
metrics2=pd.DataFrame([['','','',''],['','',accuracy,support_all],
                       np.hstack([macro_avg]),
                       np.hstack([weight_avg,support_all])],
                      columns=['precision','recall','f1_score','support'])
metrics=pd.concat([metrics1,metrics2],ignore_index=True)
target_names=[]  # class display names used as the report's row index
for i in range(10):
    target_names.append("n"+str(i))
# NOTE(review): target_names has 11 entries after this extend, but `metrics`
# has 14 rows (10 classes + 4 summary rows) — the index assignment below
# looks like it would raise a length-mismatch error; verify.
target_names.extend([''])
metrics.index=target_names
print('2'*100)
print(metrics)
| lixixi89055465/py_stu | machinelearn/stu02/testHandWriteDigit.py | testHandWriteDigit.py | py | 3,754 | python | en | code | 1 | github-code | 13 |
25497964151 | from fastapi import APIRouter, Depends, HTTPException
from api.dependencies import (
get_sys_map_service,
get_sys_map_update_service,
get_audit_log_service,
)
from schemas.system_mapping_schema import SystemMappingCurrent, SystemMappingUpdates
from api.requests import system_mapping_requests
from services.system_mapping_service import (
SystemMappingService,
SystemMappingUpdateService,
)
from api.requests.audit_log_requests import CreateAuditRequest
from services.audit_log_service import AuditLogService
from datetime import datetime
from typing import List
router = APIRouter(prefix="/system-mapping", tags=["System Mapping endpoints"])
# read operations
@router.get("/live", response_model=List[SystemMappingCurrent])
def get_all_current(
    skip: int = 0,
    limit: int = 100,
    sys_map_service: SystemMappingService = Depends(get_sys_map_service),
):
    """Return a paginated list of the live (current) system-mapping rows."""
    sys_map_data = sys_map_service.get_all(skip=skip, limit=limit)
    return sys_map_data
@router.get("/live/{hydraulic_system_name}", response_model=SystemMappingCurrent)
def get_current_by_name(
    hydraulic_system_name: str,
    sys_map_service: SystemMappingService = Depends(get_sys_map_service),
):
    """Return the single live system-mapping row for *hydraulic_system_name*."""
    sys_map_data = sys_map_service.get_by_hydraulic_name(hydraulic_system_name)
    return sys_map_data
@router.post("/live", response_model=SystemMappingCurrent)
def create_new_system_map(
    create_sysmap_request: system_mapping_requests.CreateNewSystemMapLive,
    sys_map_service: SystemMappingService = Depends(get_sys_map_service),
    audit_service: AuditLogService = Depends(get_audit_log_service),
):
    """Create a new live system-mapping row and record a matching audit event.

    Raises HTTP 400 when the service rejects the request with ValueError.
    """
    # TODO(review): replace debug print()s with proper logging.
    print(f"Create New System Map Request: \n{create_sysmap_request}")
    try:
        new_sysmap_obj = sys_map_service.create_new_entry(create_sysmap_request)
        print(f"New System Map Object: \n{new_sysmap_obj}")
        new_audit_event = CreateAuditRequest(
            table_altered="pcp_poc_system_mapping",
            columns_altered="hydraulic_system_name;area_name;region_name;odmt_area_id",
            event_type="New Current System Map Entry",
            previous_value="None;None;None;None",
            updated_value=f"{new_sysmap_obj.hydraulic_system_name};{new_sysmap_obj.area_name};{new_sysmap_obj.region_name};{new_sysmap_obj.odmt_area_id}",
            # TODO(review): actor is a hard-coded placeholder — wire in the
            # authenticated user instead.
            actor="Gear5th@Wano.com",
            event_date=datetime.now(),
            status="Added to Live",
            pushed_to_live_date=datetime.now(),
            row_altered=str(new_sysmap_obj.hydraulic_system_name)
        )
        audit_service.create_new_event(new_audit_event)
        return new_sysmap_obj
    except ValueError as e:
        print(f"Error: {e}")
        raise HTTPException(status_code=400, detail=f"Error: {e}")
# read from updates
@router.get("/updates", response_model=List[SystemMappingUpdates])
def get_all_updates(
    skip: int = 0,
    limit: int = 100,
    sys_map_update_service: SystemMappingUpdateService = Depends(
        get_sys_map_update_service
    ),
):
    """Return a paginated list of the staged (pending) system-mapping updates."""
    sys_map_data = sys_map_update_service.get_all(skip=skip, limit=limit)
    return sys_map_data
@router.get("/updates/{hydraulic_system_name}", response_model=SystemMappingUpdates)
def get_update_by_name(
    hydraulic_system_name: str,
    sys_map_update_service: SystemMappingUpdateService = Depends(
        get_sys_map_update_service
    ),
):
    """Return the staged update row for *hydraulic_system_name*."""
    sys_map_data = sys_map_update_service.get_by_hydraulic_name(hydraulic_system_name)
    return sys_map_data
# create in updates pending table
@router.post("/updates", response_model=SystemMappingUpdates)
def create_update_entry(
    create_request: system_mapping_requests.CreateSystemMapUpdate,
    sys_map_update_service: SystemMappingUpdateService = Depends(
        get_sys_map_update_service
    ),
    audit_service: AuditLogService = Depends(get_audit_log_service),
):
    """Stage a new system-mapping update (status "Pending") and audit it.

    Raises HTTP 400 when the service rejects the request with ValueError.
    """
    try:
        new_sys_map_entry = sys_map_update_service.create_new_update(create_request)
        # TODO(review): replace debug print() with proper logging.
        print(f"New System Map Entry: \n{new_sys_map_entry}")
        # create an event based on this
        new_audit_event = CreateAuditRequest(
            table_altered="pcp_poc_system_mapping_updates",
            event_type="New System Mapping Update Entry",
            # TODO(review): previous/updated values, actor and columns are
            # placeholder strings ("updated", "col1;col2;") — fill in real data.
            previous_value="None",
            updated_value="updated",
            actor="CreateTest@testuser.com",
            event_date=datetime.now(),
            columns_altered="col1;col2;",
            status="Pending",
            pushed_to_live_date=None,
            row_altered=str(new_sys_map_entry.hydraulic_system_name)
        )
        audit_service.create_new_event(new_audit_event)
        return new_sys_map_entry
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
# update in updates
@router.put("/updates/{update_id}", response_model=SystemMappingUpdates)
def update_existing_entry(
    update_id: int,
    update_request: system_mapping_requests.UpdateSystemMapUpdate,
    sys_map_update_service: SystemMappingUpdateService = Depends(get_sys_map_update_service),
    audit_service: AuditLogService = Depends(get_audit_log_service),
):
    """Edit an already-staged update row by id and audit the edit.

    Raises HTTP 400 when the service rejects the request with ValueError.
    """
    try:
        sys_map_data = sys_map_update_service.update_existing_entry(update_id, update_request)
        new_audit_event = CreateAuditRequest(
            table_altered="pcp_poc_system_mapping_updates",
            event_type="Edited Existing Staged Update",
            # TODO(review): placeholder audit payload ("updated", "col1;col2;",
            # test actor) — record the real before/after values and user.
            previous_value="None",
            updated_value="updated",
            actor="CreateTest@testuser.com",
            event_date=sys_map_data.date_updated,
            columns_altered="col1;col2;",
            status="Pending",
            pushed_to_live_date=None,
            row_altered=str(sys_map_data.id)
        )
        audit_service.create_new_event(new_audit_event)
        return sys_map_data
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
| gerald-eai/product-config-poc | pcp-poc-app/server/src/api/router/system_mapping_endpoints.py | system_mapping_endpoints.py | py | 5,917 | python | en | code | 0 | github-code | 13 |
# CLI: sendkafka.py [day-filename] [bootstrap-server] [topic]
# Streams CSV rows from stdin to a Kafka topic, one flattened line per record.
from json import dumps
from kafka import KafkaProducer
import sys
import re
import csv
import sys  # NOTE(review): duplicate import of sys (harmless)
# argv[1], when present, is a filename whose embedded date becomes the
# record date, rebuilt as "YYYY-MM-DDT00:00:00" from fixed filename offsets.
if len(sys.argv)>1:
    day = sys.argv[1]
    day = day[-8:-4] + '-' + day[-14:-9] + 'T00:00:00'
else:
    day = None
# argv[2]: Kafka bootstrap server (default localhost:9092).
if len(sys.argv)>2:
    bserver = sys.argv[2]
else:
    bserver = "localhost:9092"
# argv[3]: destination topic (default corona_cases).
if len(sys.argv)>3:
    topic = sys.argv[3]
else:
    topic = 'corona_cases'
# Producer serialises each outgoing string as UTF-8 bytes.
producer = KafkaProducer(bootstrap_servers=[bserver],
        value_serializer=lambda x: x.encode('utf-8') )
def main():
    """Read CSV rows from stdin, send each flattened row to Kafka, then flush."""
    count = 0   # rows successfully sent
    counta = 0  # rows read from stdin
    # for line in sys.stdin:
    for row in csv.DictReader(iter(sys.stdin.readline, '')):
        counta = counta + 1
        line = parseRow(row)
        # line = "$$$"
        # print("-" * 20)
        data = line.rstrip()
        print("Send: " + data)
        # if count < 4:
        producer.send(topic, value=data)
        # if count == 2:
        # producer.flush()
        count = count + 1
        print("("+ str(count) +"/" + str(counta) + ") lines sent to kafka")
    # data = "$$$"
    # producer.send(topic, value=data)
    # data = "$$$,,,,,,,"
    # print("Send finished signal:" + data)
    # producer.send(topic, value=data)
    # sys.exit(count)
    producer.flush()
    producer.close()
# Column-name lookup: JHU daily-report CSVs renamed their headers over time,
# so each logical field maps to an old name and (suffix "2") a new name.
COLS = {'state':'Province/State', 'country':'Country/Region', 'county': 'Admin2', 'date':'Last Update', \
    'confirm':'Confirmed', 'death':'Deaths', 'recov':'Recovered', 'lat':'Latitude', 'long':'Longitude', \
    'state2':'Province_State', 'country2':'Country_Region', 'date2':'Last_Update'} # variation \
# Country/Region,Province/State,County,Last Update,Confirmed,Deaths,Recovered,Latitude,Longitude

def parseRow(row):
    """Flatten one CSV row (dict) into the canonical comma-joined record.

    Field order: country,state,county,date,confirmed,deaths,recovered,lat,long.
    Missing columns become empty strings; old and new header spellings are
    both accepted via COLS.
    """
    # Prefer the command-line date (module global `day`); otherwise take
    # whichever last-update column this CSV revision provides.
    _day = day or row.get(COLS['date'], row.get(COLS['date2'], ''))
    # BUG FIX: removed leftover debug output: print("aaa", _day)
    fields = [
        row.get(COLS['country'], row.get(COLS['country2'], '')),
        row.get(COLS['state'], row.get(COLS['state2'], '')),
        row.get(COLS['county'], ''),
        _day,
        row.get(COLS['confirm'], ''),
        row.get(COLS['death'], ''),
        row.get(COLS['recov'], ''),
        row.get(COLS['lat'], ''),
        row.get(COLS['long'], ''),
    ]
    return ','.join(fields)
# Entry point: stream stdin CSV rows to Kafka.
main()
| knguyen93/cs523 | python/kafka/pycode/sendkafka.py | sendkafka.py | py | 2,385 | python | en | code | 0 | github-code | 13 |
28904001718 | # coding:utf-8
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from bs4 import BeautifulSoup
import requests
import MeCab as mc
import os
def mecab_analysis(text):
    """Tokenise Japanese *text* with MeCab and return the content words.

    Keeps only adjectives, verbs, nouns and adverbs (the surface forms),
    using the mecab-ipadic-neologd dictionary.
    """
    t = mc.Tagger("-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd/")
    t.parse('')
    node = t.parseToNode(text)
    output = []
    while node:
        if node.surface != "":  # exclude the BOS/EOS header and footer nodes
            word_type = node.feature.split(",")[0]
            # Part-of-speech filter: adjective, verb, noun, adverb.
            if word_type in ["形容詞", "動詞","名詞", "副詞"]:
                output.append(node.surface)
        node = node.next
        if node is None:
            break
    return output
def get_wordlist_from_QiitaURL(url):
    """Fetch a Qiita article and return its MeCab-analysed word list."""
    response = requests.get(url)
    parsed = BeautifulSoup(response.text, "lxml")
    body_text = parsed.body.section.get_text()
    cleaned = body_text.replace('\n', '').replace('\t', '')
    return mecab_analysis(cleaned)
def create_wordcloud(text):
    """Render *text* as a word cloud and show it with matplotlib."""
    # Japanese-capable font file for the cloud.
    fpath = "/usr/share/fonts/FLOPDESIGN-FONT/FlopDesignFONT.otf"
    # Stop-word configuration: common Japanese function words to exclude.
    stop_words = [ 'てる', 'いる', 'なる', 'れる', 'する', 'ある', 'こと', 'これ', 'さん', 'して', \
             'くれる', 'やる', 'くださる', 'そう', 'せる', 'した', '思う', \
             'それ', 'ここ', 'ちゃん', 'くん', '', 'て','に','を','は','の', 'が', 'と', 'た', 'し', 'で', \
             'ない', 'も', 'な', 'い', 'か', 'ので', 'よう', '', 'れ','さ','なっ']
    wordcloud = WordCloud(background_color="black",font_path=fpath, width=900, height=500, \
                          stopwords=set(stop_words)).generate(text)
    #wordcloud = WordCloud(background_color="black", width=900, height=500, \
    # stopwords=set(stop_words)).generate(text)
    plt.figure(figsize=(15,12))
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show()
url = "http://qiita.com/minagoro0522/items/b2350bab800eddaecad3"
wordlist = get_wordlist_from_QiitaURL(url)
print(wordlist)
create_wordcloud(" ".join(wordlist)) | pauwau/workspace | Environment_BD/Senseless/2channel/badword/makeWordCloud.py | makeWordCloud.py | py | 2,025 | python | en | code | 0 | github-code | 13 |
# URL configuration for the `web` app: template views plus DRF API routes.
from django.urls import path
from django.views.generic import TemplateView
from rest_framework.documentation import include_docs_urls
from rest_framework import routers
from . import views
app_name = 'web'
urlpatterns = [
    path('', views.home, name='home'),
    # NOTE(review): this second '' pattern shares the same path as 'home'
    # above; Django matches the first, so this one is unreachable by URL
    # (though still reversible by name) — confirm intended.
    path('', views.ReactView.as_view(), name='react_object_home'),
    path('terms', TemplateView.as_view(template_name="web/terms.html"), name='terms'),
    path('404', TemplateView.as_view(template_name='404.html'), name='404'),
    path('500', TemplateView.as_view(template_name='500.html'), name='500'),
    # path('api/dwollav2/customers', views.DwollaCustomersAPIView.as_view(), name='customers'),
    # path('api/dwollav2/customers/<int:id>', views.DwollaCustomerAPIView.as_view(), name='customers'),
    path('docs/', include_docs_urls(title='My API service'), name='api-docs'),
]
# drf config: viewset routes are appended to urlpatterns below.
router = routers.DefaultRouter()
router.register('api/dwollav2/customers', views.DwollaCustomerViewSet)
router.register('api/dwollav2/plaid', views.PlaidApiViewSet)
router.register('api/dwollav2/funding_sources', views.DwollaFundingSourceViewSet)
router.register('api/dwollav2/transfer_sources', views.DwollaTransferSourceViewSet)
urlpatterns += router.urls
| hittapa63/django-finance-dwolla-plain | apps/web/urls.py | urls.py | py | 1,225 | python | en | code | 0 | github-code | 13 |
#1 calculate & print the value of function y = 2x^2 + 2x + 2 for x=[56, 57, ... 100] (0.5p)
import math
for i in range(56, 101):
    print('The value of function for i=', i, 'is', 2*i**2+2*i+2)
#2 ask the user for a number and print its factorial (1p)
print('Insert your value here:')
x = int(input())
factorial = 1
# NOTE(review): the factorial is computed twice — once via math.factorial
# and again by the manual loop below.
print('your factorial is: ', math.factorial(x))
for i in range(1, x+1):
    factorial = factorial*i
    # Printed inside the loop, so every running product 1!, 2!, ... x! is shown.
    print(factorial)
#3 write a function which takes an array of numbers as an input and finds the lowest value. Return the index of that element and its value (1p)
array = [14, 1, 3, 4, 5, 1, 2, 1]

def my_function(arr):
    """Find the lowest value in *arr*, print it with its indices, and return it.

    :param arr: non-empty sequence of numbers.
    :return: tuple (min_value, indices) — the minimum and every position
        holding it. (The original only printed; returning fulfils the task
        statement "Return the index of that element and its value", and is
        backward-compatible since the old return value was unused.)
    """
    min_value = min(arr)
    indices = [i for i, v in enumerate(arr) if v == min_value]
    print('The lowest value in the array:', min_value, 'and the index is:',)
    for i in indices:
        print(i)
    return min_value, indices

my_function(array)
| mstolars/maja_stolarska_231016 | maja_stolarska_zadania/lab1/1_3_zadania.py | 1_3_zadania.py | py | 851 | python | en | code | 0 | github-code | 13 |
"""Interactive explorer for a FIFA 19 players CSV (data.csv)."""
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('data.csv')  # reading the CSV data file
print(list(data.columns.values))  # show the variables the user can choose from
x = str(input("Select the x-axis variable "))  # choosing the first variable
chart_type = str(input("Type of graph? Scatterplot(Type S) Pie Chart (Type P) Distribution Histogram (Type H) Bar Chart of Top 10 (Type B) "))  # type of data visualization
if chart_type == 'P':  # pie chart of value frequencies
    plt.pie(data[x].value_counts(), labels=data[x].value_counts().index)
    plt.show()
if chart_type == 'H':  # distribution histogram plus summary statistics
    plt.hist(data[x])
    plt.show()
    # BUG FIX: "...%s...%s" % x supplied one argument for two placeholders
    # (TypeError), and .values (an ndarray) has no .median()/.mode() methods;
    # format with a tuple and use the pandas Series statistics instead.
    print("The mean %s of players in FIFA 19 is %s" % (x, data[x].mean()))
    print("The median %s of players in FIFA 19 is %s" % (x, data[x].median()))
    print("Most players in FIFA 19 have a %s of %s" % (x, data[x].mode()[0]))
if chart_type == 'S':  # scatterplot of two variables
    y = str(input("Select the y-axis variable "))  # y-axis variable
    plt.scatter(data[x], data[y], c="r")
    plt.title("%s against %s" % (x, y))
    plt.xlabel(x)
    plt.ylabel(y)
    plt.gca().invert_yaxis()
    plt.show()
    # BUG FIX: x and y are column *names* (strings) — strings have no .corr;
    # correlate the two columns instead.
    corr_cof = data[x].corr(data[y])
    if (abs(corr_cof) > 0.7):
        print("There is a strong correlation between %s and %s" % (x, y))
    else:
        print("There is a weak correlation between %s and %s" % (x, y))
if chart_type == 'B':  # bar chart of the 10 most frequent values
    plt.bar(data[x].value_counts().head(10).index, data[x].value_counts().head(10).values)
    plt.show()
input()  # keep the window/console open until the user presses Enter
| towseefhossain/Pandas_Fifa19 | FIFA.py | FIFA.py | py | 1,561 | python | en | code | 0 | github-code | 13 |
38221413982 | from typing import Callable, Optional
import time
from pyhazel.config import *
from dataclasses import dataclass
from io import TextIOWrapper
from functools import wraps
from threading import Lock
import time
import json
__all__ = [
"HZ_PROFILE_BEGIN_SESSION",
"HZ_PROFILE_END_SESSION",
"HZ_PROFILE_SCOPE",
"HZ_PROFILE_FUNCTION"
]
NANO_TO_MICRO_SECONDS_SCALE_FACTOR = 0.001
@dataclass
class ProfileResult:
    """One completed timing measurement, written into the trace JSON."""
    name: str  # label shown for this event
    start: int  # start timestamp (microseconds — see NANO_TO_MICRO_SECONDS_SCALE_FACTOR)
    end: int  # end timestamp (microseconds)
    thread_id: int  # reporting thread id (this module always passes 0)
@dataclass
class InstrumentationSession:
    """Marker object for a currently-open profiling session."""
    name: str  # session name passed to Instrumentor.begin_session()
class Instrumentor:
    """Singleton collector that accumulates profile events and writes them
    out as a chrome://tracing-style JSON file when the session ends.

    All session/profile operations are serialised with a mutex.
    """
    __instance = None
    def __init__(self) -> None:
        self.current_session: Optional[InstrumentationSession] = None
        self.fp: TextIOWrapper = None  # open output file while a session is active
        self.output: dict = {}  # in-memory trace document (dumped on end_session)
        self.mutex = Lock()
    @classmethod
    def get(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls.__instance is None:
            cls.__instance = cls()
        return cls.__instance
    def begin_session(self, name: str, filepath: str = "results.json"):
        """Open a new profiling session writing to *filepath*."""
        with self.mutex:
            if self.current_session is not None:
                # If there is already a current session, then close it before beginning new one.
                # Subsequent profiling output meant for the original session will end up in the
                # newly opened session instead. That's better than having badly formatted
                # profiling output.
                print(
                    f"Instrumentor::BeginSession('{name}') when session '{self.current_session.name}' already open.")
            else:
                self.fp = open(filepath, "w")
                self.current_session = InstrumentationSession(name)
                self.write_header()
    def end_session(self):
        """Close the current session, flushing the trace JSON to disk."""
        with self.mutex:
            self.__internal_end_session()
    def write_profile(self, result: ProfileResult):
        """Append one timing event to the in-memory trace (thread-safe)."""
        event = {
            "cat": "function",
            "dur": (result.end - result.start),
            "name": result.name,
            "ph": "X",
            "pid": 0,
            "tid": result.thread_id,
            "ts": result.start
        }
        with self.mutex:
            if self.current_session is not None:
                self.output["traceEvents"].append(event)
    def write_header(self):
        # Reset the trace document skeleton for a fresh session.
        self.output = {"otherData": {}, "traceEvents": []}
    def write_footer(self):
        # Serialise the accumulated trace document to the session file.
        json.dump(self.output, self.fp)
    def __internal_end_session(self):
        # Caller must already hold self.mutex.
        if self.current_session is not None:
            self.write_footer()
            self.fp.close()
            self.current_session = None
            self.output = {}
class InstrumentationTimer:
    """Context manager that times a scope and reports it to the Instrumentor.

    Timestamps come from time.perf_counter_ns(), converted to microseconds
    via NANO_TO_MICRO_SECONDS_SCALE_FACTOR.
    """
    def __init__(self, name: str) -> None:
        self.name: str = name
        self.start_time = time.perf_counter_ns() * NANO_TO_MICRO_SECONDS_SCALE_FACTOR
    def __enter__(self):
        return self
    def __exit__(self, exc_type, value, traceback):
        # BUG FIX: the timing entry is now recorded even when the body raises
        # (previously the early `return` dropped it silently), and the stray
        # `return True` on the success path is gone. Returning False never
        # suppresses an exception, so errors still propagate unchanged.
        Instrumentor.get().write_profile(
            ProfileResult(
                name=self.name,
                start=self.start_time,
                end=time.perf_counter_ns() * NANO_TO_MICRO_SECONDS_SCALE_FACTOR,
                thread_id=0
            )
        )
        return False
class NullInstrumentationTimer:
    """Do-nothing stand-in used when instrumentation is disabled."""
    def __init__(self) -> None:
        pass
    def __enter__(self):
        pass
    def __exit__(self, exc_type, exc_value, traceback):
        # Mirror the real timer's context-manager protocol without recording
        # anything; exceptions propagate (False) exactly as before.
        return exc_value is None
# ==========
# Client API
# ==========
def HZ_PROFILE_BEGIN_SESSION(name: str, filepath: str):
    """Open a named profiling session writing to *filepath* (no-op when disabled)."""
    if INSTRUMENTATION_ENABLED:
        Instrumentor.get().begin_session(name, filepath)
def HZ_PROFILE_END_SESSION():
    """Close the active profiling session (no-op when disabled)."""
    if INSTRUMENTATION_ENABLED:
        Instrumentor.get().end_session()
def HZ_PROFILE_SCOPE(name: str):
    """Return a timing context manager for *name*, or a no-op one when disabled."""
    if not INSTRUMENTATION_ENABLED:
        return NullInstrumentationTimer()
    return InstrumentationTimer(name)
def HZ_PROFILE_FUNCTION(func: Callable):
    """Decorator that times each call of *func* when instrumentation is on."""
    @wraps(func)
    def profiler(*args, **kwargs):
        if not INSTRUMENTATION_ENABLED:
            return func(*args, **kwargs)
        with InstrumentationTimer(func.__qualname__):
            return func(*args, **kwargs)
    return profiler
| twje/pyhazel | src/pyhazel/debug/instrumentor.py | instrumentor.py | py | 4,220 | python | en | code | 2 | github-code | 13 |
15483207134 | import numpy as np
from math import floor
import scipy.ndimage as ndimage
def postprocess(surface):
    """Normalise, gamma-compress, smooth and L2-normalise a surface.

    Steps: divide by the max, raise to the 0.75 power, apply a Gaussian
    blur (sigma=2), then scale to unit Frobenius norm.
    """
    alpha = 0.75
    scaled = surface / np.amax(surface)
    compressed = np.power(scaled, alpha)
    smoothed = ndimage.gaussian_filter(compressed, sigma=2, order=0)
    return smoothed / np.linalg.norm(smoothed)
def binning(x, eta):
    """Return the integer bin index of x: floor((eta * x / 1200.0) mod eta)."""
    return floor((eta * x / 1200.0) % eta)
def generate_surface(normalized_pitch_profile, eta, tau):
    """Build an eta x eta recurrence surface from a profile and lag tau.

    s[i][j] counts positions t where bin(profile[t + tau]) == i and
    bin(profile[t]) == j, computed via two indicator matrices and a dot
    product.

    NOTE(review): mutates normalized_pitch_profile in place (c aliases it),
    replacing each value with its bin index via binning() — which hard-codes
    the 1200 scale; verify callers expect this side effect.
    """
    N = len(normalized_pitch_profile)
    c = normalized_pitch_profile
    # Quantise every sample into one of eta bins (in place).
    for i in range(len(c)):
        c[i] = binning(c[i], eta)
    # Indicator matrix: second[j][t] == 1 iff c[t] falls in bin j (t < N-tau).
    c_for_j = c[: N - tau]
    inter_matrix_c_for_j = np.reshape(np.repeat(c_for_j, eta), [N - tau, eta])
    inter_matrix_j = np.tile(range(eta), [N - tau, 1])
    # Indicator matrix: first[i][t] == 1 iff c[t + tau] falls in bin i.
    c_for_i = c[tau:]
    inter_matrix_c_for_i = np.tile(c_for_i, [eta, 1])
    inter_matrix_i = np.reshape(np.repeat(range(eta), N - tau), [eta, N - tau])
    first = (inter_matrix_i == inter_matrix_c_for_i)
    second = (inter_matrix_j == inter_matrix_c_for_j)
    # Co-occurrence counts over all lags: (eta x N-tau) . (N-tau x eta).
    s = np.dot(first.astype(int), second.astype(int))
    return s
if __name__ == '__main__':
    # Sanity check: smoke-test inputs for generate_surface (the call's
    # result is discarded).
    normalized = [1, 2, 3, 4, 5, 6, 7, 8, 333, 876]
    eta = 5
    tau = 2
generate_surface(normalized,eta,tau) | parakalan/RagaRecognition | surface_generation.py | surface_generation.py | py | 1,284 | python | en | code | 11 | github-code | 13 |
3301356336 | import websocket
import ast
import matplotlib.pyplot as plt
import json
def on_error(wsapp, message):
    """Report a websocket error by printing it to stdout."""
    print(message)
# Module-level state shared between the websocket callbacks below.
pit_volume = 0 #initializing some variables
incrementalRevenue=0
names=0
def on_message(wsapp, message):
    """ A function called for every message received.
    Stores data and sends back the results of the optimization algorithm. """
    global pit_volume
    global incrementalRevenue
    global names
    # stores the data as a dictionary
    try:
        data = ast.literal_eval(message)
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt — consider `except (ValueError, SyntaxError)`.
        print("Invalid data type.")
        print(f"{message}\n\n")
        return 0
    # tests whether the message sent is the data from the operations, or the results of the flow allocation
    if data["type"] == "CURRENT_STATE":
        # displays the data
        #print(f"data = {message}")
        # creates the suitable framework for the flow allocation
        # (hand-built JSON array of {operationId, flowRate} objects)
        output = "["
        flows=allocate_flow(data)
        for i in range(0, len(data["operations"])):
            output = output + "{\"operationId\":\"" + data["operations"][i]["id"] + "\",\"flowRate\":" + str(flows[i]) + "},"
        output = output[:-1] + "]"
        # Pie chart of the allocated flows, saved for the dashboard.
        plt.figure(1)
        plt.clf()
        plt.pie(flows,labels=names,autopct="%.2f")
        plt.savefig('pichat.png')
        # prints and sends the flow allocation
        print(f"ouput = {output}")
        wsapp.send(output)
    else:
        print(f"response = {message}\n\n")
        #storing the data we need to graph
        pit_volume = data["currentPitVolume"]
        temp=incrementalRevenue
        incrementalRevenue=data["incrementalRevenue"]
        # Persist the latest revenue figure for the dashboard.
        dat=open("data.json","w")
        json.dump({"incrementalRevenue": incrementalRevenue},dat)
        dat.close()
        # NOTE(review): deltathingy is computed but never used.
        deltathingy=incrementalRevenue-temp
        plt.figure(2)
        plt.bar(pit_volume,pit_volume,label='Pit Volume')
        #plt.show()
def allocate_flow(data):
    """ Stores data and runs the optimization algorithm to determine flow allocation.

    Greedy heuristic: start every operation at its revenue-maximising flow
    (each revenue curve is sampled at 21 points, flow = index * 10000), then,
    while total flow exceeds the available inflow, repeatedly move the
    operation with the cheapest revenue sacrifice down to its next-best peak,
    finishing with either a linear-interpolated "slide" or by undoing moves
    the freed-up water can now afford.
    """
    global pit_volume
    global names
    # Available water this round: reported inflow plus what's stored in the pit.
    flowRateIn = data["flowRateIn"] + pit_volume
    operations = data["operations"]
    names = []
    points = []
    for operation in operations:
        #makes a 2d array of the revenue points organized by their operation and then location
        points_row = []
        names.append(operation["name"])
        for i in range(21):
            #the index implies the flow rate of the point because flow=index*10000
            points_row.append(operation["revenueStructure"][i]["dollarsPerDay"])
        points.append(points_row)
    slopes = []
    for row in points:
        #2d array of the slopes between each point
        #linear interpolation, baby
        slopes_row = []
        for i in range(1, 21):
            dy = row[i] - row[i - 1]
            dx = 10000
            slope = dy / dx
            slopes_row.append(slope)
        slopes.append(slopes_row)
    maxindeces = []
    for row in points:
        #initializing our output as the flow rates that maximize the profit for each operation without considering the limit on inflow
        maxindeces.append(row.index(max(row)))
    if sum(maxindeces) * 10000 > flowRateIn: #continue only if the current water use is out of bounds
        moves=[]
        # Phase 1: while more than one step over budget, demote the operation
        # whose drop to a lower-flow peak costs the least revenue.
        while sum(maxindeces) * 10000 - flowRateIn > 10000:
            new = []
            for row in range(len(maxindeces)): #the maximum revenues that cost less water than the current maxes
                new.append((max(points[row][0:maxindeces[row]])) if maxindeces[row]!=0 else -99999999) #hoping -99999999 is low enough to keep dif so high it's out of competition
            workingRow = 0
            workingDif = points[0][maxindeces[0]] - new[0]
            for row in range(len(maxindeces)):
                dif = points[row][maxindeces[row]] - new[row]
                if dif < workingDif:
                    workingRow = row #finding the minimum difference and corresponding row
                    workingDif = dif
            # Each move: [revenue lost, old index, index steps freed, row].
            moves.append([workingDif,maxindeces[workingRow],maxindeces[workingRow]-points[workingRow].index(new[workingRow]),workingRow]) #keeping track of the jumps to cheaper peaks
            maxindeces[workingRow] = points[workingRow].index(new[workingRow]) #updating the maxindeces solution set
        # Phase 2: within one step of the budget, compare a fractional slide
        # down a slope against jumping to the next lower peak.
        if sum(maxindeces) * 10000 > flowRateIn: #checking if sliding down from one of the current maxes will make it cross the water threshhold
            maxesofeach = []
            for row in range(len(points)):
                newint = max(points[row][0:maxindeces[row]]) #here we consider the next left peaks (newint) and the points where the slide would bring us (ylimit)
                ylimit = (points[row][maxindeces[row]] - slopes[row][maxindeces[row]-1] * (sum(maxindeces) * 10000 - flowRateIn)) if maxindeces[row]!=0 else 0
                maxesofeach.append([max([newint, ylimit]), ylimit > newint]) #array shenanigans to help compare everything and keep track of where it's from
            workingRow = maxesofeach.index(max(maxesofeach))
            if not maxesofeach[workingRow][1]:
                moves.append([points[workingRow][maxindeces[workingRow]]-max(maxesofeach)[0],maxindeces[workingRow],maxindeces[workingRow]-points[workingRow].index(max(maxesofeach)[0]),workingRow])
                maxindeces[workingRow] = points[workingRow].index(maxesofeach[workingRow][0])
                while True: #undoing sacrifices that we got enough water to undo in the last jump
                    for move in moves:
                        if move[2]>flowRateIn/10000-sum(maxindeces):
                            moves.remove(move)
                    if len(moves)==0:
                        break
                    undoing=max(moves)
                    maxindeces[undoing[3]]=undoing[1]
                    moves.remove(undoing) #it removes moves it can't afford and moves it does until the moveset is empty
            else:
                # NOTE(review): this branch can leave a fractional index, so the
                # scaled result below may be a non-multiple-of-10000 float.
                maxindeces[workingRow] = maxindeces[workingRow] - sum(maxindeces) + flowRateIn / 10000 #the fated slide
    for i in range(len(maxindeces)):
        maxindeces[i] = maxindeces[i] * 10000 #converts from indeces to actual flow rates
    return maxindeces
def on_open(wsapp):
    """Set the pit capacity as soon as the websocket connection opens."""
    wsapp.send("{\"setPitCapacity\": 100000}")
wsapp = websocket.WebSocketApp("wss://2021-utd-hackathon.azurewebsites.net", on_message=on_message, on_error=on_error,on_open=on_open)
wsapp.run_forever() | Lord-Protector/EOG_HackUTD | node version/eog.py | eog.py | py | 6,646 | python | en | code | 0 | github-code | 13 |
18430050833 | """Pretraining on TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
from xlnet import model_utils, tpu_estimator, function_builder, data_utils
FLAGS = flags.FLAGS
def get_model_fn():
  """Return a TPUEstimator-compatible model_fn for pretraining (train only)."""
  def model_fn(features, labels, mode, params):
    """Build the loss, optimizer and TPUEstimatorSpec; asserts TRAIN mode."""
    #### Training or Evaluation
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    assert is_training
    #### Retrieve `mems` from `params["cache"]`
    mems = {}
    idx = 0  # NOTE(review): appears unused.
    if FLAGS.mem_len > 0:
      mems["mems"] = params["cache"]
    #### Get loss from inputs
    total_loss, new_mems, monitor_dict = function_builder.get_loss(
        FLAGS, features, labels, mems, is_training)
    #### Turn `new_mems` into `new_cache`
    new_cache = []
    if FLAGS.mem_len > 0:
      new_cache += new_mems["mems"]
    #### Check model parameters
    num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
    tf.logging.info("#params: {}".format(num_params))
    #### Configuring the optimizer
    train_op, learning_rate, gnorm = model_utils.get_train_op(
        FLAGS, total_loss)
    monitor_dict["lr"] = learning_rate
    monitor_dict["gnorm"] = gnorm
    #### Customized initial checkpoint
    scaffold_fn = model_utils.init_from_checkpoint(FLAGS, global_vars=True)
    #### Creating host calls (scalar summaries written from the TPU host)
    host_call = function_builder.construct_scalar_host_call(
        monitor_dict=monitor_dict,
        model_dir=FLAGS.model_dir,
        prefix="train/",
        reduce_fn=tf.reduce_mean)
    #### Constucting training TPUEstimatorSpec with new cache.
    train_spec = tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode, loss=total_loss, train_op=train_op, host_call=host_call,
        scaffold_fn=scaffold_fn)
    # Attach the updated memory cache so the estimator carries it between steps.
    train_spec.cache = new_cache
    return train_spec
  return model_fn
def get_cache_fn(mem_len):
  """Return a cache initializer producing per-layer zero memories, or None.

  The returned cache_fn(batch_size) builds FLAGS.n_layer zero tensors of
  shape [mem_len, batch_size, FLAGS.d_model]; when mem_len <= 0 no cache
  is used and None is returned instead.
  """
  tf_float = tf.bfloat16 if FLAGS.use_bfloat16 else tf.float32
  def cache_fn(batch_size):
    mems = []
    if FLAGS.mem_len > 0:
      for _ in range(FLAGS.n_layer):
        zeros = tf.zeros(
            [mem_len, batch_size, FLAGS.d_model],
            dtype=tf_float)
        mems.append(zeros)
    return mems
  if mem_len > 0:
    return cache_fn
  else:
    return None
def get_input_fn(split):
  """Build the tfrecord input_fn for pretraining; only split == "train" is supported.

  Returns (input_fn, record_info_dict) from data_utils, with the per-host
  batch size derived from FLAGS.train_batch_size / FLAGS.num_hosts.
  """
  assert split == "train"
  batch_size = FLAGS.train_batch_size
  input_fn, record_info_dict = data_utils.get_input_fn(
      tfrecord_dir=FLAGS.record_info_dir,
      split=split,
      bsz_per_host=batch_size // FLAGS.num_hosts,
      seq_len=FLAGS.seq_len,
      reuse_len=FLAGS.reuse_len,
      bi_data=FLAGS.bi_data,
      num_hosts=FLAGS.num_hosts,
      num_core_per_host=FLAGS.num_core_per_host,
      perm_size=FLAGS.perm_size,
      mask_alpha=FLAGS.mask_alpha,
      mask_beta=FLAGS.mask_beta,
      uncased=FLAGS.uncased,
      num_passes=FLAGS.num_passes,
      use_bfloat16=FLAGS.use_bfloat16,
      num_predict=FLAGS.num_predict)
  return input_fn, record_info_dict
def main(unused_argv):
    """Training entry point: wires the data pipeline, model and TPU estimator."""
    del unused_argv  # Unused

    tf.logging.set_verbosity(tf.logging.INFO)

    assert FLAGS.seq_len > 0
    assert FLAGS.perm_size > 0

    # Vocabulary size comes from the data utilities, not the command line.
    FLAGS.n_token = data_utils.VOCAB_SIZE
    tf.logging.info("n_token {}".format(FLAGS.n_token))

    if not tf.gfile.Exists(FLAGS.model_dir):
        tf.gfile.MakeDirs(FLAGS.model_dir)

    # Get train input function
    train_input_fn, train_record_info_dict = get_input_fn("train")
    tf.logging.info("num of batches {}".format(
        train_record_info_dict["num_batch"]))

    # Get train cache function (None when mem_len <= 0, i.e. no memory)
    train_cache_fn = get_cache_fn(FLAGS.mem_len)

    ##### Get model function
    model_fn = get_model_fn()

    ##### Create TPUEstimator
    # TPU Configuration
    run_config = model_utils.configure_tpu(FLAGS)

    # TPU Estimator
    estimator = tpu_estimator.TPUEstimator(
        model_fn=model_fn,
        train_cache_fn=train_cache_fn,
        use_tpu=FLAGS.use_tpu,
        config=run_config,
        params={"track_mean": FLAGS.track_mean},
        train_batch_size=FLAGS.train_batch_size,
        eval_on_tpu=FLAGS.use_tpu)

    #### Training
    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps)
# Script entry point: parse absl flags, then run training.
if __name__ == "__main__":
    app.run(main)
| SebiSebi/xlnet | train.py | train.py | py | 4,350 | python | en | code | null | github-code | 13 |
class Node:
    """A filesystem tree node; directories carry size 0 and hold children."""

    def __init__(self, item_name, size):
        self.name = item_name
        self.size = size
        self.children = {}

    def addChild(self, child_name, child_node):
        """Attach *child_node* under this node, keyed by *child_name*."""
        self.children[child_name] = child_node

    def totalSize(self):
        """Recursive size of this node plus everything below it."""
        return self.size + sum(child.totalSize() for child in self.children.values())
# Advent of Code 2022, day 7: rebuild the directory tree from the terminal
# transcript in `day7.input`, then answer both puzzle parts.
root = Node("/", 0)
current_node = root
stack = []  # ancestor directories, so "$ cd .." can backtrack

with open('day7.input') as f:
    for line in f.read().splitlines():
        if line == "$ cd /":
            continue
        if line == "$ cd ..":
            current_node = stack.pop()
        elif line.startswith("$ cd"):
            dir_name = line[5:]
            stack.append(current_node)
            current_node = current_node.children[dir_name]
        elif line == "$ ls":
            continue
        else:
            # Listing entry: either "dir <name>" or "<size> <name>".
            (size_or_dir, name) = line.split(" ")
            if size_or_dir == "dir":
                current_node.children[name] = Node(name, 0)
            else:
                current_node.children[name] = Node(name, int(size_or_dir))

total_space = 70000000
needed_free_space = 30000000
current_free_space = total_space - root.totalSize()

min_size_of_dir_to_delete = root.totalSize()
dirs_to_check = [root]
sum_of_sizes = 0
# Depth-first walk over every directory.
# NOTE(review): size == 0 is used to mean "directory"; a genuine zero-byte
# *file* would be misclassified — AoC inputs don't appear to contain any,
# but confirm.
while len(dirs_to_check) > 0:
    current_dir = dirs_to_check.pop()
    for (_, c) in current_dir.children.items():
        if c.size == 0:  # It's a dir
            dir_size = c.totalSize()
            if dir_size < 100000:
                sum_of_sizes += dir_size  # part 1: sum of small directories
            dirs_to_check.append(c)
            # part 2: smallest directory whose deletion frees enough space
            if current_free_space + dir_size > needed_free_space and \
                    dir_size < min_size_of_dir_to_delete:
                min_size_of_dir_to_delete = dir_size

print(sum_of_sizes)
print(min_size_of_dir_to_delete)
| alexvy86/advent-of-code | 2022/day7.py | day7.py | py | 1,820 | python | en | code | 0 | github-code | 13 |
42137381293 | #kata link: https://www.codewars.com/kata/550498447451fbbd7600041c
#Instruction: Given two arrays a and b, write a function comp(a, b) (or compSame(a, b)) that checks whether the two arrays have the "same" elements, with the same multiplicities.
# "Same" means, here, that the elements in b are the elements in a squared, regardless of the order.
#Code:
def comp(array1, array2):
    """Return True when *array2* contains exactly the squares of *array1*.

    The comparison respects multiplicities and ignores order; either
    argument being None yields False. Unlike the previous version, the
    caller's lists are NOT mutated (the old code sorted them in place).
    """
    if array1 is None or array2 is None:
        return False
    # Comparing the sorted multiset of squares against the sorted second
    # list also handles mismatched lengths and any stray negatives in
    # array2 (a square can never be negative).
    return sorted(x * x for x in array1) == sorted(array2)
2876882398 | import collections
from io import TextIOWrapper
import os
import pathlib
import shutil
import tempfile
import tarfile
from typing import DefaultDict, List, Optional, Tuple
import xml.etree.ElementTree as ET
from docuploader import log, shell, tar
from docuploader.protos import metadata_pb2
from google.cloud import storage
from google.protobuf import text_format, json_format
from docpipeline import prepare
import semver
DOCFX_PREFIX = "docfx-"
XREFS_DIR_NAME = "xrefs"
DEVSITE_SCHEME = "devsite://"
TEMPLATE_DIR = pathlib.Path("third_party/docfx/templates/devsite")
DOCFX_JSON_TEMPLATE = """
{{
"build": {{
"content": [
{{
"files": ["**/*.yml", "**/*.md"],
"src": "obj/api"
}}
],
"globalMetadata": {{
"_appTitle": "{package}",
"_packageVersion": "{package_version}",
"_disableContribution": true,
"_appFooter": " ",
"_disableNavbar": true,
"_disableBreadcrumb": true,
"_enableSearch": false,
"_disableToc": true,
"_disableSideFilter": true,
"_disableAffix": true,
"_disableFooter": true,
"_rootPath": "{path}",
"_projectPath": "{project_path}"
}},
"overwrite": [
"obj/examples/*.md"
],
"dest": "site",
"xref": [{xrefs}],
"xrefService": [{xref_services}],
}}
}}
"""
def format_docfx_json(metadata: metadata_pb2.Metadata) -> str:
    """Render the docfx.json contents for the given package metadata."""
    # Empty xref entries (e.g. unresolved devsite:// references) are dropped.
    quoted_xrefs = ", ".join(f'"{x}"' for x in metadata.xrefs if x != "")
    quoted_services = ", ".join(f'"{x}"' for x in metadata.xref_services)
    return DOCFX_JSON_TEMPLATE.format(
        package=metadata.name,
        package_version=metadata.version,
        path=get_path(metadata),
        project_path=f"/{metadata.language}/docs/reference/",
        xrefs=quoted_xrefs,
        xref_services=quoted_services,
    )
def setup_local_docfx(
    tmp_path: pathlib.Path,
    api_path: pathlib.Path,
    decompress_path: pathlib.Path,
    blob: storage.Blob,
) -> Tuple[pathlib.Path, metadata_pb2.Metadata]:
    """Copy a local DocFX directory into place and write docfx.json.

    NOTE(review): despite the ``storage.Blob`` annotation, local builds pass
    a path-like here (``iterdir``/``copytree`` are path operations) — confirm.
    """
    # When the source already contains an "api/" directory, copy everything
    # under obj/ so the final layout is obj/api/... either way.
    if any(entry.is_dir() and entry.name == "api" for entry in blob.iterdir()):
        decompress_path = tmp_path.joinpath("obj")
    shutil.copytree(blob, decompress_path, dirs_exist_ok=True)
    log.info(f"Decompressed in {decompress_path}")
    return write_docfx_json(tmp_path, api_path, decompress_path, blob)
def setup_bucket_docfx(
    tmp_path: pathlib.Path,
    api_path: pathlib.Path,
    decompress_path: pathlib.Path,
    blob: storage.Blob,
) -> Tuple[pathlib.Path, metadata_pb2.Metadata]:
    """Download and unpack a DocFX tarball from GCS, then write docfx.json.

    Returns (metadata_path, metadata) from write_docfx_json.

    Raises:
        ValueError: if the blob disappeared between listing and download.
    """
    tar_filename = tmp_path.joinpath(blob.name)
    tar_filename.parent.mkdir(parents=True, exist_ok=True)
    # Reinstantiate the blob in case it changed between listing and downloading.
    blob = blob.bucket.blob(blob.name)
    if not blob.exists():
        raise ValueError(
            (
                # Fixed: the two fragments previously concatenated to
                # "doesnot exist" (missing space).
                f"Blob gs://{blob.bucket.name}/{blob.name} does "
                "not exist (maybe it was deleted?)"
            )
        )
    blob.download_to_filename(tar_filename)
    log.info(f"Downloaded gs://{blob.bucket.name}/{blob.name} to {tar_filename}")
    # Check to see if api directory exists in the tarball.
    # If so, only decompress things into obj/*
    tar_file = tarfile.open(tar_filename)
    for tarinfo in tar_file:
        if (tarinfo.isdir() and tarinfo.name == "./api") or tarinfo.name.startswith(
            "api/"
        ):
            decompress_path = tmp_path.joinpath("obj")
            break
    tar.decompress(tar_filename, decompress_path)
    log.info(f"Decompressed {blob.name} in {decompress_path}")
    return write_docfx_json(tmp_path, api_path, decompress_path, blob)
def write_docfx_json(
    tmp_path: pathlib.Path,
    api_path: pathlib.Path,
    decompress_path: pathlib.Path,
    blob: storage.Blob,
) -> Tuple[pathlib.Path, metadata_pb2.Metadata]:
    """Parse docs.metadata(.json), resolve xrefs, and emit docfx.json.

    Returns (metadata_path, metadata) for the decompressed package.
    """
    metadata = metadata_pb2.Metadata()
    # Prefer the JSON metadata file; fall back to the textproto format.
    metadata_path = decompress_path.joinpath("docs.metadata.json")
    if metadata_path.exists():
        json_format.Parse(metadata_path.read_text(), metadata)
    else:
        metadata_path = decompress_path.joinpath("docs.metadata")
        text_format.Merge(metadata_path.read_text(), metadata)
    try:
        # Resolve devsite:// xrefs to local files downloaded from the bucket.
        metadata.xrefs[:] = [
            get_xref(xref, blob.bucket, tmp_path) for xref in metadata.xrefs
        ]
    except AttributeError:
        # NOTE(review): presumably local builds pass a path-like object with
        # no .bucket attribute, which lands here — confirm.
        log.warning("Building locally will ignore xrefs in the metadata.")
    with open(tmp_path.joinpath("docfx.json"), "w") as f:
        f.write(format_docfx_json(metadata))
    log.info("Wrote docfx.json")
    # TODO: remove this once _toc.yaml is no longer created.
    if pathlib.Path(api_path.joinpath("_toc.yaml")).is_file():
        shutil.move(api_path.joinpath("_toc.yaml"), api_path.joinpath("toc.yml"))
    return metadata_path, metadata
def build_and_format(
    blob: storage.Blob, is_bucket: bool
) -> Tuple[pathlib.Path, metadata_pb2.Metadata, pathlib.Path]:
    """Run `docfx build` for one input and return (tmp_path, metadata, site_path).

    When *is_bucket* is True, *blob* is a GCS blob holding a tarball;
    otherwise it is treated as a local source (see setup_local_docfx).

    Raises:
        ValueError: if the build produced no HTML at all.
    """
    tmp_path = pathlib.Path(tempfile.TemporaryDirectory(prefix="doc-pipeline.").name)
    api_path = decompress_path = tmp_path.joinpath("obj/api")
    api_path.mkdir(parents=True, exist_ok=True)
    # If building blobs on a bucket, use setup_bucket_docfx
    # Else, use setup_local_docfx
    if is_bucket:
        metadata_path, metadata = setup_bucket_docfx(
            tmp_path, api_path, decompress_path, blob
        )
        blob_name = blob.name
    else:
        metadata_path, metadata = setup_local_docfx(
            tmp_path, api_path, decompress_path, blob
        )
        blob_name = metadata.name
    site_path = tmp_path.joinpath("site")
    log.info(f"Running `docfx build` for {blob_name} in {tmp_path}...")
    shell.run(
        ["docfx", "build", "-t", f"{TEMPLATE_DIR.absolute()}"],
        cwd=tmp_path,
        hide_output=False,
    )
    # Rename the output TOC file to be _toc.yaml to match the expected
    # format. As well, support both toc.html and toc.yaml
    try:
        shutil.move(site_path.joinpath("toc.yaml"), site_path.joinpath("_toc.yaml"))
    except FileNotFoundError:
        shutil.move(site_path.joinpath("toc.html"), site_path.joinpath("_toc.yaml"))
    html_files = list(site_path.glob("**/*.html"))
    if len(html_files) == 0:
        raise ValueError("Did not generate any HTML files.")
    # Remove the manifest.json file.
    site_path.joinpath("manifest.json").unlink()
    # Add the prettyprint class to code snippets
    prepare.add_prettyprint(site_path)
    log.success(f"Done building HTML for {blob_name}. Starting upload...")
    # Reuse the same docs.metadata file. The original docfx- prefix is an
    # command line option when uploading, not part of docs.metadata.
    shutil.copy(metadata_path, site_path)
    return tmp_path, metadata, site_path
def get_path(metadata: metadata_pb2.Metadata) -> str:
    """Return the devsite path for a package.

    Defaults to "/<language>/docs/reference/<name>", overridden by a
    non-empty metadata.stem; every package except "help" gets a trailing
    "/latest" component.
    """
    base = metadata.stem or f"/{metadata.language}/docs/reference/{metadata.name}"
    if metadata.name == "help":
        return base
    return base + "/latest"
def process_blob(blob: storage.Blob) -> None:
    """Build one DocFX tarball from GCS and publish its xrefmap and HTML."""
    is_bucket = True
    tmp_path, metadata, site_path = build_and_format(blob, is_bucket)
    # Use the input blob name as the name of the xref file to avoid collisions.
    # The input blob has a "docfx-" prefix; make sure to remove it.
    xrefmap = site_path.joinpath("xrefmap.yml")
    xrefmap_lines = xrefmap.read_text().splitlines()
    # The baseUrl must start with a scheme and domain. With no scheme, docfx
    # assumes it's a file:// link.
    base_url = f"baseUrl: https://cloud.google.com{get_path(metadata)}/"
    # Insert base_url after the YamlMime first line.
    xrefmap_lines.insert(1, base_url)
    xrefmap.write_text("\n".join(xrefmap_lines))
    xref_blob_name_base = blob.name[len("docfx-") :]
    xref_blob = blob.bucket.blob(f"{XREFS_DIR_NAME}/{xref_blob_name_base}.yml")
    xref_blob.upload_from_filename(filename=xrefmap)
    # Upload the generated site; the docs.metadata copied into site_path
    # tells docuploader where the docs belong.
    shell.run(
        [
            "docuploader",
            "upload",
            ".",
            f"--staging-bucket={blob.bucket.name}",
        ],
        cwd=site_path,
        hide_output=False,
    )
    shutil.rmtree(tmp_path)
    log.success(f"Done with {blob.name}!")
def get_xref(xref: str, bucket: storage.Bucket, dir: pathlib.Path) -> str:
    """Resolve one xref entry to a local file path.

    Non-devsite:// entries pass through unchanged. A
    "devsite://<lang>/<pkg>[@version]" entry is looked up in the bucket's
    xrefs/ folder (resolving "latest" by semver) and downloaded under *dir*.
    Returns "" when the xref cannot be found, so callers can filter it out.
    """
    if not xref.startswith(DEVSITE_SCHEME):
        return xref
    d_xref = xref[len(DEVSITE_SCHEME) :]
    lang, pkg = d_xref.split("/", 1)
    version = "latest"
    extension = ".tar.gz.yml"
    if "@" in pkg:
        pkg, version = pkg.rsplit("@", 1)
    if version == "latest":
        # List all blobs, sort by semver, and pick the latest.
        prefix = f"{XREFS_DIR_NAME}/{lang}-{pkg}-"
        blobs = bucket.list_blobs(prefix=prefix)
        version = find_latest_version(blobs, prefix, extension)
        if version == "":
            # There are no versions, so there is no latest version.
            log.error(f"Could not find {xref} in gs://{bucket.name}. Skipping.")
            return ""
    d_xref = f"{XREFS_DIR_NAME}/{lang}-{pkg}-{version}{extension}"
    blob = bucket.blob(d_xref)
    if not blob.exists():
        # Log warning. Dependency may not be generated yet.
        log.error(f"Could not find gs://{bucket.name}/{d_xref}. Skipping.")
        return ""
    d_xref_path = dir.joinpath(d_xref).absolute()
    d_xref_path.parent.mkdir(parents=True, exist_ok=True)
    blob.download_to_filename(d_xref_path)
    return str(d_xref_path)
def version_sort(v: str) -> semver.VersionInfo:
    """Parse *v* into a semver sort key, tolerating a single leading "v".

    Raises ValueError for anything semver cannot parse. Fix: the previous
    ``v[0] == "v"`` check raised IndexError on an empty string, which
    find_latest_version's ``except ValueError`` did not catch; with
    ``removeprefix`` the empty string now raises the expected ValueError.
    """
    return semver.VersionInfo.parse(v.removeprefix("v"))
def find_latest_version(
    blobs: List[storage.Blob], prefix: str, extension: Optional[str] = None
) -> str:
    """Finds the latest version from blobs with specified prefix."""
    suffix = extension or ".tar.gz"
    parseable = []
    for candidate in blobs:
        # Strip the prefix and the archive suffix to get the bare version.
        bare = candidate.name[len(prefix) : -len(suffix)]
        try:
            version_sort(bare)  # validity check only
        except ValueError:
            # Some other package shares this prefix
            # (e.g. "...foo-1.0.0" vs "...foo-beta1-1.0.0") — ignore it.
            continue
        parseable.append(bare)
    if not parseable:
        return ""
    parseable.sort(key=version_sort)
    return parseable[-1]
def parse_blob_name(blob_name: str) -> Tuple[str, str]:
    """Parses the blob's name and returns its language and package.

    Blob names look like "docfx-<language>-<pkg>-<version>.tar.gz"; the
    package itself may contain dashes, so the middle pieces are rejoined.
    """
    pieces = blob_name.split("-")
    return pieces[1], "-".join(pieces[2:-1])
def find_latest_blobs(
    bucket: storage.Bucket, blobs: List[storage.Blob]
) -> List[storage.Blob]:
    """Gets a list of the latest blob for each package."""
    newest = []
    # For each unique (language, package), resolve its latest version.
    for language, pkg_map in group_blobs_by_language_and_pkg(blobs).items():
        for pkg, pkg_blobs in pkg_map.items():
            prefix = f"{DOCFX_PREFIX}{language}-{pkg}-"
            latest = find_latest_version(pkg_blobs, prefix)
            if not latest:
                log.error(f"Found no versions for {prefix}, skipping.")
                continue
            newest.append(bucket.blob(f"{prefix}{latest}.tar.gz"))
    return newest
def group_blobs_by_language_and_pkg(
    blobs: List[storage.Blob],
) -> DefaultDict[str, DefaultDict[str, List[storage.Blob]]]:
    """Gets a map from language to package name to a list of blobs."""
    grouped: DefaultDict[
        str, DefaultDict[str, List[storage.Blob]]
    ] = collections.defaultdict(lambda: collections.defaultdict(list))
    for candidate in blobs:
        # The blob name encodes both the language and the package.
        lang, pkg = parse_blob_name(candidate.name)
        grouped[lang][pkg].append(candidate)
    return grouped
def build_blobs(blobs: List[storage.Blob]):
    """Builds the HTML for the given blobs.

    Each blob is processed independently; per-blob results are written to
    sponge_log.xml and an exception is raised at the end if any blob failed.
    """
    num = len(blobs)
    if num == 0:
        log.success("No blobs to process!")
        return
    log.info("Let's build some docs!")
    blob_names = "\n".join(map(lambda blob: blob.name, blobs))
    log.info(f"Processing {num} blob{'' if num == 1 else 's'}:\n{blob_names}")
    # Process every blob.
    failures = []
    successes = []
    for i, blob in enumerate(blobs):
        try:
            log.info(f"Processing {i+1} of {len(blobs)}: {blob.name}...")
            if not blob.name.startswith("docfx"):
                raise ValueError(
                    (
                        f"{blob.name} does not start with docfx,"
                        f"did you mean docfx-{blob.name}?"
                    )
                )
            process_blob(blob)
            successes.append(blob.name)
        except Exception as e:
            # Keep processing the other files if an error occurs.
            log.error(f"Error processing {blob.name}:\n\n{e}")
            failures.append(blob.name)
    # Emit an xUnit report so CI (Kokoro/Sponge) can show per-blob results.
    with open("sponge_log.xml", "w") as f:
        write_xunit(f, successes, failures)
    if len(failures) > 0:
        failure_str = "\n".join(failures)
        raise Exception(
            f"Got errors while processing the following archives:\n{failure_str}"
        )
    log.success("Done!")
def build_all_docs(
    bucket_name: str, storage_client: storage.Client, only_latest: bool = False
):
    """Builds all of the blobs in the bucket."""
    candidates = [
        b
        for b in storage_client.list_blobs(bucket_name)
        if b.name.startswith(DOCFX_PREFIX)
    ]
    if only_latest:
        # Keep only the newest semver per package.
        candidates = find_latest_blobs(
            storage_client.get_bucket(bucket_name), candidates
        )
    build_blobs(candidates)
def build_one_doc(bucket_name: str, object_name: str, storage_client: storage.Client):
    """Builds a single blob."""
    target = storage_client.bucket(bucket_name).get_blob(object_name)
    if target is None:
        raise Exception(f"Could not find gs://{bucket_name}/{object_name}!")
    build_blobs([target])
def build_new_docs(bucket_name: str, storage_client: storage.Client):
    """Lazily builds just the new blobs in the bucket.

    If the DocFX blob of a package is uploaded for the first time or is newer
    than the corresponding HTML blob, it is generated.

    The new version may or may not be the latest SemVer.
    """
    everything = list(storage_client.list_blobs(bucket_name))
    html_by_name = {
        b.name: b for b in everything if not b.name.startswith(DOCFX_PREFIX)
    }
    stale = []
    for docfx_blob in everything:
        if not docfx_blob.name.startswith(DOCFX_PREFIX):
            continue
        html_name = docfx_blob.name[len(DOCFX_PREFIX) :]
        existing = html_by_name.get(html_name)
        # Rebuild when no HTML exists yet or the DocFX upload is newer.
        if existing is None or docfx_blob.updated > existing.updated:
            stale.append(docfx_blob)
    build_blobs(stale)
def build_language_docs(
    bucket_name: str,
    language: str,
    storage_client: storage.Client,
    only_latest: bool = False,
):
    """Builds all of the blobs for the given language."""
    wanted_prefix = f"{DOCFX_PREFIX}{language}-"
    candidates = [
        b
        for b in storage_client.list_blobs(bucket_name)
        if b.name.startswith(wanted_prefix)
    ]
    if only_latest:
        candidates = find_latest_blobs(
            storage_client.get_bucket(bucket_name), candidates
        )
    build_blobs(candidates)
def write_xunit(f: TextIOWrapper, successes: List[str], failures: List[str]):
    """Write an xUnit XML report of per-blob build results to *f*.

    The suite name is the last path component of KOKORO_JOB_NAME
    (defaulting to "generate"); every failure gets a <failure> child.
    """
    suite_name = os.environ.get("KOKORO_JOB_NAME", "/generate").rsplit("/", 1)[-1]
    root = ET.Element("testsuites")
    suite = ET.SubElement(
        root,
        "testsuite",
        attrib={
            "tests": str(len(successes) + len(failures)),
            "failures": str(len(failures)),
            "name": suite_name,
        },
    )
    for blob_name in successes:
        ET.SubElement(
            suite, "testcase", attrib={"classname": "build", "name": blob_name}
        )
    for blob_name in failures:
        case = ET.SubElement(
            suite, "testcase", attrib={"classname": "build", "name": blob_name}
        )
        ET.SubElement(case, "failure", attrib={"message": "Failed"})
    report = ET.ElementTree(element=root)
    ET.indent(report)
    report.write(f, encoding="unicode")
| googleapis/doc-pipeline | docpipeline/generate.py | generate.py | py | 16,625 | python | en | code | 10 | github-code | 13 |
22453914339 | from .models import User, Transaction
from django.db.models import (
F, Q, Sum, Case, When, FloatField, Subquery, OuterRef
)
from .util import get_prices, monetaryConversor
def balance(userID):
    """Compute the user's portfolio balance.

    Aggregates, per investment, the bought/sold quantities and amounts in a
    single annotated queryset, then prices the open positions with live
    quotes and converts everything into the user's preferred currency.

    Returns a dict with:
        total: current market value of the portfolio
        typesTotal: current value per investment type
        typesExpected: target value per type (from the type's percent)
        invested: total amount effectively invested
        profit: percent difference between invested and current total
    """
    user_data = User.objects.get(pk=userID)
    portfolio = user_data.investiments.order_by("-date").all().annotate(
        # Action of the most recent transaction (informational).
        lastedTrans=Subquery(
            Transaction.objects.filter(
                investiment=OuterRef('pk')
            ).order_by('-transaction_date').values('action')[:1]
        ),
        # Pay price of the last inserted transaction; used to value a
        # position whose running balance is not positive.
        firstTrans=Subquery(
            Transaction.objects.filter(
                investiment=OuterRef('pk')
            ).order_by('-id').values('payprice')[:1]
        ),
        # Net quantity held: buys minus sells, sign-flipped for short
        # ("SELL") positions so the quantity comes out positive.
        qnt=Case(
            When(position="BUY", then=(
                Sum(Case(
                    When(transactions__action="BUY", then='transactions__quantity'),
                    When(transactions__action="SELL", then=F('transactions__quantity') * -1),
                    output_field=FloatField()
                ))
            )),
            When(position="SELL", then=(
                Sum(Case(
                    When(transactions__action="BUY", then='transactions__quantity'),
                    When(transactions__action="SELL", then=F('transactions__quantity') * -1),
                    output_field=FloatField()
                )) * -1
            )),
            When(position="NONE", then=0),
            output_field=FloatField()),
        # Total amount ever spent buying this investment.
        allBought=(
            Sum(Case(
                When(transactions__action="BUY", then=F('transactions__quantity') * F('transactions__payprice')),
                output_field=FloatField()
            ), output_field=FloatField())
            * 1.0),
        # Total amount ever received selling this investment.
        allSales=(
            Sum(Case(
                When(transactions__action="SELL", then=F('transactions__quantity') * F('transactions__payprice')),
                output_field=FloatField()
            ), output_field=FloatField())
            * 1.0),
        # Net cash flow: buys minus sells.
        balance=Sum(Case(
            When(transactions__action="BUY", then=F('transactions__quantity') * F('transactions__payprice')),
            When(transactions__action="SELL", then=F('transactions__quantity') * F('transactions__payprice') * -1),
            output_field=FloatField()
        )),
        # Amount considered "invested": the positive net cash flow, or the
        # last pay price times quantity when the flow is not positive.
        total=Case(
            When(position="BUY", then=(
                Case(
                    When(balance__gt=0, then=F('balance')),
                    When(~Q(balance__gt=0), then=(
                        F('firstTrans') * F('qnt')
                    )),
                    output_field=FloatField())
            )),
            When(position="SELL", then=(
                Case(
                    When(balance__gt=0, then=F('balance') * -1),
                    When(~Q(balance__gt=0), then=(
                        F('firstTrans') * F('qnt')
                    )),
                    output_field=FloatField())
            )),
            When(position="NONE", then=('allBought')),
            output_field=FloatField()),
    )

    codes = []       # tickers as understood by the quote provider
    realcodes = []   # tickers exactly as stored on the investment
    invested = 0
    portfolioTotal = 0
    typesTotal = {}
    typesExpected = {}
    types = user_data.types.all()
    for inv_type in types:  # renamed from `type` to avoid shadowing the builtin
        typesTotal[inv_type.typeName] = 0
        typesExpected[inv_type.typeName] = 0
    for investiment in portfolio:
        # Brazilian tickers (R$) need the ".SA" suffix for the quote API.
        if investiment.currency == 'R$':
            realcodes.append(investiment.code)
            codes.append(investiment.code + '.SA')
        else:
            realcodes.append(investiment.code)
            codes.append(investiment.code)
        if investiment.currency == user_data.preferences.currency:
            productTotal = investiment.total
        else:
            productTotal = monetaryConversor(investiment.currency, user_data.preferences.currency, investiment.total)
        invested = invested + productTotal
    prices = get_prices(codes)
    for i in range(len(codes)):
        price = prices.tickers[codes[i]].info['regularMarketPrice']
        thisInvestimentSum = round(float(price) * float(portfolio[i].qnt), 2)
        if portfolio[i].currency != user_data.preferences.currency:
            thisInvestimentSum = monetaryConversor(portfolio[i].currency, user_data.preferences.currency, thisInvestimentSum)
        typesTotal[portfolio[i].type.typeName] = typesTotal[portfolio[i].type.typeName] + thisInvestimentSum
        portfolioTotal = portfolioTotal + thisInvestimentSum
    portfolioTotal = round(portfolioTotal, 2)
    profit = 0
    # Fix: also require portfolioTotal > 0 — `invested > 0` alone allowed a
    # ZeroDivisionError when every position is fully closed out.
    # NOTE(review): profit is measured relative to the *current* total,
    # i.e. (total - invested) / total — confirm that is the intended metric.
    if invested > 0 and portfolioTotal > 0:
        profit = round((1 - (invested / portfolioTotal)) * 100, 2)
    if portfolioTotal > 0:
        for inv_type in types:
            typesExpected[inv_type.typeName] = portfolioTotal * (inv_type.percent / 100)
    result = {
        'total': portfolioTotal,
        'typesTotal': typesTotal,
        'typesExpected': typesExpected,
        'invested': invested,
        'profit': profit,
    }
    return result
return balance | carlosjosedesign/finance | finance/balance.py | balance.py | py | 5,351 | python | en | code | 0 | github-code | 13 |
71997909457 | import numpy as np
class PostProcess():
#initalization
def __init__(self,pageshape):
self.shape = pageshape # h,w
# process(sort,removing duplicate etc.) the horizontal/vertical lines in the page
def sort_by_index(self,lines,index):
if not len(lines):
return np.array([[0,0,0,0],[self.shape[1],self.shape[0],0,0]])
lines = np.insert(lines,0,[[0,0,0,0]],axis=0)
lines = np.append(lines,[[self.shape[1],self.shape[0],0,0]],axis=0)
lines = np.unique(lines,axis=0)
lines = lines[np.argsort(lines[:,index])]
return lines
# get the segment (segment are separated by horizontal lines)
def get_segment(self,high,low,texts):
segment = []
remain = []
for text in texts:
if text[1][1] < high and text[1][1] > low:
segment.append(text)
else:
remain.append(text)
return segment,remain
# get the cluster (cluster are separated by vertical lines)
def get_cluster(self,high,low,texts):
cluster = []
remain = []
for text in texts:
if text[1][0] < high and text[1][0] > low:
cluster.append(text)
else:
remain.append(text)
return cluster,remain
# arrange the text in form of lines
def arrange_in_line(self,texts,horlines,verlines):
lines = []
remain = texts.copy()
horlines = self.sort_by_index(horlines,1)
verlines = self.sort_by_index(verlines,0)
# for every segment
for horidx in range(1,len(horlines)):
y_upper = horlines[horidx-1][1]
y_lower = horlines[horidx][1]
segment,remain = self.get_segment(y_lower,y_upper,remain)
if segment:
# for every cluster
for veridx in range(1,len(verlines)):
x_upper = verlines[veridx-1][0]
x_lower = verlines[veridx][0]
cluster,segment = self.get_cluster(x_lower,x_upper,segment)
if cluster:
# sort the cluster by y index
cluster = np.array(cluster,dtype=object)
cluster = cluster[np.argsort(np.stack(np.array(cluster[:,1]))[:,1])]
# get the first word
line = [cluster[0]]
# process every word in the cluster if in same line, keep adding in the line else append line in lines and reinitailize the line
for idx in range(1,len(cluster)):
text = cluster[idx]
if self.sameline(cluster[idx-1][1],text[1]):
line.append(text)
else:
line = np.array(line)
line = line[np.argsort(np.stack(np.array(line[:,1]))[:,0])]
lines.append(line)
line = line.copy()
line = [text]
line = np.array(line)
line = line[np.argsort(np.stack(np.array(line[:,1]))[:,0])]
lines.append(line)
return lines
# check whether two words are in same line
def sameline(self,box1,box2):
l = max(box1[1]+box1[3],box2[1]+box2[3]) -min(box1[1],box2[1])
if ((box1[3]+box2[3])/l > 1.1):
return True
else:
return False
| wetleaf/Pdf_To_Text | code/postprocess.py | postprocess.py | py | 3,925 | python | en | code | 0 | github-code | 13 |
37478260945 | import torch
import torch.nn as nn
class Actor(nn.Module):
    """Actor head: a linear embedding feeding a ResNet value trunk.

    forward(x) returns (log-probabilities over actions, state value); the
    policy logits are computed from the trunk's intermediate features.
    """

    def __init__(self, obs_dim, action_dim, hidden_dim=256):
        super(Actor, self).__init__()
        # Submodule names (fc / value / policy) are part of the state_dict
        # layout and must not change.
        self.fc = nn.Linear(obs_dim, hidden_dim)
        self.value = ResNet(hidden_dim, 1, 2, output_dim=1)
        self.policy = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        embedded = self.fc(x)
        features, state_value = self.value(embedded, return_features=True)
        log_probs = torch.log_softmax(self.policy(features), -1)
        return log_probs, state_value
class ResidualBlock(nn.Module):
    """Residual MLP block computing y = x + MLP(x).

    Following the structure of the one implemented in
    https://arxiv.org/pdf/1806.10909.pdf
    """

    def __init__(self, data_dim, hidden_dim):
        super(ResidualBlock, self).__init__()
        self.data_dim = data_dim
        self.hidden_dim = hidden_dim
        # Two-layer MLP; the final ReLU makes the residual non-negative.
        self.mlp = nn.Sequential(
            nn.Linear(data_dim, hidden_dim),
            nn.ReLU(True),
            nn.Linear(hidden_dim, data_dim),
            nn.ReLU(True),
        )

    def forward(self, x):
        residual = self.mlp(x)
        return x + residual
class ResNet(nn.Module):
    """ResNet which maps data_dim dimensional points to an output_dim
    dimensional output.

    Args:
        data_dim: dimensionality of the (flattened) input.
        hidden_dim: hidden width of each residual block's MLP.
        num_layers: number of residual blocks.
        output_dim: dimensionality of the final linear head.
        is_img: if True, inputs are flattened from (B, C, H, W) to (B, C*H*W).
    """

    def __init__(self, data_dim, hidden_dim, num_layers, output_dim=1,
                 is_img=False):
        super(ResNet, self).__init__()
        residual_blocks = \
            [ResidualBlock(data_dim, hidden_dim) for _ in range(num_layers)]
        self.residual_blocks = nn.Sequential(*residual_blocks)
        self.linear_layer = nn.Linear(data_dim, output_dim)
        self.num_layers = num_layers
        self.output_dim = output_dim
        self.is_img = is_img

    def forward(self, x, return_features=False):
        """Run the trunk; if return_features, also return pre-head features."""
        if self.is_img:
            # Flatten image, i.e. (batch_size, channels, height, width) to
            # (batch_size, channels * height * width)
            features = self.residual_blocks(x.view(x.size(0), -1))
        else:
            features = self.residual_blocks(x)
        pred = self.linear_layer(features)
        if return_features:
            return features, pred
        return pred

    @property
    def hidden_dim(self):
        # BUG FIX: nn.Sequential has no `hidden_dim` attribute, so the
        # original `self.residual_blocks.hidden_dim` always raised
        # AttributeError. Read it from the first residual block instead
        # (raises IndexError when num_layers == 0, which was broken before too).
        return self.residual_blocks[0].hidden_dim
| Gurvan/GoHighFox | models.py | models.py | py | 2,203 | python | en | code | 4 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.