seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
42014841225 | # power = gamma rate * epsilon rate ( ? )
# gamma = each bit is the most common bit in column of numbers
# epsilon: least common rate, or rather the inv of gamma
import numpy as np
# Read the diagnostic report: one binary number per line.
file = 'input'
with open(file) as infile:
    lines = infile.readlines()
# Each line becomes a list of 0/1 ints; skip empty lines.
vals = [[int(v) for v in line.strip()] for line in lines if line]
# Boolean matrix: rows = report lines, columns = bit positions.
a = np.array(vals, dtype=bool)
# Per-column counts of zeros and ones (a.T iterates columns).
bincounts = [np.bincount(col) for col in a.T]
# argmax/argmin over [count_of_0, count_of_1] give the most/least common bit.
most_common_bits = [np.argmax(bins) for bins in bincounts]
# np.median(a, axis=0).astype(int)
least_common_bits = [np.argmin(bins) for bins in bincounts]
# 1-most_common_bits
print(most_common_bits)
print(least_common_bits)
def convert_bits_to_int(bits):
    """Interpret *bits* (most-significant bit first) as an unsigned integer."""
    result = 0
    for bit in bits:
        # Horner's scheme: shift the accumulated value left, add the new bit.
        result = result * 2 + bit
    return result
# Gamma is built from the most common bit per column, epsilon from the least.
gamma = convert_bits_to_int(most_common_bits)
epsilon = convert_bits_to_int(least_common_bits)
# Part one answer: power consumption = gamma * epsilon.
print(gamma, epsilon, gamma*epsilon)
# part two:
# multiplying the oxygen generator rating by the CO2 scrubber rating.
# Placeholders; both values are recomputed below from get_oxygen/get_co2.
oxygen = 0
co2 = 0
print(a.shape)
def get_part_two_val(a, get_max=True):
    """Filter the rows of boolean bit-matrix *a* (AoC 2021 day 3, part 2).

    Walks bit positions left to right, keeping only the rows whose bit in the
    current column matches the most common value (``get_max=True``, oxygen
    generator rating) or the least common value (``get_max=False``, CO2
    scrubber rating).  A 0/1 tie keeps 1 for oxygen and 0 for CO2.  Returns
    the last remaining row interpreted as an integer.
    """
    numbers = a.copy()
    bit_place = 0
    # Value kept on a tie: 1 for "most common", 0 for "least common".
    base_target = int(get_max)
    while len(numbers) > 1:
        # minlength=2 guards against all-zero columns, where np.bincount
        # would return a length-1 array and bins[1] would raise IndexError.
        bins = np.bincount(numbers[:, bit_place], minlength=2)
        if bins[0] == bins[1]:
            target = base_target
        elif get_max:
            target = np.argmax(bins)
        else:
            target = np.argmin(bins)
        numbers = numbers[numbers[:, bit_place] == target]
        bit_place += 1
    # Inline MSB-first binary-to-int conversion of the surviving row.
    return int("".join("1" if bit else "0" for bit in numbers[0]), 2)
def get_oxygen(a):
    """Oxygen generator rating: keep rows matching the most common bit."""
    return get_part_two_val(a, get_max=True)

def get_co2(a):
    """CO2 scrubber rating: keep rows matching the least common bit."""
    return get_part_two_val(a, get_max=False)

# Part two answer: oxygen rating * CO2 rating.
o2 = get_oxygen(a)
co2 = get_co2(a)
print(o2, co2, o2*co2)
| halvarsu/advent-of-code | 2021/day3/run.py | run.py | py | 1,800 | python | en | code | 0 | github-code | 13 |
14278121916 | import bpy
from mathutils import *
# Blender add-on metadata consumed by the add-on manager.
bl_info = {
    "name": "Tila : Object Duplicate",
    "author": "Tilapiatsu",
    "version": (1, 0, 0, 0),
    "blender": (2, 80, 0),
    "location": "View3D",
    "category": "Mesh",
}
class TILA_ObjectDuplicateOperator(bpy.types.Operator):
    """Duplicate the selected objects (optionally linked / with interactive
    move), temporarily switching to OBJECT mode when invoked from another mode."""
    bl_idname = "object.tila_duplicate"
    bl_label = "TILA: Duplicate Object"
    bl_options = {'REGISTER', 'UNDO'}

    # Duplicate as linked copies instead of full copies.
    linked : bpy.props.BoolProperty(name='linked', default=False)
    # Use duplicate_move (starts the interactive translate) instead of duplicate.
    move : bpy.props.BoolProperty(name='move', default=False)

    def execute(self, context):
        # NOTE(review): the original indentation was lost in this dump; the
        # OUTLINER check is reconstructed here as guarding the whole body,
        # following statement order — verify against the upstream repository.
        if context.space_data.type == 'OUTLINER':
            mode = None
            if context.mode not in ['OBJECT']:
                # Remember the current mode so it can be restored afterwards.
                mode = context.mode
                print(mode)
                bpy.ops.object.mode_set(mode='OBJECT')
            if self.move:
                bpy.ops.object.duplicate_move(linked=self.linked)
            else:
                bpy.ops.object.duplicate(linked=self.linked)
            if self.linked:
                bpy.ops.outliner.collection_instance()
            if mode is not None:
                # NOTE(review): context.mode values such as 'EDIT_MESH' are not
                # valid inputs for object.mode_set (which expects 'EDIT'), so
                # this restore may fail for non-object modes — TODO confirm.
                bpy.ops.object.mode_set(mode=mode)
        return {'FINISHED'}
classes = (TILA_ObjectDuplicateOperator,)

# Factory returns paired register/unregister callables for the class tuple.
register, unregister = bpy.utils.register_classes_factory(classes)

if __name__ == "__main__":
    register()
| Tilapiatsu/blender-custom_config | scripts/startup/tila_OP_ObjectDuplicate.py | tila_OP_ObjectDuplicate.py | py | 1,317 | python | en | code | 5 | github-code | 13 |
def saddle_points(matrix):
    """Find all saddle points of *matrix*: values that are the greatest in
    their row and the smallest in their column.

    Returns a list of ``{'row': r, 'column': c}`` dicts with 1-based indices,
    or ``[{}]`` when there are no saddle points (including the empty matrix).
    Raises ValueError for a ragged (irregular) matrix.
    """
    # All rows of a regular non-empty matrix share one length.
    if matrix and len({len(row) for row in matrix}) != 1:
        raise ValueError('Irregular matrix given')
    columns = list(zip(*matrix))
    saddles = [
        {'row': r + 1, 'column': c + 1}
        for r, row in enumerate(matrix)
        for c, value in enumerate(row)
        if value == max(row) and value == min(columns[c])
    ]
    return saddles if saddles else [{}]
| johncornflake/exercism | python/saddle-points/saddle_points.py | saddle_points.py | py | 570 | python | en | code | 0 | github-code | 13 |
42096630478 | from sys import stdout
# Interactive problem: n x n board, the judge sends one number per turn.
n = int(input())
# Two cursors scanning the board two columns at a time: `ne` covers cells
# whose coordinate sum i + j is even, `no` covers the odd-sum cells.
ne = (1, 1)
no = (1, 2)
def move_ne(b):
    """Print symbol *b* at the even-parity cursor cell and advance the cursor."""
    global ne
    row, col = ne
    print(b, row, col)
    stdout.flush()
    if row <= n and col + 2 <= n:
        # Stay on this row, two columns further right.
        ne = (row, col + 2)
    elif row + 1 <= n:
        # Wrap to the next row; the starting column keeps i + j even.
        ne = (row + 1, 1 if (row + 1) % 2 == 1 else 2)
    else:
        # Board exhausted for this parity.
        ne = None
def move_no(b):
    """Print symbol *b* at the odd-parity cursor cell and advance the cursor."""
    global no
    row, col = no
    print(b, row, col)
    stdout.flush()
    if row <= n and col + 2 <= n:
        # Stay on this row, two columns further right.
        no = (row, col + 2)
    elif row + 1 <= n:
        # Wrap to the next row; the starting column keeps i + j odd.
        no = (row + 1, 1 if (row + 1) % 2 == 0 else 2)
    else:
        # Board exhausted for this parity.
        no = None
# Answer each of the judge's n**2 moves.  Strategy (as coded): judge's 1 is
# countered with 2 on the even-parity cursor; anything else is countered with
# 1 on the odd-parity cursor; once a cursor runs out, fall back to the other
# cursor with a non-conflicting symbol.  (Exact game rules not visible here.)
for i in range(n ** 2):
    a = int(input())
    if a == 1:
        if ne is None:
            move_no(3)
        else:
            move_ne(2)
    else:
        if no is None:
            move_ne(3 if a == 2 else 2)
        else:
            move_no(1)
| keijak/comp-pub | cf/contest/1503/B/main.py | main.py | py | 893 | python | en | code | 0 | github-code | 13 |
37274100892 | __author__ = 'DafniAntotsiou'
import numpy as np
from copy import deepcopy
# native functions currently not working on windows
def get_joint_qpos(sim, name):
    """Return the qpos value(s) of joint *name*: a scalar for single-dof
    joints, a slice when the address is a (start, end) tuple."""
    addr = sim.model.get_joint_qpos_addr(name)
    if isinstance(addr, tuple):
        lo, hi = addr
        return sim.data.qpos[lo:hi]
    return sim.data.qpos[addr]
def get_joint_qvel(sim, name):
    """Return the qvel value(s) of joint *name*: a scalar for single-dof
    joints, a slice when the address is a (start, end) tuple."""
    addr = sim.model.get_joint_qvel_addr(name)
    if isinstance(addr, tuple):
        lo, hi = addr
        return sim.data.qvel[lo:hi]
    return sim.data.qvel[addr]
def set_joint_qpos(sim, name, value):
    """Write *value* into the qpos entry (or slice) of joint *name*.

    Multi-dof values must match the joint's address span.  Returns *sim*.
    """
    addr = sim.model.get_joint_qpos_addr(name)
    if isinstance(addr, tuple):
        lo, hi = addr
        arr = np.array(value)
        assert arr.shape == (hi - lo,), (
            "Value has incorrect shape %s: %s" % (name, arr))
        sim.data.qpos[lo:hi] = arr
    else:
        sim.data.qpos[addr] = value
    return sim
def get_joint_state(name, data):
    """Return deep copies of (qpos, qvel) for joint *name*, or None when the
    joint does not exist or either argument is None."""
    if name is None or data is None:
        return None
    try:
        pos = deepcopy(data.get_joint_qpos(name))
        vel = deepcopy(data.get_joint_qvel(name))
    except ValueError:
        # Joint not present in this model.
        return None
    return pos, vel
def read_npz(path):
    """Load *path* via np.load; return a plain dict for .npz archives, the
    object itself for pickled python containers, None otherwise."""
    data = np.load(path, allow_pickle=True)
    if isinstance(data, (list, dict, tuple)):
        # A pickled python container saved with np.save.
        return data
    if isinstance(data, np.lib.npyio.NpzFile):
        result = dict(data)
        data.close()
        return result
    print("incompatible type of data to unzip...")
    return None
| DaphneAntotsiou/Adversarial-Imitation-Learning-with-Trajectorial-Augmentation-and-Correction | cat_dauggi/functions.py | functions.py | py | 1,669 | python | en | code | 1 | github-code | 13 |
def longestPalindrome(s: str) -> str:
    """Longest palindromic substring via recursive shrink-from-both-ends
    search (exponential; superseded by the windowed version below)."""
    def _check(t):
        # Returns t itself (truthy) when t is a palindrome, else False.
        left, right = 0, len(t) - 1
        while left < right:
            if t[left] != t[right]:
                return False
            left += 1
            right -= 1
        return t

    if len(s) == 1 or _check(s):
        return s
    # Drop one character from either end and recurse on both halves.
    best_left = longestPalindrome(s[:-1])
    best_right = longestPalindrome(s[1:])
    # The right half's result wins ties, matching the original ordering.
    return best_left if len(best_left) > len(best_right) else best_right
# print(longestPalindrome('abcdef'))
"""
"Moving window that is expanding method"
0th iteration - is full string palindrome
1nt - is len-1 shuffled through i = 0, 1 as starting indices
2th - is len-2 shuffled through i = 0, 1, 2 indicies
nth - is len-(n-1) shuffled through
"""
def longestPalindrome(s: str) -> str:
    """Longest palindromic substring: try every window length, longest first,
    returning the earliest-starting palindrome of the longest length."""
    def _is_pal(t):
        # Two converging indices; returns t (truthy) on success, else False.
        i, j = 0, len(t) - 1
        while i < j:
            if t[i] != t[j]:
                return False
            i += 1
            j -= 1
        return t

    if len(s) == 1:
        return s
    # Window lengths from len(s) down to 2; earliest start wins.
    for length in range(len(s), 1, -1):
        for start in range(len(s) - length + 1):
            window = s[start:start + length]
            if _is_pal(window):
                return window
    # No palindrome of length >= 2: any single character qualifies.
    return s[0]
print(longestPalindrome('aaaddaaaaf')) | Hintzy/leetcode | Medium/5_longest_palindrome_substring/longest_palindrome_substring.py | longest_palindrome_substring.py | py | 1,645 | python | en | code | 0 | github-code | 13 |
2869735068 | import numpy as np
import pickle
import numpy.random as npr
import os
import sys
import argparse
import tensorflow as tf
import traceback
#my imports
from pddm.utils.helper_funcs import create_env
from pddm.utils.helper_funcs import get_gpu_config
from pddm.policies.policy_random import Policy_Random
from pddm.utils.loader import Loader
from pddm.regressors.dynamics_model import Dyn_Model
from pddm.policies.mpc_rollout import MPCRollout
from pddm.utils.data_structures import *
import pddm.envs
def run_eval(args, save_dir):
    """Run MPC evaluation rollouts for a saved PDDM training iteration.

    Restores the dynamics-model checkpoint ``model_aggIter<iter_num>`` from
    *save_dir*, performs ``args.num_eval_rollouts`` MPC rollouts in the
    configured environment, pickles all rollouts to
    ``saved_rollouts/rollouts_eval.pickle`` and prints reward/score stats.
    """
    ##########################
    ## params
    ##########################

    ### read in params from saved config file
    paramfile = open(save_dir + '/params.pkl', 'rb')
    params = pickle.load(paramfile)

    ### can manually set some options here, for these eval runs (to override options from training)
    # params.kappa = 1
    # params.horizon = 20
    # params.mppi_beta = 0.6

    #overwrite config's value with the commandline arg value
    params.use_ground_truth_dynamics = args.use_ground_truth_dynamics

    #if run length wasn't specified in args, default to config file's value
    if args.eval_run_length==-1:
        args.eval_run_length = params.rollout_length

    ##########################
    ## other initializations
    ##########################

    ### set seeds
    npr.seed(args.seed)
    tf.set_random_seed(args.seed)

    #loader and data processor
    loader = Loader(save_dir)

    #env, rand policy
    env, dt_from_xml = create_env(params.env_name)
    random_policy = Policy_Random(env.env)

    #load data from the iteration (for plotting)
    iter_data = loader.load_iter(args.iter_num)
    trainingLoss_perIter = iter_data.training_losses
    rew_perIter = iter_data.rollouts_rewardsPerIter
    scores_perIter = iter_data.rollouts_scoresPerIter
    trainingData_perIter = iter_data.training_numData

    #mean/std info
    normalization_data = iter_data.normalization_data

    ### data dims (z: model outputs, y: actions, x: observations)
    outputSize = normalization_data.mean_z.shape[0]
    acSize = normalization_data.mean_y.shape[0]
    inputSize = normalization_data.mean_x.shape[0] + acSize

    with tf.Session(config=get_gpu_config(args.use_gpu, args.gpu_frac)) as sess:

        ##############################################
        ### dynamics model + controller
        ##############################################

        dyn_models = Dyn_Model(inputSize, outputSize, acSize, sess, params=params)

        mpc_rollout = MPCRollout(
            env,
            dyn_models,
            random_policy,
            execute_sideRollouts=args.execute_sideRollouts,
            plot_sideRollouts=True,
            params=params)

        ##############################################
        ### restore the saved dynamics model
        ##############################################

        #restore model
        sess.run(tf.global_variables_initializer())
        restore_path = save_dir + '/models/model_aggIter' + str(
            args.iter_num) + '.ckpt'
        saver = tf.train.Saver(max_to_keep=0)
        saver.restore(sess, restore_path)
        print("\n\nModel restored from ", restore_path, "\n\n")

        #restore mean/std
        dyn_models.normalization_data = normalization_data

        ################################
        ########### RUN ROLLOUTS
        ################################

        list_rewards = []
        list_scores = []
        rollouts = []

        for rollout_num in range(args.num_eval_rollouts):

            # Note: if you want to evaluate a particular goal, call env.reset with a reset_state
            # where that reset_state dict has reset_pose, reset_vel, and reset_goal
            starting_observation, starting_state = env.reset(return_start_state=True)

            if not params.print_minimal:
                print("\n############# Performing MPC rollout #", rollout_num)

            mpc_rollout.rollout_length = args.eval_run_length
            rollout_info = mpc_rollout.perform_rollout(
                starting_state,
                starting_observation,
                controller_type=params.controller_type,
                take_exploratory_actions=False)

            #save info from MPC rollout
            list_rewards.append(rollout_info['rollout_rewardTotal'])
            list_scores.append(rollout_info['rollout_meanFinalScore'])
            rollouts.append(rollout_info)

        #save all eval rollouts
        pickle.dump(
            rollouts,
            open(save_dir + '/saved_rollouts/rollouts_eval.pickle', 'wb'),
            protocol=pickle.HIGHEST_PROTOCOL)
        print("REWARDS: ", list_rewards, " .... mean: ", np.mean(list_rewards), " std: ", np.std(list_rewards))
        print("SCORES: ", list_scores, " ... mean: ", np.mean(list_scores), " std: ", np.std(list_scores), "\n\n")
def main():
    """Parse command-line options and evaluate the job under --job_path."""
    #############################
    ## vars to specify for eval
    #############################

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--job_path', type=str,
        default='../output/cheetah') #address this WRT working directory
    parser.add_argument('--iter_num', type=int, default=0)
    parser.add_argument('--num_eval_rollouts', type=int, default=3)
    parser.add_argument('--eval_run_length', type=int, default=-1)
    parser.add_argument('--gpu_frac', type=float, default=0.9)
    parser.add_argument('--use_ground_truth_dynamics', action="store_true")
    parser.add_argument('--execute_sideRollouts', action="store_true")
    parser.add_argument('--use_gpu', action="store_true")
    parser.add_argument('--seed', type=int, default=0)
    args = parser.parse_args()

    #directory to load from
    save_dir = os.path.abspath(args.job_path)
    assert os.path.isdir(save_dir)

    ##########################
    ## run evaluation
    ##########################

    try:
        run_eval(args, save_dir)
    except (KeyboardInterrupt, SystemExit):
        print('Terminating...')
        sys.exit(0)
    except Exception as e:
        print('ERROR: Exception occured while running a job....')
        traceback.print_exc()


if __name__ == '__main__':
    main()
| google-research/pddm | pddm/scripts/eval_iteration.py | eval_iteration.py | py | 6,154 | python | en | code | 89 | github-code | 13 |
22040633583 | import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import time
import pg8000
import boto3
import re
from decimal import *
import extract_rs_query_logs_functions as functions
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['TempDir', 'JOB_NAME','REGION','CLUSTER_ENDPOINT'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# Job configuration: CLI args plus per-cluster settings keyed by cluster id.
job_configs={}
job_configs.update(args)
# Cluster id is the host portion of the Redshift JDBC endpoint.
clusterId= re.search('jdbc:redshift://(.+?)\..*',args['CLUSTER_ENDPOINT']).group(1)
job_configs.update(functions.getJobConfigurations(clusterId,job_configs))
job_configs['CLUSTER_ID']=clusterId
tempDir=args['TempDir']
s3Prefix=job_configs['s3_prefix']
# NOTE(review): the session credentials below are copied into job_configs and
# handed to helper functions — ensure they are never logged or persisted.
credentials=boto3.Session().get_credentials()
job_configs['aws_access_key_id'] = credentials.access_key
job_configs['aws_secret_access_key'] = credentials.secret_key
job_configs['aws_session_token'] = credentials.token
job_configs.update(args)
job_configs['jdbcURL']="{}?user={}&password={}".format(args['CLUSTER_ENDPOINT'],job_configs['user'],job_configs['password'])
job_configs['region_name']=boto3.session.Session().region_name
job_configs['spark_session']=spark
# Table-driven extraction replaces eight near-identical copy/paste sections.
# Each spec is (source table, watermark table, SQL template, timestamp column
# used to advance the watermark — None for tables that filter on the
# stl_query watermark without advancing it).
#
# Order matters: stl_querytext and stl_explain must run BEFORE stl_query,
# because they filter on the stl_query watermark from the previous run and
# the stl_query step advances it.
EXTRACT_SPECS = [
    ("stl_querytext", "stl_query",
     "select '{}' as clusterId,trunc(a.starttime) as startDate,b.* from stl_querytext b , stl_query a where a.query=b.query and a.endtime > '{}'",
     None),
    ("stl_explain", "stl_query",
     "select '{}' as clusterId,trunc(a.starttime) as startDate,b.* from stl_explain b , stl_query a where a.query=b.query and a.endtime > '{}'",
     None),
    ("stl_query", "stl_query",
     "select '{}' as clusterId,trunc(starttime) as startDate,* from stl_query where endtime > '{}'",
     "endtime"),
    ("stl_ddltext", "stl_ddltext",
     "select '{}' as clusterId,trunc(starttime) as startDate,* from stl_ddltext where endtime > '{}'",
     "endtime"),
    ("stl_utilitytext", "stl_utilitytext",
     "select '{}' as clusterId,trunc(starttime) as startDate,* from stl_utilitytext where endtime > '{}'",
     "endtime"),
    ("stl_alert_event_log", "stl_alert_event_log",
     "select '{}' as clusterId,trunc(event_time) as startDate,* from stl_alert_event_log where event_time > '{}'",
     "event_time"),
    ("stl_scan", "stl_scan",
     "select '{}' as clusterId,trunc(starttime) as startDate,* from stl_scan where endtime > '{}'",
     "endtime"),
    ("stl_wlm_query", "stl_wlm_query",
     "select '{}' as clusterId,trunc(queue_start_time) as startDate,* from stl_wlm_query where queue_end_time > '{}'",
     "queue_end_time"),
]

for tableName, watermarkTable, queryTemplate, tsColumn in EXTRACT_SPECS:
    trackingEntry = clusterId + "_" + watermarkTable
    # Incremental load: only rows newer than the last processed timestamp.
    lastProcessedTS = functions.getLastProcessedTSValue(trackingEntry, job_configs)
    returnDF = functions.runQuery(queryTemplate.format(clusterId, lastProcessedTS),
                                  tableName, job_configs)
    functions.saveToS3(returnDF, s3Prefix, tableName, ["clusterid", "startdate"], job_configs)
    if tsColumn is not None:
        # Advance the watermark to the newest timestamp we just exported.
        latestTimestampVal = functions.getMaxValue(returnDF, tsColumn, job_configs)
        functions.updateLastProcessedTSValue(trackingEntry, latestTimestampVal[0], job_configs)

job.commit()
| aws-samples/aws-big-data-blog | aws-blog-retain-redshift-stl/scripts/extract_rs_query_logs.py | extract_rs_query_logs.py | py | 5,977 | python | en | code | 895 | github-code | 13 |
5416581537 | from datetime import timedelta
from aiogram import types, F, Router, Dispatcher
from aiogram.filters import Command, CommandStart
from db import add_new_user, add_message_to_queue, get_last_time, get_timeleft
from aiogram.fsm.context import FSMContext
from aiogram.fsm.state import State, StatesGroup
from aiogram.types import (
KeyboardButton,
Message,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
InlineKeyboardMarkup,
InlineKeyboardButton,
CallbackQuery,
)
import config
def get_start_keyboard():
    """Build the inline keyboard shown in reply to /start."""
    buttons = [
        [
            types.InlineKeyboardButton(text="Опубликовать", callback_data="start_post"),
            types.InlineKeyboardButton(text="Информация", callback_data="start_info"),
        ],
        # NOTE(review): this button reuses callback_data="start_info", so it
        # triggers the same handler as the "Информация" button; a channel
        # url= link was probably intended here — TODO confirm.
        [types.InlineKeyboardButton(text="Перейти в канал", callback_data="start_info")],
    ]
    keyboard = types.InlineKeyboardMarkup(inline_keyboard=buttons)
    return keyboard
class Form(StatesGroup):
    """FSM states for the publish flow (declared; not wired to handlers here)."""
    # User pressed "publish" and the bot awaits the content.
    start_public = State()
    # Content received, ready to send.
    send_message = State()


router = Router()
dp = Dispatcher()
@router.message(CommandStart())
async def start_handler(
    msg: Message,
):
    """Greet the user on /start and register them in the database."""
    await msg.answer(config.HELLO, reply_markup=get_start_keyboard())
    add_new_user(msg.from_user.id)
@router.callback_query(F.data.startswith("start_post"))
async def post_start_handler(msg: Message):
    """Respond to the "Опубликовать" inline button with publication timing."""
    # NOTE(review): this is a callback_query handler, so `msg` is actually a
    # CallbackQuery despite the Message annotation; `.answer(...)` on it shows
    # a toast notification rather than sending chat messages — presumably
    # `msg.message.answer` was intended. TODO confirm.
    await msg.answer(f"Время последней публикации: {get_last_time(msg.from_user.id)}")
    await msg.answer(
        (
            "Осталось времени до новой публикации:"
            f" {str(timedelta(seconds=get_timeleft(msg.from_user.id)))}"
        ),
    )
@router.message(Command("info"))
async def info_handler(msg: Message):
    """/info command: report the last publication time and remaining cooldown.

    Body duplicates post_start_handler's messages (same timing info).
    """
    await msg.answer(f"Время последней публикации: {get_last_time(msg.from_user.id)}")
    await msg.answer(
        (
            "Осталось времени до новой публикации:"
            f" {str(timedelta(seconds=get_timeleft(msg.from_user.id)))}"
        ),
    )
@router.message()
async def message_handler(msg: Message):
    """Catch-all handler: queue any other incoming message for publication."""
    await msg.answer(f"Публикация. юзер-ID: {msg.from_user.id}")
    add_message_to_queue(msg, publish=True)
| Vivatist/SendNewsBot | handlers.py | handlers.py | py | 2,260 | python | en | code | 0 | github-code | 13 |
39357266394 | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, RandomizedSearchCV, cross_val_score
import numpy as np
import pandas as pd
import time
from nextbike.io.output import __save_model, __save_prediction
from nextbike.io.input import __read_model
# Module-level accumulators: __get_result()/explore()/predict_trip_label()
# append one entry per evaluated model.
algorithm, precision, recall, f1score, support, exetime, desc = [], [], [], [], [], [], []
# Column-name -> accumulator mapping (live views, handy for DataFrame export).
dic = {
    'algorithm': algorithm,
    'precision': precision,
    'recall': recall,
    'f1score': f1score,
    "support": support,
    "exetime": exetime,
    "desc": desc
}
def __init__(df):
    """Split *df* into train/test sets and standard-scale the features.

    Features: weekend, hour, distanceToUniversity, month, area_start;
    target: tripLabel.  Returns a dict holding the full data plus raw and
    scaled train/test splits.
    """
    y = df['tripLabel']
    X = df[['weekend', 'hour', 'distanceToUniversity', 'month', 'area_start']]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    # Fit the scaler on the training split only and reuse it for the test
    # split — the previous re-fit on X_test leaked test-set statistics.
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    return {
        'X': X,
        'y': y,
        'X_train': X_train,
        'X_test': X_test,
        'y_train': y_train,
        'y_test': y_test,
        'X_train_scaled': X_train_scaled,
        'X_test_scaled': X_test_scaled
    }
def train(init, value=False):
    """Train the destination RandomForest on the train split and persist it.

    When *value* is True, additionally evaluates on the test split and
    reports 10-fold cross-validation scores.
    """
    print('init training parameters')
    mod = RandomForestClassifier(n_estimators=1000, min_samples_split=2, min_samples_leaf=1, max_depth=10,
                                 max_features='auto', bootstrap=False)
    print('training started')
    mod.fit(init['X_train'], init['y_train'])
    print('training done')
    __save_model(mod, 'destination_model')
    print('model saved under data/output/destination_model.pkl')
    if value is True:
        y_pred = mod.predict(init['X_test'])
        rfc_cv_score = cross_val_score(mod, init['X'], init['y'], cv=10)
        __get_result(rfc_cv_score, init['y_test'], y_pred)
        # f-string instead of str + float concatenation, which raised
        # TypeError ('10 fold...' + np.mean(...)) whenever value was True.
        print(f"10 fold cross validation {np.mean(cross_val_score(mod, init['X_train'], init['y_train'], cv=10))}")
def explore(typ, init, df):
    """Train/evaluate a default RandomForest on one of the complement targets.

    typ: 'away'    -> predict the awayFromUniversity label,
         'towards' -> predict the towardsUniversity label.
    Appends metrics, timing and a description to the module accumulators.
    Raises Exception for any other *typ*.
    """
    if typ not in ['away', 'towards']:
        raise Exception('typ has to be either away or towards')
    # The two original branches were identical except for the target column
    # and the description string — consolidated here.
    if typ == 'away':
        target_col = 'awayFromUniversity'
        label = "Predicts awayFromUniversity (complement)"
    else:
        target_col = 'towardsUniversity'
        label = "Predicts towardsUniversity (complement)"
    y_target = df[target_col]
    X_train, X_test, y_train, y_test = train_test_split(init['X'], y_target, test_size=0.3)
    start = time.time()
    mod = RandomForestClassifier()
    mod.fit(X_train, y_train)
    y_pred = mod.predict(X_test)
    end = time.time()
    rfc_cv_score = cross_val_score(mod, init['X'], y_target, cv=10)
    __get_result(rfc_cv_score=rfc_cv_score, y_test=y_test, y_pred=y_pred)
    algorithm.append("Random Forrest")
    exetime.append((end - start))
    desc.append(label)
def predict(df_trips, df_test):
    """Load the persisted destination model and evaluate it on July data.

    df_trips supplies the training split (for an overfitting sanity check),
    df_test the held-out July set.  Predictions are saved via
    __save_prediction to data/output/destination_prediction.csv.
    """
    X_train = df_trips[['weekend', 'hour', 'distanceToUniversity', 'month', 'area_start']]
    y_train = df_trips['tripLabel']
    # using the received test set as the test set (data for July)
    X_test = df_test[['weekend', 'hour', 'distanceToUniversity', 'month', 'area_start']]
    y_test = df_test['tripLabel']
    mod = __read_model('destination_model')
    print('trained model successfully imported')
    pred = mod.predict(X_test)
    pred_train = mod.predict(X_train)
    df_pred = pd.DataFrame(pred, columns=['predictions'])
    __save_prediction(df_pred, 'destination_prediction')
    print('predict values saved under data/output/destination_prediction.csv')
    print("=== Classification Report ===")
    print(classification_report(y_test, pred))
    print('\n')
    # Same report on the training split to gauge overfitting.
    print("=== Classification Report ===")
    print(classification_report(y_train, pred_train))
    print('\n')
def predict_trip_label(init, mod=None):
    """Fit *mod* on the train split and record evaluation metrics.

    mod defaults to a fresh RandomForestClassifier.  (The previous default
    of ``mod=RandomForestClassifier()`` was built once at import time and
    shared — already fitted — across all no-argument calls; ``None`` avoids
    that mutable-default pitfall while keeping callers unchanged.)
    """
    if mod is None:
        mod = RandomForestClassifier()
    start = time.time()
    mod.fit(init['X_train'], init['y_train'])
    y_pred = mod.predict(init['X_test'])
    end = time.time()
    rfc_cv_score = cross_val_score(mod, init['X'], init['y'], cv=10)
    __get_result(rfc_cv_score=rfc_cv_score, y_test=init['y_test'], y_pred=y_pred)
    algorithm.append("Random Forrest")
    exetime.append((end - start))
    # Tag the run with which hyper-parameter configuration was used.
    if mod.n_estimators == 1000:
        desc.append("Optimized hyperparameters of model in index 0")
    elif mod.n_estimators == 400:
        desc.append("Optimized hyperparameters of model in index 2")
    elif mod.n_estimators == 200:
        desc.append("Optimized hyperparameters of model in index 4")
    else:
        desc.append("Predicts tripLabel")
def optimize_hyper_parameters_random_forest(init):
    """Randomized hyper-parameter search for the RandomForest trip model.

    Samples 100 candidate configurations with 3-fold CV and prints the best
    parameter set (used to hard-code the values in train()).
    """
    X_train, X_test, y_train, y_test = train_test_split(init['X'], init['y'], test_size=0.3)
    # random forest model creation
    rfc = RandomForestClassifier()
    # Number of trees in random forest
    n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
    # Number of features to consider at every split
    max_features = ['auto', 'sqrt']
    # Maximum number of levels in tree
    max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
    max_depth.append(None)
    # Minimum number of samples required to split a node
    min_samples_split = [2, 5, 10]
    # Minimum number of samples required at each leaf node
    min_samples_leaf = [1, 2, 4]
    # Method of selecting samples for training each tree
    bootstrap = [True, False]
    # Create the random grid
    random_grid = {'n_estimators': n_estimators,
                   'max_features': max_features,
                   'max_depth': max_depth,
                   'min_samples_split': min_samples_split,
                   'min_samples_leaf': min_samples_leaf,
                   'bootstrap': bootstrap}
    # Random search of parameters
    rfc_random = RandomizedSearchCV(estimator=rfc, param_distributions=random_grid, n_iter=100, cv=3, verbose=2,
                                    random_state=42, n_jobs=-1)
    # Fit the model
    rfc_random.fit(X_train, y_train)
    # print results
    print(rfc_random.best_params_)
def __get_result(rfc_cv_score, y_test, y_pred):
    """Print evaluation reports and append weighted metrics to the module
    accumulators (precision, recall, f1score, support)."""
    pre, rec, f_sc, sup = score(y_test, y_pred, average='weighted')
    for bucket, metric in ((precision, pre), (recall, rec),
                           (f1score, f_sc), (support, sup)):
        bucket.append(metric)
    print("=== Confusion Matrix ===")
    print(confusion_matrix(y_test, y_pred))
    print('\n')
    print("=== Classification Report ===")
    print(classification_report(y_test, y_pred))
    print('\n')
    print("=== All AUC Scores ===")
    print(rfc_cv_score)
    print('\n')
    print("=== Mean AUC Score ===")
    print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
| ey96/DataScienceBikesharing | nextbike/model/classification/random_forest_class.py | random_forest_class.py | py | 7,381 | python | en | code | 0 | github-code | 13 |
32215017700 | import json
import os
from collections import Counter
import matplotlib.pyplot as plt
import requests
from bs4 import BeautifulSoup
from scipy.stats import norm
url = "https://ctftime.org/event/2040"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36"
}
scoreboard = requests.get(url, headers=headers)
soup = BeautifulSoup(scoreboard.content, "html.parser")
points_html = soup.find_all("td", class_="points")
points = [float(p.text) for p in points_html]
# If a json scoreboard file exists, it takes precedence over the ctftime scoreboard
json_path = "C:/Users/janni/Downloads/scoreboard_enowars7/scoreboard478.json"
if os.path.exists(json_path):
print("Using local scoreboard\n")
with open(json_path, "r") as f:
data = json.load(f)
teams = data["teams"]
attack_points = dict()
for team in teams:
team_name = team["teamName"]
team_attack_points = team["attackScore"]
attack_points[team_name] = team_attack_points
points = sorted([float(p) for p in list(attack_points.values())])
MEAN = sum(points) / len(points)
STANDARD_DEVIATION = (sum([(p - MEAN) ** 2 for p in points]) / len(points)) ** 0.5
plt.hist(points, bins=100)
plt.title("Attack points distribution")
plt.show()
print(f"Normal Distribution:\nStandard deviation: {STANDARD_DEVIATION}\nMean: {MEAN}\n")
plt.plot(points, norm.pdf(points, MEAN, STANDARD_DEVIATION))
plt.title("Attempting to fit a normal distribution")
plt.show()
POINTS_PER_FLAG = 1
PARTICIPATING_TEAMS = len(points)
TOTAL_FLAGSTORES = 10  # in enowars7 there were 6 services with a total of 10 flagstores
TOTAL_ROUNDS = 8 * 60  # 8 hours with one round per minute
# A team that exploits one flagstore for one round steals from every other team.
POINTS_PER_ROUND_PER_FLAGSTORE = (PARTICIPATING_TEAMS - 1) * POINTS_PER_FLAG
# points is sorted ascending, so the last entry is the top score.
HIGH_SCORE = points[-1]
# these values represent the percentage of achieved points compared to the mean score of all teams
# (bracket midpoints under the normal-distribution model, clamped at 0)
NOOB_AVERAGE_POINTS_NORM = max(
    (MEAN - 3 * STANDARD_DEVIATION + MEAN - 2 * STANDARD_DEVIATION) / 2, 0
)
BEGINNER_AVERAGE_POINTS_NORM = max(
    (MEAN - 2 * STANDARD_DEVIATION + MEAN - 1 * STANDARD_DEVIATION) / 2, 0
)
INTERMEDIATE_AVERAGE_POINTS_NORM = max(
    (MEAN - 1 * STANDARD_DEVIATION + MEAN + 1 * STANDARD_DEVIATION) / 2, 0
)
ADVANCED_AVERAGE_POINTS_NORM = max(
    (MEAN + 1 * STANDARD_DEVIATION + MEAN + 2 * STANDARD_DEVIATION) / 2, 0
)
PROFESSIONAL_AVERAGE_POINTS_NORM = max(
    (MEAN + 3 * STANDARD_DEVIATION + MEAN + 2 * STANDARD_DEVIATION) / 2, 0
)
# Bracket midpoints under the percentage-of-high-score model.
NOOB_AVERAGE_POINTS = max((0 * HIGH_SCORE + 0.2 * HIGH_SCORE) / 2, 0)
BEGINNER_AVERAGE_POINTS = max((0.2 * HIGH_SCORE + 0.4 * HIGH_SCORE) / 2, 0)
INTERMEDIATE_AVERAGE_POINTS = max((0.4 * HIGH_SCORE + 0.6 * HIGH_SCORE) / 2, 0)
ADVANCED_AVERAGE_POINTS = max((0.6 * HIGH_SCORE + 0.8 * HIGH_SCORE) / 2, 0)
PROFESSIONAL_AVERAGE_POINTS = max((0.8 * HIGH_SCORE + 1 * HIGH_SCORE) / 2, 0)
def points_to_exp_normal_dist(score):
    """Map a score to an experience bracket by standard deviations from MEAN.

    Brackets are half-open (lo, hi]; anything at or below MEAN - 2*sd is NOOB.
    """
    sd = STANDARD_DEVIATION
    if score > MEAN + 2 * sd:
        return "PROFESSIONAL"
    if score > MEAN + 1 * sd:
        return "ADVANCED"
    if score > MEAN - 1 * sd:
        return "INTERMEDIATE"
    if score > MEAN - 2 * sd:
        return "BEGINNER"
    return "NOOB"
def points_to_exp_percent_max(score):
    """Map a score to an experience bracket as a fraction of HIGH_SCORE.

    Brackets are half-open (lo, hi]; anything at or below 20% is NOOB.
    """
    if score > 0.8 * HIGH_SCORE:
        return "PROFESSIONAL"
    if score > 0.6 * HIGH_SCORE:
        return "ADVANCED"
    if score > 0.4 * HIGH_SCORE:
        return "INTERMEDIATE"
    if score > 0.2 * HIGH_SCORE:
        return "BEGINNER"
    return "NOOB"
# Bucket every team into an experience bracket under both models.
team_distribution_normal = Counter([points_to_exp_normal_dist(p) for p in points])
team_distribution_percent_max = Counter([points_to_exp_percent_max(p) for p in points])
total_teams = len(points)
# Per-bracket counts for the summary tables printed below.
noob_teams_normal = team_distribution_normal["NOOB"]
beginner_teams_normal = team_distribution_normal["BEGINNER"]
intermediate_teams_normal = team_distribution_normal["INTERMEDIATE"]
advanced_teams_normal = team_distribution_normal["ADVANCED"]
professional_teams_normal = team_distribution_normal["PROFESSIONAL"]
noob_teams_percent_max = team_distribution_percent_max["NOOB"]
beginner_teams_percent_max = team_distribution_percent_max["BEGINNER"]
intermediate_teams_percent_max = team_distribution_percent_max["INTERMEDIATE"]
advanced_teams_percent_max = team_distribution_percent_max["ADVANCED"]
professional_teams_percent_max = team_distribution_percent_max["PROFESSIONAL"]
def exploit_probability(points_from_exploiting):
    """Estimate the chance (in percent) that a team exploits in any one round.

    Spreads the exploit points evenly over every flagstore, converts the
    per-flagstore points into the number of full-scoring rounds they
    represent, and expresses that as a share of TOTAL_ROUNDS.
    """
    points_per_flagstore = points_from_exploiting / TOTAL_FLAGSTORES
    rounds_to_reach_points = points_per_flagstore / POINTS_PER_ROUND_PER_FLAGSTORE
    # Renamed the result variable: the original shadowed the function name.
    probability = rounds_to_reach_points / TOTAL_ROUNDS
    return probability * 100
def _print_distribution_table(title, counts, averages):
    """Print one experience-distribution table with aligned columns.

    *counts* and *averages* are per-bracket sequences ordered Noob ->
    Professional.  Replaces two near-identical hand-space-aligned print
    blocks; alignment now comes from format specs instead of literal runs
    of spaces, so the two tables can never drift apart.
    """
    print(title)
    lines = [f"{'EXPERIENCE':<15}{'NUMBER OF TEAMS':<25}{'PERCENTAGE':<20}"
             f"{'EXPLOIT PROBABILITY':<22}{'AVERAGE POINTS':<20}"]
    labels = ("Noob", "Beginner", "Intermediate", "Advanced", "Professional")
    for label, count, avg in zip(labels, counts, averages):
        lines.append(
            f"{label:<15}{count:<20}{100 * (count / total_teams):>10.2f}% "
            f"{exploit_probability(avg):>20.2f}% {avg:>15.2f}"
        )
    print("\n".join(lines) + "\n")


_print_distribution_table(
    "Normal distribution:",
    (noob_teams_normal, beginner_teams_normal, intermediate_teams_normal,
     advanced_teams_normal, professional_teams_normal),
    (NOOB_AVERAGE_POINTS_NORM, BEGINNER_AVERAGE_POINTS_NORM,
     INTERMEDIATE_AVERAGE_POINTS_NORM, ADVANCED_AVERAGE_POINTS_NORM,
     PROFESSIONAL_AVERAGE_POINTS_NORM),
)
_print_distribution_table(
    "Percent max distribution:",
    (noob_teams_percent_max, beginner_teams_percent_max,
     intermediate_teams_percent_max, advanced_teams_percent_max,
     professional_teams_percent_max),
    (NOOB_AVERAGE_POINTS, BEGINNER_AVERAGE_POINTS, INTERMEDIATE_AVERAGE_POINTS,
     ADVANCED_AVERAGE_POINTS, PROFESSIONAL_AVERAGE_POINTS),
)
| ashiven/enosimulator | util/scoreboard_analysis.py | scoreboard_analysis.py | py | 7,718 | python | en | code | 0 | github-code | 13 |
23301915315 | """
--------------------------
chipFish, part of chipFish
(c) 2008-2019 oAxiom
--------------------------
Main app entry
NOTES:
------
"""
import sys, os, copy, re, shlex
import opt, gDraw
from glbase_wrapper import glload, location, format, flat_track, genelist, genome_sql
from error import *
from genome_data import genomes
quote_string_splitter = re.compile(r'''((?:[^;"']|"[^"]*"|'[^']*')+)''')
class app():
    """
    Inheritence was split off a while ago.

    Main application object for chipFish: owns a gDraw drawing surface and
    binds data tracks (flats, beds, genomes, ...) onto it from a tracklist
    spec file.
    """
    def __init__(self, genome=None):
        """
        Any preamble set-up
        """
        # NOTE(review): the *genome* argument is currently ignored; the
        # attribute is always initialised to None.
        self.current_genome = None
    def __do_options_per_track(self, unsplit_line):
        """
        sort out the options. returns a dictionary of option:value pairs
        options should be a string containing option=value. Several options can be passed.

        Values are returned as strings with surrounding quotes stripped; the
        split regex keeps quoted values (which may contain spaces) intact.
        """
        s = re.split(r', (?=(?:"[^"]*?(?: [^"]*)*))|, (?=[^" ]+(?:,|$))', unsplit_line)# (unsplit_line, posix=False)
        res = {}
        for item in s:
            # Each item is expected to look like key=value; malformed items
            # without '=' would raise IndexError here.
            item = item.split('=')
            k = item[0]
            v = item[1].strip('"').strip("'")
            res[k] = v
        return res
    def __do_options(self, options, mode):
        """
        sort out the options. returns a dictionary of option:value pairs
        options should be a string containing option=value. Several options can be passed.
        mode will set default options for the track type

        Unlike __do_options_per_track, values are coerced to float/int/bool
        where possible; anything uncoercible is kept as a string literal.
        """
        if "=" not in options:
            return({}) # No valid parseable options
        # Standardise input string:
        s = options.strip(" ").replace("\t", " ").replace(" ", " ").replace("\n", "").replace("\r", "")
        t = s.split(" ")
        res = {}
        for i in t:
            if "=" in i: # ignore mangled options
                opt, val = i.split("=")
                try: # try to cooerce ints and floats and bools
                    if "." in i.split("=")[1]: # try float
                        res[i.split("=")[0]] = float(i.split("=")[1])
                    elif val == "True":
                        res[opt] = True
                    elif val == "False":
                        res[opt] = False
                    else:
                        res[i.split("=")[0]] = int(i.split("=")[1])
                except ValueError:
                    res[i.split("=")[0]] = i.split("=")[1] # String literal
        return res
    def startup(self, tracklist=None):
        """
        startup the server.

        Use this to get started proper and change genomes etc.

        **Arguments**
            tracklist
                A text file containing the path to the track files

                Not Implemented
        """
        self.draw = gDraw.gDraw()

        if tracklist: # Although it doesn't really make much sense not to supply a tracklist
            oh = open(tracklist, "rt")
            track_path = os.path.dirname(tracklist)
            mode = None
            for line in oh:
                if not line.strip(): # tolerate empty lines in spec sheet
                    continue
                if "#" in line[0]:
                    # Comment line (first character is '#').
                    continue

                if ":" in line:
                    # A section header: switches the parser into a new track mode.
                    # The order of the following modes is important - for example, "bed:" will also
                    # collect "macs_bed" so "macs_bed" must go first.
                    # There can only be one mode
                    # Gods, just do strip() and regex it!?!?!!!!
                    if "kde_track:" in line: # Must go before "track"
                        mode = "kde_track"
                    elif "split_track" in line:
                        mode = "split_track"
                    elif "track:" in line:
                        mode = "track"
                    elif "macs_bed:" in line: # must go before bed
                        mode = "macs_bed"
                    elif "bed:" in line:
                        mode = "bed"
                    elif "genome:" in line:
                        mode = "genome"
                    elif "flat:" in line:
                        mode = "flat"
                    elif 'genome_sql:' in line:
                        mode = 'genome_sql'
                    elif 'repeat_sql:' in line:
                        mode = 'repeat_sql'
                    else:
                        raise ErrorUnrecognisedTrackMode(mode)

                    # process options
                    # Section-level options apply to every following track line.
                    options = self.__do_options(line.split(":")[-1], mode)

                elif mode:
                    # A track line under the current section header.
                    path = os.path.expanduser(track_path)
                    tt = line.strip().split()
                    name = os.path.expanduser(tt[0])
                    track_opts = {}
                    if len(tt) > 1:
                        track_opts = self.__do_options_per_track(tt[1])

                    if "abs_path" in options and options["abs_path"] == "True":
                        tail, head = os.path.split(name)
                        # The path will be relative to the path, not relative to chipFish. Which could be anywhere
                        #
                        path = os.path.expanduser(os.path.normpath(os.path.join(track_path, tail)))
                        name = head
                        #print path, name

                    if name:
                        # Merge section options with per-track overrides
                        # (deepcopy so per-track values never leak between tracks).
                        options = copy.deepcopy(options)
                        options.update(track_opts)
                        per_track_options = {}
                        if len(tt) > 1 and '=' in line: # see if it has valid per track options;
                            per_track_options = self.__do_options_per_track(' '.join(tt[1:]))
                        label = None
                        if 'label' in per_track_options:
                            label = per_track_options['label']
                        if mode == "flat":
                            self.draw.bindTrack(flat_track(filename=os.path.join(path, name)), track_type="graph", options=options, label=label)
                        elif mode == "bed":
                            self.draw.bindTrack(genelist(filename=os.path.join(path, name), format=format.bed), options=options, label=label)
                        elif mode == "macs_bed":
                            f = format.minimal_bed
                            f["skiplines"] = 1 # Changes in recent version of macs bed.
                            self.draw.bindTrack(genelist(filename=os.path.join(path, name), format=f), options=options, label=label)
                        elif mode == "genome": # must be a glb
                            # First see if I can get it out of the pre-packaged genomes:
                            try:
                                g = genomes()
                                if name in g:
                                    print(name)
                                    self.draw.bindTrack(g.get_genome(name))
                                    continue
                            except AssertionError:
                                # Okay, that did'nt work. see If I can get a file in this dir:
                                self.draw.bindTrack(glload(os.path.join(path, name)))
                                continue
                            # Okay, assume the user knows what they are doing and just grab the file they asked for:
                            self.draw.bindTrack(glload(name), track_type="genome")
                        elif mode == "repeat_sql":
                            self.draw.bindTrack(genome_sql(filename=os.path.join(path, name)), track_type='repeats')
                        elif mode == "genome_sql":
                            self.draw.bindTrack(genome_sql(filename=os.path.join(path, name)), track_type='genome_sql')
            oh.close()

        self.draw.setViewPortSize(opt.draw.view_port_width)
        self.draw.setLocation(loc=location(loc="chr1:172724244-172859108")) # Load a dummy location, my favourite gene Stat3
| oaxiom/chipFish | app.py | app.py | py | 7,967 | python | en | code | 1 | github-code | 13 |
24993051153 | from math import log2
hexa = "0123456789ABCDEF"
def doi_co_so(n, b):
    """Print the base-``b`` digits of the binary string ``n``.

    ``b`` must be a power of two; each group of log2(b) bits maps to one
    digit taken from the module-level ``hexa`` alphabet.  Output is one
    line on stdout with no separators.
    """
    step = int(log2(b))
    # Left-pad with zeros so the bit string splits into whole digit groups.
    while len(n) % step != 0:
        n = '0' + n
    digits = []
    for start in range(0, len(n), step):
        value = 0
        for offset in range(step):
            if n[start + offset] == '1':
                value += pow(2, step - offset - 1)
        digits.append(hexa[value])
    print(''.join(digits))
# Read the test count and each case from DATA.in, printing the converted
# value for every case.  Using a context manager guarantees the file is
# closed; the previous try/finally called f.close() in `finally`, which
# raised NameError whenever open() itself failed (f was never bound).
try:
    with open("DATA.in", 'r') as f:
        t = int(f.readline())
        for test in range(t):
            a = list(f.readline().split())
            b = int(a[0])
            # The number may be on the same line as the base or on the next line.
            if len(a) == 2:
                n = int(a[1])
            else:
                n = int(f.readline())
            doi_co_so(str(n), b)
except FileNotFoundError:
    print()
26883671925 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime
from typing import Union, Generator, AsyncGenerator, Optional, Dict
from isodate import parse_duration
from dateutil import parser
from dtran.argtype import ArgType
from dtran.ifunc import IFunc, IFuncType
from dtran.dcat.api import DCatAPI
from dtran.metadata import Metadata
class DcatRangeStream(IFunc):
    id = "dcat_range_stream"
    description = """ Returns a stream of start_time and end_time for a dataset from Data Catalog
    """
    func_type = IFuncType.READER
    friendly_name: str = "Data Catalog Time Range Stream"
    # Declared pipeline inputs: only dataset_id is mandatory; missing times
    # fall back to the dataset's temporal coverage (see __init__).
    inputs = {
        "dataset_id": ArgType.String,
        "start_time": ArgType.DateTime(optional=True),
        "end_time": ArgType.DateTime(optional=True),
        "step_time": ArgType.String(optional=True)
    }
    outputs = {
        "start_time": ArgType.DateTime,
        "end_time": ArgType.DateTime
    }
    example = {
        "dataset_id": "ea0e86f3-9470-4e7e-a581-df85b4a7075d",
        "start_time": "2020-03-02T12:30:55",
        "end_time": "2020-03-02T12:30:55",
        "step_time": "P3Y6M4DT12H30M5S",
    }
    def __init__(self, dataset_id: str, start_time: datetime = None, end_time: datetime = None, step_time: str = None):
        """Resolve the time window and step for *dataset_id*.

        When either bound is missing it is looked up from the dataset's
        temporal_coverage metadata in the Data Catalog.  *step_time* is an
        ISO-8601 duration string; when omitted the whole window is emitted
        as a single chunk.
        """
        if (start_time is None) or (end_time is None):
            dataset = DCatAPI.get_instance().find_dataset_by_id(dataset_id)
            self.start_time = start_time or parser.parse(dataset['metadata']['temporal_coverage']['start_time'])
            self.end_time = end_time or parser.parse(dataset['metadata']['temporal_coverage']['end_time'])
        else:
            self.start_time = start_time
            self.end_time = end_time
        # Truncate sub-second precision so chunk boundaries are whole seconds.
        self.start_time = self.start_time.replace(microsecond=0)
        self.end_time = self.end_time.replace(microsecond=0)
        if step_time is None:
            self.step_time = self.end_time - self.start_time
        else:
            self.step_time = parse_duration(step_time)
    async def exec(self) -> Union[dict, Generator[dict, None, None], AsyncGenerator[dict, None]]:
        """Yield consecutive {start_time, end_time} chunks of step_time length.

        The final chunk is clipped to self.end_time, so chunks never extend
        past the configured window.
        """
        start_time = self.start_time
        while start_time < self.end_time:
            end_time = min(start_time + self.step_time, self.end_time)
            yield {"start_time": start_time, "end_time": end_time}
            start_time = end_time
    def validate(self) -> bool:
        # No additional validation beyond __init__.
        return True
    def change_metadata(self, metadata: Optional[Dict[str, Metadata]]) -> Dict[str, Metadata]:
        # Pass-through: this reader does not alter pipeline metadata.
        return metadata
| mintproject/MINT-Transformation | funcs/readers/dcat_range_stream.py | dcat_range_stream.py | py | 2,513 | python | en | code | 3 | github-code | 13 |
71071866259 | from utils.globals import CMR_FILE_URL
import re
import math
from urllib.parse import urlparse
import os
import sys
import itertools
def build_version_query_params(version):
    """Build repeated zero-padded ``&version=`` query params for CMR.

    CMR stores dataset versions with varying zero padding, so every padding
    from 3 digits down to the bare number is requested, e.g. version "5"
    yields "&version=005&version=05&version=5".

    Exits the process (non-zero status) when *version* is longer than the
    maximum padding.
    """
    desired_pad_length = 3
    if len(version) > desired_pad_length:
        print('Version string too long: "{0}"'.format(version))
        # quit() is meant for the interactive interpreter and exits with
        # status 0; sys.exit(1) signals the failure to callers/shells.
        sys.exit(1)

    version = str(int(version))  # Strip off any leading zeros
    query_params = ''
    while len(version) <= desired_pad_length:
        padded_version = version.zfill(desired_pad_length)
        query_params += '&version={0}'.format(padded_version)
        desired_pad_length -= 1
    return query_params
def filter_add_wildcards(filter):
    """Surround *filter* with '*' wildcards unless already present on a side."""
    result = filter if filter.startswith('*') else '*' + filter
    if not result.endswith('*'):
        result += '*'
    return result
def build_filename_filter(filename_filter):
    """Expand a comma-separated list of filename patterns into CMR params.

    Enables pattern matching on producer_granule_id and adds one
    wildcard-wrapped pattern parameter per entry.
    """
    result = '&options[producer_granule_id][pattern]=true'
    for pattern in filename_filter.split(','):
        # Same wildcard wrapping as filter_add_wildcards, inlined.
        if not pattern.startswith('*'):
            pattern = '*' + pattern
        if not pattern.endswith('*'):
            pattern = pattern + '*'
        result += '&producer_granule_id[]=' + pattern
    return result
def build_cmr_query_url(short_name, version, time_start, time_end,
                        bounding_box=None, polygon=None,
                        filename_filter=None):
    """Assemble the CMR granule-search URL for a dataset and time window.

    Spatial filtering prefers *polygon* over *bounding_box* when both are
    supplied; *filename_filter* adds producer-granule-id pattern params.
    """
    parts = ['&short_name={0}'.format(short_name),
             '&version={0}'.format(version),
             '&temporal[]={0},{1}'.format(time_start, time_end)]
    if polygon:
        parts.append('&polygon={0}'.format(polygon))
    elif bounding_box:
        parts.append('&bounding_box={0}'.format(bounding_box))
    if filename_filter:
        parts.append(build_filename_filter(filename_filter))
    return CMR_FILE_URL + ''.join(parts)
def get_speed(time_elapsed, chunk_size):
    """Return a human-readable transfer speed such as '1.2MB/s'.

    Returns '' while no time has elapsed.  Units step in powers of 1000
    ('', k, M, G, ...).  Speeds below 1 B/s are reported in plain B/s.
    """
    if time_elapsed <= 0:
        return ''
    speed = chunk_size / time_elapsed
    if speed <= 0:
        speed = 1
    size_name = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Clamp the exponent at 0: for speeds < 1 B/s math.log is negative and
    # the old code indexed size_name[-1], mislabelling them as 'YB/s'.
    i = max(0, int(math.floor(math.log(speed, 1000))))
    p = math.pow(1000, i)
    return '{0:.1f}{1}B/s'.format(speed / p, size_name[i])
def output_progress(count, total, status='', bar_len=60):
    """Draw an in-place console progress bar; no-op when *total* <= 0."""
    if total <= 0:
        return
    # Clamp the completed fraction into [0, 1].
    fraction = min(max(count / float(total), 0), 1)
    done = int(round(bar_len * fraction))
    percents = int(round(100.0 * fraction))
    bar = '=' * done + ' ' * (bar_len - done)
    line = ' [{0}] {1:3d}% {2} '.format(bar, percents, status)
    print('\b' * (len(line) + 4), end='')  # rewind over the previous bar
    sys.stdout.write(line)
    sys.stdout.flush()
def cmr_filter_urls(search_results):
    """Select only the desired data-file URLs from a CMR JSON response.

    Keeps links CMR classifies as data/metadata, drops inherited and
    OPeNDAP links, and de-duplicates by filename so later duplicates never
    overwrite earlier downloads.
    """
    feed = search_results.get('feed', {})
    if 'entry' not in feed:
        return []
    # Flatten every entry's link list into one stream of link dicts.
    all_links = itertools.chain.from_iterable(
        entry['links'] for entry in feed['entry'] if 'links' in entry)
    urls = []
    seen_filenames = set()
    for link in all_links:
        if 'href' not in link:
            continue  # nothing to download
        if 'inherited' in link and link['inherited'] is True:
            continue  # inherited collection-level link
        if 'rel' in link and 'data#' not in link['rel']:
            continue  # not classified by CMR as "data" or "metadata"
        if 'title' in link and 'opendap' in link['title'].lower():
            # OPeNDAP links are responsible for many duplicates; the
            # metadata does not yet identify non-datapool links cleanly.
            continue
        href = link['href']
        filename = href.split('/')[-1]
        if filename in seen_filenames:
            continue  # duplicate filename would overwrite an earlier file
        seen_filenames.add(filename)
        urls.append(href)
    return urls
def get_date_julian(url:str):
    """Return the julian-date token (second dot-separated field) of the URL's filename."""
    filename = os.path.basename(urlparse(url).path)
    return filename.split('.')[1]
def get_date_normal(date_code:str):
patt = re.compile('/([0-9]+[\.-/_]?[0-9]+[\.-/_]?[0-9]+)')
[date, *_] = patt.findall(date_code)
date = date.replace('.', '')
date = date.replace('-', '')
date = date.replace('_', '')
date = date.replace('/', '')
return date
def parse_save_url(file_url:str):
    """Split a granule URL into (date directory, '<product>/<filename>' save path)."""
    url_path = urlparse(file_url).path
    date_code, base_name = os.path.split(url_path)
    # Product code is everything before the first '.' of the filename.
    product = base_name.partition('.')[0]
    return date_code, os.path.join(product, base_name)
def yes_no_parser(item: str):
    """Interpret user input as yes/no: any answer starting with 'y' (case-insensitive) is yes."""
    return item.strip().lower().startswith('y')
| stjimreal/Modis_Scrapy | utils/utilities.py | utilities.py | py | 4,767 | python | en | code | 4 | github-code | 13 |
4321291461 | ##############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
##############################################################################
import sys
sys.path.append("..")
from financepy.utils.math import ONE_MILLION
from financepy.utils.date import Date
from financepy.utils.day_count import DayCountTypes
from financepy.utils.frequency import FrequencyTypes
from financepy.products.bonds.bond import YTMCalcType, Bond
from financepy.products.bonds.bond_zero import BondZero
from financepy.products.bonds.bond_market import *
import datetime as dt
import pandas as pd
import numpy as np
def test_bondtutor_example():
    """Pin prices, yields, durations and convexity against the bondtutor worked example."""
    # EXAMPLE FROM http://bondtutor.com/btchp4/topic6/topic6.htm
    accrualConvention = DayCountTypes.ACT_ACT_ICMA
    y = 0.062267
    settlement_date = Date(19, 4, 1994)
    issue_date = Date(15, 7, 1990)
    maturity_date = Date(15, 7, 1997)
    coupon = 0.085
    ex_div_days = 0
    face = 1000000
    freq_type = FrequencyTypes.SEMI_ANNUAL
    bond = Bond(issue_date, maturity_date,
                coupon, freq_type, accrualConvention, ex_div_days)
    dirty_price = bond.dirty_price_from_ytm(settlement_date, y)
    assert round(dirty_price, 4) == 108.7696
    clean_price = bond.clean_price_from_ytm(settlement_date, y)
    assert round(clean_price, 4) == 106.5625
    accrued_interest = bond.accrued_interest(settlement_date, face)
    assert round(accrued_interest, 4) == 22071.8232
    ytm = bond.yield_to_maturity(settlement_date, clean_price)
    assert round(ytm, 4) == 0.0622
    # Cross-check analytic dollar duration against a finite-difference bump.
    bump = 1e-4
    priceBumpedUp = bond.dirty_price_from_ytm(settlement_date, y + bump)
    assert round(priceBumpedUp, 4) == 108.7395
    priceBumpedDn = bond.dirty_price_from_ytm(settlement_date, y - bump)
    assert round(priceBumpedDn, 4) == 108.7998
    durationByBump = -(priceBumpedUp - dirty_price) / bump
    assert round(durationByBump, 4) == 301.1932
    duration = bond.dollar_duration(settlement_date, y)
    assert round(duration, 4) == 301.2458
    assert round(duration - durationByBump, 4) == 0.0526
    modified_duration = bond.modified_duration(settlement_date, y)
    assert round(modified_duration, 4) == 2.7696
    macauley_duration = bond.macauley_duration(settlement_date, y)
    assert round(macauley_duration, 4) == 2.8558
    conv = bond.convexity_from_ytm(settlement_date, y)
    assert round(conv, 4) == 0.0967
def test_bloomberg_us_treasury_example():
    """Pin US Treasury analytics against the Bloomberg worked example (Fjeldsted 2017)."""
    # https://data.bloomberglp.com/bat/sites/3/2017/07/SF-2017_Paul-Fjeldsted.pdf
    settlement_date = Date(21, 7, 2017)
    issue_date = Date(15, 5, 2010)
    maturity_date = Date(15, 5, 2027)
    coupon = 0.02375
    freq_type = FrequencyTypes.SEMI_ANNUAL
    accrual_type = DayCountTypes.ACT_ACT_ICMA
    face = 100.0
    ex_div_days = 0
    bond = Bond(issue_date,
                maturity_date,
                coupon,
                freq_type,
                accrual_type,
                ex_div_days)
    clean_price = 99.7808417
    yld = bond.current_yield(clean_price)
    assert round(yld, 4) == 0.0238
    # All three yield conventions agree to 4 dp for this bond.
    ytm = bond.yield_to_maturity(settlement_date, clean_price,
                                 YTMCalcType.UK_DMO)
    assert round(ytm, 4) == 0.0240
    ytm = bond.yield_to_maturity(settlement_date, clean_price,
                                 YTMCalcType.US_STREET)
    assert round(ytm, 4) == 0.0240
    ytm = bond.yield_to_maturity(settlement_date, clean_price,
                                 YTMCalcType.US_TREASURY)
    assert round(ytm, 4) == 0.0240
    dirty_price = bond.dirty_price_from_ytm(settlement_date, ytm)
    assert round(dirty_price, 4) == 100.2149
    clean_price = bond.clean_price_from_ytm(settlement_date, ytm)
    assert round(clean_price, 4) == 99.7825
    accrued_interest = bond.accrued_interest(settlement_date, face)
    assert round(accrued_interest, 4) == 0.4324
    accddays = bond._accrued_days
    assert round(accddays, 4) == 67.0
    duration = bond.dollar_duration(settlement_date, ytm)
    assert round(duration, 4) == 869.0934
    modified_duration = bond.modified_duration(settlement_date, ytm)
    assert round(modified_duration, 4) == 8.6723
    macauley_duration = bond.macauley_duration(settlement_date, ytm)
    assert round(macauley_duration, 4) == 8.7764
    conv = bond.convexity_from_ytm(settlement_date, ytm)
    assert round(conv, 4) == 0.8517
def test_bloomberg_apple_corp_example():
    """Pin corporate-bond (30E/360 ISDA) analytics against the Bloomberg Apple example."""
    settlement_date = Date(21, 7, 2017)
    issue_date = Date(13, 5, 2012)
    maturity_date = Date(13, 5, 2022)
    coupon = 0.027
    freq_type = FrequencyTypes.SEMI_ANNUAL
    accrual_type = DayCountTypes.THIRTY_E_360_ISDA
    face = 100.0
    ex_div_days = 0
    bond = Bond(issue_date, maturity_date,
                coupon, freq_type, accrual_type, ex_div_days)
    clean_price = 101.581564
    yld = bond.current_yield(clean_price)
    assert round(yld, 4) == 0.0266
    # All three yield conventions agree to 4 dp for this bond.
    ytm = bond.yield_to_maturity(settlement_date, clean_price,
                                 YTMCalcType.UK_DMO)
    assert round(ytm, 4) == 0.0235
    ytm = bond.yield_to_maturity(settlement_date, clean_price,
                                 YTMCalcType.US_STREET)
    assert round(ytm, 4) == 0.0235
    ytm = bond.yield_to_maturity(settlement_date, clean_price,
                                 YTMCalcType.US_TREASURY)
    assert round(ytm, 4) == 0.0235
    dirty_price = bond.dirty_price_from_ytm(settlement_date, ytm)
    assert round(dirty_price, 4) == 102.0932
    clean_price = bond.clean_price_from_ytm(settlement_date, ytm)
    assert round(clean_price, 4) == 101.5832
    accddays = bond._accrued_days
    assert accddays == 68
    accrued_interest = bond.accrued_interest(settlement_date, face)
    assert round(accrued_interest, 4) == 0.51
    duration = bond.dollar_duration(settlement_date, ytm)
    assert round(duration, 4) == 456.5778
    modified_duration = bond.modified_duration(settlement_date, ytm)
    assert round(modified_duration, 4) == 4.4722
    macauley_duration = bond.macauley_duration(settlement_date, ytm)
    assert round(macauley_duration, 4) == 4.5247
    conv = bond.convexity_from_ytm(settlement_date, ytm)
    assert round(conv, 4) == 0.2302
def test_zero_bond():
    """Check YTM and accrued interest for a 3-month zero-coupon bill."""
    # A 3 months treasure with 0 coupon per year.
    bill = BondZero(
        issue_date=Date(25, 7, 2022),
        maturity_date=Date(24, 10, 2022),
        issue_price=99.6410
    )
    settlement_date = Date(8, 8, 2022)
    clean_price = 99.6504
    calc_ytm = bill.yield_to_maturity(
        settlement_date, clean_price, YTMCalcType.ZERO) * 100
    accrued_interest = bill.accrued_interest(settlement_date, ONE_MILLION)
    print(calc_ytm)
    print(accrued_interest)
    # Expected values taken from an external pricing reference.
    assert abs(calc_ytm - 1.3997) < 0.0002
    assert abs(accrued_interest - ONE_MILLION * 0.05523077 / 100) < 0.01
def test_bond_ror():
    """Check buy/sell rate-of-return (simple and IRR) against CSV reference cases."""
    test_case_file = 'test_cases_bond_ror.csv'
    df = pd.read_csv('./tests/data/' + test_case_file,
                     parse_dates=['buy_date', 'sell_date'])
    # A 10-year bond with 1 coupon per year. code: 210215
    bond = Bond(
        issue_date=Date(13, 9, 2021),
        maturity_date=Date(13, 9, 2031),
        coupon=0.0312,
        freq_type=FrequencyTypes.ANNUAL,
        accrual_type=DayCountTypes.ACT_ACT_ICMA
    )
    for row in df.itertuples(index=False):
        buy_date = Date(row.buy_date.day, row.buy_date.month,
                        row.buy_date.year)
        sell_date = Date(row.sell_date.day,
                         row.sell_date.month, row.sell_date.year)
        simple, irr, pnl = bond.calc_ror(
            buy_date, sell_date, row.buy_ytm, row.sell_ytm)
        assert abs(simple - row.simple_return) < 0.00001
        assert abs(irr - row.irr) < 0.00001
def test_bond_zero_ror():
    """Check zero-coupon buy/sell rate-of-return against CSV reference cases."""
    test_case_file = 'test_cases_bond_zero_ror.csv'
    df = pd.read_csv('./tests/data/' + test_case_file,
                     parse_dates=['buy_date', 'sell_date'])
    # A 1-year bond with zero coupon per year. code: 092103011
    bond = BondZero(
        issue_date=Date(23, 7, 2021),
        maturity_date=Date(24, 8, 2022),
        issue_price=97.67
    )
    for row in df.itertuples(index=False):
        buy_date = Date(row.buy_date.day, row.buy_date.month,
                        row.buy_date.year)
        sell_date = Date(row.sell_date.day,
                         row.sell_date.month, row.sell_date.year)
        simple, irr, pnl = bond.calc_ror(
            buy_date, sell_date, row.buy_ytm, row.sell_ytm)
        assert abs(simple - row.simple_return) < 0.00001
        assert abs(irr - row.irr) < 0.00001
def test_bond_cfets():
    """
    Test ytms of bonds in CFETS convention, especially for those in last coupon period and
    have 2 or more coupon payments per year.
    """
    face = 100.0
    test_case_file = 'test_cases_bond_cfets.csv'
    df = pd.read_csv('./tests/data/' + test_case_file,
                     parse_dates=['settlement_date', 'issue_date', 'maturity_date'])
    for row in df.itertuples(index=False):
        bond = Bond(
            issue_date=Date(row.issue_date.day,
                            row.issue_date.month, row.issue_date.year),
            maturity_date=Date(row.maturity_date.day,
                               row.maturity_date.month, row.maturity_date.year),
            coupon=row.coupon / 100,
            freq_type=FrequencyTypes.ANNUAL if row.freq == 1 else FrequencyTypes.SEMI_ANNUAL,
            accrual_type=DayCountTypes.ACT_ACT_ICMA
        )
        settlement_date = Date(
            row.settlement_date.day, row.settlement_date.month, row.settlement_date.year)
        accrued_interest = bond.accrued_interest(settlement_date, face)
        clean_price = row.dirty_price - accrued_interest
        calc_ytm = bond.yield_to_maturity(
            settlement_date, clean_price, YTMCalcType.CFETS) * 100
        try:
            assert abs(calc_ytm - row.ytm) < 0.0001
        except AssertionError:
            # Was a bare `except:`, which swallowed *every* exception
            # (including KeyboardInterrupt and real errors from the calls
            # above); only a failed tolerance check should be diagnosed.
            print(bond)
            print(clean_price)
            print(settlement_date)
            print(bond.bond_payments(settlement_date, 100.0))
            print(f'calc_ytm:{calc_ytm}, correct_ytm:{row.ytm}')
            continue
def test_key_rate_durations_Bloomberg_example():
    """Check key-rate durations of a 5Y UST against Bloomberg's reported values."""
    accrual_type, frequencyType, settlementDays, exDiv, calendar = \
        get_bond_market_conventions(BondMarkets.UNITED_STATES)
    # interest accrues on this date. Issue date is 01/08/2022
    issue_date = Date(31, 7, 2022)
    maturity_date = Date(31, 7, 2027)
    coupon = 2.75/100.0
    face = 100.0
    ex_div_days = 0
    accrual_type, freq_type, settlementDays, exDiv, calendar = get_bond_market_conventions(
        BondMarkets.UNITED_STATES)
    bond = Bond(issue_date, maturity_date, coupon,
                freq_type, accrual_type, ex_div_days)
    settlement_date = Date(24, 4, 2023)
    # US Street yield on Bloomberg as of 20 April 2023
    # with settle date 24 April 2023
    ytm = 3.725060/100
    # Details of yields of market bonds at KRD maturity points
    my_tenors = np.array([0.5, 1, 2, 3, 5, 7, 10])
    my_rates = np.array([5.0367, 4.7327, 4.1445, 3.8575, 3.6272, 3.5825, 3.5347])/100
    key_rate_tenors, key_rate_durations = bond.key_rate_durations(settlement_date,
                                                                  ytm,
                                                                  key_rate_tenors = my_tenors,
                                                                  rates = my_rates)
    print(key_rate_tenors)
    print(key_rate_durations)
    # Bloomberg's reported KRDs at the same tenor points (3 dp tolerance).
    bbg_key_rate_durations = [-0.001, -.009, -0.022, 1.432,
                              2.527, 0.00, 0.00, 0.00, 0.00]
    for i in range(len(key_rate_durations)):
        assert round(key_rate_durations[i], 3) == bbg_key_rate_durations[i]
test_zero_bond()
| domokane/FinancePy | tests/test_FinBond.py | test_FinBond.py | py | 12,173 | python | en | code | 1,701 | github-code | 13 |
33175519967 | # this test programme takes 30 photos and saves them to a folder
from picamera import PiCamera # raspberry pi cameraexit
from config import *
import dropbox # for uploading to dropbox, `pip3 install dropbox`
import subprocess
from glob import glob # for the file upload process
from time import sleep, strftime
# The Raspberry Pi camera, configured at 1640x1232 (a full-FoV binned mode).
camera = PiCamera()
camera.resolution = (1640, 1232)
dbx = dropbox.Dropbox(YOUR_ACCESS_TOKEN, timeout = None) #dropbox, timeout=none allows for uploading of larger files without 30second normal timeout
def the_camera(no_of_frames, delay):
    """Capture *no_of_frames* stills, *delay* seconds apart, into the minilapse folder.

    Frames are saved as IMAGE_0000.JPG, IMAGE_0001.JPG, ... which matches
    the numbering ffmpeg's `-start_number 0000` pattern expects.
    """
    camera.start_preview()
    sleep(2)  # Camera warm-up time
    for i in range(no_of_frames):
        # 1-based in the message so the final shot reads "30 of 30",
        # not "29 of 30" (the old message started at photo 0).
        print(f'Taking photo {i + 1} of {no_of_frames}')
        file_path = "/home/pi/sunrise300/minilapse/" + 'IMAGE_' '{0:04d}'.format(i)+".JPG"
        camera.capture(file_path)
        sleep(delay)
    camera.stop_preview()
def dropbox_uploader(file_path):
    """Upload the file at *file_path* to Dropbox under the same path."""
    print(file_path)
    # Dropped the pointless `files = file_path` alias and fixed the
    # "Tring" typo in the status message.
    with open(file_path, "rb") as f:
        print(f"Trying file {file_path}")
        dbx.files_upload(f.read(), file_path, mute = True)
        print("Successfully uploaded")
if __name__ == "__main__":
    # Capture 30 frames half a second apart, encode them into a timestamped
    # MP4 (the crop trims the frame to 1640x923), then upload the video.
    # (Removed stray dataset-metadata text that had been fused onto the
    # final line, which made it a syntax error.)
    the_camera(30, 0.5)
    vid_file = "/home/pi/sunrise300/" + strftime("%Y%m%d-%H%M")+".mp4"
    subprocess.call(f"ffmpeg -y -r 15 -f image2 -start_number 0000 -i /home/pi/sunrise300/minilapse/IMAGE_%04d.JPG -vf crop=1640:923:0:0 -vcodec libx264 -pix_fmt yuv420p {vid_file}", shell=True)
    dropbox_uploader(vid_file)
36436698425 | from .log import die, printi, printw
from .input_types import type_check_kle_layout
from .lazy_import import LazyImport
from .util import dict_union, key_subst, rem, safe_get
from .yaml_io import read_yaml
from math import cos, radians, sin
from types import SimpleNamespace
from re import match
Matrix:type = LazyImport('mathutils', 'Matrix')
Vector:type = LazyImport('mathutils', 'Vector')
cap_deactivation_colour:str = '#cccccc'
glyph_deactivation_colour:str = '#000000'
def get_layout(layout_file:str, profile_data:dict, use_deactivation_colour:bool) -> [dict]:
    """Read a KLE layout from *layout_file* (YAML) and parse it into key dicts."""
    return parse_layout(read_yaml(layout_file), profile_data, use_deactivation_colour)
def dumb_parse_layout(layout:[[dict]]) -> [dict]:
    """Parse an in-memory KLE layout with no profile data and deactivation colours on."""
    return parse_layout(layout, None, True)
def parse_layout(layout: [[dict]], profile_data:dict, use_deactivation_colour:bool) -> [dict]:
    """Parse a KLE layout (list of rows) into a flat list of keycap dicts.

    Walks the rows maintaining KLE's stateful cursor (position, rotation,
    colours, profile row) in *parser_state*; each key inherits the current
    state and may mutate it.  Returns only real keys (decals 'd' excluded),
    each annotated with its computed 'kle-pos' and 'cap-name'.
    """
    if not type_check_kle_layout(layout):
        die('KLE layout failed type-checking, see console for more information')
    printi('Reading layout information')
    if type(layout) != list and any(
            list(map(lambda l: type(l) != list, layout))):
        die('Expected a list of lists in the layout (see the JSON output of KLE)'
            )
    profile_row_map:dict = safe_get(profile_data, 'profile-row-map')
    parsed_layout: [dict] = []
    # Initial KLE parser state: c/t = current cap/glyph colours, i = column
    # index, x/y = cursor, r/rx/ry = rotation and its origin, p = profile row.
    parser_default_state_dict:dict = {
        'c': cap_deactivation_colour if not use_deactivation_colour else None,
        't': glyph_deactivation_colour if not use_deactivation_colour else None,
        'i': 0,
        'x': 0.0,
        'y': 0.0,
        'r': 0.0,
        'rx': 0.0,
        'ry': 0.0,
        'p': profile_row_map['R1'] if profile_row_map is not None else 'R1',
    }
    parser_state:SimpleNamespace = SimpleNamespace(**parser_default_state_dict)
    for line in layout:
        parser_state.x = 0.0
        parser_state.i = 0
        if type(line) != list:
            continue
        while parser_state.i < len(line):
            # Parse for the next key
            printi('Handling layout, looking at pair "%s" and "%s"' %
                   (str(line[parser_state.i]).replace('\n', '\\n'),
                    str(safe_get(line, parser_state.i + 1)).replace('\n', '\\n')))
            (shift, line[parser_state.i]) = parse_key(line[parser_state.i], safe_get(line, parser_state.i + 1), parser_state, profile_row_map)
            key: dict = line[parser_state.i]
            # Handle colour changes
            parser_state.c = key['cap-style-raw']
            if use_deactivation_colour and parser_state.c == cap_deactivation_colour:
                parser_state.c = None
                key['cap-style-raw'] = None
            parser_state.t = key['glyph-colour-raw']
            if use_deactivation_colour and parser_state.t == glyph_deactivation_colour:
                parser_state.t = None
                key['glyph-colour-raw'] = None
            # Handle shifts
            if 'shift-y' in key:
                parser_state.y += key['shift-y']
                parser_state.x = 0.0
            if 'shift-x' in key:
                parser_state.x += key['shift-x']
            # Handle the profile
            parser_state.p = key['profile-part']
            # Handle the angle (any of r/rx/ry resets the rotation origin and cursor)
            parser_state.r = key['rotation']
            if 'r' in key or 'rx' in key or 'ry' in key:
                if 'rx' in key:
                    parser_state.rx = key['rx']
                    key = rem(key, 'rx')
                else:
                    parser_state.rx = -key['shift-x'] if 'shift-x' in key else 0.0
                if 'ry' in key:
                    parser_state.ry = key['ry']
                    key = rem(key, 'ry')
                else:
                    parser_state.ry = -key['shift-y'] if 'shift-y' in key else 0.0
                parser_state.x = key['shift-x'] if 'shift-x' in key else 0.0
                parser_state.y = key['shift-y'] if 'shift-y' in key else 0.0
            # Apply current position data: rotate the cursor about (rx, ry)
            key['kle-pos'] = Vector((parser_state.rx, parser_state.ry)) + Matrix.Rotation(-parser_state.r, 2) @ Vector((parser_state.x, parser_state.y))
            # Add to layout
            if 'key' in key and (not 'd' in key or not key['d']):
                parsed_layout += [key]
            # Move col to next position
            parser_state.x += key['width']
            # Move to the keycap representation
            parser_state.i += shift
        if len(line) > 1 and 'shift-y' not in line[-1]:
            parser_state.y += 1
    return list(map(add_cap_name, parsed_layout))
def parse_key(key: 'either str dict', nextKey: 'maybe (either str dict)', parser_state:SimpleNamespace, profile_row_map:dict) -> [int, dict]:
    """Normalise one raw KLE cell into a keycap dict.

    In KLE a dict cell is a modifier for the following string cell, so when
    *key* is a dict and *nextKey* a string both are consumed (shift = 2);
    otherwise only one cell is consumed (shift = 1).  Short KLE keys are
    renamed to descriptive ones (w -> width, c -> cap-style-raw, ...) and
    missing fields are defaulted from *parser_state*.  Returns
    (cells consumed, normalised key dict).
    """
    ret: dict
    shift: int = 1
    if type(key) == str:
        ret = {'key': parse_name(key)}
    elif type(key) == dict:
        if nextKey is not None and type(nextKey) == str:
            ret = dict_union(key, {'key': parse_name(nextKey)})
            shift = 2
        else:
            ret = dict(key)
    else:
        die('Malformed data when reading %s and %s' % (str(key), str(nextKey)))
    if 'key-type' not in ret:
        # Recognise special key shapes from their KLE geometry/labels.
        ret_key: str = safe_get(ret, 'key')
        x_in:float = safe_get(ret, 'x')
        if x_in is not None and x_in >= 0.25 \
                and safe_get(ret, 'w') == 1.25 \
                and safe_get(ret, 'h') == 2 \
                and safe_get(ret, 'w2') == 1.5 \
                and safe_get(ret, 'h2') == 1 \
                and safe_get(ret, 'x2') == -0.25:
            ret['key-type'] = 'iso-enter'
            ret['x'] -= 0.25
        elif ret_key.endswith('+') and safe_get(ret, 'h') == 2:
            ret['key-type'] = 'num-plus'
        elif ret_key and ret_key.lower().endswith('enter') and safe_get(ret,
                                                                        'h') == 2:
            ret['key-type'] = 'num-enter'
    if 'a' in ret:
        ret = rem(ret, 'a')
    if 'x' in ret:
        ret = key_subst(ret, 'x', 'shift-x')
    if 'y' in ret:
        ret = key_subst(ret, 'y', 'shift-y')
    if 'c' in ret:
        ret = key_subst(ret, 'c', 'cap-style-raw')
    else:
        ret['cap-style-raw'] = parser_state.c
    if 't' in ret:
        ret = key_subst(ret, 't', 'glyph-colour-raw')
    else:
        ret['glyph-colour-raw'] = parser_state.t
    if 'w' in ret:
        ret = key_subst(ret, 'w', 'width')
    else:
        ret['width'] = 1.0
    if 'w2' in ret:
        ret = key_subst(ret, 'w2', 'secondary-width')
    else:
        ret['secondary-width'] = ret['width']
    if 'h' in ret:
        ret = key_subst(ret, 'h', 'height')
    else:
        ret['height'] = 1.0
    if 'h2' in ret:
        ret = key_subst(ret, 'h2', 'secondary-height')
    else:
        ret['secondary-height'] = ret['height']
    if 'r' in ret:
        # KLE rotations are clockwise degrees; store anticlockwise radians.
        ret['r'] = radians(-ret['r'])
        ret['rotation'] = ret['r']
    else:
        ret['rotation'] = parser_state.r
    if 'n' in ret:
        ret = key_subst(ret, 'n', 'homing')
    else:
        ret['homing'] = False
    if 'l' in ret:
        ret = key_subst(ret, 'l', 'stepped')
    else:
        ret['stepped'] = False
    if 'p' in ret and ret['p']:
        if profile_row_map is not None:
            if ret['p'] in profile_row_map:
                ret['p'] = profile_row_map[ret['p']]
            else:
                printw('Profile row map does not contain key "%s" (this message appears once for each key which uses this profile)' % ret['p'])
        ret = key_subst(ret, 'p', 'profile-part')
    else:
        ret['profile-part'] = parser_state.p
    if 'key' not in ret:
        printw("Key \"%s\" %s 'key' field, please put one in" %
               (str(key), 'missing' if key != '' else 'has empty'))
        ret['key'] = 'SOME_ID@' + hex(id(key))
    return (shift, ret)
def parse_name(txt: str) -> str:
    """Collapse a multi-line KLE legend into a single dash-separated name."""
    return txt.replace('\n', '-')
def add_cap_name(key:dict) -> dict:
    """Attach the generated cap-model name to *key* under 'cap-name'.

    Mutates *key* in place and returns it for chaining.
    """
    key['cap-name'] = gen_cap_name(key)
    return key
def gen_cap_name(key: dict) -> str:
    """Derive the keycap-model name for *key* from its profile row and geometry.

    Special key types (e.g. 'iso-enter') use their type as the name directly;
    all other caps are named '<profile>-<width>u' plus stepped/homing suffixes.
    """
    if 'key-type' in key:
        return key['key-type']

    def as_units(value) -> str:
        # 1.25 -> "1_25": dots replaced so the name is filesystem-friendly.
        return ('%.2f' % float(value)).replace('.', '_')

    parts = ['%s-%su' % (key['profile-part'], as_units(key['width']))]
    if key['stepped']:
        parts.append('%su' % as_units(key['secondary-height']))
        parts.append('%su' % as_units(key['secondary-width']))
        parts.append('stepped')
    if key['homing']:
        parts.append('bar')
    return '-'.join(parts)
def compute_layout_dims(layout:[dict]) -> Vector:
    """Compute the axis-aligned bounding box of a keyboard layout.

    Returns a pair (Vector((xmin, ymin)), Vector((xmax, ymax))) covering the
    rotated primary and secondary rectangles of every cap in *layout*.
    Vector/Matrix presumably come from mathutils (Blender) -- TODO confirm.
    """
    def point_enumerator(v:Vector) -> Matrix:
        # Columns are the four corners of an axis-aligned rect of size v:
        # (0,0), (0,h), (w,0), (w,h).
        return Matrix([
            (0.0, 0.0, v[0], v[0]),
            (0.0, v[1], 0.0, v[1])
        ])
    # Initial extreme values
    xmin = 0.0
    ymin = 0.0
    xmax = 1.0
    ymax = 1.0
    for cap in layout:
        # Negative angle: KLE rotations are clockwise, mathutils' are CCW.
        rot:Matrix = Matrix.Rotation(-cap['rotation'], 2)
        primary_dims:Vector = Vector((cap['width'], cap['height']))
        secondary_dims:Vector = Vector((cap['secondary-width'], cap['secondary-height']))
        # Broadcast the cap origin to all four corner columns.
        kle_pos_mat:Matrix = Matrix([[cap['kle-pos'][0]] * 4, [cap['kle-pos'][1]] * 4])
        # This method doesn't take into account extremely weird keycap shapes (which use x2, y2 keys but it should work for everything which actually exists) and which doesn't have an iso-enter in the very bottom right
        primary_points:Matrix = kle_pos_mat + rot @ point_enumerator(primary_dims)
        secondary_points:Matrix = kle_pos_mat + rot @ point_enumerator(secondary_dims)
        # Row 0 holds x coordinates, row 1 holds y coordinates.
        xmin = min(xmin, *primary_points[0], *secondary_points[0])
        ymin = min(ymin, *primary_points[1], *secondary_points[1])
        xmax = max(xmax, *primary_points[0], *secondary_points[0])
        ymax = max(ymax, *primary_points[1], *secondary_points[1])
    return (Vector((xmin, ymin)), Vector((xmax, ymax)))
| TheSignPainter98/adjust-keys | adjustkeys/layout.py | layout.py | py | 9,837 | python | en | code | 14 | github-code | 13 |
74811481616 | """
Functions to load and save data. Some general, some specific to given predictive_network subclasses
"""
import numpy as np
import ntpath
import tables
import sys
import os
import pickle as pkl
FLOATX = 'float32'
RANDOM_SEED = 12345
def load_pickled_data(load_path):
    """Load and return a pickled object from *load_path*.

    A leading ``~`` in the path is expanded to the user's home directory.
    """
    load_path = os.path.expanduser(load_path)
    with open(load_path, "rb") as f:
        return pkl.load(f)
def pickle_data(data, save_path, protocol=4):
    """Pickle *data* to *save_path*, creating parent directories as needed.

    Parameters
    ----------
    data : object
        Any picklable object.
    save_path : str
        Destination file path.
    protocol : int
        Pickle protocol version (default 4 for large-object support).
    """
    parent = os.path.dirname(os.path.abspath(save_path))
    if not os.path.exists(parent):
        os.makedirs(parent)
    with open(save_path, "wb") as f:
        pkl.dump(data, f, protocol=protocol)
def add_noise(data, noise_ratio, renormalise=True, set_seed=True):
    """Add Gaussian noise scaled by the data's std; optionally re-standardise.

    The noise std is ``data.std() * noise_ratio``. When *renormalise* is True
    the noisy array is re-centred to zero mean and unit std.
    """
    if set_seed:
        np.random.seed(RANDOM_SEED)
    noisy = data + data.std() * noise_ratio * np.random.randn(*data.shape)
    if renormalise:
        noisy = (noisy - np.mean(noisy[:])) / np.std(noisy[:])
    return noisy
def extract_train_val_data(concattrain, val_prop=0.1, random_order=False, set_seed=False):
    """Split an example-first array into train/validation subsets.

    Parameters
    ----------
    concattrain : np.ndarray
        Data with examples along the first axis.
    val_prop : float
        Fraction of examples reserved for validation (floored to an int count).
    random_order : bool
        If True, shuffle examples before splitting.
    set_seed : bool
        If True (and shuffling), seed NumPy's RNG with RANDOM_SEED for
        reproducibility.

    Returns
    -------
    tuple of np.ndarray
        (train_data, val_data)
    """
    tot_train_size = concattrain.shape[0]
    val_size = int(tot_train_size*val_prop)
    train_size = tot_train_size - val_size
    if random_order:
        if set_seed:
            np.random.seed(RANDOM_SEED)
        perm_seq = np.random.permutation(np.arange(tot_train_size))
    else:
        perm_seq = np.arange(tot_train_size)
    # concattrain = np.rollaxis(concattrain,3,1)
    train_data = concattrain[perm_seq[:train_size], ...]
    # Bug fix: `perm_seq[-val_size:]` returned the WHOLE array when
    # val_size == 0; slicing from train_size is equivalent for val_size > 0
    # and correctly yields an empty validation set otherwise.
    val_data = concattrain[perm_seq[train_size:], ...]
    return train_data, val_data
def load_matfile_dataset(mat_path, return_test=False):
    """Load the train (default) or test split from a PyTables/MATLAB file.

    The file must expose ``/concattrain`` and/or ``/concattest`` arrays at its
    root. The returned array is cast to FLOATX.

    Bug fix: the file handle was previously never closed; the context manager
    guarantees it is released even if the read fails.
    """
    with tables.open_file(mat_path) as f:
        if not return_test:
            dataset = np.asarray(f.root.concattrain[:], dtype=FLOATX)
        else:
            dataset = np.asarray(f.root.concattest[:], dtype=FLOATX)
    return dataset
def reshape_auditory_input_data(data, t_past, t_future, numfreq):
    """Split flattened spectrogram rows into input (past) and target (future).

    Each row holds ``t_past`` past frames followed by ``t_future`` future
    frames, each frame being ``numfreq`` values wide.
    """
    n_in = t_past * numfreq
    n_out = t_future * numfreq
    inputs = data[:, :n_in]
    targets = data[:, n_in:n_in + n_out]
    return inputs, targets
def load_auditory_data(file_path, t_past, t_future, numfreq,
                       post_dict=False,
                       noise_ratio=0,
                       input_noise_ratio=0):
    """Load auditory spectrogram data and return preprocessed train/val splits.

    Parameters
    ----------
    file_path : str
        Path to the PyTables/MATLAB data file.
    t_past, t_future, numfreq : int
        Frame counts and spectrogram width used to split inputs from targets.
    post_dict : bool
        If True, reverse the feature order (post-diction instead of prediction).
    noise_ratio, input_noise_ratio : float
        Forwarded to on_the_fly_preprocessing.

    Returns
    -------
    tuple of np.ndarray
        (X_train, y_train, X_val, y_val), normalised float32 arrays.
    """
    # load matlab data
    concattrain = load_matfile_dataset(file_path)
    if post_dict is True:
        print('NB! post-diction ordering rather than prediction!!!')
        concattrain = concattrain[:, ::-1]  # reverse feature/time ordering
    train_data, val_data = extract_train_val_data(concattrain, random_order=True, set_seed=True)
    X_train, y_train = reshape_auditory_input_data(train_data, t_past, t_future, numfreq)
    X_val, y_val = reshape_auditory_input_data(val_data, t_past, t_future, numfreq)
    # Bug fix: on_the_fly_preprocessing does NOT operate in place -- its return
    # value was previously discarded, so the data was returned unnormalised.
    X_train, y_train, X_val, y_val = on_the_fly_preprocessing(
        X_train, y_train,
        X_val, y_val,
        noise_ratio=noise_ratio,
        input_noise_ratio=input_noise_ratio,
        max_examples=500000,
        copy_data=True,
        norm_type=0)
    return X_train, y_train, X_val, y_val
def divide_rfs(dat_to_divide, RF_size):
    """Tile each image into non-overlapping RF_size x RF_size receptive fields.

    Input shape [m, x1, x2, t]; output shape [m * (x1//RF_size) * (x2//RF_size),
    RF_size*RF_size, t], i.e. every spatial tile becomes its own example with a
    flattened spatial vector. Any remainder rows/columns beyond a multiple of
    RF_size are implicitly dropped by the reshape -- presumably intentional;
    verify input dims are multiples of RF_size.
    """
    def divide_along_dim_1(temp):
        # Split the FIRST spatial axis into chunks of RF_size and fold the
        # chunk index into the example axis; then rotate axes so the other
        # spatial dimension comes first for the second pass.
        [x1, x2, t, m] = temp.shape
        nrf = int(np.floor(x1/RF_size))
        temp = np.reshape(temp, [RF_size, nrf, x2, t, m], order='f')
        temp = np.moveaxis(temp, 1, -1)
        temp = np.reshape(temp, [RF_size, x2, t, m*nrf], order='c')
        temp = np.rollaxis(temp, 1, 0)
        return temp
    #start with data in the format: [m, x1, x2, t] eg [914,160,100,50]
    #move examples to last dimension
    dat_to_divide = np.moveaxis(dat_to_divide, 0, -1)
    #dat_to_divide.shape: [x1,x2,t,m]
    #divide second spatial dimension into RF_size chunks
    dat_to_divide = divide_along_dim_1(dat_to_divide)
    #dat_to_divide.shape: [x1,RF_size,t,mm]
    #Do the same for the first spatial dimension
    dat_to_divide = divide_along_dim_1(dat_to_divide)
    #dat_to_divide.shape: [RF_size,RF_size,t,mmm]
    print(dat_to_divide.shape)
    #collapse spatial dimensions into single vector
    [_, _, seq_length, m] = dat_to_divide.shape
    dat_to_divide = np.reshape(dat_to_divide, [RF_size*RF_size, seq_length, m], order='f')
    #dat_to_divide.shape: [RF_size*RF_size,t,mmm]
    #move examples to first dimension
    dat_to_divide = np.rollaxis(dat_to_divide, -1)
    #train_data.shape: [mmm,RF_size*RF_size,t]
    return dat_to_divide
def on_the_fly_preprocessing(X_train, y_train,
                             X_val, y_val,
                             noise_ratio=0,
                             input_noise_ratio=0,
                             max_examples=20000,
                             copy_data=True,
                             norm_type=0):
    """Subsample, optionally add noise, normalise, and cast data to float32.

    IMPORTANT: this function does NOT modify its arguments in place -- callers
    must use the returned list [X_train, y_train, X_val, y_val].

    norm_type == 0 normalises by the global train-set mean/std; any other
    value normalises each example separately (this branch indexes with
    [:, np.newaxis, np.newaxis] and therefore assumes 3-D arrays -- TODO
    confirm for 2-D inputs).
    """
    # Cap the number of examples; validation is capped at a tenth of that.
    if X_train.shape[0] > max_examples:
        X_train = X_train[:max_examples, ...]
        y_train = y_train[:max_examples, ...]
        X_val = X_val[:max_examples//10, ...]
        y_val = y_val[:max_examples//10, ...]
    # Noise modes are mutually exclusive: either inputs only, or inputs+targets.
    if input_noise_ratio != 0 and input_noise_ratio is not None:
        assert(noise_ratio==0)
        print('NB! Adding input only noise with noise_ratio = %.2f' %input_noise_ratio)
        X_train = add_noise(X_train, input_noise_ratio, renormalise=False, set_seed=True)
        # X_val = add_noise(X_val, input_noise_ratio, renormalise=False)
    elif noise_ratio != 0 and noise_ratio is not None:
        assert(input_noise_ratio==0)
        print('NB! Adding noise with noise_ratio = %.2f' %noise_ratio)
        X_train = add_noise(X_train, noise_ratio, renormalise=False, set_seed=True)
        # X_val = add_noise(X_val, noise_ratio, renormalise=False)
        y_train = add_noise(y_train, noise_ratio, renormalise=False, set_seed=False)
        # y_val = add_noise(y_val, noise_ratio, renormalise=False)
    if norm_type == 0:
        #Normalise by subtracting the mean and dividing by the standard deviation of the entire dataset
        # Note: validation data is normalised with TRAIN statistics (no leakage).
        X_train_mean = X_train.mean()
        X_train_std = X_train.std()
        X_train = ((X_train-X_train_mean)/X_train_std)
        y_train = ((y_train-X_train_mean)/X_train_std)
        X_val = ((X_val-X_train_mean)/X_train_std)
        y_val = ((y_val-X_train_mean)/X_train_std)
    else:
        #Normalise by subtracting the mean and dividing by the standard deviation of each example seperately
        X_train_mean = np.reshape(X_train, [X_train.shape[0], -1]).mean(axis=-1)
        X_train_std = np.reshape(X_train, [X_train.shape[0], -1]).std(axis=-1)
        X_train = (X_train-X_train_mean[:, np.newaxis, np.newaxis])/X_train_std[:, np.newaxis, np.newaxis]
        y_train = (y_train-X_train_mean[:, np.newaxis, np.newaxis])/X_train_std[:, np.newaxis, np.newaxis]
        X_val_mean = np.reshape(X_val, [X_val.shape[0], -1]).mean(axis=-1)
        X_val_std = np.reshape(X_val, [X_val.shape[0], -1]).std(axis=-1)
        X_val = (X_val-X_val_mean[:, np.newaxis, np.newaxis])/X_val_std[:, np.newaxis, np.newaxis]
        y_val = (y_val-X_val_mean[:, np.newaxis, np.newaxis])/X_val_std[:, np.newaxis, np.newaxis]
    # Cast to float32; optionally force fresh C-ordered copies.
    if copy_data:
        # import copy
        X_to_train = X_train.copy(order='c').astype('float32')
        y_to_train = y_train.copy(order='c').astype('float32')
        X_to_val = X_val.copy(order='c').astype('float32')
        y_to_val = y_val.copy(order='c').astype('float32')
    else:
        X_to_train = X_train.astype('float32')
        y_to_train = y_train.astype('float32')
        X_to_val = X_val.astype('float32')
        y_to_val = y_val.astype('float32')
    print(X_to_train.shape)
    print(y_to_train.shape)
    print(X_to_val.shape)
    print(y_to_val.shape)
    return [X_to_train, y_to_train, X_to_val, y_to_val]
def load_1d_conv_vis_data(data_path,
                          noise_ratio=0,
                          input_noise_ratio=0,
                          max_examples=20000,
                          copy_data=True,
                          norm_type=0,
                          RF_size=20,
                          t_filter_length=7,
                          t_predict_length=1):
    """Load pickled visual data, tile it into receptive fields, and align
    input/target time windows for a 1-D temporal convolution.

    X keeps all but the last t_predict_length frames; y drops the first
    t_filter_length frames -- presumably so a conv with kernel length
    t_filter_length aligns X with y; confirm t_filter_length/t_predict_length
    are chosen consistently by callers.
    """
    filedir, filename = ntpath.split(data_path)
    if filename == '':
        filename = 'normalized_concattrain.pkl'
    concattrain = load_pickled_data(os.path.join(filedir, filename))
    concattrain = divide_rfs(concattrain, RF_size)
    [train_data, val_data] = extract_train_val_data(concattrain, random_order=False)
    del concattrain
    #Select appropriate timesteps for 1D temporal convolution
    print('t_filter_length: ', t_filter_length)
    print('t_predict_length: ', t_predict_length)
    X_train = train_data[:, :, :-t_predict_length]
    y_train = train_data[:, :, t_filter_length:]
    X_val = val_data[:, :, :-t_predict_length]
    y_val = val_data[:, :, t_filter_length:]
    # Free the large intermediates before preprocessing allocates copies.
    del train_data
    del val_data
    [X_to_train, y_to_train, X_to_val, y_to_val] = on_the_fly_preprocessing(X_train, y_train,
                                                                            X_val, y_val,
                                                                            noise_ratio=noise_ratio,
                                                                            input_noise_ratio=input_noise_ratio,
                                                                            max_examples=max_examples,
                                                                            copy_data=copy_data,
                                                                            norm_type=norm_type)
    return [X_to_train, y_to_train, X_to_val, y_to_val]
def load_3d_conv_vis_data(data_path, s_filter_size=21, t_filter_length=7, start_height=40, end_height=-40):
    """Load pickled visual data and crop it for 3-D convolutional training.

    Parameters
    ----------
    data_path : str
        Directory containing 'normalized_concattrain.pkl'.
    s_filter_size : int
        Spatial filter size; must be odd so the targets can be centre-cropped.
    t_filter_length : int
        Temporal filter length; the first t_filter_length frames are dropped
        from the targets.
    start_height, end_height : int
        Vertical crop applied to the first spatial axis.

    Returns
    -------
    list of np.ndarray
        [X_train, y_train, X_val, y_val] as FLOATX copies.
    """
    # Bug fix: use os.path.join instead of raw string concatenation, which
    # silently produced a wrong path when data_path lacked a trailing slash.
    concattrain = load_pickled_data(os.path.join(data_path, 'normalized_concattrain.pkl'))
    [train_data, val_data] = extract_train_val_data(concattrain)
    train_data = train_data[:, start_height:end_height, :, :]
    val_data = val_data[:, start_height:end_height, :, :]
    print(train_data.shape)
    print(val_data.shape)
    [m, x1, x2, seq_length] = train_data.shape
    if s_filter_size % 2 == 0:
        # Fixed error message (the original text was self-contradictory).
        raise ValueError('s_filter_size must be odd, but an even size of {s_filter_size} was selected.'.format(s_filter_size=repr(s_filter_size)))
    start_x = (s_filter_size-1)//2
    print('start_x: %i' % start_x)
    # Inputs keep everything but the last frame; targets are centre-cropped
    # spatially and shifted by the temporal filter length.
    X_train = train_data[..., :-1]
    y_train = train_data[:, start_x:-start_x, start_x:-start_x, t_filter_length:]
    X_val = val_data[..., :-1]
    y_val = val_data[:, start_x:-start_x, start_x:-start_x, t_filter_length:]
    X_to_train = X_train.astype(FLOATX).copy()
    y_to_train = y_train.astype(FLOATX).copy()
    X_to_val = X_val.astype(FLOATX).copy()
    y_to_val = y_val.astype(FLOATX).copy()
    return [X_to_train, y_to_train, X_to_val, y_to_val]
def load_tensorized_visual_data_for_fcn(data_path,
                                        noise_ratio=0,
                                        input_noise_ratio=0,
                                        t_past=7, t_future=1,
                                        copy_data=False,
                                        RF_size=None,
                                        norm_type=0,
                                        max_examples=500000):
    """Load pickled visual data and flatten it for a fully connected network.

    The first t_past frames become the (time-major, flattened) input vector;
    the following t_future frames become the target. When t_future == 1 the
    target keeps its spatial layout (squeezed), otherwise it is flattened the
    same way as the input.
    """
    filedir, filename = ntpath.split(data_path)
    if filename == '':
        filename = 'normalized_concattrain.pkl'
    concattrain = load_pickled_data(os.path.join(filedir, filename))
    if RF_size is not None:
        # Optionally tile into receptive fields first.
        concattrain = divide_rfs(concattrain, RF_size)
    [train_data, val_data] = extract_train_val_data(concattrain, random_order=False)
    X_train = train_data[..., :t_past]
    y_train = np.squeeze(train_data[..., t_past:t_past+t_future])
    X_val = val_data[..., :t_past]
    y_val = np.squeeze(val_data[..., t_past:t_past+t_future])
    # Move time before space, then flatten (time, space) into one feature axis.
    X_train = np.rollaxis(X_train, -1, -2)
    X_train = np.reshape(X_train, [X_train.shape[0], X_train.shape[1]*X_train.shape[2]])
    X_val = np.rollaxis(X_val, -1, -2)
    X_val = np.reshape(X_val, [X_val.shape[0], X_val.shape[1]*X_val.shape[2]])
    if t_future > 1:
        y_train = np.rollaxis(y_train, -1, -2)
        y_train = np.reshape(y_train, [y_train.shape[0], y_train.shape[1]*y_train.shape[2]])
        y_val = np.rollaxis(y_val, -1, -2)
        y_val = np.reshape(y_val, [y_val.shape[0], y_val.shape[1]*y_val.shape[2]])
    [X_to_train, y_to_train, X_to_val, y_to_val] = on_the_fly_preprocessing(X_train, y_train,
                                                                            X_val, y_val,
                                                                            noise_ratio=noise_ratio,
                                                                            input_noise_ratio=input_noise_ratio,
                                                                            max_examples=max_examples,
                                                                            copy_data=copy_data,
                                                                            norm_type=norm_type)
    return [X_to_train, y_to_train, X_to_val, y_to_val]
| yossing/distributed_grid_search | data_handling.py | data_handling.py | py | 13,605 | python | en | code | 0 | github-code | 13 |
38829313547 | #!/usr/bin/env python3
# Standard library imports
import random
# Remote library imports
from faker import Faker
from datetime import datetime
import re
# Local imports
from app import app
from models import db, User, City, Location,CityNote,LocationNote
if __name__ == '__main__':
    faker = Faker()

    def persist(record):
        """Add one record and commit immediately so autogenerated ids are usable."""
        db.session.add(record)
        db.session.commit()

    with app.app_context():
        print("Starting seed...")

        # Wipe tables child-first so foreign-key constraints stay satisfied.
        for model in (LocationNote, CityNote, Location, City, User):
            model.query.delete()

        ############# * USERS * #############
        # Five users, each with a distinct travel style drawn without replacement.
        styles = ['Thrill-seeker', 'Foodie', 'Relaxer', 'Experiencer', 'Party Animal', 'Shopper']
        for _ in range(5):
            email_address = faker.email()
            travel_style_rand = random.choice(styles)
            username_rand = re.sub(r"[^a-zA-Z0-9]+", "", faker.text(max_nb_chars=19).lower())
            persist(User(
                email=email_address,
                username=username_rand,
                travel_style=travel_style_rand
            ))
            styles.remove(travel_style_rand)
        user_ids = [user.id for user in User.query.all()]

        ############# * CITIES * #############
        for city_name, country, owner_id in (
                ('Killarney', 'Ireland', user_ids[0]),
                ('Seoul', 'South Korea', user_ids[0]),
                ('Seoul', 'South Korea', user_ids[1])):
            persist(City(city_name=city_name, country=country, user_id=owner_id))
        city_ids = [city.id for city in City.query.all()]

        ############# * CITY NOTES * #############
        persist(CityNote(
            note_body='great hub for tours to ring of kerry and dingle peninsula',
            city_id=city_ids[0]
        ))
        persist(CityNote(
            note_body='so fast paced',
            city_id=city_ids[1]
        ))
        persist(CityNote(
            note_body='subway - well connected, but ends at midnight.',
            note_type='Transportation',
            city_id=city_ids[1]
        ))
        persist(CityNote(
            note_body='buses - some run late night, some end early.',
            note_type='Transportation',
            city_id=city_ids[1]
        ))

        ############# * LOCATIONS * #############
        location_specs = (
            dict(location_name='Haneul Park (하늘공원)',
                 rating=4,
                 google_map_url='https://goo.gl/maps/E6CtsTEMe27p5HXj9',
                 website='https://parks.seoul.go.kr/parks',
                 avg_cost=0,
                 category='OutdoorActivity',
                 city_id=city_ids[1],
                 user_id=user_ids[0]),
            dict(location_name='Seoul Forest Park (서울숲공원)',
                 rating=4,
                 google_map_url='https://goo.gl/maps/kDGN5J1qCkgRsMoX8',
                 website='https://parks.seoul.go.kr/parks/',
                 avg_cost=0,
                 category='OutdoorActivity',
                 city_id=city_ids[1],
                 user_id=user_ids[0]),
            dict(location_name='Gimbap Cheonguk (김밥천국 서강대점)',
                 rating=4,
                 google_map_url='https://goo.gl/maps/NAsvAz4WeD8Hrh3J7',
                 website=None,
                 avg_cost=1,
                 category='FoodDrink',
                 city_id=city_ids[1],
                 user_id=user_ids[0]),
        )
        for spec in location_specs:
            visited = faker.date_between_dates(date_start=datetime(2015, 1, 1),
                                               date_end=datetime(2019, 12, 31))
            persist(Location(date_visited=visited, **spec))
        location_ids = [location.id for location in Location.query.all()]

        ############# * LOCATION NOTES * #############
        # (note body, index into location_ids)
        location_note_specs = (
            ('great for fall foliage. walk all the way to the tip for nice city views and deer ', 1),
            ('great city views, especially at night', 0),
            ('can take long stairs up/down or paid trolley.', 0),
            ('Must see Silver grass in the fall', 0),
            ('Food is very affordable, 3,000-6,000 won', 2),
            ('Loved the Cheese Ramen (with egg) and the kimbap rolled in egg', 2),
            ('Not so good: bibimbap', 2),
        )
        for body, loc_idx in location_note_specs:
            persist(LocationNote(note_body=body, location_id=location_ids[loc_idx]))
| jordandc20/Vicariously_DJordan-capstone | server/seed.py | seed.py | py | 6,512 | python | en | code | 0 | github-code | 13 |
4692893333 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# 2160 x 4320 binary Earth mask; cells equal to 0 are treated as sea by the
# area functions below -- presumably 1 means land (verify Earth.npy provenance).
data = np.unpackbits(np.load("Earth.npy")).reshape(2160, 4320)
def Uniform_draw_Q1():
    """Draw a uniformly random grid index (theta, phi) over the full map.

    Bug fix: ``np.random.randint(low, high)`` samples the half-open interval
    [low, high), so the previous bounds (2159, 4319) could never return the
    last row/column of the 2160 x 4320 grid.
    """
    theta = np.random.randint(0, 2160)
    phi = np.random.randint(0, 4320)
    return theta, phi
def calculate_area_percentage_Q1_d():
    """Return the exact fraction of grid cells that are land.

    Vectorised replacement for the original double Python loop over all
    2160 x 4320 cells: counts sea cells (value 0) in one NumPy pass and
    prints/returns the identical land fraction.
    """
    sea_area = np.count_nonzero(data == 0)
    land_fraction = 1 - sea_area / data.size
    print(land_fraction)
    return land_fraction
def calculate_samples_c(sample_num):
    """Monte-Carlo estimate of the land fraction from random grid samples.

    Draws *sample_num* random (theta, phi) grid indices, classifies each as
    land or sea via the global mask, scatter-plots the sampled points in 3-D
    (saves 'point Selection.png' and blocks on plt.show()), and returns the
    land fraction among the samples.
    """
    count_ocean = 0
    count_land = 0
    points = []
    x = []
    y = []
    z = []
    plt.figure()
    ax = plt.axes(projection='3d')
    for i in range(0, sample_num):
        theta, phi = Uniform_draw_Q1() # pick a random point on earth
        # convert latitude and longtitude to 3D cartesian space
        # NOTE(review): indices are divided by 12 to get degrees, but np.sin/
        # np.cos expect radians -- the 3-D scatter is therefore only a rough
        # visualisation; confirm intended units.
        points.append([theta/12, phi/12])
        x.append(np.sin(theta/12) * np.cos(phi/12))
        y.append(np.sin(theta/12)*np.sin(phi/12))
        z.append(np.cos(theta/12))
        if data[theta][phi] == 0:
            count_ocean += 1
        else:
            count_land += 1
    # Plot the randomly chosen points in 3D space
    ax.scatter(x, y, z)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.title("Data point chosen")
    plt.savefig("point Selection.png")
    plt.show()
    return count_land / (count_land + count_ocean)
if __name__ == "__main__":
print(calculate_samples_c(100000)/ 0.26880369084362143)
| Sheldonsu28/PHY407-Computational-Physics | Lab 10/Lab10Q1.py | Lab10Q1.py | py | 1,807 | python | en | code | 0 | github-code | 13 |
31941921020 | class Node:
    def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
        """Linked-list node carrying an extra `random` pointer (LeetCode 138)."""
        self.val = int(x)
        self.next = next
        self.random = random
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes carry an extra `random` pointer.

        Runs in O(n) time and O(1) extra space by weaving each clone directly
        after its original node, wiring the random pointers through the weave,
        and finally unweaving the two lists.
        """
        if head is None:
            return None
        # Pass 1: splice a clone right after every original node.
        cursor = head
        while cursor is not None:
            cursor.next = Node(cursor.val, cursor.next)
            cursor = cursor.next.next
        # Pass 2: the clone of cursor.random sits immediately after it.
        cursor = head
        while cursor is not None:
            if cursor.random is not None:
                cursor.next.random = cursor.random.next
            cursor = cursor.next.next
        # Pass 3: detach the clones, restoring the original list.
        clone_head = head.next
        cursor = head
        while cursor is not None:
            clone = cursor.next
            cursor.next = clone.next
            clone.next = clone.next.next if clone.next is not None else None
            cursor = cursor.next
        return clone_head
| wylu/leetcodecn | src/python/offer/35.复杂链表的复制.py | 35.复杂链表的复制.py | py | 795 | python | zh | code | 3 | github-code | 13 |
31086753879 | import logging
from twisted.internet import defer, reactor, task
from twisted.protocols.basic import LineReceiver
from exceptions import TimeoutException
logger = logging.getLogger('pymdb')
def pretty(data):
    """Render a byte string as space-separated hex, e.g. '\\x01' -> '0x01'."""
    return ' '.join('0x{:02X}'.format(ord(ch)) for ch in data)
def encode(command, data=''):
    """Frame an MDB command: length byte (payload + 2), 0x88 marker, command, data."""
    header = chr(len(data) + 2) + '\x88'
    return header + command + data
def log_result(f):
    """Decorator for Deferred-returning methods: logs entry and the
    pretty-printed (hex) result, re-raising any error after logging it.

    NOTE(review): `f.func_name` is Python-2 syntax (Python 3 uses
    `f.__name__`) -- this module appears to target Python 2; confirm before
    running under Python 3.
    """
    @defer.inlineCallbacks
    def pretty_log(self, *args, **kwargs):
        clazz = ''
        if self.__class__:
            clazz = self.__class__.__name__
        logger.debug("{}.{} ->".format(clazz, f.func_name))
        try:
            result = yield f(self, *args, **kwargs)
            str_data = pretty(result)
            logger.debug("{}.{} <- {}".format(clazz, f.func_name, str_data))
            defer.returnValue(result)
        except Exception as e:
            logger.error("pretty_log error: " + str(e))
            raise e
    return pretty_log
# MDB acknowledge response (two bytes).
ACK = '\x01\x00'
# NOTE(review): '/x01' is a four-character literal, not the byte 0x01 -- this
# looks like a typo for '\x01'. As written, the modebit comparison in
# MDB._call can never match a single character. Confirm against the MDB spec.
MODEBIT = '/x01'
class MDB(LineReceiver):
    """Twisted serial protocol for talking to an MDB bus adapter.

    Requests are serialised through a DeferredLock; each call writes the raw
    request, waits a fixed `timeout`, then parses whatever bytes arrived.
    """
    # Seconds to wait for the device's reply after each write.
    timeout = 0.3

    def __init__(self):
        self.req = None          # request currently in flight (None when idle)
        self.setRawMode()        # accumulate raw bytes, not lines
        self.lock = defer.DeferredLock()
        self.data = ''           # bytes received since the last request

    def connectionMade(self):
        logger.debug("Connected")

    def rawDataReceived(self, data):
        # Append everything; _call slices/validates after the timeout elapses.
        self.data = self.data + data

    @log_result
    def call(self, req):
        """Serialise a request through the lock and return a Deferred result."""
        return self.lock.run(self._call, req)

    @defer.inlineCallbacks
    def _call(self, req):
        """Write *req*, wait `timeout` seconds, then validate the reply."""
        self.data = ''
        if self.req:
            # Should be impossible thanks to the lock; guards against misuse.
            raise ValueError(
                "call %s while %s request in progress" % (
                    pretty(req), pretty(self.req)))
        self.req = req
        try:
            self.transport.write(req)
        except Exception as e:
            logger.exception("Error while write to transport")
            self.req = None
            raise e
        # sleep for timeout
        yield task.deferLater(reactor, self.timeout, defer.passthru, None)
        self.req = None
        # try:
        if self.data:
            # return as is for ACK, NAK and RET
            if len(self.data) == 2:
                defer.returnValue(self.data)
            # check if send command with mode bit
            # and remove all garbage if needed
            command = req[1]
            if command == '\x88':
                # Replies interleave mode bits and payload bytes; take the
                # payload (odd positions) and the mode bits (even positions).
                data = self.data[1::2]
                modebits = self.data[::2]
                if modebits[-1] == MODEBIT:
                    raise ValueError('No modebit at the end')
                data = self.checksum(data)
                defer.returnValue(data)
        else:
            raise TimeoutException("Timeout")
        # except Exception as e:
        #     raise e
        defer.returnValue(self.data)

    def checksum(self, data):
        """Strip and verify the trailing checksum byte (sum of payload bytes)."""
        chk = data[-1]
        data = data[:-1]
        if sum(map(ord, data)) == ord(chk):
            return data
        raise ValueError('Wrong checksum, data:{}, chk:{}'.format(data, chk))

    @log_result
    def mdb_init(self):
        """Send the adapter initialisation command."""
        return self.call('\x02\x85\x0A')
| aborilov/pymdb | pymdb/protocol/mdb.py | mdb.py | py | 3,030 | python | en | code | 6 | github-code | 13 |
def sum_divisors(num):
    """Return the sum of the proper divisors of *num* (divisors < num)."""
    return sum(d for d in range(1, num // 2 + 1) if num % d == 0)
def is_abundant(num):
    """Return True if *num* is abundant (proper-divisor sum exceeds num).

    Idiom fix: return the boolean comparison directly instead of the
    if/else True/False ladder.
    """
    return sum_divisors(num) > num
# Precompute every abundant number below 28124 (Project Euler 23: all
# integers above 28123 can be written as the sum of two abundant numbers).
abundants = []
for n in range(1, 28124):
    if is_abundant(n):
        abundants.append(n)
# Set copy for O(1) membership tests in abundant_sum.
abundants_set = set(abundants)
def abundant_sum(num):
    """Return True if *num* is the sum of two abundant numbers.

    Relies on the module-level `abundants` (ascending) and `abundants_set`.
    """
    for small in abundants:
        if small > num:
            # abundants is sorted, so no later candidate can work either.
            break
        if (num - small) in abundants_set:
            return True
    return False
# Answer: sum of all positive integers that cannot be written as the sum of
# two abundant numbers.
print(sum(x for x in range(1, 28124) if not abundant_sum(x)))
| aryanmalik567/ProjectEuler | Problems 20 - 29/Problem 23.py | Problem 23.py | py | 620 | python | en | code | 0 | github-code | 13 |
72147041618 | import sys
from softlearning.policies.utils import (
get_policy_from_variant, get_policy_from_params, get_policy)
from softlearning.models.utils import (
get_reward_classifier_from_variant, get_dynamics_model_from_variant)
from softlearning.misc.generate_goal_examples import (
get_goal_example_from_variant, get_goal_transitions_from_variant)
from softlearning.misc.get_multigoal_example_pools import (
get_example_pools_from_variant)
from examples.instrument import run_example_local
from examples.development.main import ExperimentRunner
class ExperimentRunnerClassifierRL(ExperimentRunner):
    """ExperimentRunner specialisation that wires reward classifiers, goal
    example pools, and (for some algorithms) dynamics/distance models into the
    algorithm kwargs, and restores them from checkpoints."""

    def _get_algorithm_kwargs(self, variant):
        """Extend the base kwargs with classifier/goal-example machinery
        depending on the configured algorithm type."""
        algorithm_kwargs = super()._get_algorithm_kwargs(variant)

        algorithm_type = variant['algorithm_params']['type']
        # TODO: Replace this with a common API for single vs multigoal
        # === SINGLE GOAL POOL ===
        if algorithm_type in (
                'SACClassifier',
                'RAQ',
                'VICE',
                'VICEGAN',
                'VICERAQ',
                'VICEDynamicsAware',
                'DynamicsAwareEmbeddingVICE'):
            # One classifier + one pool of goal examples (train/validation).
            reward_classifier = self.reward_classifier = (
                get_reward_classifier_from_variant(
                    self._variant, algorithm_kwargs['training_environment']))
            algorithm_kwargs['classifier'] = reward_classifier

            goal_examples_train, goal_examples_validation = (
                get_goal_example_from_variant(variant))
            algorithm_kwargs['goal_examples'] = goal_examples_train
            algorithm_kwargs['goal_examples_validation'] = (
                goal_examples_validation)

            if algorithm_type == 'VICEDynamicsAware':
                # Additionally needs a learned dynamics model.
                algorithm_kwargs['dynamics_model'] = (get_dynamics_model_from_variant(
                    self._variant, algorithm_kwargs['training_environment']))
            elif algorithm_type == 'DynamicsAwareEmbeddingVICE':
                # TODO(justinvyu): Get this working for any environment
                # Reuse the classifier's state preprocessor as the distance fn.
                self.distance_fn = algorithm_kwargs['distance_fn'] = (
                    reward_classifier.observations_preprocessors['state_observation'])
                # TODO(justinvyu): include goal state as one of the VICE goal exmaples?
                algorithm_kwargs['goal_state'] = None

        # === LOAD GOAL POOLS FOR MULTI GOAL ===
        elif algorithm_type in (
                'VICEGANMultiGoal',
                'MultiVICEGAN'):
            # One classifier per goal, plus per-goal example pools.
            goal_pools_train, goal_pools_validation = (
                get_example_pools_from_variant(variant))
            num_goals = len(goal_pools_train)

            reward_classifiers = self.reward_classifiers = tuple(
                get_reward_classifier_from_variant(
                    variant,
                    algorithm_kwargs['training_environment'])
                for _ in range(num_goals))

            algorithm_kwargs['classifiers'] = reward_classifiers
            algorithm_kwargs['goal_example_pools'] = goal_pools_train
            algorithm_kwargs['goal_example_validation_pools'] = goal_pools_validation

        elif algorithm_type == 'SQIL':
            # SQIL consumes full goal transitions rather than a classifier.
            goal_transitions = get_goal_transitions_from_variant(variant)
            algorithm_kwargs['goal_transitions'] = goal_transitions

        return algorithm_kwargs

    def _restore_algorithm_kwargs(self, picklable, checkpoint_dir, variant):
        """Rebuild single-goal classifier/distance state from a checkpoint."""
        algorithm_kwargs = super()._restore_algorithm_kwargs(picklable, checkpoint_dir, variant)

        if 'reward_classifier' in picklable.keys():
            reward_classifier = self.reward_classifier = picklable[
                'reward_classifier']
            algorithm_kwargs['classifier'] = reward_classifier

            # Goal examples are regenerated from the variant, not pickled.
            goal_examples_train, goal_examples_validation = (
                get_goal_example_from_variant(variant))
            algorithm_kwargs['goal_examples'] = goal_examples_train
            algorithm_kwargs['goal_examples_validation'] = (
                goal_examples_validation)

        if 'distance_estimator' in picklable.keys():
            distance_fn = self.distance_fn = picklable['distance_estimator']
            algorithm_kwargs['distance_fn'] = distance_fn
            algorithm_kwargs['goal_state'] = None

        return algorithm_kwargs

    def _restore_multi_algorithm_kwargs(self, picklable, checkpoint_dir, variant):
        """Rebuild multi-goal classifier state from a checkpoint."""
        algorithm_kwargs = super()._restore_multi_algorithm_kwargs(
            picklable, checkpoint_dir, variant)

        if 'reward_classifiers' in picklable.keys():
            reward_classifiers = self.reward_classifiers = picklable[
                'reward_classifiers']
            # Re-apply the observation keys from the (possibly updated) variant.
            for reward_classifier in self.reward_classifiers:
                reward_classifier.observation_keys = (variant['reward_classifier_params']
                                                      ['kwargs']
                                                      ['observation_keys'])
            algorithm_kwargs['classifiers'] = reward_classifiers

            goal_pools_train, goal_pools_validation = (
                get_example_pools_from_variant(variant))

            algorithm_kwargs['goal_example_pools'] = goal_pools_train
            algorithm_kwargs['goal_example_validation_pools'] = goal_pools_validation

        return algorithm_kwargs

    @property
    def picklables(self):
        """Base picklables plus any classifier/distance models this runner holds."""
        picklables = super().picklables

        if hasattr(self, 'reward_classifier'):
            picklables['reward_classifier'] = self.reward_classifier
        elif hasattr(self, 'reward_classifiers'):
            picklables['reward_classifiers'] = self.reward_classifiers

        if hasattr(self, 'distance_fn'):
            picklables['distance_estimator'] = self.distance_fn

        return picklables
def main(argv=None):
    """Run ExperimentRunner locally on ray.

    To run this example on cloud (e.g. gce/ec2), use the setup scripts:
    'softlearning launch_example_{gce,ec2} examples.development <options>'.

    Run 'softlearning launch_example_{gce,ec2} --help' for further
    instructions.

    Parameters
    ----------
    argv
        Command-line arguments forwarded to the example runner (defaults to
        None; the __main__ guard passes sys.argv[1:]).
    """
    # __package__ should be `development.main`
    run_example_local('examples.classifier_rl', argv)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| abhishekunique/RND-ashwin | examples/classifier_rl/main.py | main.py | py | 6,261 | python | en | code | 0 | github-code | 13 |
71545583059 | import config
import matplotlib.pyplot as plt
import torchvision
import torch
# Load the pretrained PGAN model (CelebA-HQ, 512x512) from torch.hub.
model = torch.hub.load("facebookresearch/pytorch_GAN_zoo:hub",
	"PGAN", model_name="celebAHQ-512", pretrained=True,
	useGPU=config.USE_GPU)
# Sample random noise vectors (one per image to generate).
(noise, _) = model.buildNoiseData(config.NUM_IMAGES)
# Pass the sampled noise vectors through the pretrained generator.
with torch.no_grad():
	generatedImages = model.test(noise)
# Visualize the generated images as a single normalized grid.
grid = torchvision.utils.make_grid(
	generatedImages.clamp(min=-1, max=1), nrow=config.NUM_IMAGES, scale_each=True, normalize=True)
plt.figure(figsize = (20,20))
plt.imshow(grid.permute(1, 2, 0).cpu().numpy())
# Save the generated-image visualization to disk.
torchvision.utils.save_image(generatedImages.clamp(min=-1, max=1),
	config.SAVE_IMG_PATH, nrow=config.NUM_IMAGES, scale_each=True,
	normalize=True)
16031681997 | import numpy as np
import torch
from hypothesis.metric import BaseValueMetric
class ExponentialAverageMetric(BaseValueMetric):
    r"""Tracks an exponentially weighted moving average of a scalar metric.

    Each update blends the new observation with the running value:
    ``next = decay * value + (1 - decay) * current``.
    """

    def __init__(self, initial_value=None, decay=.99):
        super(ExponentialAverageMetric, self).__init__(initial_value)
        # Weight given to the newest observation in the blend.
        self.decay = decay

    def update(self, value):
        # Check if the current value was initialized.
        if self.current_value is not None:
            next_value = self.decay * value + (1 - self.decay) * self.current_value
        else:
            next_value = value
        # Bug fix: store the smoothed value -- previously the raw observation
        # was stored, so the exponential average was never applied.
        self._set_current_value(next_value)
| montefiore-ai/hypothesis | hypothesis/metric/exponential_average.py | exponential_average.py | py | 591 | python | en | code | 47 | github-code | 13 |
10927700118 | import torch
from networkx.classes.reportviews import NodeView
from torch.nn import Module
from typing import Union, Iterator, Callable, Tuple, Optional
from .nodes import RateNet, SpikeNet, InstantNode
from .edges import RLS, Linear, LinearMasked
from .utility import retrieve_from_dict, add_op_name
from .observer import Observer
from pyrates import NodeTemplate, CircuitTemplate
import numpy as np
from time import perf_counter
from networkx import DiGraph
from multipledispatch import dispatch
class Network(Module):
"""Main user interface for initializing, training, testing, and running networks consisting of rnn, input, and
output layers.
"""
    def __init__(self, dt: float, device: str = "cpu"):
        """Instantiates network with a single RNN layer.

        Parameters
        ----------
        dt
            Time-step used for all simulations and rnn layers.
        device
            Device on which to deploy the `Network` instance.
        """
        super().__init__()
        self.graph = DiGraph()        # nodes/edges of the network
        self.device = device
        self.dt = dt
        self._record = {}             # variables marked for recording
        self._var_map = {}            # maps variable names to operator-scoped names
        self._in_node = None          # label of the current input node, if any
        self._out_node = None         # label of the current output node, if any
        self._bwd_graph = {}          # reverse adjacency used for backward passes
        self._train_edge = ()         # edge currently selected for training
    @dispatch(str)
    def __getitem__(self, item: str):
        """Return the attribute dict of node *item* (string overload)."""
        return self.graph.nodes[item]
    @dispatch(tuple)
    def __getitem__(self, nodes: tuple):
        """Return the attribute dict of the edge (source, target) (tuple overload)."""
        return self.graph[nodes[0]][nodes[1]]
    def __iter__(self):
        """Iterate over the attribute dicts of all network nodes."""
        for n in self.graph.nodes:
            yield self[n]
    def __len__(self):
        """Number of nodes in the network graph."""
        return len(self.graph.nodes)
    def __call__(self, *args, **kwargs):
        """Alias for `forward` (defined elsewhere in this class)."""
        return self.forward(*args, **kwargs)
@property
def n_out(self) -> int:
"""Current output dimensionality.
"""
try:
return self[self._out_node]["n_out"]
except AttributeError:
return 0
@property
def n_in(self) -> int:
"""Current input dimensionality of the network.
"""
try:
return self[self._in_node]["n_in"]
except AttributeError:
return 0
    @property
    def nodes(self) -> NodeView:
        """View of the network's node labels (networkx NodeView)."""
        return self.graph.nodes
@property
def state(self) -> dict:
"""Dictionary containing the state vectors of each differential equation node in the network.
"""
states = {}
for n in self.nodes:
try:
states[n] = self.get_node(n).y
except AttributeError:
pass
return states
    def get_node(self, node: str) -> Union[InstantNode, RateNet]:
        """Returns node instance from the network.

        Parameters
        ----------
        node
            Name of the node.

        Returns
        -------
        Union[InstantNode, RateNet]
            Instance of a node class stored under the 'node' attribute.
        """
        return self[node]["node"]
    def get_edge(self, source: str, target: str) -> Linear:
        """Returns edge instance from the network.

        Parameters
        ----------
        source
            Name of the source node.
        target
            Name of the target node.

        Returns
        -------
        Linear
            Instance of the edge class stored under the 'edge' attribute.
        """
        return self[source, target]["edge"]
    def get_var(self, node: str, var: str) -> Union[torch.Tensor, float]:
        """Returns variable from network node.

        Parameters
        ----------
        node
            Name of the network node.
        var
            Name of the node variable.

        Returns
        -------
        Union[torch.Tensor, float]
            Value of the node variable.
        """
        # variable names are first translated via the operator map; if the node instance
        # does not know the variable, fall back to the attributes stored on the graph node
        try:
            return self.get_node(node)[self._relabel_var(var)]
        except KeyError:
            return self[node][var]
    def set_var(self, node: str, var: str, val: Union[torch.Tensor, float]):
        """Set the value of a network node variable.

        Parameters
        ----------
        node
            Name of the network node.
        var
            Name of the node variable.
        val
            New variable value.

        Returns
        -------
        None

        Raises
        ------
        KeyError
            If `var` cannot be found on `node` (or `node` is not part of the graph).
        """
        try:
            n = self.get_node(node)
            try:
                # parameters are set via the node's own setter
                n.set_param(var, val)
            except KeyError:
                # state variables are overwritten in-place to preserve the tensor object
                v = n[var]
                v[:] = val
        except KeyError:
            raise KeyError(f"Variable {var} was not found on node {node}.")
    def add_node(self, label: str, node: Union[InstantNode, RateNet], node_type: str, op: str = None,
                 **node_attrs) -> None:
        """Add node to the network, based on an instance from `rectipy.nodes`.

        Parameters
        ----------
        label
            Name of the node in the network graph.
        node
            Instance of a class from `rectipy.nodes`.
        node_type
            Type of the node. Should be set to "diff_eq" for nodes that contain differential equations.
        op
            For differential equation-based nodes, an operator name can be passed that is used to identify variables on
            the node.
        node_attrs
            Additional keyword arguments passed to `networkx.DiGraph.add_node`.

        Returns
        -------
        None
        """
        # remember operator mapping for each RNN node parameter and state variable
        if op:
            for p in node.parameter_names:
                add_op_name(op, p, self._var_map)
            for v in node.variable_names:
                add_op_name(op, v, self._var_map)
        # add node to graph, with an `eval` flag (ensures each node is evaluated only once
        # per forward pass) and a zero-initialized output buffer
        self.graph.add_node(label, node=node, node_type=node_type, n_out=node.n_out, n_in=node.n_in, eval=True,
                            out=torch.zeros(node.n_out, device=self.device), **node_attrs)
    def add_diffeq_node(self, label: str, node: Union[str, NodeTemplate, CircuitTemplate], input_var: str,
                        output_var: str, weights: np.ndarray = None, source_var: str = None, target_var: str = None,
                        spike_var: Union[str, list] = None, spike_def: Union[str, list] = None, op: str = None,
                        train_params: list = None, **kwargs) -> RateNet:
        """Adds a differential equation-based RNN node to the `Network` instance.

        Parameters
        ----------
        label
            The label of the node in the network graph.
        node
            Path to the YAML template or an instance of a `pyrates.NodeTemplate`.
        input_var
            Name of the parameter in the node equations that input should be projected to.
        output_var
            Name of the variable in the node equations that should be used as output of the RNN node.
        weights
            Determines the number of neurons in the network as well as their connectivity. Given an `N x N` weights
            matrix, `N` neurons will be added to the RNN node, each of which is governed by the equations defined in the
            `NodeTemplate` (see argument `node`). Neurons will be labeled `n0` to `n<N>` and every non-zero entry in the
            matrix will be realized by an edge between the corresponding neurons in the network.
        source_var
            Source variable that will be used for each connection in the network.
        target_var
            Target variable that will be used for each connection in the network.
        spike_var
            Name of the parameter in the node equations that recurrent input from the RNN should be projected to.
        spike_def
            Name of the variable in the node equations that should be used to determine spikes in the network.
        op
            Name of the operator in which all the above variables can be found. If not provided, it is assumed that
            the operator name is provided together with the variable names, e.g. `source_var = <op>/<var>`.
        train_params
            Names of all RNN parameters that should be made available for optimization.
        kwargs
            Additional keyword arguments provided to the `RNNLayer` (or `SRNNLayer` in case of spiking neurons).

        Returns
        -------
        RateNet
            Instance of the RNN node that was added to the network.
        """
        # add operator key to variable names
        var_dict = {'svar': source_var, 'tvar': target_var, 'in_ext': input_var, 'in_net': spike_var,
                    'out': output_var, 'spike': spike_def}
        if "record_vars" in kwargs:
            var_dict["record_vars"] = kwargs.pop("record_vars")
        # NOTE(review): this wipes mappings registered by previously added nodes — confirm
        # that the operator map is meant to describe only the most recent diff-eq node
        self._var_map = {}
        if op is not None:
            for key, var in var_dict.copy().items():
                if key == "record_vars":
                    # build an identity mapping for the recorded variables (operator-prefixed)
                    kwargs["var_mapping"] = {}
                    for v in var:
                        v_new = add_op_name(op, v, self._var_map)
                        kwargs["var_mapping"][v_new] = v_new
                if type(var) is list:
                    var_dict[key] = [add_op_name(op, v, self._var_map) for v in var]
                else:
                    var_dict[key] = add_op_name(op, var, self._var_map)
            if train_params:
                train_params = [add_op_name(op, p, self._var_map) for p in train_params]
            if "node_vars" in kwargs:
                # prefix raw variable names with "all/<op>/" so pyrates can resolve them
                for key in kwargs["node_vars"].copy():
                    if "/" not in key:
                        val = kwargs["node_vars"].pop(key)
                        kwargs["node_vars"][f"all/{op}/{key}"] = val
        # initialize rnn layer: rate-based if no spiking variables were provided
        if spike_var is None and spike_def is None:
            node = RateNet.from_pyrates(node, var_dict['in_ext'], var_dict['out'], weights=weights,
                                        source_var=var_dict['svar'], target_var=var_dict['tvar'],
                                        train_params=train_params, device=self.device, dt=self.dt, **kwargs)
        elif spike_var is None or spike_def is None:
            raise ValueError('To define a reservoir with a spiking neural network layer, please provide both the '
                             'name of the variable that spikes should be stored in (`spike_var`) as well as the '
                             'name of the variable that is used to define spikes (`spike_def`).')
        else:
            node = SpikeNet.from_pyrates(node, var_dict['in_ext'], var_dict['out'], weights=weights,
                                         source_var=var_dict['svar'], target_var=var_dict['tvar'],
                                         spike_def=var_dict['spike'], spike_var=var_dict['in_net'],
                                         train_params=train_params, device=self.device, dt=self.dt, **kwargs)
        # add node to the network graph
        self.add_node(label, node=node, node_type="diff_eq", op=op)
        return node
def add_func_node(self, label: str, n: int, activation_function: str, **kwargs) -> InstantNode:
"""Add an activation function as a node to the network (no intrinsic dynamics, just an input-output mapping).
Parameters
----------
label
The label of the node in the network graph.
n
Dimensionality of the node.
activation_function
Activation function applied to the output of the last layer. Valid options are:
- 'tanh' for `torch.nn.Tanh()`
- 'sigmoid' for `torch.nn.Sigmoid()`
- 'softmax' for `torch.nn.Softmax(dim=0)`
- 'softmin' for `torch.nn.Softmin(dim=0)`
- 'log_softmax' for `torch.nn.LogSoftmax(dim=0)`
- 'identity' for `torch.nn.Identity`
Returns
-------
ActivationFunc
The node of the network graph.
"""
# create node instance
node = InstantNode(n, activation_function, **kwargs)
# add node to the network graph
self.add_node(label, node=node, node_type="diff_eq")
return node
def add_edge(self, source: str, target: str, weights: Union[torch.Tensor, np.ndarray] = None,
train: Optional[str] = None, dtype: torch.dtype = torch.float64, edge_attrs: dict = None,
**kwargs) -> Linear:
"""Add a feed-forward layer to the network.
Parameters
----------
source
Label of the source node.
target
Label of the target node.
weights
`k x n` weight matrix that realizes the linear projection of the `n` source outputs to
the `k` target inputs.
train
Can be used to make the edge weights trainable. The following options are available:
- `None` for a static edge
- 'gd' for training of the edge weights via standard pytorch gradient descent
- 'rls' for recursive least squares training of the edge weights
dtype
Data type of the edge weights.
edge_attrs
Additional edge attributes passed to `networkx.DiGraph.add_edge`.
kwargs
Additional keyword arguments to be passed to the edge class initialization method.
Returns
-------
Linear
Instance of the edge class.
"""
if not edge_attrs:
edge_attrs = {}
# initialize edge
kwargs.update({"n_in": self[source]["n_out"], "n_out": self[target]["n_in"],
"weights": weights, "dtype": dtype})
trainable = True
LinEdge = LinearMasked if "mask" in kwargs else Linear
if train is None:
trainable = False
edge = LinEdge(**kwargs, detach=True)
elif train == "gd":
edge = LinEdge(**kwargs, detach=False)
elif train == "rls":
edge = RLS(**kwargs)
self._train_edge = (source, target)
else:
raise ValueError("Invalid option for keyword argument `train`. Please see the docstring of "
"`Network.add_output_layer` for valid options.")
# add connecting edge to graph
self.graph.add_edge(source, target, edge=edge.to(self.device), trainable=trainable, n_in=edge.n_in,
n_out=edge.n_out, **edge_attrs)
return edge
def pop_node(self, node: str) -> Union[InstantNode, RateNet]:
"""Removes (and returns) a node from the network.
Parameters
----------
node
Name of the node to remove.
Returns
-------
Union[InstantNode, RateNet]
Removed node.
"""
node_data = self.get_node(node)
self.graph.remove_node(node)
return node_data
def pop_edge(self, source: str, target: str) -> Linear:
"""Removes (and returns) an edge from the network.
Parameters
----------
source
Name of the source node.
target
Name of the target node.
Returns
-------
Linear
Removed edge.
"""
edge = self.get_edge(source, target)
self.graph.remove_edge(source, target)
return edge
def compile(self):
"""Automatically detects a forward pass through the network based on the nodes and edges in the network.
"""
# make sure that only a single input node exists
in_nodes = [n for n in self.graph.nodes if self.graph.in_degree(n) == 0]
if len(in_nodes) != 1:
raise ValueError(f"Unable to identify the input node of the Network. "
f"Nodes that have no input edges: {in_nodes}."
f"Make sure that exactly one such node without input edges exists in the network.")
self._in_node = in_nodes.pop()
# make sure that only a single output node exists
out_nodes = [n for n in self.graph.nodes if self.graph.out_degree(n) == 0]
if len(out_nodes) != 1:
raise ValueError(f"Unable to identify the output node of the Network. "
f"Nodes that have no outgoing edges: {out_nodes}."
f"Make sure that exactly one such node without outgoing edges exists in the network.")
self._out_node = out_nodes.pop()
# create backward pass through network starting from output node
self._bwd_graph = self._compile_bwd_graph(self._out_node, dict())
def forward(self, x: Union[torch.Tensor, np.ndarray]) -> torch.Tensor:
"""Forward method as implemented for any `torch.Module`.
Parameters
----------
x
Input tensor.
Returns
-------
torch.Tensor
Output tensor.
"""
node = self._out_node
x = self._backward(x, node)
self._reset_node_eval()
return x
def parameters(self, recurse: bool = True) -> Iterator:
"""Yields the trainable parameters of the network model.
Parameters
----------
recurse
If true, yields parameters of all submodules.
Yields
------
Iterator
Trainable model parameters.
"""
g = self.graph
for node in g:
for p in self.get_node(node).parameters(recurse=recurse):
yield p
for s, t in g.edges:
for p in g[s][t]["edge"].parameters():
yield p
def detach(self, requires_grad: bool = True, detach_params: bool = False) -> None:
"""Goes through all DE-based nodes and detaches their state variables from the current graph for gradient
calculation.
Parameters
----------
requires_grad
If true, all tensors that will be detached will be set to require gradient calculation after detachment.
detach_params
If true, parameters that require gradient calculation will be detached as well.
Returns
-------
None
"""
for node in self.nodes:
n = self.get_node(node)
if hasattr(n, "y"):
n.detach(requires_grad=requires_grad, detach_params=detach_params)
def reset(self, state: dict = None):
"""Reset the network state.
Parameters
----------
state
Optional dictionary, that contains state-vectors (values) for nodes of the network (keys).
Returns
-------
None
"""
for node in self.nodes:
n = self.get_node(node)
if hasattr(n, "y"):
if state and node in state:
n.reset(state[node])
else:
n.reset()
def clear(self):
"""Removes all nodes and edges from the network
"""
for node in list(self.nodes):
self.pop_node(node)
    def run(self, inputs: Union[np.ndarray, torch.Tensor], sampling_steps: int = 1, verbose: bool = True,
            enable_grad: bool = True, **kwargs) -> Observer:
        """Perform numerical integration of the input-driven network equations.

        Parameters
        ----------
        inputs
            `T x m` array of inputs fed to the model, where `T` is the number of integration steps and `m` is the
            number of input dimensions of the network.
        sampling_steps
            Number of integration steps at which to record observables.
        verbose
            If true, the progress of the integration will be displayed.
        enable_grad
            If true, the simulation will be performed with gradient calculation.
        kwargs
            Additional keyword arguments used for the observation.

        Returns
        -------
        Observer
            Instance of the `Observer`.
        """
        # preparations on input arguments
        steps = inputs.shape[0]
        if type(inputs) is np.ndarray:
            inputs = torch.tensor(inputs, device=self.device)
        # number of steps after which the autograd history is truncated (defaults to never)
        truncate_steps = kwargs.pop("truncate_steps", steps)
        # compile network
        self.compile()
        # initialize observer (an existing one can be passed in via the "obs" keyword)
        if "obs" in kwargs:
            obs = kwargs.pop("obs")
        else:
            obs = Observer(dt=self.dt, record_loss=kwargs.pop("record_loss", False), **kwargs)
        rec_vars = [v for v in obs.recorded_state_variables]
        # forward input through static network
        grad = torch.enable_grad if enable_grad else torch.no_grad
        with grad():
            for step in range(steps):
                output = self.forward(inputs[step, :])
                if step % sampling_steps == 0:
                    if verbose:
                        print(f'Progress: {step}/{steps} integration steps finished.')
                    # NOTE(review): `rec_vars` entries are indexed here as (node, variable)
                    # pairs, while `_bptt`/`_rls` pass relabeled strings to `self[v]` —
                    # confirm which format `Observer.recorded_state_variables` yields
                    obs.record(step, output, 0.0, [self.get_var(v[0], v[1]) for v in rec_vars])
                # detach the computation graph every `truncate_steps` steps to cap memory use
                if truncate_steps < steps and step % truncate_steps == truncate_steps-1:
                    self.detach()
        return obs
    def fit_bptt(self, inputs: Union[np.ndarray, list], targets: Union[np.ndarray, list], optimizer: str = 'sgd',
                 optimizer_kwargs: dict = None, loss: str = 'mse', loss_kwargs: dict = None, lr: float = 1e-3,
                 sampling_steps: int = 1, update_steps: int = 100, verbose: bool = True, **kwargs) -> Observer:
        """Optimize model parameters via backpropagation through time.

        Parameters
        ----------
        inputs
            `T x m` array of inputs fed to the model, where `T` is the number of training steps and `m` is the number
            of input dimensions of the network. Alternatively, a list of such arrays (one per training epoch).
        targets
            `T x k` array of targets, where `T` is the number of training steps and `k` is the number of outputs of the
            network. Must be a list of such arrays if `inputs` is a list.
        optimizer
            Name of the optimization algorithm to use. Available options are:
            - 'sgd' for `torch.optim.SGD`
            - 'adam' for `torch.optim.Adam`
            - 'adamw' for torch.optim.AdamW
            - 'adagrad' for `torch.optim.Adagrad`
            - 'adadelta' for `torch.optim.Adadelta`
            - 'adamax' for `torch.optim.Adamax`
            - 'rmsprop' for `torch.optim.RMSprop`
            - 'rprop' for `torch.optim.Rprop`
        optimizer_kwargs
            Additional keyword arguments provided to the initialization of the optimizer.
        loss
            Name of the loss function that should be used for optimization. Available options are:
            - 'mse' for `torch.nn.MSELoss`
            - 'l1' for `torch.nn.L1Loss`
            - 'nll' for `torch.nn.NLLLoss`
            - 'ce' for `torch.nn.CrossEntropyLoss`
            - 'kld' for `torch.nn.KLDivLoss`
            - 'hinge' for `torch.nn.HingeEmbeddingLoss`
        loss_kwargs
            Additional keyword arguments provided to the initialization of the loss.
        lr
            Learning rate.
        sampling_steps
            Number of training steps at which to record observables.
        update_steps
            Number of training steps after which to perform an update of the trainable parameters based on the
            accumulated gradients.
        verbose
            If true, the training progress will be displayed.
        kwargs
            Additional keyword arguments used for the optimization, loss calculation and observation.

        Returns
        -------
        Observer
            Instance of the `observer`.
        """
        # preparations
        ##############
        # compile network
        self.compile()
        # initialize loss function
        loss = self._get_loss_function(loss, loss_kwargs=loss_kwargs)
        # initialize optimizer
        optimizer = self._get_optimizer(optimizer, lr, self.parameters(), optimizer_kwargs=optimizer_kwargs)
        # retrieve keyword arguments for optimization
        step_kwargs = retrieve_from_dict(['closure'], kwargs)
        error_kwargs = retrieve_from_dict(['retain_graph'], kwargs)
        # initialize observer
        obs_kwargs = retrieve_from_dict(['record_output', 'record_loss', 'record_vars'], kwargs)
        obs = Observer(dt=self.dt, **obs_kwargs)
        # optimization
        ##############
        t0 = perf_counter()
        if type(inputs) is list:
            # epoch-wise training: each list entry is treated as a separate training epoch
            if len(inputs) != len(targets):
                raise ValueError('Wrong dimensions of input and target output. Please make sure that `inputs` and '
                                 '`targets` agree in the first dimension (epochs).')
            # perform optimization
            obs = self._bptt_epochs(inputs, targets, loss=loss, optimizer=optimizer,
                                    obs=obs, error_kwargs=error_kwargs, step_kwargs=step_kwargs,
                                    sampling_steps=sampling_steps, verbose=verbose)
        else:
            # transform inputs into tensors
            inp_tensor = torch.tensor(inputs, device=self.device)
            target_tensor = torch.tensor(targets, device=self.device)
            if inp_tensor.shape[0] != target_tensor.shape[0]:
                raise ValueError('Wrong dimensions of input and target output. Please make sure that `inputs` and '
                                 '`targets` agree in the first dimension.')
            # perform optimization
            obs = self._bptt(inp_tensor, target_tensor, loss, optimizer, obs, error_kwargs, step_kwargs,
                             sampling_steps=sampling_steps, optim_steps=update_steps, verbose=verbose)
        t1 = perf_counter()
        print(f'Finished optimization after {t1-t0} s.')
        return obs
def fit_ridge(self, inputs: np.ndarray, targets: np.ndarray, sampling_steps: int = 100, alpha: float = 1e-4,
verbose: bool = True, add_readout_node: bool = True, **kwargs) -> Observer:
"""Train readout weights on top of the input-driven model dynamics via ridge regression.
Parameters
----------
inputs
`T x m` array of inputs fed to the model, where`T` is the number of training steps and `m` is the number of
input dimensions of the network.
targets
`T x k` array of targets, where `T` is the number of training steps and `k` is the number of outputs of the
network.
sampling_steps
Number of training steps at which to record observables.
alpha
Ridge regression regularization constant.
verbose
If true, the training progress will be displayed.
add_readout_node
If true, a readout node is added to the network, which will be connected to the current output node of the
network via the trained readout weights.
kwargs
Additional keyword arguments used for the observation and network simulations.
Returns
-------
Observer
Instance of the `observer`.
"""
# preparations
##############
# transform inputs into tensors
target_tensor = torch.tensor(targets, device=self.device)
if inputs.shape[0] != target_tensor.shape[0]:
raise ValueError('Wrong dimensions of input and target output. Please make sure that `inputs` and '
'`targets` agree in the first dimension.')
# compile network
self.compile()
# collect network states
########################
t0 = perf_counter()
obs = self.run(inputs=inputs, sampling_steps=sampling_steps, verbose=verbose, **kwargs)
t1 = perf_counter()
print(f'Finished network state collection after {t1-t0} s.')
# train read-out classifier
###########################
t0 = perf_counter()
# ridge regression formula
X = torch.stack(obs["out"])
X_t = X.T
w_out = torch.inverse(X_t @ X + alpha*torch.eye(X.shape[1])) @ X_t @ target_tensor
y = X @ w_out
# progress report
t1 = perf_counter()
print(f'Finished fitting of read-out weights after {t1 - t0} s.')
# add read-out layer
####################
if add_readout_node:
self.add_func_node("readout", node_type="function", n=w_out.shape[1], activation_function="identity")
self.add_edge(self._out_node, target="readout", weights=w_out.T)
obs.save("y", y)
obs.save("w_out", w_out)
return obs
def fit_rls(self, inputs: Union[list, np.ndarray], targets: Union[list, np.ndarray], update_steps: int = 1,
sampling_steps: int = 100, verbose: bool = True, **kwargs) -> Observer:
r"""Finds model parameters $w$ such that $||Xw - y||_2$ is minimized, where $X$ contains the neural activity and
$y$ contains the targets.
Parameters
----------
inputs
`T x m` array of inputs fed to the model, where`T` is the number of training steps and `m` is the number of
input dimensions of the network.
targets
`T x k` array of targets, where `T` is the number of training steps and `k` is the number of outputs of the
network.
update_steps
Each `update_steps` an update of the trainable parameters will be performed.
sampling_steps
Number of training steps at which to record observables.
verbose
If true, the training progress will be displayed.
kwargs
Additional keyword arguments used for the optimization, loss calculation and observation.
Returns
-------
Observer
Instance of the `observer`.
"""
# preparations
##############
# compile network
self.compile()
# initialize observer
obs_kwargs = retrieve_from_dict(['record_output', 'record_loss', 'record_vars'], kwargs)
obs = Observer(dt=self.dt, **obs_kwargs)
rec_vars = [self._relabel_var(v) for v in obs.recorded_state_variables]
# optimization
##############
t0 = perf_counter()
if type(inputs) is list:
# check input and target dimensions
if len(inputs) != len(targets):
raise ValueError('Wrong dimensions of input and target output. Please make sure that `inputs` and '
'`targets` agree in the first dimension (epochs).')
# fit weights
obs = self._rls_epoch(inputs, targets, obs, optim_steps=update_steps, verbose=verbose)
else:
# test correct dimensionality of inputs
if inputs.shape[0] != targets.shape[0]:
raise ValueError('Wrong dimensions of input and target output. Please make sure that `inputs` and '
'`targets` agree in the first dimension.')
# transform inputs into tensors
inp_tensor = torch.tensor(inputs, device=self.device)
target_tensor = torch.tensor(targets, device=self.device)
# fit weights
obs = self._rls(inp_tensor, target_tensor, obs, optim_steps=update_steps, sampling_steps=sampling_steps,
verbose=verbose)
t1 = perf_counter()
print(f'Finished optimization after {t1 - t0} s.')
return obs
    def fit_eprop(self, inputs: np.ndarray, targets: np.ndarray, feedback_weights: np.ndarray = None,
                  epsilon: float = 0.99, delta: float = 0.9, update_steps: int = 1, sampling_steps: int = 100,
                  verbose: bool = True, **kwargs) -> Observer:
        r"""Reinforcement learning algorithm that implements slow adjustment of the feedback weights to the RNN layer
        based on a running average of the residuals.

        Parameters
        ----------
        inputs
            `T x m` array of inputs fed to the model, where `T` is the number of training steps and `m` is the number
            of input dimensions of the network.
        targets
            `T x k` array of targets, where `T` is the number of training steps and `k` is the number of outputs of the
            network.
        feedback_weights
            `m x k` array of synaptic weights. If provided, a feedback connections is established with these weights,
            that projects the network output back to the RNN layer.
        epsilon
            Scalar in (0, 1] that controls how quickly the loss used for reinforcement learning can change.
        delta
            Scalar in (0, 1] that controls how quickly the feedback weights can change.
        update_steps
            Each `update_steps` an update of the trainable parameters will be performed.
        sampling_steps
            Number of training steps at which to record observables.
        verbose
            If true, the training progress will be displayed.
        kwargs
            Additional keyword arguments used for the optimization, loss calculation and observation.

        Returns
        -------
        Observer
            Instance of the `observer`.
        """
        # NOTE: placeholder — all arguments are currently unused
        # TODO: Implement e-prop as defined in Bellec et al. (2020) Nature Communications
        # TODO: Make sure that this fitting method allows for reinforcement learning schemes
        raise NotImplementedError("Method is currently not implemented")
    def test(self, inputs: np.ndarray, targets: np.ndarray, loss: str = 'mse',
             loss_kwargs: dict = None, sampling_steps: int = 100, verbose: bool = True, **kwargs) -> tuple:
        """Test the model performance on a set of inputs and target outputs, with frozen model parameters.

        Parameters
        ----------
        inputs
            `T x m` array of inputs fed to the model, where `T` is the number of testing steps and `m` is the number of
            input dimensions of the network.
        targets
            `T x k` array of targets, where `T` is the number of testing steps and `k` is the number of outputs of the
            network.
        loss
            Name of the loss function that should be used to calculate the loss on the test data. See `Network.train`
            for available options.
        loss_kwargs
            Additional keyword arguments provided to the initialization of the loss.
        sampling_steps
            Number of testing steps at which to record observables.
        verbose
            If true, the progress of the test run will be displayed.
        kwargs
            Additional keyword arguments used for the loss calculation and observation.

        Returns
        -------
        Tuple[Observer,float]
            The `Observer` instance and the total loss on the test data.
        """
        # preparations
        ##############
        # transform inputs into tensors
        target_tensor = torch.tensor(targets, device=self.device)
        # initialize loss function
        loss = self._get_loss_function(loss, loss_kwargs=loss_kwargs)
        # simulate network dynamics. NOTE(review): gradients remain enabled by `run`'s
        # default — parameters are "frozen" only in the sense that no optimizer step is taken
        obs = self.run(inputs=inputs, sampling_steps=sampling_steps, verbose=verbose, **kwargs)
        # calculate loss on the recorded network outputs
        output = torch.stack(obs["out"])
        loss_val = loss(output, target_tensor)
        return obs, loss_val.item()
def _compile_bwd_graph(self, n: str, graph: dict) -> dict:
sources = list(self.graph.predecessors(n))
if len(sources) > 0:
graph[n] = sources
for s in sources:
graph = self._compile_bwd_graph(s, graph)
return graph
def _backward(self, x: Union[torch.Tensor, np.ndarray], n: str) -> torch.Tensor:
if n in self._bwd_graph:
inp = self._bwd_graph[n]
if len(inp) == 1:
x = self._edge_forward(x, inp[0], n)
else:
x = torch.sum(torch.tensor([self._edge_forward(x, i, n) for i in inp]), dim=0)
node = self[n]
if node["eval"]:
node["out"] = node["node"].forward(x)
node["eval"] = False
return node["out"]
def _edge_forward(self, x: Union[torch.Tensor, np.ndarray], u: str, v: str) -> torch.Tensor:
x = self._backward(x, u)
return self.get_edge(u, v).forward(x)
def _reset_node_eval(self):
for n in self:
n["eval"] = True
    def _bptt_epochs(self, inp: list, target: list, loss: Callable,
                     optimizer: torch.optim.Optimizer, obs: Observer, error_kwargs: dict, step_kwargs: dict,
                     sampling_steps: int = 1, verbose: bool = False, **kwargs) -> Observer:
        """Epoch-wise backpropagation through time: one gradient step per epoch."""
        # remember the initial network state so that every epoch starts from it
        y0 = self.state
        epochs = len(inp)
        epoch_losses = []
        for epoch in range(epochs):
            # simulate network dynamics
            obs = self.run(torch.tensor(inp[epoch], device=self.device), verbose=False, sampling_steps=sampling_steps,
                           enable_grad=True, **kwargs)
            # perform gradient descent step over the recorded outputs of the whole epoch
            epoch_loss = self._bptt_step(torch.stack(obs["out"]), torch.tensor(target[epoch], device=self.device),
                                         optimizer=optimizer, loss=loss, error_kwargs=error_kwargs,
                                         step_kwargs=step_kwargs)
            epoch_losses.append(epoch_loss)
            # reset network
            self.reset(y0)
            torch.cuda.empty_cache()
            # display progress
            if verbose:
                print(f'Progress: {epoch+1}/{epochs} training epochs finished.')
                print(f'Epoch loss: {epoch_loss}.')
                print('')
        obs.save("epoch_loss", epoch_losses)
        obs.save("epochs", np.arange(epochs))
        return obs
    def _bptt(self, inp: torch.Tensor, target: torch.Tensor, loss: Callable, optimizer: torch.optim.Optimizer,
              obs: Observer, error_kwargs: dict, step_kwargs: dict, sampling_steps: int = 100,
              optim_steps: int = 1000, verbose: bool = False) -> Observer:
        """Truncated backpropagation through time on a single continuous input stream."""
        # preparations
        rec_vars = [self._relabel_var(v) for v in obs.recorded_state_variables]
        steps = inp.shape[0]
        error = 0.0
        predictions = []
        old_step = 0
        # optimization loop
        for step in range(steps):
            # forward pass
            pred = self.forward(inp[step, :])
            predictions.append(pred)
            # gradient descent optimization step every `optim_steps` steps, over the
            # predictions accumulated since the last update
            if step % optim_steps == optim_steps-1:
                error = self._bptt_step(torch.stack(predictions), target[old_step:step+1], optimizer=optimizer,
                                        loss=loss, error_kwargs=error_kwargs, step_kwargs=step_kwargs)
                # truncate the autograd history so gradients do not span update windows
                self.detach()
                old_step = step+1
                predictions.clear()
            # results storage
            if step % sampling_steps == 0:
                if verbose:
                    print(f'Progress: {step}/{steps} training steps finished. Current loss: {error}.')
                # NOTE(review): records the node attribute dict `self[v]`, whereas `run`
                # records `self.get_var(...)` values — confirm the intended format
                obs.record(step, pred, error, [self[v] for v in rec_vars])
        return obs
def _rls_epoch(self, inp: list, target: list, obs: Observer, optim_steps: int = 1, verbose: bool = False
) -> Observer:
# preparations
rls_edge = self.get_edge(self._train_edge[0], self._train_edge[1])
rls_source = self[self._train_edge[0]]
rls_target = self[self._train_edge[1]]
y0 = self.state
epochs = len(inp)
epoch_losses = []
# fitting
for epoch in range(epochs):
# turn input and target into tensors
inp_tmp = torch.tensor(inp[epoch], device=self.device)
target_tmp = torch.tensor(target[epoch], device=self.device)
# optimization loop
for step in range(inp_tmp.shape[0]):
# forward pass
self.forward(inp_tmp[step, :])
# RLS update
if step % optim_steps == 0:
rls_edge.update(rls_source["out"], target_tmp[step, :], rls_target["out"])
loss = rls_edge.loss
# reset network
self.reset(y0)
torch.cuda.empty_cache()
# display progress
if verbose:
print(f'Progress: {epoch + 1}/{epochs} training epochs finished.')
print(f'Epoch loss: {epoch_losses[-1]}.')
print('')
obs.save("epoch_loss", epoch_losses)
obs.save("epochs", np.arange(epochs))
return obs
    def _rls(self, inp: torch.Tensor, target: torch.Tensor, obs: Observer, sampling_steps: int = 100,
             optim_steps: int = 1, verbose: bool = False) -> Observer:
        """Fit the RLS-trainable edge on a single continuous stream of inputs and targets."""
        # preparations
        rec_vars = [self._relabel_var(v) for v in obs.recorded_state_variables]
        steps = inp.shape[0]
        # the edge registered via `add_edge(train='rls')` and its source/target node dicts
        rls_edge = self.get_edge(self._train_edge[0], self._train_edge[1])
        rls_source = self[self._train_edge[0]]
        rls_target = self[self._train_edge[1]]
        loss = 0.0
        # optimization loop
        for step in range(steps):
            # forward pass
            pred = self.forward(inp[step, :])
            # recursive least-squares update of the edge weights
            if step % optim_steps == 0:
                rls_edge.update(rls_source["out"], target[step, :], rls_target["out"])
                loss = rls_edge.loss
            # recording
            if step % sampling_steps == 0:
                if verbose:
                    print(f'Progress: {step}/{steps} training steps finished. Current loss: {loss}.')
                # NOTE(review): records the node attribute dict `self[v]`, whereas `run`
                # records `self.get_var(...)` values — confirm the intended format
                obs.record(step, pred, loss, [self[v] for v in rec_vars])
        return obs
@staticmethod
def _bptt_step(predictions: torch.Tensor, targets: torch.Tensor, optimizer: torch.optim.Optimizer,
loss: Callable, error_kwargs: dict, step_kwargs: dict) -> float:
error = loss(predictions, targets)
optimizer.zero_grad()
error.backward(**error_kwargs)
optimizer.step(**step_kwargs)
return error.item()
def _relabel_var(self, var: str) -> str:
try:
return self._var_map[var]
except KeyError:
return var
@staticmethod
def _get_optimizer(optimizer: str, lr: float, model_params: Iterator, optimizer_kwargs: dict = None
) -> torch.optim.Optimizer:
if optimizer_kwargs is None:
optimizer_kwargs = {}
if optimizer == 'sgd':
opt = torch.optim.SGD
elif optimizer == 'adam':
opt = torch.optim.Adam
elif optimizer == 'adamw':
opt = torch.optim.AdamW
elif optimizer == 'adagrad':
opt = torch.optim.Adagrad
elif optimizer == 'adadelta':
opt = torch.optim.Adadelta
elif optimizer == 'adamax':
opt = torch.optim.Adamax
elif optimizer == 'rmsprop':
opt = torch.optim.RMSprop
elif optimizer == 'rprop':
opt = torch.optim.Rprop
else:
raise ValueError('Invalid optimizer choice. Please see the documentation of the `Network.train()` '
'method for valid options.')
return opt(model_params, lr=lr, **optimizer_kwargs)
@staticmethod
def _get_loss_function(loss: str, loss_kwargs: dict = None) -> Callable:
if loss_kwargs is None:
loss_kwargs = {}
if loss == 'mse':
from torch.nn import MSELoss
l = MSELoss
elif loss == 'l1':
from torch.nn import L1Loss
l = L1Loss
elif loss == 'nll':
from torch.nn import NLLLoss
l = NLLLoss
elif loss == 'ce':
from torch.nn import CrossEntropyLoss
l = CrossEntropyLoss
elif loss == 'kld':
from torch.nn import KLDivLoss
l = KLDivLoss
elif loss == 'hinge':
from torch.nn import HingeEmbeddingLoss
l = HingeEmbeddingLoss
else:
raise ValueError('Invalid loss function choice. Please see the documentation of the `Network.train()` '
'method for valid options.')
return l(**loss_kwargs)
class FeedbackNetwork(Network):
    def __init__(self, dt: float, device: str = "cpu"):
        """Instantiates a network that supports dedicated feedback edges.

        Parameters
        ----------
        dt
            Time-step used for all simulations and rnn layers.
        device
            Device on which to deploy the `FeedbackNetwork` instance.
        """
        super().__init__(dt, device)
        # backward graph is rebuilt by `compile`; feedback edges live in a separate subgraph
        self._bwd_graph = None
        self._fb_graph = None
    def compile(self):
        """Detect the forward pass through the network, excluding feedback edges.

        Feedback edges are moved into a separate subgraph before `Network.compile` runs,
        so that the backward evaluation graph only contains the feedforward path from the
        input node to the output node.
        """
        if self._fb_graph is not None:
            # add feedback edges to original graph again (undo a previous compilation)
            for edge in self._fb_graph.edges:
                self.graph.add_edge(edge[0], edge[1], **self._fb_graph[edge[0]][edge[1]])
            self._fb_graph = None
        # sort edges into feedback and feedforward edges, based on the "feedback" edge
        # attribute set in `FeedbackNetwork.add_edge`
        ffwd_edges, fb_edges = [], []
        for edge in self.graph.edges:
            fb = self.graph[edge[0]][edge[1]].get("feedback")
            if fb:
                fb_edges.append(edge)
            else:
                ffwd_edges.append(edge)
        # reduce graph to view that contains only feedforward edges; the feedforward part
        # is copied into a fresh DiGraph, while the feedback part stays a subgraph view
        g_fwd = DiGraph(self.graph.edge_subgraph(ffwd_edges))
        self._fb_graph = self.graph.edge_subgraph(fb_edges)
        self.graph = g_fwd
        # call super method
        super().compile()
def add_edge(self, source: str, target: str, weights: Union[torch.Tensor, np.ndarray] = None,
train: Optional[str] = None, feedback: bool = False, dtype: torch.dtype = torch.float64,
edge_attrs: dict = None, **kwargs) -> Linear:
"""Add a feed-forward layer to the network.
Parameters
----------
source
Label of the source node.
target
Label of the target node.
weights
`k x n` weight matrix that realizes the linear projection of the `n` source outputs to
the `k` target inputs.
train
Can be used to make the edge weights trainable. The following options are available:
- `None` for a static edge
- 'gd' for training of the edge weights via standard pytorch gradient descent
- 'rls' for recursive least squares training of the edge weights
feedback
If true, this edge is treated as a feedback edge, meaning that it does not affect the feedforward path that
connects the network input to its output.
dtype
Data type of the edge weights.
edge_attrs
Additional edge attributes passed to `networkx.DiGraph.add_edge`.
kwargs
Additional keyword arguments to be passed to the edge class initialization method.
Returns
-------
Linear
Instance of the edge class.
"""
if not edge_attrs:
edge_attrs = {}
edge_attrs["feedback"] = feedback
return super().add_edge(source, target, weights=weights, train=train, dtype=dtype, edge_attrs=edge_attrs,
**kwargs)
def get_edge(self, source: str, target: str) -> Linear:
"""Returns edge instance from the network.
Parameters
----------
source
Name of the source node.
target
Name of the target node.
Returns
-------
Linear
Instance of the edge class.
"""
try:
return super().get_edge(source, target)
except KeyError:
return self._fb_graph[source][target]["edge"]
def _backward(self, x: Union[torch.Tensor, np.ndarray], n: str) -> torch.Tensor:
# get feedforward input
if n in self._bwd_graph:
inp = self._bwd_graph[n]
if len(inp) == 1:
x = self._edge_forward(x, inp[0], n)
else:
x = torch.sum(torch.tensor([self._edge_forward(x, i, n) for i in inp]), dim=0)
# get feedback input
if n in self._fb_graph:
inputs = list(self._fb_graph.predecessors(n))
n_in = len(inputs)
if n_in == 0:
pass
elif n_in == 1:
x = x + self._edge_bwd(inputs[0], n)
else:
x = x + torch.sum(torch.tensor([self._edge_bwd(i, n) for i in inputs]), dim=0)
# calculate node output
node = self[n]
if node["eval"]:
node["out"] = node["node"].forward(x)
node["eval"] = False
return node["out"]
def _edge_bwd(self, source: str, target: str):
x = self.get_node(source)["out"]
edge = self._fb_graph[source][target]["edge"]
return edge.forward(x)
| pyrates-neuroscience/RectiPy | rectipy/network.py | network.py | py | 48,772 | python | en | code | 3 | github-code | 13 |
43114674432 | def find_parent(parent, x):
    # Union-Find "find" with path compression: every node visited on the way
    # up is re-pointed directly at the root, flattening the tree.
    if parent[x] != x:
        parent[x] = find_parent(parent, parent[x])
    return parent[x]
def union_parent(parent, a, b):
    """Merge the sets containing `a` and `b`; the smaller root index wins."""
    root_a, root_b = find_parent(parent, a), find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Kruskal's MST over "planet tunnels": only axis-adjacent planets need edges.
n = int(input())
parent = [0] * (n + 1)
edges = []
result = 0
for i in range(1, n+1):
    parent[i] = i
# for each axis, sort the planets so neighbouring distances can be taken
x = []
y = []
z = []
# read the coordinates; keep (coordinate, planet index) pairs
for i in range(1, n+1):
    data = list(map(int, input().split()))
    x.append((data[0], i))
    y.append((data[1], i))
    z.append((data[2], i))
x.sort()
y.sort()
z.sort()
# build candidate edges only between axis-adjacent planets
for i in range(n - 1):
    # the cost is the first tuple element so that sorting orders edges by cost
    edges.append((x[i + 1][0] - x[i][0], x[i][1], x[i + 1][1]))
    edges.append((y[i + 1][0] - y[i][0], y[i][1], y[i + 1][1]))
    edges.append((z[i + 1][0] - z[i][0], z[i][1], z[i + 1][1]))
# Kruskal: process edges in increasing cost order
edges.sort()
for edge in edges:
    cost, a, b = edge
    # take the edge only if it does not close a cycle
    if find_parent(parent, a) != find_parent(parent, b):
        union_parent(parent, a, b)
        result += cost
print(result) | jinhyungrhee/Problem-Solving | NDB/NDB_1844_행성터널★.py | NDB_1844_행성터널★.py | py | 1,298 | python | ko | code | 0 | github-code | 13 |
34649908863 | import tkinter as tk
# the root window owns every widget created below
window = tk.Tk()
# text shown in the window's title bar
window.title("My APP")
# fixed initial size, "<width>x<height>" in pixels
window.geometry("400x350")
# LABELS
title = tk.Label(text="Hello world. \nWelcome to my app", font=("Garamond", 20))
title.grid()
# Entry field: single-line text input
entry_field = tk.Entry()
entry_field.grid()
# BUTTON (no command attached yet, so clicking it does nothing)
button1 = tk.Button(text="Click me!", bg="blue")
button1.grid()
# Text field: multi-line text area; height/width are measured in characters
text_field = tk.Text(master=window, height=10, width=30)
text_field.grid()
# mainloop blocks here and dispatches GUI events until the window closes
window.mainloop()
| MarkCrocker/Python | myapp.py | myapp.py | py | 566 | python | en | code | 0 | github-code | 13 |
20296453634 | """
desitarget.targetmask
=====================
This looks more like a script than an actual module.
"""
import os.path
from desiutil.bitmask import BitMask
import yaml
from pkg_resources import resource_filename
def load_mask_bits(prefix=""):
    """Load bit definitions from yaml file.

    Parameters
    ----------
    prefix : str
        Optional survey prefix; selects the packaged
        "<prefix>_targetmask.yaml" file (plain "targetmask.yaml" when empty).

    Returns
    -------
    dict
        Parsed yaml content with the "priorities" and "numobs" sections
        folded into the per-bit extra-information dictionaries.
    """
    # only insert an underscore when a non-empty prefix was requested
    us = ""
    if len(prefix) > 0:
        us = '_'
    prename = prefix+us
    fn = os.path.join(prefix, "data", "{}targetmask.yaml".format(prename))
    _filepath = resource_filename('desitarget', fn)
    with open(_filepath) as fx:
        bitdefs = yaml.safe_load(fx)
    # fold priorities/numobs into the bit definitions; a TypeError signals the
    # section is absent from the yaml file, which is allowed
    try:
        bitdefs = _load_mask_priorities(bitdefs, handle="priorities", prename=prename)
    except TypeError:
        pass
    try:
        bitdefs = _load_mask_priorities(bitdefs, handle="numobs", prename=prename)
    except TypeError:
        pass
    return bitdefs
def _load_mask_priorities(bitdefs, handle="priorities", prename=""):
    """Priorities and NUMOBS are defined in the yaml file, but they aren't
    a bitmask and so require some extra processing.

    Parameters
    ----------
    bitdefs : dict
        Parsed yaml bit definitions; modified in place and returned.
    handle : str
        Which section to fold in: "priorities" or "numobs".
    prename : str
        Survey prefix (with trailing underscore) locating "<prename>obsmask".
    """
    for maskname, priorities in bitdefs[handle].items():
        for bitname in priorities:
            # -"SAME_AS_XXX" enables one bit to inherit priorities from another
            if isinstance(priorities[bitname], str) and priorities[bitname].startswith('SAME_AS_'):
                other = priorities[bitname][8:]
                priorities[bitname] = priorities[other]
            # -fill in default "more" priority to be same as "unobs"
            # ADM specifically applies to dictionary of priorities
            if handle == 'priorities':
                if isinstance(priorities[bitname], dict):
                    if 'MORE_ZWARN' not in priorities[bitname]:
                        priorities[bitname]['MORE_ZWARN'] = priorities[bitname]['UNOBS']
                    if 'MORE_ZGOOD' not in priorities[bitname]:
                        priorities[bitname]['MORE_ZGOOD'] = priorities[bitname]['UNOBS']
                    # - fill in other states as priority=1
                    for state, blat, foo in bitdefs[prename+'obsmask']:
                        if state not in priorities[bitname]:
                            priorities[bitname][state] = 1
                else:
                    # non-dict entries carry no per-state table
                    priorities[bitname] = dict()
        # - add to the extra info dictionary for this target mask
        for bitdef in bitdefs[maskname]:
            bitname = bitdef[0]
            bitdef[3][handle] = priorities[bitname]
    return bitdefs
# -convert to BitMask objects
# if bitdefs is None:
# load_bits()
_bitdefs = load_mask_bits()
# build the public BitMask objects; fall back to featureless placeholders
# when the yaml could not be parsed into bit definitions (TypeError)
try:
    desi_mask = BitMask('desi_mask', _bitdefs)
    mws_mask = BitMask('mws_mask', _bitdefs)
    bgs_mask = BitMask('bgs_mask', _bitdefs)
    scnd_mask = BitMask('scnd_mask', _bitdefs)
    obsconditions = BitMask('obsconditions', _bitdefs)
    obsmask = BitMask('obsmask', _bitdefs)
    targetid_mask = BitMask('targetid_mask', _bitdefs)
    zwarn_mask = BitMask('zwarn_mask', _bitdefs)
except TypeError:
    desi_mask = object()
    mws_mask = object()
    bgs_mask = object()
    scnd_mask = object()
    obsconditions = object()
    obsmask = object()
    targetid_mask = object()
    zwarn_mask = object()
# -------------------------------------------------------------------------
# -Do some error checking that the bitmasks are consistent with each other
# import sys
# error = False
# for mask in desi_target, mws_target, bgs_target:
# for bitname in targetmask.names:
# if targetmask[bitname]
# if bitname not in priorities.keys():
# print >> sys.stderr, "ERROR: no priority defined for "+bitname
# error = True
#
# for bitname in priorities.keys():
# if bitname not in targetmask.names():
# print >> sys.stderr, "ERROR: priority defined for bogus name "+bitname
# error = True
#
# if error:
# raise ValueError("mismatch between priority and targetmask definitions")
| desihub/desitarget | py/desitarget/targetmask.py | targetmask.py | py | 3,973 | python | en | code | 17 | github-code | 13 |
16809597624 | from __future__ import annotations
import json
import subprocess
import textwrap
from pathlib import Path
from typing import Any
import pytest
from hypothesistooling.projects.hypothesispython import HYPOTHESIS_PYTHON, PYTHON_SRC
from hypothesistooling.scripts import pip_tool, tool_path
PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
@pytest.mark.skip(
    reason="Hypothesis type-annotates the public API as a convenience for users, "
    "but strict checks for our internals would be a net drag on productivity."
)
def test_pyright_passes_on_hypothesis():
    """Run strict pyright over the whole hypothesis-python project (currently skipped)."""
    pip_tool("pyright", "--project", HYPOTHESIS_PYTHON)
@pytest.mark.parametrize("python_version", PYTHON_VERSIONS)
def test_pyright_passes_on_basic_test(tmp_path: Path, python_version: str):
    """Strict pyright accepts keyword-style @given usage on every supported Python version."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            import hypothesis
            import hypothesis.strategies as st
            @hypothesis.given(x=st.text())
            def test_foo(x: str):
                assert x == x
            from hypothesis import given
            from hypothesis.strategies import text
            @given(x=text())
            def test_bar(x: str):
                assert x == x
            """
        ),
        encoding="utf-8",
    )
    _write_config(
        tmp_path, {"typeCheckingMode": "strict", "pythonVersion": python_version}
    )
    assert _get_pyright_errors(file) == []
@pytest.mark.parametrize("python_version", PYTHON_VERSIONS)
def test_given_only_allows_strategies(tmp_path: Path, python_version: str):
    """@given with a non-strategy positional argument yields exactly one pyright error."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            from hypothesis import given
            @given(1)
            def f():
                pass
            """
        ),
        encoding="utf-8",
    )
    _write_config(
        tmp_path, {"typeCheckingMode": "strict", "pythonVersion": python_version}
    )
    # exactly one diagnostic should complain about the bad positional arg
    assert (
        sum(
            e["message"].startswith(
                'Argument of type "Literal[1]" cannot be assigned to parameter "_given_arguments"'
            )
            for e in _get_pyright_errors(file)
        )
        == 1
    )
def test_pyright_issue_3296(tmp_path: Path):
    """Mapping `sorted` over lists(integers()) must type-check (regression for issue #3296)."""
    source = textwrap.dedent(
        """
        from hypothesis.strategies import lists, integers
        lists(integers()).map(sorted)
        """
    )
    test_file = tmp_path / "test.py"
    test_file.write_text(source, encoding="utf-8")
    _write_config(tmp_path, {"typeCheckingMode": "strict"})
    assert _get_pyright_errors(test_file) == []
def test_pyright_raises_for_mixed_pos_kwargs_in_given(tmp_path: Path):
    """Mixing positional and keyword strategies in @given matches no overload."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            from hypothesis import given
            from hypothesis.strategies import text
            @given(text(), x=text())
            def test_bar(x: str):
                pass
            """
        ),
        encoding="utf-8",
    )
    _write_config(tmp_path, {"typeCheckingMode": "strict"})
    # exactly one overload-mismatch diagnostic is expected
    assert (
        sum(
            e["message"].startswith(
                'No overloads for "given" match the provided arguments'
            )
            for e in _get_pyright_errors(file)
        )
        == 1
    )
def test_pyright_issue_3348(tmp_path: Path):
    """tuples/one_of/sampled_from overloads type-check cleanly (regression for issue #3348)."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            import hypothesis.strategies as st
            st.tuples(st.integers(), st.integers())
            st.one_of(st.integers(), st.integers())
            st.one_of([st.integers(), st.floats()]) # sequence of strats should be OK
            st.sampled_from([1, 2])
            """
        ),
        encoding="utf-8",
    )
    _write_config(tmp_path, {"typeCheckingMode": "strict"})
    assert _get_pyright_errors(file) == []
def test_pyright_tuples_pos_args_only(tmp_path: Path):
    """st.tuples rejects keyword arguments: one pyright error per bad call (two total)."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            import hypothesis.strategies as st
            st.tuples(a1=st.integers())
            st.tuples(a1=st.integers(), a2=st.integers())
            """
        ),
        encoding="utf-8",
    )
    _write_config(tmp_path, {"typeCheckingMode": "strict"})
    assert (
        sum(
            e["message"].startswith(
                'No overloads for "tuples" match the provided arguments'
            )
            for e in _get_pyright_errors(file)
        )
        == 2
    )
def test_pyright_one_of_pos_args_only(tmp_path: Path):
    """st.one_of rejects keyword arguments: one pyright error per bad call (two total)."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            import hypothesis.strategies as st
            st.one_of(a1=st.integers())
            st.one_of(a1=st.integers(), a2=st.integers())
            """
        ),
        encoding="utf-8",
    )
    _write_config(tmp_path, {"typeCheckingMode": "strict"})
    assert (
        sum(
            e["message"].startswith(
                'No overloads for "one_of" match the provided arguments'
            )
            for e in _get_pyright_errors(file)
        )
        == 2
    )
def test_register_random_protocol(tmp_path: Path):
    """register_random accepts any RandomLike object; passing None needs its type: ignore."""
    file = tmp_path / "test.py"
    file.write_text(
        textwrap.dedent(
            """
            from random import Random
            from hypothesis import register_random
            class MyRandom:
                def __init__(self) -> None:
                    r = Random()
                    self.seed = r.seed
                    self.setstate = r.setstate
                    self.getstate = r.getstate
            register_random(MyRandom())
            register_random(None) # type: ignore
            """
        ),
        encoding="utf-8",
    )
    # the strict ignore-check proves the `type: ignore` above is genuinely needed
    _write_config(tmp_path, {"reportUnnecessaryTypeIgnoreComment": True})
    assert _get_pyright_errors(file) == []
# ---------- Helpers for running pyright ---------- #
def _get_pyright_output(file: Path) -> dict[str, Any]:
    """Run pyright on *file*'s directory and return its parsed JSON report."""
    proc = subprocess.run(
        [tool_path("pyright"), "--outputjson"],
        cwd=file.parent,
        encoding="utf-8",
        text=True,
        capture_output=True,
    )
    try:
        return json.loads(proc.stdout)
    except Exception:
        # surface the raw output before failing so CI logs show what pyright said
        print(proc.stdout)
        raise
def _get_pyright_errors(file: Path) -> list[dict[str, Any]]:
    """Return the list of diagnostics pyright reports for *file*."""
    return _get_pyright_output(file)["generalDiagnostics"]
def _write_config(config_dir: Path, data: dict[str, Any] | None = None):
    """Write a pyrightconfig.json into *config_dir*; `data` may override any default."""
    merged = {"extraPaths": [str(PYTHON_SRC)]}
    merged.update(data or {})
    (config_dir / "pyrightconfig.json").write_text(json.dumps(merged), encoding="utf-8")
| HypothesisWorks/hypothesis | whole-repo-tests/test_pyright.py | test_pyright.py | py | 6,659 | python | en | code | 7,035 | github-code | 13 |
73341878416 | import os
import numpy as np
import pandas as pd
import simplejson as json
import requests
import quandl
import bokeh
from bokeh.embed import components
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, output_file,save
from flask import Flask,render_template,request, redirect
# ----------------------------------------------------------------------
app = Flask(__name__)
# a dict to collect input data
app.vars = {}
##########################################################################
@app.route('/')
def homepg():
    """Redirect the bare root URL to the main input page."""
    return redirect('/index')
##########################################################################
@app.route('/index',methods=['GET','POST'])
def index():
    """Render the ticker/feature form (GET) or plot the requested stocks (POST).

    On POST: pulls the selected price series for each ticker from Quandl's
    WIKI dataset and returns a page embedding one bokeh line plot per ticker.
    """
    if request.method == 'GET':
        return render_template('userinfo.html' )

    # request was a POST
    # name is a list of stock tickers, change to upper case and split into a list
    app.vars['name'] = request.form['name'].upper().split(',')
    # input_feature = open, close, high, low (suffixes of the Quandl columns)
    app.vars['input_feature'] = []
    if request.form.get('open'): app.vars['input_feature'].append(' - Open')
    if request.form.get('close'): app.vars['input_feature'].append(' - Close')
    if request.form.get('high'): app.vars['input_feature'].append(' - High')
    if request.form.get('low'): app.vars['input_feature'].append(' - Low')

    tickers = app.vars['name']                  # list of tickers
    input_feature = app.vars['input_feature']   # list of features to show

    # Quandl dataset codes, one per ticker
    dset = ["WIKI/" + ticker + "-Open" for ticker in tickers]
    # get stock data in a pandas dataframe
    # NOTE(review): the API key is hard-coded; move it to an environment variable
    stock = quandl.get(dataset=dset, api_key='wad4CxZw1s-6BTGhjei6', returns='pandas')
    # column names to select (same order as the nested loops produced before)
    features = [ds + f for ds in dset for f in input_feature]
    stock_data = stock.loc[:, features]
    stock_data['Date'] = stock_data.index.values.astype('datetime64[ns]')

    color_list = ['red', '#33A02C', '#B2DF8A', '#FB9A99']  # one line colour per feature
    plot_list = []
    for ticker in tickers:
        # bug fix: figures were previously injected into globals() as
        # `p<ticker>`, leaking state between requests; use a local instead
        fig = figure(x_axis_type="datetime", title="Stock Prices: " + ticker)
        fig.grid.grid_line_alpha = 0.3
        fig.xaxis.axis_label = 'Date'
        fig.yaxis.axis_label = 'Price'
        fig.ygrid.band_fill_color = "olive"
        fig.ygrid.band_fill_alpha = 0.04
        for i, feature in enumerate(input_feature):
            fig.line(stock_data['Date'], stock_data['WIKI/' + ticker + '-Open' + feature],
                     color=color_list[i], legend=ticker + feature)
        fig.legend.location = "top_left"
        plot_list.append(fig)

    # embed the bokeh components into the rendered page
    script, div = components(plot_list)
    return render_template('stocks.html', script=script, div=div)
##########################################################################
if __name__ == "__main__":
    # honour the PORT environment variable (e.g. set by the host), default 5000
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
| tukichen/stock-price | app.py | app.py | py | 4,171 | python | en | code | 1 | github-code | 13 |
72829488979 | import bpy
from ... base_types import AnimationNode
from bpy.props import *
from ... events import propertyChanged
import pygame.mixer as pgm
pgm.init()
class AudioPlayNode(bpy.types.Node, AnimationNode):
    """Animation node that plays a music file through pygame's mixer.

    The UI exposes a file selector plus play / pause / resume / stop buttons.
    """
    bl_idname = "an_AudioPlayNode"
    bl_label = "AUDIO Play Music File"
    bl_width_default = 200
    # path of the last loaded file, shown as a label in the node UI
    message = StringProperty()
    def draw(self,layout):
        # build the node's button column
        col = layout.column()
        col.scale_y = 1.5
        self.invokeSelector(col ,"PATH", "loadFile", icon = "NEW",
            text = "Select Sound File")
        layout.label(self.message)
        self.invokeFunction(col, "playFile", icon = "NEW",
            text = "Play File")
        self.invokeFunction(col, "pauseFile", icon = "NEW",
            text = "Pause Playback")
        self.invokeFunction(col, "resumeFile", icon = "NEW",
            text = "Resume Playback")
        self.invokeFunction(col, "stopFile", icon = "NEW",
            text = "Stop Playback")
    def execute(self):
        # tint the node green so it is easy to spot in the node tree
        self.use_custom_color = True
        self.useNetworkColor = False
        self.color = (0.85,1,0.85)
    def loadFile(self,path):
        # hand the file to pygame and remember the path for the UI label
        pgm.music.load(path)
        self.message = str(path)
    def playFile(self):
        pgm.music.play()
    def pauseFile(self):
        pgm.music.pause()
    def resumeFile(self):
        pgm.music.unpause()
    def stopFile(self):
        pgm.music.stop()
| Clockmender/My-AN-Nodes | nodes/audio/audio-play.py | audio-play.py | py | 1,379 | python | en | code | 16 | github-code | 13 |
41759091969 | from flask import Flask,jsonify,request
app = Flask(__name__)
studentdetails = [{'name':"Teja Kishore","email" : "ytkishore7@gmail.com","id" : 1}]
@app.route("/",methods = ["GET"])
def home():
    """Landing / health-check endpoint."""
    return ("Welcome")
@app.route("/details", methods = ["GET"])
def readAll():
    """Return every stored student record."""
    return jsonify ({"studentdetails" : studentdetails})
@app.route("/details/<string:name>", methods = ["GET"])
def readOne(name):
    """Return the first student whose name matches, or a 404 error."""
    det = [details for details in studentdetails if details['name'] == name]
    if not det:
        # previously an unknown name raised IndexError (HTTP 500)
        return jsonify({"error": "student not found"}), 404
    return jsonify({"studentdetails": det[0]})
@app.route("/details", methods = ["POST"])
def add():
    """Create a student record from the JSON request body and return all records."""
    details = {
        "name": request.json['name'],
        # bug fix: `request.json[id]` looked up the *builtin* `id` function as
        # a key; the payload field is the string 'id'
        "id": request.json['id'],
        "email": request.json['email'],
    }
    studentdetails.append(details)
    return jsonify({"studentdetails": studentdetails})
@app.route("/details/<string:name>", methods = ["PUT"])
def update(name):
    """Rename the student called *name* using the JSON request body."""
    # bug fix: the filter previously indexed the *list* (`studentdetails["name"]`),
    # which raised TypeError on every request; compare each record instead
    det = [details for details in studentdetails if details["name"] == name]
    if not det:
        return jsonify({"error": "student not found"}), 404
    det[0]["name"] = request.json['name']
    return jsonify({"studentdetails": det[0]})
@app.route("/details/<string:name>", methods = ["DELETE"])
def delete(name):
    """Remove the student called *name* and return the remaining records."""
    # bug fixes: the route was registered without a leading slash (Flask
    # requires one), and the filter indexed the list instead of each record
    det = [details for details in studentdetails if details["name"] == name]
    if not det:
        return jsonify({"error": "student not found"}), 404
    studentdetails.remove(det[0])
    return jsonify({"studentdetails": studentdetails})
if __name__ == "__main__":
app.run(debug = True, host = "0.0.0.0", port ="3000")
| teja-kishore/rest_api_python | rest_api_crud.py | rest_api_crud.py | py | 1,404 | python | en | code | 0 | github-code | 13 |
39989055832 | #script to preprocess the youtube trailer data output from youtube_scraper.py
# get the YouTube trailer data (release date, views, likes, dislikes) dumped
# by youtube_scraper.py and flatten it into per-movie tables
import pandas as pd
import numpy as np
import pickle
# get metadata from the Movie Database
df_full = pd.read_csv('data/movies_metadata.csv')
# here is a dict to quickly get titles from movie ids
id2title = {k:v for k, v in zip(df_full['id'], df_full['title'])}
# get youtube trailer data
with open ('data/youtube_data', 'rb') as fp:
    trailersData = pickle.load(fp)
trailers = pd.Series(trailersData)
trailers_df = pd.DataFrame({'trailers':trailers})
# drop rows whose trailer list is empty: mark them NaN, then keep the rest
trs=[]
for t in trailers_df.trailers:
    if len(t)==0:
        t = np.nan
    trs.append(t)
del t
trs = pd.Series(trs)
trs.dropna(how='any', inplace=True)
idx = list(trs.index)
trailers_df = trailers_df.iloc[idx]
# create list that includes all trailers (one row per trailer)
trailers_series = trailers_df.apply(lambda x: pd.Series(x['trailers']),axis=1).stack().reset_index(level=1, drop=True)
trailers_series.name = 'trailers'
trailers_series.reset_index(drop=True, inplace=True)
# collect all trailer info into a convenient data frame with each movie as row
dislikes = []
likes = []
ids = []
name = []
youtube_date = []
Type = []
views = []
for i in range(len(trailers_series)):
    # '-1' is used as a sentinel for missing fields and is filtered out below
    d = trailers_series.iloc[i].get('dislikes', '-1')
    l = trailers_series.iloc[i].get('likes', '-1')
    movieid = trailers_series.iloc[i].get('movieid','-1')
    n = trailers_series.iloc[i].get('name', '-1')
    p = trailers_series.iloc[i].get('publication_date', '-1')
    T = trailers_series.iloc[i].get('type', '-1')
    v = trailers_series.iloc[i].get('views', '-1')
    dislikes.append(d)
    likes.append(l)
    ids.append(movieid)
    name.append(n)
    youtube_date.append(p)
    Type.append(T)
    views.append(v)
views = pd.Series(views)
ids = pd.Series(ids)
# split into categorical and numeric tables
trailers_cat = pd.DataFrame({'ids':ids, 'name':name,'type':Type, 'youtube_date':youtube_date})
trailers_num = pd.DataFrame({'ids':ids,'dislikes':dislikes, 'likes':likes, 'views':views })
# drop the '-1' sentinel rows, one column at a time
trailers_num = trailers_num.set_index('dislikes').drop('-1').reset_index()
trailers_num = trailers_num.set_index('likes').drop('-1').reset_index()
trailers_num = trailers_num.set_index('views').drop('-1').reset_index()
trailers_num = trailers_num.set_index('ids').drop('-1').reset_index()
trailers_cat = trailers_cat.set_index('ids').drop('-1').reset_index()
trailers_cat = trailers_cat.set_index('name').drop('-1').reset_index()
trailers_cat = trailers_cat.set_index('type').drop('-1').reset_index()
trailers_cat = trailers_cat.set_index('youtube_date').drop('-1').reset_index()
# map movie ids back to human-readable titles
trailers_num['title'] = trailers_num['ids'].apply(lambda x: id2title[x])
trailers_cat['title'] = trailers_cat['ids'].apply(lambda x: id2title[x])
trailers_num.drop('ids', axis=1, inplace = True)
trailers_cat.drop('ids', axis=1, inplace = True)
# strip thousands separators and convert the count columns to numbers
trailers_num['dislikes'] = trailers_num['dislikes'].apply(lambda x: x.replace(',',''))
trailers_num['likes'] = trailers_num['likes'].apply(lambda x: x.replace(',',''))
trailers_num['views'] = trailers_num['views'].apply(lambda x: x.replace(',',''))
trailers_num['dislikes'] = pd.to_numeric(trailers_num['dislikes'])
trailers_num['likes'] = pd.to_numeric(trailers_num['likes'])
trailers_num['views'] = pd.to_numeric(trailers_num['views'])
# average the counts across each movie's trailers and save the result
youtube_trailers = np.round(trailers_num.groupby('title').mean().dropna())
youtube_trailers.to_csv('data/youtube_trailers.csv')
| zhuozhi-ge/Movie-Success-Prediction-with-Trailers | crawlers/youtube_preprocessing.py | youtube_preprocessing.py | py | 3,637 | python | en | code | 1 | github-code | 13 |
15628496633 | import os
from datetime import date, datetime
from decimal import Decimal
from dotenv import load_dotenv
from fastapi import FastAPI
from tortoise.contrib.fastapi import register_tortoise
from tortoise.transactions import in_transaction
from app.tariff.dao import TariffDAO
from app.tariff.models import InsuranceCost, InsuredValue, Tariff
from exceptions import (
NoTariffException,
InvalidRateValueExceptions,
InvalidDateFormatException,
)
load_dotenv("infra/.env.local")
app = FastAPI()
@app.post("/calculate_insurance_cost", response_model=InsuranceCost)
async def calculate_insurance_cost(
    insured_value: InsuredValue,
    cargo_type: str,
    insuarance_date: date | None = None,
) -> InsuranceCost:
    """Calculate the insurance cost.

    Args:
        insured_value: The declared value of the cargo.
        cargo_type: Cargo type.
        insuarance_date: Insurance date; defaults to today's UTC date.

    Returns:
        Insurance cost derived from the tariff rate and cargo parameters.
    """
    # bug fix: the previous default (`= datetime.utcnow().date()`) was
    # evaluated once at import time, so a long-running service kept using
    # its start-up date; resolve the default per request instead
    if insuarance_date is None:
        insuarance_date = datetime.utcnow().date()
    declared_value = insured_value.declared_cost
    rate = await Tariff.get_rate(insuarance_date, cargo_type)
    insurance_cost = Decimal(str(rate)) * declared_value if rate else None
    # NOTE(review): cargo_type was hard-coded to "Other" in the response;
    # echoing the requested type instead — confirm against the API contract
    return InsuranceCost(
        cargo_type=cargo_type,
        current_date=insuarance_date,
        declared_value=declared_value,
        insurance_cost=insurance_cost,
    )
@app.post("/tariffs")
async def add_or_update_tariffs(
    tariff_data: dict[str, list[dict[str, str | float]]]
) -> dict[str, str]:
    """Add or update tariffs in the database.

    Args:
        tariff_data: Tariff data as JSON, keyed by ISO-formatted date.

    Returns:
        A success message; validation failures surface as exceptions.

    Raises:
        NoTariffException: The payload is empty.
        InvalidDateFormatException: A key is not an ISO-formatted date.
        InvalidRateValueExceptions: A `rate` is missing, not a number,
            or outside the range [0, 1].
    """
    if not tariff_data:
        raise NoTariffException
    async with in_transaction():
        for date_str, tariffs in tariff_data.items():
            try:
                tariff_date = date.fromisoformat(date_str)
            except ValueError:
                raise InvalidDateFormatException(date_str)
            for tariff in tariffs:
                raw_rate = tariff.get("rate")
                # bug fixes: `float(None)` raised an *uncaught* TypeError when
                # 'rate' was absent, and the except-branch referenced `rate`
                # before assignment; validate the raw value instead. The old
                # `rate is None` check after float() was dead code.
                try:
                    rate = float(raw_rate)
                except (TypeError, ValueError):
                    raise InvalidRateValueExceptions(raw_rate)
                if rate < 0 or rate > 1:
                    raise InvalidRateValueExceptions(rate)
                existing_tariff = await TariffDAO.get(
                    date=tariff_date, cargo_type=tariff["cargo_type"]
                )
                if existing_tariff:
                    existing_tariff.rate = tariff["rate"]
                    await existing_tariff.save()
                else:
                    await TariffDAO.create(
                        date=tariff_date,
                        cargo_type=tariff["cargo_type"],
                        rate=tariff["rate"],
                    )
    return {"message": "Тариф успешно добавлен"}
# wire the Tortoise ORM into the FastAPI app lifecycle (startup/shutdown)
register_tortoise(
    app,
    db_url=os.getenv("DB_URL"),
    modules={"models": ["main"]},
    generate_schemas=True,
    add_exception_handlers=True,
)
| Yohimbe227/Cargo | main.py | main.py | py | 3,646 | python | ru | code | 0 | github-code | 13 |
43262928502 | def main():
dp = [[-1] * D for _ in range(K+1)]
dp[0][0] = 0
for ai in A:
for k in range(K, 0, -1):
for d_bfo in range(D):
d = (d_bfo + ai) % D
if dp[k-1][d_bfo] != -1:
dp[k][d] = max(dp[k][d], dp[k-1][d_bfo] + ai)
return print(dp[K][0])
if __name__ == '__main__':
N, K, D = map(int, input().split())
A = list(map(int, input().split()))
main()
| Shirohi-git/AtCoder | abc281-/abc281_d.py | abc281_d.py | py | 447 | python | en | code | 2 | github-code | 13 |
4249368827 | from typing import Optional, Any, List, Type, TypeVar, Union
from app.exceptions import *
from ..base import BaseService
import app.schemas.models.closecom.contactemail as contactemail_schema
from ..models.closecom.contactemail import CloseComContactEmail
from aiogoogle import Aiogoogle
from app.core.config import settings
from bson.objectid import ObjectId
import traceback
class CloseComContactEmailService(BaseService[CloseComContactEmail,
                              contactemail_schema.ContactEmailCreate,
                              contactemail_schema.ContactEmailUpdate]):
    """Service wrapper around the Close.com contact-email collection."""
    def __init__(self):
        super().__init__(model=CloseComContactEmail)
    async def upsert_many(self,
                          items: List[dict]) -> Any:
        """Upsert each item (keyed by 'email') into the collection.

        NOTE(review): only the result of the *last* update_one is returned,
        and any exception is swallowed (printed + `None` returned) — confirm
        callers rely on this best-effort behaviour.
        """
        res = None
        if not items:
            raise AppErrors(f"items list can't be empty")
        try:
            #low level operations start here because of poor implamentation of umongo
            collection = CloseComContactEmail.collection
            for item in items:
                res = await collection.update_one({'email' : item['email']},
                                                { '$set' : {
                                                    'data' : item['data']
                                                }
                                                },
                                                upsert=True)
        except Exception as e:
            traceback.print_exc()
            print(f"CloseComContactEmail.upsert_many {str(e)} type={type(e)}")
            return None
        return res
    async def get_emails(self,
                         customer=None,
                         limit=1000) -> List[str]:
        """Build an aggregation over stored emails, optionally filtered by customer.

        NOTE(review): this returns the (un-awaited) aggregation cursor, not a
        materialised ``List[str]``; the annotation looks inaccurate — verify.
        """
        match = {}
        if customer:
            match['customer'] = {'$eq' : customer}
        # pipeline: filter by customer (if any), cap the result, project email only
        pipeline = [
            {
                '$match': match
            },
            {
                '$limit' : limit
            },
            {
                '$project':
                {
                    'email': 1
                }
            },
        ]
        collection = CloseComContactEmail.collection
        return collection.aggregate(pipeline)
| Grinnbob/g_theclone | app/services/closecom/contactemail_service.py | contactemail_service.py | py | 2,247 | python | en | code | 0 | github-code | 13 |
7268902086 | #!/usr/bin/python3
"""Module 0-rotate_2d_matrix
"""
def rotate_2d_matrix(matrix):
    """Rotate the square matrix 90 degrees clockwise, in place.

    Reversing the rows and transposing (`zip`) yields the clockwise rotation
    in one pass, replacing the previous temp-matrix + copy-back approach and
    its coupling to the module-level `create_matrix`/`copy_matrix` helpers.
    """
    rotated = [list(row) for row in zip(*matrix[::-1])]
    # write back cell-by-cell so existing references to the inner row lists
    # keep pointing at up-to-date data (matches the original behaviour)
    for i, row in enumerate(rotated):
        for j, value in enumerate(row):
            matrix[i][j] = value
def create_matrix(n):
    """Return an n x n matrix of zeros (each row an independent list)."""
    # comprehension replaces the manual append loops; `range(n)` in the outer
    # comprehension guarantees n *distinct* row lists
    return [[0] * n for _ in range(n)]
def copy_matrix(current, old):
    """Overwrite `old`'s cells with the values from `current`, in place."""
    for i, row in enumerate(old):
        for j in range(len(row)):
            old[i][j] = current[i][j]
| kevohm/alx-interview | 0x07-rotate_2d_matrix/0-rotate_2d_matrix.py | 0-rotate_2d_matrix.py | py | 722 | python | en | code | 0 | github-code | 13 |
41171351374 | import os
from math import log2
class GitIndexEntry(object):
    """One entry of a git index ("staging area") file.

    All fields default to ``None`` so entries can be populated incrementally
    while the binary index format is parsed.
    """

    def __init__(
        self,
        relpath=None,
        ctime=None,
        mtime=None,
        dev=None,
        ino=None,
        mode_type=None,
        mode_perms=None,
        uid=None,
        gid=None,
        fsize=None,
        sha=None,
        flag_assume_valid=None,
        flag_stage=None,
        name=None,
    ):
        # path of the entry relative to the repository root
        self.relpath = relpath
        # metadata / data change times, each a (seconds, nanoseconds) pair
        self.ctime = ctime
        self.mtime = mtime
        # filesystem identity: containing device id and inode number
        self.dev = dev
        self.ino = ino
        # object type: b1000 (regular), b1010 (symlink), b1110 (gitlink)
        self.mode_type = mode_type
        # object permissions, as an integer
        self.mode_perms = mode_perms
        # owner user id and group id
        self.uid = uid
        self.gid = gid
        # object size in bytes
        self.fsize = fsize
        # the object's SHA
        self.sha = sha
        # index flags
        self.flag_assume_valid = flag_assume_valid
        self.flag_stage = flag_stage
        # full path name of the object
        self.name = name
def get_relative_path(file_path, base_directory):
    """Strip the prefix `file_path` shares with `base_directory` (plus the separator)."""
    shared = os.path.commonprefix([base_directory, file_path])
    return file_path.split(shared)[1][1:]
def human_readable_file_size(size):
    """Format a byte count with a binary-magnitude suffix (e.g. 1536 -> '1.5 KB').

    Adapted from Dipen Panchasara,
    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    suffixes = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
    magnitude = int(log2(size) / 10) if size else 0
    scaled = size / (1 << (magnitude * 10))
    return "{:.4g} {}".format(scaled, suffixes[magnitude])
| leepand/mini-mlops | mlopskit/ext/dpipe/io/file_base.py | file_base.py | py | 1,948 | python | en | code | 1 | github-code | 13 |
30485761962 | import sys, torch
sys.path.append('..')
import argparse
from torch import nn
from helper import helper
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
# command-line options for the training run
ap = argparse.ArgumentParser()
ap.add_argument('--epochs', type=int, default=10)
ap.add_argument('--batch_sz', type=int, default=4)
# NOTE(review): '--le' looks like a typo for '--lr' and is never read — confirm
ap.add_argument('--le', type=float, default=1e-2)
args = ap.parse_args()
helper.make_dir()
device = helper.device()
# Loading the dataset
transform = transforms.Compose([transforms.ToTensor()])
# NOTE(review): the name `train` is shadowed by the `train()` function defined
# below; only the DataLoader built here keeps the dataset reachable
train = datasets.FashionMNIST(root='', download=True, train=True, transform=transform)
test = datasets.FashionMNIST(root='', download=True, train=False, transform=transform)
train_load = DataLoader(train, batch_size=args.batch_sz, shuffle=True)
test_load = DataLoader(test, batch_size=args.batch_sz, shuffle=False)
# peek at one batch to sanity-check shapes
image, label = next(iter(train_load))
print(image.size())
print(len(train_load))
#creating the model
class variational_encoder(nn.Module):
    """Encoder half of a VAE: maps flattened 28x28 images to latent samples.

    The forward pass draws z ~ N(mu, sigma) via the reparameterization trick
    and stores the KL-divergence term in ``self.k1`` for the training loop to
    add to its reconstruction loss.
    """

    def __init__(self, latent_dims):
        super(variational_encoder, self).__init__()
        self.linear1 = nn.Linear(784, 512)
        self.linear2 = nn.Linear(512, latent_dims)  # predicts mu
        self.linear3 = nn.Linear(512, latent_dims)  # predicts log of sigma
        self.normal = torch.distributions.Normal(0, 1)
        self.k1 = 0  # KL term of the most recent forward pass

    def forward(self, x):
        x = torch.flatten(x, start_dim=1)
        x = torch.nn.functional.relu(self.linear1(x))
        mu = self.linear2(x)
        sigma = torch.exp(self.linear3(x))
        # bug fix: Normal(0, 1) always samples on the CPU; move the noise to
        # mu's device so the encoder also works when the model lives on GPU
        eps = self.normal.sample(mu.shape).to(mu.device)
        z = mu + sigma * eps
        self.k1 = (sigma**2 + mu**2 - torch.log(sigma) - 0.5).sum()
        return z
class Decoder(nn.Module):
    """Decoder half of the VAE: maps a latent vector back to a 1x28x28 image
    with pixel values squashed into (0, 1) by a sigmoid."""

    def __init__(self, latent_dims):
        super(Decoder, self).__init__()
        # Mirror of the encoder: latent -> 512 hidden units -> 784 pixels.
        self.linear1 = nn.Linear(latent_dims, 512)
        self.linear2 = nn.Linear(512, 784)

    def forward(self, z):
        hidden = torch.nn.functional.relu(self.linear1(z))
        flat_pixels = torch.sigmoid(self.linear2(hidden))
        # Un-flatten the 784 values back into single-channel image batches.
        return flat_pixels.reshape((-1, 1, 28, 28))
class VariationalAutoencoder(nn.Module):
    """Glue module: encoder's reparameterised sample feeds the decoder's
    reconstruction. The KL term stays on ``self.encoder.k1``."""

    def __init__(self, latent_dims):
        super(VariationalAutoencoder, self).__init__()
        self.encoder = variational_encoder(latent_dims)
        self.decoder = Decoder(latent_dims)

    def forward(self, x):
        latent = self.encoder(x)
        return self.decoder(latent)
# Training
def train(autoencoder, data, epochs=args.epochs):
    """Train `autoencoder` on `data` for `epochs` passes.

    Loss per batch is the summed squared reconstruction error plus the KL
    term the encoder stored on ``autoencoder.encoder.k1``.  Returns the
    total loss accumulated over every batch of every epoch, divided by the
    number of batches (i.e. average batch loss times epochs -- kept as the
    original did; callers plot this single scalar).

    Bug fix: the original called the module-level ``vae.train()`` instead of
    putting the model actually being trained into train mode.
    """
    opt = torch.optim.Adam(autoencoder.parameters())
    autoencoder.train()
    train_loss = 0.0
    for epoch in tqdm(range(epochs)):
        for x, _ in data:
            x = x.to(device)  # `device` is the module-level helper.device() result
            opt.zero_grad()
            x_hat = autoencoder(x)
            # Reconstruction error plus the KL term cached by the encoder.
            loss = ((x - x_hat)**2).sum() + autoencoder.encoder.k1
            loss.backward()
            opt.step()
            train_loss += loss.item()
    return train_loss / len(data)
# Build a 4-dimensional-latent VAE, train it, and persist the weights.
vae = VariationalAutoencoder(4).to(device) # GPU
training_loss = train(vae, train_load)
# NOTE(review): `train` returns one scalar, so this plots a single point;
# collecting a per-epoch list would make the curve meaningful -- confirm intent.
plt.plot(training_loss)
torch.save(vae.state_dict(), f'output/models.pth')
| kashyap333/Types-of-Autoencoders | Variational_AE/Variational_AE.py | Variational_AE.py | py | 3,124 | python | en | code | 0 | github-code | 13 |
27475237896 |
# MIT 6.100A PS1b: number of months needed to save the 25% down payment on a
# dream home, with a raise applied every six months.
annual_salary = float(input("Enter your starting annual salary: "))
monthly_salary = annual_salary / 12
portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
total_cost = float(input("Enter the cost of your dream home: "))
semi_annual_raise = float(input("Enter the semiannual raise, as a decimal: "))

portion_down_payment = 0.25   # down payment is a quarter of the house cost
r = 0.04                      # assumed annual return on savings
down_payment = total_cost * portion_down_payment

current_savings = 0
number_of_months = 0
while current_savings < down_payment:
    # One month of investment return on the balance, plus this month's deposit.
    current_savings += current_savings * r / 12 + portion_saved * monthly_salary
    number_of_months += 1
    # The raise kicks in every six months and affects deposits from then on.
    if number_of_months % 6 == 0:
        monthly_salary += monthly_salary * semi_annual_raise
print("Number of months:", number_of_months)
13518320845 | from AlgorithmImports import *
class BuyAndHold(QCAlgorithm):
    """QuantConnect algorithm: buy TQQQ once with the whole portfolio and hold.

    TQQQ is a 3x daily-leveraged ETF tracking QQQ; SPY is used as benchmark.
    """
    def Initialize(self):
        self.SetStartDate(2010, 2, 12) # Set Start Date
        self.SetEndDate(datetime.now() - timedelta(1)) # Set End Date using relative date
        self.SetCash(100_000) # Set Strategy Cash
        self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Margin) # Set Margin account
        self.SetBenchmark("SPY") # Set benchmark using SPY
        self.tqqq = self.AddEquity("TQQQ", Resolution.Daily)
        # This leveraged ProShares ETF seeks a return that is
        # 3x the return of its underlying benchmark (target) i.e. QQQ for a single day
        self.tqqq.SetDataNormalizationMode(DataNormalizationMode.Raw)
    def OnData(self, data):
        """OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        Arguments:
            data: Slice object keyed by symbol containing the stock data
        """
        # Enter once: allocate 100% of the portfolio to TQQQ, then never trade again.
        if not self.Portfolio.Invested:
            self.SetHoldings("TQQQ", 1) # All in with no leverage
            self.Debug("Purchased Stock")
| ilovestocks/Strategies | Buy and Hold/TQQQ_1x.py | TQQQ_1x.py | py | 1,148 | python | en | code | 0 | github-code | 13 |
18696283014 | class Solution:
def maximizeWin(self, A: List[int], k: int) -> int:
dp = [0] * (len(A) + 1)
res = 0
j = 0
for i, a in enumerate(A):
#print(dp)
while A[j] < A[i] - k: j += 1
dp[i + 1] = max(dp[i], i - j + 1)
res = max(res, i - j + 1 + dp[j])
return res
def maximizeWin(self, arr: List[int], k: int) -> int:
n = len(arr)
best = [0]*(n+1) # best segment after >= i
res = 0
for i in range(n-1,-1,-1): # curr seg start at ith
e = bisect.bisect_right(arr,arr[i]+k) # take maximum as possible
res = max(res,e-i + best[e]) # maximize the segments by curr seg + next segment after >= e
best[i] = max(best[i+1],e-i) # track the best segment
return res | mehkey/leetcode | python6/2555. Maximize Win From Two Segments.py | 2555. Maximize Win From Two Segments.py | py | 845 | python | en | code | 0 | github-code | 13 |
74583827858 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds `created_date` and `owner`
    to the `accounts.Account` model (left byte-identical on purpose --
    editing applied migrations desynchronises migration state)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0004_account_account_type'),
    ]

    operations = [
        migrations.AddField(
            model_name='account',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'date created', null=True),
        ),
        migrations.AddField(
            model_name='account',
            name='owner',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
    ]
| kahihia/hiretech | hiretech/accounts/migrations/0005_auto_20150922_1029.py | 0005_auto_20150922_1029.py | py | 748 | python | en | code | 0 | github-code | 13 |
7770648869 | # Example
# Input s = 'abcac'
# n = 10
def repeatedString(s, n):
    """Count the letter 'a' in the first `n` characters of `s` repeated
    infinitely (HackerRank "Repeated String").

    The original looped `while s_length < n: full_string += s` without ever
    updating `s_length`, so it spun forever whenever len(s) < n.  Counting
    arithmetically also avoids materialising a potentially huge string.
    """
    if not s:
        return 0
    full_repeats, remainder = divmod(n, len(s))
    # 'a's contributed by whole copies of s, plus those in the leftover prefix.
    return s.count('a') * full_repeats + s[:remainder].count('a')
def count_a(s):
    """Return how many times the character 'a' appears in `s`."""
    return sum(1 for ch in s if ch == 'a')
#s = 'Ricardo'
#print(s.count('a'))
#print(count_a('Ricardo Emmanuel'))
# Demo: count of 'a' in the first 10 characters of 'abcac' repeated (expects 4).
print(repeatedString('abcac',10))
| rishells/100daysOfPython | DataStructuresAndAlgos/repeteadStrings.py | repeteadStrings.py | py | 570 | python | en | code | 0 | github-code | 13 |
25974928055 | import sys
import unittest
import numpy as np
import neurolab as nl
sys.path.insert(0, '../Logic')
from FuzzyLogic import *
class FuzzyAdapt():
    """Adapts the weights of a FuzzyLogic instance with a neurolab
    feed-forward network.

    NOTE(review): behaviour depends entirely on the external FuzzyLogic API
    (getWeightLength / get1DWeights / neuralNetworkRun) -- not visible here.
    """
    def setupNetwork(self,weightSize):
        # Build a feed-forward net with one input range per fuzzy weight.
        networkInputRanges = []
        #minimum weight is zero (no effect), max is 100 (randomly picked)
        weightRange = [0, 100]
        numNeuronsInputLayer = weightSize * 2
        numNeuronsOutputLayer = weightSize
        for x in range(weightSize):
            networkInputRanges.append(weightRange)
        myNet = nl.net.newff(networkInputRanges, [numNeuronsInputLayer, numNeuronsOutputLayer])
        return myNet
    def __init__(self, FuzzyLogic):
        # Keep a handle on the fuzzy system and size the network to its weights.
        self.Fz = FuzzyLogic
        myFuzzyClass = FuzzyLogic  # NOTE(review): local is never used -- dead assignment?
        self.weightSize = self.Fz.getWeightLength()
        self.currentWeights = self.Fz.get1DWeights()
        self.network = self.setupNetwork(self.weightSize)
    def trainNetwork(self, targetFuzzyChoice):
        """Train toward `targetFuzzyChoice` and return the (possibly updated)
        fuzzy weights.  NOTE(review): the network is trained on the fuzzy
        system's current output as input -- confirm that is intended."""
        currentFuzzyChoice = self.Fz.neuralNetworkRun()
        self.network.train(currentFuzzyChoice, targetFuzzyChoice, show = 10)
        self.currentWeights = self.Fz.get1DWeights()
        return self.currentWeights
| IronMage/SeniorDesign | Engine/Adaptation/Adaptation.py | Adaptation.py | py | 1,071 | python | en | code | 0 | github-code | 13 |
20654465824 | # -*- coding: utf-8 -*-
"""
This module provides a list of tool functions about date/time.
- diff_time
- str_to_date
Author : Eric OLLIVIER
"""
import time
import datetime as dt
# =============================================================================
# str_to_date
# =============================================================================
def str_to_date(s_date, sep='/', format='DMY'):
    """
    Convert a date string to a ``datetime.date`` object.

    - s_date: string containing the date
    - sep: separator between the date items (default '/')
    - format: order of the items, using 'Y' (year), 'M' (month), 'D' (day);
      e.g. format 'DMY' matches a date like '01/08/1972'.

    Returns None when the string cannot be parsed.
    """
    try:
        idx_format = list(format.upper())
        idx_date = s_date.split(sep)
        return dt.date(int(idx_date[idx_format.index('Y')]),
                       int(idx_date[idx_format.index('M')]),
                       int(idx_date[idx_format.index('D')]))
    # Narrowed from a bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt) to the failures parsing can actually raise.
    except (AttributeError, ValueError, IndexError, TypeError):
        return None
# =============================================================================
# diff_times
# =============================================================================
def diff_times(date1, date2, unit='second'):
    """
    Return the absolute time difference between two dates or times.

    `date1` and `date2` must be the same concrete type, one of
    {datetime.datetime, datetime.date, datetime.time}.  The result is a
    number expressed in `unit`, one of {'day', 'hour', 'minute', 'second'};
    an unrecognised unit yields None (original behaviour, kept for
    backward compatibility).

    Raises TypeError for unsupported or mismatched argument types.
    """
    # Parameter checking.  The original placed an unreachable `return None`
    # after each `raise`; those dead statements have been removed.
    if not isinstance(date1, (dt.datetime, dt.date, dt.time)):
        raise TypeError(type(date1))
    if not isinstance(date2, (dt.datetime, dt.date, dt.time)):
        raise TypeError(type(date2))
    if type(date1) != type(date2):  # both operands must share a concrete type
        raise TypeError(type(date1), type(date2))
    # Order the operands so the difference is never negative.
    if date1 > date2:
        date_min, date_max = date2, date1
    else:
        date_min, date_max = date1, date2
    # Convert the difference to seconds.
    if isinstance(date_max, dt.time):
        # datetime.time does not support subtraction: convert manually.
        _sec_min = date_min.second + date_min.minute * 60 + date_min.hour * 3600
        _sec_max = date_max.second + date_max.minute * 60 + date_max.hour * 3600
        _date_diff = _sec_max - _sec_min
    else:
        # date / datetime subtraction yields a timedelta.
        _date_diff = (date_max - date_min).total_seconds()
    # Convert to the target unit.
    if unit == 'second':
        return _date_diff
    elif unit == 'minute':
        return _date_diff / 60
    elif unit == 'hour':
        return _date_diff / 3600
    elif unit == 'day':
        return _date_diff / (24 * 3600)
    return None  # unknown unit: preserve the original silent fallthrough
# end of file : timetools.py
| Eric-Oll/pytools | pytools/time/timetools.py | timetools.py | py | 3,135 | python | en | code | 0 | github-code | 13 |
35219441144 | from django.db import models
from django.contrib.auth.models import AbstractUser
from PIL import Image
from django.conf import settings
from django.contrib.auth.models import User
# Create your models here.
class Profile(AbstractUser):
    """Custom user model for TKD staff: adds contact details, posting place
    and a 300x300-max avatar image.

    NOTE(review): `DesignationChoices` is declared but never wired to the
    `Designation` field via `choices=` (doing so now would require a new
    migration, so it is only flagged here); the stored value "Techical Asst"
    also carries a typo that existing rows may depend on -- confirm.
    """
    DesignationChoices = (
        ("SSE/C&W/TKD", "SSE/C&W/TKD"),
        ("JE/C&W/TKD", "JE/C&W/TKD"),
        ("Techical Asst", "Technical Asst"),
    )
    PlaceOfPosting = (
        ("TKD Sickline Office", "TKD Sickline Office"),
        ("TKD ROH Office", "TKD ROH"),
        ("TKD Contract Office", "TKD Contract Office"),
        ("TKD Tech Cell Office", "TKD Tech Cell Office"),
        ("TKD OMRS Office", "TKD OMRS Office"),
        ("TKD SSE Planning Office", "TKD SSE Planning Office"),
        ("TKD M&P Section", "TKD M&P Section"),
        ("TKD Administration", "TKD Administration"),
        ("TKD Stores", "TKD Stores"),
        ("TKD Wheel Lathe", "TKD Wheel Lathe"),
        ("TKD Train Duty Office", "TKD Train Duty Office"),
        ("TKD ICD", "TKD ICD"),
    )
    Name = models.OneToOneField(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True)
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')
    Mobile = models.BigIntegerField(null=True, blank=True)
    LocalAddress = models.TextField(null=True, blank=True)
    IDNumber = models.CharField(max_length=30, default='ID Number', null=True)
    Designation = models.CharField(max_length=30, default='JE/C&W/TKD', null=True, blank=True)
    Posted = models.CharField(max_length=30, choices=PlaceOfPosting, default='TKD Tech Cell Office', null=True, blank=True)
    DateOfJoining = models.DateField(null=True, default='1001-01-01', blank=True)

    def __str__(self):
        return f'{self.username}'

    def save(self, *args, **kwargs):
        """Persist the profile, then downscale the avatar to fit 300x300.

        The original guarded this with an if/else whose two branches were
        identical, so the size check was dead code; `Image.thumbnail` already
        never upscales smaller images.
        """
        super(Profile, self).save(*args, **kwargs)
        img = Image.open(self.image.path)
        output_size = (300, 300)
        img.thumbnail(output_size)
        img.save(self.image.path)
| vinaykumar1908/082021i | users/models.py | models.py | py | 2,204 | python | en | code | 0 | github-code | 13 |
41387877348 | # Program to prompt the user to enter two lists of integers and check
# (a) Whether lists are of the same length.
# (b) Whether the list sums to the same value .
# (c) Whether any value occurs in both Lists.
# Function to check whether lists are of the same length
def equal_or_not(list1, list2):
    """Print whether the two lists contain the same number of elements."""
    verdict = ("Lists are of the same length"
               if len(list1) == len(list2)
               else "Lists are not of the same length")
    print(verdict)
# Function to check whether lists sum to the same value
def sum_list(list1, list2):
    """Print each list's total, then whether the two totals match."""
    total1, total2 = sum(list1), sum(list2)
    print("Sum of list 1 is:", total1)
    print("Sum of list 2 is:", total2)
    if total1 == total2:
        print("Lists sum to the same value")
    else:
        print("Lists do not sum to the same value")
# unction to check whether any value occurs in both lists
def occurrences(list1, list2):
    """Print every element of list1 that also appears in list2.

    Duplicates in list1 are reported once per occurrence, matching the
    original output.  Membership is tested against a set of list2 (O(1)
    per lookup) instead of rescanning the list each time; the script only
    ever passes lists of ints, which are hashable.
    """
    lookup = set(list2)
    for value in list1:
        if value in lookup:
            print("Value", value, "occurs in both lists")
print("Enter the first list of integers separated by commas:")
list1 = [int(x) for x in input().split()] # Get list1 from user
print("Enter the second list of integers separated by commas:")
list2 = [int(x) for x in input().split()] # Get list2 from user
equal_or_not(list1,list2) # Call function to check whether lists are of the same length
sum_list(list1,list2) # Call function to check whether lists sum to the same value
occurrences(list1,list2) # Call function to check whether any value occurs in both lists
| AdhithyaSomaraj/PYTHON | 2.0/program9.py | program9.py | py | 1,584 | python | en | code | 0 | github-code | 13 |
7847803344 | # -*- coding: utf-8 -*-
"""
query utilities.
"""
import typing as T
import attr
from attrs_mate import AttrsClass
@attr.define
class Query(AttrsClass):
"""
Structured query object.
:param parts: the parts of query string split by delimiter
:param trimmed_parts: similar to parts, but each part is white space stripped
"""
parts: T.List[str] = AttrsClass.ib_list_of_str()
trimmed_parts: T.List[str] = AttrsClass.ib_list_of_str()
SEP = "____"
class QueryParser:
    """
    Splits raw query strings into structured :class:`Query` objects.
    """

    def __init__(
        self,
        delimiter: T.Union[str, T.List[str]] = " ",
    ):
        # Normalise a single delimiter into a one-element list so `parse`
        # can treat both forms uniformly.
        if isinstance(delimiter, str):
            self.delimiter = [delimiter]
        else:
            self.delimiter = delimiter

    def parse(self, s: str) -> Query:
        """
        Convert a raw query string into a structured :class:`Query`.
        """
        # Collapse every configured delimiter onto the shared marker,
        # then split once on that marker.
        for token in self.delimiter:
            s = s.replace(token, SEP)
        raw_parts = s.split(SEP)
        stripped = [part.strip() for part in raw_parts if part.strip()]
        return Query(
            parts=raw_parts,
            trimmed_parts=stripped,
        )
| MacHu-GWU/afwf-project | afwf/query.py | query.py | py | 1,170 | python | en | code | 1 | github-code | 13 |
35441244990 | # minzhou@bu.edu
def computeCommission(salesAmount):
    """Return the commission earned on `salesAmount` of sales.

    Tiered scheme: 8% on the first $5,000, 10% on the next $5,000, and 12%
    on everything above $10,000.

    Bug fix: the original derived the lower tiers from a `balance` variable
    that was never based on `salesAmount` (`balance -= balance - 5000`
    forced it to 5000), so every amount at or below $10,000 produced a
    wrong -- often zero -- commission.
    """
    commission = 0.0
    remaining = salesAmount
    if remaining >= 10000.01:
        commission += (remaining - 10000) * 0.12  # top tier: 12% above $10k
        remaining = 10000.0
    if remaining >= 5000.01:
        commission += (remaining - 5000) * 0.10   # middle tier: 10% on $5k-$10k
        remaining = 5000.0
    if remaining >= 0.01:
        commission += remaining * 0.08            # base tier: 8% on first $5k
    return commission
def main():
    # Print a commission table for sales amounts $10,000..$100,000 in $5,000 steps.
    print("SalesAmount Commission\n")
    for i in range(10000, 100001, 5000):
        print('{0:<10} {1:10}'.format(i, computeCommission(i)))
main() | minzhou1003/intro-to-programming-using-python | practice7/6_11.py | 6_11.py | py | 560 | python | en | code | 0 | github-code | 13 |
17228088999 | import torch
import torch.nn as nn
import torch.nn.functional as F
class TargetPred(nn.Module):
    """Target (endpoint) prediction head of a TNT-style decoder.

    Scores every candidate endpoint, regresses a per-candidate (dx, dy)
    offset, and returns up to 6 NMS-filtered endpoints plus a training loss.
    """
    def __init__(self, in_channels, hidden_dim=64, m=50, device=torch.device("cpu")):
        """"""
        super(TargetPred, self).__init__()
        self.in_channels = in_channels
        self.hidden_dim = hidden_dim
        self.M = m # output candidate target
        self.device = device
        # Two small MLPs over [trajectory feature ++ candidate (x, y)]:
        # one emits a per-candidate score logit, the other a (dx, dy) offset.
        # yapf: disable
        self.prob_mlp = nn.Sequential(
            nn.Linear(in_channels + 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, 1)
        )
        self.offset_mlp = nn.Sequential(
            nn.Linear(in_channels + 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, 2)
        )
        # yapf: enable
    def target_nms(self, targets, threshold=1):
        """Greedy de-duplication: keep up to 6 targets per batch entry whose
        squared distance to every already-kept target is >= `threshold`.
        Assumes `targets` arrives score-sorted with at least 50 rows.

        NOTE(review): `targets[:, :6]` is a view, so the assignments below
        also mutate the caller's `targets` tensor in place -- confirm intended.
        """
        target_selected = targets[:, :6] # view: [batch_size, 6, 2]
        for batch_id in range(targets.shape[0]): # one batch for a time
            cnt = 1
            for i in range(1, 50):
                dis = torch.pow(target_selected[batch_id, :cnt, :] - targets[batch_id, i], 2)
                dis = torch.sum(dis, dim=1)
                if not torch.any(dis < threshold): # not exist similar trajectory
                    target_selected[batch_id, cnt] = targets[batch_id, i] # add this trajectory
                    cnt += 1
                    if cnt >= 6:
                        break
        return target_selected
    def forward(self, feat_in, tar_candidate, mask, candidate_gt=None, offset_gt=None):
        """
        predict the target end position of the target agent from the target candidates
        :param feat_in: the encoded trajectory features, [batch_size, 1, in_channels]
        :param tar_candidate: the target position candidate (x, y), [batch_size, N, 2]
        :param mask: additive mask applied to the score logits, [batch_size, N, 1]
        :param candidate_gt: one-hot ground-truth candidate indicator, [batch_size, N]
        :param offset_gt: ground-truth offset of the true candidate, [batch_size, 2]
        :return: (selected targets [batch_size, 6, 2], scalar loss)
        """
        # tar_candidate = tar_candidate[:, :candidate_num, :]
        batch_size, n, _ = tar_candidate.size()
        # stack the target candidates to the end of input feature: [batch_size, n_tar, inchannels + 2]
        feat_in_repeat = torch.cat([feat_in.repeat(1, n, 1), tar_candidate], dim=2)
        # compute probability for each candidate; the mask is added to the
        # logits before the softmax so masked candidates get ~zero probability
        prob_tensor = self.prob_mlp(feat_in_repeat)
        prob_tensor += mask
        tar_candit_prob = F.softmax(prob_tensor, dim=1).squeeze(-1) # [batch_size, n_tar]
        tar_offset = self.offset_mlp(feat_in_repeat) # [batch_size, n_tar, 2]
        # Keep the 50 highest-probability candidates, apply their offsets,
        # then thin them down to 6 with NMS (threshold hard-coded to 2 here).
        # TODO: 50 ...
        _, indices = torch.topk(tar_candit_prob, 50, dim=1)
        gather_indices = indices.unsqueeze(-1).repeat(1, 1, 2)
        tar_selected = torch.gather(tar_candidate, 1, gather_indices) + torch.gather(tar_offset, 1, gather_indices)
        tar_selected = self.target_nms(tar_selected, 2)
        loss = self.loss(tar_candit_prob, tar_offset, candidate_gt, offset_gt)
        return tar_selected, loss
    def loss(self, tar_candit_prob, tar_offset, candidate_gt, offset_gt, reduction="mean"):
        """BCE on the candidate probabilities plus smooth-L1 on the offsets of
        the ground-truth candidates only (selected by the boolean mask)."""
        n_candidate_loss = F.binary_cross_entropy(tar_candit_prob, candidate_gt, reduction=reduction)
        # candidate_gt = candidate_gt.unsqueeze(-1).repeat(1, 1, 2)
        # tar_offset = torch.gather(tar_offset, 1, candidate_gt)
        offset_loss = F.smooth_l1_loss(tar_offset[candidate_gt.bool()], offset_gt, reduction=reduction)
        return n_candidate_loss + offset_loss
class MotionEstimation(nn.Module):
def __init__(self, in_channels, horizon=30, hidden_dim=64):
"""
estimate the trajectories based on the predicted targets
:param in_channels:
:param horizon:
:param hidden_dim:
"""
super(MotionEstimation, self).__init__()
self.in_channels = in_channels
self.horizon = horizon
self.hidden_dim = hidden_dim
# yapf: disable
self.traj_pred = nn.Sequential(
nn.Linear(in_channels + 2, hidden_dim),
nn.LayerNorm(hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, hidden_dim),
nn.LayerNorm(hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, horizon * 2)
)
# self.fc = nn.Linear(hidden_dim + 2, horizon * 2)
# yapf: enable
def forward(self, feat_in, loc_in, traj_gt, reduction="mean"):
"""
predict the trajectory according to the target location
:param feat_in: encoded feature vector for the target agent, torch.Tensor, [batch_size, in_channels]
:param loc_in: end location, torch.Tensor, [batch_size, M, 2] or [batch_size, 1, 2]
:return: [batch_size, M, horizon * 2] or [batch_size, 1, horizon * 2]
"""
batch_size, M, _ = loc_in.size()
if M > 1:
# target candidates
input = torch.cat([feat_in.repeat(1, M, 1), loc_in], dim=2)
else:
# targt ground truth
input = torch.cat([feat_in, loc_in], dim=2)
traj_pred = self.traj_pred(input)
loss = F.smooth_l1_loss(traj_pred, traj_gt.repeat(1, M, 1), reduction=reduction)
return traj_pred, loss
class TrajScoreSelection(nn.Module):
    def __init__(self, feat_channels, horizon=30, hidden_dim=64, temper=0.01):
        """
        init trajectories scoring and selection module
        :param feat_channels: int, number of channels
        :param horizon: int, prediction horizon, prediction time x pred_freq
        :param hidden_dim: int, hidden dimension
        :param temper: float, the temperature used to soften the target score
        """
        super(TrajScoreSelection, self).__init__()
        self.feat_channels = feat_channels
        self.horizon = horizon
        self.temper = temper
        # MLP scoring each candidate from [feature ++ flattened trajectory].
        # yapf: disable
        self.score_mlp = nn.Sequential(
            nn.Linear(feat_channels + horizon * 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, 1)
        )
        # yapf: enable
    def distance_metric(self, traj_candidate, traj_gt):
        """
        compute the distance between the candidate trajectories and gt trajectory
        :param traj_candidate: torch.Tensor, [batch_size, M, horizon * 2] or [M, horizon * 2]
        :param traj_gt: torch.Tensor, [batch_size, horizon * 2] or [1, horizon * 2]
        :return: distance, torch.Tensor, [batch_size, M] or [1, M]
        """
        # Distance = maximum over time steps of the per-step squared (x, y) error.
        _, M, horizon_2_times = traj_candidate.size()
        dis = torch.pow(traj_candidate - traj_gt, 2).view(-1, M, int(horizon_2_times / 2), 2)
        dis, _ = torch.max(torch.sum(dis, dim=3), dim=2)
        return dis
    # todo: determine appropiate threshold
    def traj_selection(self, traj_in, score, targets, threshold=0.01):
        """
        select the top k trajectories according to the score and the distance
        :param traj_in: candidate trajectories, [batch, M, horizon * 2]
        :param score: score of the candidate trajectories, [batch, M]
        :param targets: candidate end locations, [batch, M, 2]
        :param threshold: float, min pairwise distance for a kept trajectory
        :return: (selected trajectories [batch_size, k, horizon * 2],
                  score-sorted targets [batch_size, M, 2])
        """
        # for debug
        # return traj_in, targets
        def distance_metric(traj_candidate, traj_gt):
            # Local variant of self.distance_metric that also accepts a
            # single (un-batched) candidate.
            if traj_candidate.dim() == 2:
                traj_candidate = traj_candidate.unsqueeze(1)
            _, M, horizon_2_times = traj_candidate.size()
            dis = torch.pow(traj_candidate - traj_gt, 2).view(-1, M, int(horizon_2_times / 2), 2)
            dis, _ = torch.max(torch.sum(dis, dim=3), dim=2)
            return dis
        # re-arrange trajectories according to the descending order of the score
        M, k = 50, 6
        _, batch_order = score.sort(descending=True)
        traj_pred = torch.cat([traj_in[i, order] for i, order in enumerate(batch_order)], dim=0).view(-1, M, self.horizon * 2)
        targets_sorted = torch.cat([targets[i, order] for i, order in enumerate(batch_order)], dim=0).view(-1, M, 2)
        # NOTE(review): this slice is a view, so the NMS writes below also
        # mutate `traj_pred` in place -- confirm intended.
        traj_selected = traj_pred[:, :k] # view: [batch_size, k, horizon * 2]
        # targets_selected = targets_sorted[:, :k]
        # Greedy NMS per batch entry: keep a trajectory only if it is at
        # least `threshold` away from everything already kept.
        for batch_id in range(traj_pred.shape[0]): # one batch for a time
            traj_cnt = 1
            for i in range(1, M):
                dis = distance_metric(traj_selected[batch_id, :traj_cnt, :], traj_pred[batch_id, i].unsqueeze(0))
                if not torch.any(dis < threshold): # not exist similar trajectory
                    traj_selected[batch_id, traj_cnt] = traj_pred[batch_id, i] # add this trajectory
                    # targets_selected[batch_id, traj_cnt] = targets_sorted[batch_id, i]
                    traj_cnt += 1
                    if traj_cnt >= k:
                        break # break if collect enough traj
            # no enough traj, pad zero traj
            # NOTE(review): this zero-fills the tail for ALL batch entries,
            # not just `batch_id` -- looks like it should be
            # traj_selected[batch_id, traj_cnt:]; confirm.
            if traj_cnt < k:
                traj_selected[:, traj_cnt:] = 0.0
                # targets_selected = targets_selected[:, :traj_cnt]
        # return traj_selected, targets_selected
        return traj_selected, targets_sorted
    # (A superseded BCE-based variant of `forward` that was kept here as a
    # commented-out block has been removed; see version control history.)
    def forward(self, feat_in, traj_in, traj_gt, targets, reduction="mean"):
        """
        forward function
        :param feat_in: input feature tensor, torch.Tensor, [batch_size, 1, feat_channels]
        :param traj_in: candidate trajectories, torch.Tensor, [batch_size, M, horizon * 2]
        :param traj_gt: ground-truth trajectory, [batch_size, 1, horizon * 2]
        :param targets: candidate end locations, [batch_size, M, 2]
        :return: (selected trajectories, score-sorted targets, MSE score loss)
        """
        batch_size, M, _ = traj_in.size()
        input_tenor = torch.cat([feat_in.repeat(1, M, 1), traj_in], dim=2)
        score_pred = F.softmax(self.score_mlp(input_tenor).squeeze(-1), dim=-1)
        # Teacher scores: softmax over negative distances to the ground truth,
        # sharpened by the temperature; detached so no gradient flows into it.
        score_gt = F.softmax(-self.distance_metric(traj_in, traj_gt) / self.temper, dim=1)
        score_gt = score_gt.detach()
        loss = F.mse_loss(score_pred, score_gt)
        selected_traj, targets_selected = self.traj_selection(traj_in, score_pred, targets, threshold=0.1)
        return selected_traj, targets_selected, loss
class TNTDecoder(nn.Module):
    """TNT-style decoder: predict candidate targets, then regress one
    trajectory per selected target; returns the trajectories, the combined
    loss and the selected end positions."""

    def __init__(self, in_channels, hidden_dim=64, m=50, device=torch.device("cpu")):
        super(TNTDecoder, self).__init__()
        # Sub-modules are built in the original order so seeded weight
        # initialisation stays reproducible.
        self.target_pred = TargetPred(in_channels, hidden_dim, m, device=device)
        self.motion_estimator = MotionEstimation(in_channels, horizon=30, hidden_dim=hidden_dim)
        self.traj_score_selection = TrajScoreSelection(in_channels, horizon=30, hidden_dim=hidden_dim)

    def forward(self, feat_in, tar_candidate, mask, traj_gt, candidate_gt, offset_gt):
        """Run target prediction then motion estimation; scoring/selection is
        currently bypassed (its loss is not part of the total)."""
        final_pos, target_loss = self.target_pred(feat_in, tar_candidate, mask, candidate_gt, offset_gt)
        # Teacher forcing: the regression loss is computed against the
        # ground-truth endpoint (last two values of the flattened trajectory).
        _, motion_loss = self.motion_estimator(feat_in, traj_gt[:, :, -2:], traj_gt)
        # Inference path: regress trajectories toward the predicted targets.
        pred_traj, _ = self.motion_estimator(feat_in, final_pos, traj_gt)
        total_loss = 1 * target_loss + motion_loss
        return pred_traj, total_loss, final_pos
if __name__ == "__main__":
in_channels = 128
horizon = 30
device = torch.device("cuda")
max_target = 2000
model = TNTDecoder(in_channels=in_channels, hidden_dim=64, m=50, device=device)
model.cuda()
batch_size = 4
feat_in = torch.randn((batch_size, 1, in_channels), device=device)
tar_candidate = torch.rand((batch_size, max_target, 2), device=device)
mask = torch.zeros((batch_size, max_target, 1), device=device)
traj_gt = torch.randn((batch_size, 1, horizon * 2), device=device)
candidate_gt = torch.zeros((batch_size, max_target), device=device)
candidate_gt[:, 1] = 1
offset_gt = torch.randn((batch_size, 2), device=device)
# forward
selected_traj, loss = model(feat_in, tar_candidate, mask, traj_gt, candidate_gt, offset_gt)
print("selected_traj and loss: ", selected_traj, loss)
| ZhoubinXM/AV-EnvModeling-MotionForecasting | model/decoder.py | decoder.py | py | 13,567 | python | en | code | 0 | github-code | 13 |
9046506 | import urllib2
import bs4
from bs4 import BeautifulSoup
import os
from NLPCore import NLPCoreClient
import operator
url = "https://www.google.com"
page = urllib2.urlopen(url)
soup = bs4.BeautifulSoup(page, 'html.parser')
plaintext = soup.get_text()
text = ["Bill Gates works at Microsoft.", "Sergei works at Google."] # In actuality, you will want to input the cleaned webpage for the first pipeline, and a list of candidate sentences for the second.
#text = "BIll Gates works at Microsoft. Sergei works at Google."
#path to corenlp
dir_path = os.path.dirname(os.path.realpath(__file__))
nlp_path = os.path.join(dir_path, "stanford-corenlp-full-2017-06-09")
client = NLPCoreClient(nlp_path)
p1props = {
"annotators": "tokenize,ssplit,pos,lemma,ner", #Second pipeline; leave out parse,relation for first
"parse.model": "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz", #Must be present for the second pipeline!
"ner.useSUTime": "0"
}
#print(plaintext)
doc = client.annotate(text=text, properties=p1props)
#print(doc.sentences[0].tokens[0].ner)
#print(doc.sentences[0].tree_as_string())
#print(doc.tree_as_string())
'''PIPELINE 1 '''
# Keep only sentences that mention at least one PERSON and one ORGANIZATION;
# those become the candidate sentences for the relation-extraction pipeline.
entities = ["PERSON", "ORGANIZATION"]
p2sents = []
newsentence = ""
for sentence in doc.sentences:
    #copy entities, remove as found in sentence
    print(sentence)
    matchedEntities = list(entities)
    for token in sentence.tokens:
        print("ner: " + token.ner)
        if token.ner in matchedEntities:
            print("Match!:" + token.ner)
            matchedEntities.remove(token.ner)
    #if all entities removed, its a match!
    if len(matchedEntities) == 0:
        # Rebuild the sentence text from its tokens (leading space included).
        for x in sentence.tokens:
            newsentence += " " + x.word
        print(newsentence)
        p2sents.append(newsentence)
        newsentence = ""
# Second pipeline adds parsing and relation extraction on the survivors.
p2props = {
    "annotators": "tokenize,ssplit,pos,lemma,ner,parse,relation", #Second pipeline; leave out parse,relation for first
    "parse.model": "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz", #Must be present for the second pipeline!
    "ner.useSUTime": "0"
}
doc = client.annotate(text=p2sents, properties=p2props)
''' PIPELINE 2 '''
# Collect Work_For relations whose best-probability label clears the threshold.
setRelation = "Work_For"
threshhold = .1
relationList = []
#print(doc.sentences[0].relations[0])
for sentence in doc.sentences:
    for relation in sentence.relations:
        #check to see if correct relation
        # Highest-probability label for this relation (Python 2 iteritems).
        currentRelation = max(relation.probabilities.iteritems(), key=operator.itemgetter(1))[0]
        print(currentRelation)
        if(currentRelation == setRelation):
            currentProb = max(relation.probabilities.iteritems(), key=operator.itemgetter(1))[1]
            #check to see if prob is above threshhold
            if(float(currentProb) >= threshhold):
                # Record [probability, entity1 value/type, entity2 value/type].
                temp = []
                temp.append(currentProb)
                temp.append(relation.entities[0].value)
                temp.append(relation.entities[0].type)
                temp.append(relation.entities[1].value)
                temp.append(relation.entities[1].type)
                print(relation.entities[0].type)
                print(relation.entities[0].value)
                relationList.append(temp)
                print("Success!")
print("===================== ALL RELATIONS =====================")
for rList in relationList:
    lineString = ("Relation type: " + setRelation + "Confidence: " + rList[0] +
                " Entity #1: " + rList[1] + " (" + rList[2] + ") " +
                " Entity #2: " + rList[3] + " (" + rList[4] + ") ")
    print(lineString)
| sarinaxie/iter-set-expansion | cleanpagetest.py | cleanpagetest.py | py | 3,390 | python | en | code | 0 | github-code | 13 |
17048224924 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ReferenceId import ReferenceId
class AnttechBlockchainDefinSaasFunditemQueryModel(object):
    """Request model for the Anttech blockchain DEFIN SaaS fund-item query API.

    Plain data holder: five optional fields exposed as properties, plus
    dict (de)serialisation helpers.  The original generated code repeated
    the same truthiness / ``to_alipay_dict`` pattern once per field; both
    helpers now iterate a single field list instead.
    """

    # Attribute names; these double as the keys of the alipay dict form.
    _FIELDS = ('fund_type', 'out_order_id', 'out_payer_id',
               'out_request_id', 'platform_member_id')

    def __init__(self):
        self._fund_type = None
        self._out_order_id = None
        self._out_payer_id = None
        self._out_request_id = None
        self._platform_member_id = None

    @property
    def fund_type(self):
        return self._fund_type

    @fund_type.setter
    def fund_type(self, value):
        self._fund_type = value

    @property
    def out_order_id(self):
        return self._out_order_id

    @out_order_id.setter
    def out_order_id(self, value):
        self._out_order_id = value

    @property
    def out_payer_id(self):
        return self._out_payer_id

    @out_payer_id.setter
    def out_payer_id(self, value):
        # Coerce plain dict payloads into ReferenceId instances.
        if isinstance(value, ReferenceId):
            self._out_payer_id = value
        else:
            self._out_payer_id = ReferenceId.from_alipay_dict(value)

    @property
    def out_request_id(self):
        return self._out_request_id

    @out_request_id.setter
    def out_request_id(self, value):
        self._out_request_id = value

    @property
    def platform_member_id(self):
        return self._platform_member_id

    @platform_member_id.setter
    def platform_member_id(self, value):
        self._platform_member_id = value

    def to_alipay_dict(self):
        """Serialise every truthy field to a plain dict, recursing into
        values that expose their own ``to_alipay_dict``."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue  # generated-SDK convention: falsy fields are omitted
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; missing keys are left as None and a
        falsy dict yields None (original behaviour)."""
        if not d:
            return None
        o = AnttechBlockchainDefinSaasFunditemQueryModel()
        for name in AnttechBlockchainDefinSaasFunditemQueryModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AnttechBlockchainDefinSaasFunditemQueryModel.py | AnttechBlockchainDefinSaasFunditemQueryModel.py | py | 3,354 | python | en | code | 241 | github-code | 13 |
class Solution(object):
    """Climbing-stairs: count distinct ways to reach step n taking 1 or 2
    steps at a time (Fibonacci-style recurrence)."""

    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int

        Bottom-up DP over a dict cache.
        """
        if n <= 2:
            # Also covers n <= 0 (returns 0), which previously raised KeyError.
            return max(n, 0)
        cache = {1: 1, 2: 2}
        for i in range(3, n + 1):
            cache[i] = cache[i - 1] + cache[i - 2]
        return cache[n]

    def climbStairs_faster(self, n):
        """O(1)-space variant of climbStairs.

        Bug fix: the accumulation loop previously ran ``n + 1`` times instead
        of ``n - 2``, so e.g. n=3 returned 13 rather than 3.
        """
        if n <= 0:
            return 0
        if n <= 2:
            return n
        prev, curr = 1, 2
        for _ in range(n - 2):
            prev, curr = curr, prev + curr
        return curr
| JinhanM/leetcode-playground | Recursion/climb_stairs.py | climb_stairs.py | py | 575 | python | en | code | 0 | github-code | 13 |
37170473833 | from itertools import takewhile as lxpDwtEkCfBQUiO
# NOTE(review): this file was mangled by pyminifier — names are obfuscated and
# builtins are aliased to module globals below (float, int, min, len, max,
# range, enumerate, list, in that order).
lxpDwtEkCfBQUie=float
lxpDwtEkCfBQUHi=int
lxpDwtEkCfBQUHN=min
lxpDwtEkCfBQUHM=len
lxpDwtEkCfBQUHo=max
lxpDwtEkCfBQUHs=range
lxpDwtEkCfBQUHg=enumerate
lxpDwtEkCfBQUHj=list
from typing import NamedTuple
from numpy import average
from p2.src.algorithm_api import Algorithm
from p2.src.data_api import Instance,Solution,Schedule,Task
# Tunable constants of the scheduling heuristic (index offset, look-ahead
# window, speed threshold, mean-duration placeholder filled in at run time).
lxpDwtEkCfBQUiH=1
lxpDwtEkCfBQUiN=0
lxpDwtEkCfBQUiM=3
lxpDwtEkCfBQUio=1.5
lxpDwtEkCfBQUis=0
class lxpDwtEkCfBQUiu(NamedTuple):
 # Machine record: lxpDwtEkCfBQUig = speed factor (completion time is
 # duration * factor), lxpDwtEkCfBQUiv = time the machine becomes free,
 # index = position in the machine list.
 # NOTE(review): field meanings reconstructed from usage — confirm.
 lxpDwtEkCfBQUig:lxpDwtEkCfBQUie
 lxpDwtEkCfBQUiv:lxpDwtEkCfBQUie
 index:lxpDwtEkCfBQUHi
def lxpDwtEkCfBQUib(lxpDwtEkCfBQUiL):
 # Sort key: a machine's speed factor (lower factor = faster machine).
 return lxpDwtEkCfBQUiL.lxpDwtEkCfBQUig
def lxpDwtEkCfBQUiF(lxpDwtEkCfBQUid):
 # Sort key: duration of an (index, Task) pair (element 1 is the Task).
 return lxpDwtEkCfBQUid[lxpDwtEkCfBQUiH].duration
def lxpDwtEkCfBQUiR(lxpDwtEkCfBQUid:Task):
 # Sort key: a task's ready (release) time.
 return lxpDwtEkCfBQUid.ready
def lxpDwtEkCfBQUiX(lxpDwtEkCfBQUij,lxpDwtEkCfBQUiA,lxpDwtEkCfBQUic,lxpDwtEkCfBQUiT,lxpDwtEkCfBQUiq,enumerated,lxpDwtEkCfBQUir):
 # Assign ready tasks to idle machines at the current time step.
 # Args (obfuscated): ij = idle machines, iA = ready (index, Task) pairs,
 # ic = list of scheduled task ids, iT = all machines, iq = per-machine task
 # lists, enumerated = master task list, ir = current time.
 # NOTE(review): semantics reconstructed from usage — confirm before editing.
 lxpDwtEkCfBQUij.sort(key=lxpDwtEkCfBQUib)
 lxpDwtEkCfBQUiA.sort(key=lxpDwtEkCfBQUiF)
 # iz = fastest machine overall, ih = fastest idle machine (min speed factor).
 lxpDwtEkCfBQUiz=lxpDwtEkCfBQUHN(lxpDwtEkCfBQUiT,key=lambda m:m.lxpDwtEkCfBQUig)
 lxpDwtEkCfBQUih=lxpDwtEkCfBQUHN(lxpDwtEkCfBQUij,key=lambda m:m.lxpDwtEkCfBQUig)
 for lxpDwtEkCfBQUiL in lxpDwtEkCfBQUij:
  if lxpDwtEkCfBQUHM(lxpDwtEkCfBQUiA)>0:
   # Heuristic: if the fastest machine frees up within the look-ahead window
   # and only slow machines are idle, hold work back for it.
   if lxpDwtEkCfBQUiz.lxpDwtEkCfBQUiv>lxpDwtEkCfBQUir and lxpDwtEkCfBQUiz.lxpDwtEkCfBQUiv<lxpDwtEkCfBQUir+lxpDwtEkCfBQUiM and lxpDwtEkCfBQUih.lxpDwtEkCfBQUig>lxpDwtEkCfBQUio and lxpDwtEkCfBQUHo(lxpDwtEkCfBQUiA,key=lambda t:t[lxpDwtEkCfBQUiH].duration)[lxpDwtEkCfBQUiH].duration>1.1*lxpDwtEkCfBQUis and lxpDwtEkCfBQUih.lxpDwtEkCfBQUig*lxpDwtEkCfBQUiA[0][lxpDwtEkCfBQUiH].duration-lxpDwtEkCfBQUiA[0][lxpDwtEkCfBQUiH].ready>lxpDwtEkCfBQUiA[0][lxpDwtEkCfBQUiH].duration+lxpDwtEkCfBQUiM-lxpDwtEkCfBQUiA[0][lxpDwtEkCfBQUiH].ready:
    break
   # Fast machines (factor < 1.5) take the longest pending task; slow
   # machines take the shortest (iA is sorted by duration ascending).
   if lxpDwtEkCfBQUiL.lxpDwtEkCfBQUig<lxpDwtEkCfBQUio:
    lxpDwtEkCfBQUiY=lxpDwtEkCfBQUiA.pop()
   else:
    lxpDwtEkCfBQUiY=lxpDwtEkCfBQUiA.pop(0)
   # Completion time = now + duration * speed factor.
   lxpDwtEkCfBQUiv=lxpDwtEkCfBQUir+lxpDwtEkCfBQUiY[lxpDwtEkCfBQUiH].duration*lxpDwtEkCfBQUiL.lxpDwtEkCfBQUig
   lxpDwtEkCfBQUic.append(lxpDwtEkCfBQUiY[0])
   lxpDwtEkCfBQUiT[lxpDwtEkCfBQUiL.index]=lxpDwtEkCfBQUiT[lxpDwtEkCfBQUiL.index]._replace(lxpDwtEkCfBQUiv=lxpDwtEkCfBQUiv)
   lxpDwtEkCfBQUiq[lxpDwtEkCfBQUiL.index].append(lxpDwtEkCfBQUiY[0])
   enumerated.remove(lxpDwtEkCfBQUiY)
class Algorithm136705(Algorithm):
 # Event-driven list scheduler: advance a virtual clock, assigning ready
 # tasks to idle machines until every task is scheduled.
 def run(self,lxpDwtEkCfBQUiJ:Instance)->Solution:
  global lxpDwtEkCfBQUis
  # ir = current time, ic = scheduled task ids, iq = per-machine task lists.
  lxpDwtEkCfBQUir:lxpDwtEkCfBQUie=0
  lxpDwtEkCfBQUic=[]
  lxpDwtEkCfBQUiq=[[]for _ in lxpDwtEkCfBQUHs(lxpDwtEkCfBQUiJ.no_machines)]
  lxpDwtEkCfBQUiT=[lxpDwtEkCfBQUiu(lxpDwtEkCfBQUig,0,i)for i,lxpDwtEkCfBQUig in lxpDwtEkCfBQUHg(lxpDwtEkCfBQUiJ.machine_speeds)]
  # Mean task duration, stashed in a module global used by the heuristic.
  lxpDwtEkCfBQUis=average([lxpDwtEkCfBQUid.duration for lxpDwtEkCfBQUid in lxpDwtEkCfBQUiJ.tasks])
  lxpDwtEkCfBQUia=lxpDwtEkCfBQUiJ.tasks
  lxpDwtEkCfBQUia.sort(key=lxpDwtEkCfBQUiR)
  # Tasks tagged with 1-based ids, sorted by ready time.
  lxpDwtEkCfBQUiK=[[i,val]for i,val in lxpDwtEkCfBQUHg(lxpDwtEkCfBQUia,start=1)]
  while lxpDwtEkCfBQUHM(lxpDwtEkCfBQUic)<lxpDwtEkCfBQUiJ.no_tasks:
   lxpDwtEkCfBQUij=lxpDwtEkCfBQUin(lxpDwtEkCfBQUiT,lxpDwtEkCfBQUir)
   lxpDwtEkCfBQUiA=lxpDwtEkCfBQUiS(lxpDwtEkCfBQUiK,lxpDwtEkCfBQUir)
   if lxpDwtEkCfBQUHM(lxpDwtEkCfBQUij)>0 and lxpDwtEkCfBQUHM(lxpDwtEkCfBQUiA)>0:
    lxpDwtEkCfBQUiX(lxpDwtEkCfBQUij,lxpDwtEkCfBQUiA,lxpDwtEkCfBQUic,lxpDwtEkCfBQUiT,lxpDwtEkCfBQUiq,lxpDwtEkCfBQUiK,lxpDwtEkCfBQUir)
    lxpDwtEkCfBQUir+=1
   elif lxpDwtEkCfBQUHM(lxpDwtEkCfBQUij)==0:
    # No idle machine: jump to the earliest machine-free time.
    lxpDwtEkCfBQUir=lxpDwtEkCfBQUHo(lxpDwtEkCfBQUHN(lxpDwtEkCfBQUiT,key=lambda lxpDwtEkCfBQUiL:lxpDwtEkCfBQUiL.lxpDwtEkCfBQUiv).lxpDwtEkCfBQUiv,lxpDwtEkCfBQUir)
   elif lxpDwtEkCfBQUHM(lxpDwtEkCfBQUiA)==0:
    # No ready task: jump to the earliest task release time.
    lxpDwtEkCfBQUir=lxpDwtEkCfBQUHN(lxpDwtEkCfBQUiK,key=lambda lxpDwtEkCfBQUid:lxpDwtEkCfBQUid[lxpDwtEkCfBQUiH].ready)[lxpDwtEkCfBQUiH].ready
   else:
    lxpDwtEkCfBQUir+=1
  return lxpDwtEkCfBQUiy(lxpDwtEkCfBQUiJ,lxpDwtEkCfBQUiq)
def lxpDwtEkCfBQUiS(lxpDwtEkCfBQUia,lxpDwtEkCfBQUir:lxpDwtEkCfBQUie):
 # Tasks whose ready time has arrived by time ir; takewhile is valid only
 # because the list is pre-sorted by ready time.
 return lxpDwtEkCfBQUHj(lxpDwtEkCfBQUiO(lambda lxpDwtEkCfBQUid:lxpDwtEkCfBQUid[lxpDwtEkCfBQUiH].ready<=lxpDwtEkCfBQUir,lxpDwtEkCfBQUia))
def lxpDwtEkCfBQUin(lxpDwtEkCfBQUiT,lxpDwtEkCfBQUir:lxpDwtEkCfBQUie):
 # Machines idle at time ir (free-at time <= now).
 return[lxpDwtEkCfBQUiL for lxpDwtEkCfBQUiL in lxpDwtEkCfBQUiT if lxpDwtEkCfBQUiL.lxpDwtEkCfBQUiv<=lxpDwtEkCfBQUir]
def lxpDwtEkCfBQUiy(lxpDwtEkCfBQUiJ,lxpDwtEkCfBQUiq):
 # Score the schedule: mean flow time (completion - ready) over all tasks,
 # replaying each machine's task list in order.  Task ids are 1-based, hence
 # the -1 when indexing instance.tasks.
 lxpDwtEkCfBQUiG=0
 for lxpDwtEkCfBQUiW in lxpDwtEkCfBQUHs(lxpDwtEkCfBQUiJ.no_machines):
  lxpDwtEkCfBQUir=0
  for lxpDwtEkCfBQUiI in lxpDwtEkCfBQUiq[lxpDwtEkCfBQUiW]:
   # Idle until the task is released, then run for duration * speed.
   lxpDwtEkCfBQUir+=lxpDwtEkCfBQUHo(lxpDwtEkCfBQUiJ.tasks[lxpDwtEkCfBQUiI-1].ready-lxpDwtEkCfBQUir,0)
   lxpDwtEkCfBQUir+=lxpDwtEkCfBQUiJ.machine_speeds[lxpDwtEkCfBQUiW]*lxpDwtEkCfBQUiJ.tasks[lxpDwtEkCfBQUiI-1].duration
   lxpDwtEkCfBQUiG+=lxpDwtEkCfBQUir-lxpDwtEkCfBQUiJ.tasks[lxpDwtEkCfBQUiI-1].ready
 lxpDwtEkCfBQUiG=lxpDwtEkCfBQUiG/lxpDwtEkCfBQUiJ.no_tasks
 return Solution(lxpDwtEkCfBQUiG,Schedule(lxpDwtEkCfBQUiJ.no_tasks,lxpDwtEkCfBQUiJ.no_machines,lxpDwtEkCfBQUiq))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
| KamilPiechowiak/ptsz | p2/src/id136705/algorithm.py | algorithm.py | py | 5,076 | python | en | code | 0 | github-code | 13 |
11571167037 | import random as r
# Hit-and-blow number guessing game: the player guesses three digits; a "hit"
# is a correct digit in the correct position, a "blow" a correct digit in the
# wrong position.
print('数当てゲームを始めます。3桁の数を当ててください!')
answer = [r.randint(0, 9) for _ in range(3)]
while True:
    hit = 0
    blow = 0
    for i in range(3):
        num = int(input(f'{i}桁目の予想を入力(0~9)>>'))
        # Compare this guess digit against every answer position.
        for j, digit in enumerate(answer):
            if digit != num:
                continue
            if j == i:
                hit += 1
            else:
                blow += 1
    print(f'{hit}ヒット!{blow}ボール!')
    if hit == 3:
        print('正解です!')
        break
    select = int(input('続けますか?1:続ける2:終了>>'))
    if select == 2:
        print(f'正解は{answer[0]}{answer[1]}{answer[2]}です')
        break
| nomoto720/PythonTraining | day0209/numhit.py | numhit.py | py | 721 | python | ja | code | 0 | github-code | 13 |
25296770900 | from django.test import TestCase
from occurrence.models import (
SurveyMethod,
SamplingSizeUnit,
)
from occurrence.factories import SurveyMethodFactory
from occurrence.factories import SamplingSizeUnitFactory
from species.models import Taxon
from species.factories import TaxonRankFactory
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
class SurveyMethodTestCase(TestCase):
    """Unit tests for the SurveyMethod model."""

    @classmethod
    def setUpTestData(cls):
        """Create a single survey-method fixture shared by every test."""
        cls.survey_method = SurveyMethodFactory()

    def test_create_survey_method(self):
        """A factory-built object is a persisted SurveyMethod."""
        self.assertIsInstance(self.survey_method, SurveyMethod)
        self.assertEqual(1, SurveyMethod.objects.count())
        stored = SurveyMethod.objects.get(id=self.survey_method.id)
        self.assertEqual(stored.name, self.survey_method.name)

    def test_update_survey_method(self):
        """Renaming the method persists to the database."""
        self.survey_method.name = 'survey method 1'
        self.survey_method.save()
        stored = SurveyMethod.objects.get(id=self.survey_method.id)
        self.assertEqual('survey method 1', stored.name)

    def test_survey_method_unique_name_constraint(self):
        """A duplicate name violates the unique constraint."""
        with self.assertRaises(Exception) as ctx:
            SurveyMethodFactory(name='survey method 0')
        self.assertEqual(IntegrityError, type(ctx.exception))

    def test_survey_method_unique_sort_id_constraint(self):
        """A duplicate sort id violates the unique constraint."""
        with self.assertRaises(Exception) as ctx:
            SurveyMethodFactory(sort_id=0)
        self.assertEqual(IntegrityError, type(ctx.exception))

    def test_delete_survey_method(self):
        """Deleting the fixture empties the table."""
        self.survey_method.delete()
        self.assertEqual(0, SurveyMethod.objects.count())
class SamplingSizeUnitTestCase(TestCase):
    """Sampling size unit testcase."""
    @classmethod
    def setUpTestData(cls):
        """Setup test data (one 'cm' unit fixture)."""
        cls.sampling_size_unit = SamplingSizeUnitFactory(unit='cm')
    def test_create_sampling_size_unit(self):
        """Test create sampling size unit."""
        self.assertTrue(isinstance(self.sampling_size_unit, SamplingSizeUnit))
        self.assertEqual(SamplingSizeUnit.objects.count(), 1)
        self.assertEqual(self.sampling_size_unit.unit, 'cm')
    def test_update_sampling_size_unit(self):
        """Test update sampling size unit."""
        self.sampling_size_unit.unit = 'mm'
        self.sampling_size_unit.save()
        self.assertEqual(SamplingSizeUnit.objects.get(id=self.sampling_size_unit.id).unit, 'mm')
    def test_sampling_size_unit_unique_unit_constraint(self):
        """Testing unique values for the unit.

        Bug fixes: the duplicate must collide with the fixture's unit ('cm';
        per-test changes from other tests are rolled back, so 'mm' would not
        clash), and the assertion must compare against
        ``type(raised.exception)`` — comparing the exception *instance* to the
        IntegrityError class can never be equal (matches the sibling
        SurveyMethodTestCase pattern).
        """
        with self.assertRaises(Exception) as raised:
            SamplingSizeUnitFactory(unit='cm')
        self.assertEqual(IntegrityError, type(raised.exception))
    def test_delete_sampling_size_unit(self):
        """Test delete sampling size unit."""
        self.sampling_size_unit.delete()
        self.assertEqual(SamplingSizeUnit.objects.count(), 0)
| kartoza/sawps | django_project/occurrence/test_occurrence_models.py | test_occurrence_models.py | py | 3,266 | python | en | code | 0 | github-code | 13 |
31078839903 | import os
import flask
import logging
import requests
import utils
log = logging.getLogger(__name__)
app = flask.Flask(__name__)
# Downstream service base URLs, injected through the environment
# (e.g. by docker-compose).
service_b_endpoint = os.environ.get("SERVICE_B_ENDPOINT", None)
service_c_endpoint = os.environ.get("SERVICE_C_ENDPOINT", None)
# Fail fast at import time if the deployment forgot to configure them.
if not service_c_endpoint or not service_b_endpoint:
    raise RuntimeError("Set the ENDPOINTS in environment variable")
@app.before_request
def before_request():
    """Attach a freshly generated request ID to the request context."""
    request_id = utils.generate_request_id()
    flask.g.request_id = request_id
@app.after_request
def after_request(response):
    """Echo the request ID back to the caller in a response header."""
    request_id = flask.g.request_id
    response.headers["Request_ID"] = request_id
    return response
@app.route("/health")
def handle_health():
"""Report the HTTP server health."""
return flask.jsonify(status="up")
@app.route("/foo", methods=["POST"])
def handle_foo():
"""Invoke serviceB and then serviceC."""
log.info("Service A /foo called")
data = {}
data.update({"foo": "111"})
log.info("Sending request to Service B")
resp = requests.post("%s/bar" % service_b_endpoint,
headers={"X-Request-ID": flask.g.request_id})
log.info("Service B responded with status code %s and data `%s`",
resp.status_code, resp.json())
data.update(resp.json())
log.info("Sending request to Service C")
resp = requests.post("%s/spam" % service_c_endpoint,
headers={"X-Request-ID": flask.g.request_id})
log.info("Service C responded with status code %s and data `%s`")
data.update(resp.json())
log.info("The overall response is `%s'", data)
return flask.jsonify(data=data)
log.info("The Service A is ready and waiting for requests")
| PanGan21/distributed-tracing | serviceA/service_a.py | service_a.py | py | 1,738 | python | en | code | 0 | github-code | 13 |
38401237796 | # simfin data example 1
# https://simfin.com/data/access/download
# output-mixeddet-quarters-gaps-publish-semicolon-wide
# SW API key
# 6BEqsSZGmXpbrRjS06PoHU8l78R3gBqS
# https://github.com/SimFin/api-tutorial
# import pandas
# location = 'C:/Users/SW/Downloads/output-mixeddet-quarters-gaps-publish-semicolon-wide/'
# firsttest = pandas.read_csv(location + 'output-semicolon-wide.csv',
# sep=';', nrows = 5)
# code to auto add packages
import subprocess
import sys
def install(package):
    """Best-effort install of *package* into the current interpreter via pip.

    The exit status is deliberately ignored (subprocess.call semantics).
    """
    cmd = [sys.executable, "-m", "pip", "install", package]
    subprocess.call(cmd)
install("requests")
import requests
api_key = "6BEqsSZGmXpbrRjS06PoHU8l78R3gBqS"
tickers = ["AAPL","NVDA","WMT"]
sim_ids = []
for ticker in tickers:
request_url = f'https://simfin.com/api/v1/info/find-id/ticker/{ticker}?api-key={api_key}'
content = requests.get(request_url)
data = content.json()
if "error" in data or len(data) < 1:
sim_ids.append(None)
else:
sim_ids.append(data[0]['simId'])
print(sim_ids)
# define time periods for financial statement data
statement_type = "pl"
time_periods = ["Q1","Q2","Q3","Q4"]
year_start = 2013
year_end = 2018
# prep writer
install("pandas")
import pandas as pd
install("xlsxwriter")
import xlsxwriter
writer = pd.ExcelWriter("simfin_data.xlsx", engine='xlsxwriter')
data = {}
# get standardized financial statement, one sheet of line items per ticker.
# (A redundant second `data = {}` initialisation was removed here.)
for idx, sim_id in enumerate(sim_ids):
    d = data[tickers[idx]] = {"Line Item": []}
    if sim_id is not None:
        for year in range(year_start, year_end + 1):
            for time_period in time_periods:
                period_identifier = time_period + "-" + str(year)
                if period_identifier not in d:
                    d[period_identifier] = []
                request_url = f'https://simfin.com/api/v1/companies/id/{sim_id}/statements/standardised?stype={statement_type}&fyear={year}&ptype={time_period}&api-key={api_key}'
                content = requests.get(request_url)
                statement_data = content.json()
                if 'values' in statement_data:
                    # Bug fix: only read statement_data['values'] after
                    # confirming the key exists — previously the line-item
                    # collection below ran unconditionally and raised
                    # KeyError whenever the first response had no data.
                    # Line item names are collected once; they are the same
                    # for all companies with the standardised data.
                    if len(d['Line Item']) == 0:
                        d['Line Item'] = [x['standardisedName'] for x in statement_data['values']]
                    for item in statement_data['values']:
                        d[period_identifier].append(item['valueChosen'])
                else:
                    # no data found for time period
                    d[period_identifier] = [None for _ in d['Line Item']]
    # saving to xlsx: convert to pandas dataframe and write one sheet
    df = pd.DataFrame(data=d)
    df.to_excel(writer, sheet_name=tickers[idx])
# ExcelWriter.save() is deprecated (removed in modern pandas); close()
# flushes and writes the workbook.
writer.close()
| sws144/learning-python | simfin.py | simfin.py | py | 2,849 | python | en | code | 0 | github-code | 13 |
18864442897 | from datetime import datetime
import numpy as np
import pandas as pd
import os
import re
import json
import shutil
from mne_bids import BIDSPath
from collections import defaultdict
# hard-coded stuff: raw Pavlovia CSVs live in DATA_DIR, the BIDS-formatted
# output tree is written under OUTPUT_DIR (both relative to the CWD).
DATA_DIR = 'data'
OUTPUT_DIR = 'data_bids'
def extract_datetime(f):
    """Parse the Pavlovia timestamp embedded in a data filename.

    Expects a ``YYYY-MM-DD_HHhMM.SS.mmm`` fragment; the final field is
    milliseconds and is converted to the datetime microsecond argument.
    """
    pattern = '(\d+)-(\d+)-(\d+)_(\d+)h(\d+)\.(\d+)\.(\d+)'
    fields = [int(part) for part in re.findall(pattern, f)[0]]
    *date_and_time, millis = fields
    return datetime(*date_and_time, 1000 * millis)
def subtract_angles(a, b):
    '''
    Signed angular difference a - b, wrapped into [-pi, pi).

    Parameters
    -----------
    a : float
        in radians
    b : float
        in radians

    Returns
    ----------
    delta : float
        the difference a - b, in radians
    '''
    raw = a - b
    return (raw + np.pi) % (2 * np.pi) - np.pi
def unpack_survey_responses(row, questions):
    """Expand one jsPsych survey row into a long-format DataFrame.

    The JSON response maps item keys like 'Q0' to 0-indexed choices; choices
    are shifted to be 1-indexed and each row gets a 1-indexed trial label
    ('Q1', 'Q2', ...) derived from the item key.
    """
    answers = pd.Series(json.loads(row.response)) + 1  # 1-index the choices
    table = pd.DataFrame({'response': answers, 'question': questions})
    numbers = table.index.str.extract('(\d+)').astype(int) + 1
    labels = ('Q' + numbers.astype(str)).iloc[:, 0].to_numpy()
    table.insert(0, 'trial', labels)
    return table
# gather data files & metadata
fnames = os.listdir(DATA_DIR)
fnames = [f for f in fnames if '.csv' in f]
fnames.sort(key = extract_datetime)
# Bug fix: timestamps must be computed *after* sorting — previously the
# timestamp list was in directory order while fnames/sub_ids were in
# chronological order, so the participants table paired subjects with the
# wrong timestamps.
timestamps = [extract_datetime(f) for f in fnames]
sub_ids = ['%03d'%(i + 1) for i in range(len(fnames))] # order of timestamps
fpaths = [os.path.join(DATA_DIR, f) for f in fnames]
# get survey questions so we may record them for posterity
with open('questions.json', 'r') as qf:
    survey_questions = json.load(qf)
# per-subject metadata keyed by subject id, with safe defaults for
# participants whose files cannot be parsed
worker_ids = defaultdict(lambda: None)
ages = defaultdict(lambda: None)
sexes = defaultdict(lambda: None)
has_all_data = defaultdict(lambda: False)
# One participant per raw CSV.  The outer bare except skips any file whose
# session is partial or corrupt; has_all_data stays False for those subjects.
# NOTE(review): bare excepts hide real bugs — consider logging the exception.
for f, sub, t in zip(fpaths, sub_ids, timestamps):
    try:
        df = pd.read_csv(f)
        # extract demographic info
        try:
            resp = df[df['trial_type'] == 'survey-multi-choice'].iloc[2]['response']
            resp = json.loads(resp)
            sexes[sub] = resp['sex'][0]
            resp = df[df['trial_type'] == 'survey-text'].iloc[0]['response']
            resp = json.loads(resp)
            ages[sub] = int(resp['Q0'])
            if ages[sub] < 18: # minimum age to get a prolific account
                ages[sub] = None # Some participants just put 0 to decline putting in their age
        except:
            try: # earlier subjects got demographic questions in a different format
                resp = df[df['trial_type'] == 'survey-multi-choice'].iloc[2]['response']
                resp = json.loads(resp)
                sexes[sub] = resp['gender'][0]
                resp = df[df['trial_type'] == 'survey-text'].iloc[0]['response']
                resp = json.loads(resp)
                # age estimated from a birth year given as e.g. "MM/DD/YYYY"
                ages[sub] = int(2023 - int(resp['Q0'].split('/')[-1])) + 1
            except:
                sexes[sub] = None
                ages[sub] = None
        # save worker ID
        worker_ids[sub] = df.subject_id[0]
        ## parse libet data and save
        libet = df[df.trial_type == 'libet']
        libet = libet[['cond_bo', 'cond_kt', 'early', 'theta_initial',
                        'clock_start_ms', 'keypress_ms', 'theta_keypress',
                        'theta_tone', 'tone_ms', 'theta_target', 'spin_continue_ms',
                        'theta_est_0', 'theta_est', 'tone_delay_ms', 'timeout']]
        # drop trials where the response was too early or timed out
        libet = libet[~(libet.early | libet.timeout)]
        cond = libet.cond_bo + '_' + libet.cond_kt
        libet.insert(0, 'trial', cond)
        libet = libet.drop(['cond_bo', 'cond_kt', 'timeout', 'early'], axis = 1)
        # compute difference between actual and estimate times
        delta = subtract_angles(libet.theta_est, libet.theta_target)
        delta = -delta # flip sign, since clock was moving counterclockwise
        # record difference
        clock_period_ms = 2560
        libet['overest_rad'] = delta
        libet['overest_ms'] = delta / (2*np.pi) * clock_period_ms
        # and save to tsv
        bids_path = BIDSPath(
            subject = sub,
            task = 'libet',
            datatype = 'beh',
            root = OUTPUT_DIR,
            suffix = 'beh',
            extension = 'tsv'
        )
        bids_path.mkdir()
        libet.to_csv(str(bids_path), sep = '\t', na_rep = 'n/a', index = False)
        # pull out dot-motion trials
        dot = df[df.trial_type == 'dot-motion']
        dot = dot[[
            'controlLevel', 'correct', 'confidenceLevel',
            'key_press', 'test_part', 'staircase', 'reverse'
        ]]
        # remove practice and timed-out trials
        dot = dot[(dot.test_part == 'dot_catch_trial') | (dot.test_part == 'dot_stimulus')]
        # add trial description
        trial_type = dot.test_part.str.extract('dot_([^\W_]+)').replace('stimulus', 'staircase')
        dot.insert(0, 'trial', trial_type.iloc[:, 0].to_numpy())
        dot = dot.drop(['test_part'], axis = 1)
        # and save
        bids_path = BIDSPath(
            subject = sub,
            task = 'dotMotion',
            datatype = 'beh',
            root = OUTPUT_DIR,
            suffix = 'beh',
            extension = 'tsv'
        )
        dot.to_csv(str(bids_path), sep = '\t', na_rep = 'n/a', index = False)
        ## SoA scale (first survey-likert block in the session)
        row = df[df.trial_type == 'survey-likert'].iloc[0]
        questions = survey_questions['Sense of Agency Scale']
        SoAScale = unpack_survey_responses(row, questions)
        bids_path = BIDSPath(
            subject = sub,
            task = 'SoAScale',
            datatype = 'beh',
            root = OUTPUT_DIR,
            suffix = 'beh',
            extension = 'tsv'
        )
        SoAScale.to_csv(str(bids_path), sep = '\t', na_rep = 'n/a', index = False)
        ## ESoS scale
        row = df[df.trial_type == 'survey-likert'].iloc[1]
        questions = survey_questions['Embodied Sense of Self Scale']
        ESoSScale = unpack_survey_responses(row, questions)
        bids_path = BIDSPath(
            subject = sub,
            task = 'ESoSScale',
            datatype = 'beh',
            root = OUTPUT_DIR,
            suffix = 'beh',
            extension = 'tsv'
        )
        ESoSScale.to_csv(str(bids_path), sep = '\t', na_rep = 'n/a', index = False)
        ## tellegan scale
        row = df[df.trial_type == 'survey-likert'].iloc[2]
        questions = survey_questions['Tellegan Absorption Scale']
        tellegan = unpack_survey_responses(row, questions)
        bids_path = BIDSPath(
            subject = sub,
            task = 'tellegan',
            datatype = 'beh',
            root = OUTPUT_DIR,
            suffix = 'beh',
            extension = 'tsv'
        )
        tellegan.response = tellegan.response == 1 # convert to boolean
        tellegan.to_csv(str(bids_path), sep = '\t', na_rep = 'n/a', index = False)
        # if you've gotten this far, all data seems to be there
        has_all_data[sub] = True
    except:
        continue
# save subject metadata
# NOTE(review): `ages` and `sexes` are rebound from defaultdicts to plain
# lists here, shadowing the names used by the loop above.
wids = [worker_ids[sub] for sub in sub_ids]
has_data = [has_all_data[sub] for sub in sub_ids]
ages = [ages[sub] for sub in sub_ids]
sexes = [sexes[sub] for sub in sub_ids]
participants = pd.DataFrame({
    'participant_id': sub_ids,
    'age': ages,
    'sex' : sexes,
    'complete': has_data,
    'timestamp': [t.strftime("%Y-%m-%dT%H:%M:%S.%f") for t in timestamps],
    'prolific_id': wids
})
out_f = os.path.join(OUTPUT_DIR, 'participants.tsv')
participants.to_csv(out_f, sep = '\t', na_rep = 'n/a', index = False)
# and copy original data to BIDS source folder
# NOTE(review): copytree raises FileExistsError if the script is re-run with
# an existing output tree — confirm whether reruns are expected.
src_dir = os.path.join(OUTPUT_DIR, 'source')
shutil.copytree(DATA_DIR, src_dir)
| apex-lab/agency-battery-analysis | to_bids.py | to_bids.py | py | 7,752 | python | en | code | 0 | github-code | 13 |
3724711322 | import math
import operator as op
import time
import sys
#resource.setrlimit(resource.RLIMIT_STACK, (2**29,-1))
# Deep recursion is expected: eval() recurses per nested expression.
sys.setrecursionlimit(10**6)
# (define fact (lambda (n) (if (<= n 1) 1 (+ n (fact (- n 1))))))
# (define fib (lambda (n) (if (< n 2) n (+ (fib (- n 1)) (fib (- n 2))))))
# (set! adam 'xyz')
Symbol = str # A Lisp Symbol is implemented as a Python str
List = list # A Lisp List is implemented as a Python list
Number = (int, float) # A Lisp Number is implemented as a Python int or float
# Instrumentation counter: number of Env.find lookups performed.
antal = 0
def parse(program):
    """Read a Scheme expression from a source string into a nested-list AST."""
    tokens = tokenize(program)
    return read_from_tokens(tokens)
def tokenize(s):
    """Split a Scheme source string into a flat token list.

    Parentheses are padded with spaces so they become their own tokens.
    """
    padded = s.replace('(', ' ( ').replace(')', ' ) ')
    return padded.split()
def read_from_tokens(tokens):
    # Recursively build a nested-list AST from the token stream, consuming
    # tokens from the front (mutates the caller's list).
    if len(tokens) == 0:
        raise SyntaxError('unexpected EOF while reading')
    token = tokens.pop(0)
    if '(' == token:
        L = []
        while tokens[0] != ')':
            L.append(read_from_tokens(tokens))
        tokens.pop(0) # pop off ')'
        return L
    elif ')' == token:
        raise SyntaxError('unexpected )')
    else:
        return atom(token)
def atom(token):
    """Convert a token: int if possible, else float, else a Symbol (str)."""
    for caster in (int, float):
        try:
            return caster(token)
        except ValueError:
            pass
    return str(token)
def standard_env():
    # Build the global Scheme environment: everything from the math module
    # plus arithmetic/list primitives mapped onto Python callables.
    env = Env()
    env.update(vars(math)) # sin, cos, sqrt, pi, ...
    env.update({
        '+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,
        '>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
        'abs': abs,
        'append': op.add,
        # 'apply': apply,
        'begin': lambda *x: x[-1],
        'car': lambda x: x[0],
        'cdr': lambda x: x[1:],
        'cons': lambda x,y: [x] + y,
        'eq?': op.is_,
        'equal?': op.eq,
        'length': len,
        'list': lambda *x: list(x),
        'list?': lambda x: isinstance(x,list),
        # NOTE(review): in Python 3 `map` returns an iterator, not a list —
        # Scheme code consuming its result as a list will misbehave.
        'map': map,
        'max': max,
        'min': min,
        'not': op.not_,
        'null?': lambda x: x == [],
        'number?': lambda x: isinstance(x, Number),
        'procedure?': callable,
        'round': round,
        'symbol?': lambda x: isinstance(x, Symbol),
    })
    return env
class Env(dict):
    # An environment: a dict of {symbol: value} frames chained to an outer
    # frame (lexical scoping).
    def __init__(self, parms=(), args=(), outer=None):
        self.update(zip(parms, args))
        self.outer = outer
    def find(self, var):
        # Return the innermost frame in which `var` is bound; also bumps the
        # module-level lookup counter used for instrumentation.
        global antal
        antal += 1
        #env = self
        # while env!=None and var not in env:
        #     env = self.outer
        #     return env
        #print(var)
        # NOTE(review): an unbound symbol recurses until outer is None and
        # raises AttributeError instead of a clear "undefined symbol" error.
        return self if (var in self) else self.outer.find(var)
global_env = standard_env()
def repl(prompt='lis.py> '):
    """Read-eval-print loop: parse a line, evaluate it, time it, print it.

    Bug fix: the original did ``parsed = print(parse(s))``, which binds
    ``parsed`` to None (print returns None), so eval() never saw the parsed
    expression and the REPL silently printed nothing.
    """
    while True:
        s = input(prompt)
        start = time.time_ns()
        parsed = parse(s)
        print(parsed)  # echo the parsed AST (debug aid, as before)
        val = eval(parsed)
        print((time.time_ns() - start) / 10 ** 9)
        if val is not None:
            print(lispstr(val))
        print(antal)
def lispstr(exp):
    """Render a Python AST value back into a Scheme-readable string."""
    if not isinstance(exp, list):
        return str(exp)
    return '(' + ' '.join(lispstr(item) for item in exp) + ')'
class Procedure(object):
    # A user-defined Scheme procedure (closure): calling it evaluates the
    # body in a fresh Env binding the parameters, chained to the defining env.
    def __init__(self, parms, body, env):
        self.parms, self.body, self.env = parms, body, env
    def __call__(self, *args):
        return eval(self.body, Env(self.parms, args, self.env))
def eval(x, env=global_env):
    # Evaluate a parsed expression in an environment.
    # NOTE: shadows the built-in eval.  Special forms (quote/if/define/set!/
    # lambda) are dispatched by head symbol before falling through to
    # procedure application.
    if isinstance(x, Symbol):      # variable reference
        return env.find(x)[x]
    elif not isinstance(x, List):  # constant literal
        return x
    elif x[0] == 'quote':          # (quote exp)
        (_, exp) = x
        return exp
    elif x[0] == 'if':             # (if test conseq alt)
        (_, test, conseq, alt) = x
        exp = (conseq if eval(test, env) else alt)
        return eval(exp, env)
    elif x[0] == 'define':         # (define var exp)
        (_, var, exp) = x
        env[var] = eval(exp, env)
    elif x[0] == 'set!':           # (set! var exp)
        # NOTE(review): like `define`, this writes into the *current* env
        # rather than the frame where var is bound (no env.find here).
        (_, var, exp) = x
        env[var] = eval(exp, env)
    elif x[0] == 'lambda':         # (lambda (var...) body)
        (_, parms, body) = x
        return Procedure(parms, body, env)
    else:                          # (proc arg...)
        proc = eval(x[0], env)
        args = [eval(exp, env) for exp in x[1:]]
        return proc(*args)
repl() | ChristerNilsson/2023 | 041-Lisp/lis.py | lis.py | py | 3,624 | python | en | code | 0 | github-code | 13 |
3833699258 |
def get_envelope(inputSignal, intervalLength=35):
    """Rectify a signal and return its running-peak envelope.

    Each output sample is the maximum absolute value over a trailing window
    of ``intervalLength`` samples (generalised from the hard-coded 35; tune
    it to the signal's frequency content and time scale).

    Bug fix: the previous look-back used ``absoluteSignal[baseIndex -
    lookbackIndex]``, which goes negative for the first ``intervalLength``
    samples and — via Python negative indexing — silently wraps around to
    the END of the signal.  The window start is now clamped at 0.
    """
    absoluteSignal = [abs(sample) for sample in inputSignal]
    outputSignal = []
    for baseIndex in range(len(absoluteSignal)):
        start = max(0, baseIndex - intervalLength + 1)
        outputSignal.append(max(absoluteSignal[start:baseIndex + 1]))
    return outputSignal
74403150097 | import sys
def read_problem(infile):
    """Parse one test case from the input stream.

    Returns (entry, exit, matrix) where entry/exit are (row, col) tuples and
    matrix is N rows of M integers.

    Bug fix: ``file.next()`` was removed in Python 3 — use the ``next()``
    builtin, which works on any iterator of lines.
    """
    pack = [int(i) for i in next(infile).split(' ')]
    M = pack[0]  # number of columns (not otherwise used here)
    N = pack[1]
    en_x, en_y, ex_x, ex_y = [int(i) for i in next(infile).split(' ')]
    en = (en_x, en_y)
    ex = (ex_x, ex_y)
    matrix = [[int(j) for j in next(infile).split(' ')] for i in range(N)]
    return (en, ex, matrix)
def get_submatrix(en, ex, matrix):
    """Extract the submatrix spanning entry ``en`` to exit ``ex`` (inclusive),
    oriented so the entry cell comes first in both dimensions.

    Returns (n_rows, n_cols, submatrix).

    Bug fix: the row-direction test compared ``en[0]`` with ``ex[1]`` (a row
    index against a column index); it must compare row against row, otherwise
    an entry below the exit yields an empty row range.
    """
    stepx = 1
    stepy = 1
    if en[0] - ex[0] > 0:  # entry below exit: walk rows upwards
        stepx = -1
    if en[1] - ex[1] > 0:  # entry right of exit: walk columns leftwards
        stepy = -1
    sizex = abs(en[0] - ex[0])
    sizey = abs(en[1] - ex[1])
    submatrix = []
    for i in range(en[0], ex[0] + stepx, stepx):
        submatrix.append([])
        for j in range(en[1], ex[1] + stepy, stepy):
            submatrix[-1].append(matrix[i][j])
    return (sizex + 1, sizey + 1, submatrix)
# Dynamic program over anti-diagonals, intended to maximise collected credit
# on a monotone path from entry to exit (-1 cells are walls).
# NOTE(review): this function looks unfinished/broken:
#   * the DP reads `matrix` (the full grid) instead of `submatrix`;
#   * `if j < 0 or j<0` repeats the same condition (second clause was
#     probably meant to be `i < 0`);
#   * the anti-diagonal bounds (`max(sizex, sizey)-1`, `x > sizey`,
#     `i>sizex or j > sizey`) are off-by-one for non-square grids;
#   * the final answer returns the raw exit cell of `submatrix`, not the
#     accumulated `credit_matrix` value.
# Confirm intent before relying on its output.
def solve_problem(prob):
    en = prob[0]
    ex = prob[1]
    matrix = prob[2]
    sizex, sizey, submatrix = get_submatrix(en,ex,matrix)
    credit_matrix = [[None for i in range(sizey)] for j in range(sizex)]
    # initial condition of dy
    credit_matrix[0][0] = submatrix[0][0]
    print(submatrix)
    for x in range(1,max(sizex, sizey)-1):
        if x > sizey:
            break;
        for y in range(max(sizex, sizey)-1):
            # walk the x-th anti-diagonal: cell (i, j) with i + j == x
            i = y
            j = x-y
            if j < 0 or j<0:
                break
            elif i>sizex or j > sizey:
                continue
            if matrix[i][j] == -1:
                credit_matrix[i][j] = -1
            else:
                cur_credit = matrix[i][j]
                max_credit = -1
                if i-1>=0:
                    if matrix[i-1][j]!=-1 and cur_credit+matrix[i-1][j] > max_credit :
                        max_credit = cur_credit+matrix[i-1][j]
                if j-1>=0:
                    if matrix[i][j-1]!=-1 and cur_credit+matrix[i][j-1] > max_credit :
                        max_credit = cur_credit+matrix[i][j-1]
                credit_matrix[i][j] = max_credit
    if submatrix[sizex-1][sizey-1] == -1:
        return "Mission Impossible"
    else:
        return str(submatrix[sizex-1][sizey-1])
def main():
    """Read all test cases from argv[1] and write one answer line per case
    to argv[2] in Code Jam format.

    Bug fix: ``file.next()`` does not exist in Python 3 — use ``next()``.
    Also closes both files instead of leaking the handles.
    """
    infile = open(sys.argv[1], 'r')
    outfile = open(sys.argv[2], 'w')
    T = int(next(infile))
    problems = [read_problem(infile) for i in range(T)]
    results = [solve_problem(prob) for prob in problems]
    for i in range(T):
        outfile.write("Case #{}: {}\n".format(i + 1, results[i]))
    infile.close()
    outfile.close()
if __name__ == "__main__":
main()
| litao91/googlecodejam | gchina/roundD/roundD.py | roundD.py | py | 2,480 | python | en | code | 1 | github-code | 13 |
12451197754 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 11:06:21 2021
@author: owatson2
"""
# import os
# print(os.getcwd())
# os.chdir("DF_Python")
def clean_csv(filename):
    """Split a CSV into rows with the expected column count and the rest.

    Rows whose comma count matches the FIRST row go to ``csv_cleaned.csv``;
    all other rows go to ``csv_old.csv``.  Both outputs are then printed.

    Improvements: files are managed with ``with`` blocks (the original left
    the re-opened read handles — and, on any error, the write handles —
    unclosed), and the per-character comma counting is replaced by
    ``str.count``.
    """
    expected = None
    with open(filename, "r") as source, \
         open("csv_cleaned.csv", "w") as cleaned, \
         open("csv_old.csv", "w") as rejected:
        for line in source:
            commas = line.count(',')
            if expected is None:
                # The first row defines the expected number of columns.
                expected = commas
            if commas == expected:
                cleaned.write(line)
            else:
                rejected.write(line)
    # Reopen the outputs and print them, as before.
    with open("csv_cleaned.csv", "r") as cleaned:
        print(cleaned.read())
    with open("csv_old.csv", "r") as rejected:
        print(rejected.read())
#clean_csv("csv_cleaning.csv")
# Manually reconstruct the two malformed rows into a new file.
# NOTE(review): the two rows are written back-to-back with no newline
# separator, and the output filename has no .csv extension — confirm whether
# that is intended.
excel_old_cleaned = open("csv_old_cleaned", "w")
for i in range(0,2):
    if i == 0:
        excel_old_cleaned.write("Alice, 27, 1 Main Street - Townsville - Berks, My friend recommended it, 8")
    else:
        excel_old_cleaned.write("Carla, 17, My House, I’ve always been interested and now seemed like a great time, 5")
excel_old_cleaned.close()
excel_old_cleaned = open("csv_old_cleaned", "r")
print(excel_old_cleaned.read()) | livwatson/Data_Fellowship | DF_Python/csv_cleaning_stretch.py | csv_cleaning_stretch.py | py | 1,506 | python | en | code | 0 | github-code | 13 |
37251413244 | t = int(input())
while(t>0):
t-=1
x = str(input())
var = 0
for element in x:
if(element=='0'):
var=var+1
else:
var= var-1
var = abs(var)
if(len(x)%2==1):
print("-1")
else:
print(int(var/2)) | parthsarthiprasad/Competitive_Programming | codechef/december_cook/p3.py | p3.py | py | 273 | python | en | code | 0 | github-code | 13 |
26198222072 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Definition of basic score functions that applies to a point prediction or a single prediction interval.
Author: Guillaume St-Onge <stongeg1@gmail.com>
"""
import numpy as np
def interval_score(observation, lower, upper, interval_range, specify_range_out=False):
    """Compute the (decomposed) interval score of a central prediction interval.

    Parameters
    ----------
    observation : array_like
        Vector of observed values.
    lower, upper : array_like
        Predictions for the lower / upper quantiles of the interval.
    interval_range : int
        Percentage of probability mass the interval is meant to cover; e.g.
        90 for the interval between the 0.05 and 0.95 quantiles.
    specify_range_out : bool, optional
        When True, prefix every output key with the interval range.

    Returns
    -------
    dict
        Vectors for the interval score and its dispersion, underprediction
        and overprediction components.

    Raises
    ------
    ValueError
        If the three vectors differ in length, or interval_range is not
        between 0 and 100.
    """
    if len(lower) != len(upper) or len(lower) != len(observation):
        raise ValueError("vector shape mismatch")
    if interval_range > 100 or interval_range < 0:
        raise ValueError("interval range should be between 0 and 100")
    # make sure vector operations work
    y = np.array(observation)
    lo = np.array(lower)
    hi = np.array(upper)
    alpha = 1 - interval_range / 100  # probability mass outside the interval
    width = hi - lo
    below = (2 / alpha) * (lo - y) * (y < lo)
    above = (2 / alpha) * (y - hi) * (y > hi)
    total = width + below + above
    prefix = f'{interval_range}_' if specify_range_out else ''
    return {prefix + 'interval_score': total,
            prefix + 'dispersion': width,
            prefix + 'underprediction': below,
            prefix + 'overprediction': above}
def coverage(observation, lower, upper):
    """Fraction of observations falling inside [lower, upper] (inclusive).

    Parameters
    ----------
    observation : array_like
        Vector of observations.
    lower, upper : array_like
        Predictions for the lower / upper quantiles.

    Returns
    -------
    float
        Fraction of observations within the bounds.

    Raises
    ------
    ValueError
        If the three vectors differ in length.
    """
    if len(lower) != len(upper) or len(lower) != len(observation):
        raise ValueError("vector shape mismatch")
    y = np.array(observation)
    lo = np.array(lower)
    hi = np.array(upper)
    inside = (y >= lo) & (y <= hi)
    return np.mean(inside)
if __name__ == '__main__':
    # Smoke test: exactly one of these three intervals contains 0.5,
    # so this prints 1/3.
    observation = np.array([0.5]*3)
    lower = np.array([0.4,0.5,0.6])
    upper = np.array([0.45,0.55,0.65])
    print(coverage(observation,lower,upper))
| gstonge/scorepi | scorepi/score_functions.py | score_functions.py | py | 3,235 | python | en | code | 1 | github-code | 13 |
7524505882 | import json
import re
import scipy as sp
import humanhash
from itertools import chain
from listing_scraper import get_search_options, get_coords
WEEKS_PER_MONTH = 365/12./7
# Distances are in km, times in minutes.
EARTH_CIRCUMFERENCE = 40075
KM_PER_MILE = 1.609
# Expected distance from centre for a uniformly random point in a disc of
# radius R is (2/3)R — used to turn the search radius into a typical walk.
MEAN_RADIUS_OF_POINT_IN_UNIT_DISC = 2./3.
WALKING_SPEED = 5./60.#km per minute
MAX_DISTANCE_FROM_STATION_IN_KM = KM_PER_MILE*get_search_options()['radius']
MAX_DISTANCE_FROM_STATION_IN_MINS = int(MEAN_RADIUS_OF_POINT_IN_UNIT_DISC*MAX_DISTANCE_FROM_STATION_IN_KM/WALKING_SPEED)
# Month names / numeric dates accepted when scanning listing text for an
# availability date.
DATE_REGEX = '(?:aug|august|sep|sept|september|oct|october|now|immediately|\d+/\d+|\d+.\d+)'
AVAILABILITY_REGEX = 'available.{,20}' + DATE_REGEX
def get_availabilities(listing):
    """All 'available ... <date>' snippets found in the listing's free text."""
    fields = (listing['description'], listing['property_info'])
    matches = [re.findall(AVAILABILITY_REGEX, text, flags=re.IGNORECASE)
               for text in fields]
    return matches[0] + matches[1]
def is_available_in_sept(listing):
    """True if any availability snippet mentions September (name or 09/9 date)."""
    for snippet in get_availabilities(listing):
        if re.findall('sep|\.09\.|\.9\.|/09/|/9/', snippet, flags=re.IGNORECASE):
            return True
    return False
def should_be_included(listing):
    """Keep a listing only if it has photos and is available in September."""
    if listing['photo_filenames'] and is_available_in_sept(listing):
        return True
    return False
def walking_distance(lat_1, lon_1, lat_2, lon_2):
    """Walking time in whole minutes between two lat/lon points.

    Uses an equirectangular approximation: latitude degrees map directly to
    km along a meridian, and longitude degrees are scaled by the Earth's
    circumference at the mean latitude of the two points.
    """
    # scipy removed its numpy-alias functions (sp.cos, sp.pi, sp.sqrt,
    # sp.ceil) in modern releases; the stdlib math module is the right tool.
    import math
    change_in_lat = EARTH_CIRCUMFERENCE*(lat_1 - lat_2)/360
    average_lat = (lat_1 + lat_2)/2
    circumference_at_lat = EARTH_CIRCUMFERENCE*math.cos(math.pi/180*average_lat)
    change_in_lon = circumference_at_lat*(lon_1 - lon_2)/360
    distance_in_km = math.hypot(change_in_lat, change_in_lon)
    distance_in_minutes = distance_in_km/WALKING_SPEED
    return int(math.ceil(distance_in_minutes))
def distance_from_station(lat, lon, station_name):
    """Walking time in minutes from (lat, lon) to the named station."""
    return walking_distance(lat, lon, *get_coords(station_name))
def distances_from_stations(listing):
    """Map each of the listing's station names to a walking time in minutes.

    Falls back to the search-radius maximum when the listing has no coordinates.
    """
    station_names = listing['station_name']
    if 'latitude' not in listing or 'longitude' not in listing:
        return {name: MAX_DISTANCE_FROM_STATION_IN_MINS for name in station_names}
    lat, lon = listing['latitude'], listing['longitude']
    return {name: distance_from_station(lat, lon, name) for name in station_names}
def get_commutes(listing):
    """Shortest total commute (minutes) to each office via any of the listing's stations."""
    station_names = listing['station_name']
    with open('resources/commute_lengths.json', 'r') as f:
        commutes = json.load(f)
    distances = distances_from_stations(listing)
    offices = (('Aldgate', 'Aldgate Underground Station'),
               ('Aldgate East', 'Aldgate East Underground Station'))
    result = {}
    for office, office_station in offices:
        office_commutes = commutes[office_station]
        result[office] = min(
            int(office_commutes[name + ' Underground Station']) + distances[name]
            for name in station_names)
    return result
def get_humanhash(listing):
    # Stable two-word human-readable alias derived from the listing id.
    return humanhash.humanize(listing['listing_id'], words=2)
def is_ignored(listing):
    """Return the stored ignore-reason for this listing, or '' when not ignored."""
    with open('resources/ignores.json') as f:
        ignores = json.load(f)
    return ignores.get(get_humanhash(listing), '')
def has_expired(listing, listings):
    """A listing is expired if it vanished from the latest scrape or is no longer rentable.

    `listings` is the full collection; the newest store time across all of
    them is treated as the time of the last scrape.
    """
    # chain.from_iterable avoids materializing an intermediate list and
    # unpacking it with *, unlike the original chain(*[...]).
    last_scrape_time = max(chain.from_iterable(l['store_times'] for l in listings))
    last_store_time = max(listing['store_times'])
    listing_removed = last_store_time < last_scrape_time
    unrentable = listing['status'] != 'to_rent'
    return listing_removed or unrentable
def sort_listings(listings):
    """Order listings newest-first, pushing expired/ignored ones to the end."""
    def sort_key(listing):
        deprioritised = listing['expired'] or listing['ignored']
        return (not deprioritised, listing['last_published_date'])
    return sorted(listings, key=sort_key, reverse=True)
def get_listings():
    """Load stored listings, enrich the keepers with derived fields, and sort them."""
    with open('resources/listings.json', 'r') as f:
        listings = json.load(f).values()
    included_listings = [l for l in listings if should_be_included(l)]
    for listing in included_listings:
        availabilities = get_availabilities(listing)
        listing.update(
            monthly_price=int(WEEKS_PER_MONTH*int(listing['price'])),
            availabilities=availabilities,
            commutes=get_commutes(listing),
            hashname=get_humanhash(listing),
            expired=has_expired(listing, listings),
            ignored=is_ignored(listing),
            printable_station_names=', '.join(listing['station_name']),
            printable_availabilities='"{}"'.format('" or "'.join(availabilities))
        )
    return sort_listings(included_listings)
| andyljones/flat-scraper | listing_transformer.py | listing_transformer.py | py | 4,343 | python | en | code | 0 | github-code | 13 |
41667685770 | import random
import sys
value = random.randint(0, 100)
lowestNumber = 0
highestNumber = 100
def guessnumber():
    """Play one round: read guesses and narrow the range until the number is hit."""
    global lowestNumber
    global highestNumber
    print("Wähle eine Nummer zwischen "+ str(lowestNumber) + " und "+ str(highestNumber))
    guess = int(input(""))
    if value == guess:
        print("Herzlichen Glückwunsch, diese Zahl ist korrekt!")
        print("")
        print("Nochmal? (j/n)")
        restart = input("")
        # Bug fix: the original compared against lowercase "j" twice, so an
        # uppercase "J" was never accepted for a restart.
        if restart in ("j", "J"):
            start()
        else:
            sys.exit()
    elif value > guess:
        print("Leider nicht richtig, die Zahl ist größer")
        if guess >= lowestNumber:
            # Tighten the lower bound shown in the next prompt.
            lowestNumber = guess
        else:
            print("...aber das sollte ja eigentlich klar sein?")
        guessnumber()
    else:
        print("Leider nicht richtig, die Zahl ist kleiner")
        if guess <= highestNumber:
            # Tighten the upper bound shown in the next prompt.
            highestNumber = guess
        else:
            print("...aber das sollte ja eigentlich klar sein?")
        guessnumber()
def start():
    """Reset the guessing range, draw a fresh random number, and play a round."""
    global value, lowestNumber, highestNumber
    lowestNumber, highestNumber = 0, 100
    value = random.randint(0, 100)
    guessnumber()
# Kick off the first round on module load.
guessnumber()
| NotGoodWithNamingStuff/randomNumberGuesser | main.py | main.py | py | 1,290 | python | de | code | 0 | github-code | 13 |
25525617268 | import numpy as np
from ray.rllib.env import PettingZooEnv, ParallelPettingZooEnv
from ray.rllib.utils.spaces import space_utils
from pettingzoo_env import CustomEnvironment
from aec_env import AsyncMapEnv
from ray.rllib.policy.policy import Policy
from ray.rllib.algorithms.algorithm import Algorithm
import imageio.v2 as imageio
import os
import glob
from collections import defaultdict
from pprint import pprint
import csv
from brute_search import SearchTree
import argparse
# Command-line interface: which checkpoint to load and how to run the env.
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint', type=int)
parser.add_argument('-p', '--prefix', dest='prefix', default='PPO')
parser.add_argument('-g', '--gif', dest='gif', action='store_true')
parser.add_argument('-e', '--eval', dest='eval', action='store_true')
# --start/--step control which checkpoints are swept during evaluation.
parser.add_argument('-s', '--start', dest='start', type=int, default=10)
parser.add_argument('--step', dest='step', type=int, default=10)
parser.add_argument('--path', dest='path', type=str, default='/tmp/rllib_checkpoint/')
parser.add_argument('-n', '--num_agents', dest='num_agents', type=int, default=1)
parser.add_argument('-m', '--map_size', dest='map_size', type=int, default=4)
parser.add_argument('-r', '--reinit_agents', action='store_true')
parser.add_argument('--small_obs', action='store_true')
args = parser.parse_args()
# Environment construction kwargs; the episode budget (num_iters) scales with
# the agent count and the map diagonal.
env_config = {
    'num_agents': args.num_agents,
    'map_size': args.map_size,
    'num_iters': args.num_agents * np.hypot(args.map_size, args.map_size),
    'reinit_agents': args.reinit_agents,
    'fit_split': 2,
    # 'render_mode': 'human'
}
# GIF mode: roll out one episode with human rendering, stitching the per-step
# PNGs written by the env (under figpath) into an animated GIF.
if args.gif:
    print("creating GIF")
    env_config['render_mode'] = 'human'
    env_config['figpath'] = 'figures/img'
    chkpt = f'{args.path}checkpoint_{str(args.checkpoint).zfill(6)}'
    restored_policy = Policy.from_checkpoint(chkpt)
    print("policy", restored_policy)
    env = PettingZooEnv(AsyncMapEnv(**env_config))
    obs, infos = env.reset()
    fname = f'{args.prefix}_{env_config["num_agents"]}agent_{env_config["map_size"]}x{env_config["map_size"]}_{args.checkpoint}'
    with imageio.get_writer(f'figures/{fname}.gif', mode='I', duration=0.3) as writer:
        x = 0
        while env.env.agents:
            # Hard cap of 100 frames per GIF.
            if x > 100:
                break
            curr_agent = env.env.agent_selection
            batch_obs = space_utils.flatten_to_single_ndarray(obs[curr_agent])
            action = [restored_policy['default_policy'].compute_single_action(batch_obs)][0][0]
            obs, rewards, terminations, truncations, infos = env.env.step(action)
            # print({curr_agent: action})
            # print(rewards['curr_agent'])
            try:
                writer.append_data(imageio.imread(f'{env_config["figpath"]}/img_{x}.png'))
            except FileNotFoundError:
                print(f"File {x} not found...")
                break
            finally:
                # check if all agents have finished
                if any([truncations[a] for a in truncations]):
                    print("Truncating...")
                    break
            x += 1
        # repeat final frame 5 times
        for i in range(4):
            writer.append_data(imageio.imread(f'{env_config["figpath"]}/img_{x-1}.png'))
    print(f"{env.env.pollution}")
    print(f"{env.env._cumulative_rewards}")
    # tidy up img dir
    files = glob.glob(f'{env_config["figpath"]}/*')
    for f in files:
        os.remove(f)
# Evaluation mode: sweep checkpoints [start, checkpoint] in steps of --step,
# averaging episode length, total reward, and pollution-optimality (brute-force
# optimal pollution / achieved pollution) over many episodes per checkpoint.
if args.eval:
    print("Evaluating performance")
    env_config['render_mode'] = None # remove rendering to accelerate
    raw_env = AsyncMapEnv(**env_config)
    env = PettingZooEnv(raw_env)
    for c in range(args.start, args.checkpoint+1, args.step):
        chkpt = f'{args.path}checkpoint_{str(c).zfill(6)}'
        print(chkpt)
        restored_policy = Policy.from_checkpoint(chkpt)
        length = defaultdict(list)
        tot_reward = defaultdict(list)
        poll_optimality = defaultdict(list)
        for i in range(int(np.ceil(800/args.num_agents))): # 80 runs for 10 agent, 800 for 1 agent
            obs, infos = env.reset()
            truncations = {
                agent: False for agent in env.env.possible_agents
            }
            # brute force optimal (non-congested) route
            search = SearchTree(env.env)
            search.build_tree()
            optimal_polls = search.pollutions
            reward_store = defaultdict(int)
            episode_lengths = defaultdict(int)
            while not any(truncations.values()): # until truncation
                if args.small_obs:
                    # Reduced observation: keep only the first cyclist entry.
                    raw_obs = obs[env.env.agent_selection]
                    raw_obs['cyclist'] = raw_obs['cyclist'][0]
                    batch_obs = space_utils.flatten_to_single_ndarray(raw_obs)
                else:
                    batch_obs = space_utils.flatten_to_single_ndarray(obs[env.env.agent_selection])
                action = [restored_policy['default_policy'].compute_single_action(batch_obs)][0][0]
                obs, rewards, terminations, truncations, infos = env.step({env.env.agent_selection: action})
                for agent, reward in rewards.items():
                    reward_store[agent] += reward
                    episode_lengths[agent] += 1
            for agent in reward_store:
                length[agent].append(episode_lengths[agent])
                tot_reward[agent].append(reward_store[agent])
                poll_optimality[agent].append(search.pollutions[agent] / env.env.pollution[agent])
        mean_len = np.mean([np.mean(length[agent]) for agent in length.keys()])
        mean_tot_reward = np.mean([np.mean(tot_reward[agent]) for agent in tot_reward.keys()])
        mean_optimality = [np.mean(poll_optimality[agent]) for agent in poll_optimality.keys()]
        print(f"Checkpoint {c}: \t{mean_len}\t{mean_tot_reward}\t{np.mean(mean_optimality)}")
        # Regroup optimality by agent fitness class (0..2), filling missing
        # classes with NaN so the CSV row below always has three values.
        fitness_optimality = defaultdict(list)
        for agent in poll_optimality.keys():
            fitness_optimality[env.env.agent_name_mapping[agent].fitness] += poll_optimality[agent]
        fitness_optimality = {fit: np.mean(fitness_optimality[fit]) for fit in fitness_optimality.keys()}
        for x in range(3):
            try:
                fitness_optimality[x]
            except KeyError:
                fitness_optimality[x] = np.nan
        pprint(fitness_optimality)
        with open('optimality_eval.csv', 'a', newline='') as csv_file:
            writer = csv.writer(csv_file)
writer.writerow([args.path, c, args.num_agents, mean_len, mean_tot_reward, np.mean(mean_optimality), fitness_optimality[0], fitness_optimality[1], fitness_optimality[2]]) | Langbridge/RL_RAR | policy_eval_aec.py | policy_eval_aec.py | py | 6,633 | python | en | code | 0 | github-code | 13 |
22843740959 | from utils.data_input_util import *
from utils.image_utils import *
import logging
import os
from random import randint
from torchinfo import summary
import numpy as np
import random
from PIL import Image
import pickle
import torch
import torchvision
import torch.distributed as dist
import torch.nn as nn
from torch.utils import data as data_utils
from helper_functions import accuracy_fn
from models.thoughtviz import DiscriminatorRGB, GeneratorRGB, CombinedDisClassifier,CombinedGD
from models.classification import *
device = "cuda"
# def train_step2(model: torch.nn.Module,
# data_loader: torch.utils.data.DataLoader,
# loss_fn: torch.nn.Module,
# optimizer: torch.optim.Optimizer,
# accuracy_fn,
# device: torch.device = device):
# train_loss, train_acc = 0, 0
# for batch, (X, y) in enumerate(data_loader):
# # Send data to GPU
# X, y = X.to(device), y.to(device)
# # print(X.shape)
# # print(y.shape)
# # 1. Forward pass
# fake,aux = model(X)
# # 2. Calculate loss
# loss = loss_fn(fake, aux)
# train_loss += loss
# train_acc += accuracy_fn(y_true=aux.argmax(dim=1),
# y_pred=fake.argmax(dim=1)) # Go from logits -> pred labels
# # 3. Optimizer zero grad
# optimizer.zero_grad()
# # 4. Loss backward
# loss.backward()
# # 5. Optimizer step
# optimizer.step()
# # Calculate loss and accuracy per epoch and print out what's happening
# train_loss /= len(data_loader)
# train_acc /= len(data_loader)
# print(f"Train loss: {train_loss:.5f} | Train accuracy: {train_acc:.2f}%")
# Module-level cache of layer outputs captured by the hooks below.
activation = {}
def get_activation(name):
    """Build a forward hook that stores a module's detached output under *name*."""
    def hook(module, inputs, output):
        activation[name] = output.detach()
    return hook
# def train_step(model: torch.nn.Module,
# data_loader: torch.utils.data.DataLoader,
# loss_fn: torch.nn.Module,
# optimizer: torch.optim.Optimizer,
# accuracy_fn,
# batch_size,
# eeg_classfier,
# y_test,
# generator,
# input_noise_dim=100,
# device: torch.device = device):
# for batch,(X,y) in enumerate(data_loader):
# noise = np.random.uniform(-1, 1, (batch_size, input_noise_dim))
# random_labels = np.random.randint(0, 10, batch_size)
# one_hot_vectors = [to_categorical(label, 10) for label in random_labels]
# eeg_feature_vectors = np.array([eeg_classfier[random.choice(np.where(y_test == random_label)[0])] for random_label in random_labels])
# real_images = X
# real_labels = y
# generated_images = generator(noise,eeg_feature_vectors)
def train_on_batch(model:torch.nn.Module,
                   X,y,
                   optim,loss_fn):
    """Run one discriminator training step on a batch and return the loss tensor."""
    X, y = X.to(device), y.to(device)
    # Match the (batch, 1) shape of the model's validity head output.
    y = y.unsqueeze(1)
    prediction, _labels = model(X)
    loss = loss_fn(prediction, y)
    optim.zero_grad()
    loss.backward()
    optim.step()
    return loss
def train_on_batch2(d:torch.nn.Module,
                    g:torch.nn.Module,
                    X,y,
                    optim,loss_fn1,loss_fn2):
    """Run one generator training step through the discriminator.

    X = [noise, eeg_feature_vectors]; y = [validity_targets, one_hot_targets].
    Returns the combined BCE (real/fake) + cross-entropy (class) loss.
    """
    X = [X[0].to(device), X[1].to(device)]
    # Generalized: the original hard-coded view(100, 1) only worked for a
    # batch size of exactly 100; view(-1, 1) infers it from the tensor.
    y = [y[0].view(-1, 1).to(device).to(torch.float32),
         y[1].to(device).type(torch.float32)]
    gen_imgs = g(X[0], X[1])
    y_pred, labels = d(gen_imgs)
    loss_bce = loss_fn1(y_pred, y[0])
    loss_ce = loss_fn2(labels, y[1])
    loss = loss_bce + loss_ce
    optim.zero_grad()
    loss.backward()
    optim.step()
    return loss
def train_gan(input_noise_dim, batch_size, epochs, data_dir, saved_classifier_model_file, model_save_dir, output_dir, classifier_model):
    """Train the EEG-conditioned image GAN.

    Alternates discriminator updates (real vs. generated images) with
    generator updates through the frozen discriminator, conditioning the
    generator on EEG feature vectors captured from a pretrained EEG
    classifier's fc1 layer. Periodically saves sample image grids and
    model weights.
    """
    imagenet_folder = "./images/ImageNet-Filtered"
    num_classes = 10
    feature_encoding_dim = 100
    d_adam_lr = 0.00005
    d_adam_beta_1 = 0.5
    g_adam_lr = 0.00003
    g_adam_beta_1 = 0.5
    # load data and compile discriminator, generator models depending on the dataaset
    x_train, y_train, x_test, y_test = load_image_data(imagenet_folder, patch_size=(64, 64))
    # Reshape to NCHW float tensors for torch.
    x_train = torch.tensor(x_train, dtype=torch.float32).view(x_train.shape[0], 3, x_train.shape[1], x_train.shape[2])
    y_train = torch.tensor(y_train, dtype=torch.float32)
    x_test = torch.tensor(x_test, dtype=torch.float32).view(x_test.shape[0], 3, x_test.shape[1], x_test.shape[2])
    y_test = torch.tensor(y_test, dtype=torch.float32)
    print("Loaded Images Dataset.", )
    train_dataset = data_utils.TensorDataset(x_train, y_train)
    test_dataset = data_utils.TensorDataset(x_test, y_test)
    test_loader = data_utils.DataLoader(test_dataset, shuffle=True)
    train_loader = data_utils.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    c = classifier_model.to(device)
    # c.load_state_dict(torch.load(os.path.join("./image_classifier","vgg_final4.pt")))
    # summary(c, (1,3, 64, 64),
    #     col_names=["input_size", "output_size", "num_params", "trainable"],
    #     col_width=20,
    #     row_settings=["var_names"]
    # )
    # c.load_state_dict(torch.load(os.path.join("./image_classifier","vgg_final4.pt")))
    # for epoch in range(2000):
    #     print(f"Epoch: {epoch}\n---------")
    #     train_step(c,train_loader,torch.nn.CrossEntropyLoss(),optimizer = torch.optim.SGD(c.parameters(), lr=0.0001, momentum=0.9, weight_decay=1e-6),accuracy_fn=accuracy_fn,device=device)
    #     torch.save(c.state_dict(), os.path.join("./image_classifier","vgg_final5.pt"))
    # model_save_dir = "./discriminator"
    # saved_model_file = os.path.join(model_save_dir, "dis_final1.pt")
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    # Discriminator combined with the (frozen-feature) image classifier.
    d = DiscriminatorRGB()
    # d.load_state_dict(torch.load(os.path.join(model_save_dir, 'd_' + '350' + '.pth')))
    d2 = CombinedDisClassifier(d, c).to(device)
    summary(d2, (1,3, 64, 64),col_names=["input_size", "output_size", "num_params", "trainable"],col_width=20,row_settings=["var_names"])
    optim_d = torch.optim.Adam(d2.parameters(), lr=d_adam_lr, betas=(d_adam_beta_1, 0.999))
    loss_d = torch.nn.BCELoss()
    # # summary(d2, (1,3, 64, 64),col_names=["input_size", "output_size", "num_params", "trainable"],
    # #     col_width=20,
    # #     row_settings=["var_names"])
    # # torch.save(d2.state_dict(), saved_model_file)
    g = GeneratorRGB(input_noise_dim, feature_encoding_dim).to(device)
    # g.load_state_dict(torch.load(os.path.join(model_save_dir, 'g_' + '350' + '.pth')))
    summary(g, [(1,100),(1,100)],col_names=["input_size", "output_size", "num_params", "trainable"],col_width=20,row_settings=["var_names"])
    # d_on_g = CombinedGD(g, d)
    optim_g = torch.optim.Adam(g.parameters(), lr=g_adam_lr, betas=(g_adam_beta_1, 0.999))
    loss_g2 = torch.nn.CrossEntropyLoss()
    loss_g1 = torch.nn.BCELoss()
    # eeg_data = pickle.load(open(os.path.join(data_dir, 'data.pkl'), "rb"))
    # classifier = torch.load(saved_classifier_model_file)
    # EEG classifier whose fc1 activations (captured by the hook) condition
    # the generator.
    eeg_classifier = ConvolutionalEncoder(14,1,10).to(device)
    eeg_classifier.fc1.register_forward_hook(get_activation('fc1'))
    eeg_classifier.load_state_dict(torch.load(saved_classifier_model_file,map_location=torch.device(device)))
    eeg_data = pickle.load(open(os.path.join(data_dir,"data.pkl"), 'rb'),encoding='bytes')
    x_test = eeg_data[b'x_test']
    x_test = torch.tensor(x_test, dtype=torch.float32).view(x_test.shape[0], 1, x_test.shape[1], x_test.shape[2]).to(device)
    y_test = eeg_data[b'y_test']
    y_test = np.array([np.argmax(y) for y in y_test])
    # Forward pass populates activation['fc1'] for sampling below.
    output = eeg_classifier(x_test)
    for epoch in range(epochs):
        print("Epoch is ", epoch)
        print("Number of batches", int(x_train.shape[0]/batch_size))
        for batch,(X,y) in enumerate(train_loader):
            noise = np.random.uniform(-1, 1, (batch_size, input_noise_dim))
            random_labels = np.random.randint(0, 10, batch_size)
            one_hot_vectors = [to_categorical(label, 10) for label in random_labels]
            # print(activation['fc1'][0].shape)
            eeg_feature_vectors = np.array([activation['fc1'].cpu().detach().numpy()[random.choice(np.where(y_test == random_label)[0])] for random_label in random_labels])
            # NOTE(review): only element [0] of the sampled feature batch is
            # converted here — verify this is intended and not a lost batch dim.
            eeg_feature_vectors = torch.tensor(eeg_feature_vectors[0], dtype=torch.float32).to(device)
            noise = torch.tensor(noise, dtype=torch.float32).to(device)
            # print(noise.shape)
            # print(eeg_feature_vectors.shape)
            random_labels = torch.tensor(random_labels, dtype=torch.float32).to(device)
            real_images = X
            real_labels = y
            generated_images = g(noise,eeg_feature_vectors)
            # Discriminator step: real batch labelled 1, generated batch 0.
            d_loss_real = train_on_batch(d2,X,torch.ones(X.shape[0]),optim_d,loss_d)
            d_loss_fake = train_on_batch(d2,generated_images.detach(),torch.zeros(batch_size),optim_d,loss_d)
            d_loss = (d_loss_real + d_loss_fake)*0.5
            # Generator step with discriminator weights frozen.
            for param in d.parameters():
                param.requires_grad = False
            g_loss = train_on_batch2(d2,g,[noise,eeg_feature_vectors],[torch.ones(batch_size),torch.from_numpy(np.array(one_hot_vectors).reshape(batch_size, num_classes))],optim_g,loss_g1,loss_g2)
            for param in d.parameters():
                param.requires_grad = True
        if epoch % 100 == 0:
            # Save a tiled grid of the latest generated batch.
            image = combine_rgb_images(generated_images)
            image = image * 255.0
            img_save_path = os.path.join(output_dir, str(epoch) + "_g" + ".png")
            print("Saving image to ", img_save_path)
            Image.fromarray(image.astype(np.uint8)).save(img_save_path)
        if epoch % 100 == 0:
            test_image_count = 50000
            test_noise = np.random.uniform(-1, 1, (test_image_count, input_noise_dim))
            test_noise = torch.tensor(test_noise,dtype=torch.float32)
            test_labels = np.random.randint(0, 10, test_image_count)
            print("Hii")
            eeg_feature_vectors_test = np.array([activation['fc1'].cpu().detach().numpy()[random.choice(np.where(y_test == test_label)[0])] for test_label in test_labels])
            eeg_feature_vectors_test = torch.tensor(eeg_feature_vectors_test, dtype=torch.float32).to(device)
            test_labels = torch.tensor(test_labels, dtype=torch.float32).to(device)
            test_images = g(test_noise, eeg_feature_vectors_test)
            test_images = test_images * 255.0
            # inception_score = get_inception_score([test_image for test_image in test_images], splits=10)
            print("Epoch %d d_loss : %f" % (epoch, d_loss))
            print("Epoch %d g_loss : %f" % (epoch, g_loss.item()))
            # print("Epoch %d inception_score : %f" % (epoch, inception_score[0]))
        if epoch % 50 == 0:
            # save generator and discriminator models along with the weights
            torch.save(g.state_dict(), os.path.join(model_save_dir, 'g_' + str(epoch) + '.pth'))
            torch.save(d.state_dict(), os.path.join(model_save_dir, 'd_' + str(epoch) + '.pth'))
def train():
    """Set up a VGG16-based image classifier and run GAN training for the Image dataset."""
    dataset = 'Image'
    batch_size = 100
    run_id = 1
    epochs = 10000
    model_save_dir = os.path.join('./saved_models/thoughtviz_image_with_eeg/', dataset, 'run_' + str(run_id))
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    output_dir = os.path.join('./outputs/thoughtviz_image_with_eeg/', dataset, 'run_' + str(run_id))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    classifier_model = torchvision.models.vgg16().to(device)
    # classifier_model.load_state_dict(torch.load('./image_classifier/vgg16_bn.pth'))
    # Replace the final classifier layer with a 10-class head.
    num_features = classifier_model.classifier[6].in_features
    classifier_model.classifier[6] = nn.Sequential(
        nn.Linear(num_features, 256), nn.ReLU(), nn.Dropout(0.4),
        nn.Linear(256, 10), nn.LogSoftmax(dim=1))
    classifier_model.load_state_dict(torch.load('./image_classifier/vgg_final4.pt', map_location=torch.device(device)))
    # Freeze the convolutional feature extractor; only the head stays trainable.
    for param in classifier_model.features.parameters():
        param.requires_grad = False
    # classifier_model.classifier = torch.nn.Sequential(
    #     torch.nn.Dropout(p=0.2, inplace=True),
    #     torch.nn.Linear(in_features=1280,
    #                 out_features=10, # same number of output units as our number of classes
    #                 bias=True)).to(device)
    eeg_data_dir = os.path.join('../data/eeg/', dataset.lower())
    eeg_classifier_model_file = os.path.join('./eeg_image_classification', '1_final.pt')
    train_gan(input_noise_dim=100, batch_size=batch_size, epochs=epochs,data_dir=eeg_data_dir, saved_classifier_model_file=eeg_classifier_model_file, model_save_dir=model_save_dir, output_dir=output_dir, classifier_model=classifier_model)
if __name__ == '__main__':
# dist.init_process_group(backend='gloo')
train() | cbstars06/EEG | ThoughtViz_py/training/thoughtviz_image_with_eeg.py | thoughtviz_image_with_eeg.py | py | 13,288 | python | en | code | 0 | github-code | 13 |
7051517719 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import util
def test():
    """Render the 'dir2' tree (relative to the cwd) as an indented listing."""
    return str(_recursive_dir('./', 'dir2', 0))
def _recursive_dir(parent, dir_name, lv):
    """Recursively list *dir_name* under *parent*, indenting entries by depth *lv*."""
    out = ''
    this_path = parent + dir_name
    if not this_path.endswith('/'):
        this_path += '/'
    # Renamed local: the original shadowed the builtin `list`.
    for item in util.list_dir(this_path):
        path = this_path + item
        is_dir = util.is_dir(path)
        out += util.str_padding(item + ' ' + str(is_dir), ' ', lv) + '\n'
        if is_dir:
            out += _recursive_dir(this_path, item, lv + 1)
    return out
def main():
    """Run the smoke test and print either its output or the error message."""
    try:
        result = test()
    except Exception as exc:
        result = str(exc)
    print(result)
main()
| takashiharano/util | python/test/file/test_recursive_dir.py | test_recursive_dir.py | py | 798 | python | en | code | 1 | github-code | 13 |
30139031912 | from flask import current_app
from zou.app.models.project import Project
from zou.app.models.entity import Entity
from zou.app.services import shots_service
from zou.app.blueprints.source.shotgun.base import (
BaseImportShotgunResource,
ImportRemoveShotgunBaseResource,
)
class ImportShotgunScenesResource(BaseImportShotgunResource):
    """Import Shotgun scene entries as Zou scene entities."""
    def __init__(self):
        BaseImportShotgunResource.__init__(self)
    def prepare_import(self):
        """Cache the lookups needed while importing."""
        self.scene_type = shots_service.get_shot_type()
        self.project_map = Project.get_id_map(field="name")
    def extract_data(self, sg_scene):
        """Build the entity payload for a single Shotgun scene."""
        scene_type = shots_service.get_scene_type()
        return {
            "name": sg_scene["code"],
            "shotgun_id": sg_scene["id"],
            "project_id": self.get_project(sg_scene, self.project_map),
            "entity_type_id": scene_type["id"],
            "parent_id": self.get_sequence(sg_scene),
        }
    def get_project(self, sg_scene, project_map):
        """Resolve the Zou project id from the Shotgun scene's project name."""
        sg_project = sg_scene["project"]
        if sg_project is None:
            return None
        return project_map.get(sg_project["name"], None)
    def get_sequence(self, sg_scene):
        """Resolve the parent sequence id, if the scene links to one."""
        sequence_key = "sequence_sg_scenes_1_sequences"
        linked_sequences = sg_scene.get(sequence_key)
        if not linked_sequences:
            return None
        return self.get_sequence_id(linked_sequences[0]["id"])
    def import_entry(self, data):
        """Create or update the scene entity matching the Shotgun id."""
        scene = Entity.get_by(
            shotgun_id=data["shotgun_id"],
            entity_type_id=shots_service.get_scene_type()["id"],
        )
        if scene is not None:
            scene.update(data)
            scene.save()
            current_app.logger.info("Scene updated: %s" % scene)
        else:
            scene = Entity(**data)
            scene.save()
            current_app.logger.info("Scene created: %s" % scene)
        return scene
    def filtered_entries(self):
        """No extra filtering: import every fetched entry."""
        return self.sg_entries
class ImportRemoveShotgunSceneResource(ImportRemoveShotgunBaseResource):
    """Handle Shotgun removal events for scene entities."""
    def __init__(self):
        ImportRemoveShotgunBaseResource.__init__(
            self, Entity, entity_type_id=shots_service.get_scene_type()["id"]
        )
| cgwire/zou | zou/app/blueprints/source/shotgun/scene.py | scene.py | py | 2,433 | python | en | code | 152 | github-code | 13 |
# Largest palindrome that is a product of two factors in range(900, 999).
#
# The original pipeline built several huge intermediate lists (every 5/6-digit
# palindrome, every 900-998 divisor of any of them, every divisor*factor
# product — with duplicates) and then intersected products with palindromes in
# an O(n*m) double loop.  Every number it could keep is, by construction, a
# palindromic product a*b with a, b in range(900, 999), so compute those
# directly; the printed maximum is identical.
largest_palindromes = sorted(
    a * b
    for a in range(900, 999)
    for b in range(900, 999)
    if str(a * b) == str(a * b)[::-1]
)
print(max(largest_palindromes))
# Short Solution
# Largest palindrome product of two 3-digit numbers (Project Euler 4).
solutions = set()
for a in range(999, 100, -1):
    for b in range(999, 100, -1):
        digits = str(a * b)
        if digits == digits[::-1]:
            solutions.add(a * b)
print('Calculation complete:', max(solutions))
| ayan1995/Thinkful | Bootcamp/Unit_5_Other_Topics/Algorithms and Data Structures/Project Euler/palindrome.py | palindrome.py | py | 871 | python | en | code | 0 | github-code | 13 |
10320916812 | import os
import json
import boto3
import tweepy
import pytz
from tweepy import OAuthHandler
from tweepy import StreamingClient
from textblob import TextBlob
from datetime import datetime
import requests
from requests_aws4auth import AWS4Auth
import pysolr
class process_tweet(tweepy.StreamingClient):
    """Streaming client that sentiment-scores each tweet and indexes the
    result into Solr and (via Kinesis Firehose) Amazon OpenSearch."""
    print("in process_tweet")
    def on_data(self, data):
        print("on_data")
        # print(data)
        # decode json
        dict_data = json.loads(data)
        # pass tweet into TextBlob
        # NOTE(review): wrapping the id in TextBlob is odd — str() would be
        # the obvious choice; verify this is intentional.
        tweet_id = TextBlob(dict_data['data']['id'])
        timestamp = datetime.now().astimezone(est).strftime("%Y-%m-%d %H:%M:%S")
        tweet = TextBlob(dict_data['data']['text'])
        tweetstr = str(tweet)
        # Extract a retweeted user's handle from a leading "RT @user:" prefix.
        s = tweetstr.split(':')[0].strip()
        p = s.partition("RT @")
        message = dict_data["data"]["text"].strip('\n').strip()
        #stripped = ''.join(e for e in message if e.isalnum())
        # str.partition always returns a 3-tuple, so this condition is always
        # true; the inner check on p[1] is what actually detects a retweet.
        if len(p) >= 2:
            if p[1] == "RT @":
                user_name = p[2]
            else:
                user_name = "none"
        else:
            user_name = "none"
        # determine if sentiment is positive, negative, or neutral
        if tweet.sentiment.polarity < 0:
            sentiment = "negative"
        elif tweet.sentiment.polarity == 0:
            sentiment = "neutral"
        else:
            sentiment = "positive"
        # output values
        print("tweet_id: "+ str(tweet_id))
        print("timestamp: " +timestamp)
        print("user_name: " + user_name)
        print("tweet_polarity: " + str(tweet.sentiment.polarity))
        print("tweet_subjectivity: " + str(tweet.sentiment.subjectivity))
        print("sentiment: " + sentiment)
        # Payload for the Solr index (includes the raw message text).
        final_solr_data = {}
        final_solr_data['id'] = dict_data['data']['id']
        final_solr_data['user_name'] = user_name
        final_solr_data['tweet_tstamp'] = timestamp
        final_solr_data['message'] = message
        final_solr_data['polarity'] = str(tweet.sentiment.polarity)
        final_solr_data['subjectivity'] = str(tweet.sentiment.subjectivity)
        final_solr_data['sentiment'] = sentiment
        # Payload for the Firehose delivery stream (no message text).
        final_fh_data = {}
        final_fh_data['tweet_id'] = str(tweet_id)
        final_fh_data['timestamp'] = timestamp
        final_fh_data['user_name'] = user_name
        final_fh_data['tweet_polarity'] = str(tweet.sentiment.polarity)
        final_fh_data['tweet_subjectivity'] = str(tweet.sentiment.subjectivity)
        final_fh_data['sentiment'] = sentiment
        json_solr_data = json.dumps(final_solr_data)
        json_fh_data = json.dumps(final_fh_data)
        # index values into Solr
        solr.add(final_solr_data)
        # Write to Kinesis Firehose with delivery stream set to Amazon OpenSearch
        fh.put_record(
            DeliveryStreamName='PUT-OPS-lVe9k',
            Record={'Data': json_fh_data},
        )
    def on_error(self, status):
        # Log stream errors; tweepy keeps the stream running.
        print(status)
# Entry point: wire up the Solr and Firehose clients used (as module globals)
# by process_tweet.on_data, then start sampling the Twitter stream.
if __name__ == '__main__':
#def handler(event, context):
    print("in handler")
    # Create Solr client
    print("create Solr client")
    solr = pysolr.Solr('http://ec2-18-212-174-25.compute-1.amazonaws.com:8983/solr/data_sentiment', always_commit=True)
    # Create client for Kinesis Firehose with delivery stream set to Amazon OpenSearch
    print("create client instance for firehose")
    fh = boto3.client('firehose', region_name='us-east-1',aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"))
    print("opensearch instance created")
    est = pytz.timezone('US/Eastern')
    bt=os.environ.get("TWITTER_API_TOKEN")
    printer = process_tweet(bt)
    printer.sample()
| vasveena/opensearch-workshop | py-files/fh-solr-to-os.py | fh-solr-to-os.py | py | 3,457 | python | en | code | 1 | github-code | 13 |
26889979784 | from __future__ import annotations
import os
import numpy as np
import validate
from errors import LoadError
class GridParameters:
    """
    This class contains all the information pertaining to the grid:
        dx (x-coordinate of disk centre w.r.t. eclipse centre [t_ecl])
        dy (y-coordinate of disk centre w.r.t. eclipse centre [t_ecl])
        rf (extent of rf_array in one direction)
        rf_array (radius factor that compares with the smallest disk at a
            given point)
        grid_shape (shape of the total grid)
        slice_shape (shape of dy, dx slice)
        extendable (whether the grid contains point (0, 0))
    It also contains methods to save and load the grid parameters.
    """
    def __init__(self,
                 min_x: float,
                 max_x: float,
                 num_x: int,
                 min_y: float,
                 max_y: float,
                 num_y: int,
                 max_rf: float,
                 num_rf: int
                 ) -> None:
        """
        This is the constructor for the disk grid parameter class.

        Parameters
        ----------
        min_x : float
            Minimum dx value of the grid [t_ecl].
        max_x : float
            Maximum dx value of the grid [t_ecl]; must exceed min_x.
        num_x : int
            Number of grid points in the dx direction.
        min_y : float
            Minimum dy value of the grid [t_ecl].
        max_y : float
            Maximum dy value of the grid [t_ecl]; must exceed min_y.
        num_y : int
            Number of grid points in the dy direction.
        max_rf : float
            Maximum rf value [-]; must be >= 1.
        num_rf : int
            Number of rf grid points in one direction. The stored rf_array
            (max_rf -> 1 -> max_rf) then has 2 * num_rf - 1 elements.
        """
        self.dx = self._determine_dx(min_x, max_x, num_x)
        self.dy = self._determine_dy(min_y, max_y, num_y)
        self.rf, self.rf_array = self._determine_rf(max_rf, num_rf)
        self._set_grid_and_slice_shape()
        self._set_extendable()
def __str__(self) -> str:
"""
This returns the string representation of the class.
Returns
-------
str_string : str
String representation of the GridParameters class.
"""
str_string = self.__repr__()
return str_string
def __repr__(self) -> str:
"""
This generates a string representation of the grid parameters object.
Returns
-------
repr_string : str
Representation string of the grid parameters class.
"""
dy, dx, rf_array = self.get_vectors()
lines: list[str] = [""]
lines.append("Grid Parameters")
lines.append(28 * "-")
dx_min = f"{dx[0]:.2f}".rjust(6)
dx_max = f"{dx[-1]:.2f}".rjust(6)
lines.append(f"dx: {dx_min} -> {dx_max} ({len(dx)})")
dy_min = f"{dy[0]:.2f}".rjust(6)
dy_max = f"{dy[-1]:.2f}".rjust(6)
lines.append(f"dy: {dy_min} -> {dy_max} ({len(dy)})")
rf_min = f"{1:.2f}".rjust(6)
rf_max = f"{rf_array[0]:.2f}".rjust(6)
rf_num = len(rf_array)
lines.append(f"rf: {rf_max} -> {rf_min} -> {rf_max} ({rf_num})")
lines.append(f"grid_shape: {str(self.grid_shape)}")
repr_string = "\n".join(lines)
return repr_string
def _determine_dx(self,
min_x: float,
max_x: float,
num_x: int
) -> np.ndarray:
"""
This method is used to determine the dx vector
Parameters
----------
min_x : float
The minimum value of x [t_ecl].
max_x : float
The maximum value of x [t_ecl].
num_x : int
The number of dx elements.
Returns
-------
dx : np.ndarray
Grid dx dimension vector.
"""
min_x = validate.number(min_x, "min_x")
max_x = validate.number(max_x, "max_x")
if min_x >= max_x:
raise ValueError("max_x must be greater than min_x")
num_x = validate.number(num_x, "num_x", check_integer=True,
lower_bound=1)
dx = np.linspace(min_x, max_x, num_x)[None, :, None]
return dx
def _determine_dy(self,
min_y: float,
max_y: float,
num_y: int
) -> np.ndarray:
"""
This method is used to determine the dx vector
Parameters
----------
min_y : float
The minimum value of y [t_ecl].
max_y : float
The maximum value of y [t_ecl].
num_y : int
The number of dy elements.
Returns
-------
dy : np.ndarray
Grid dy dimension vector.
"""
min_y = validate.number(min_y, "min_y")
max_y = validate.number(max_y, "max_y")
if min_y >= max_y:
raise ValueError("max_y must be greater than min_y")
num_y = validate.number(num_y, "num_y", check_integer=True,
lower_bound=1)
dy = np.linspace(min_y, max_y, num_y)[:, None, None]
return dy
    def _determine_rf(self,
                      max_rf: float,
                      num_rf: int
                      ) -> tuple[np.ndarray, np.ndarray]:
        """
        This method is used to determine the rf vectors.
        Parameters
        ----------
        max_rf : float
            The maximum value of rf [-].
        num_rf : int
            The number of rf elements (in one direction).
        Returns
        -------
        rf : np.ndarray
            Rf range from 1 to max_rf in num_rf.
        rf_array : np.ndarray
            Grid rf dimension vector.
        """
        max_rf = validate.number(max_rf, "max_rf", lower_bound=1)
        num_rf = validate.number(num_rf, "num_rf", check_integer=True,
                                 lower_bound=1)
        rf = np.linspace(1, max_rf, num_rf)
        # symmetric ramp max_rf -> 1 -> max_rf (the shared "1" element is
        # dropped from the second half), length 2 * num_rf - 1
        rf_array = np.concatenate((np.flip(rf), rf[1:]), 0)
        return rf, rf_array
    def _set_grid_and_slice_shape(self) -> None:
        """
        This method sets useful grid parameters (grid shape and slice shape).
        """
        dy, dx, rf_array = self.get_vectors()
        # full 3-D grid shape and its 2-D (y, x) slice
        self.grid_shape = (len(dy), len(dx), len(rf_array))
        self.slice_shape = (len(dy), len(dx))
    def _set_extendable(self) -> None:
        """
        This method is used to determine whether this particular set of grid
        parameters can be extended (mirrored about the x and y axes).
        """
        dy, dx, _ = self.get_vectors()
        # mirroring only makes sense when both axes start at the origin
        self.extendable: bool = dy[0] == 0 and dx[0] == 0
    def extend_grid(self) -> None:
        """
        This method is used to reflect the grid parameters about the x and y
        axes, doubling (minus the shared origin sample) both dimensions.
        """
        if not self.extendable:
            raise AttributeError("This grid parameter object can not be "
                "extended. That is only possible when dx[0] = dy[0] = 0.")
        num_y, num_x = self.slice_shape
        # current positive extents of each axis
        max_y = self.dy[-1, 0, 0]
        max_x = self.dx[0, -1, 0]
        # 2 * n - 1: the origin sample is shared between both halves
        self.dx = self._determine_dx(-max_x, max_x, 2 * num_x - 1)
        self.dy = self._determine_dy(-max_y, max_y, 2 * num_y - 1)
        self._set_grid_and_slice_shape()
        self._set_extendable()
    def get_vectors(self) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        This method returns the flattened dy, dx, and rf grid vectors.
        Returns
        -------
        dy : np.ndarray
            The y coordinates of the centre of the ellipse [t_ecl]
        dx : np.ndarray
            The x coordinates of the centre of the ellipse [t_ecl]
        rf_array : np.ndarray
            The rf radius stretch factors of the ellipse [-]
        """
        # dy and dx are stored broadcast-shaped; flatten back to 1-D here
        return self.dy.flatten(), self.dx.flatten(), self.rf_array
def save(self, directory: str) -> None:
"""
This method saves all the information of this object to a specified
directory.
Parameters
----------
directory : str
File path for the saved information.
"""
directory = validate.string(directory, "directory")
if not os.path.exists(directory):
os.mkdir(directory)
np.save(f"{directory}/dx", self.dx)
np.save(f"{directory}/dy", self.dy)
np.save(f"{directory}/rf", self.rf)
np.save(f"{directory}/rf_array", self.rf_array)
    @classmethod
    def load(cls, directory: str) -> GridParameters:
        """
        This method loads all the information of this object from a specified
        directory.
        Parameters
        ----------
        directory : str
            File path for the saved information.
        Returns
        -------
        grid_parameters : GridParameters
            This is the loaded object.
        Raises
        ------
        LoadError
            When any of the expected .npy files is missing or unreadable.
        """
        directory = validate.string(directory, "directory")
        try:
            # dummy construction; every array attribute is overwritten from
            # disk immediately below
            grid_parameters = cls(0, 1, 1, 0, 1, 1, 2, 1)
            grid_parameters.dx = np.load(f"{directory}/dx.npy")
            grid_parameters.dy = np.load(f"{directory}/dy.npy")
            grid_parameters.rf = np.load(f"{directory}/rf.npy")
            grid_parameters.rf_array = np.load(f"{directory}/rf_array.npy")
            # recompute the derived shape/extendable attributes
            grid_parameters._set_grid_and_slice_shape()
            grid_parameters._set_extendable()
        except Exception:
            raise LoadError("grid parameters", directory)
        return grid_parameters
17015630195 | """
So turns out I have a real blank spot around indices manipulation like this.
Well, that and live interviewing, probably been practicing TDD-like too much.
Of course, I can do it, no doubt, but live, nah
------------------------
Given a matrix
[
["a", "b", "c"],
["d", "e", "f"],
["g", "h", "i"],
["j", "k", "l"],
["m", "n", "o"]
]
Print the inverse diagonals as below
a
b d
c e g
f h j
i k m
n l
o
Do not access the matrix any
"""
matrix = [
["a", "b", "c"],
["d", "e", "f"],
["g", "h", "i"],
["j", "k", "l"],
["m", "n", "o"]
]
def print_diags(matrix):
    # Print the anti-diagonals of a (rows >= cols) matrix, one per line.
    # Phase 1: diagonals starting on the top row (upper-left triangle).
    for j_col in range(len(matrix[0])):
        for i_row in range(j_col + 1):
            print(matrix[i_row][j_col - i_row], end=" ")
        print()
    # Phase 2: full-width diagonals starting down the left column.
    for i_row_s in range(1, len(matrix) - len(matrix[0])):
        for i_row_p, j_col in enumerate(range(len(matrix[0])-1, -1, -1)):
            print(matrix[i_row_s + i_row_p][j_col], end=" ")
        print()
    # Phase 3: shrinking diagonals ending on the bottom row.
    # NOTE(review): i_row_p is reused as both the outer counter and the
    # inner enumerate variable; it works because i_row_s is computed before
    # the inner loop rebinds it, but it is fragile — consider renaming.
    for i_row_p in range(len(matrix[0])):
        i_row_s = len(matrix) - len(matrix[0]) + i_row_p
        for i_row_p, j_col in enumerate(range(len(matrix[0])-1, i_row_p-1, -1)):
            print(matrix[i_row_s + i_row_p][j_col], end=" ")
        print()
print_diags(matrix)
| chrisjdavie/interview_practice | my_own/inv_diag_matrix/try_0.py | try_0.py | py | 1,248 | python | en | code | 0 | github-code | 13 |
class GameData(object):
    """Record of one scraped Steam game: identity, monetary value and
    review statistics."""

    # Class-level defaults; every field is overwritten per instance in
    # __init__ (-1 marks "unknown" for the review fields).
    game_name = ''
    game_link = ''
    value = 0.0
    num_of_reviews = -1
    steam_score = -1

    def __init__(self, game_name, game_link, value, num_of_reviews=-1, steam_score=-1):
        """Coerce value to float and the review fields to int so records
        built from scraped strings compare consistently."""
        self.game_name = game_name
        self.game_link = game_link
        self.value = float(value)
        self.num_of_reviews = int(num_of_reviews)
        self.steam_score = int(steam_score)

    def equals(self, game):
        """Return True when every field of *game* matches this record."""
        # restored: the final comparison line was garbled in the source
        return self.game_name == game.game_name\
               and self.game_link == game.game_link\
               and self.value == game.value\
               and self.num_of_reviews == game.num_of_reviews\
               and self.steam_score == game.steam_score
# 4
# Дама,сдавала в багаж
# диван, чемодан, саквояж
# картину, корзину, картонку
# и маленькую собачонку,,
# 4
# 0,0
# 1,2
# 3,1
# 3,0
# Read a grid of comma-separated words, then print the word found at each
# requested (row, column) coordinate.
text = []
lineCount = int(input("Введите количество строк: "))
for i in range(lineCount):
    # each input line becomes one row of words
    line = input().split(",")
    text.append(line)
print(text)
wordCount = int(input("Введите количество необходимых слов: "))
for i in range(wordCount):
    # coordinates arrive as "row, col"; note the list-comprehension variable
    # shadows the outer loop variable i (harmless here, but confusing)
    coordinate = [int(i) for i in input("Координаты слова: ").split(", ")]
    d = text[coordinate[0]]
    word = d[coordinate[1]]
    print(word)
| DOMOKUL/Python | Laba2/16.2.py | 16.2.py | py | 675 | python | ru | code | 0 | github-code | 13 |
# Seeed Grove Ultrasonic Sensor v2
#
# https://wiki.seeedstudio.com/Grove-Ultrasonic_Ranger/
# #play-with-raspberry-pi-with-grove-base-hat-for-raspberry-pi
import machine
import time
# blink on-board led to verify operating status
led = machine.Pin(25, machine.Pin.OUT)
def blink(timer):
    # timer callback: toggle the on-board LED as a heartbeat indicator
    global led
    led.toggle()
machine.Timer(freq=2, mode=machine.Timer.PERIODIC, callback=blink)
def get_distance(pin):
    """Measure distance with a Grove ultrasonic ranger on the single-wire
    signal *pin*.

    Returns the distance in centimetres, or None when no echo started
    within the timeout window.
    """
    # trigger: drive the shared signal pin low, then a 10 us high pulse
    pin.init(machine.Pin.OUT)
    pin.value(0)
    time.sleep_us(2)
    pin.value(1)
    time.sleep_us(10)
    pin.value(0)
    # switch the same pin to input and listen for the echo pulse
    pin.init(machine.Pin.IN)
    # wait (bounded busy-loop) for the echo line to go high
    t0 = time.ticks_us()
    count = 0
    while count < 10000:
        if pin.value():
            break
        count += 1
    # wait for the echo line to drop low again
    t1 = time.ticks_us()
    count = 0
    while count < 10000:
        if not pin.value():
            break
        count += 1
    t2 = time.ticks_us()
    # The original compared t1 - t2, which is never positive, so the
    # timeout branch was unreachable (and t0 was unused).  Following
    # Seeed's reference driver, time out when the echo took too long to
    # start.  ticks_diff handles tick-counter wraparound.
    if time.ticks_diff(t1, t0) > 530:
        return None
    else:
        # echo pulse width (us) / 29 / 2 -> distance in cm
        return time.ticks_diff(t2, t1) / 29 / 2
sensor = machine.Pin(20)
def report_distance(timer):
    # timer callback: sample the sensor and print the distance (cm)
    global sensor
    d = get_distance(sensor)
    print(d)
machine.Timer(freq=10, mode=machine.Timer.PERIODIC, callback=report_distance)
| jckantor/cbe61622 | Raspberry_Pi_Pico/demo_ultrasonic_sensor.py | demo_ultrasonic_sensor.py | py | 1,256 | python | en | code | 6 | github-code | 13 |
def read():
    """Parse ./problem1.dat for a QAP instance.

    File layout: the first non-empty line is the problem width, the next
    25 non-empty lines are the distance matrix rows, and every remaining
    non-empty line is a flow matrix row.

    Returns
    -------
    (width, ds, fs) : (int, list[list[int]], list[list[int]])
    """
    # `with` guarantees the file is closed (the original leaked the handle)
    with open('./problem1.dat', 'r') as f:
        lines = [line for line in f.read().splitlines() if line]
    width = int(lines.pop(0))
    # split() with no argument collapses any run of whitespace, which
    # replaces the original's fragile single/double-space handling
    ds = [[int(token) for token in row.split()] for row in lines[:25]]
    fs = [[int(token) for token in row.split()] for row in lines[25:]]
    return width, ds, fs
from igraph import *
import psycopg2
from psycopg2.extensions import AsIs
from psycopg2.extras import execute_values
import sys
from config import config
import numpy as np
import datetime
from pprint import pprint
def main(argv):
    """CLI entry point: argv = [prog, map_name, points_name].

    Connects to PostgreSQL (parameters from config()), builds the
    redundant path network, and prints the elapsed time and total cost.
    """
    if (len(sys.argv) < 3 or len(sys.argv) > 3):
        print(
            "Debe ingresar nombre de mapa de caminos y nombre de mapa con puntos a conectar")
    else:
        map_name = argv[1]
        points_name = argv[2]
        conn = None
        try:
            # read connection parameters
            params = config()
            # connect to the PostgreSQL server
            print('Connecting to the PostgreSQL database')
            conn = psycopg2.connect(**params)
            print('Connected!\n')
            # create a cursor
            # conn.cursor will return a cursor object, you can use this cursor to perform queries
            cursor = conn.cursor()
            # execute queries, timing the whole build
            currentTime = datetime.datetime.now()
            total_cost = build_paths(map_name, points_name, cursor)
            timeDelta = datetime.datetime.now() - currentTime
            # close communication with the PostgreSQL database server
            cursor.close()
            # commit the changes
            conn.commit()
            print(timeDelta)
            print("Costo total de construcción: ", total_cost)
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)
        finally:
            if conn is not None:
                conn.close()
# create table with segments that need to be built
def build_paths(map_name, points_name, cursor):
    """Create <map_name>_construccion holding the distinct segments that
    must be built and return their total construction cost."""
    # create a results table
    needed_paths_name = map_name + "_suurballe"
    paths_to_build = map_name + "_construccion"
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS %s (path_id int, id int generated by default as identity PRIMARY KEY);",
        (AsIs(needed_paths_name),))
    find_paths(map_name, points_name, cursor, needed_paths_name)
    # keep only segments with a non-zero cost
    cursor.execute(
        "CREATE TABLE %s AS SELECT distinct m.camino, m.costo, c.path_id FROM %s m, %s c WHERE c.path_id = m.id AND m.costo != 0;",
        (AsIs(paths_to_build), AsIs(map_name), AsIs(needed_paths_name)))
    cursor.execute("select sum(costo) from %s;", (AsIs(paths_to_build),))
    cost = cursor.fetchone()[0]
    return cost
# find two disjoint paths between each pair of points, except for capital cities
def find_paths(map_name, points_name, cursor, needed_paths_name):
    """Find redundant paths between every pair of comunas and record the
    edge ids in *needed_paths_name*.

    Peripheral ("limitrofes") points get a single path, regular points two
    disjoint paths (Suurballe), and regional capitals three.
    """
    # create graph from map linestrings, in which the vertex name is its postgis geometry, edge weight is the building cost and edge id is the corresponding id in table
    cursor.execute("select origen, fin, id, costo from %s where camino notnull;", (AsIs(map_name),))
    edges = cursor.fetchall()
    graph = Graph.TupleList(edges, directed=False, vertex_name_attr="name", edge_attrs=("id", "weight"))
    # get valid points (comunas)
    points = get_points(cursor, points_name)
    # create a grid to mark found paths
    points_grid = np.zeros((len(points), len(points)))
    # fill the grid's diagonal (a point needs no path to itself)
    np.fill_diagonal(points_grid, 2)
    # get capital cities and peripheral points
    capitales = get_points(cursor, "capitales_regionales")
    limitrofes = get_points(cursor, "limitrofes")
    # first pass: make sure every point exists as a vertex in the graph
    for p in range(len(points)):
        geom = points[p]
        try:
            s = graph.vs.find(name=geom)
        except:
            graph.add_vertex(name=geom)
            s = graph.vs.find(name=geom)
    for p in range(len(points)):
        print("buscando desde comuna ", p)
        geom = points[p]
        # list of target comunas
        target = compute_targets(graph, points, points_grid, p)
        # source vertex associated to p
        try:
            s = graph.vs.find(name=geom)
        except:
            graph.add_vertex(name=geom)
            s = graph.vs.find(name=geom)
        # if source is an isolated point, it only needs 1 path
        if geom in limitrofes:
            paths = get_single_path(graph, s, target)
            all_ids = list(
                process_paths(paths, p, graph, points, target, points_grid, map_name, points_name, cursor, geom, single=True))
        else:
            # list of disjoint paths from s to each target
            paths = suurballe(graph, s, target)
            # process paths to obtain the corresponding table IDs
            all_ids = process_paths(paths, p, graph, points, target, points_grid, map_name, points_name, cursor, geom)
        # capitals must have three paths
        if geom in capitales and geom:
            # check if third connection hasn't been registered already
            if max_connections(points_grid, p) < 3:
                try:
                    thrid_path = find_third_path(graph, points, s, p, all_ids, target, points_grid)
                    all_ids = list(all_ids) + thrid_path
                except:
                    # no disjoint third path exists: build a new edge
                    all_ids = list(all_ids) + \
                        process_new_path(map_name, points_name, cursor, geom, graph, p, points, points_grid)
        # add IDs to psql table (and append them to an audit log file)
        plain_ids = [str(x[0]) for x in all_ids]
        log = open('caminos.txt', 'a')
        log.write("\n".join(plain_ids))
        log.write("\n")
        log.close()
        update_table(needed_paths_name, cursor, all_ids)
    # create missing paths for any pair the search could not connect
    update_table(needed_paths_name, cursor,
                 create_direct_paths(map_name, points_name, cursor, graph, points_grid, points))
    pprint(points_grid)
# Process the paths obtained, filling the grid accordingly and returning all edges' ID
def process_paths(paths, p, graph, points, target, points_grid, map_name, points_name, cursor, geom, single=False):
    """Record in *points_grid* how many paths were found from point *p* to
    each target, and return the table ids of every edge used as a set of
    (id,) tuples (psycopg execute_values format)."""
    all_ids = set([])
    # for each comuna's paths
    for path in paths:
        # obtain target point corresponding to paths
        # NOTE(review): paths.index(path) returns the FIRST equal path, so
        # two targets with identical path lists would be mis-attributed —
        # verify, or iterate with enumerate instead.
        target_index = paths.index(path)
        point_index = points.index(graph.vs[target[target_index]]['name'])
        inc = 0
        if single and path:
            # only one path per comuna
            inc = 1
            fill_grid(points_grid, p, point_index, inc)
            joint_paths = path
        elif path:
            # first and second are tuple lists like ((i, j), id)
            first = path[0]
            second = path[1]
            # how many paths were found
            inc = count_paths(first, second)
            # fill points grid accordingly
            fill_grid(points_grid, p, point_index, inc)
            joint_paths = first + second
        # add paths to list of needed paths
        if inc:
            for (i, j), id in joint_paths:
                all_ids.add((id,))
    return all_ids
# returns the number of paths found
def count_paths(first, second):
    """Return how many of the two candidate edge lists are non-empty (0-2)."""
    # bool() collapses each list to 0/1; their sum is the path count
    return bool(first) + bool(second)
def create_direct_paths(map_name, points_name, cursor, graph, points_grid, points):
    """For every (i, j) pair still unconnected in *points_grid*, create a
    direct edge from point i to its nearest comuna; return the new edge
    ids as a set of (id,) tuples."""
    new_edges = set([])
    for i in range(len(points)):
        for j in range(len(points)):
            # we must create a path to nearest connection
            if points_grid[i][j] == 0:
                new_edge = process_new_path(map_name, points_name, cursor, points[i], graph, i, points, points_grid)
                if new_edge:
                    new_edges.add((new_edge[0],))
    return new_edges
# creates a new connection between p and the nearest comuna
def process_new_path(map_name, points_name, cursor, geom, graph, p, points, points_grid):
    """Connect *geom* straight to its nearest comuna (unless that pair is
    already marked in the grid) and mirror the edge in the in-memory
    graph; return the new edge id(s) as a (possibly empty) list."""
    closest = get_closest(cursor, points_name, geom)
    if points_grid[points.index(geom)][points.index(closest)] == 0:
        point_geom, new_edge_id = create_shortest_connection(map_name, cursor, geom, points_name=points_name)
        # weight 0: the edge's cost is already accounted for in the table
        graph.add_edge(geom, point_geom, weight=0, id=new_edge_id)
        fill_grid(points_grid, p, points.index(point_geom), 1)
        return [new_edge_id]
    else:
        return []
def get_closest(cursor, points_name, geom):
    """Return the geometry of the comuna nearest to *geom* (great-circle
    distance via ST_DistanceSphere, excluding *geom* itself)."""
    cursor.execute(
        "select b.pun_geom, min(ST_DistanceSphere(a.pun_geom, b.pun_geom)) m from %s a,%s b where a.pun_geom=%s and a.pun_id != b.pun_id group by b.pun_geom order by m limit 1;",
        (AsIs(points_name), AsIs(points_name), geom))
    target, distance = cursor.fetchall()[0]
    return target
# creates new connection between two vertex
def new_connection(map_name, cursor, geom, graph, points, target, points_grid):
    """Insert a new direct edge between *geom* and *target* in both the
    database map and the in-memory graph, mark the pair connected in the
    grid, and return the new edge id."""
    target, new_edge_id = create_shortest_connection(map_name, cursor, geom, target=target)
    graph.add_edge(geom, target, weight=0, id=new_edge_id)
    fill_grid(points_grid, points.index(geom), points.index(target), 1)
    return new_edge_id
# Returns list of shortest paths from source to each target in target_list
def get_single_path(graph, source, target_list):
    """One weighted shortest path per target, converted to
    (endpoint-pair, table-id) tuples by get_edges."""
    paths = graph.get_shortest_paths(source, weights="weight", to=target_list, output="epath")
    return get_edges(graph, paths)
# returns the points that don't yet have 2 paths to 'p'
def compute_targets(graph, points, points_grid, p):
    """Return the graph vertex indices of every point whose pair with *p*
    has no recorded path yet (grid cell still 0)."""
    target = []
    # for each point
    for q in range(len(points)):
        # if it hasn't been computed
        if points_grid[p][q] == 0:
            t = graph.vs.find(name=points[q])
            target.append(t.index)
    return target
# fills points grid given paths from source to target according to increment
def fill_grid(points_grid, source_index, target_index, increment):
    """Add *increment* to both mirrored cells of the symmetric grid."""
    for row, col in ((source_index, target_index), (target_index, source_index)):
        points_grid[row][col] += increment
# returns two disjoint shortest paths from source vertex to each point in target_list
def suurballe(graph, source, target_list, directed=False):
    """Suurballe's algorithm: for each target vertex index, find two
    edge-disjoint shortest paths from *source*.

    Returns one entry per target: a (path_1, path_2) pair of
    (endpoint-pair, edge-id) lists, (path_1, []) when only one path
    exists, ([], []) for unreachable targets, and [] when the target is
    the source itself.  NOTE: mutates *graph* (to_directed) when
    directed is False.
    """
    if directed == False:
        graph.to_directed()
    # get shortest path to target vertices using Dijkstra
    edge_path = graph.get_shortest_paths(source, weights="weight", to=target_list, output="epath")
    # get shortest distance to all vertices using Dijkstra
    distance = graph.shortest_paths(source, weights="weight")[0]
    shortest_path = get_edges(graph, edge_path)
    # problem_vertex contains vertices without two paths
    result = []
    # create transformed graph Gv for each V. Future: one unified graph
    for t in range(len(target_list)):
        v = graph.vs.find(target_list[t]).index
        if (distance[v] != float("inf") and target_list[t] != source):
            # residual graph with reduced costs and reversed tree edges
            gv = create_graph(graph, distance, edge_path, t)
            transformed_path = gv.get_shortest_paths(source, weights="weight", to=target_list[t], output="epath")
            shortest_transformed = get_edges(gv, transformed_path)[0]
            # unify paths discarding edges that are in both paths
            union = unify_paths(shortest_transformed, shortest_path[t])
            # obtain paths from union
            path_1 = get_path(union, source.index, v)
            try:
                path_2 = get_path(union, source.index, v)
                # success.append(v, path_1, path_2)
                pair = (path_1, path_2)
                result.append(pair)
            except Exception:
                # only one path could be reconstructed from the union
                result.append((path_1, []))
        # if vertex is unreachable
        elif distance[v] == float("inf"):
            result.append(([], []))
        else:
            result.append([])
    return result
# find a third disjoint path between source and target
def find_third_path(graph, points, source, p, path_ids, target, points_grid):
    """Delete every edge already used (ids in *path_ids*) from a copy of
    the graph and search for one more path from *source* to the nearest
    remaining target.  Returns the path as a list of edge indices; raises
    Exception when no disjoint path exists.
    """
    # for psql reasons the format is (id,)
    edge_index = graph.es.select(lambda edge: (edge["id"],) in list(path_ids))
    # copy the graph so we can delete the edges
    graph_copy = graph.copy()
    graph_copy.delete_edges(edge_index)
    # find shortest distance to the other vertices
    new_distance = graph_copy.shortest_paths_dijkstra(source, target=target, weights="weight", mode=OUT)
    try:
        # select target with shortest distance
        # NOTE(review): target.index(min(...)) looks up the minimum
        # DISTANCE value inside the list of vertex ids — presumably the
        # intent was the argmin position (new_distance[0].index(min(...)));
        # verify against expected behaviour.
        target_index = target.index(min(new_distance[0]))
        points_index = points.index(graph.vs[target[target_index]]['name'])
        # get path to selected target
        path = graph_copy.get_shortest_paths(source, weights="weight", to=target[target_index], output="epath")[0]
        if path:
            # NOTE(review): new_path is built but never used/returned
            new_path = []
            for s in path:
                # obtain actual edge id
                id = graph_copy.es.find(s)["id"]
                new_path.append(id)
            fill_grid(points_grid, p, points_index, 1)
        else:
            pass
    except Exception:
        # must create new path from scratch
        raise Exception()
    return path
# insert paths into table
def update_table(table_name, cursor, path_ids):
    """Bulk-insert the (id,) tuples in *path_ids* into *table_name*
    (no-op when the collection is empty)."""
    if path_ids:
        query = "insert into " + table_name + " (path_id) values %s;"
        execute_values(cursor, query, path_ids)
# returns copy of graph in which tree edges are reversed and edge weights are transformed for Suurballe
def create_graph(original_graph, distance, edge_path, target):
    """Build the Suurballe residual graph for index *target*:
    shortest-path edges reversed and every weight replaced by its
    reduced cost."""
    gv = Graph(directed=True)
    gv.add_vertices(len(original_graph.vs))
    reverse_edges(gv, original_graph, edge_path, target)
    gv.es["weight"] = transform_attributes(original_graph, distance)
    return gv
# returns union of paths without complement edges
def unify_paths(path1, path2):
    """Merge two edge lists, cancelling any edge that appears in opposite
    directions in the two paths (Suurballe edge cancellation).

    Each element is ((u, v), edge_id).  Mutates both input lists and
    returns their concatenation after cancellation.
    """
    # iterate over a snapshot: the original looped over path1 while
    # removing from it, which silently skipped the element following
    # every removal
    for ((x, y), z) in list(path1):
        if ((y, x), z) in path2:
            path2.remove(((y, x), z))
            path1.remove(((x, y), z))
    return path2 + path1
# flips edges in gv that are in edge_path
def reverse_edges(gv, graph, edge_path, v):
    """Copy every edge of *graph* into *gv*, reversing those that lie on
    the shortest path to target index *v*, and carry over the "id"
    attribute in the same order."""
    gv_edge_id = []
    for e in range(len(graph.get_edgelist())):
        edge = graph.es[e]
        source = edge.source
        target = edge.target
        if e in edge_path[v]:
            # tree edge: add it reversed
            gv.add_edge(target, source)
            gv_edge_id.append(edge["id"])
        else:
            gv.add_edge(source, target)
            gv_edge_id.append(edge["id"])
    gv.es["id"] = gv_edge_id
# returns edges in edge_path as source and target IDs and edge ID (from map)
def get_edges(graph, edge_path):
    """Translate per-path lists of edge indices into lists of
    ((source_vertex, target_vertex), table_id) tuples."""
    edges = graph.get_edgelist()
    edges_as_pairs = []
    for path in edge_path:
        path_pairs = []
        for e in path:
            path_pairs.append((edges[e], graph.es[e]["id"]))
        edges_as_pairs.append(path_pairs)
    return edges_as_pairs
# constructs a path from source to target with given edges
def get_path(edges, source, target):
    """Walk backwards from *target* to *source* through *edges*,
    consuming (removing) each pair used, and return the edges visited.

    Raises Exception when the walk cannot reach *source*.
    """
    path = []
    # deterministic ordering so repeated calls consume edges consistently
    edges.sort(key=lambda x: (x[1], x[0]))
    try:
        current = search_tuple(edges, target)
        path.append(current)
        while (current[0][0] != source):
            current = search_tuple(edges, current[0][0])
            path.append(current)
    except Exception:
        raise Exception
    return path
# looks for pair in which the target vertix matches with elem
def search_tuple(tups, elem):
    """Pop and return the first ((u, v), id) pair in *tups* whose edge
    ends at *elem*; raise Exception when none matches."""
    for pair in tups:
        if pair[0][1] == elem:
            tups.remove(pair)
            return pair
    raise Exception
# transform edge_costs according to Suurballe algorithm
def transform_attributes(graph, distance):
    """Return the reduced cost w(u,v) - d(v) + d(u) for every edge of
    *graph*; edges touching an unreachable vertex get infinity."""
    transformed_costs = []
    for i in range(len(graph.es)):
        d_target = distance[graph.es[i].target]
        d_source = distance[graph.es[i].source]
        if d_target == float("inf") or d_source == float("inf"):
            transformed_costs.append(float("inf"))
        else:
            transformed_costs.append(graph.es[i]["weight"] - d_target + d_source)
    return transformed_costs
# returns psycopg result as a python list
def clean_list(psycopg_list):
    """Flatten a sequence of 1-tuples (psycopg fetchall rows) into a
    plain list of their first elements."""
    return [row[0] for row in psycopg_list]
# returns points from a table, if map_name is specified, only returns points that intersect map
def get_points(cursor, points_name, map_name=None):
    """Fetch snapped point geometries from *points_name*; when *map_name*
    is given, keep only the points that intersect that map's snapped
    linestrings."""
    if map_name == None:
        cursor.execute("select ST_snaptogrid(pun_geom, 0.00001) from %s;", (AsIs(points_name),))
        return clean_list(cursor.fetchall())
    else:
        cursor.execute("select distinct ST_snaptogrid(pun_geom, 0.00001) from %s, %s\
            where st_intersects(st_snaptogrid(pun_geom, 0.00001), st_snaptogrid(camino, 0.00001));",
                       (AsIs(map_name), AsIs(points_name)))
        points_list = cursor.fetchall()
        return clean_list(points_list)
# returns max number of connections that p has
def max_connections(points_grid, p):
    """Largest number of recorded paths from point *p* to any other point."""
    row = points_grid[p]
    return max(row)
# creates the shortest connection from source to any comuna, returns
def create_shortest_connection(map_name, cursor, geom, target=None, points_name=None):
    """Insert a brand-new straight edge starting at *geom* into the map
    table.

    When *target* is None the nearest comuna in *points_name* is chosen;
    otherwise the given target geometry is connected directly.  Returns
    (target_geometry, inserted_edge_id).
    """
    if target == None:
        # find building cost and point id of closest comuna
        cursor.execute(
            "select b.pun_geom, min(ST_DistanceSphere(a.pun_geom, b.pun_geom)) m from %s a,%s b where a.pun_geom=%s and a.pun_id != b.pun_id group by b.pun_geom order by m limit 1;",
            (AsIs(points_name), AsIs(points_name), geom))
        target, distance = cursor.fetchall()[0]
    else:
        cursor.execute(
            "SELECT ST_DistanceSphere(%s, %s) ;",
            (geom, target))
        distance = cursor.fetchone()[0]
    cursor.execute("select cost_per_km from costs_table where type='nada';")
    # 2 ** 0.5 replaces math.sqrt(2): the file never imports math, so the
    # original line only worked (if at all) through a fragile
    # `from igraph import *` side effect
    path_cost = round((distance / 1000) * 2 ** 0.5 * cursor.fetchone()[0])
    # audit log of artificially created edges; `with` closes the handle
    with open('caminos_nuevos.txt', 'a') as new_path_log:
        new_path_log.write("\t".join((str(geom), str(target), str(path_cost))))
        new_path_log.write("\n")
    # camino geometry, x1 float, y1 float, x2 float, y2 float, tipo varchar(255), id_origen int, largo float, costo int, tabla_origen varchar(255), origen geometry, fin geometry
    cursor.execute(
        "INSERT INTO %s (camino, x1, y1, x2, y2, tipo, largo, costo, origen, fin) VALUES (ST_makeline(%s, %s), ST_X(%s), ST_Y(%s), ST_X(%s), ST_Y(%s), 'nada' ,%s, %s, %s, %s ) returning id; ",
        (AsIs(map_name), geom, target, geom, geom, target, target, distance, path_cost, geom, target))
    edge_id = cursor.fetchone()[0]
    return target, edge_id
if __name__ == "__main__":
main(sys.argv)
| niclabs/InternetResilience | suurballe.py | suurballe.py | py | 16,044 | python | en | code | 0 | github-code | 13 |
import os
import time
import hmac
import hashlib
import base64
import urllib.parse
import requests
from config import config
class DingTalk(object):
    """Minimal DingTalk robot client: builds signed webhook URLs and
    posts plain-text messages.

    All credentials are read from environment variables whose names come
    from the project `config` module, evaluated once at class definition.
    """
    SIGN_SECRET = os.environ.get(config.DING_ROBOT_SIGN_SECRET)
    BASE_URL = os.environ.get(config.DING_ROBOT_URL)
    TOKEN = os.environ.get(config.DING_TOKEN)

    @staticmethod
    def call(msg):
        """Send *msg* as a text message to the robot webhook."""
        url = DingTalk.gen_url()
        data = DingTalk.gen_text_data(msg)
        requests.post(url, json=data)

    @staticmethod
    def check_token(header):
        """Return True when the request header's Token matches ours."""
        token = header.get('Token')
        return DingTalk.TOKEN == token

    @staticmethod
    def get_msg(data):
        """Extract the text content from an incoming callback payload."""
        return data.get('text', {}).get('content')

    @staticmethod
    def gen_text_data(msg):
        """Build the DingTalk text-message JSON body."""
        return {
            "text": {
                "content": msg,
            },
            "msgtype": "text"
        }

    @staticmethod
    def gen_url():
        """Build the webhook URL with a millisecond timestamp + signature."""
        timestamp = str(round(time.time() * 1000))
        sign = DingTalk.gen_sign(timestamp)
        url = DingTalk.BASE_URL + '&timestamp={}&sign={}'.format(timestamp, sign)
        return url

    @staticmethod
    def gen_sign(timestamp):
        """HMAC-SHA256 sign "<timestamp>\\n<secret>", base64- and
        URL-encode the digest (DingTalk's documented signing scheme)."""
        secret = DingTalk.SIGN_SECRET
        secret_enc = secret.encode('utf-8')
        string_to_sign = '{}\n{}'.format(timestamp, secret)
        string_to_sign_enc = string_to_sign.encode('utf-8')
        hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
        sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
        return sign
if __name__ == '__main__':
dingClient = DingTalk()
    dingClient.call("test")
class Solution:
    def combine(self, n, k):
        """Return all k-element combinations of the integers 1..n.

        :type n: int
        :type k: int
        :rtype: List[List[int]]
        """
        candidates = list(range(1, n + 1))

        # return all combinations of `count` numbers drawn in increasing
        # index order from candidates[leftmost_index:]
        def combine_helper(leftmost_index, count):
            if count == 0:
                return [[]]
            result = []
            # stop early enough that `count` items can still be picked
            for num_index in range(leftmost_index, n - count + 1):
                for tail in combine_helper(num_index + 1, count - 1):
                    result.append([candidates[num_index]] + tail)
            return result

        # restored: the original return line was garbled in the source
        return combine_helper(0, k)
import os
import re
FIXES = [
('(g|G)ehiilf', '\g<1>ehülf'),
('Herrn\.', 'Herm.'),
('Job\.', 'Joh.'),
#(r'£(\d\d+)', '↯\g<1>'),
#(r'gasse(\d+)', r'gasse \g<1>'),
#(r'Kirclienfeld|Kirchen-\sIfeld', r'Kirchenfeld'),
#(r'\\Vildhain' , r'Wildhain'),
#(r'^\d+\s+(.+)$'
]
def apply_replacements():
    """Apply every (pattern, replacement) pair in FIXES to each .txt file
    in the proofread directory, rewriting the files in place."""
    dirpath = os.path.join(os.path.dirname(__file__), '..', '..', 'proofread')
    print(dirpath)
    for filename in sorted(os.listdir(dirpath)):
        if not filename.endswith('.txt'):
            continue
        path = os.path.join(dirpath, filename)
        # the fixes contain umlauts, so pin the encoding instead of
        # relying on the platform default
        with open(path, 'r', encoding='utf-8') as f:
            content = f.read()
        for (fro, to) in FIXES:
            content = re.sub(fro, to, content)
        with open(path, 'w', encoding='utf-8') as f:
            f.write(content)
if __name__ == '__main__':
apply_replacements()
| brawer/bern-addresses | src/cleanup/apply_replacement.py | apply_replacement.py | py | 851 | python | en | code | 2 | github-code | 13 |
import threading
class ThreadScraper(threading.Thread):
    """Worker thread that scrapes one result page via *parsing_data* and
    appends the outcome to the shared result list.

    NOTE(review): process_result is a CLASS attribute, so results from
    every ThreadScraper instance accumulate in the same shared list
    (apparently used to aggregate across workers) — appending from many
    threads relies on list.append being atomic in CPython.
    """
    process_result = []

    def __init__(self, session, offset, people, country, city, datein, dateout, is_detail, parsing_data):
        """Store the scraping parameters; *parsing_data* is the callable
        that performs the actual request/parse work."""
        threading.Thread.__init__(self)
        self.session = session
        self.offset = offset
        self.people = people
        self.country = country
        self.city = city
        self.datein = datein
        self.dateout = dateout
        self.is_detail = is_detail
        self.parsing_data = parsing_data

    def run(self):
        """Thread entry point: run parsing_data and record its result.
        (Restored: the original trailing line was garbled in the source.)"""
        self.process_result.append(self.parsing_data(self.session, self.people, self.country, self.city, self.datein, self.dateout, self.offset, self.is_detail))
import pandas.io.data as web
""" Download prices from an external data source """
class MarketDataSource:
    """Replays historical daily bars from a remote data source, firing a
    tick callback once per trading day."""
    def __init__(self):
        # callback invoked with the MarketData snapshot after each bar
        self.event_tick = None
        # symbol and pandas DataReader source name (e.g. 'yahoo')
        self.ticker, self.source = None, None
        self.start, self.end = None, None
        # NOTE(review): MarketData is neither defined nor imported in this
        # file — presumably provided elsewhere in the project; verify.
        self.md = MarketData()

    def start_market_simulation(self):
        # download the full history, then replay it bar by bar
        data = web.DataReader(self.ticker, self.source, self.start, self.end)
        for time, row in data.iterrows():
            self.md.add_last_price(time, self.ticker, row["Close"], row["Volume"])
            self.md.add_open_price(time, self.ticker, row["Open"])
            if not self.event_tick is None:
                self.event_tick(self.md)
| SFL012/quant | backtests/MarketDataSourceClass.py | MarketDataSourceClass.py | py | 626 | python | en | code | 1 | github-code | 13 |
# BOJ 10809: for each lowercase letter a-z, print the index of its first
# occurrence in the input word, or -1 when the letter is absent.
word = input()
# str.find already returns -1 for a missing character, replacing the
# original's count()/index() double scan per letter
positions = [word.find(letter) for letter in 'abcdefghijklmnopqrstuvwxyz']
print(" ".join(map(str, positions)))
| 1000hyehyang/BAEKJOON | 5. 문자열/10809.py | 10809.py | py | 250 | python | en | code | 0 | github-code | 13 |
import logging
def logged(exception, mode):
    """Decorator factory: intercept *exception* from the wrapped callable
    and log it to the console or to logfile.txt (the call then returns
    None instead of raising).

    Parameters
    ----------
    exception : BaseException subclass or tuple of them
        Exception type(s) to intercept.
    mode : str
        'console' or 'file'.

    Raises
    ------
    ValueError
        When an intercepted exception occurs and *mode* is not a valid
        option.
    """
    import functools

    def decorator(method):
        # preserve the wrapped callable's name/docstring for debugging
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            try:
                return method(*args, **kwargs)
            except exception as ex:
                if mode == 'console':
                    logging.error(str(ex))
                elif mode == 'file':
                    logging.basicConfig(filename='logfile.txt', level=logging.ERROR)
                    logging.error(str(ex))
                else:
                    # fixed: the original message offered the non-existent
                    # mode 'choose'; the valid modes are 'console'/'file'
                    raise ValueError("Недійсний запит. Оберіть 'console' або 'file'")
        return wrapper
    return decorator
| unchain3d/python_labs | decorators/decorator.py | decorator.py | py | 631 | python | uk | code | 0 | github-code | 13 |
from inventory_report.reports.colored_report import ColoredReport
from inventory_report.reports.simple_report import SimpleReport
products = [
{
"id": 1,
"nome_do_produto": "Cafe",
"nome_da_empresa": "Cafes Nature",
"data_de_fabricacao": "2020-07-04",
"data_de_validade": "2023-02-09",
"numero_de_serie": "FR48",
"instrucoes_de_armazenamento": "instrucao",
},
{
"id": 2,
"nome_do_produto": "Refrigerante",
"nome_da_empresa": "Cafes Nature",
"data_de_fabricacao": "2021-07-04",
"data_de_validade": "2024-02-09",
"numero_de_serie": "GR49",
"instrucoes_de_armazenamento": "instrucao",
},
{
"id": 3,
"nome_do_produto": "Bolo",
"nome_da_empresa": "Bolos Nature",
"data_de_fabricacao": "2022-07-04",
"data_de_validade": "2025-02-09",
"numero_de_serie": "AR40",
"instrucoes_de_armazenamento": "instrucao",
},
]
def test_decorar_relatorio():
    # ColoredReport must wrap SimpleReport's summary lines in ANSI colour
    # codes: green labels, blue dates, red company name.
    GREEN, BLUE, RED, END = ["\033[32m", "\033[36m", "\033[31m", "\033[0m"]
    result = (
        f"{GREEN}Data de fabricação mais antiga:{END} {BLUE}2020-07-04{END}\n"
        f"{GREEN}Data de validade mais próxima:{END} {BLUE}2023-02-09{END}\n"
        f"{GREEN}Empresa com mais produtos:{END} {RED}Cafes Nature{END}"
    )
    colored_report = ColoredReport(SimpleReport).generate(products)
    # NOTE(review): `colored_report in result` only checks the generated
    # text is a substring of the expected block — confirm whether an
    # equality assertion was intended.
    assert colored_report in result
| GustavoGracioM/inventory-report | tests/report_decorator/test_report_decorator.py | test_report_decorator.py | py | 1,464 | python | pt | code | 0 | github-code | 13 |
import turtle
import random
#_______________________ 1. DRAWING AND PREPARATION ______________________
#black screen + register Pac-Man shapes
screen = turtle.Screen()
screen.bgcolor("black")
screen.register_shape("Pac-Man", ((-9,-2), (-8,-4), (-7,-5), (-6.5,-6.5), (-5,-7), (-4,-8), (-2,-9), (0,-9.5), (2,-9), (4,-8), (5,-7), (6.5,-6.5), (7,-5), (8,-4), (9,-2), (9.5,0), (9,2), (8,4), (7,5), (6.5,6.5), (0,-2.5), (-6.5,6.5), (-7,5), (-8,4), (-9,2), (-9.5,0)))
screen.register_shape("Pac-Man2", ((-9,-2), (-8,-4), (-7,-5), (-6.5,-6.5), (-5,-7), (-4,-8), (-2,-9), (0,-9.5), (2,-9), (4,-8), (5,-7), (6.5,-6.5), (7,-5), (8,-4), (9,-2), (9.5,0), (9,2), (8,4), (7,5), (6.5,6.5), (5,7), (0,-2.5), (-5,7), (-6.5,6.5), (-7,5), (-8,4), (-9,2), (-9.5,0)))
screen.register_shape("Pac-Man3", ((-9,-2), (-8,-4), (-7,-5), (-6.5,-6.5), (-5,-7), (-4,-8), (-2,-9), (0,-9.5), (2,-9), (4,-8), (5,-7), (6.5,-6.5), (7,-5), (8,-4), (9,-2), (9.5,0), (9,2), (8,4), (7,5), (6.5,6.5), (5,7),(4,8), (0,-2.5), (-4,8), (-5,7), (-6.5,6.5), (-7,5), (-8,4), (-9,2), (-9.5,0)))
screen.register_shape("Cyrcle", ((-9,-2), (-8,-4), (-7,-5), (-6.5,-6.5), (-5,-7), (-4,-8), (-2,-9), (0,-9.5), (2,-9), (4,-8), (5,-7), (6.5,-6.5), (7,-5), (8,-4), (9,-2), (9.5,0), (9,2), (8,4), (7,5), (6.5,6.5), (5,7),(4,8), (2,9), (0,9.5), (-2,9), (-4,8), (-5,7), (-6.5,6.5), (-7,5), (-8,4), (-9,2), (-9.5,0)))
screen.tracer(0) # this disables animation
playing = True
vulnerable = False
winnum = 30
amountpellets = winnum
timer = 800
#create Pac-Man
pacman = turtle.Turtle()
pacman.penup()
pacman.goto(0,0)
pacman.speed(0)
pacman.shape("Pac-Man")
pacman.color("yellow")
#create boundary
t = turtle.Turtle()
t.color("blue")
t.ht()
t.penup()
t.goto(-230,250)
t.pendown()
for i in range(2):
t.forward(460)
t.right(90)
t.forward(500)
t.right(90)
#create ghosts (list)
ghosts = []
colors = 0
for i in range(4):
ghost = turtle.Turtle()
ghost.shape('turtle')
ghost.penup()
ghost.setheading(270)
if colors == 0:
ghost.color('red')
elif colors == 1:
ghost.color('pink')
elif colors == 2:
ghost.color('cyan')
elif colors == 3:
ghost.color('orange')
ghost.speed(0)
ghost.goto(random.randint(-210,210),240)
colors += 1
ghosts.append(ghost)
#create pellets (list)
pellets = []
for i in range(amountpellets - 1):
pellet = turtle.Turtle()
pellet.color('white')
pellet.shape('circle')
pellet.speed(0)
pellet.penup()
pellet.goto(random.randint(-220,220), random.randint(-240,240))
pellets.append(pellet)
powerpellet = turtle.Turtle()
powerpellet.color('yellow')
powerpellet.shape('circle')
powerpellet.speed(0)
powerpellet.penup()
powerpellet.goto(random.randint(-220,220), random.randint(-240,240))
#create scoreturtle (remaining pellets)
scoreturtle = turtle.Turtle()
scoreturtle.ht()
scoreturtle.color('white')
scoreturtle.penup()
scoreturtle.goto(-50,260)
scoreturtle.write('Score: 0/' + str(winnum))
score = 0
# create timeturtle (remaining time)
timeturtle = turtle.Turtle()
timeturtle.ht()
timeturtle.color('white')
timeturtle.penup()
timeturtle.goto(10,260)
timeturtle.write('Time: ' + str(timer))
# this updates the canvas with what has been drawn
screen.update()
#__________________________ 2. TECHNICALITIES ___________________________
#function definitions
def faceright():
    # Key handler: point Pac-Man east (0 deg); the main loop moves him forward.
    pacman.setheading(0)
def faceleft():
    # Key handler: point Pac-Man west (180 deg).
    pacman.setheading(180)
def faceup():
    # Key handler: point Pac-Man north (90 deg).
    pacman.setheading(90)
def facedown():
    # Key handler: point Pac-Man south (270 deg).
    pacman.setheading(270)
def abort():
    """Key handler: stop the game immediately and display 'GAME ABORTED'.

    Clears the `playing`/`vulnerable` flags so both game loops exit.
    """
    global playing
    global vulnerable
    screen.tracer(0)  # freeze animation while drawing the message
    lose = turtle.Turtle()
    # NOTE(review): no penup() before goto, so this turtle draws a line from
    # the origin to (-30, 0) — likely unintended; verify on screen.
    lose.goto(-30,0)
    lose.color('red')
    lose.ht()
    lose.write('GAME ABORTED')
    screen.update()
    screen.tracer(1)
    playing = False
    vulnerable = False
#button mapping
screen.onkey(faceright, "Right")
screen.onkey(faceleft, "Left")
screen.onkey(faceup, "Up")
screen.onkey(facedown, "Down")
# BUG FIX: Tk's keysym for the spacebar is "space" (lowercase).  The original
# bound "Space", which never matches a key event, so aborting silently did
# nothing.  Arrow-key names ("Up", "Down", ...) are correct as-is.
screen.onkey(abort, "space")
screen.listen()  # give the turtle canvas keyboard focus so bindings fire
screen.tracer(1) #enables animation again
#_________________________ 3. PLAYING THE GAME ___________________________
phase = 0
while playing: #!!!if spacebar is pressed, game will abort
#move pacman and ghosts, move time forward
screen.tracer(0)
pacman.forward(4)
far = 0
for ghost in ghosts:
if abs(ghost.ycor()) < 270 and abs(ghost.xcor()) < 270:
if abs(ghost.ycor() + 250) < 9:
ghost.goto(random.randint(-210,210),240)
if far == 0:
ghost.forward(13)
elif far == 1:
ghost.forward(10)
elif far == 2:
ghost.forward(9)
elif far == 3:
ghost.forward(7)
far += 1
timer -= 1
timeturtle.clear()
timeturtle.write('Time: ' + str(timer))
if abs(timer) < 2:
playing = False #!!!end condition: if time is over
screen.update()
screen.tracer(1)
#pacman's eating movements
if phase == 0:
pacman.shape("Cyrcle")
phase = 1
elif phase == 1:
pacman.shape("Pac-Man3")
phase = 2
elif phase == 2:
pacman.shape("Pac-Man2")
phase = 3
elif phase == 3:
pacman.shape("Pac-Man")
phase = 4
elif phase == 4:
pacman.shape("Pac-Man2")
phase = 5
elif phase == 5:
pacman.shape("Pac-Man3")
phase = 0
else:
phase = 0
#check for power pellet (ghosts now vulnerable for a ceratin amount of time)
if pacman.distance(powerpellet) < 15:
screen.tracer(0)
powerpellet.ht()
powerpellet.goto(-300,300)
score += 1
scoreturtle.clear()
scoreturtle.write('Score: ' + str(score) + '/' + str(winnum))
screen.update()
screen.tracer(1)
#!!!end condition: if all pellets are eaten
if score == winnum:
screen.tracer(0)
win = turtle.Turtle()
win.goto(-20,0)
win.color('white')
win.ht()
win.write('YOU WIN!!!')
screen.update()
screen.tracer(1)
playing = False
if abs(pacman.xcor() - 230) < 4:
pacman.goto(-225, pacman.ycor())
pacman.setheading(0)
elif abs(pacman.xcor() + 230) < 4:
pacman.goto(225, pacman.ycor())
pacman.setheading(180)
elif abs(pacman.ycor() - 250) < 4:
pacman.goto(pacman.xcor(), -245)
pacman.setheading(90)
elif abs(pacman.ycor() + 250) < 4:
pacman.goto(pacman.xcor(), 245)
pacman.setheading(270)
#___________________________ SUBGAME HERE ____________________________
vulnerable = True
timeup = 0
if timer > 100:
timeup = timer - 100
else:
timeup = 0
while(vulnerable & playing):
screen.tracer(0)
pacman.forward(7)
for ghost in ghosts:
if abs(ghost.ycor()) < 270 and abs(ghost.xcor()) < 270:
ghost.color('blue')
if abs(ghost.ycor() + 250) < 9:
ghost.goto(random.randint(-210,210),240)
ghost.forward(4)
if pacman.distance(ghost) < 15:
ghost.ht()
ghost.goto(-300,300)
timer += 25
timer -= 1
timeturtle.clear()
timeturtle.write('Time: ' + str(timer))
if abs(timer) < 2:
playing = False
vulnerable = False #!!!end condition: if time is over
screen.update()
screen.tracer(1)
#pacman's eating movements
if phase == 0:
pacman.shape("Cyrcle")
phase = 1
elif phase == 1:
pacman.shape("Pac-Man3")
phase = 2
elif phase == 2:
pacman.shape("Pac-Man2")
phase = 3
elif phase == 3:
pacman.shape("Pac-Man")
phase = 4
elif phase == 4:
pacman.shape("Pac-Man2")
phase = 5
elif phase == 5:
pacman.shape("Pac-Man3")
phase = 0
else:
phase = 0
if playing & vulnerable:
for pellet in pellets:
if pacman.distance(pellet) < 15:
screen.tracer(0)
pellet.ht()
pellet.goto(-300,300)
score += 1
scoreturtle.clear()
scoreturtle.write('Score: ' + str(score) + '/' + str(winnum))
screen.update()
screen.tracer(1)
#!!!end condition: if all pellets are eaten
if score == winnum:
screen.tracer(0)
win = turtle.Turtle()
win.goto(-20,0)
win.color('white')
win.ht()
win.write('YOU WIN!!!')
screen.update()
screen.tracer(1)
vulnerable = False
playing = False
if abs(pacman.xcor() - 230) < 4:
pacman.goto(-225, pacman.ycor())
pacman.setheading(0)
elif abs(pacman.xcor() + 230) < 4:
pacman.goto(225, pacman.ycor())
pacman.setheading(180)
elif abs(pacman.ycor() - 250) < 4:
pacman.goto(pacman.xcor(), -245)
pacman.setheading(90)
elif abs(pacman.ycor() + 250) < 4:
pacman.goto(pacman.xcor(), 245)
pacman.setheading(270)
if abs(timer - timeup) < 5:
colors = 0
for i in range(4):
if colors == 0:
ghosts[i].color('red')
elif colors == 1:
ghosts[i].color('pink')
elif colors == 2:
ghosts[i].color('cyan')
elif colors == 3:
ghosts[i].color('orange')
ghost.speed(0)
if abs(ghosts[i].ycor()) < 270 and abs(ghosts[i].xcor()) < 270:
ghosts[i].goto(random.randint(-210,210),240)
colors += 1
break
#checking for pellets
if playing:
for pellet in pellets:
if pacman.distance(pellet) < 15:
screen.tracer(0)
pellet.ht()
pellet.goto(-300,300)
score += 1
scoreturtle.clear()
scoreturtle.write('Score: ' + str(score) + '/' + str(winnum))
screen.update()
screen.tracer(1)
#!!!end condition: if all pellets are eaten
if score == winnum:
screen.tracer(0)
win = turtle.Turtle()
win.goto(-20,0)
win.color('white')
win.ht()
win.write('YOU WIN!!!')
screen.update()
screen.tracer(1)
playing = False
#!!!checking if it is touching the ghosts (end the game) - must be invulnerable
for ghost in ghosts:
if abs(ghost.ycor()) < 270 and abs(ghost.xcor()) < 270 and pacman.distance(ghost) < 15:
screen.tracer(0)
lose = turtle.Turtle()
lose.goto(-25,0)
lose.color('red')
lose.ht()
lose.write('GAME OVER')
screen.update()
screen.tracer(1)
playing = False
#checking boundaries (teleport to other side)
if abs(pacman.xcor() - 230) < 4:
pacman.goto(-225, pacman.ycor())
pacman.setheading(0)
elif abs(pacman.xcor() + 230) < 4:
pacman.goto(225, pacman.ycor())
pacman.setheading(180)
elif abs(pacman.ycor() - 250) < 4:
pacman.goto(pacman.xcor(), -245)
pacman.setheading(90)
elif abs(pacman.ycor() + 250) < 4:
pacman.goto(pacman.xcor(), 245)
pacman.setheading(270)
#you only get here if you reach the end of the game
# timer counts down by 1 per frame; |timer| < 2 means time ran out (a ghost
# collision or a win leaves timer > 1, so no message is shown in that case).
if abs(timer) < 2:
    screen.tracer(0)
    lose = turtle.Turtle()
    # NOTE(review): pen is down by default, so this draws a line to (-25, 0).
    lose.goto(-25,0)
    lose.color('red')
    lose.ht()
    lose.write('GAME OVER')
    screen.update()
    screen.tracer(1)
#GAME OVER or no message
# idea:
# have everything stop: spin when dying from ghost | yaleyang5/Pac-Man-With-Turtle | Pac-Man.py | Pac-Man.py | py | 11,176 | python | en | code | 0 | github-code | 13 |
41940971048 | from menu import Menu, MenuItem
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
coffee_maker = CoffeeMaker()
money_machine = MoneyMachine()
_menu = Menu()
# Sentinel so the loop condition passes on the first iteration; any value
# other than 'off' would do.
order = "on"
while order != 'off':
    menu = _menu.get_items()
    order = input(f"What would you like? {menu}:")
    if order == 'report':
        # Print remaining resources and money collected so far.
        coffee_maker.report()
        money_machine.report()
    elif order == 'off':
        # No-op assignment; this branch exists only to skip the drink lookup
        # below — the loop condition then terminates the program.
        order = 'off'
    else:
        choice = _menu.find_drink(order_name=order)
        # Brew only if ingredients suffice AND payment succeeds.
        if coffee_maker.is_resource_sufficient(choice) and money_machine.make_payment(choice.cost):
            coffee_maker.make_coffee(choice)
| emmanuelkb/100-days-of-code | oop-coffee-machine-start/main.py | main.py | py | 641 | python | en | code | 0 | github-code | 13 |
7836548570 | #This code is adapted from
#https://dashee87.github.io/football/python/predicting-football-results-with-statistical-modelling/
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from scipy.stats import poisson,skellam
epl = pd.read_csv("http://www.football-data.co.uk/mmz4281/1920/E0.csv")
ep = epl[['HomeTeam','AwayTeam','FTHG','FTAG']]
epl = epl.rename(columns={'FTHG': 'HomeGoals', 'FTAG': 'AwayGoals'})
epl.head()
epl = epl[:-10]
epl.mean()
# importing the tools required for the Poisson regression model
import statsmodels.api as sm
import statsmodels.formula.api as smf
goal_model_data = pd.concat([epl[['HomeTeam','AwayTeam','HomeGoals']].assign(home=1).rename(
columns={'HomeTeam':'team', 'AwayTeam':'opponent','HomeGoals':'goals'}),
epl[['AwayTeam','HomeTeam','AwayGoals']].assign(home=0).rename(
columns={'AwayTeam':'team', 'HomeTeam':'opponent','AwayGoals':'goals'})])
#Fit the model to the data
#Home advantage included
#Team and opponent as fixed effects.
poisson_model = smf.glm(formula="goals ~ home + team + opponent", data=goal_model_data,
family=sm.families.Poisson()).fit()
poisson_model.summary()
home_team='Man City'
away_team='Arsenal'
#Predict for Arsenal vs. Manchester City
# Expected goals for each side from the fitted Poisson regression.
home_score_rate=poisson_model.predict(pd.DataFrame(data={'team': home_team, 'opponent': away_team,
                                                       'home':1},index=[1]))
# BUG FIX: the away side plays without home advantage, so it must be
# predicted with home=0.  The original passed home=1 here, inflating the
# away scoring rate; simulate_match below correctly uses home=0.
away_score_rate=poisson_model.predict(pd.DataFrame(data={'team': away_team, 'opponent': home_team,
                                                       'home':0},index=[1]))
print(home_team + ' against ' + away_team + ' expect to score: ' + str(home_score_rate))
print(away_team + ' against ' + home_team + ' expect to score: ' + str(away_score_rate))
#Lets just get a result
# Draw one random scoreline from independent Poissons at the predicted rates.
home_goals=np.random.poisson(home_score_rate)
away_goals=np.random.poisson(away_score_rate)
print(home_team + ': ' + str(home_goals[0]))
print(away_team + ': ' + str(away_goals[0]))
#Code to caluclate the goals for the match.
def simulate_match(foot_model, homeTeam, awayTeam, max_goals=10):
home_goals_avg = foot_model.predict(pd.DataFrame(data={'team': homeTeam,
'opponent': awayTeam,'home':1},
index=[1])).values[0]
away_goals_avg = foot_model.predict(pd.DataFrame(data={'team': awayTeam,
'opponent': homeTeam,'home':0},
index=[1])).values[0]
team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals+1)] for team_avg in [home_goals_avg, away_goals_avg]]
return(np.outer(np.array(team_pred[0]), np.array(team_pred[1])))
max_goals=5
score_matrix=simulate_match(poisson_model, home_team, away_team,max_goals)
fig=plt.figure()
#Make 2d histogram of results
from pylab import rcParams
rcParams['figure.figsize'] = 12/2.54, 8/2.54
ax=fig.add_subplot(1,1,1)
pos=ax.imshow(score_matrix, extent=[-0.5,max_goals+0.5,-0.5,max_goals+0.5], aspect='auto',cmap=plt.cm.Reds)
fig.colorbar(pos, ax=ax)
ax.set_title('Probability of outcome')
plt.xlim((-0.5,5.5))
plt.ylim((-0.5,5.5))
plt.tight_layout()
ax.set_xlabel('Goals scored by ' + away_team)
ax.set_ylabel('Goals scored by ' + home_team)
plt.show()
fig.savefig('output/2DOutcomes.pdf' , dpi=None, bbox_inches="tight")
#Home, draw, away probabilities
homewin=np.sum(np.tril(score_matrix, -1))
draw=np.sum(np.diag(score_matrix))
awaywin=np.sum(np.triu(score_matrix, 1))
| Friends-of-Tracking-Data-FoTD/SoccermaticsForPython | 11SimulateMatches.py | 11SimulateMatches.py | py | 3,612 | python | en | code | 360 | github-code | 13 |
26869775385 | # -*- coding: utf-8 -*-
import pandas as pd
from time import time
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from matplotlib import pyplot
def load_data(file_path):
    """Load travel-time records and min-max normalise them.

    Expects a ';'-separated CSV with at least 'date', 'length' and
    'travel_time' columns.  Returns a DataFrame with the single column
    'travel_time_normalise', where 0 corresponds to the free-flow travel
    time at 120 km/h and 1 to the slowest observed trip.
    """
    df = pd.read_csv(file_path, delimiter=';', parse_dates=['date'])
    length = df['length'][0]  # link length; same for every row, so take row 0
    v_max = 120  # assumed free-flow speed in km/h — TODO confirm units
    # seconds at free flow; the *3.6 factor presumes length is in metres
    t_min = length/v_max * 3.6
    t_max = df['travel_time'].max()
    # Plain min-max scaling; a log transform was considered but not applied.
    df['travel_time_normalise'] = df['travel_time'].map(lambda x: (x-t_min) / (t_max-t_min))
    return df[['travel_time_normalise']]
def series_to_supervised(data, n_in=50, n_out=1):
    """Reframe a time series as a supervised-learning table.

    Args:
        data: DataFrame-like, one column per variable.
        n_in: int, number of lag steps used as input (t-n_in, ..., t-1).
        n_out: int, number of future steps used as output (t, ..., t+n_out-1).

    Returns:
        DataFrame whose columns are the shifted series, named
        'var<j>(t-i)' / 'var<j>(t)' / 'var<j>(t+i)'.  Edge rows contain NaN
        because shifting leaves them without data (caller must drop them).
    """
    frame = pd.DataFrame(data)
    n_vars = frame.shape[1]
    shifted, headers = [], []
    # Input (lag) columns: t-n_in down to t-1.
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        headers.extend('var%d(t-%d)' % (var + 1, lag) for var in range(n_vars))
    # Output (lead) columns: t up to t+n_out-1.
    for lead in range(n_out):
        shifted.append(frame.shift(-lead))
        if lead == 0:
            headers.extend('var%d(t)' % (var + 1) for var in range(n_vars))
        else:
            headers.extend('var%d(t+%d)' % (var + 1, lead) for var in range(n_vars))
    result = pd.concat(shifted, axis=1)
    result.columns = headers
    return result
def train_test_split(data, train_size=0.8, valid_size=0.1, test_size=0.1):
    """Split a supervised frame into contiguous train/valid/test arrays.

    The last column is the prediction target; all other columns are inputs.

    Args:
        data: DataFrame produced by series_to_supervised.
        train_size: float, leading fraction of rows used for training.
        valid_size: float, middle fraction used for validation.
        test_size: float, trailing fraction used for testing.

    Returns:
        (train_X, train_y, valid_X, valid_y, test_X, test_y), with each X
        reshaped to [samples, timesteps=1, features] for the LSTM input.
    """
    values = data.values
    n_rows = data.shape[0]
    train_end = int(n_rows * train_size)
    valid_end = int(n_rows * (train_size + valid_size))
    test_start = int(n_rows * (1 - test_size))

    def to_xy(rows):
        X, y = rows[:, :-1], rows[:, -1]
        return X.reshape((X.shape[0], 1, X.shape[1])), y

    train_X, train_y = to_xy(values[:train_end, :])
    valid_X, valid_y = to_xy(values[train_end:valid_end, :])
    test_X, test_y = to_xy(values[test_start:, :])
    return train_X, train_y, valid_X, valid_y, test_X, test_y
if __name__ == '__main__':
    global_start_time = time()  # NOTE(review): captured but never reported
    # NOTE(review): epochs and seq_len are defined but unused below — the
    # fit() call hardcodes epochs=50 and the reframe uses n_in=1; confirm
    # which values are intended.
    epochs = 1
    seq_len = 96
    print('>>> loading data... ')
    df_data = load_data('data/HE_2013_M25LM326_6_10.csv')
    # One lag step in, one step out => one input feature per sample.
    df_reframe = series_to_supervised(df_data, 1, 1)
    print(df_reframe.head())
    train_X, train_y, valid_X, valid_y, test_X, test_y = train_test_split(df_reframe)
    # LSTM
    model = Sequential()
    model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]))) # shape=(*, 1, 1)
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='rmsprop')
    # fit
    history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(valid_X, valid_y), verbose=2,
                        shuffle=False)
    # plot history: training vs validation loss per epoch
    pyplot.plot(history.history['loss'], label='train')
    pyplot.plot(history.history['val_loss'], label='test')
    pyplot.legend()
    pyplot.show()
    #
    # # make a prediction
    # yhat = model.predict(test_X)
    # test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
    # # invert scaling for forecast
    # inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
    # inv_yhat = scaler.inverse_transform(inv_yhat)
    # inv_yhat = inv_yhat[:, 0]
    # # invert scaling for actual
    # test_y = test_y.reshape((len(test_y), 1))
    # inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
    # inv_y = scaler.inverse_transform(inv_y)
    # inv_y = inv_y[:, 0]
    # # calculate RMSE
    # rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
    # print('Test RMSE: %.3f' % rmse)
| zydarChen/predict_17 | Highways_England/LM326.py | LM326.py | py | 4,022 | python | en | code | 1 | github-code | 13 |
35452136903 | import random
from gevent import monkey
from multiprocessing import Process
from myexperiements.sockettest.socket_server import HoneyBadgerBFTNode
from myexperiements.sockettest.make_key_files import *
monkey.patch_all(thread=False)
def _test_honeybadger_2(N=4, f=1, seed=None):
    """Launch N HoneyBadgerBFT nodes on localhost and wait for completion.

    Args:
        N: total number of nodes.
        f: number of Byzantine faults tolerated (requires N > 3f).
        seed: optional RNG seed (currently unused — TODO confirm intent).
    """
    def run_hbbft_instance(badger: HoneyBadgerBFTNode):
        badger.run_hbbft_instance()

    rnd = random.Random(seed)  # not used below; kept for interface stability
    sid = 'sidA'
    trusted_key_gen(N, f)  # generate the shared threshold key files
    # Nodes list: every node listens on 127.0.0.1 with 200-port spacing.
    host = "127.0.0.1"
    port_base = 10007
    addresses = [(host, port_base + 200 * i) for i in range(N)]
    print(addresses)
    K = 10  # rounds per node
    B = 1   # batch size
    badgers = [HoneyBadgerBFTNode(sid, i, B, N, f, addresses_list=addresses, K=K)
               for i in range(N)]
    processes = [Process(target=run_hbbft_instance, args=(badger,))
                 for badger in badgers]
    # BUG FIX: the original called start() and join() for each process inside
    # one loop, so node i had to terminate before node i+1 was even started.
    # A BFT protocol needs all nodes running concurrently to exchange
    # messages, so that ordering blocks on the very first node.  Start all
    # processes first, then wait for them.
    for p in processes:
        p.start()
    for p in processes:
        p.join()
# Test by processes
def test_honeybadger_proc():
    # Smoke test with N=7 nodes tolerating f=2 Byzantine faults (N > 3f).
    _test_honeybadger_2(7, 2)
if __name__ == '__main__':
test_honeybadger_proc()
| yylluu/dumbo | myexperiements/localtests/my_run_hbbft_socket.py | my_run_hbbft_socket.py | py | 1,116 | python | en | code | 16 | github-code | 13 |
17628546505 | from selenium import webdriver
import time
from selenium.webdriver.common.by import By
import requests
# Prompt for the student id and academic year (prompts are user-facing
# Chinese strings and are left untouched).
user=input('请输入学号:')
xnm=input('请输入学年:')
# Create the Edge browser driver.
driver = webdriver.Edge()
# Open the login page of the academic-affairs system.
driver.get('http://111.75.254.215:9002/jwglxt/xtgl/login_slogin.html')
# Perform the login: fill in the username and password fields.
username_input = driver.find_element(By.ID, "yhm")
password_input = driver.find_element(By.ID, "mm")
username_input.send_keys(user)
# NOTE(review): hardcoded default password — a security/maintenance hazard.
password_input.send_keys('xs123456')
# Click the login button.
login_button = driver.find_element(By.ID, "dl")
login_button.click()
# Wait for the login to finish and cookies to be set (implicit wait plus a
# fixed 5-second sleep as a crude page-load guard).
driver.implicitly_wait(10)
time.sleep(5)
# Collect all cookies from the authenticated session.
cookies = driver.get_cookies()
# Print the cookies for debugging.
for cookie in cookies:
    print(cookie)
# Close the browser; the session cookie is reused below via requests.
driver.quit()
# NOTE(review): only the first cookie is forwarded; assumes it is the
# session cookie — verify this holds for the target site.
aa=cookies[0]
cok=f'{aa["name"]}={aa["value"]}'
# Request the personal timetable endpoint with the captured cookie.
res=requests.post(
    url=f"http://111.75.254.215:9002/jwglxt/kbcx/xskbcx_cxXsgrkb.html?gnmkdm=N2151&su={user}",
    headers={
        "Accept":"*/*",
        "Accept-Encoding":"gzip, deflate",
        "Accept-Language":"zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Content-Length":"28",
        "Content-Type":"application/x-www-form-urlencoded;charset=UTF-8",
        "Cookie":cok,
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.54"
    },
    data={
        "xnm": xnm,
        "xqm": "3",
        "kzlx": "ck",
    }
)
print(res.text)
73806846737 | """
pyexcel.sheets
~~~~~~~~~~~~~~~~~~~
Representation of data sheets
:copyright: (c) 2014-2015 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
from .nominablesheet import NominableSheet
class Sheet(NominableSheet):
    """Two dimensional data container for filtering, formatting and iteration

    :class:`Sheet` is a container for a two dimensional array, where individual
    cell can be any Python types. Other than numbers, values of these
    types: string, date, time and boolean can be mixed in the array. This
    differs from Numpy's matrix where each cell are of the same number type.

    In order to prepare two dimensional data for your computation, formatting
    functions help convert array cells to required types. Formatting can be
    applied not only to the whole sheet but also to selected rows or columns.
    Custom conversion function can be passed to these formatting functions. For
    example, to remove extra spaces surrounding the content of a cell, a custom
    function is required.

    Filtering functions are used to reduce the information contained in the
    array.
    """
    def save_to(self, source):
        """Save to a writeable data source

        :param source: any object with a ``write_data(sheet)`` method
        """
        source.write_data(self)

    def save_as(self, filename, **keywords):
        """Save the content to a named file

        :param str filename: output file path; the extension selects the format
        :param keywords: extra parameters forwarded to the file writer
        """
        from ..sources import SheetSource
        source = SheetSource(file_name=filename, **keywords)
        return self.save_to(source)

    def save_to_memory(self, file_type, stream, **keywords):
        """Save the content to memory

        :param str file_type: any value of 'csv', 'tsv', 'csvz',
                              'tsvz', 'xls', 'xlsm', 'xslm', 'ods'
        :param iostream stream: the memory stream to be written to
        """
        self.save_as((file_type, stream), **keywords)

    def save_to_django_model(self, model, initializer=None, mapdict=None, batch_size=None):
        """Save to database table through django model

        :param model: a django model class (or a tuple of (model, column_names,
                      name_columns_by_row, name_rows_by_column)).
        :param initializer: needed when the supplied model has a custom
                            initialization function.
        :param mapdict: needed when the column headers of your sheet do not
                        match the column names of the supplied model.
        :param batch_size: number of rows written per bulk insert.
        """
        from ..sources import SheetDjangoSource
        source = SheetDjangoSource(model=model, initializer=initializer, mapdict=mapdict, batch_size=batch_size)
        self.save_to(source)

    def save_to_database(self, session, table,
                         initializer=None, mapdict=None, auto_commit=True):
        """Save data in sheet to database table

        :param session: database session
        :param table: a database table (or a tuple of (table, initializer,
                      mapdict, name_columns_by_row, name_rows_by_column)).
        :param initializer: needed when the supplied table has a custom
                            initialization function.
        :param mapdict: needed when the column headers of your sheet do not
                        match the column names of the supplied table.
        :param auto_commit: commit the session after writing (default True).
        """
        from ..sources import SheetSQLAlchemySource
        source = SheetSQLAlchemySource(
            session=session,
            table=table,
            initializer=initializer,
            mapdict=mapdict,
            auto_commit=auto_commit
        )
        self.save_to(source)
| ayasavolian/derived-attributes | venv/lib/python2.7/site-packages/pyexcel/sheets/sheet.py | sheet.py | py | 3,828 | python | en | code | 0 | github-code | 13 |
74812497936 | #!/usr/bin/env python
import roslib
import rospy
import tf
from tf.msg import tfMessage
from geometry_msgs.msg import TransformStamped
import tfx
import pickle
class CTRB:
    """Publishes a pickled robot-to-chessboard registration transform on /tf.

    Loads a previously saved transform for one arm of a da Vinci robot and
    republishes it as a latched TF message at 2 Hz.
    """
    def __init__(self):
        print("init")

    def publishLatchTransforms(self):
        """ROS entry point: init the node and publish the left-arm transform."""
        rospy.init_node('ctrb', anonymous=True)
        self.publishLatchTransform("left")
        while not rospy.is_shutdown():
            rospy.spin()

    def publishLatchTransform(self, arm):
        """Load the pickled transform for `arm` ('left'/'right') and publish it.

        Blocks until ROS shutdown, re-publishing the message every 0.5 s.
        """
        if arm == 'left':
            self.pub_tf_left = rospy.Publisher("/tf", tfMessage, queue_size=1, latch=True)
        else:
            self.pub_tf_right = rospy.Publisher("/tf", tfMessage, queue_size=1, latch=True)
        # NOTE(review): self.get_transform_callback is not defined in this
        # class, and PoseStamped is not in the visible imports — this line
        # would fail at runtime as written.
        listener_camera = rospy.Subscriber("/BC/chessboard_pose", PoseStamped, self.get_transform_callback)
        # NOTE(review): rospy.Subscriber() with no arguments raises a
        # TypeError; this looks like an unfinished stub.
        listener_robot = rospy.Subscriber()
        # Load the saved transform (hardcoded machine-specific path).
        f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/robot_transform_" + arm + \
            ".p", "r")
        p = pickle.load(f)
        f.close()
        pt = p.translation
        rot = p.rotation
        print("x: " + str(pt.x))
        print("y: " + str(pt.y))
        print("z: " + str(pt.z))
        print("x: " + str(rot.x))
        print("y: " + str(rot.y))
        print("z: " + str(rot.z))
        print("w: " + str(rot.w))

        # Send static link transforms
        msg = TransformStamped()
        msg.header.stamp = rospy.Time.now()
        msg.transform.rotation.x = rot.x
        msg.transform.rotation.y = rot.y
        msg.transform.rotation.z = rot.z
        msg.transform.rotation.w = rot.w
        msg.child_frame_id = "registration_brick"
        msg.transform.translation.x = pt.x
        msg.transform.translation.y = pt.y
        msg.transform.translation.z = pt.z
        if arm == 'left':
            msg.header.frame_id = "one_remote_center_link"
        else:
            msg.header.frame_id = "two_remote_center_link"
        # ???
        # NOTE(review): always publishes on pub_tf_left, so the 'right' arm's
        # publisher created above is never used — confirm intended behaviour.
        while not rospy.is_shutdown():
            msg.header.stamp = rospy.Time.now()
            self.pub_tf_left.publish([msg])
            rospy.sleep(0.5)
rospy.sleep(0.5)
if __name__ == '__main__':
c = CTRB()
c.publishLatchTransforms() | yjen/camera_registration | ctrb.py | ctrb.py | py | 2,201 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.