seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10412589273 | from collections import Counter
# One-hot vector representation of the most common tokens in a resume corpus.
#
# Pipeline: read the resume text, tokenize it, split the token stream into 6
# roughly equal "documents", collect the 5 most frequent tokens of each
# document, then encode every such token as a 6-dim binary presence vector.
import numpy as np
import pandas
import pprint
# noinspection PyUnresolvedReferences
from utils import tokenize
# nltk.download('stopwords')
# importing corpus as resume; `with` guarantees the handle is closed even if
# reading raises (the original open()/close() pairs leaked on error)
with open('../assets/resume.txt', 'r') as resume_file:
    resume = resume_file.read().lower()
# tokenizing the resume
tokens = tokenize(resume)
# dividing corpus into 6 documents: 5 equal slices plus the remainder
k = len(tokens) // 6
documents = [tokens[i * k: (i + 1) * k] for i in range(5)]
documents.append(tokens[5 * k:])
# calculating most common 5 tokens from each document
most_common = set()
for document in documents:
    frequencies = Counter(document)
    for word, frequency in frequencies.most_common(5):
        most_common.add(word)
# creating one hot vector for each word in most common:
# vector[i] == 1 iff the word occurs anywhere in document i
vectors = {}
for word in most_common:
    vectors[word] = [int(word in document) for document in documents]
pprint.pp(vectors)
# one hot vector representation
table = pandas.DataFrame(data=vectors)
# writing the table in a text file to view output
with open('../assets/one-hot-vector.txt', 'w') as file:
    file.write(table.to_string())
| anishLearnsToCode/bow-representation | src/one-hot-vector.py | one-hot-vector.py | py | 1,229 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "utils.tokenize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pprint.pp",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"... |
35533985483 | import argparse
import socket
from pretenders.server.base import in_parent_process, save_pid_file
from pretenders.server.log import get_logger
from pretenders.client import BossClient
from pretenders.common.constants import RETURN_CODE_PORT_IN_USE
LOGGER = get_logger("pretenders.server.pretender")
class Pretender(object):
    """A mock-server process that registers itself with the Boss server.

    Subclasses implement :meth:`run` with the actual serving loop; the class
    provides command-line bootstrapping and boss-API access.
    """

    def __init__(self, uid, host, port, boss_port):
        self.uid = uid
        self.boss_port = boss_port
        self.host = host
        self.port = port
        # HTTP channel back to the boss API, used e.g. for preset replay.
        self.boss_api_handler = BossClient(host, boss_port).boss_access
        if in_parent_process():
            save_pid_file("pretenders-mock-{0}.pid".format(uid))

    def run(self):
        # Subclasses must override this with the serving loop.
        raise NotImplementedError("run() not defined in {0}".format(self.__class__))

    @classmethod
    def start(cls):
        """Build a server from command-line args and run it until it stops."""
        server = cls.from_command_line_args()
        try:
            server.run()
        except socket.error as e:
            # Typically the port is already taken; log and exit with the
            # dedicated return code so the boss can react.
            LOGGER.exception("Socket error", exc_info=e)
            LOGGER.info(f"QUITTING {server.uid} {server.__class__.__name__}")
            import sys
            sys.exit(RETURN_CODE_PORT_IN_USE)

    @classmethod
    def from_command_line_args(cls):
        """Default parser for mock server scripts.

        Parse command line args and build an instance from them.
        """
        parser = argparse.ArgumentParser(description="Start the server")
        parser.add_argument("-H", "--host", dest="host", default="localhost",
                            help="host/IP to run the server on (default: localhost)")
        parser.add_argument("-p", "--port", dest="port", type=int, default=8001,
                            help="port number to run the server on (default: 8001)")
        parser.add_argument("-b", "--boss", dest="boss_port", default="8000",
                            help="port for accessing the Boss server.")
        parser.add_argument("-d", "--debug", dest="debug", default=False,
                            action="store_true",
                            help="start a build right after creation")
        parser.add_argument("-i", "--uid", dest="uid")
        opts = parser.parse_args()
        return cls(uid=opts.uid, host=opts.host,
                   port=opts.port, boss_port=opts.boss_port)

    def store_history_retrieve_preset(self, body):
        """POST `body` to the boss replay endpoint for this mock's uid."""
        replay_url = "/replay/{0}".format(self.uid)
        return self.boss_api_handler.http("POST", url=replay_url, body=body)
| pretenders/pretenders | pretenders/server/pretender.py | pretender.py | py | 2,599 | python | en | code | 108 | github-code | 1 | [
{
"api_name": "pretenders.server.log.get_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pretenders.client.BossClient",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pretenders.server.base.in_parent_process",
"line_number": 20,
"usage_type": ... |
12143832924 | import config
import classes.player
import classes.club
def create_players():
    '''
    Create the players for the game and attach them to their clubs.

    Prerequisite: the clubs must already exist on ``config.game`` — this
    function joins every created player to a club.

    For each club it first instantiates any fixed players listed for that
    club in the config file, then fills the roster up to
    ``config.settings_data['players_per_team']`` with randomly generated
    players (currently always Human; race selection is a future concern).
    '''
    print("starting create_players")
    # iterate through the clubs attribute of game object
    for club in config.game.clubs:
        # get just the team from the config file that relates to this club
        team_data = next(team for team in config.teams_data if team['name'] == club.name)
        # fixed players are optional in the config; default to an empty list
        # so clubs without any still get a full roster of random players
        # (indexing team_data['players'] unconditionally raised KeyError)
        fixed_player_uuids = team_data.get('players', [])
        for player_uuid in fixed_player_uuids:
            # locate the matching player attributes in the config file
            player_data = next(player for player in config.players_data if player['uuid'] == player_uuid)
            # resolve the race name to its class on classes.player;
            # getattr avoids eval() on config-supplied strings
            race_class = getattr(classes.player, player_data['race'])
            player = race_class(player_data)
            # store the player in the game and join it to the club
            classes.game.Game.add_player(config.game, player)
            classes.club.Club.add_player_to_club(club, player)
        # now create random players to make up the rest of the team
        for _ in range(config.settings_data['players_per_team'] - len(fixed_player_uuids)):
            # need a way to decide which race this player is. Hardcoded as Human currently.
            player = classes.player.Human()
            classes.game.Game.add_player(config.game, player)
            classes.club.Club.add_player_to_club(club, player)
import classes.game
| chimel3/stormbowl | createplayers.py | createplayers.py | py | 3,440 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.game",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "config.teams_data",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "config.players_data",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "clas... |
11587990326 | #!/usr/bin/env python3
"""
Solution for embedded items 5x5 matrices
- plot data as grid and average response in each category (col 0)
- compare with random/no-structure model (col 1)
- compare with user-specified model (col 2)
"""
# core
import csv, os, re
# data management
import pandas as pd
# scientific computing
import numpy as np
from scipy import linalg, mat, dot
from scipy.stats import norm
# vis
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib as mpl
# progress bar
from tqdm import tqdm
def mat2grid(M):
    """Convert a (respondents x 5) response matrix into a 5x5 grid of
    relative response frequencies.

    Rows index the response value (1..5 mapped to 0..4), columns index the
    item; NaN responses are skipped. Each column is normalised to sum to 1.
    """
    grid = np.zeros([5, 5])
    # tally how often each response value occurs in every column
    for col_idx in range(5):
        for value in M[:, col_idx]:
            if not np.isnan(value):
                grid[int(value) - 1, col_idx] += 1
    # turn counts into relative frequencies, column by column
    for col_idx in range(grid.shape[0]):
        grid[:, col_idx] = grid[:, col_idx] / sum(grid[:, col_idx])
    return grid
def addnoise(mdl, step=2):
    """Add symmetric integer noise in [-step, +step] to model responses.

    Parameters
    ----------
    mdl : np.ndarray
        Integer model responses on the 1..5 scale.
    step : int
        Maximum absolute perturbation per entry.

    Returns
    -------
    np.ndarray
        Same shape as `mdl`, perturbed and clipped back into [1, 5].
    """
    # randint's `high` bound is exclusive, so (-step, step+1) yields values in
    # [-step, step]. The original low bound of -1-step produced [-step-1, step],
    # skewing the noise negative and contradicting the "+/- step" docstring.
    noise = np.random.randint(-step, high=step + 1, size=mdl.shape)
    # clip keeps every perturbed response on the valid 1..5 scale
    return np.clip(mdl + noise, 1, 5)
def geom_dist_mat(M1, M2):
    """Mean cosine distance (1 - cosine similarity) between the corresponding
    columns of M1 and M2.

    Both matrices must have the same shape. Returns a scalar; 0 means every
    column pair is perfectly aligned.
    """
    col_dists = []
    for i, m1 in enumerate(M1.T):
        m2 = M2[:, i]
        # cosine similarity of column i. np.dot / np.linalg.norm replace the
        # scipy top-level `dot` alias used originally, which is deprecated and
        # removed in modern SciPy releases.
        sim = np.dot(m1, m2.T) / np.linalg.norm(m1) / np.linalg.norm(m2)
        col_dists.append(1 - sim)
    return np.mean(col_dists)
def mdl2plot(M,mdl,q_str,filename="embed_plt.png",outname="embed_data.txt"):
    """Plot the observed response grid next to a random and a contrast model.

    Top row: data grid, random-model grid, contrast-model grid (5x5 relative
    frequencies via mat2grid). Bottom row: per-item means for the data, and
    the distribution of cosine distances between the data grid and n noisy
    model draws, with a fitted normal PDF.

    Parameters
    ----------
    M : np.ndarray
        Respondent x 5-item response matrix (1..5 scale, NaN allowed).
    mdl : sequence of int
        One row of contrast-model responses; tiled to M's row count.
    q_str : str
        Title for the data panel.
    filename, outname : str
        Output paths for the figure and the grid/simulation data.
    """
    # create matrices
    ## data
    X = mat2grid(M)
    ## random model: uniform responses in 1..5 over M's shape
    mdl0 = np.random.randint(1, high=6, size=M.shape)
    ## contrast model: the user-specified row repeated for every respondent
    mdl1 = np.array([mdl for row in M])
    ## number of samples from model
    n = 1000
    dim1_str = ["Nothing", "Plans", "Locally","Strategy","Portfolio"]
    dim2_str = ["No","Yes"]
    mdl_spc = [mdl0, mdl1]
    mdl_strs = ["Random model","Contrast model"]
    mdl_spc_size = len(mdl_spc)
    # figure properties: 2 rows x (data + one column per model)
    fig, ax = plt.subplots(2,mdl_spc_size+1,dpi=300)
    fig.set_size_inches(9,6)
    fig.subplots_adjust(wspace = .6, hspace = .25)
    # data model
    pl = ax[0,0].pcolormesh(X,cmap = plt.cm.Spectral_r,edgecolor='k',shading='gouraud')
    ax[0,0].axis('image')
    ax[0,0].set_title(q_str)
    ax[0,0].set_xticks(range(0,5))
    ax[0,0].set_xticklabels(dim1_str)
    ax[0,0].set_yticks([0.5,3.5])
    ax[0,0].set_yticklabels(dim2_str)
    for tick in ax[0,0].get_xticklabels():
        tick.set_rotation(45)
    divider = make_axes_locatable(ax[0,0])
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = fig.colorbar(pl,cax=cax)
    # central tendencies for data (error bars: std / sqrt(N), i.e. SEM)
    y = np.nanmean(M,axis=0)
    error = [val/np.sqrt(M.shape[0]) for val in np.nanstd(M,axis=0)]
    xpos = range(0,len(y))
    ax[1,0].bar(xpos, y, yerr=error, align='center',alpha=0.5, color="r", ecolor='blue',capsize=0)
    ax[1,0].set_xticks(range(0,5))
    ax[1,0].set_xticklabels(dim1_str)
    ax[1,0].set_ylabel("$Mean$")
    for tick in ax[1,0].get_xticklabels():
        tick.set_rotation(45)
    # plot random and user-specified model
    out = list(); out.append(X)
    for i, mdl in enumerate(mdl_spc):
        # one noisy draw of the model, rendered as a grid
        mdl_noise = mat2grid(addnoise(mdl))
        out.append(mdl_noise)
        pl = ax[0,i+1].pcolormesh(mdl_noise,cmap = plt.cm.Spectral_r,edgecolor='k',shading='gouraud')
        ax[0,i+1].axis('image')
        ax[0,i+1].set_title(mdl_strs[i])
        ax[0,i+1].set_xticks(range(0,5))
        ax[0,i+1].set_xticklabels(dim1_str)
        ax[0,i+1].set_yticks([0.5,3.5])
        ax[0,i+1].set_yticklabels(dim2_str)
        for tick in ax[0,i+1].get_xticklabels():
            tick.set_rotation(45)
        divider = make_axes_locatable(ax[0,i+1])
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar = fig.colorbar(pl,cax=cax)
        # n noisy model draws, each scored by cosine distance to the data grid
        dat = list()
        for ii in range(n):
            Y = mat2grid(addnoise(mdl))
            dat.append(geom_dist_mat(X,Y))
        dat = np.asarray(dat)
        # write simulation data to file
        np.savetxt("{}_{}_simulation.txt".format(outname.split(".")[0],mdl_strs[i].replace(" ","_").lower()),dat)
        # histograms for simulations: fit a normal to the distance distribution
        mu, std = norm.fit(dat)
        # Plot the histogram.
        ax[1,i+1].hist(dat, bins=25, density=True, alpha=0.6, color='r')
        # Plot the PDF.
        xmin, xmax = plt.xlim()
        x = np.linspace(xmin, xmax, 1000)
        p = norm.pdf(x, mu, std)
        ax[1,i+1].plot(x, p, 'b', linewidth=2)
        ax[1,i+1].set_xlim([mu-.1,mu+.1])
        ax[1,i+1].set_xlabel("$\delta(data,model)$\n$\mu = %.2f,\sigma^2 = %.2f$" % (mu, std))
    # write matrices (data grid + one grid per model) to file
    with open(outname, 'w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerows(out)
    # write visualiation to file
    plt.savefig(filename)
def main():
    """Render data-vs-model plots for each embedded 5-column survey item."""
    # visualiation parameters
    mpl.rcParams.update({'text.usetex': False,
                         'font.family': 'serif',
                         'font.serif': 'cmr10',
                         'font.weight':'bold',
                         'mathtext.fontset': 'cm',
                         'axes.unicode_minus' : False
                         })
    ## data
    fname = "dat/response.csv"
    df = pd.read_csv(fname)
    ## items
    items = pd.read_csv("dat/items_expand.csv")
    ### embedded items indices (start column of each 5-column item block)
    init_idxs = [5, 11]
    for i in tqdm(init_idxs):
        # each embedded item spans 5 consecutive response columns [i, i+5)
        ii = (i + 4) + 1
        q_str = items["Short-form"].iloc[i]
        ### data dataframe
        M = np.array(df.iloc[:,i:ii])
        ### specification of comparison model (best / middling / worst case)
        mdl_max = [1,1,5,5,5]
        mdl_med = [1,3,3,3,2]
        mdl_min = [5,1,1,1,1]
        MDLS = [mdl_max,mdl_med,mdl_min]
        MDLS_labels = ["yeswecan","bland","nighmare"]
        for j, mdl in enumerate(MDLS):
            # output basename: item short-form (spaces stripped) + model label
            fname = "{}_{}".format(re.sub(" ","",q_str.lower()),MDLS_labels[j])
            mdl2plot(M,mdl,q_str,filename="fig/{}.png".format(fname),outname="dat/export/{}.txt".format(fname))
| knielbo/survey_visualization | embedded2plot.py | embedded2plot.py | py | 6,395 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line... |
32437564312 | import pandas as pd
from stable_baselines3.ppo import MlpPolicy
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3 import PPO
from torch import nn
from tqdm import trange
from golds.contracts import Currency, Stock, Option, OptionFlavor, OptionStyle, Holdings
from golds.env import AmericanOptionEnv
from golds.mkt_data import PricingSource, SingleStockGBMMarketDataSource
from golds.params import GBMParams
from golds.reward_functions import NaiveHedgingRewardFunction
from golds.tcost import NaiveTransactionCostModel
# Hyper-parameters for the hedging environment and reward function.
REWARD_KAPPA = 100              # risk-aversion weight passed to the reward function
INITIAL_WEALTH = 1e7            # starting cash position
SELF_FINANCING_LAMBDA = 1000    # NOTE(review): unused in this function — confirm intent
TRADING_DAYS_IN_YEAR = 252      # episode length in trading days
def main():
    """Train a PPO agent to hedge a short-call position, then evaluate it.

    Builds a single-stock GBM market with a non-tradable call option, trains
    PPO for N_YEARS_TRAINING simulated years (with periodic checkpoints), and
    writes per-day evaluation and reward histories to HDF5 files.
    """
    # instrument universe: tradable stock, non-tradable option on it, cash
    aapl = Stock(ticker="AAPL", is_tradable=True)
    warrant = Option(
        strike=100,
        expiry_time=1.,
        underlying=aapl,
        flavor=OptionFlavor.CALL,
        style=OptionStyle.EUROPEAN,
        is_tradable=False
    )
    cash = Currency(code="USD", is_tradable=False)
    initial_holdings: Holdings = {
        aapl: 0.,
        warrant: 100.,
        cash: INITIAL_WEALTH,
    }
    universe = list(initial_holdings.keys())
    # market simulation: geometric Brownian motion, paths reused across episodes
    gbm_params = GBMParams(mu=0.005, sigma=0.2, risk_free_rate=0.)
    mkt_data_source = SingleStockGBMMarketDataSource(universe, gbm_params, data_reuse_num_episodes=5*3000)
    tcost_model = NaiveTransactionCostModel(universe)
    pricing_source = PricingSource(mkt_data_source, tcost_model)
    env = AmericanOptionEnv(
        episode_length=TRADING_DAYS_IN_YEAR,
        pricing_source=pricing_source,
        reward_function=NaiveHedgingRewardFunction(kappa=REWARD_KAPPA, initial_holdings=initial_holdings),
        actions_config=list(range(-100, 101))
    )
    # TODO experiment with gamma, gae_lambda, ent_coef, vf_coef, max_grad_norm (kwargs to PPO.__init__)
    # TODO experiment with batch size (how to do this?)
    # TODO Lerrel says entropy related to exploration -- increase ent_coef if agent is not exploring enough
    # TODO experiment with different number of hidden nodes per layer in "net_arch" (64? 128? more?)
    policy_kwargs = {"activation_fn": nn.ReLU, "net_arch": [32]*5}
    model = PPO(MlpPolicy, env, verbose=1, learning_rate=1e-4, policy_kwargs=policy_kwargs)
    N_YEARS_TRAINING = 5_000_000
    # checkpoint every 1000 steps so training can be resumed / inspected
    checkpoint_callback = CheckpointCallback(save_freq=1000, save_path='./checkpoints/', name_prefix='hedging_model')
    model.learn(total_timesteps=TRADING_DAYS_IN_YEAR*N_YEARS_TRAINING, callback=checkpoint_callback)
    # TODO should log the training output here somehow (loss over time)
    # with open("american_option_env_5e6.pkl", "w+b") as f:
    #     pickle.dump(env, f)
    # model.save("trained_hedging_model_5e6")
    # with open("american_option_env.pkl", "rb") as f:
    #     env = pickle.load(f)
    # model = PPO.load("trained_hedging_model")
    NUM_YEARS_OUT_OF_SAMPLE = 10000
    print(f"Done training. Evaluating on {NUM_YEARS_OUT_OF_SAMPLE} years of data.")
    evaluation_histories = []
    reward_histories = []
    for i in trange(NUM_YEARS_OUT_OF_SAMPLE):
        # one out-of-sample episode: a full trading year of observe/act/step
        evaluation_records = []
        obs = env.reset()
        for day in range(TRADING_DAYS_IN_YEAR):
            prices = env.observation_array_to_dict(obs)
            # print(f"Observed prices: {prices}")
            action, _states = model.predict(obs)
            trade = env.action_array_to_dict(action)
            # print(f"Executed trade: {trade}")
            obs, rewards, dones, info = env.step(action)
            # print(f"Received reward: {rewards}")
            evaluation_records.append({"obs": prices, "action": trade, "reward": rewards})
        env.render()
        evaluation_history = pd.DataFrame.from_records(evaluation_records)
        # NOTE(review): reads a private attribute of the reward function — confirm API
        reward_history = pd.DataFrame.from_records(env.reward_function._reward_records)
        evaluation_history["run_number"] = i
        reward_history["run_number"] = i
        evaluation_histories.append(evaluation_history)
        reward_histories.append(reward_history)
    # concatenate all runs and persist to HDF5
    evaluation_history = pd.concat(evaluation_histories, ignore_index=True)
    reward_history = pd.concat(reward_histories, ignore_index=True)
    evaluation_history.to_hdf("evaluation_history.h5", key="df", mode="w")
    reward_history.to_hdf("reward_history.h5", key="df", mode="w")
    '''
    evaluation_records = []
    obs = env.reset()
    for day in range(TRADING_DAYS_IN_YEAR):
        prices = env.observation_array_to_dict(obs)
        print(f"Observed prices: {prices}")
        action, _states = model.predict(obs)
        trade = env.action_array_to_dict(action)
        print(f"Executed trade: {trade}")
        obs, rewards, dones, info = env.step(action)
        print(f"Received reward: {rewards}")
        evaluation_records.append({"obs": prices, "action": trade, "reward": rewards})
    env.render()
    pd.DataFrame.from_records(evaluation_records).to_hdf("rl_results.h5", key="df", mode="w")
    env.reward_function.persist_history_to_hdf("reward_history.h5", key="df", mode="w")
    '''
if __name__ == '__main__':
    main()
| fany02656/RL-Finance-FY | main.py | main.py | py | 5,173 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "golds.contracts.Stock",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "golds.contracts.Option",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "golds.contracts.OptionFlavor.CALL",
"line_number": 27,
"usage_type": "attribute"
},
{
... |
35329125934 | #!/usr/bin/env python3
import os
import sys
import json
import re
from argparse import ArgumentParser
import hashlib
import copy
import subprocess
import uuid
import datetime
def get_app_info(wf_name, data_type):
    """Return the list of variant-caller apps bundled in workflow `wf_name`
    for the given `data_type`.

    Exits the process when the workflow/data-type pair is unknown.
    """
    apps_by_workflow = {
        "sanger-wxs": {
            "snv": ["CaVEMan"],
            "indel": ["Pindel"]
        },
        "sanger-wgs": {
            "snv": ["CaVEMan"],
            "indel": ["Pindel"],
            "cnv": ["ASCAT"],
            "sv": ["BRASS"]
        },
        "broad-mutect2": {
            "snv-indel": ["Mutect2"]
        }
    }
    apps = apps_by_workflow.get(wf_name, {}).get(data_type)
    if apps is None:
        sys.exit("Unknown workflow or data type")
    return apps
def get_analysis_type(data_type):
    """Return the human-readable analysis type for a variant data type.

    Unknown data types yield None, mirroring dict.get semantics.
    """
    if data_type in ("snv", "indel", "snv-indel"):
        return "Simple somatic mutation calling"
    if data_type == "cnv":
        return "Copy number somatic mutation calling"
    if data_type == "sv":
        return "Structural somatic mutation calling"
    return None
def get_data_type(file_to_upload):
    """Infer the variant data type from the VCF file-name convention.

    Exits the process when the name matches no known pattern.
    """
    filename = os.path.basename(file_to_upload)
    # ordered (pattern, type) pairs; first match wins, mirroring the original
    # if/elif chain (re.match anchors each pattern at the start of the name)
    patterns = [
        (r".*\.copynumber\.caveman\.vcf\.gz$", 'cnv'),
        (r".*\.annot\.vcf\.gz$", 'sv'),
        (r".*\.flagged\.vcf\.gz$", 'indel'),
        (r".*\.flagged\.muts\.vcf\.gz$", 'snv'),
        (r"broad-mutect2\.snv-indel\.vcf\.gz$", 'snv-indel'),
    ]
    for pattern, data_type in patterns:
        if re.match(pattern, filename):
            return data_type
    sys.exit("Unknown data_type!")
def get_uuid5(bid, fid):
    """Deterministic UUIDv5 string for a bundle-id / file-id pair.

    The namespace constant is the standard DNS namespace UUID, so the same
    (bid, fid) pair always yields the same identifier.
    """
    namespace = uuid.UUID("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
    return str(uuid.uuid5(namespace, "%s/%s" % (bid, fid)))
def calculate_size(file_path):
    """Return the size of the file at `file_path` in bytes."""
    return os.path.getsize(file_path)
def calculate_md5(file_path):
    """MD5 hex digest of a file, read in 1 MiB chunks to bound memory use."""
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        while True:
            chunk = fh.read(1024 * 1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_files_info(file_to_upload, filename=None):
    """Collect name/path/size/md5 metadata for a file.

    When `filename` is given, the file is first copied to that name and the
    metadata describes the copy.
    """
    if filename:
        run_cmd('cp %s %s' % (file_to_upload, filename))
        file_to_upload = os.path.realpath(filename)
    return {
        'name': os.path.basename(file_to_upload),
        'path': file_to_upload,
        'size': calculate_size(file_to_upload),
        'checksum': calculate_md5(file_to_upload),
    }
def run_cmd(cmd):
    """Run a shell command; on failure print diagnostics and exit the process.

    Exits with the command's return code, or 1 when the command could not be
    launched at all. Returns None on success.
    """
    p, success = None, True
    try:
        p = subprocess.run([cmd],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           shell=True)
    except Exception as e:
        print('Execution failed: %s' % e)
        success = False
    if p and p.returncode != 0:
        print('\nError occurred, return code: %s. Details: %s' %
              (p.returncode, p.stderr.decode("utf-8")), file=sys.stderr)
        success = False
    if not success:
        # guard on `p`: when subprocess.run itself raised, p is still None and
        # the original unguarded `p.returncode` access crashed with
        # AttributeError instead of exiting cleanly
        sys.exit(p.returncode if p and p.returncode else 1)
    return
def main(args):
    """Build a SONG-style analysis payload JSON for one file to upload.

    Depending on args.bundle_type, fetches the matching payload template from
    the argo-metadata-schemas repo (via curl), fills in inputs/files metadata,
    and writes `<bundle_type>.<file name>.json` to the working directory.
    """
    if args.bundle_type == 'lane_seq_submission':
        with open(args.user_submit_metadata, 'r') as f:
            metadata = json.load(f)
        if metadata.get("input_seq_format") == 'FASTQ':
            read_group = metadata.get("read_groups")
            payload_template_url = "https://raw.githubusercontent.com/icgc-argo/argo-metadata-schemas/%s/schemas/_example_docs/36.lane_seq_submission.01.ok.json" % args.payload_schema_version
            cmd = "curl -o template --retry 10 %s" % payload_template_url
            run_cmd(cmd)
            with open("template", "r") as f:
                payload = json.load(f)
            payload['program_id'] = metadata.get('program_id')
            # get inputs of the payload: find the read group whose sanitized
            # id (non [a-zA-Z0-9-_] chars replaced by '_') is part of the
            # upload file name
            for rg in read_group:
                rg_id = rg.get("submitter_read_group_id")
                rg_fname = "".join([c if re.match(r"[a-zA-Z0-9\-_]", c) else "_" for c in rg_id])
                if not rg_fname in args.file_to_upload: continue
                payload['inputs']['submitter_read_group_id'] = rg_id
                payload['inputs']['files']['fastq'] = rg.get('files')
        elif metadata.get("input_seq_format") == 'BAM':
            files = metadata.get("files")
            payload_template_url = "https://raw.githubusercontent.com/icgc-argo/argo-metadata-schemas/%s/schemas/_example_docs/35.lane_seq_submission.01.ok.json" % args.payload_schema_version
            cmd = "curl -o template --retry 10 %s" % payload_template_url
            run_cmd(cmd)
            with open("template", "r") as f:
                payload = json.load(f)
            payload['program_id'] = metadata.get('program_id')
            # get inputs of the payload: same sanitized-id match as above,
            # but the read groups are nested per input BAM
            for input_file in files:
                for rg in input_file.get('read_groups'):
                    rg_id = rg.get("submitter_read_group_id")
                    rg_fname = "".join([c if re.match(r"[a-zA-Z0-9\-_]", c) else "_" for c in rg_id])
                    if not rg_fname in args.file_to_upload: continue
                    payload['inputs']['submitter_read_group_id'] = rg_id
                    # deepcopy so popping 'read_groups' leaves metadata intact
                    payload['inputs']['files']['bam'] = copy.deepcopy(input_file)
                    payload['inputs']['files']['bam'].pop('read_groups')
        else:
            sys.exit('\n%s: Input files format are not FASTQ or BAM')
        # get files of the payload
        payload['files']['bam_file'].update(get_files_info(args.file_to_upload))
        payload['files']['bam_file'].pop('_final_doc', None)
        payload['files']['bam_file'].pop('_mocked_system_properties', None)
    elif args.bundle_type == 'dna_alignment':
        payload_template_url = "https://raw.githubusercontent.com/icgc-argo/argo-metadata-schemas/%s/schemas/_example_docs/40.dna_alignment.01.ok.json" % args.payload_schema_version
        cmd = "curl -o template --retry 10 %s" % payload_template_url
        run_cmd(cmd)
        with open("template", "r") as f:
            payload = json.load(f)
        # get inputs of the payload: one entry per upstream lane_seq payload
        lane_seq_list = []
        for res_file in args.analysis_input_payload:
            lane_seq = {}
            with open(res_file, 'r') as f:
                res_json = json.load(f)
            payload['program_id'] = res_json.get('program_id')
            lane_seq['lane_seq_submission_id'] = res_json.get('id')
            lane_seq['files'] = {}
            lane_seq['files']['lane_seq'] = res_json['files']['bam_file']
            lane_seq['files']['lane_seq'].update({"bundle_id": res_json.get('id')})
            lane_seq_list.append(lane_seq)
        payload['inputs']['lane_seq'] = lane_seq_list
        # get files of the payload
        payload['files']['aligned_seq'].update(get_files_info(args.file_to_upload))
        # get index files of the payload (.bai for BAM, .crai for CRAM)
        if os.path.exists(args.file_to_upload + ".bai"):
            payload['files']['aligned_seq_index'].update(get_files_info(args.file_to_upload + ".bai"))
        elif os.path.exists(args.file_to_upload + ".crai"):
            payload['files']['aligned_seq_index'].update(get_files_info(args.file_to_upload + ".crai"))
        else:
            sys.exit('\n%s: Missing index file')
        payload['files']['aligned_seq'].pop('_final_doc', None)
        payload['files']['aligned_seq'].pop('_mocked_system_properties', None)
        payload['files']['aligned_seq_index'].pop('_final_doc', None)
        payload['files']['aligned_seq_index'].pop('_mocked_system_properties', None)
    elif args.bundle_type == 'somatic_variant_call':
        payload_template_url = "https://raw.githubusercontent.com/icgc-argo/argo-metadata-schemas/%s/schemas/_example_docs/60.somatic_variant_call.01-sanger-wxs-snv.ok.json" % args.payload_schema_version
        cmd = "curl -o template --retry 10 %s" % payload_template_url
        run_cmd(cmd)
        with open("template", "r") as f:
            payload = json.load(f)
        data_type = get_data_type(args.file_to_upload)
        # update analysis of the payload
        payload['analysis']['analysis_type'] = get_analysis_type(data_type)
        payload['analysis']['tool']['name'] = "icgc-argo/%s-variant-calling" % args.wf_short_name
        payload['analysis']['tool']['short_name'] = args.wf_short_name
        payload['analysis']['tool']['version'] = args.wf_version
        payload['analysis']['tool']['included_apps'] = get_app_info(args.wf_short_name, data_type)
        # get inputs of the payload: classify each upstream alignment payload
        # as normal or tumour via its tumour_normal_designation
        for res_file in args.analysis_input_payload:
            input_file = {}
            with open(res_file, 'r') as f:
                res_json = json.load(f)
            payload['program_id'] = res_json.get('program_id')
            input_file['dna_alignment_id'] = res_json.get('id')
            input_file['files'] = {}
            input_file['files']['aligned_dna_seq'] = res_json['files']['aligned_seq']
            input_file['files']['aligned_dna_seq'].update({"bundle_id": res_json.get('id')})
            if input_file['files']['aligned_dna_seq']['name'].endswith('bam'):
                input_file['files']['aligned_dna_seq'].update({"secondary_file": '.bai'})
            elif input_file['files']['aligned_dna_seq']['name'].endswith('cram'):
                input_file['files']['aligned_dna_seq'].update({"secondary_file": '.crai'})
            else:
                sys.exit('\n%s: Unknown file type')
            if res_json.get('info') and res_json.get('info').get('tumour_normal_designation'):
                if 'normal' in res_json.get('info').get('tumour_normal_designation').lower():
                    payload['inputs']['normal'] = input_file
                else:
                    payload['inputs']['tumour'] = input_file
                    # output VCF name derived from the tumour sample:
                    # <uuid5>.<library_strategy>.<date>.<wf>.<ver>.somatic.<type>.vcf.gz
                    uuid_prefix = get_uuid5(res_json.get('info').get('program_id'), res_json.get('info').get('submitter_sample_id'))
                    filename = '.'.join([uuid_prefix, res_json.get('info').get('library_strategy').lower(),
                                         datetime.date.today().strftime("%Y%m%d"),
                                         args.wf_short_name, args.wf_version, 'somatic',
                                         data_type, 'vcf', 'gz'])
            else:
                sys.exit('\n%s: Not enough information to proceed!')
        # get files of the payload (copied under the generated filename);
        # NOTE(review): `filename` is only bound when a tumour input was seen
        payload['files']['vcf'].update(get_files_info(args.file_to_upload, filename))
        # get index files of the payload
        if os.path.exists(args.file_to_upload + ".tbi"):
            payload['files']['vcf_index'].update(get_files_info(args.file_to_upload + ".tbi", filename+".tbi"))
        elif os.path.exists(args.file_to_upload + ".idx"):
            payload['files']['vcf_index'].update(get_files_info(args.file_to_upload + ".idx", filename+".idx"))
        else:
            sys.exit('\n%s: Missing index file')
        payload['files']['vcf'].pop('_final_doc', None)
        payload['files']['vcf'].pop('_mocked_system_properties', None)
        payload['files']['vcf_index'].pop('_final_doc', None)
        payload['files']['vcf_index'].pop('_mocked_system_properties', None)
    else:
        sys.exit('\n%s: Unknown bundle_type')
    # strip schema-example bookkeeping fields and write the payload JSON
    payload.pop('_final_doc', None)
    payload.pop('_mocked_system_properties', None)
    payload_fname = ".".join([args.bundle_type, os.path.basename(args.file_to_upload), 'json'])
    with open(payload_fname, 'w') as f:
        f.write(json.dumps(payload, indent=2))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-t", "--bundle_type", dest="bundle_type", type=str,
help="Payload type")
parser.add_argument("-p", "--payload_schema_version", dest="payload_schema_version", help="release version of payload schema")
parser.add_argument("-m", "--user_submit_metadata", dest="user_submit_metadata",
help="json file containing experiment, read_group and file information submitted from user")
parser.add_argument("-f", "--file_to_upload", dest="file_to_upload", type=str, help="File to upload to server")
parser.add_argument("-a", "--analysis_input_payload", dest="analysis_input_payload", help="Input payloads for the analysis",
type=str, nargs='+')
parser.add_argument("-c", "--wf_short_name", dest="wf_short_name", type=str, choices=['sanger-wxs', 'sanger-wgs', 'broad-mutect2'],
help="workflow short name")
parser.add_argument("-v", "--wf_version", dest="wf_version", type=str,
help="workflow version")
args = parser.parse_args()
main(args)
| icgc-argo-workflows/data-processing-utility-tools | tools/payload-generation/payload-generation.py | payload-generation.py | py | 12,859 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.exit",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": ... |
73034211233 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import rabbitmq_plugin
# Give the loader-injected salt dunders empty defaults so patch.dict can
# overlay mocks onto them during the tests.
rabbitmq_plugin.__opts__ = {}
rabbitmq_plugin.__salt__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class RabbitmqPluginTestCase(TestCase):
    '''
    Test cases for salt.states.rabbitmq_plugin
    '''
    # 'enabled' function tests: 1

    def test_enabled(self):
        '''
        Test to ensure the RabbitMQ plugin is enabled.
        '''
        name = 'some_plugin'

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}

        # side_effect: first call reports already-enabled, second not-enabled
        mock = MagicMock(side_effect=[True, False])
        with patch.dict(rabbitmq_plugin.__salt__,
                        {'rabbitmq.plugin_is_enabled': mock}):
            comt = ('Plugin some_plugin is already enabled')
            ret.update({'comment': comt})
            self.assertDictEqual(rabbitmq_plugin.enabled(name), ret)

            # test mode: state should only announce the pending change
            with patch.dict(rabbitmq_plugin.__opts__, {'test': True}):
                comt = ('Plugin some_plugin is set to be enabled')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(rabbitmq_plugin.enabled(name), ret)

    # 'disabled' function tests: 1

    def test_disabled(self):
        '''
        Test to ensure the RabbitMQ plugin is disabled.
        '''
        name = 'some_plugin'

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}

        # side_effect: first call reports not-enabled, second enabled
        mock = MagicMock(side_effect=[False, True])
        with patch.dict(rabbitmq_plugin.__salt__,
                        {'rabbitmq.plugin_is_enabled': mock}):
            comt = ('Plugin some_plugin is not enabled')
            ret.update({'comment': comt})
            self.assertDictEqual(rabbitmq_plugin.disabled(name), ret)

            # test mode: state should only announce the pending change
            with patch.dict(rabbitmq_plugin.__opts__, {'test': True}):
                comt = ('Plugin some_plugin is set to be disabled')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(rabbitmq_plugin.disabled(name), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(RabbitmqPluginTestCase, needs_daemon=False)
| shineforever/ops | salt/tests/unit/states/rabbitmq_plugin_test.py | rabbitmq_plugin_test.py | py | 2,591 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "salttesting.helpers.ensure_in_syspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "salt.states.rabbitmq_plugin.__opts__",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "salt.states.rabbitmq_plugin",
"line_number": 24,
"usage_t... |
70431605475 | import numpy as np
import matplotlib.pyplot as plt
class Kmeans(object):
    """Naive k-means clustering with optional matplotlib visualisation.

    train() runs Lloyd's algorithm for at most 100 iterations; after a
    successful run, `self.centroids` and `self.labels` hold the result.
    """

    def __init__(self, k=1):
        # k: number of clusters to fit
        self.k = k

    def train(self, data, verbose=1):
        """Fit k centroids to `data` (n_samples x n_features).

        Returns 1 on success; returns 0 and leaves no fitted state when a
        cluster becomes empty. With verbose=1, prints progress and shows
        (blocking) scatter plots of the first two feature dimensions.
        """
        shape = data.shape
        # per-dimension [min, max] ranges used to seed random centroids
        ranges = np.zeros((shape[1], 2))
        centroids = np.zeros((shape[1], 2))
        for dim in range(shape[1]):
            ranges[dim, 0] = np.min(data[:,dim])
            ranges[dim, 1] = np.max(data[:,dim])
        if verbose == 1:
            print('Ranges: ')
            print(ranges)
        # initialise k centroids uniformly at random inside the data ranges
        centroids = np.zeros((self.k, shape[1]))
        for i in range(self.k):
            for dim in range(shape[1]):
                centroids[i, dim] = np.random.uniform(ranges[dim, 0], ranges[dim, 1], 1)
        if verbose == 1:
            print('Centroids: ')
            print(centroids)
            # NOTE(review): placement of these plot calls inside the verbose
            # guard is assumed (mirrors the guarded plotting below) — confirm
            plt.scatter(data[:,0], data[:,1])
            plt.scatter(centroids[:,0], centroids[:,1], c = 'r')
            plt.show()
        count = 0
        while count < 100:
            count += 1
            if verbose == 1:
                print('-----------------------------------------------')
                print('Iteration: ', count)
            # Euclidean distance of every sample to every centroid
            distances = np.zeros((shape[0],self.k))
            for ix, i in enumerate(data):
                for ic, c in enumerate(centroids):
                    distances[ix, ic] = np.sqrt(np.sum((i-c)**2))
            # assign each sample to its nearest centroid
            labels = np.argmin(distances, axis = 1)
            # recompute each centroid as the mean of its assigned samples
            new_centroids = np.zeros((self.k, shape[1]))
            for centroid in range(self.k):
                temp = data[labels == centroid]
                if len(temp) == 0:
                    # empty cluster: abort and signal failure to the caller
                    return 0
                for dim in range(shape[1]):
                    new_centroids[centroid, dim] = np.mean(temp[:,dim])
            if verbose == 1:
                plt.scatter(data[:,0], data[:,1], c = labels)
                plt.scatter(new_centroids[:,0], new_centroids[:,1], c = 'r')
                plt.show()
            # converged when centroids stop moving (within float epsilon)
            if np.linalg.norm(new_centroids - centroids) < np.finfo(float).eps:
                print("DONE!")
                break
            centroids = new_centroids
        self.centroids = centroids
        self.labels = labels
        if verbose == 1:
            print(labels)
            print(centroids)
        return 1

    def getAverageDistance(self, data):
        """Mean distance of each cluster's members to their centroid.

        Must be called with the same `data` used in train(); returns an array
        of length k.
        """
        dists = np.zeros((len(self.centroids),))
        for ix, centroid in enumerate(self.centroids):
            temp = data[self.labels == ix]
            dist = 0
            for i in temp:
                dist += np.linalg.norm(i - centroid)
            dists[ix] = dist/len(temp)
        return dists

    def getLabels(self):
        """Return the cluster label assigned to each training sample."""
        return self.labels
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 18,
... |
20969486373 | import math
import sys
import time
from nba_api.stats.static import players
from nba_api.stats.endpoints import playergamelog
from nba_api.stats.library.parameters import SeasonAll
from nba_api.stats.endpoints import leaguegamefinder
import csv
import pandas as pd
from bs4 import BeautifulSoup
import requests
from dateutil.parser import parse
import datetime
from io import StringIO
import tqdm
def get_active_players(player_csv_name):
active_players_array_of_dicts = players.get_active_players()
field_names = active_players_array_of_dicts[0].keys()
with open(player_csv_name, mode="w", newline="") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names)
writer.writeheader()
writer.writerows(active_players_array_of_dicts)
return player_csv_name
def add_nba_api_data(players_csv, data_csv_name, testing, wait_seconds=1):
data = pd.read_csv(data_csv_name)
players = pd.read_csv(players_csv)
counter = 0
for _, row in players.iterrows():
# cut short for testing
if counter > 3 and testing:
break
counter += 1
# get the data for the player
player_id = row["id"]
gamelogs = pd.concat(
playergamelog.PlayerGameLog(
player_id=player_id, season=SeasonAll.all
).get_data_frames()
)
if len(gamelogs) == 0:
print("No data for", row["full_name"])
# have the data, now to add it to the csv.
for _, gamelog in gamelogs.iterrows():
already_exists = (gamelog["Player_ID"] == player_id) & (
data["game_id"] == gamelog["Game_ID"]
)
if not any(already_exists):
to_add = gamelog
to_add["player_id"] = to_add["Player_ID"]
to_add["game_id"] = to_add["Game_ID"]
data = pd.concat([data, pd.DataFrame([to_add])], ignore_index=True)
# wait before next request to not get rate limited
data.drop_duplicates(subset=["player_id", "game_id"], inplace=True)
data.to_csv(data_csv_name, index=False)
outtext = (
"Added data for "
+ row["full_name"]
+ " "
+ str(counter)
+ "/"
+ str(len(players))
+ " "
)
print(" " * 80, end="\r", flush=True)
print(outtext, end="\r", flush=True)
time.sleep(wait_seconds)
print()
data = pd.read_csv(data_csv_name)
data.drop_duplicates(subset=["player_id", "game_id"], inplace=True)
data.to_csv(data_csv_name, index=False)
return data_csv_name
def instantiate_data_if_needed(data_csv_name, columns):
try:
data = pd.read_csv(data_csv_name)
except pd.errors.EmptyDataError:
data = pd.DataFrame()
for column in columns:
if column not in data.columns:
data[column] = 0
data.to_csv(data_csv_name, index=False)
return data_csv_name
def clean_date(text):
return parse(text)
def find_matching_id(injury_row, keyword_df):
for _, row in keyword_df.iterrows():
if row["full_name"] in injury_row["Relinquished"]:
if "DTD" in injury_row["Notes"]:
return (row["id"], "DTD")
else:
return (row["id"], "Not DTD")
return None, None
def add_injury_data(players_csv, data_csv_name, testing, wait_seconds=1):
# get correct date data
data = pd.read_csv(data_csv_name)
data["GAME_DATE"] = data["GAME_DATE"].apply(clean_date)
data["GAME_DATE"] = pd.to_datetime(data["GAME_DATE"])
# make initial url request
earliest_date = data["GAME_DATE"].min().strftime("%Y-%m-%d")
players = pd.read_csv(players_csv)
url = (
"https://www.prosportstransactions.com/basketball/Search/SearchResults.php?Player=&Team=&BeginDate="
+ earliest_date
+ "&EndDate=&InjuriesChkBx=yes&Submit=Search"
)
response = requests.get(url)
# get all page links
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
links = soup.find_all(
"a", href=lambda href: href and "SearchResults.php?Player=&Team=" in href
)
urls = [link["href"] for link in links]
else:
print(f"Failed to retrieve content. Status code: {response.status_code}")
raise ValueError("cant read main page!")
# for each page, scrape relevant data
counter = 0
all_injuries = pd.DataFrame(columns=["Date", "matching_id", "injury_type"])
for url in urls:
out_string = ("Adding injury page " + str(counter + 1) + "/" + str(len(urls)) + " ")
print(out_string, end="\r")
if counter > 3 and testing:
break
counter += 1
# https://www.prosportstransactions.com/basketball/Search/SearchResults.php?Player=&Team=&BeginDate=2022-12-21&EndDate=&InjuriesChkBx=yes&Submit=Search
# call page, read table & add clean date
url_to_call = "https://www.prosportstransactions.com/basketball/Search/" + url
if response.status_code == 200:
response = requests.get(url_to_call)
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find("table", class_="datatable center")
table_dataframes = pd.read_html(StringIO(str(table)), header=0)
page_df = table_dataframes[0]
page_df["Date"] = pd.to_datetime(page_df["Date"].apply(clean_date))
else:
print("failed to get data for", url)
# If find player match in a row, add matching id and injury type in page_df
page_df["matching_id"] = None
page_df["injury_type"] = None
for injury_index, injury_row in page_df.iterrows():
for _, row in players.iterrows():
if isinstance(injury_row["Relinquished"], str) and isinstance(
injury_row["Notes"], str
):
if row["full_name"] in injury_row["Relinquished"]:
if "DTD" in injury_row["Notes"]:
page_df.at[injury_index, "matching_id"] = row["id"]
page_df.at[injury_index, "injury_type"] = "DTD"
else:
page_df.at[injury_index, "matching_id"] = row["id"]
page_df.at[injury_index, "injury_type"] = "Not DTD"
# add injuries to all_injuries
page_df = page_df[page_df["matching_id"].notna()]
if not all_injuries.empty:
all_injuries = pd.concat([all_injuries, page_df], ignore_index=True)
else:
all_injuries = pd.concat([page_df], ignore_index=True)
time.sleep(wait_seconds)
# For row in all injuries, update relevant rows in data
all_injuries = all_injuries.sort_values(by="Date")
for _, injury_row in all_injuries.iterrows():
# Find just the data rows matching the injury player_id AND are later:
ref_date = pd.to_datetime(injury_row["Date"])
ref_id = int(injury_row["matching_id"])
ref_injury_type = injury_row["injury_type"]
filtered_df = data[data["GAME_DATE"] > ref_date]
filtered_df = data[data["player_id"] == ref_id]
# Calculate the number of days since the reference date & update data
print("Adding injuries to data...")
for data_idx, data_row in filtered_df.iterrows():
days_since_injury = (data_row["GAME_DATE"] - ref_date).days
if days_since_injury < 0:
data.loc[data_idx, "days_since_last_injury"] = math.inf
else:
data.loc[data_idx, "days_since_last_injury"] = days_since_injury
data.loc[data_idx, "type_of_last_injury"] = ref_injury_type
# fillna
data["days_since_last_injury"] = data["days_since_last_injury"].fillna(math.inf)
data["type_of_last_injury"] = data["type_of_last_injury"].fillna(math.inf)
print()
data.drop_duplicates(subset=["player_id", "game_id"], inplace=True)
data.to_csv(data_csv_name, index=False)
return data_csv_name
def make_time_idx(game_id, min_id):
return game_id - min_id
def turn_game_id_to_time_idx(data_csv_name):
data = pd.read_csv(data_csv_name)
data["game_id_str"] = data["game_id"].astype(str).str[1:]
data = data.sort_values(by="game_id_str")
data["time_idx"] = range(len(data))
del data["game_id_str"]
data.to_csv(data_csv_name, index=False)
return data_csv_name
def apply_home(matchup):
if "vs." in matchup:
return matchup.split(" vs. ")[0]
elif "@" in matchup:
return matchup.split(" @ ")[1]
def apply_away(matchup):
if "vs." in matchup:
return matchup.split(" vs. ")[1]
elif "@" in matchup:
return matchup.split(" @ ")[0]
def add_teams_playing_column(data_csv_name):
"""Add home and away team columns"""
data = pd.read_csv(data_csv_name)
data["home_team"] = data["MATCHUP"].apply(apply_home)
data["away_team"] = data["MATCHUP"].apply(apply_away)
data.drop_duplicates(subset=["player_id", "game_id"], inplace=True)
data.to_csv(data_csv_name, index=False)
return data_csv_name
def apply_which_team(matchup):
if "vs." in matchup:
return matchup.split(" vs. ")[0]
elif "@" in matchup:
return matchup.split(" @ ")[0]
def add_which_team_column(data_csv_name):
data = pd.read_csv(data_csv_name)
data["team"] = data["MATCHUP"].apply(apply_which_team)
data["was_home"] = False
data.loc[data["team"] == data["home_team"], "was_home"] = True
data.drop_duplicates(subset=["player_id", "game_id"], inplace=True)
data.to_csv(data_csv_name, index=False)
return data_csv_name
def apply_game_type(season_id):
first_digit = str(season_id)[0]
switch_dict = {
"1": "preseason",
"2": "regular",
"3": "allstar",
"4": "postseason",
"5": "playin",
}
return switch_dict.get(first_digit, "Unknown")
def add_game_type(data_csv_name):
# https://github.com/swar/nba_api/issues/220
# 1 = pre season
# 2 = regular
# 3 = all star
# 4 = finals/playoffs, post-season
# 5 = play-in
data = pd.read_csv(data_csv_name)
data["game_type"] = data["SEASON_ID"].apply(apply_game_type)
data.to_csv(data_csv_name, index=False)
return data_csv_name
def clean_up_data(data_csv_name):
data = pd.read_csv(data_csv_name)
data = data.drop("VIDEO_AVAILABLE", axis=1)
data.to_csv(data_csv_name, index=False)
return data_csv_name
if __name__ == "__main__":
# only do 3 players for testing
testing = False
# get active players, store in a csv
columns = [
"player_id",
"game_id",
"days_since_last_injury",
"type_of_last_injury",
"home_team",
"away_team",
"team",
"was_home",
]
data_csv_name = "data.csv"
player_csv_name = "players.csv"
players_csv = get_active_players(player_csv_name)
#data_csv_name = instantiate_data_if_needed(data_csv_name, columns)
#data_csv_name = add_nba_api_data(players_csv, data_csv_name, testing)
data_csv_name = add_injury_data(players_csv, data_csv_name, testing)
data_csv_name = turn_game_id_to_time_idx(data_csv_name)
data_csv_name = add_teams_playing_column(data_csv_name)
data_csv_name = add_which_team_column(data_csv_name)
data_csv_name = add_game_type(data_csv_name)
# add was finals??
data_csv_name = clean_up_data(data_csv_name)
| DanielMillward/NBADataScraper | 1_update_db.py | 1_update_db.py | py | 11,619 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "nba_api.stats.static.players.get_active_players",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nba_api.stats.static.players",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "csv.DictWriter",
"line_number": 22,
"usage_type": "call"
}... |
704796313 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Reading the file
data=pd.read_csv(path)
#Code starts here
# Step 1
#Reading the file
#Creating a new variable to store the value counts
loan_status=data["Loan_Status"].value_counts()
#Plotting bar plot
loan_status.plot(kind="bar")
# Step 2
#Plotting an unstacked bar plot
property_and_loan=data.groupby(["Property_Area","Loan_Status"]).size().unstack()
property_and_loan.plot(kind="bar",stacked=False)
plt.xlabel("Property Area")
plt.ylabel("Loan Status")
plt.xticks(rotation=45)
#Changing the x-axis label
#Changing the y-axis label
#Rotating the ticks of X-axis
# Step 3
#Plotting a stacked bar plot
education_and_loan=data.groupby(["Education","Loan_Status"]).size().unstack()
education_and_loan.plot(kind="bar",stacked=True)
plt.xlabel("Education Status")
plt.ylabel("Loan Status")
plt.xticks(rotation=45)
#Changing the x-axis label
#Changing the y-axis label
#Rotating the ticks of X-axis
# Step 4
#Subsetting the dataframe based on 'Education' column
graduate=data[data["Education"]=="Graduate"]
not_graduate=data[data["Education"]=="Not Graduate"]
dd=graduate["LoanAmount"]
dd=dd.fillna(0)
dd.plot(kind="density",label="Graduate")
ss=not_graduate["LoanAmount"]
ss=ss.fillna(0)
ss.plot(kind="density",label="not_Graduate")
#Subsetting the dataframe based on 'Education' column
#Plotting density plot for 'Graduate'
#Plotting density plot for 'Graduate'
#For automatic legend display
# Step 5
#Setting up the subplots
fig ,(ax_1,ax_2,ax_3)=plt.subplots(nrows = 3 , ncols = 1)
#Plotting scatter plot
data.plot.scatter(x='ApplicantIncome',y="LoanAmount",ax=ax_1,title="Applicant Income")
data.plot.scatter(x='CoapplicantIncome',y="LoanAmount",ax=ax_2,title="Coapplicant Income")
#Setting the subplot axis title
data["TotalIncome"]=data["ApplicantIncome"]+data["CoapplicantIncome"]
#Plotting scatter plot
data.plot.scatter(x="TotalIncome",y="LoanAmount",ax=ax_3,title="Total Income")
#Setting the subplot axis title
#Creating a new column 'TotalIncome'
#Plotting scatter plot
#Setting the subplot axis title
| raghulsenthilkumar/greyatom-python-for-data-science | visulization/code.py | code.py | py | 2,181 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.... |
27733324416 | import logging
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from timeline.models import Timeline
# Models
class SubscribeRecord(models.Model):
is_read = models.BooleanField(
default=False,
verbose_name='Запись прочитана пользователем'
)
record = models.OneToOneField(
'blog.Record',
on_delete=models.CASCADE,
verbose_name='Запись',
)
class Meta:
verbose_name = 'Запись из блога на которую подписан пользователь'
verbose_name_plural = 'Записи из блога на которую подписан пользователь'
def __str__(self):
return f'{self.id}'
class SubscribeByBlog(models.Model):
timeline = models.ForeignKey(
'timeline.Timeline',
on_delete=models.CASCADE,
verbose_name='Лента записей'
)
created_at = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата создания'
)
subscribes_record = models.ManyToManyField(
'blog.SubscribeRecord',
verbose_name='Подписка на новость',
)
class Meta:
verbose_name = 'Подписка на блог'
verbose_name_plural = 'Подписки на блоги'
def __str__(self):
return f'{self.created_at}'
class Blog(models.Model):
name = models.fields.CharField(
max_length=150,
verbose_name='Название'
)
user = models.OneToOneField(
'auth.User',
on_delete=models.CASCADE,
verbose_name='Пользователь'
)
subscribes_by_blog = models.ManyToManyField(
'blog.SubscribeByBlog',
verbose_name='Подписки на блог',
)
class Meta:
verbose_name = 'Блог'
verbose_name_plural = 'Блоги'
def __str__(self):
return f'{self.name}'
class Record(models.Model):
title = models.fields.CharField(
max_length=150,
verbose_name='Заголовок'
)
text = models.TextField(
verbose_name='Текст'
)
created_at = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата создания'
)
blog = models.ForeignKey(
'blog.Blog',
on_delete=models.CASCADE,
verbose_name='Блог',
)
class Meta:
verbose_name = 'Запись'
verbose_name_plural = 'Записи'
def __str__(self):
return f'{self.title}'
# Signals for models
@receiver(post_save, sender=User)
def create_blog(sender, instance, created, **kwargs):
if created:
Blog.objects.create(
name=f'Блог пользователя {instance.username}',
user=instance,
)
@receiver(post_save, sender=Record)
def create_subscribe_record(sender, instance, created, **kwargs):
from manager_tasks.tasks import send_notify
if created:
subscribes_by_blog = instance.blog.subscribes_by_blog.all()
if subscribes_by_blog:
send_notify.delay(instance.id, instance.blog.id)
for subscribe_by_blog in subscribes_by_blog:
subscribe_by_blog.subscribes_record.add(SubscribeRecord.objects.create(record=instance))
@receiver(post_delete, sender=Record)
def delete_subscribe_record(sender, instance, **kwargs):
subscribes_by_blog = instance.blog.subscribes_by_blog.all()
if subscribes_by_blog:
for subscribe_by_blog in subscribes_by_blog:
subscribe_by_blog.subscribes_record.filter(record=instance).delete() | Sergey19940808/blog | blog/models.py | models.py | py | 3,771 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 13,
"usage_type": "call"
},
{
"api_na... |
43346740956 | import json
from pprint import pprint
from time import ctime
def lambda_response(status_code, message, **kwargs) -> dict:
'''Presents the custom OleTalk API Gateway/server responses in standard HTTP format, for all communications sent
from the API to the client. \n
Parameters:
-----------
status_code (int, req'd):
Standard http codes. \n
msg (str, req'd):
String displaying the message response. \n
kwargs (dict, opt):
Other relevant data (used in debugging) such as Exceptions, etc. \n
Returns:
--------
return (dict):
Response to the client's requests in standard HTTP form. \n
'''
data = {"message": message}
for key in kwargs:
data.update({key: kwargs[key]})
return {
'statusCode': status_code, # cCode, sCode, conf_cde, stat_cde
'body': json.dumps(data)
}
def html_response(status_code, html) -> dict:
'''Responds with html (or web page) as its payload instead of a json object. \n
Parameter:
----------
status_code (int/str, req):
Standard HTTP Status Codes. \n
html (txt, req.):
HTML formatted text. \n
Returns:
--------
return (dict, req):
A response with HTML formatted text in the body. \n
'''
return {
"headers": {
"Content-Type": "text/html"
},
"statusCode": status_code,
"body": html
}
def error_response(error_code, error_message, **kwargs) -> dict:
'''Presents the Error in the required standard format,
or all error communications from the API to the client. \n
Parameters:
-----------
error_code (int, req'd):
HTTP Status/Error code for this error-type. \n
error_message (str, req'd):
HTTP code for the error. \n
kwargs (dict, opt):
The error details in key - value pairs giving further eg. stack trace, error type etc. \n
Returns:
--------
return (dict):
Error presented in the standard HTTP response format. \n
'''
# should trigger logging and custom debugging functions.
error_data = kwargs
error_data.update({"errorMessage": error_message})
error_data.update({"timestamp": ctime()})
return {
'statusCode': error_code, # usually 400 or 500 follows the same convention as http error codes \n
'body': json.dumps(error_data) # including the 'call stack'
}
#
def respond(err=None, res=None, html=None) -> dict:
'''Customizes response using a consistant, standard format for AWS API Gateway/AWS Lambda Functions. \n
Parameters:
-----------
err (exception/error object, opt):
Exception class based object. Must be set to None if not used. \n
res (dict, opt):
Key-Value pairs containing the data being sent to a client. \n
html (text, opt):
HTML formatted text. \n
Returns:
--------
return (dict):
Custom OleTalk API response in the standard http format.
'''
return {
"headers": {
"Access-Control-Allow-Origin": '*',
"Access-Control-Allow-Methods": 'GET,POST,OPTIONS',
"Access-Control-Allow-Headers": 'Content-Type',
# "Content-Type": "text/html" if html else "application/json"
"Content-Type": "text/html" if html else "application/json"
},
"statusCode": '400' if err else '200',
"body": err if err else html if html else json.dumps(res)
}
| Gallatin-Engineering/oletalk_response | python/api_response.py | api_response.py | py | 3,907 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.ctime",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 123,... |
31276842685 | """
Fix binary png images so that they're in a consistent binary format
Used for ground truth (_fg) images which can be saved by some image programs as 8 bit RGB pngs
rather than binary ones.
"""
import cv2
import os
import io
for img in os.listdir("/home/carter/Desktop/our-data/training/imgs"):
if "_fg" in img:
print(img)
#read image as grey scale
img_grey = cv2.imread("/home/carter/Desktop/our-data/training/imgs/"+img, cv2.IMREAD_GRAYSCALE)
# define a threshold, 128 is the middle of black and white in grey scale
thresh = 128
# threshold the image
img_binary = cv2.threshold(img_grey, thresh, 255, cv2.THRESH_BINARY)[1]
#save image
cv2.imwrite("/home/carter/Desktop/our-data/training/imgs/"+img, img_binary) | cpsiff/plant-segmentation | fix_imgs.py | fix_imgs.py | py | 795 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"... |
13255279223 | #! python 3
"""
Program that reads in text files and lets users replace some keywords. Lastly, save as new file
"""
from pathlib import Path
import re
### read in text file
file_name = input("Enter relative path of file (eg. folder/file.txt): ")
p = Path(Path.cwd() / file_name)
input_file = open(p)
txt = input_file.read()
print(txt)
### find following keywords
kwRegex = re.compile(r'ADJECTIVE|NOUN|VERB|ADVERB')
kwList = kwRegex.findall(txt)
### get user to input string to replace
for i in kwList:
replaceWord = input(f'Enter a {i}:')
txt = txt.replace(i, replaceWord, 1)
### save updated text to new file
print(txt)
# consider identifying how many different parents levels are there
out_file_name = input("Enter relative path of new file (eg. folder/file.txt): ")
new_file = open(f"{Path.cwd() / out_file_name}", 'w')
new_file.write(txt)
input_file.close()
new_file.close() | simink/py_automatetheboringstuff | code/madLibs.py | madLibs.py | py | 893 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line... |
70007433955 | from __future__ import print_function
import wave
import numpy as np
from struct import unpack
class WaveWrap(object):
def __init__(self, filename, output_window_size, window_overlap, unpack_fmt="<{}h"):
self.wav_buffer_size = output_window_size - window_overlap
self.wave_file = wave.open(filename, 'r')
self.length = self.wave_file.getnframes()
self.output_window_size = output_window_size
self.unpack_fmt = unpack_fmt.format(self.wav_buffer_size)
self.cursor = 1
self.n_windows = self.length / self.wav_buffer_size
self.wav_data = np.zeros(self.output_window_size)
self.fft_input = np.zeros(self.output_window_size)
self.hamming = np.hamming(self.output_window_size)
def next_pcm_window(self):
try:
wave_data = self.wave_file.readframes(self.wav_buffer_size)
except Exception as e:
print(e.message)
wave_data = np.zeros(self.wav_buffer_size)
self.cursor += 1
return unpack(self.unpack_fmt, wave_data)
def next_fft_input_window(self):
np.roll(self.wav_data, -self.wav_buffer_size)
self.wav_data[-self.wav_buffer_size:] = self.next_pcm_window()
return self.wav_data * self.hamming
def next_fft_output_window(self):
fft = np.abs(np.fft.rfft(self.next_fft_input_window()))
max_energy = max(fft)
fft /= max_energy
return fft
window_size = 4096 * 4
overlap = (window_size / 4) * 3
wav_wrap = WaveWrap(filename='bee.wav', output_window_size=window_size, window_overlap=overlap)
bin_size = wav_wrap.wave_file.getframerate() / float(window_size)
print('bin size', bin_size)
d = np.array([wav_wrap.next_fft_output_window() for _ in range(wav_wrap.n_windows)])
print('len(d) = ', len(d))
import matplotlib.pyplot as plt
plt.imshow(np.flip(d.T, 0), interpolation='nearest', aspect='auto')
plt.show()
| lelloman/python-utils | sfft_from_wav.py | sfft_from_wav.py | py | 1,927 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wave.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.hamming",
"line_number": ... |
24249664220 | import os
from PySide6 import QtWidgets
from PySide6.QtGui import QFont, QIcon
from PySide6.QtWidgets import (
QCheckBox,
QComboBox,
QFileDialog,
QFrame,
QLabel,
QLineEdit,
QPushButton,
QRadioButton,
QSizePolicy,
QSpacerItem,
QTabWidget,
QTextEdit,
QWidget,
)
from nadeocr.GUI.functions.utils.extra import edit_config_ini, get_data, read_config_ini
from nadeocr.GUI.widgets.input_hotkey_widget import MainHotkeyExecution
_ROOT = os.path.abspath(os.path.dirname(__file__))
class OptionsWidget(QWidget):
def __init__(self, parent):
self.parent = parent
super(OptionsWidget, self).__init__()
self.setWindowTitle("Opciones")
self.setFixedSize(425, 310)
icon_path = get_data(_ROOT, "../../resources/assets", "icon.ico")
self.setWindowIcon(QIcon(icon_path))
self.config_reader = read_config_ini()
layout = QtWidgets.QGridLayout()
self.setLayout(layout)
self.tabs = QTabWidget()
self.tab_general = QWidget()
self.tab_advanced = QWidget()
self.tab_about = QWidget()
self.tabs.addTab(self.tab_general, "General")
self.tabs.addTab(self.tab_advanced, "Avanzado")
self.tabs.addTab(self.tab_about, "Acerca de")
# Create first tab layout
self.tab_general.layout = QtWidgets.QGridLayout(self)
self.tab_advanced.layout = QtWidgets.QGridLayout(self)
self.tab_about.layout = QtWidgets.QGridLayout(self)
# Add tabs to main widget
layout.addWidget(self.tabs)
self.tab_general.setLayout(self.tab_general.layout)
self.tab_advanced.setLayout(self.tab_advanced.layout)
self.tab_about.setLayout(self.tab_about.layout)
# General Tab
self.label_interface_user = QLabel("Interfaz de usuario")
self.label_interface_user.setFont(QFont("Arial", 9, weight=QFont.Bold))
self.tab_general.layout.addWidget(self.label_interface_user, 0, 0, 1, 5)
self.label_language = QLabel("Idioma")
self.tab_general.layout.addWidget(self.label_language, 1, 0)
self.box_language = QComboBox()
self.box_language.addItem("Español")
self.box_language.addItem("Inglés")
self.tab_general.layout.addWidget(self.box_language, 1, 1, 1, 5)
self.label_notification_pos = QLabel("Posición notificación")
self.tab_general.layout.addWidget(self.label_notification_pos, 2, 0, 1, 2)
spanish_to_pos_text = {
"TopLeft": "Esquina superior izquierda",
"TopRight": "Esquina superior derecha",
"BottomLeft": "Esquina inferior izquierda",
"BottomRight": "Esquina inferior derecha",
}
notification_pos = self.config_reader["user_settings"]["notification_pos"]
self.box_notification_pos = QComboBox()
self.box_notification_pos.addItem("Esquina superior izquierda")
self.box_notification_pos.addItem("Esquina superior derecha")
self.box_notification_pos.addItem("Esquina inferior izquierda")
self.box_notification_pos.addItem("Esquina inferior derecha")
self.box_notification_pos.setCurrentText(spanish_to_pos_text[notification_pos])
self.tab_general.layout.addWidget(self.box_notification_pos, 2, 1, 1, 5)
self.separatorLine = QFrame(frameShape=QFrame.HLine)
self.separatorLine.setLineWidth(0)
self.separatorLine.setMidLineWidth(5)
self.separatorLine.setStyleSheet("font: 9pt; color: grey;")
self.tab_general.layout.addWidget(self.separatorLine, 4, 0, 1, 6)
self.label_preferences_user = QLabel("Preferencias")
self.label_preferences_user.setFont(QFont("Arial", 9, weight=QFont.Bold))
self.tab_general.layout.addWidget(self.label_preferences_user, 5, 0)
self.checkbox_run_startup = QCheckBox("Ejecutar al arranque", self)
self.tab_general.layout.addWidget(self.checkbox_run_startup, 6, 0, 1, 2)
hotkey = self.config_reader["user_settings"]["shortcut_key"]
self.label_preference_hotkey = QLabel("Atajo para escaneo")
self.tab_general.layout.addWidget(self.label_preference_hotkey, 7, 0, 1, 2)
self.parent.button_input_hotkey = QPushButton(self)
self.parent.button_input_hotkey.setText(hotkey)
self.tab_general.layout.addWidget(self.parent.button_input_hotkey, 7, 1, 1, 5)
self.parent.button_input_hotkey.clicked.connect(self.on_button_hotkey_click)
self.label_preference_scan = QLabel("Motor de escaneo preferido")
self.tab_general.layout.addWidget(self.label_preference_scan, 8, 0, 1, 3)
self.radio_button_google = QRadioButton("Google (recomendado)")
self.tab_general.layout.addWidget(self.radio_button_google, 9, 0)
self.label_separator = QLabel(" ")
self.tab_general.layout.addWidget(self.label_separator, 9, 1)
self.radio_button_mangaocr = QRadioButton("MangaOCR")
self.tab_general.layout.addWidget(self.radio_button_mangaocr, 9, 2)
self.radio_button_paddleocr = QRadioButton("PaddleOCR")
self.tab_general.layout.addWidget(self.radio_button_paddleocr, 9, 3)
self.radio_button_paddleocr.setDisabled(True)
ocr_provider = self.config_reader["provider_settings"]["ocr_provider"]
# OCR client selection based on the config file
if ocr_provider == "Google":
self.radio_button_google.setChecked(True)
elif ocr_provider == "MangaOCR":
self.radio_button_mangaocr.setChecked(True)
self.spaceItem = QSpacerItem(100, 10, QSizePolicy.Expanding)
self.tab_general.layout.addItem(self.spaceItem, 10, 0)
self.button_accept = QPushButton("Aceptar", self)
self.button_cancel = QPushButton("Cancelar", self)
self.button_accept.clicked.connect(self.accept_button)
self.button_cancel.clicked.connect(self.cancel_button)
self.tab_general.layout.addWidget(self.button_accept, 16, 2, 2, 1)
self.tab_general.layout.addWidget(self.button_cancel, 16, 3, 2, 2)
# Tab Advanced
self.label_paths_user = QLabel("Rutas de archivos")
self.label_paths_user.setFont(QFont("Arial", 9, weight=QFont.Bold))
self.tab_advanced.layout.addWidget(self.label_paths_user, 0, 0, 1, 4)
self.label_path_google = QLabel("Credenciales Google")
self.tab_advanced.layout.addWidget(self.label_path_google, 1, 0, 1, 2)
self.line_edit_google = QLineEdit(self)
self.tab_advanced.layout.addWidget(self.line_edit_google, 2, 0, 1, 5)
self.button_accept_google = QPushButton("...", self)
self.tab_advanced.layout.addWidget(self.button_accept_google, 2, 5, 1, 1)
self.button_accept_google.clicked.connect(self.accept_button_path_google)
path_string_google = self.config_reader["path_settings"]["credentials_google"]
if path_string_google.strip() == "":
self.line_edit_google.setText("No se ha definido")
else:
self.line_edit_google.setText(path_string_google)
self.button_accept2 = QPushButton("Aceptar", self)
self.button_cancel2 = QPushButton("Cancelar", self)
self.button_accept2.clicked.connect(self.accept_button)
self.button_cancel2.clicked.connect(self.cancel_button)
self.tab_advanced.layout.addWidget(self.button_accept2, 16, 4, 2, 1)
self.tab_advanced.layout.addWidget(self.button_cancel2, 16, 5, 2, 1)
self.labelAbout = QLabel(
"<b>NadeOCR v1.0.1</b> (10/10/2022)<br>An easy and fast-to-use tool for scanning text anywhere with Google's Vision API and other third party services.<br><br>"
"This project wouldn't be possible without:<br>- Google's Vision API for detecting and recognising a wide variety of languages including, but not limited to, English, Japanese and Spanish.<br>- The awesome Manga-OCR model by Maciej Budyś for recognizing Japanese characters in manga. <br><br>"
"More information about this project can be found <a href='https://github.com/Natsume-197/NadeOCR'>here</a>."
)
self.labelAbout.setWordWrap(True)
self.labelAbout.setOpenExternalLinks(True)
self.tab_about.layout.addWidget(self.labelAbout, 0, 0, 1, 6)
self.button_accept3 = QPushButton("Cerrar", self)
self.button_accept3.clicked.connect(self.cancel_button)
self.tab_about.layout.addWidget(self.button_accept3, 16, 5, 2, 1)
self.tab_general.layout.setRowStretch(16, 5)
self.tab_advanced.layout.setRowStretch(15, 5)
self.tab_about.layout.setRowStretch(15, 5)
self.show()
def accept_button_path_google(self):
path_google_fdialog = QFileDialog.getOpenFileName(
self, "Abrir archivo", "", "Credenciales (*.json)", ""
)
self.line_edit_google.setText(path_google_fdialog[0])
edit_config_ini("path_settings", "credentials_google", path_google_fdialog[0])
def accept_button(self):
if self.radio_button_google.isChecked():
edit_config_ini("provider_settings", "ocr_provider", "Google")
elif self.radio_button_mangaocr.isChecked():
edit_config_ini("provider_settings", "ocr_provider", "MangaOCR")
if self.box_notification_pos.currentText() == "Esquina superior izquierda":
edit_config_ini("user_settings", "notification_pos", "TopLeft")
elif self.box_notification_pos.currentText() == "Esquina superior derecha":
edit_config_ini("user_settings", "notification_pos", "TopRight")
elif self.box_notification_pos.currentText() == "Esquina inferior derecha":
edit_config_ini("user_settings", "notification_pos", "BottomRight")
elif self.box_notification_pos.currentText() == "Esquina inferior izquierda":
edit_config_ini("user_settings", "notification_pos", "BottomLeft")
self.close()
    def cancel_button(self):
        """Close the dialog without applying any pending changes."""
        self.close()
    def on_button_hotkey_click(self):
        """Open the hotkey-capture window so the user can rebind the shortcut."""
        self.input_hotkey_window = MainHotkeyExecution(parent=self.parent)
| Natsume-197/NadeOCR | nadeocr/GUI/widgets/options_widget.py | options_widget.py | py | 10,175 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets.QWidge... |
38712381876 | # Imports
import bs4
import requests
from bs4 import BeautifulSoup
# URL Base
# Base URL of the WHO site; the report links found on the page are relative to it.
url_base = 'https://www.who.int'


def scrapingCovid():
    """Scrape the WHO situation-report page and return the PDF report URLs.

    Consecutive links whose two-digit report number repeats are skipped,
    so each report appears only once in the result.
    """
    # Page that lists the COVID-19 situation reports.
    url = 'https://www.who.int/emergencies/diseases/novel-coronavirus-2019/situation-reports/'

    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'lxml')

    # Keep every anchor tag whose markup mentions a situation report.
    report_anchors = []
    for element in soup.find_all('a', href=True):
        if 'situation-reports' in str(element):
            report_anchors.append(str(element))

    pdfurls = []
    lastreport = None

    for anchor in report_anchors:
        # Drop the leading '<a href="' and keep everything before the query string.
        stripped = anchor.replace('<a href="', '')
        extraction = stripped[0:stripped.find('?')]
        # Characters 68:70 of the path hold the two-digit report number.
        reportnumber = extraction[68:70]
        if reportnumber != lastreport:
            pdfurls.append(url_base + extraction)
        lastreport = extraction[68:70]

    # Report how many PDF links were collected.
    print(len(pdfurls))
    return pdfurls
| AleTavares/scrapingCovid19 | scraping.py | scraping.py | py | 2,144 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
}
] |
42291469922 | import os
from torchvision.transforms import CenterCrop
from PIL import Image
# Center-crop every JPEG in source_dir to 128x128 and save it to output_dir,
# skipping images that are smaller than the crop size.
completed_processing = set()

source_dir = "C:/Users/jessi/Documents/Master_Courses/MIE1517_IDL/Project/dataset/dataset/"
output_dir = "C:/Users/jessi/Documents/Master_Courses/MIE1517_IDL/Project/dataset/processed_dataset/"

entries_seen = 0   # total directory entries scanned (for progress printing)
nb_excluded = 0    # images rejected for being smaller than 128x128

with os.scandir(source_dir) as entries:
    for entry in entries:
        entries_seen += 1
        name = entry.name
        if name.endswith('.jpg') and name not in completed_processing:
            completed_processing.add(name)
            with Image.open(source_dir + name) as image:
                width, height = image.size
                if width >= 128 and height >= 128:
                    CenterCrop(128)(image).save(output_dir + name)
                else:
                    nb_excluded += 1
        # Progress heartbeat every 50 entries.
        if entries_seen % 50 == 0:
            print(entries_seen)
| ChessieN132D/Self-Supervised-Image-Inpainting | Image Preprocessing/image_center_crop_128.py | image_center_crop_128.py | py | 945 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.scandir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop... |
11350861272 | # -*- coding: utf-8 -*-
"""Home dashboard layout."""
import locale
from apps import utils_dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from portfolio.fii import FiiPortfolio
from portfolio.funds import FundsPortfolio
from portfolio.stocks import StocksPortfolio
# Configure number/currency formatting for the dashboard and echo the result.
print("locale: ", locale.setlocale(locale.LC_ALL, utils_dash.my_locale))
stocksportfolio = StocksPortfolio()
fiiportfolio = FiiPortfolio()
fundsportfolio = FundsPortfolio()
#############################################################################
# Figures
#############################################################################
# Pie chart: how the invested money splits across asset classes.
fig_class_distribution_labels = ["Stocks", "FIIs", "Funds"]
fig_class_distribution_values = []
# NOTE(review): StocksPortfolio.total_invest is called while the FII/Funds
# counterparts are accessed as attributes -- presumably a method vs property
# difference in the portfolio classes; confirm against the portfolio package.
fig_class_distribution_values.append(stocksportfolio.total_invest()[1])
fig_class_distribution_values.append(fiiportfolio.total_invest[1])
fig_class_distribution_values.append(fundsportfolio.total_invest[1])
fig_class_distribution = go.Figure(
    data=[
        go.Pie(
            labels=fig_class_distribution_labels, values=fig_class_distribution_values
        )
    ]
)
fig_class_distribution.update_layout(title="Porfolio Class Distribution", title_x=0.5)
# Monthly invested amount per class, stacked in a single bar chart.
# Each DataFrame gets a "Class" column so px.bar can color by asset class.
df_fii_money_monthly = fiiportfolio.fiitransactions.money_invested_monthly()
df_fii_money_monthly["Class"] = "FII"
df_fun_money_monthly = fundsportfolio.money_invested_monthly()
# Funds use a "Value" column; rename it to match the other DataFrames.
df_fun_money_monthly.rename(columns={"Value": "Operation Cost"}, inplace=True)
df_fun_money_monthly["Class"] = "Funds"
df_sto_money_monthly = stocksportfolio.stockstransactions.money_invested_monthly()
df_sto_money_monthly["Class"] = "Stocks"
fig_money_inv_monthly = px.bar(
    pd.concat([df_fii_money_monthly, df_fun_money_monthly, df_sto_money_monthly]),
    x="Date",
    y="Operation Cost",
    labels={"Operation Cost": "Amount Invested", "Date": "Month"},
    color="Class",
    title="Money Invested Monthly",
)
fig_money_inv_monthly.update_layout(
    title_x=0.5, yaxis={"tickprefix": utils_dash.graph_money_prefix}
)
fig_money_inv_monthly.update_xaxes(rangeslider_visible=True)
# Add quick range-selector buttons (1m/6m/YTD/...); the range slider itself
# is hidden here, overriding the update_xaxes call above.
fig_money_inv_monthly.update_layout(
    title_x=0.5,
    xaxis={
        "rangeselector": {
            "buttons": [
                {"count": 1, "label": "1m", "step": "month", "stepmode": "backward"},
                {"count": 6, "label": "6m", "step": "month", "stepmode": "backward"},
                {"count": 1, "label": "YTD", "step": "year", "stepmode": "todate"},
                {"count": 1, "label": "1y", "step": "year", "stepmode": "backward"},
                {"count": 2, "label": "2y", "step": "year", "stepmode": "backward"},
                {"count": 5, "label": "5y", "step": "year", "stepmode": "backward"},
                {"step": "all"},
            ],
        },
        "rangeslider": {"visible": False},
        "type": "date",
    },
)
#############################################################################
# layout
#############################################################################
# Page layout: one row with the class-distribution pie and the monthly
# investment bar chart side by side.
layout = dbc.Container(
    [
        dbc.Row(
            [
                dbc.Col(
                    [
                        dcc.Graph(
                            id="fig_class_distribution_id",
                            figure=fig_class_distribution,
                        )
                    ],
                ),
                dbc.Col(
                    [
                        dcc.Graph(
                            id="fig_money_inv_monthly_id",
                            figure=fig_money_inv_monthly,
                        )
                    ],
                ),
            ]
        ),
    ],
    fluid=True,
)
# vim: ts=4
| thobiast/myinvestments | apps/home_dash.py | home_dash.py | py | 3,748 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "locale.setlocale",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "locale.LC_ALL",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "apps.utils_dash.my_locale",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "ap... |
13839587396 | #################################################################################
# Description: Class for generating augmented data for training of
# neural network and SVM
#
# Authors: Petr Buchal <petr.buchal@lachub.cz>
# Martin Ivanco <ivancom.fr@gmail.com>
# Vladimir Jerabek <jerab.vl@gmail.com>
#
# Date: 2019/04/13
#
# Note: This source code is part of project created on UnIT extended 2019.
#################################################################################
import random
import argparse
from datetime import datetime
import cv2
import numpy as np
from matplotlib import pyplot
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
from image import Image
from tools import parse_data
def get_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward compatible with the old zero-argument call).

    Returns:
        argparse.Namespace with a ``batches`` attribute (int, default 100).
    """
    parser = argparse.ArgumentParser()
    # ``type`` must be the callable ``int`` -- the original passed the string
    # "int", which argparse cannot call, crashing on any explicit --batches.
    parser.add_argument('--batches', type=int, action="store", default=100,
                        help='Number of generated batch of size 32.')

    args = parser.parse_args(argv)
    return args
#Class storing data for augmentation
class DataSet(object):
def __init__(self, ImageList):
self.Images = ImageList
self.p_images, self.labels, self. grand_truths = DataSet.strip_futilities(ImageList)
random.seed(datetime.now())
#Function for augmentation images
#@param: imageArr - is numpy array of images of shape (n, 1, height, width)
# ^-- number of images
#@param: grandArr - is numpy array of grand truths images of shape (n, 1, height, width)
#@param: batchSize - is integer, which is less or equal 'n'
#@return: new_img - array of augmented images of shape (batchSize, 1, height, width)
#@return: new_g_t - array of augmented grand truths images of shape (batchSize, 1, height, width)
@staticmethod
def augmentImage(imageArr, grandArr, batchSize):
for imgs in [imageArr, grandArr]:
for img in imgs:
h, w = img[0].shape
img[0,0,:] = 0
img[0,h-1,:] = 0
img[0,:,0] = 0
img[0,:,w-1] = 0
shift = 0.2
data_gen_args = dict( data_format="channels_first",
rotation_range=90,
height_shift_range=shift,
width_shift_range=shift,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True)
#creating Image generator same for image data and grand truths data
img_dataGen = ImageDataGenerator(**data_gen_args)
g_tr_dataGen = ImageDataGenerator(**data_gen_args)
seed = random.randint(0,65535)
for new_img in img_dataGen.flow(imageArr, seed=seed, batch_size=batchSize):
new_img = np.uint16(new_img)
break
for new_g_t in g_tr_dataGen.flow(grandArr, seed=seed, batch_size=batchSize):
new_g_t = np.uint16(new_g_t)
break
return new_img, new_g_t
#Function for striping unneeded data from Image class
#@param: data - List of Image class ^-- for more info see image.py
#@return: images - numpy array of images of shape (n, 1, height, width) %note: 16-bit grayscale
#@return: labels - numpy array of booleans of shape (n,1),
# - 0 mean, that i-th image is doesn't contain ellipse
# - 1 mean, that i-th image contains ellipse
#@return: grand_truths - numpy array of images of shape (n, 1, height, width) %note: 16-bit grayscale
@staticmethod
def strip_futilities(data):
images = []
labels = []
grand_truths = []
for i, item in enumerate(data):
images.append(np.array([item.processed_image]))
labels.append(np.array([item.ellipse]))
grand_truths.append(np.array([item.processed_ground_truths]))
return np.array(images), np.array(labels), np.array(grand_truths)
#Method for getting augmented data for training neural netowrk
#@param batchSize - nuber of images in one training epoche
def getBatch(self, batchSize, isClassNet=True):
img, g_truths = DataSet.augmentImage(self.p_images, self.grand_truths, batchSize)
labels = []
if batchSize > len(self.p_images):
while len(img) != batchSize:
curSize = batchSize - len(img)
img2, g_truths2 = DataSet.augmentImage(self.p_images, self.grand_truths, curSize)
img = np.concatenate((img, img2), axis=0)
g_truths = np.concatenate((g_truths, g_truths2), axis=0)
#get labels
for i in range(len(img)):
unique, counts = np.unique(g_truths[i], return_counts=True)
if unique[-1] > 60000:
labels.append(np.array([1]))
else:
labels.append(np.array([0]))
if isClassNet:
return img, np.array(labels)
else:
return img, g_truths
if __name__ == "__main__":
    import os  # needed for the output-directory check below; not imported at file top

    # Was ``parse_args()``, which does not exist in this module -- the parser
    # is defined as ``get_args()``.
    args = get_args()
    trn_data = parse_data("./data_training/ground_truths_develop.csv", "./data_training/images/", "./data_training/ground_truths/")

    myData = DataSet(trn_data)

    if not os.path.exists("./images_png"):
        os.makedirs("./images_png")

    counter = 0
    for e in range(args.batches):
        x, y = myData.getBatch(32)
        for i, item in enumerate(x):
            print("Generated batches: {}" .format(counter))
            # Convert the single-channel image to BGR so cv2 writes a normal PNG.
            new = cv2.cvtColor(item[0], cv2.COLOR_GRAY2BGR)
            # Suffix encodes the label: _T contains an ellipse, _F does not.
            if y[i] == 0:
                cv2.imwrite("./images_png/{}_F.png" .format(counter), new)
            if y[i] == 1:
                cv2.imwrite("./images_png/{}_T.png" .format(counter), new)
            counter += 1
| LachubCz/ItAintMuchButItsHonestWork | src/batch.py | batch.py | py | 6,122 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
45290308912 | # -*- coding: utf-8 -*-
import abc
import importlib
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class BaseEventsPushBackend(object, metaclass=abc.ABCMeta):
    """Abstract interface for backends that push events to subscribers."""
    @abc.abstractmethod
    def emit_event(self, message:str, *, routing_key:str, channel:str="events"):
        """Publish *message* on *channel*, tagged with *routing_key*."""
        pass
def load_class(path):
    """
    Load class from path.

    ``path`` is a dotted path such as ``"package.module.ClassName"``.
    Raises ``ImproperlyConfigured`` when the module cannot be imported or
    the attribute does not exist on it.
    """
    mod_name, klass_name = path.rsplit('.', 1)

    try:
        mod = importlib.import_module(mod_name)
    except ImportError as e:
        # import_module raises ImportError (not AttributeError) on a bad
        # module path; the original caught AttributeError, so import
        # failures escaped without being wrapped.
        raise ImproperlyConfigured('Error importing {0}: "{1}"'.format(mod_name, e))

    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class'.format(mod_name, klass_name))

    return klass
def get_events_backend(path:str=None, options:dict=None):
    """Instantiate the configured events push backend.

    When *path* or *options* are omitted they fall back to the
    ``EVENTS_PUSH_BACKEND`` / ``EVENTS_PUSH_BACKEND_OPTIONS`` settings.
    Raises ``ImproperlyConfigured`` if no backend path can be resolved.
    """
    backend_path = path if path is not None else getattr(settings, "EVENTS_PUSH_BACKEND", None)
    if backend_path is None:
        raise ImproperlyConfigured("Events push system not configured")

    backend_options = options if options is not None else getattr(settings, "EVENTS_PUSH_BACKEND_OPTIONS", {})

    backend_class = load_class(backend_path)
    return backend_class(**backend_options)
| phamhongnhung2501/Taiga.Tina | fwork-backend/tina/events/backends/base.py | base.py | py | 1,213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "abc.ABCMeta",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_module",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "djan... |
34490854766 | import json
filename = r'/home/xxm/下载/qlora/data/estate_qa.json'

# Print every JSONL record whose "output" field is empty (data-quality check).
with open(filename, 'r', encoding='utf-8') as f:
    for raw_line in f:
        record = json.loads(raw_line.strip())
        if record['output'] == '':
            print(record)
| xxm1668/qlora_chatglm | data_processon.py | data_processon.py | py | 320 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 8,
"usage_type": "call"
}
] |
5047930515 | from flask import Flask
from flask import render_template, url_for, jsonify, request
from game import Game
app = Flask(__name__)

@app.route('/')
def main():
    """Serve the single-page 2048 UI."""
    return render_template('index.html')

# Module-level game state; replaced with a Game instance by /init.
game=0
@app.route('/init', methods=['POST'])
def init():
    """Create a fresh Game and return its board as a flat list of ints."""
    global game
    game = Game()
    game.start()
    # I have no idea why this is necessary but without the list comprehension it
    # breaks.
    # NOTE(review): grid is presumably a numpy array; numpy integer scalars
    # are not JSON-serializable, so each cell must become a plain int -- confirm.
    data = [int(x) for x in game.grid.flatten()]
    response = jsonify({'board': data})
    return response
# Map a direction letter to the (axis, step) pair passed to Game.move.
directions = { 'U': (1,-1), 'D': (1, 1), 'L': (0,-1), 'R': (0, 1) }

@app.route('/move', methods=['POST'])
def move():
    """Apply one move and return the updated board as a flat list of ints."""
    letter = request.args['direction']
    axis_and_step = directions[letter.upper()]
    game.move(*axis_and_step)
    return jsonify({ 'board': [int(x) for x in game.grid.flatten()] })
| Samcfuchs/DataSci | 2048/server.py | server.py | py | 807 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "game.Game",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "game.start",
"line_numb... |
33881354393 | import numpy
from matplotlib import pyplot
from utils import timeit
@timeit
def get_data():
    """Read input.txt; each line is a rock path of "x,y -> x,y -> ..." points."""
    data = []
    with open('input.txt') as input_file:
        for line in input_file:
            segments = line.strip().split(' -> ')
            path = tuple((int(x), int(y)) for x, y in (segment.split(',') for segment in segments))
            data.append(path)
    return data
def build_grid(paths, infinite_floor=False):
    """Rasterise rock *paths* into a character grid for the sand simulation.

    Args:
        paths: sequence of paths, each a sequence of (x, y) points joined by
            axis-aligned rock segments. The input is NOT modified.
        infinite_floor: when True, add the part-2 floor two rows below the
            lowest rock and widen the grid so sand can pile up to the source.

    Returns:
        (grid, start_point): ``grid`` is a numpy char array of '.'/'#'
        indexed as grid[x][y] (y flipped so 0 is the lowest row);
        ``start_point`` is the sand source (500, 0) in grid coordinates.
    """
    # Copy before appending the sand source: the original mutated the
    # caller's list, so running part_1 then part_2 on the same data grew it.
    paths = list(paths) + [((500, 0),)]
    min_x = min(point[0] for path in paths for point in path)
    min_y = min(point[1] for path in paths for point in path)
    max_x = max(point[0] for path in paths for point in path)
    max_y = max(point[1] for path in paths for point in path)
    if infinite_floor:
        max_y += 2
        # Widen so a pile of height max_y-min_y fits under the source.
        min_x = min(min_x, 500 - (max_y - min_y) - 2)
        max_x = max(max_x, 500 + (max_y - min_y) + 2)
        paths = [((min_x, max_y), (max_x, max_y))] + paths
    # Translate to grid coordinates and flip y (larger y = higher).
    paths = tuple(tuple((x - min_x, max_y - y) for x, y in path) for path in paths)
    start_point = paths[-1][0]
    grid = numpy.full((max_x - min_x + 1, max_y - min_y + 1), '.')
    for path in paths:
        for (a_x, a_y), (b_x, b_y) in zip(path, path[1:]):
            a_x, b_x = min(a_x, b_x), max(a_x, b_x)
            a_y, b_y = min(a_y, b_y), max(a_y, b_y)
            grid[a_x:b_x + 1, a_y:b_y + 1] = '#'
    return grid, start_point
def get_resting_place(path, grid):
    """Advance the sand grain at path[-1] until it rests or leaves the grid.

    Extends *path* in place with every cell visited. Returns the path when
    the grain comes to rest, or None when it falls off the grid.
    """
    x, y = path[-1]
    while 0 <= x < grid.shape[0] and 0 <= y < grid.shape[1]:
        below = y - 1
        if grid[x][below] == '.':
            y = below
        elif grid[x - 1][below] == '.':
            x, y = x - 1, below
        elif grid[x + 1][below] == '.':
            x, y = x + 1, below
        else:
            # Blocked straight down and on both diagonals: at rest.
            return path
        path.append((x, y))
    return None
@timeit
def part_1(paths, infinite_floor=False):
    """Drop sand until a grain falls off (or, with the floor, none can move).

    Returns the number of grains that came to rest, and displays the final
    grid with matplotlib (pyplot.show() blocks until the window is closed).
    """
    grid, start_point = build_grid(paths, infinite_floor)
    path = [start_point]
    count = 0
    while path:
        path = get_resting_place(path, grid)
        if not path:
            break
        # The grain rests at the path's tip; popping lets the next grain
        # resume falling from the previous cell instead of the source.
        x, y = path.pop()
        grid[x][y] = 'o'
        count += 1
    # Reinterpret the '<U1' char grid as uint32 codepoints so imshow can
    # render it; rot90 puts y "up" for display.
    pyplot.imshow(numpy.rot90(grid.view(numpy.uint32)))
    pyplot.show()
    return count
@timeit
def part_2(paths):
    # ``part_1`` is wrapped by @timeit; ``.func`` presumably exposes the
    # undecorated function so part_1's timing is not double-counted --
    # TODO confirm against utils.timeit.
    return part_1.func(paths, infinite_floor=True)
def main():
    """Load the input once and run both puzzle parts on it."""
    data = get_data()
    part_1(data)
    part_2(data)

if __name__ == "__main__":
    main()
| bdaene/advent-of-code | 2022/day14/solve.py | solve.py | py | 2,337 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "utils.timeit",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.full",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
70738755233 | import re
import keyword
import os
from collections import Counter
def add_keywords(python_kw, cpp_kw, java_kw):
    """Fill the three lists (in place) with each language's reserved words."""
    # Python keywords come straight from the running interpreter.
    python_kw.extend(keyword.kwlist)
    # C++ keywords: http://www.cplusplus.com/doc/oldtutorial/variables/
    cpp_kw.extend(
        'asm auto bool break case catch char class const const_cast continue '
        'default delete do double dynamic_cast else enum explicit export extern '
        'false float for friend goto if inline int long mutable namespace '
        'new operator private protected public register reinterpret_cast return '
        'short signed sizeof static static_cast struct switch template this '
        'throw true try typedef typeid typename union unsigned using virtual '
        'void volatile wchar_t while'.split()
    )
    # Java keywords: https://docs.oracle.com/javase/tutorial/java/nutsandbolts/_keywords.html
    java_kw.extend(
        'abstract continue for new switch assert default goto package '
        'synchronized boolean do if private this break double implements '
        'protected throw byte else import public throws case enum instanceof '
        'return transient catch extends int short try char final interface '
        'static void class finally long strictfp volatile const float native '
        'super while'.split()
    )
def word_frequency(file):
    """Count case-insensitive word occurrences in the file at path *file*.

    Returns a collections.Counter mapping each lowercased word to its count.
    """
    # Use a context manager so the handle is closed deterministically
    # (the original ``open(file).read()`` leaked the file object).
    with open(file) as handle:
        # separate words in the given file
        words = re.findall(r'\w+', handle.read().lower())
    # returns a counter with the number of appearances of each word in the file
    word_counter = Counter(words)
    return word_counter
def guess_language(file):
    """Guess whether *file* contains Python, C++ or Java source.

    Sums how often each language's keywords appear in the file and returns
    the language name ('python', 'cpp' or 'java') with the highest total.
    """
    python_kw, cpp_kw, java_kw = [], [], []
    # fill the lists with each language keywords
    add_keywords(python_kw, cpp_kw, java_kw)

    word_counter = word_frequency(file)

    # Total keyword hits per language; insertion order also breaks ties
    # (python wins over cpp, cpp over java), matching the original chain.
    kw_counter = {
        'python': sum(word_counter[kw] for kw in python_kw),
        'cpp': sum(word_counter[kw] for kw in cpp_kw),
        'java': sum(word_counter[kw] for kw in java_kw),
    }

    return max(kw_counter, key=kw_counter.get)
def main():
    """Classify every file under ./cpp, ./java and ./python and print accuracy."""
    # Put the files to test in directories called 'cpp', 'java' and 'python'. The output will show a summary of the
    # given files, the prediction success percentage and the files in which it failed.
    cpp_files = os.listdir('cpp/')
    java_files = os.listdir('java/')
    python_files = os.listdir('python/')
    total_files = cpp_files.__len__() + java_files.__len__() + python_files.__len__()
    total_hits = 0
    # Iterate over all the files
    for file in cpp_files:
        if guess_language('cpp/' + file) == 'cpp':
            total_hits = total_hits + 1
        else:
            print('Missed prediction: ', file, '\tLanguage: cpp\tPredicted: ', guess_language('cpp/' + file))
    for file in java_files:
        if guess_language('java/' + file) == 'java':
            total_hits = total_hits + 1
        else:
            print('Missed prediction: ', file, '\tLanguage: java\tPredicted: ', guess_language('java/' + file))
    for file in python_files:
        if guess_language('python/' + file) == 'python':
            total_hits = total_hits + 1
        else:
            print('Missed prediction: ', file, '\tLanguage: python\tPredicted: ', guess_language('python/' + file))
    # print summary
    print('\nTotal files: ', total_files)
    print('Files correctly classified: ', total_hits)
    print('Hit percentage: ', round(total_hits / total_files * 100, 2), '%')

if __name__ == "__main__":
    main()
| aaronojeda/language-detector | LangDetector.py | LangDetector.py | py | 4,294 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keyword.kwlist",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"l... |
21563726124 | import json
from tags.tests.base_test import TagsBaseTestCase
class TagNamespaceTestCase(TagsBaseTestCase):
fixtures = ["test_authentication.yaml", "test_marketplace.yaml"]
def test_create_scoped_namespace(self):
##############################
# Creation of global namespace
self.login("17admin")
res = self.post(
"/tags/namespaces/", {"scoped_to_model": "global", "name": "users"}
)
self.assertStatusCode(res, 201)
# Check cannot create two global namespaces with the same name
res = self.post(
"/tags/namespaces/", {"scoped_to_model": "global", "name": "users"}
)
self.assertStatusCode(res, 400)
############################################
# Creation of a namespace for an association
self.login("17admin_pdm")
res = self.post(
"/tags/namespaces/",
{"scoped_to_model": "association", "scoped_to_pk": "pdm", "name": "farine"},
)
self.assertStatusCode(res, 201)
# Try to get all namespaces
res = self.get("/tags/namespaces/")
self.assertStatusCode(res, 200)
self.assertEqual(len(res.data["results"]), 2)
self.assertSetEqual(
{n["name"] for n in res.data["results"]}, {"users", "farine"}
)
# Try to get only the namespaces for the association + globals
res = self.get("/tags/namespaces/association/pdm/")
self.assertStatusCode(res, 200)
self.assertEqual(len(res.data["namespaces"]), 2)
self.assertSetEqual(
{n["name"] for n in res.data["namespaces"]}, {"farine", "users"}
)
# Try to get namespace forgetting the scope
res = self.post(
"/tags/namespaces/", {"scoped_to_model": "association", "name": "test_v"}
)
self.assertStatusCode(res, 400)
res = self.post("/tags/namespaces/", {"scoped_to_pk": "pdm", "name": "test_w"})
self.assertStatusCode(res, 400)
# Try to get global namespace
res = self.get("/tags/namespaces/?scoped_to_model=global")
self.assertStatusCode(res, 200)
self.assertEqual(len(res.data["results"]), 1)
self.assertSetEqual({n["name"] for n in res.data["results"]}, {"users"})
#######################################
# Creation without proper authorization
self.login("17admin_biero")
res = self.post(
"/tags/namespaces/",
{"scoped_to_model": "association", "scoped_to_pk": "pdm", "name": "test_x"},
)
self.assertStatusCode(res, 403)
self.login("17admin_biero")
res = self.post(
"/tags/namespaces/", {"scoped_to_model": "global", "name": "test_y"}
)
self.assertStatusCode(res, 403)
def test_edit_namespace(self):
###########################
# Create needed namespaces
self.login("17admin")
res = self.post(
"/tags/namespaces/", {"scoped_to_model": "global", "name": "users"}
)
self.assertStatusCode(res, 201, user_msg=res.data)
namespace_user = json.loads(res.content)
self.login("17admin_pdm")
res = self.post(
"/tags/namespaces/",
{"scoped_to_model": "association", "scoped_to_pk": "pdm", "name": "farine"},
)
self.assertStatusCode(res, 201)
namespace_farine = json.loads(res.content)
###########################
# Basic namespace edition
self.login("17admin_biero")
# Global namespace
res = self.patch(
"/tags/namespaces/{}/".format(namespace_user["id"]), {"name": "coucou"}
)
self.assertStatusCode(res, 403)
res = self.delete("/tags/namespaces/{}/".format(namespace_user["id"]))
self.assertStatusCode(res, 403)
# Specific namespace that doesn't belong to us
res = self.patch(
"/tags/namespaces/{}/".format(namespace_farine["id"]), {"name": "coucou"}
)
self.assertStatusCode(res, 403)
res = self.delete("/tags/namespaces/{}/".format(namespace_farine["id"]))
self.assertStatusCode(res, 403)
self.login("17admin")
res = self.patch(
"/tags/namespaces/{}/".format(namespace_user["id"]), {"name": "coucou"}
)
self.assertStatusCode(res, 200)
self.login("17admin_pdm")
res = self.patch(
"/tags/namespaces/{}/".format(namespace_farine["id"]), {"name": "coucou"}
)
self.assertStatusCode(res, 200)
############################
# Try to move the namespace
self.login("17admin_pdm")
res = self.patch(
"/tags/namespaces/{}/".format(namespace_user["id"]),
{"scoped_to_model": "global"},
)
self.assertStatusCode(res, 400)
res = self.patch(
"/tags/namespaces/{}/".format(namespace_user["id"]),
{"scoped_to_model": "association", "scoped_to_pk": "biero"},
)
self.assertStatusCode(res, 400)
def test_get_namespace_for_object(self):
self.login("17admin_pdm")
res = self.post(
"/tags/namespaces/",
{"scoped_to_model": "association", "scoped_to_pk": "pdm", "name": "farine"},
)
self.assertStatusCode(res, 201)
namespace_id = res.data["id"]
results = self.get("/tags/namespaces/?product=4").data[
"results"
] # get namespaces for a pdm product
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["id"], namespace_id)
results = self.get("/tags/namespaces/?product=3").data[
"results"
] # get namespaces for a biero product
self.assertEqual(len(results), 0)
| Mines-Paristech-Students/Portail-des-eleves | backend/tags/tests/test_namespaces.py | test_namespaces.py | py | 5,854 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "tags.tests.base_test.TagsBaseTestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 98,
"usage_type": "call"
}
] |
73654169633 | import os
from qgis.PyQt import QtWidgets, uic
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QApplication
from qgis.utils import iface
from qgis.core import (
QgsProject,
QgsPointXY,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
)
from qgis.gui import QgsVertexMarker
# using utils
from .utils import (
draw_rect_bound,
icon,
get_nlp,
bk_10000,
bk_2500,
bk_1000,
bk_500,
bk_250,
dialogBox,
)
from .maptools import MapTool
# Load the dialog's UI definition; FORM_CLASS is mixed into DrawNLPDialog below.
FORM_CLASS, _ = uic.loadUiType(
    os.path.join(os.path.dirname(__file__), "../ui/gambar_nlp.ui")
)

# constants
# Scale options shown in the dialog's combo box.
skala = ["1:10000", "1:2500", "1:1000", "1:500", "1:250"]

# constants for NLP
# Origin of the map-sheet (NLP) grid and the sheet size in metres for each
# scale; each finer scale subdivides the coarser sheet 4x4 -- presumably per
# the Indonesian sheet-numbering scheme (TODO confirm against utils helpers).
x_origin = 32000
y_origin = 282000
grid_10rb = 6000
grid_2500 = 1500
grid_1000 = 500
grid_500 = 250
grid_250 = 125
class DrawNLPDialog(QtWidgets.QDialog, FORM_CLASS):
    """Dialog for NLP Dialog"""
    # Emitted from closeEvent so the plugin can react to the dialog closing.
    closingPlugin = pyqtSignal()
    def __init__(self, parent=iface.mainWindow()):
        """Wire up the dialog widgets, CRS tracking and clipboard support.

        NOTE(review): the default ``parent=iface.mainWindow()`` is evaluated
        once at import time, not per call -- confirm this is intended.
        """
        self.iface = iface
        self.canvas = iface.mapCanvas()
        super(DrawNLPDialog, self).__init__(parent)
        self.setupUi(self)
        self.project = QgsProject()
        self.setWindowIcon(icon("icon.png"))
        copy_icon = QIcon(":/images/themes/default/mActionEditCopy.svg")
        # Keep the cached EPSG code in sync with the project CRS.
        self.project.instance().crsChanged.connect(self.set_epsg)
        # Clipboard
        self.clipboard = QApplication.clipboard()
        # NOTE(review): assigning ``.checked`` only sets a Python attribute;
        # the Qt widget state is changed via setChecked() elsewhere -- confirm.
        self.ambil_titik.checked = False
        # Last picked map point (QgsPointXY) or None before any pick.
        self.point = None
        # setup map tool
        self.previousMapTool = self.canvas.mapTool()
        self.epsg = self.project.instance().crs().authid()
        self.crs_tm3.setText(self.project.instance().crs().description())
        # copy to clipboard
        self.copyTeksNLP.setIcon(copy_icon)
        self.copyTeksNLP.clicked.connect(self.copy_clicked)
        # self.skala_peta.currentIndexChanged.connect(self.get_nlp_text())
        self.ambil_titik.clicked.connect(self.on_pressed)
        self.skala_peta.addItems(skala)
def closeEvent(self, event):
try:
self.closingPlugin.emit()
self.iface.actionPan().trigger()
MapTool(self.canvas, self.vm).clear_drawing()
event.accept()
except Exception as e:
pass
def copy_clicked(self, button_index):
text = self.nlp.text()
self.clipboard.setText(text)
self.iface.statusBarIface().showMessage(
"Nomor lembar peta berhasil disalin", 3000
)
    def createMapTool(self):
        # NOTE(review): ``self.myMapTool`` is never assigned anywhere visible
        # in this class -- confirm this method is still used/needed.
        self.canvas.setMapTool(self.myMapTool)
    def deactivateMapTool(self):
        """Tear down the point-picking tool and restore the previous map tool."""
        self.point_tool.isEmittingPoint = False
        self.point_tool.deleteLater()
        self.canvas.scene().removeItem(self.vm)
        self.canvas.setMapTool(self.previousMapTool)
    def set_epsg(self):
        """Refresh the cached CRS authid and show its description in the dialog."""
        self.epsg = self.project.instance().crs().authid()
        self.crs_tm3.setText(self.project.instance().crs().description())
        # print("changing epsg now into", self.epsg)
def on_pressed(self):
self.check_is_tm3()
self.ambil_titik.checked = True
try:
self.canvas.scene().removeItem(self.vm)
self.canvas.scene().removeItem(self.rb)
MapTool(self.canvas, self.vm).clear_drawing()
except: # noqa
pass
self.vm = self.create_vertex_marker()
self.point_tool = MapTool(self.canvas, self.vm)
self.point_tool.map_clicked.connect(self.update_titik)
self.point_tool.isEmittingPoint = True
self.canvas.setMapTool(self.point_tool)
def check_is_tm3(self):
if int(self.epsg.split(":")[1]) in range(23830, 23846):
return True
# print("EPSG Tercatat", self.epsg.split(":")[1])
else:
dialogBox("Anda belum mengatur sistem proyeksi TM-3 Project")
# print("EPSG Tercatat", self.epsg.split(":")[1])
self.ambil_titik.checked = False
return False
    def update_titik(self, x, y):
        """Handle a picked map point: validate it against TM-3 bounds, then update the NLP.

        Connected to MapTool.map_clicked; (x, y) are map coordinates in the
        project CRS.
        """
        self.ambil_titik.setChecked(False)
        self.point = QgsPointXY(x, y)
        # check point bounds against TM-3 Boundary
        # crs.bounds() is expressed in WGS84, so transform it into the
        # project CRS before testing containment.
        source_crs = QgsCoordinateReferenceSystem("EPSG:4326")
        crs = QgsCoordinateReferenceSystem(self.epsg)
        transform = QgsCoordinateTransform(source_crs, crs, QgsProject.instance())
        crs_box = transform.transformBoundingBox(crs.bounds())
        if not crs_box.contains(self.point):
            dialogBox("Anda memilih titik di luar zona TM-3 Project")
        else:
            self.koordinat.setText(str(round(x, 3)) + "," + str(round(y, 3)))
            self.canvas.unsetMapTool(self.point_tool)
            self.deactivateMapTool()
            self.get_nlp_text()
def create_vertex_marker(self, type="CROSS"):
vm = QgsVertexMarker(self.canvas)
if type == "BOX":
icon_type = QgsVertexMarker.ICON_BOX
elif type == "CIRCLE":
icon_type = QgsVertexMarker.ICON_CIRCLE
elif type == "CROSS":
icon_type = QgsVertexMarker.ICON_CROSS
else:
icon_type = QgsVertexMarker.ICON_X
vm.setIconType(icon_type)
vm.setPenWidth(3)
vm.setIconSize(7)
return vm
def get_nlp_text(self):
skala_now = self.skala_peta.currentText()
if self.point is not None:
x, y = self.point
self.nlp.setText(get_nlp(skala_now[2:], x, y))
if self.checkBoxNLP.isChecked():
self.draw_nlp()
def draw_nlp(self):
xMin = xMax = yMin = yMax = None
skala_now = self.skala_peta.currentText()
if self.point is not None:
x, y = self.point
def rect10rb():
k_10rb, b_10rb = bk_10000(x, y)
xMin = x_origin + (k_10rb - 1) * grid_10rb
yMin = y_origin + (b_10rb - 1) * grid_10rb
xMax = x_origin + (k_10rb) * grid_10rb
yMax = y_origin + (b_10rb) * grid_10rb
return [xMin, yMin, xMax, yMax]
def rect2500():
k_2500, b_2500 = bk_2500(x, y)
ori_10rb_x, ori_10rb_y, p, q = rect10rb()
xMin = ori_10rb_x + (k_2500 - 1) * grid_2500
yMin = ori_10rb_y + (b_2500 - 1) * grid_2500
xMax = ori_10rb_x + (k_2500) * grid_2500
yMax = ori_10rb_y + (b_2500) * grid_2500
return [xMin, yMin, xMax, yMax]
def rect1000():
k_1000, b_1000 = bk_1000(x, y)
ori_2500_x, ori_2500_y, p, q = rect2500()
xMin = ori_2500_x + (k_1000 - 1) * grid_1000
yMin = ori_2500_y + (b_1000 - 1) * grid_1000
xMax = ori_2500_x + (k_1000) * grid_1000
yMax = ori_2500_y + (b_1000) * grid_1000
return [xMin, yMin, xMax, yMax]
def rect500():
k_500, b_500 = bk_500(x, y)
ori_1000_x, ori_1000_y, p, q = rect1000()
xMin = ori_1000_x + (k_500 - 1) * grid_500
yMin = ori_1000_y + (b_500 - 1) * grid_500
xMax = ori_1000_x + (k_500) * grid_500
yMax = ori_1000_y + (b_500) * grid_500
return [xMin, yMin, xMax, yMax]
def rect250():
k_250, b_250 = bk_250(x, y)
ori_500_x, ori_500_y, p, q = rect500()
xMin = ori_500_x + (k_250 - 1) * grid_250
yMin = ori_500_y + (b_250 - 1) * grid_250
xMax = ori_500_x + (k_250) * grid_250
yMax = ori_500_y + (b_250) * grid_250
return [xMin, yMin, xMax, yMax]
if skala_now == skala[0]:
xMin, yMin, xMax, yMax = rect10rb()
elif skala_now == skala[1]:
xMin, yMin, xMax, yMax = rect2500()
elif skala_now == skala[2]:
xMin, yMin, xMax, yMax = rect1000()
elif skala_now == skala[3]:
xMin, yMin, xMax, yMax = rect500()
elif skala_now == skala[4]:
xMin, yMin, xMax, yMax = rect250()
if xMin is not None:
draw_rect_bound(xMin, yMin, xMax, yMax, self.epsg)
| danylaksono/GeoKKP-GIS | modules/draw_nlp.py | draw_nlp.py | py | 8,095 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "qgis.PyQt.uic.loadUiType",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "qgis.PyQt.uic",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"li... |
2299113487 | import os
import json
def get_ground_truth(filename):
#0022000159342.jpg
#0022000159342_1.jpg
if filename.find("_")!=-1:
return filename[0:filename.find("_")]
else:
return filename[0:filename.find(".")]
f = open("single_test.txt","r")
for line in f.readlines():
data = line.strip().split(" ")
filename = data[0]
left = data[1]
top = data[2]
right = data[3]
bottom = data[4]
bbox = {}
bbox["left"] = left
bbox["top"] = top
bbox["right"] = right
bbox["bottom"] = bottom
attrib = {}
attrib["Type"] = "EAN13"
result = {}
result["attrib"] = attrib
result["bbox"] = bbox
result["text"] = get_ground_truth(filename)
results = []
results.append(result)
fw = open(filename+".txt","w")
fw.write(json.dumps(results))
fw.close()
f.close()
| xulihang/Barcode-Reading-Performance-Test | utils/create_ground_truth_for_barcode_bb.py | create_ground_truth_for_barcode_bb.py | py | 891 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 35,
"usage_type": "call"
}
] |
29505854194 | '''
Prepare a word sequence (sentence) with padding
- convert a sentence String to list of words in sentence
- convert list of words in sentence to array of word_embedding and padded
'''
import numpy as np
from keras.preprocessing import sequence
from textblob import Sentence
from src.static_variable import load_word_embedding
# Example dialogs
dialogs = [
"Beijing is a historical city that can be traced back to 3,000 years ago.",
"The city's history dates back three millennia. As the last of the Four Great Ancient Capitals of China",
"Beijing has been the political center of the country for much of the past eight centuries",
"With mountains surrounding the inland city on three sides",
"When you want to use a different language, select the same Keyboard & Character Viewer item in the menu bar -- although, when more than one language is enabled, the icon will now show the flag or symbol of the current language in use -- and click on the desired language. The Keyboard Viewer will then change layout to represent the keys of the desired language and allow you to insert native characters. To change back to your regular language, once again click on the Keyboard & Character Viewer menu bar item and select the desired input language to switch to."
]
# load pre-trained Word Embedding model
WORD_EMB = load_word_embedding(load_glove=False)
# constant of maximum number of word in sentence (important when padding)
MAX_WORD = 11
N_CHANEL = 1
def generate_wordlist_emb(txt):
'''
Process a text string to list of embedded word
'I love you' -> ['I', 'love', 'you'] -> list([emb('I'), emb('love'), emb('you')])
:param word_seq: text String
:return: list of embedded word
'''
word_seq = Sentence(txt.lower()).words
wordlist = []
for word in word_seq:
if word in WORD_EMB.vocab:
wordlist.append(WORD_EMB[word])
else:
# shape is depend on embedding dimension
# use [1., 1., 1., ...] represent a unknown word
wordlist.append(np.full(shape=[300], fill_value=1.0, dtype=np.float32))
return wordlist
def generate_sentence_embedding(sentence):
# get list of embedded words
new_sentence = generate_wordlist_emb(sentence)
# padding a sentence by add 0 value at the front until sentence's length = max_word
return sequence.pad_sequences([new_sentence], maxlen=MAX_WORD, dtype=np.float32)[0]
def generate_dialogs_embedding(dialogs):
tmp_dialogs = []
for i, sentence in enumerate(dialogs):
tmp_dialogs.append(generate_sentence_embedding(sentence))
# padding a word that lower than max_words
tmp_dialogs = np.array(tmp_dialogs)
# n_sample, max_word, emb_dim, = tmp_dialogs.shape[0], tmp_dialogs.shape[1], tmp_dialogs.shape[2]
# # return with add 1 chanel in the last index
# return np.reshape(tmp_dialogs, [n_sample, max_word, emb_dim, N_CHANEL])
return tmp_dialogs
# dialogs_emb = generate_dialogs_embedding(dialogs)
| jamemamjame/JameChat | coding_model/preprocess/word_seq_perp.py | word_seq_perp.py | py | 3,013 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.static_variable.load_word_embedding",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "textblob.Sentence",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "... |
13563725205 | """
the Bot makes everyday posts with some picture+Belarusial legal word and its definition
"""
#!/usr/bin/python3
import telegram
import time
TOKEN = 'your_token_here'
bot = telegram.Bot(token=TOKEN)
chat_id='your_chat_id_here'
f = open('dictionary.txt', 'r', encoding='UTF-8')
words = f.read().split('\n')
f.close()
temp = ''
num=1
for word in words:
temp += word + '\n'
if word[-1] == '*':
post = "#беларуская_лексіка #родныя_словы" + "\n" + "\n" + temp[:-2:]
#pictures named 1.jpg, 2.jpg, 3.jpg, etc.
name='folder_with_pictures'+str(num)+'.jpg'
with open(name, 'rb') as photo:
bot.send_photo(chat_id, photo, post)
temp = ''
num+=1
time.sleep(86400)
| HauryDow/bel_legal_lang_bot | Bel_legal_language_bot.py | Bel_legal_language_bot.py | py | 760 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "telegram.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
}
] |
8246364725 | import matplotlib.pyplot as plt
from matplotlib import ticker
# taken from https://scikit-learn.org/stable/auto_examples/manifold/plot_compare_methods.html#sphx-glr-auto-examples-manifold-plot-compare-methods-py
def plot_3d(points, points_color, title):
x, y, z = points.T
fig, ax = plt.subplots(
figsize=(6, 6),
facecolor="white",
tight_layout=True,
subplot_kw={"projection": "3d"},
)
fig.suptitle(title, size=16)
col = ax.scatter(x, y, z, c=points_color, s=50, alpha=0.8)
ax.view_init(azim=-60, elev=9)
ax.xaxis.set_major_locator(ticker.MultipleLocator(4))
ax.yaxis.set_major_locator(ticker.MultipleLocator(4))
ax.zaxis.set_major_locator(ticker.MultipleLocator(4))
fig.colorbar(col, ax=ax, orientation="horizontal", shrink=0.6, aspect=60, pad=0.01)
plt.show()
def plot_2d(points, points_color, title):
fig, ax = plt.subplots(figsize=(3, 3), facecolor="white", constrained_layout=True)
fig.suptitle(title, size=16)
plt.scatter(ax, points, points_color)
plt.show()
| MoritzM00/Bachelor-Thesis | python/plot_utils.py | plot_utils.py | py | 1,064 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.MultipleLocator",
"line_number": 19,
"usage_type": "call"
},
{
"ap... |
35251753542 | '''PROGRAM DESCRIPITON:
HMTL form for registration is created and after filling the data, the student details like name,email, course are stored in the MongoDB "Registration" database.
'''
# PROGRAMMED BY: PULI SNEHITH REDDY
# MAIL ID : snehithreddyp@gmail.com
# DATE : 23-09-2021
# VERSION : 3.7.9
# CAVEATS : None
# LICENSE : None
from flask import Flask
from flask import render_template
from flask import redirect
from flask import url_for
from flask import request
import json
from pymongo import MongoClient
connection = MongoClient("mongodb://localhost:27017")
def mongo_connection():
if connection:
return True
else:
return False
def mongodb_list():
if mongo_connection() == True:
return connection.list_database_names()
def db_exists( db_name):
if db_name in mongodb_list():
return True
else:
return False
def create_new_collection(db_name, new_collection):
if connection:
db_name = connection[db_name]
new_collection = db_name[new_collection]
return new_collection
else:
return("error")
# timestand for mongodb
def timestamp():
import datetime as dt
return dt.datetime.now()
def insert_data(db_name,collection_name,data):
if connection:
connection[db_name][collection_name].insert_one(data)
return "success"
else:
return "error"
def display(db_name,collection_name):
a=[]
if connection:
for i in connection[db_name][collection_name].find():
a.append(i)
for i in a:
print(i)
print("-----------------------------------------------")
app = Flask(__name__)
def func(name,email,course):
data={}
data["name"]=name
data["email"]=email
data["course"]=course
insert_data("Registration","Data",data)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submi',methods=['POST'])
def result():
name=request.form['name']
email=request.form['email']
course=request.form['course']
func(name,email,course)
return render_template("submit.html")
@app.route('/user')
def user():
return render_template('user.html')
if __name__ == '__main__':
app.run(debug=True, port=5001)
| SnehithReddy09/Python | Class Assignments/Storing deatils of HTML form in Mongodb/app.py | app.py | py | 2,383 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flas... |
33283932467 | import datetime
from django.shortcuts import render
from django.db.models import Subquery, Count
from django.http import HttpResponseRedirect
from django.views.decorators.cache import cache_page
from port.models import Port
from stats.models import Submission, PortInstallation
from port.filters import PortFilterByMultiple
from utilities import old_search_redirect
@cache_page(60 * 10)
def index(request):
# Support for old "?search=<QUERY>&search_by=<name,description>" links
# Check if search query is present
if request.GET.get('search'):
return HttpResponseRedirect(old_search_redirect(request))
ports_count = Port.objects.filter(active=True).count()
submissions_unique = Submission.objects.filter(timestamp__gte=datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=30)).order_by('user', '-timestamp').distinct('user')
top_ports = PortInstallation.objects.filter(submission_id__in=Subquery(submissions_unique.values('id')), requested=True).exclude(port__icontains='mpstats').values('port').annotate(num=Count('port')).order_by('-num')[:10]
recently_added = Port.objects.all().order_by('-created_at').only('name')[:10]
return render(request, 'index.html', {
'top_ports': top_ports,
'recently_added': recently_added,
'ports_count': ports_count
})
# Respond to ajax-call triggered by the search box
def search(request):
query = request.GET.get('search_text', '')
search_by = request.GET.get('search_by', '')
ports = PortFilterByMultiple(request.GET, queryset=Port.get_active.all()).qs[:50]
return render(request, 'filtered_table.html', {
'ports': ports,
'query': query,
'search_by': search_by
})
def about_page(request):
return render(request, 'about.html')
| macports/macports-webapp | app/views.py | views.py | py | 1,811 | python | en | code | 49 | github-code | 1 | [
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utilities.old_search_redirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "port.models.Port.objects.filter",
"line_number": 20,
"usage_type": "call"
... |
31623979015 | import socket
import json
import sys # for exit
# Create a UDP socket at client side
UDPClientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Initialize variables
udp_port = 1234
local_host = socket.gethostname()
serverAddressPort = (local_host, udp_port)
IPAddr = socket.gethostbyname(local_host)
buffer_size = 1024
UDPClientSocket.connect(serverAddressPort)
def send_msg(sending_data):
UDPClientSocket.sendto(sending_data.encode(), serverAddressPort)
def request_options():
options = "Enter 1 -> Send text, 2-> Check for text, 3-> Quit: "
c_option = input(options)
return c_option
def receiving_data(data, opt, local_id):
recv_data, server_add = UDPClientSocket.recvfrom(buffer_size * 2)
recv_data = json.loads(recv_data.decode())
if opt == "2":
print("TEXT RECEIVED: To: {} From: {} Text: {}".format(local_id, recv_data["sender"], recv_data["message"]))
else:
print("MESSAGE RECEIVED FROM SERVER: {}".format(recv_data["message"]))
variables = ProcessData()
variables.client_IP = IPAddr
print("Enter your ID as a string")
variables.client_id = input("->")
option = request_options()
while option == "1" or option == "2":
if option == "1":
variables.client_code = "T"
print("Enter dest ID as a string: ")
variables.dest_id = input("-> ")
request_send_text = "Enter your text: "
variables.send_text = input(request_send_text)
send_data = json.dumps(
{"code": variables.client_code, "dest_id": variables.dest_id, "client_id": variables.client_id,
"send_text": variables.send_text, "client_ip": variables.client_IP})
print("MESSAGE TO SEND: {} To: {} From: {} Text: {}".format(variables.client_code, variables.dest_id,
variables.client_id, variables.send_text))
send_msg(send_data)
receiving_data(variables, option, variables.client_id)
elif option == "2":
variables.client_code = "C"
print("MESSAGE TO SEND: {} From: {} ".format(variables.client_code, variables.client_id))
send_data = json.dumps({"code": variables.client_code, "client_id": variables.client_id})
send_msg(send_data)
receiving_data(variables, option, variables.client_id)
option = request_options()
else:
UDPClientSocket.close()
sys.exit()
| pmdung2011/Simple_Text_SocketProgramming | client.py | client.py | py | 2,409 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "socket.socket",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "socket.gethostn... |
38806583081 | import pygame
import math
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 250)
RED = (255, 0, 0)
pygame.init()
# Set the width and height of the screen [width, height]
size = (800, 700)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# --- Game logic should go here
# --- Drawing code should go here
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
for i in range(200):
radians_x = i / 20
radians_y = i / 6
x = int(75 * math.sin(radians_x)) + 200
y = int(75 * math.cos(radians_y)) + 200
pygame.draw.line(screen, BLACK, [x,y], [x+5,y], 5)
for x_offset in range(30, 300, 30):
pygame.draw.line(screen,BLACK,[x_offset,100],[x_offset-10,90],2)
pygame.draw.line(screen,BLACK,[x_offset,90],[x_offset-10,100],2)
# Draw a rectangle
pygame.draw.rect(screen,BLACK,[320,20,250,100],2)
# Draw an ellipse, using a rectangle as the outside boundaries
pygame.draw.ellipse(screen, BLACK, [320,20,250,100], 2)
# Draw an arc as part of an ellipse. Use radians to determine what
# angle to draw.
pygame.draw.arc(screen, GREEN, [100,300,250,200], math.pi/2, math.pi, 2)
pygame.draw.arc(screen, BLACK, [100,300,250,200], 0, math.pi/2, 2)
pygame.draw.arc(screen, RED, [100,300,250,200],3*math.pi/2, 2*math.pi, 2)
pygame.draw.arc(screen, BLUE, [100,300,250,200], math.pi, 3*math.pi/2, 2)
# This draws a triangle using the polygon command
pygame.draw.polygon(screen, BLACK, [[300,300], [300,500], [500,500]], 5)
# Select the font to use, size, bold, italics
font = pygame.font.SysFont('Calibri', 25, True, False)
# Render the text. "True" means anti-aliased text.
# Black is the color. The variable BLACK was defined
# above as a list of [0, 0, 0]
# Note: This line creates an image of the letters,
# but does not put it on the screen yet.
text = font.render("My text", True, BLACK)
# Put the image of the text on the screen at 250x250
screen.blit(text, [500, 400])
score = 100
text = font.render("Score: ", str(score), True, BLACK)
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
clock.tick(60)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit() | omarjcm/p59-programacion_hipermedial | code/rv/caracteristicas/01_taller.py | 01_taller.py | py | 2,876 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
36260433523 | import streamlit as st
import pandas as pd
import pickle
from random import randint
books = pd.read_csv('df_2.csv')
df_1 = pickle.load(open('df_1.pkl', 'rb'))
def recommendation(title):
recommendations = pd.DataFrame(df_1.nlargest(11, title)['title'])
recommendations = recommendations[recommendations['title'] != title]
list_name = recommendations['title'].values.tolist()
add_img = []
for name in list_name:
add_img.append(books[books['title'] == name]['image_url'].iloc[0])
return list_name, add_img
st.title('Content Based Book Recommendation System ')
with st.container():
select = st.selectbox(
'Which book would you like to choose?',
books['title'].values)
st.write('You selected:', select)
if st.button('Recommend'):
col = ["a", "b", "c", "d", "e"]
list_books, list_imgs = recommendation(select)
col = st.columns(5)
with st.container():
for i in range(5):
c = col[i]
with c:
st.text(list_books[i])
st.image(list_imgs[i])
with st.container():
col = st.columns(5)
for i in range(5):
c = col[i]
with c:
st.text(list_books[i+5])
st.image(list_imgs[i+5])
with st.container():
st.header('Books')
colb = ['a', 'b', 'c', 'd', 'e']
for i in range(10):
with st.container():
colb = st.columns(5)
for c in colb:
index = randint(0, len(books))
book = books.iloc[index]
with c:
st.text(book.title)
st.image(book.image_url)
| Hainguyendangduc/BookRecommendation | app.py | app.py | py | 1,748 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"lin... |
45140457164 | from telebot import types
from datetime import date, timedelta
from ...utils import constants
def get_days(room_name: str) -> types.InlineKeyboardMarkup:
now = date.today()
markup = types.InlineKeyboardMarkup()
for day in range(7):
""" add in InlineKeyboard """
w_day = now + timedelta(days=day)
if w_day.weekday() in range(5):
if now == w_day:
text = "Сегодня"
callback_data = f"{constants.DAYS_OF_THE_WEEK[w_day.weekday()]} - " \
f"{room_name} - {w_day.strftime('%d-%m-%Y')}"
else:
text = f"{constants.DAYS_OF_THE_WEEK[w_day.weekday()]} - " \
f"{room_name} - {w_day.strftime('%d-%m-%Y')}"
callback_data = f"{constants.DAYS_OF_THE_WEEK[w_day.weekday()]} - " \
f"{room_name} - {w_day.strftime('%d-%m-%Y')}"
markup.add(
types.InlineKeyboardButton(
text=text, callback_data=callback_data
)
)
return markup
| BernarBerdikul/mybooking | mybooking/core/bot_services/get_days.py | get_days.py | py | 1,086 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "telebot.types.InlineKeyboardMarkup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tel... |
16874544930 | #!/usr/bin/env python3
import argparse
import sys
from KaSaAn.functions import prefixed_snapshot_analyzer
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
description='Get cumulative and mean distribution of complex sizes, plus distribution of number of species,'
' based on snapshots sharing a common prefix, like "t_one_snap_7.ka" having the prefix "t_one_".'
' Snapshots must contain the word "snap" and end in ".ka". Files will be produced in the same'
' directory as the snapshots are. They will be prefixed accordingly, e.g.'
' [prefix]distribution_cumulative.csv')
parser.add_argument('-p', '--prefix', type=str, default='',
help='Prefix identifying snapshots to analyze; e.g. "foo_snap_" is the prefix for'
' "foo_snap_76.ka". Files must end with [number].ka')
parser.add_argument('-d', '--working_directory', type=str, default='./',
help='The directory where snapshots are held, and where distribution files will be saved to.')
parser.add_argument('-v', '--verbosity', action='store_true',
help='Print extra information, like number of snapshots found, directory understood, and'
' file names used for output.')
args = parser.parse_args()
prefixed_snapshot_analyzer(base_directory=args.working_directory, snap_prefix=args.prefix, verbosity=args.verbosity)
if __name__ == '__main__':
main()
| yarden/KaSaAn | KaSaAn/scripts/prefixed_snapshot_analyzer.py | prefixed_snapshot_analyzer.py | py | 1,606 | python | en | code | null | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "KaSaAn.functions.prefixed_snapshot_analyzer",
"line_number": 27,
"usage_type": "call"
}
] |
23739113062 | import argparse
import pytorch_lightning as pl
import torch
from torch.nn import functional as F
import constants
from lightling_wrapper import BaseTorchLightlingWrapper, SpeechCommandDataModule
from models.bc_resnet.bc_resnet_model import BcResNetModel
from models.bc_resnet.mel_spec_dataset import MelSpecDataSet, mel_collate_fn
from models.simple_conv.base_dataset import AudioArrayDataSet, simconv_collate_fn
from models.simple_conv.simple_conv_model import SimpleConv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="conv")
parser.add_argument("--pretrain", type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
if args.model == "conv":
core_model = SimpleConv(
n_channel=constants.N_CHANNEL, kernel_size_l1=constants.KERNEL_SIZE_L1)
loss_fn = F.nll_loss
collate_fn = simconv_collate_fn
dataset_fn = AudioArrayDataSet
elif args.model == "bc_resnet":
core_model = BcResNetModel(
scale=constants.SCALE_BC_RESNET, dropout=constants.DROPOUT)
loss_fn = F.nll_loss
collate_fn = mel_collate_fn
dataset_fn = MelSpecDataSet
else:
raise ValueError("Invalid model name")
model = BaseTorchLightlingWrapper(
core_model=core_model,
loss_fn=loss_fn,
learning_rate=constants.LEARNING_RATE,
)
data_module = SpeechCommandDataModule(
dataset_fn, collate_fn, batch_size=constants.BATCH_SIZE
)
if torch.cuda.is_available():
trainer = pl.Trainer(
accelerator="gpu", devices=1
)
else:
trainer = pl.Trainer()
trainer.test(model, data_module, ckpt_path=args.pretrain)
| egochao/speech_commands_distillation_torch_lightling | test.py | test.py | py | 1,768 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.simple_conv.simple_conv_model.SimpleConv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "constants.N_CHANNEL",
"line_number": 28,
"usage_type": "attribute... |
28993145664 | #!/usr/bin/env python3
from argparse import ArgumentParser
from collections import namedtuple
import hashlib
import json
import os
import shutil
LayerInfo = namedtuple('LayerInfo', ['path', 'metadata', 'contenthash'])
def hash(data):
m = hashlib.sha256()
m.update(bytes(data, 'utf-8'))
return m.hexdigest()
if __name__ == '__main__':
parser = ArgumentParser(description='Build image manifest')
parser.add_argument('layers', nargs='+')
parser.add_argument('--config', required=True)
parser.add_argument('--tag', required=True)
parser.add_argument('--architecture', required=True)
parser.add_argument('--os', required=True)
parser.add_argument('--out', required=True)
opts = parser.parse_args()
non_empty_layers = []
for layer_path in opts.layers:
if os.path.exists(os.path.join(layer_path, 'metadata.json')):
with open(os.path.join(layer_path, 'metadata.json')) as f:
metadata = json.load(f)
with open(os.path.join(layer_path, 'contentsha256')) as f:
contenthash = f"sha256:{f.read().strip()}"
non_empty_layers.append(
LayerInfo(layer_path, metadata, contenthash))
with open(opts.config) as f:
container_config = json.load(f)
config = json.dumps({
'created': '1970-01-01T00:00:00Z',
'rootfs': {
'type': 'layers',
'diff_ids': list(map(lambda x: x.contenthash, non_empty_layers))
},
'config': container_config,
'architecture': opts.architecture,
'os': opts.os,
})
config_hash = hash(config)
manifest = json.dumps({
'schemaVersion': 2,
'config': {
'mediaType': 'application/vnd.oci.image.config.v1+json',
'size': len(config),
'digest': 'sha256:{}'.format(config_hash)
},
'layers': list(map(lambda x: x.metadata, non_empty_layers))
})
manifest_hash = hash(manifest)
metadata_dict = {
'mediaType': 'application/vnd.oci.image.manifest.v1+json',
'size': len(manifest),
'digest': 'sha256:{}'.format(manifest_hash),
'platform': {
'architecture': opts.architecture,
'os': opts.os,
}
}
if opts.tag != '':
metadata_dict['annotations'] = {
'org.opencontainers.image.ref.name': opts.tag
}
blobs_path = os.path.join(opts.out, 'blobs', 'sha256')
os.makedirs(blobs_path)
def open_file(name):
return open(os.path.join(opts.out, name), 'w', encoding='utf-8')
with open_file('config.json') as f:
f.write(config)
os.symlink(f.name, os.path.join(blobs_path, config_hash))
with open_file('manifest.json') as f:
f.write(manifest)
os.symlink(f.name, os.path.join(blobs_path, manifest_hash))
with open_file('metadata.json') as f:
json.dump(metadata_dict, f)
for layer in non_empty_layers:
layer_blobs_path = os.path.join(layer.path, 'blobs', 'sha256')
for path in os.listdir(layer_blobs_path):
shutil.copyfile(os.path.join(layer_blobs_path, path),
os.path.join(blobs_path, path),
follow_symlinks=False)
| iknow/nix-utils | oci/build-image-manifest.py | build-image-manifest.py | py | 3,280 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "hashlib.sha256",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path... |
32330400016 | import os
import requests
import time
import pyfiglet
req = requests.get('https://google.com')
os.system('clear')
print ("Fuck Dunia Percintaan")
time.sleep(1)
os.system('clear')
print ("\33[36;1m")
text = pyfiglet.figlet_format("SantriXploiter")
print (text)
print
print ("[1] Tentang SantriXploiter")
print ("[2] Blog SantriXploiter")
print ("[3] Web SantriXploiter")
print ("[4] Instagram SntriXploiter")
print ("[5] Group Telegram ")
print ("[6] Channel Youtube ")
print ("[7] Fanspage Facebook")
print
pilih = int(input("pilih : "))
if pilih == 1:
time.sleep(1)
print("Komunitas dari Pelajar Biasa yang tertarik Pada Dunia IT Dan Blog Yang Membahas Seputar Dunia IT dan Agama")
elif pilih == 2:
time.sleep(1)
os.system('xdg-open https://santrixploiter.blogspot.com')
elif pilih == 3:
time.sleep(1)
os.system('xdg-open http://aliyajnck.000webhostapp.com')
elif pilih == 4:
time.sleep(1)
os.system('xdg-open https://www.instagram.com/santri.xploiter/')
elif pilih == 5:
time.sleep(1)
os.system('xdg-open http://t.me/santrixploiter')
elif pilih == 6:
time.sleep(1)
os.system('xdg-open https://www.youtube.com/channel/UCG8EAqQdcDzphpluzrJWt3Q')
elif pilih == 7:
time.sleep(1)
os.system('xdg-open https://www.facebook.com/santrixploiter/')
else:
print ("Tidak Tersedia") | santrixploiter/santri | sx.py | sx.py | py | 1,344 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 9,
... |
37589968716 | '''
Build a tweet sentiment analyzer
'''
from __future__ import print_function
import six.moves.cPickle as pickle
import time
from collections import OrderedDict
import sys
import time
from sys import argv
import numpy
import theano
from theano import config
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import imdb
from imdb import masklist
import scipy.io as sio
datanamee = 'sat'
datasets = {datanamee: imdb.load_data}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
# return numpy.asarray(data, dtype=config.floatX)
return numpy.asarray(data, dtype=config.floatX)
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
# print('minibatches', minibatches)
return zip(range(len(minibatches)), minibatches)
def get_dataset(name):
return datasets[name]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.items():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.items():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
Global (not LSTM) parameter. For the embeding and the classifier.
"""
params = OrderedDict()
# embedding
# randn = numpy.random.rand(options['n_words'],
# options['dim_proj'])
# params['Wemb'] = (0.01 * randn).astype(config.floatX)
params = get_layer(options['encoder'])[0](options,
params,
prefix=options['encoder'])
# classifie
for i in range(recyl_maxlen):
# params for label prediction
params['U_' + str(i)] = (numpy.random.randn((i + 1) * options['dim_proj'],
int(options['ydim'])).astype(config.floatX) /
numpy.sqrt((i + 1) * options['dim_proj']))
params['b_' + str(i)] = numpy.zeros((int(options['ydim']),)).astype(config.floatX)
# params for modal prediction
params['U_seq_' + str(i)] = (numpy.random.randn( options['dim_proj'],
int( options['maxlen'])).astype(config.floatX)
/ numpy.sqrt(options['dim_proj']))
params['b_seq_' + str(i)] = numpy.zeros((int(options['maxlen']),)).astype(config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
"""
Init the LSTM parameter:
:see: init_params
"""
W = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],))
params[_p(prefix, 'b')] = b.astype(config.floatX)
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None,h_before=None,c_before=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
[h,c] = _step(mask, state_below,h_before,c_before)
return h,c
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, x3,y2, mask, y, cost,modal_cost, max_cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
# f_grad_shared = theano.function([idxs, x, mask, y], cost, updates=gsup,
# name='sgd_f_grad_shared')
f_grad_shared = theano.function([mask, y, x, x3,y2, modal_cost, max_cost], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, x3,y2, mask, y, cost, modal_cost,max_cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([mask, y, x, x3, y2, modal_cost, max_cost], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared', on_unused_input='ignore', allow_input_downcast=True)
# f_grad_shared = theano.function([idxs, x, mask, y], cost, updates=zgup + rg2up,
# name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update', allow_input_downcast=True)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
# f_grad_shared = theano.function([x, mask, y], cost,
# updates=zgup + rgup + rg2up,
# name='rmsprop_f_grad_shared')
f_grad_shared = theano.function([mask, y, x], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
'''
x: traing data
y: traing label
x3: neighbor data, datanum * neighbornum * featuredim
y2: neighbor label
'''
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
maskl = tensor.matrix('maskl', dtype=config.floatX)
y = tensor.vector('y', dtype='int32')
x = tensor.matrix('x', dtype=config.floatX)
n_samples = x.shape[0]
dim_proj = x.shape[1]
maxlen = options['maxlen']
x3 = tensor.tensor3('x3', dtype=config.floatX)
y2 = tensor.matrix('y2', dtype='int32')
neigh_num = x3.shape[1]
x_nerghbors = tensor.reshape(x3, [n_samples * neigh_num, dim_proj])
modal_cost = tensor.vector('modal_cost', dtype=config.floatX)
max_cost = tensor.scalar('max_cost', dtype=config.floatX)
h = tensor.alloc(numpy_floatX(0.),n_samples,dim_proj)
c = tensor.alloc(numpy_floatX(0.),n_samples,dim_proj)
h_n = tensor.alloc(numpy_floatX(0.),n_samples * neigh_num,dim_proj)
c_n = tensor.alloc(numpy_floatX(0.),n_samples * neigh_num,dim_proj)
cost = 0
cost1_mean = []
cost2_mean = []
cost3_mean = []
next_mean = []
mask = tensor.ones_like(x[:,0], dtype= config.floatX) # maks whether instance enter the ith iter
mask_n = tensor.ones_like(x_nerghbors[:,0], dtype= config.floatX)
masks = []
projs = []
masks.append(mask)
next_modal = tensor.zeros_like(x[:,0], dtype= 'int32')
next_modal_n = tensor.zeros_like(x_nerghbors[:,0], dtype= 'int32')
# cost_vector = tensor.alloc(numpy_floatX(0.),n_samples,1)
cost_vector = tensor.alloc(numpy_floatX(0.),1, n_samples)
f_pred_set = []
f_pred_seq_set = []
f_pred_seq_prob_set = []
f_get_fea_set = []
f_fea_other_set = []
def get_other3(x, next_modal):
fea_other = tensor.tile(x,(maxlen,1))
fea_other = x.T
fea_single = fea_other[:,next_modal]
return fea_other,fea_single
def get_other(x):
# change the feature x from dim to the form of maxlen * dim
fea_other = []
for i in range(maxlen):
fea_other.append(x * maskl[i])
return tensor.stack(fea_other)
def get_single(x,next_modal):
# get the current modal' feature
fea_single = x * maskl[next_modal]
return fea_single
def compute_dist(neighbor, pred_neighbor, fea_single, pred, mask, y, y2):
'''
minimize same label neighbor's distance, maximize different label neighbor's distance
neighbor: neighbor's feature
pred_neighbor: neighbor's netmodal's prediction
fea_single: current instance's feature
pred: current instance's prediction
mask: whether current instance stops
y: current instance's label
y2: neighbor instance's label
'''
loss = 0
if mask:
ifsamelabel = -1
for i in range(3):
if y == y2[i]:
ifsamelabel = 1
else:
ifsamelabel = -1
dist = tensor.dot(get_other(neighbor[i]).T, pred_neighbor[i]) - tensor.dot(get_other(fea_single).T,pred)
loss += ifsamelabel * tensor.dot(dist , dist.T)
return loss/3
costs = tensor.tile(modal_cost,(n_samples,1))
xs = []
for i in range(recyl_maxlen):
# set high cost for modal that has been used to prevent predict same modal
costs = tensor.set_subtensor(costs[tensor.arange(n_samples), next_modal], 1)
feas, update = theano.scan(fn = get_single,
sequences=[x, next_modal],
)
fea_single_n, update_n = theano.scan(fn = get_single,
sequences=[x_nerghbors, next_modal_n],
)
fea_single = feas
max_coefficients_supported = 10000
xs.append(fea_single)
[h,c] = get_layer(options['encoder'])[1](tparams, fea_single, options,
prefix=options['encoder'],
mask=mask,h_before = h,c_before = c)
[h_n,c_n] = get_layer(options['encoder'])[1](tparams, fea_single_n, options,
prefix=options['encoder'],
mask=mask_n,h_before = h_n,c_before = c_n)
proj = h
proj_n = h_n
projs.append(proj)
projsmatrix = tensor.stack(projs)
proj_pred = tensor.stack(projs) * tensor.stack(masks)[:, :, None]
proj_pred = tensor.transpose(proj_pred,(1,0,2))
proj_pred = tensor.reshape(proj_pred,[projsmatrix.shape[1],projsmatrix.shape[0] * projsmatrix.shape[2]])
# print('h_n.shape', h_n.shape)
if options['use_dropout']:
proj_pred = dropout_layer(proj_pred, use_noise, trng)
pred = tensor.nnet.softmax(tensor.dot(proj_pred, tparams['U_' + str(i)]) + tparams['b_' + str(i)])
print('i', i)
f_pred_prob = theano.function([ x, maskl,modal_cost, max_cost], pred,
name='f_pred_prob',on_unused_input='ignore', allow_input_downcast=True)
f_pred = theano.function([x, maskl, modal_cost, max_cost], pred.argmax(axis=1),
name='f_pred',on_unused_input='ignore', allow_input_downcast=True)
f_pred_set.append(f_pred)
off = 1e-8
if pred.dtype == 'float16':
off = 1e-6
pred_seq = tensor.nnet.softmax(tensor.dot(proj, tparams['U_seq_' + str(i)]) + tparams['b_seq_' + str(i)])
pred_seq_n = tensor.nnet.softmax(tensor.dot(proj_n, tparams['U_seq_' + str(i)]) + tparams['b_seq_' + str(i)])
f_pred_seq = theano.function([x, maskl, modal_cost,max_cost], pred_seq.argmax(axis=1),
name='f_pred_seq',on_unused_input='ignore',allow_input_downcast=True)
f_pred_seq_set.append(f_pred_seq)
pred_seq_index = pred_seq.argmax(axis=1)
next_modal = pred_seq_index
next_modal_n = pred_seq_n.argmax(axis=1)
next_mean.append(next_modal)
cost1_vector = tensor.log(pred[tensor.arange(n_samples), y] + off)
cost1 = ( cost1_vector * mask).sum() / (mask.sum() + 1)
pred_seq_n3 = tensor.reshape(pred_seq_n, [n_samples , neigh_num, maxlen])
result_loss2, update = theano.scan(fn = compute_dist,
sequences=[x3, pred_seq_n3, x, pred_seq, mask, y, y2],
)
cost2 = result_loss2.mean()
cost3 = (costs * pred_seq).mean()
cost1_mean.append(cost1)
cost2_mean.append(cost2)
cost3_mean.append(cost3)
lamda1 = 0.001
lamda2 = 0.1
if i == recyl_maxlen - 1:
lamda1 = 0.000000001
lamda2 = 0.000000001
cost += -cost1 + lamda1 * cost2 + lamda2 * cost3
# cost += -cost1
# f_fea_other = theano.function([x, x3, y, maskl, modal_cost, max_cost],[nnext, D,cost1,cost2,cost3,mask.sum(),next_modal, fea_single, fea_other, fea_single3, fea_other3], on_unused_input='ignore')
# f_fea_other_set.append(f_fea_other)
result, update = theano.scan(lambda b,a: a[b],
sequences = pred_seq_index,
non_sequences = modal_cost )
if i == 0:
cost_vector = result
else:
cost_vector += result
# mask the instance if its cost larger than max_cost
choice = tensor.nonzero(tensor.gt(-cost_vector,-max_cost))[0]
mask = tensor.zeros_like(x[:,0], dtype = config.floatX)
mask = theano.tensor.set_subtensor(mask[choice],1.)
masks.append(mask)
if i < recyl_maxlen:
cost -= (2 * (1-mask) * cost1_vector).sum() / (mask.sum() + 1)
else:
cost -= cost1
f_fea_other = theano.function([x, x3,y2, y, maskl, modal_cost, max_cost],
[tensor.stack(cost1_mean), tensor.stack(cost2_mean), tensor.stack(cost3_mean)], on_unused_input='ignore')
return use_noise, x,x3, y2, maskl, y, cost,modal_cost,max_cost,f_pred_set,f_pred_seq_set, f_fea_other
def pred_error(maxlen, f_pred_set,f_pred_seq_set, data, iterator,maskl, model_len, modal_cost, max_cost,verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
length = 0
meancost = 0
for _, valid_index in iterator:
feax = [data[0][t] for t in valid_index]
feax = numpy.array(feax)
targets = [data[1][t] for t in valid_index]
n_samples = len(valid_index)
mask = numpy.ones_like(feax[:,0], dtype= 'int32')
next_modal = numpy.zeros_like(feax[:,0], dtype= 'int32')
cost_vector = numpy.zeros(n_samples)
next_modals = numpy.zeros([n_samples,maxlen])
mask_matrix = numpy.zeros([n_samples, maxlen])
preds_s = numpy.zeros([n_samples, maxlen])
preds = numpy.zeros(n_samples)
costs = numpy.tile(modal_cost,(n_samples,1))
for i in range(recyl_maxlen):
cost_vector += costs[range(n_samples), next_modal]
costs[range(n_samples), next_modal] = 0
mask_matrix[:,i] = mask
next_modal = f_pred_seq_set[i](feax, maskl, modal_cost, max_cost)
next_modals[:,i] = next_modal
mask = numpy.zeros_like(feax[:,0], dtype= 'int32')
choice = numpy.greater(-cost_vector,-max_cost)
mask[choice] = 1
preds_s[:,i] = f_pred_set[i](feax, maskl, modal_cost, max_cost)
meancost += cost_vector.sum()
for i in range(n_samples):
prem = False
for j in range(maxlen):
if mask_matrix[i,j] == 0:
prem = True
preds[i] = preds_s[i, j-1]
break
if prem == False:
preds[i] = preds_s[i, -1]
valid_err += (preds == targets).sum()
length += len(preds)
valid_err = 1. - numpy_floatX(valid_err) / length
return valid_err, meancost / length
def get_neighbor(sam_traininx, neighinx, trainx, trainy):
neighs = []
neiy = []
trainy = numpy.array(trainy)
for i in range(len(sam_traininx)):
ninx = neighinx[sam_traininx[i]]
neighs.append(trainx[ninx,:])
neiy.append(trainy[ninx])
return numpy.array(neighs), numpy.array(neiy)
def train_lstm(
dim_proj=36, # word embeding dimension and LSTM number of hidden units.
patience=15, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.01, # Learning rate for sgd (not used for adadelta and rmsprop)
optimizer=adadelta, # sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
# optimizer = sgd,
encoder='lstm', # TODO: can be removed must be lstm.
saveto='lstm_model_best.npz', # The best model will be saved there
validFreq=100, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=4, # Sequence longer then this get ignored
batch_size=64, # The batch size during training.
valid_batch_size=256, # The batch size used for validation/test set.
dataset=datanamee,
modal_costs = [0.1, 0.1, 0.1, 0.1], # the cost for each modal
# model_lens = model_len,
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
max_costs = 50, # max cost for each instance to use
):
# Model options
model_options = locals().copy()
print("model options", model_options)
load_data = get_dataset(dataset)
print('Loading data')
train, valid, test, mask, model_len, train_n3 = load_data()
'''
train: (train data, train label)
valid: (valid data, valid label)
test: (test data, test label)
mask: mask each modal's feature
model_len: each model's length
train_n3: each traing data's 3 neighbor's index
'''
ydim = numpy.max(train[1]) + 1
#
model_options['ydim'] = ydim
print('ydim', ydim)
print('numpy.min(train[1])', numpy.min(train[1]))
print('Building model')
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
(use_noise, x, x3, y2, mask, y, cost,
modal_cost,max_cost,f_pred_set, f_pred_seq_set, f_fea_other) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
print('starting compute grad...')
print(time.asctime( time.localtime(time.time()) ) )
grads = tensor.grad(cost, wrt=list(tparams.values()))
f_grad = theano.function([mask, y, x,x3, y2, modal_cost,max_cost], grads, name='f_grad',on_unused_input='ignore')
lr = tensor.scalar(name='lr')
print('starting optimizer')
print(time.asctime( time.localtime(time.time()) ) )
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, x3, y2, mask, y, cost, modal_cost,max_cost)
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
print("%d train examples" % len(train[0]))
print("%d valid examples" % len(valid[0]))
print("%d test examples" % len(test[0]))
history_errs = []
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) // batch_size
if saveFreq == -1:
saveFreq = len(train[0]) // batch_size
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in range(max_epochs):
n_samples = 0
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=False)
for _, train_index in kf:
feax3, feay2 = get_neighbor(train_index, train_n3, train[0], train[1])
'''
feax3: this mini-batch's data's feature
feay2: this mini-batch's data's label
'''
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
y = [train[1][t] for t in train_index]
feax = [train[0][t] for t in train_index]
n_samples += len(feax)
maskl = masklist(model_len, dim_proj)
cost = f_grad_shared(maskl, y, feax, feax3,feay2, modal_costs,max_costs)
f_update(lrate)
if numpy.isnan(cost) or numpy.isinf(cost):
print('bad cost detected: ', cost)
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print('Epoch ', eidx, 'Update ', uidx, 'Cost ', cost)
if saveto and numpy.mod(uidx, saveFreq) == 0:
print('Saving...')
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pickle.dump(model_options, open('%s.pkl' % saveto, 'wb'), -1)
print('Done')
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
maskls = masklist(model_len, dim_proj)
train_err, tracost = pred_error(maxlen, f_pred_set, f_pred_seq_set, train, kf,maskls, model_len, modal_costs, max_costs)
print('~~~~~~~~~~~train_err',train_err)
print('~~~~~~~~~~~tracost',tracost)
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
valid_err, valcost = pred_error(maxlen, f_pred_set, f_pred_seq_set, valid, kf_valid,maskls, model_len, modal_costs, max_costs)
test_err, tstcost = pred_error(maxlen, f_pred_set, f_pred_seq_set, test, kf_test,maskls, model_len, modal_costs, max_costs)
history_errs.append([valid_err, test_err, valcost, tstcost])
print('history_errs)', history_errs)
if (best_p is None or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
print( ('Train ', train_err, 'Valid ', valid_err,
'Test ', test_err) )
print( ('Traincost ', tracost, 'Validcost ', valcost,
'Testcost ', tstcost) )
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print('Early Stop!')
estop = True
break
print('Seen %d samples' % n_samples)
if estop:
break
except KeyboardInterrupt:
print("Training interupted")
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
print('best valid',numpy.array(history_errs)[:,
0].min())
print('best valid',numpy.array(history_errs)[:,
1].min())
use_noise.set_value(0.)
kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
train_err, tracost = pred_error(maxlen, f_pred_set, f_pred_seq_set, train, kf_train_sorted,maskls, model_len, modal_costs, max_costs)
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
valid_err, valcost = pred_error(maxlen, f_pred_set, f_pred_seq_set, valid, kf_valid,maskls, model_len, modal_costs, max_costs)
test_err,tstcost = pred_error(maxlen, f_pred_set, f_pred_seq_set, test, kf_test,maskls, model_len, modal_costs, max_costs)
print( 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err )
print( ('Traincost ', tracost, 'Validcost ', valcost,
'Testcost ', tstcost) )
if saveto:
numpy.savez(saveto, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
print('The code run for %d epochs, with %f sec/epochs' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1))))
print( ('Training took %.1fs' %
(end_time - start_time)), file=sys.stderr)
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
recyl_maxlen = int(argv[1]) # the max madal can been used for one instance
train_lstm(
max_epochs=100,
test_size=500,
)
| njustkmg/ACML17_DMS | dms.py | dms.py | py | 33,100 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "imdb.load_data",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray... |
30294078087 | import retrogamelib as rgl
import pygame, random, sys, os
from retrogamelib.constants import *
from objects import spritesheet, flip_images
class Intro(object):
def __init__(self):
load_image = rgl.util.load_image
self.oldman1 = spritesheet("data/lawn-mower.png", (96, 96))
self.bubbman1 = spritesheet("data/bubbman.png", (24, 26))[:3]
self.bubbman2 = flip_images(self.bubbman1)
self.ground = spritesheet("data/grass-2.png", (24, 24))[1]
self.fg = load_image("data/hills-front-1.png")
self.fg2 = load_image("data/hills-back.png")
self.bg = load_image("data/background-1.png")
self.font_white = rgl.font.Font(NES_FONT, (255, 255, 255))
self.font_black = rgl.font.Font(NES_FONT, (1, 1, 1))
self.frame = 0
self.pos = 0
self.bpos = [200, 240-26-20]
self.opos = [0, 240-96-20]
self.ochasing = True
self.run_away = False
self.stopped = None
self.boom = rgl.util.play_sound("data/motor.ogg", 0.5)
self.boom.stop()
def loop(self):
self.running = True
self.ypos = 256
pygame.mixer.music.stop()
while self.running:
self.frame += 1
rgl.clock.tick()
rgl.button.handle_input()
screen = rgl.display.get_surface()
screen.blit(self.bg, (0, 0))
xpos = (self.pos/4) % 256
screen.blit(self.fg2, (xpos, 50))
screen.blit(self.fg2, (xpos+256, 50))
screen.blit(self.fg2, (xpos-256, 50))
xpos = (self.pos/2) % 256
screen.blit(self.fg, (xpos, 50))
screen.blit(self.fg, (xpos+256, 50))
screen.blit(self.fg, (xpos-256, 50))
for x in range(256/24 + 2):
p = self.pos % 256
screen.blit(self.ground, (((x*24 - 24) + p % 24), 240-24))
if rgl.button.is_pressed(START) or rgl.button.is_pressed(A_BUTTON):
self.running = False
bframe = 0
oframe = 0
if self.bpos[0] > 128 and not self.run_away:
self.bpos[0] -= 3
bframe = self.frame/4%2 + 1
else:
if self.stopped == None:
self.stopped = 60
self.stopped -= 1
if self.stopped == 30:
rgl.util.play_sound("data/yell.ogg")
if self.stopped < 30:
self.render_text(screen, "Get off my lawn!!", (128+self.opos[0], 100), 1)
if self.stopped <= 0:
self.run_away = True
if self.run_away:
self.bpos[0] += 3
self.opos[0] += 3
bframe = self.frame/4%2 + 1
oframe = self.frame/4%2
self.bubbman2 = self.bubbman1
if not pygame.mixer.get_busy():
self.boom.play(-1)
if self.opos[0] > 280:
self.running = False
if self.opos[0] > 270:
self.boom.stop()
screen.blit(self.bubbman2[bframe], self.bpos)
screen.blit(self.oldman1[oframe], self.opos)
rgl.display.update()
self.boom.stop()
def render_text(self, surface, text, pos, center=False):
ren1 = self.font_black.render(text)
ren2 = self.font_white.render(text)
p = pos
if center:
p = (pos[0] - ren1.get_width()/2, pos[1])
surface.blit(ren1, (p[0], p[1]+1))
surface.blit(ren1, (p[0], p[1]-1))
surface.blit(ren1, (p[0]+1, p[1]))
surface.blit(ren1, (p[0]-1, p[1]))
surface.blit(ren2, (p[0], p[1]))
del ren1, ren2
| randyheydon/BubbMan2-PND | lib/intro.py | intro.py | py | 3,742 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "retrogamelib.util",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "objects.spritesheet",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "objects.spritesheet",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "object... |
21753112789 | # import the required modules and types for this example...
from typing import Any, Dict, List
# import the paginator and modal...
from discord.ext.modal_paginator import ModalPaginator, PaginatorModal
# import the discord.py module
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=commands.when_mentioned, intents=discord.Intents(guilds=True, messages=True))
# pre-defined questions and title for each modal
# this could be loaded from a json file or database
# each "modal" is a dict with the following keys:
# title: str - the title of the modal
# required: bool - whether the modal is required to be filled out before the paginator can be finished
# questions: List[str] - a list of questions to ask the user. Max 5 questions per modal.
personal_questions = {
"title": "Personal Questions",
"required": True,
"questions": [
"What is your name?",
"What is your age?",
"Any hobbies?",
"Dad's name?",
"Mom's name?",
],
}
misc_questions = {
"title": "Miscellaneous Questions",
"required": False,
"questions": [
"What is your favorite color?",
"What is your favorite food?",
"What is your favorite animal?",
"What is your favorite movie?",
"What is your favorite game?",
],
}
reason_questions = {
"title": "Why Questions",
"required": True,
"questions": [
"Why do you want to join?",
"Why should we let you in?",
"What do you like about the server?",
"What do you like about the bot?",
"What do you like about the community?",
],
}
# want even more control over the inputs?
# you can use the TextInput class directly
# from discord.ui import TextInput
# and then add the TextInput to the modal
# modal.add_item(TextInput(...)) instead of looping.
# see the discord.py docs for more info on the TextInput class.
# more dict like? use a dict for the questions too
# questions = [
# {
# "label": "What is your name?",
# "min_length": 2,
# "max_length": 200,
# "required": False,
# ...
# },
# ...
# ]
# etc...
# subclass the paginator to define our own on_finish method
# and to add the modals to the paginator via a custom __init__
class VerifyModal(ModalPaginator):
    """ModalPaginator subclass that builds one modal per question group and
    posts every collected answer back to the channel when the user finishes.
    """

    def __init__(self, questions_inputs: List[Dict[str, Any]], *, author_id: int, **kwargs: Any) -> None:
        # initialize the paginator with the author_id kwarg
        # and any other kwargs we passed to the constructor.
        # possible kwargs are as follows:
        # timeout: Optional[int] = None - the timeout for the paginator (view)
        # disable_after: bool = True - whether to disable all buttons after the paginator is finished or cancelled.
        # can_go_back: bool = True - whether the user can go back to previous modals using the "Previous" button.
        # sort_modals: bool = True - whether to sort the modals by the required kwarg.
        # See more on the class.
        super().__init__(author_id=author_id, **kwargs)
        # one PaginatorModal per question group dict
        for data in questions_inputs:
            # unpack the data from the dict
            title: str = data["title"]
            required: bool = data["required"]
            questions: List[str] = data["questions"]
            # create a new modal with the title and required kwarg
            modal = PaginatorModal(title=title, required=required)
            # add the questions to the modal
            for question in questions:
                modal.add_input(
                    label=question,  # the label of the text input
                    min_length=2,  # the minimum length of the text input
                    max_length=200,  # the maximum length of the text input
                    # see the discord.py docs for more info on the other kwargs
                )
            # add the modal to the paginator
            self.add_modal(modal)

    # override the on_finish method to send the answers to the channel when the paginator is finished.
    async def on_finish(self, interaction: discord.Interaction[Any]) -> None:
        """Collect every modal's filled fields and send them as one message."""
        # create a list of answers
        # default format: **Modal Title**\nQuestion: Answer\nQuestion: Answer\n... etc
        answers: list[str] = []
        for modal in self.modals:
            prefix = f"**{modal.title}**\n"
            field: discord.ui.TextInput[Any]
            for field in modal.children:  # type: ignore
                prefix += f"{field.label}: {field.value}\n"
            answers.append(prefix)
        await interaction.response.send_message(f"Answers from {interaction.user.mention}:\n\n" + "\n\n".join(answers))
# define the prefix command.
@bot.command()
async def verify(ctx: commands.Context[commands.Bot]):
    """Prefix command that starts the verification modal paginator."""
    # initialize the paginator with all the questions data we defined above in a list
    # and the author_id so that only the command invoker can use the paginator.
    questions_inputs = [personal_questions, misc_questions, reason_questions]
    paginator = VerifyModal(questions_inputs, author_id=ctx.author.id)
    # send the paginator to the current channel
    await paginator.send(ctx)
# run the bot
bot.run("...")
# run @bot_name verify in a channel with the bot to test the paginator
| Soheab/modal-paginator | examples/verify_command.py | verify_command.py | py | 5,303 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Bot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.when_mentioned",
"line_number": 12,
"usage_type": "attribute"
},
{... |
15066006918 | from setuptools import setup
# The PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Packaging metadata for the immudb Python SDK.
setup(name='immudb-py',
      version='1.4.0',
      license="Apache License Version 2.0",
      description='Python SDK for Immudb',
      long_description=long_description,
      long_description_content_type="text/markdown",
      author='Codenotary',
      url='https://github.com/codenotary/immudb-py',
      # download_url='',
      packages=['immudb', 'immudb.database', 'immudb.embedded',
                'immudb.embedded.ahtree', 'immudb.embedded.htree', 'immudb.embedded.store',
                'immudb.grpc', 'immudb.handler', 'immudb.schema'],
      keywords=['immudb', 'immutable'],
      # Runtime dependencies. protobuf is pinned below 4.0 -- presumably for
      # compatibility with the checked-in generated gRPC code; confirm before
      # relaxing the pin.
      install_requires=[
          'grpcio>=1.31.0',
          'dataclasses>=0.6',
          'protobuf>=3.13.0,<4.0.0',
          'google-api>=0.1.12',
          'google-api-core>=1.22.1',
          'ecdsa>=0.16.1'
      ],
      classifiers=[
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
      ],
      python_requires='>=3.6',
      )
| codenotary/immudb-py | setup.py | setup.py | py | 1,404 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
}
] |
6304937748 | import cv2
import math
import numpy as np
import pyautogui as gui
# Two-hand gesture recognizer: each frame, the hand inside each of two fixed
# rectangles is segmented, its convexity defects (gaps between extended
# fingers) are counted, and the count is mapped to an action or an on-screen
# message. Rewritten from the original duplicated left/right code into shared
# helpers; the never-displayed offscreen `drawing` hull images were removed.


def _segment(roi):
    """Grey -> Gaussian blur -> inverted Otsu threshold for one hand ROI."""
    grey = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(grey, (35, 35), 0)
    _, thresh = cv2.threshold(blurred, 127, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return thresh


def _largest_contour(thresh):
    """Return (all contours, largest contour by area, or None if empty).

    cv2.findContours returns 3 values on OpenCV 3 and 2 values on OpenCV
    2/4; taking the second-to-last element of the result tuple works for
    every version, replacing the original's version-string parsing.
    """
    result = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contours = result[-2]
    largest = max(contours, key=cv2.contourArea) if contours else None
    return contours, largest


def _annotate_and_count(roi, cnt):
    """Draw centroid / bounding box / finger lines on `roi`.

    Returns (defect_count, cx, cy). A convexity defect whose inner angle is
    <= 90 degrees is counted as a gap between two extended fingers.
    """
    x, y, w, h = cv2.boundingRect(cnt)
    moments = cv2.moments(cnt)
    if moments['m00'] == 0:  # degenerate contour: avoid ZeroDivisionError
        return 0, 0, 0
    cx = int(moments['m10'] / moments['m00'])
    cy = int(moments['m01'] / moments['m00'])
    cv2.circle(roi, (cx, cy), 1, (255, 0, 0), 6)
    cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 0, 255), 0)

    hull_idx = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull_idx)
    if defects is None:  # convexityDefects can return None for tiny hulls
        return 0, cx, cy
    count = 0
    for i in range(defects.shape[0]):
        s, e, f, _ = defects[i, 0]
        start = tuple(cnt[s][0])
        end = tuple(cnt[e][0])
        far = tuple(cnt[f][0])
        # Cosine rule on the (start, far, end) triangle; wide angles are the
        # palm outline, not gaps between fingers, so they are ignored.
        a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
        b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
        c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
        angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 57
        if angle <= 90:
            count += 1
            cv2.circle(roi, far, 1, [0, 0, 255], -1)
        cv2.line(roi, start, end, [0, 255, 0], 2)
    return count, cx, cy


def _show_message(img, count, messages):
    """putText the message mapped to `count`; None means 'show nothing'."""
    text = messages.get(count, messages[None])
    if text is not None:
        cv2.putText(img, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)


# defect count -> message (None key = fallback, None value = no message).
# The original's count==2 branch assigned a string to the builtin name `str`
# without displaying it -- that shadowing bug is gone; behavior (no text) kept.
LEFT_MESSAGES = {1: None, 2: None, 3: "This is 4 :P", 4: "Hi!!!", None: "Hello World!!!"}
RIGHT_MESSAGES = {1: "I am Vipul", 2: None, 3: "This is 4 :P", 4: "Hi!!!", None: "Hello World!!!"}

cap = cv2.VideoCapture(1)  # camera index 1; use 0 for the built-in webcam
while cap.isOpened():
    ret, img = cap.read()
    if not ret:  # camera unplugged / stream ended (original ignored `ret`)
        break

    # Fixed regions of interest, one per hand.
    cv2.rectangle(img, (0, 0), (200, 200), (0, 255, 0), 0)    # left hand
    cv2.rectangle(img, (630, 200), (430, 0), (0, 255, 0), 0)  # right hand
    crop_left = img[0:200, 0:200]
    crop_right = img[0:200, 430:630]

    thresh_left = _segment(crop_left)
    thresh_right = _segment(crop_right)
    cv2.imshow('Thresholded', thresh_left)
    cv2.imshow('Thresholded2', thresh_right)

    contours_left, cnt_left = _largest_contour(thresh_left)
    contours_right, cnt_right = _largest_contour(thresh_right)
    cv2.drawContours(thresh_left, contours_left, -1, (0, 255, 0), 3)
    cv2.drawContours(thresh_right, contours_right, -1, (255, 0, 0), 3)

    if cnt_left is not None:
        count_left, cx, cy = _annotate_and_count(crop_left, cnt_left)
        if count_left == 1:
            gui.moveTo(cx, cy)  # one defect (two fingers): drive the mouse
        else:
            _show_message(img, count_left, LEFT_MESSAGES)
    if cnt_right is not None:
        count_right, _, _ = _annotate_and_count(crop_right, cnt_right)
        _show_message(img, count_right, RIGHT_MESSAGES)

    cv2.imshow('window1', img)
    cv2.resizeWindow('window1', (600, 600))
    if cv2.waitKey(10) == 27:  # Esc quits
        break

# Release the camera and close preview windows (leaked in the original).
cap.release()
cv2.destroyAllWindows()
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_... |
7990357254 | import logging
import os
from pprint import pprint
from gensim import corpora, models, similarities
# Show gensim's progress output while loading/applying models.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# The dictionary/corpus must have been produced by dict_corpus.py first.
if os.path.exists("tmp/mesh.dict"):
    dictionary = corpora.Dictionary.load('tmp/mesh.dict')
    corpus = corpora.MmCorpus('tmp/corpus.mm')
    print("Used files generated from dict_corpus")
else:
    print("Please run dict_corpus.py to generate data set")

if os.path.exists("tmp/model.tfidf"):
    tfidf = models.TfidfModel.load('tmp/model.tfidf')
    lsi = models.LsiModel.load('tmp/model.lsi')
    lda = models.LdaModel.load('tmp/model.lda')

    # `with` closes the sample file deterministically (the original called
    # open() without ever closing the handle).
    with open('sample.txt', 'r', encoding='utf-8') as sample_file:
        new_doc = sample_file.read()
    print(new_doc)

    # Bag-of-words representation of the sample document.
    new_vec = dictionary.doc2bow(new_doc.lower().split())
    pprint(new_vec)

    # Project the document into each trained model's space.
    vec_tfidf = tfidf[new_vec]
    pprint(vec_tfidf)
    vec_lsi = lsi[new_vec]
    pprint(vec_lsi)
    vec_lda = lda[new_vec]
    pprint(vec_lda)
| meetsha/gensim | test_models.py | test_models.py | py | 918 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line... |
37571536835 | from rest_framework import serializers
from .models import RedditPost, RedditPostSnapshot
class RedditPostSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes RedditPost rows for the REST API, exposing only the
    whitelisted submission fields below.
    """

    # Disabled reverse relation to the post's snapshots; re-enable together
    # with the 'snapshots' entry in Meta.fields.
    # snapshots = serializers.HyperlinkedRelatedField(
    #     many=True,
    #     read_only=True,
    #     view_name='reddit_post_snapshots'
    # )

    class Meta:
        model = RedditPost
        fields = (
            'submission_id',
            'image_similarity',
            'image_url',
            'title',
            'subreddit',
            'subreddit_id',
            'permalink',
            'submission_created',
            'author',
            # 'snapshots',
        )
class RedditPostSnapshotSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes RedditPostSnapshot with every model field exposed."""

    class Meta:
        model = RedditPostSnapshot
        fields = '__all__'
| SDupZ/memex | reddit/serializers.py | serializers.py | py | 811 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.RedditPost",
"line_number": 13,
"usage_type": ... |
1528409753 | import copy
import os
import numpy as np
from metadrive.manager.base_manager import BaseManager
from metadrive.scenario.scenario_description import ScenarioDescription as SD, MetaDriveType
from metadrive.scenario.utils import read_scenario_data, read_dataset_summary
class ScenarioDataManager(BaseManager):
    """Loads scenario descriptions for replay environments.

    On construction it reads the dataset summary, blanks out entries outside
    the configured scenario window, verifies every file in the window exists,
    and (optionally) sorts scenarios by a difficulty heuristic for curriculum
    training. Scenarios are then served and cached via :meth:`get_scenario`.
    """

    # NOTE(review): not referenced inside this class; presumably a cap for a
    # scenario cache used elsewhere -- confirm before removing.
    DEFAULT_DATA_BUFFER_SIZE = 100
    # Lower PRIORITY runs earlier: data must be available before other managers.
    PRIORITY = -10

    def __init__(self):
        super(ScenarioDataManager, self).__init__()
        from metadrive.engine.engine_utils import get_engine
        engine = get_engine()
        self.store_data = engine.global_config["store_data"]
        self.directory = engine.global_config["data_directory"]
        self.num_scenarios = engine.global_config["num_scenarios"]
        self.start_scenario_index = engine.global_config["start_scenario_index"]
        # for multi-worker: each worker takes every num_workers-th index,
        # offset by its own worker_index, so no two workers share a scenario.
        self.worker_index = self.engine.global_config["worker_index"]
        self.available_scenario_indices = [
            i for i in range(
                self.start_scenario_index + self.worker_index, self.start_scenario_index +
                self.num_scenarios, self.engine.global_config["num_workers"]
            )
        ]
        # cache: scenario index -> loaded ScenarioDescription
        self._scenarios = {}
        # Read summary file first:
        self.summary_dict, self.summary_lookup, self.mapping = read_dataset_summary(self.directory)
        # Blank out lookup entries outside [start_scenario_index, end_idx) so
        # out-of-window indices can never resolve to files.
        self.summary_lookup[:self.start_scenario_index] = [None] * self.start_scenario_index
        end_idx = self.start_scenario_index + self.num_scenarios
        self.summary_lookup[end_idx:] = [None] * (len(self.summary_lookup) - end_idx)
        # sort scenario for curriculum training
        self.scenario_difficulty = None
        self.sort_scenarios()
        # existence check
        assert self.start_scenario_index < len(self.summary_lookup), "Insufficient scenarios!"
        assert self.start_scenario_index + self.num_scenarios <= len(self.summary_lookup), \
            "Insufficient scenarios! Need: {} Has: {}".format(self.num_scenarios,
                                                              len(self.summary_lookup) - self.start_scenario_index)
        for p in self.summary_lookup[self.start_scenario_index:end_idx]:
            p = os.path.join(self.directory, self.mapping[p], p)
            assert os.path.exists(p), "No Data at path: {}".format(p)
        # stat: per-scenario flag flipped to 1 the first time it is read
        self.coverage = [0 for _ in range(self.num_scenarios)]

    @property
    def current_scenario_summary(self):
        # Metadata dict of the scenario selected by the current global seed.
        return self.current_scenario[SD.METADATA]

    def _get_scenario(self, i):
        """Read scenario `i` from disk (uncached) and mark it as covered."""
        assert i in self.available_scenario_indices, \
            "scenario index exceeds range, scenario index: {}, worker_index: {}".format(i, self.worker_index)
        assert i < len(self.summary_lookup)
        scenario_id = self.summary_lookup[i]
        file_path = os.path.join(self.directory, self.mapping[scenario_id], scenario_id)
        ret = read_scenario_data(file_path)
        assert isinstance(ret, SD)
        self.coverage[i - self.start_scenario_index] = 1
        return ret

    def before_reset(self):
        # When data storing is disabled, drop the cache every episode so
        # memory stays bounded (only the current scenario may be cached).
        if not self.store_data:
            assert len(self._scenarios) <= 1, "It seems you access multiple scenarios in one episode"
            self._scenarios = {}

    def get_scenario(self, i, should_copy=False):
        """Return scenario `i`, loading and caching it on first access.

        Pass ``should_copy=True`` to receive a deep copy the caller may
        mutate without corrupting the cache.
        """
        # Flip to True locally to trace memory growth per load (debug only).
        _debug_memory_leak = False
        if i not in self._scenarios:
            if _debug_memory_leak:
                # inner psutil function
                def process_memory():
                    import psutil
                    import os
                    process = psutil.Process(os.getpid())
                    mem_info = process.memory_info()
                    return mem_info.rss

                cm = process_memory()
            # self._scenarios.clear_if_necessary()
            if _debug_memory_leak:
                lm = process_memory()
                print("{}: Reset! Mem Change {:.3f}MB".format("data manager clear scenario", (lm - cm) / 1e6))
                cm = lm
            ret = self._get_scenario(i)
            self._scenarios[i] = ret
            if _debug_memory_leak:
                lm = process_memory()
                print("{}: Reset! Mem Change {:.3f}MB".format("data manager read scenario", (lm - cm) / 1e6))
                cm = lm
        else:
            ret = self._scenarios[i]
            # print("===Don't need to get new scenario. Just return: ", i)
        if should_copy:
            return copy.deepcopy(self._scenarios[i])
        # Data Manager is the first manager that accesses data.
        # It is proper to let it validate the metadata and change the global config if needed.
        return ret

    def get_metadata(self):
        # Attach the raw scenario description so it can be exported alongside
        # episode recordings.
        state = super(ScenarioDataManager, self).get_metadata()
        raw_data = self.current_scenario
        state["raw_data"] = raw_data
        return state

    def transform_coordinate(self, scenario):
        # Deprecated: always raises. The code below the raise is unreachable
        # and kept only as a reference for the old two-coordinate support.
        raise ValueError("Deprecated now as all coordinates is right-handed now")
        if not self.engine.global_config["allow_coordinate_transform"]:
            assert scenario[SD.METADATA][SD.COORDINATE] == MetaDriveType.COORDINATE_METADRIVE, \
                "Only support MetaDrive coordinate!"
        else:
            # It supports loading WaymoData or exported data in two coordinates
            if scenario[SD.METADATA][SD.COORDINATE] == MetaDriveType.COORDINATE_WAYMO:
                self._coordinate_transform = True
            elif scenario[SD.METADATA][SD.COORDINATE] == MetaDriveType.COORDINATE_METADRIVE:
                self._coordinate_transform = False
            else:
                raise ValueError()

    @property
    def current_scenario_length(self):
        return self.current_scenario[SD.LENGTH]

    @property
    def current_scenario(self):
        # The engine's global random seed doubles as the scenario index.
        return self.get_scenario(self.engine.global_random_seed)

    def sort_scenarios(self):
        """
        TODO(LQY): consider exposing this API to config
        Sort scenarios to support curriculum training. You are encouraged to customize your own sort method
        :return: sorted scenario list
        """
        if self.engine.max_level == 0:
            raise ValueError("Curriculum Level should be greater than 1")
        elif self.engine.max_level == 1:
            return

        def _score(scenario_id):
            # Difficulty heuristic: ego driving distance scaled by trajectory
            # curvature, plus a (currently zero-weighted) moving-object term.
            file_path = os.path.join(self.directory, self.mapping[scenario_id], scenario_id)
            scenario = read_scenario_data(file_path)
            obj_weight = 0
            # calculate curvature
            ego_car_id = scenario[SD.METADATA][SD.SDC_ID]
            state_dict = scenario["tracks"][ego_car_id]["state"]
            valid_track = state_dict["position"][np.where(state_dict["valid"].astype(int))][..., :2]
            dir = valid_track[1:] - valid_track[:-1]
            dir = np.arctan2(dir[..., 1], dir[..., 0])
            curvature = sum(abs(dir[1:] - dir[:-1]) / np.pi) + 1
            sdc_moving_dist = SD.sdc_moving_dist(scenario)
            num_moving_objs = SD.num_moving_object(scenario, object_type=MetaDriveType.VEHICLE)
            return sdc_moving_dist * curvature + num_moving_objs * obj_weight

        start = self.start_scenario_index
        end = self.start_scenario_index + self.num_scenarios
        id_scores = [(s_id, _score(s_id)) for s_id in self.summary_lookup[start:end]]
        id_scores = sorted(id_scores, key=lambda scenario: scenario[-1])
        self.summary_lookup[start:end] = [id_score[0] for id_score in id_scores]
        self.scenario_difficulty = {id_score[0]: id_score[1] for id_score in id_scores}

    def clear_stored_scenarios(self):
        # Drop every cached scenario (e.g. to free memory between runs).
        self._scenarios = {}

    @property
    def current_scenario_difficulty(self):
        # 0 when sorting was skipped (curriculum disabled).
        return self.scenario_difficulty[self.summary_lookup[self.engine.global_random_seed]
                                        ] if self.scenario_difficulty is not None else 0

    @property
    def current_scenario_id(self):
        return self.current_scenario_summary["scenario_id"]

    @property
    def current_scenario_file_name(self):
        return self.summary_lookup[self.engine.global_random_seed]

    @property
    def data_coverage(self):
        # Fraction of this worker's scenarios read at least once, scaled by
        # num_workers to estimate coverage across all workers.
        return sum(self.coverage) / len(self.coverage) * self.engine.global_config["num_workers"]
| metadriverse/metadrive | metadrive/manager/scenario_data_manager.py | scenario_data_manager.py | py | 8,264 | python | en | code | 471 | github-code | 1 | [
{
"api_name": "metadrive.manager.base_manager.BaseManager",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "metadrive.engine.engine_utils.get_engine",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "metadrive.scenario.utils.read_dataset_summary",
"line_num... |
11710691906 |
"""
1. Imports and definitions
"""
# i) imports
import matplotlib.pyplot as plt
import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam
import seaborn as sns
"""
2. Data & stochastic model
"""
# i) Generate data
true_mean = torch.tensor(4.0)
data = dist.Normal(true_mean, 1).sample([100])
# ii) Construct stochastic model
prior_dist = dist.Normal(0., 3.)
def model(data = None):
    """Generative model: mu ~ prior_dist, obs_i ~ Normal(mu, 1).

    With ``data=None`` a single unconditioned observation is sampled;
    otherwise the likelihood is conditioned on the given observations.
    """
    # Prior over the mean
    mu = pyro.sample("mu", prior_dist)
    # Likelihood: one plate dimension per observation
    len_data = len(data) if data is not None else 1
    with pyro.plate("data", len_data):
        obs = pyro.sample("obs", dist.Normal(mu, 1), obs=data)
    return obs
# iii) construct variational distribution
def guide(data = None):
    """Variational distribution q(mu) = Normal(mu_loc, mu_scale)."""
    # Variational parameters (registered in pyro's global param store)
    mu_loc = pyro.param("mu_loc", torch.tensor(0.))
    mu_scale = pyro.param("mu_scale", torch.tensor(1.), constraint=dist.constraints.positive)
    # Variational distribution over the mean
    mu_sample = pyro.sample("mu", dist.Normal(mu_loc, mu_scale))
    return mu_sample
"""
3. SVI inference
"""
# i) Perform inference: 1000 SVI steps maximizing the ELBO with Adam.
pyro.clear_param_store()
svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())
for _ in range(1000):
    svi.step(data)
# ii) Print the learned variational parameters (mu_loc, mu_scale).
for name, param in pyro.get_param_store().items():
    print('{} : {}'.format(name, param))
"""
4. Plots and illustrations
"""
# i) Sample from the prior and the posterior
prior_samples = torch.tensor([prior_dist.sample() for _ in range(1000)])
posterior_samples = torch.tensor([guide().detach() for _ in range(1000)])
# ii) Sample from the model, from model with posterior swapped in
model_samples = torch.tensor([model().detach() for _ in range(1000)])
def model_with_posterior():
    """Posterior predictive: draw mu from the fitted guide, then one obs."""
    # Posterior over the mean (sampled from the trained guide)
    mu = guide()
    # Likelihood
    obs = pyro.sample("obs", dist.Normal(mu, 1))
    return obs
ppd_samples = torch.tensor([model_with_posterior().detach() for _ in range(1000)])
# iv) Plotting empirical distributions
fig, axs = plt.subplots(3, 2, figsize=(10, 15)) # 3x2 grid of Axes
# data
sns.histplot(data, ax=axs[0, 0], kde=True)
axs[0, 0].set_title('Actual data')
# model
sns.histplot(model_samples, ax=axs[1, 0], kde=True)
axs[1, 0].set_title('Model samples of data using prior distribution')
# ppd
sns.histplot(ppd_samples, ax=axs[1, 1], kde=True)
axs[1, 1].set_title('Model samples of data using posterior distribution (ppd)')
# mu prior
sns.histplot(prior_samples, ax=axs[2, 0], kde=True)
axs[2, 0].set_title('Prior samples of mu')
# mu posterior
sns.histplot(posterior_samples, ax=axs[2, 1], kde=True)
axs[2, 1].set_title('Posterior samples of mu given data')
plt.tight_layout()
plt.show()
predictive = pyro.infer.Predictive(model = model, guide = guide, num_samples = 1000)
predictive_samples = predictive(())
# v) Use predictive functionality to produce values for all sample sites based
# on the posterior density encoded in guide i.e. p(z|x) (samples of mu) and
# p(x'|x) = int p(x'|z)p(z|x) dz (samples of new obs).
fig, axs = plt.subplots(2, 2, figsize=(10, 10)) # 3x2 grid of Axes
# predictive mu
sns.histplot(predictive_samples['mu'], ax=axs[0, 0], kde=True)
axs[0, 0].set_title('Predictive samples mu')
# predictive obs
sns.histplot(predictive_samples['obs'], ax=axs[0, 1], kde=True)
axs[0, 1].set_title('Predictive samples obs')
plt.tight_layout()
plt.show()
| atlasoptimization/stochastic_modelling | pyro_experiments/pyro_tests_posterior_v_model.py | pyro_tests_posterior_v_model.py | py | 3,463 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.tensor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyro.distributions.Normal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyro.distributions",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pyro.distri... |
35668813832 | import spacy
import json
# Load the English pipeline once (download it separately with
# `python -m spacy download en`).
nlp = spacy.load('en')

# `with` guarantees the file handle is closed; the original opened the file
# and never closed it.
with open('Book_Data_Set.json', encoding="utf8") as json_file:
    data = json.load(json_file)

# Print every noun / proper noun from the first 10 book descriptions,
# separated by '****', one line per description. Slicing instead of
# range(0, 10) avoids an IndexError when fewer than 10 records exist.
# The loop variable was renamed from `np` (which reads as the numpy alias)
# to `token`, and the unused `title` variable was dropped.
for record in data[:10]:
    doc = nlp(record['description'])
    for token in doc:
        if token.pos_ == "NOUN" or token.pos_ == "PROPN":
            print(token.text, end='****')
    print()
| niraj1997/IT556_Magic-Squad_DA-IICT | Assignment 3/Q1.py | Q1.py | py | 316 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spacy.load",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
22992656893 | # -*- coding: utf-8 -*-
import asyncio
from playwright.async_api import async_playwright
from cf_clearance import async_retry, stealth_async
async def main():
    """Open Chromium through a SOCKS5 proxy, apply stealth patches, retry the
    Cloudflare challenge, then print the cf_clearance cookie and user agent.
    """
    async with async_playwright() as p:
        browser = await p.chromium.launch(
            headless=False,
            proxy={"server": "socks5://localhost:7890"},
            args=[
                "--disable-gpu",
                '--no-sandbox',
                '--disable-dev-shm-usage',
                '--no-first-run',
                '--no-service-autorun',
                '--no-default-browser-check',
                '--password-store=basic',
                '--start-maximized',
            ],
        )
        content = await browser.new_context(no_viewport=True)
        page = await content.new_page()
        await stealth_async(page)
        await page.goto('https://nowsecure.nl')
        res = await async_retry(page)
        if res:
            cookies = await page.context.cookies()  # fixed typo: was `cppkies`
            for cookie in cookies:
                if cookie.get('name') == 'cf_clearance':
                    print(cookie.get('value'))
            ua = await page.evaluate('() => {return navigator.userAgent}')
            print(ua)
        else:
            print("获取失败")
        await browser.close()


# asyncio.run() is the modern replacement for the deprecated
# get_event_loop().run_until_complete() pattern.
asyncio.run(main())
| 0therGuys/cf_clearance | tests/test_async_cf.py | test_async_cf.py | py | 1,256 | python | en | code | null | github-code | 1 | [
{
"api_name": "playwright.async_api.async_playwright",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cf_clearance.stealth_async",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cf_clearance.async_retry",
"line_number": 25,
"usage_type": "call"
},
... |
70716332195 | from django.shortcuts import render, redirect
from django.contrib.auth.forms import AuthenticationForm # 비어있는 폼 제공
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
# Create your views here.
def login(request):
    """Log a user in.

    GET: render an empty AuthenticationForm.
    POST: validate the submitted credentials; on success Django creates a
    session row and sends the session id back in a cookie, then we redirect
    to the article index. On failure we fall through and re-render the bound
    form so its errors are shown.
    """
    if request.method == 'POST':  # the login form was submitted
        # Perform the login:
        # 1) check that the entered username and password match a user
        # 2) if they match, create a row in the session table
        # 3) send the session id back inside a cookie
        # => Django handles all of this for us.
        form = AuthenticationForm(request, request.POST)  # bind request so the cookie is attached
        if form.is_valid():
            auth_login(request, form.get_user())
            # respond with a redirect
            return redirect('articles:index')
    else:
        # Provide the empty built-in login form.
        form = AuthenticationForm()
    context = {'form' : form}
    return render(request, 'accounts/login.html', context)
def logout(request):
    """Log the current user out.

    Django reads the session id from the request cookie, deletes the
    matching row in the session table, and clears the cookie.
    """
    auth_logout(request)
    return redirect('articles:index')
| ysparrk/Django | 230322/01_auth_template/accounts/views.py | views.py | py | 1,386 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 18,
"usage_type": "call... |
73276503395 | """
Microdeploy Configuration manager.
"""
import yaml
import glob
import re
import os
class Config(object):
    """Microdeploy configuration.

    Wraps an optional YAML config file plus an ``override`` dict, and
    resolves device settings and file packages (with wildcard expansion,
    ignore patterns and package includes) relative to the config file's
    directory.
    """

    def __init__(self, config_filename=None, default_baudrate=115200, override=None):
        """Load `config_filename` (if given) and deep-merge `override` on top.

        Raises FileNotFoundError when the file is missing and
        yaml.scanner.ScannerError (with an indented message) when it is
        malformed. `override` defaults to None instead of the original
        mutable `{}` default (a Python anti-pattern), with identical behavior.
        """
        if config_filename:
            try:
                with open(config_filename) as config_file:
                    config_yaml = yaml.load(config_file.read(), yaml.Loader)
            except FileNotFoundError as e:
                raise FileNotFoundError(f"Config file not found: '{config_filename}'") from e
            except yaml.scanner.ScannerError as e:
                message = str(f'\t{e}').replace('\n', '\n\t')  # indent message from ScannerError
                raise yaml.scanner.ScannerError(f"Error in config file: {config_filename}\n\n{message}")
        else:
            config_yaml = {}
        self.config = {
            'packages': config_yaml.get('packages', {}),
            'device': config_yaml.get('device', {}),
            'default': {
                'baudrate': config_yaml.get('default', {}).get('baudrate') or default_baudrate}}
        self._deep_update(self.config, override or {})
        self.config_filename = config_filename

    @staticmethod
    def _deep_update(d1, d2):
        """Recursively merge `d2` into `d1` in place (dicts merge, other
        values overwrite)."""
        for k in d2:
            if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict):
                Config._deep_update(d1[k], d2[k])
            else:
                d1[k] = d2[k]

    def device(self) -> dict:
        """Return the device configuration with the default baudrate applied."""
        device = dict(self.config.get('device', {}))
        return {
            'port': device.get('port'),
            'baudrate': device.get('baudrate', self.config['default']['baudrate'])}

    def package(self, name: str) -> dict:
        """Return the package configuration dict for `name`.

        The package's 'ignore' list is replaced by the union of its own
        patterns and those of every included package. Raises KeyError when
        `name` (or an included package) does not exist.

        Bug fixed: the original built the KeyError for a missing package but
        never raised it, so callers got a confusing NameError instead.
        """
        try:
            package = self.config.get('packages', {})[name]
        except KeyError as e:
            raise KeyError(
                f"Package not found: {name} - packages available: "
                f"{', '.join(self.config.get('packages', {}).keys())}"
            ) from e
        # Union the package's own ignore patterns (previously discarded)
        # with those of every included package.
        ignore = set(package.get('ignore', []))
        for package_to_include in package.get('include', []):
            try:
                to_ignore = self.config.get('packages', {})[package_to_include].get('ignore', [])
            except KeyError as e:
                raise KeyError(f'Package not found: {package_to_include} - in {name}.include') from e
            if to_ignore:
                ignore.update(to_ignore)
        package['ignore'] = list(ignore)
        return package

    def package_files(self, name: str) -> list:
        """Return the (source, destination) file pairs of package `name`,
        expanding wildcards, applying ignore patterns and prepending the
        files of included packages.
        """
        try:
            package_config = self.package(name)
        except KeyError as e:
            raise ValueError(
                f"Package not found: {name} - packages available: "
                f"{', '.join(self.config.get('packages', {}).keys())}"
            ) from e

        def ignored(filename):
            # True when any ignore pattern matches the filename.
            return any(re.search(pattern, filename) for pattern in package_config.get('ignore', []))

        collected = []
        for file_desc in package_config.get('files', []):
            if type(file_desc) is not str and not len(file_desc) == 2:
                raise ValueError("File definition must be string or tuple, eg. 'main.py' or ('source.py', 'destination.py')")
            source, destination = [file_desc, None] if type(file_desc) is str else file_desc
            source_relative = self.make_relative_to_configfile(source)
            if ignored(source_relative):
                continue
            if '*' not in source_relative:
                collected.append((source_relative, destination or source))
            else:
                # Wildcards allowed in sources, eg. 'tests/*.py' or 'tests/**/*.py'
                for source_file in glob.iglob(source_relative, recursive=True):
                    if os.path.isdir(source_file):
                        continue
                    if ignored(source_file):
                        continue
                    if destination is not None:
                        destination_file = destination
                    else:
                        # Destination defaults to the source path relative to
                        # the config file's directory.
                        relative_path = os.path.relpath(os.path.dirname(self.config_filename) or '.')
                        destination_file = source_file[1 + len(relative_path):]
                    collected.append((source_file, destination_file))
        for package_to_include in package_config.get('include', []):
            try:
                collected = self.package_files(package_to_include) + collected
            except ValueError as e:
                raise KeyError(f'Package not found: {package_to_include} - in {name}.include') from e
        return collected

    def make_relative_to_configfile(self, filename):
        """Return `filename` made relative to config file path."""
        return os.path.join(os.path.relpath(os.path.dirname(self.config_filename) or '.'), filename)
class Configurable(object):
    """
    Abstract class base for components (ie. device, package).

    Subclasses receive the shared Config instance and read their settings
    from it.
    """
    def __init__(self, config: Config):
        # shared configuration object used by the component
        self.config = config
# Helpers
def dict_update(d1, d2):
    """
    Update `d1` with `d2` deeply.

    Nested dicts present in both arguments are merged recursively; any
    other value from `d2` overwrites the corresponding entry in `d1`.
    `d1` is modified in place; `d2` is never mutated.
    """
    for key, incoming in d2.items():
        existing = d1.get(key)
        if isinstance(existing, dict) and isinstance(incoming, dict):
            dict_update(existing, incoming)
        else:
            d1[key] = incoming
| damiencorpataux/microdeploy | microdeploy/config.py | config.py | py | 5,313 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "yaml.Loader",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "yaml.scanner",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "yaml.scanner.ScannerErro... |
17715159341 | from django.conf.urls import url
from .views import (
get_articles_list,
home,
search_articles,
)
# Routes for the articles app: an HTML home page plus two JSON API endpoints.
# The commented-out routes are leftovers from an earlier collections/links app.
urlpatterns = [
    # url(r'^create$', collection_create, name="create"),
    # url(r'^(?P<slug>[\w-]+)/add$', link_add, name='add'),
    # url(r'^(?P<slug>[\w-]+)(?:/(?P<tag>[\w-]+))?/$', collection_detail, name='detail'),
    # url(r'^(?P<slug>[\w-]+)/search$', search_link, name="search_link"),
    # url(r'^(?P<slug>[\w-]+)/edit$', collection_update, name='update'),
    # url(r'^(?P<slug>[\w-]+)/delete$', collection_delete, name='delete'),
    url(r'^$', home, name='home'),
    url(r'^api/v1/get-articles-list$', get_articles_list, name='get_articles_list'),
    url(r'^api/v1/search-articles$', search_articles, name='search_articles'),
    # url(r'^about$', about, name='about'),
    # url(r'^link/(?P<id>\d+)/delete$', link_delete, name='link_delete' ),
    # url(r'^(?P<slug>[\w-]+)/delete$', collection_delete, name='collection_delete'),
    # url(r'^link/(?P<id>\d+)/edit$', link_edit, name='link_edit' ),
]
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.home",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.get_ar... |
15891473847 | import os
import cv2 as cv
import numpy as np
# Training image root: one sub-folder per class.
imagedb_train = ('imagedb_train')
# SIFT detector/descriptor (requires an opencv-contrib xfeatures2d build).
sift = cv.xfeatures2d_SIFT.create()
folders = os.listdir(imagedb_train)
def extract_local_features(path):
    # Compute SIFT descriptors for the image at `path` using the
    # module-level `sift` detector. sift.compute returns (keypoints,
    # descriptors); only the descriptor matrix is returned here.
    # NOTE(review): presumably (n_keypoints, 128) float32, and None when no
    # keypoints are found -- confirm against the OpenCV build in use.
    img = cv.imread(path)
    kp = sift.detect(img)
    desc = sift.compute(img, kp)
    desc = desc[1]
    return desc
# Accumulate the 128-D SIFT descriptors of every training image.
train_descs = np.zeros((0, 128), dtype=np.float32)
for folder in folders:
    current_folder = os.path.join(imagedb_train, folder)
    print(current_folder)  # progress indicator (comment translated from Greek)
    files = os.listdir(current_folder)
    for file in files:
        current_file = os.path.join(current_folder, file)
        desc = extract_local_features(current_file)
        train_descs = np.concatenate((train_descs, desc), axis=0)
# Create vocabulary: k-means cluster all descriptors into 50 visual words.
print("K-Means Calculating...")  # progress indicator
term_crit = (cv.TERM_CRITERIA_EPS, 50, 0.1)
loss, assignments, vocabulary = cv.kmeans(train_descs.astype(np.float32), 50, None, term_crit, 1, 0)
np.save('vocabulary.npy', vocabulary)
# Reload a previously computed vocabulary instead of re-clustering:
#vocabulary = np.load('vocabulary.npy')
# Create Histograms: running class label (1-based) and the accumulator of
# one integer label per training image.
current_label = np.full((1, 1), 1, dtype=int)
train_class_labels = np.zeros((0, 1), dtype=int)
def getBovwDescriptor(desc, vocabulary):
bow_desc = np.zeros((1, vocabulary.shape[0]), dtype=np.float32)
for d in range(desc.shape[0]):
distances = desc[d, :] - vocabulary
distances = np.abs(distances)
distances = np.sum(distances, axis=1)
mini = np.argmin(distances)
bow_desc[0, mini] += 1
return bow_desc
bow_descs = np.zeros((0, vocabulary.shape[0]), dtype=np.float32)
for folder in folders:
current_folder = os.path.join(imagedb_train, folder)
print(current_folder)
files = os.listdir(current_folder)
for file in files:
current_file = os.path.join(current_folder, file)
print(current_file)
desc = extract_local_features(current_file)
bow_desc = getBovwDescriptor(desc, vocabulary)
bow_descs = np.concatenate((bow_descs, bow_desc), axis=0)
train_class_labels=np.concatenate((train_class_labels,current_label), axis=0)
current_label[0] = current_label[0] + 1
np.save('bow_descs.npy', bow_descs)
np.save('train_class_labels.npy', train_class_labels)
| SteliosMouslech/Computer-Vision-Duth | Project 3/CreateVoc_CreateTrainDiscs.py | CreateVoc_CreateTrainDiscs.py | py | 2,340 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.xfeatures2d_SIFT.create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d_SIFT",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imr... |
33842505413 | #!/usr/bin/env python
import unittest
import os
import shutil
import multiprocessing
import glob
from matador.scrapers.castep_scrapers import res2dict
ROOT_DIR = os.getcwd()
REAL_PATH = "/".join(os.path.realpath(__file__).split("/")[:-1]) + "/"
TEST_DIR = REAL_PATH + "/tmp_test"
NUM_CORES = multiprocessing.cpu_count()
if os.uname()[1] == "cluster2":
NUM_CORES -= 2
def hull_test(self):
""" Perform a test run of Ilustrado with dummy DFT,
on all cores of current machine.
"""
from matador.hull import QueryConvexHull
from ilustrado.ilustrado import ArtificialSelector
res_files = glob.glob(REAL_PATH + "/data/hull-KP-KSnP_pub/*.res")
cursor = [res2dict(_file, db=True)[0] for _file in res_files]
# prepare best structures from hull as gene pool
hull = QueryConvexHull(
cursor=cursor,
elements=["K", "P"],
subcmd="hull",
no_plot=True,
source=True,
summary=True,
hull_cutoff=0,
)
cursor = hull.hull_cursor[1:-1]
print("Running on {} cores on {}.".format(NUM_CORES, os.uname()[1]))
minsep_dict = {("K", "K"): 2.5}
ArtificialSelector(
gene_pool=cursor,
hull=hull,
debug=False,
fitness_metric="hull",
nprocs=NUM_CORES,
check_dupes=0,
check_dupes_hull=False,
sandbagging=True,
minsep_dict=minsep_dict,
ncores=1,
testing=True,
mutations=["nudge_positions", "permute_atoms", "random_strain", "vacancy"],
max_num_mutations=1,
max_num_atoms=50,
mutation_rate=0.5,
crossover_rate=0.5,
num_generations=3,
population=15,
num_survivors=10,
elitism=0.5,
loglevel="debug",
)
run_hash = glob.glob("*.json")[0].split("-")[0]
new_life = ArtificialSelector(
gene_pool=cursor,
hull=hull,
debug=False,
fitness_metric="hull",
recover_from=run_hash,
load_only=True,
check_dupes=0,
check_dupes_hull=False,
minsep_dict=minsep_dict,
mutations=["nudge_positions", "permute_atoms", "random_strain", "vacancy"],
sandbagging=True,
nprocs=NUM_CORES,
ncores=1,
testing=True,
max_num_mutations=1,
max_num_atoms=50,
mutation_rate=0.5,
crossover_rate=0.5,
num_generations=10,
population=15,
num_survivors=10,
elitism=0.5,
loglevel="debug",
)
self.assertTrue(len(new_life.generations[-1]) >= 15)
self.assertTrue(len(new_life.generations[-1].bourgeoisie) >= 10)
new_life.start()
self.assertTrue(os.path.isdir(new_life.run_hash + "-results"))
num_structures = len(glob.glob(new_life.run_hash + "-results/*.res"))
self.assertTrue(num_structures > 5)
class MatadorHullUnitTest(unittest.TestCase):
""" Tests matador hull init of ilustrado. """
def setUp(self):
if os.path.isdir(TEST_DIR):
shutil.rmtree(TEST_DIR)
os.makedirs(TEST_DIR, exist_ok=True)
os.chdir(TEST_DIR)
def tearDown(self):
os.chdir(ROOT_DIR)
shutil.rmtree(TEST_DIR)
def testIlustradoFromHull(self):
# if main throws an error, so will unit test
print(os.getcwd())
hull_test(self)
if __name__ == "__main__":
unittest.main()
| ml-evs/ilustrado | ilustrado/tests/test_init.py | test_init.py | py | 3,373 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.cpu_count",
... |
2542445603 |
import colors as c
from utils import ask
intro = c.orange + '''
Welcome to the orange quiz game!!!!!!
''' + c.reset
def q1():
color = ask(c.yellow + 'What is the color of an orange?' + c.reset)
if color == 'orange':
return True
print(c.red + 'You have failed' + c.reset)
return False
def q2():
peel = ask(c.green + 'What is the correct way to eat an orange?' + c.reset)
if peel.startswith("peel"):
return True
print(c.red + "You have failed" + c.reset)
return False
def q3():
type = ask(c.blue + 'Am I refferring to the fruit or color?' + c.reset)
if type == ("fruit"):
return True
print (c.red + 'You have failed' + c.reset)
return False
questions = [q1,q2,q3]
| gorroth1/python-1 | oranges.py | oranges.py | py | 748 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "colors.orange",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "colors.reset",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "utils.ask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "colors.yellow",
"lin... |
43006106252 | # -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_submodules
hiddenimports = ['scipy.spatial.transform._rotation_groups', 'sqlalchemy.sql.default_comparator', 'sklearn.metrics._pairwise_distances_reduction._datasets_pair', 'sklearn.neighbors._partition_nodes', 'sklearn.metrics._pairwise_distances_reduction._datasets_pair', 'cmath']
hiddenimports += collect_submodules('sklearn.utils')
block_cipher = None
a = Analysis(
['run.py'],
pathex=['/opt/homebrew/Caskroom/miniforge/base/envs/scope2screen2'],
binaries=[],
datas=[('minerva_analysis/client', 'minerva_analysis/client'), ('minerva_analysis/__init__.py', 'minerva_analysis/'), ('minerva_analysis/server', 'minerva_analysis/server'), ('/opt/homebrew/Caskroom/miniforge/base/envs/scope2screen2/lib/python3.8/site-packages/xmlschema/schemas', 'xmlschema/schemas')],
hiddenimports=hiddenimports,
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='scope2screen_mac',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
| labsyspharm/scope2screen | scope2screen_mac.spec | scope2screen_mac.spec | spec | 1,564 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "PyInstaller.utils.hooks.collect_submodules",
"line_number": 5,
"usage_type": "call"
}
] |
40033104803 | # -*-coding: utf-8 -*-
# Python 3.6
# Author:Zhang Haitao
# Email:13163385579@163.com
# TIME:2018-08-15 16:04
# NAME:zht-wind_api.py
import datetime
import multiprocessing
import pickle
from WindPy import w
from utils.dateu import get_today
import numpy as np
DIR=r'e:\tmp_wind'
w.start()
import pandas as pd
import os
DATE_FORMAT='%Y-%m-%d'
def multi_task(func, args_iter, n=8):
pool=multiprocessing.Pool(n)
results=pool.map(func, args_iter)
pool.close()#trick: close the processing every time the pool has finished its task, and pool.close() must be called before pool.join()
pool.join()
#refer to https://stackoverflow.com/questions/38271547/when-should-we-call-multiprocessing-pool-join
return results
def get_stkcd_list():
today = get_today(DATE_FORMAT)
path=os.path.join(DIR,'stkcd_list_{}.pkl'.format(today))
if os.path.exists(path):
return pickle.load(open(path,'rb'))
else:
codes=w.wset("SectorConstituent",u"date={};sector=全部A股".format(today)).Data[1]
f=open(path,'wb')
pickle.dump(codes,f)
return codes
def get_ann_dt():
today=get_today(DATE_FORMAT)
path=os.path.join(DIR,'ann_dt_{}.pkl'.format(today))
if os.path.exists(path):
return pickle.load(open(path,'rb'))
else:
codes=get_stkcd_list()
result=w.wsd(','.join(codes), "stm_issuingdate", "ED-5Q", get_today(), "Period=Q;Days=Alldays")
ann_dt=pd.DataFrame(result.Data,index=result.Codes,columns=result.Times).T
f=open(path,'wb')
pickle.dump(ann_dt,f)
return ann_dt
def get_last_ann_dt():
anns=[an for an in os.listdir(os.path.join(DIR)) if an.startswith('ann_dt') and an[7:-4] !=get_today()]
anns=sorted(anns,key=lambda x:pd.to_datetime(x[7:-4]))
return pd.read_pickle(os.path.join(DIR,anns[-1]))
def get_data_for_one_stk(args):
stkcd, rpdate, indicators=args
data = w.wsd(stkcd, ','.join(indicators), rpdate, rpdate, "unit=1;rptType=1;Period=Q;Days=Alldays").Data
s = pd.Series([d[0] for d in data], index=indicators)
s.fillna(value=np.nan, inplace=True)
s.name=stkcd
print('Getting data for {}-----{}'.format(rpdate,stkcd))
return s
def get_new_df():
ann_dt=get_ann_dt()
last_ann_dt=get_last_ann_dt()
new_index=ann_dt.index.union(last_ann_dt.index)
new_column=ann_dt.columns.union(last_ann_dt.columns)
ann_dt=ann_dt.reindex(index=new_index,columns=new_column)
last_ann_dt=last_ann_dt.reindex(index=new_index,columns=new_column)
new_df=ann_dt[ann_dt!=last_ann_dt]
return new_df
def get_new_data(ann_dt_df):
name_df = pd.read_excel(os.path.join(DIR,r'indicators_name.xlsx'), sheet_name='financial')
dfs = []
for date, row in ann_dt_df.iterrows():
row = row.dropna()
row = row.apply(lambda x: x.strftime(DATE_FORMAT))
rpdate = row.name.strftime(DATE_FORMAT)
stkcds = row.index.tolist()
indicators = name_df['wind_name'].tolist()
# _df=pd.concat(multi_task(get_data_for_one_stk,((stkcd, rpdate, indicators) for stkcd in stkcds)),axis=1).T
_df = pd.concat([get_data_for_one_stk((stkcd, rpdate, indicators)) for stkcd in stkcds], axis=1).T
_df['rpdate'] = rpdate
_df.set_index('rpdate', append=True, inplace=True)
_df.index.names = ['stkcd', 'rpdate']
# df.to_pickle(os.path.join(DIR,'data_{}.pkl'.format(get_today())))
dfs.append(_df)
df = pd.concat(dfs)
return df
# today_updated.to_pickle(os.path.join(DIR, 'today_updated_{}.pkl'.format(get_today())))
#TODO: how to combine the old and the new dataframe
# codes = get_stkcd_list()
# result = w.wsd(','.join(codes), "stm_issuingdate", "ED-5Q", get_today(), "Period=Q;Days=Alldays")
# ann_dt = pd.DataFrame(result.Data, index=result.Codes, columns=result.Times).T
#
# ann_dt.to_pickle(os.path.join(DIR,'ann_dt_{}.pkl'.format(get_today())))
#
# test_ann_dt=ann_dt.iloc[:,30]
#
# df=get_new_data(ann_dt)
def test():
ann_dt=pd.read_pickle(os.path.join(DIR,'ann_dt_{}.pkl'.format(get_today())))
test_ann_dt=ann_dt.iloc[:,:30]
df=get_new_data(test_ann_dt)
df.to_pickle('test_df.pkl')
if __name__ == '__main__':
test()
#TODO: logger and multiprocessing
| luilui163/zht | data/wind/wind_api.py | wind_api.py | py | 4,251 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "WindPy.w.start",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "WindPy.w",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.dateu.get_today",
... |
37658889204 | #!./icao-venv/bin/python3
# needs: opencv2-python
# opencv2-contrib-python
# dlib
import os
from pathlib import Path
import sys
import cv2
import dlib
import numpy as np
import json
show_rectangle = False
# set here, where to look for pretrained models:
datadir = Path("data")
# following files should be present in the datadir:
needed_files = {
"shape_predictor_68_face_landmarks.dat":
"http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
}
def check_for_needed_files(datadir: Path):
from urllib import request
if not datadir.is_dir():
datadir.mkdir()
# Downloading needed Files
for filename, url in needed_files.items():
file = datadir.joinpath(filename)
newfile = dlfile = file.parent.joinpath(Path(url).name)
if not file.exists():
print(
"Downloading " + url,
file=sys.stderr
)
request.urlretrieve(
url,
filename=dlfile,
reporthook=lambda blocks, blocksize, fullsize:
print(str((blocks * blocksize * 100) /
fullsize)[:5] + "%" + "\033[F", file=sys.stderr)
)
print("Done" + (" " * 12), file=sys.stderr)
# decompress BZ2 files
if dlfile.suffix == ".bz2":
import bz2
newfile = dlfile.parent.joinpath(dlfile.stem)
if not newfile.exists():
print("Decompressing " + str(dlfile), file=sys.stderr)
newfile.write_bytes(bz2.decompress(dlfile.read_bytes()))
dlfile.unlink()
# mv file to desired name
if newfile.resolve() != file.resolve():
newfile.rename(file)
class Face:
pass
class image_analyzer:
def __init__(self, image_path: str,
pretrained_predictor: str =
datadir.joinpath("shape_predictor_68_face_landmarks.dat"),
upsample_num_times: int = 1
):
self._path = Path(image_path)
if self._path.exists():
self._img = cv2.imread(str(self._path.resolve()))
if(self._img is None):
raise ValueError(
"OpenCV can't read Imagefile: " + str(self._path))
else:
raise FileNotFoundError("Imagefile not found: " + str(self._path))
self._pretrained_predictor = Path(pretrained_predictor)
self._upsample_num_times = upsample_num_times
def img(self):
return self._img
def width(self):
return len(self._img[0])
def height(self):
return len(self._img)
def image_ratio(self):
return len(self._img[0]) / len(self._img)
def check_image_ratio(self, min: float=0.74, max: float=0.80):
return (min <= self.image_ratio() <= max)
def face_detector(self):
# get static face detector
if not hasattr(image_analyzer, '_face_detector') or image_analyzer._face_detector is None:
image_analyzer._face_detector = dlib.get_frontal_face_detector()
return image_analyzer._face_detector
def shape_predictor(self):
# load static shape predictor:
if not hasattr(self, '_shape_predictor') or self._shape_predictor is None:
self._shape_predictor = dlib.shape_predictor(
str(self._pretrained_predictor)
)
return self._shape_predictor
def face_rectangles(self):
# check if image changed:
if not hasattr(self, '_face_rectangles') or self._face_rectangles is None:
self._face_rectangles = self.face_detector()(
image=self._img,
upsample_num_times=self._upsample_num_times
)
return self._face_rectangles
def shape_of_face(self, face: dlib.full_object_detection):
return self.shape_predictor()(self._img, face)
def shapes(self):
# generator for facial landmarks
for face in self.face_rectangles():
yield self.shape_of_face(face)
def faces(self):
for face in self.face_rectangles():
yield Face(face, self)
class Face:
def __init__(self,
rectangle: dlib.full_object_detection,
analyzer: image_analyzer):
self._rect = rectangle
self._img = analyzer.img()
self._shape = [np.array([p.x, p.y])
for p in analyzer.shape_of_face(rectangle).parts()]
def shape(self):
return self._shape
def rect(self):
return self._rect
def eye_left(self):
return (self._shape[36] + self._shape[39]) / 2
def eye_right(self):
return (self._shape[42] + self._shape[45]) / 2
def eye_center(self):
return (self.eye_left() + self.eye_right()) / 2
def mouth_center(self):
return np.sum(self._shape[48:60], axis=0) / 12
def main(argv):
if len(argv) < 2:
print("missing image file", file=sys.stderr)
print("run: $ python3 geometry_check.py /path/to/image/file", file=sys.stderr)
return 1
if not hasattr(cv2, 'face'):
print("you need to install opencv2-contrib-python", file=sys.stderr)
print("$ pip install opencv2-contrib-python", file=sys.stderr)
return 1
checks = dict({
"ratio_correct": False,
"face_detected": False,
"single_face": False,
"h_line_almost_horizontal": False,
"h_line_rotation": None,
"v_line_almost_vertical": False,
"v_line_rotation": None,
"midpoint_in_vertical_center": False,
"midpoint_in_upper_half": False,
"midpoint": None,
"head_width_correct": False,
"head_width_ratio": None
})
# CHECKS STARTING HERE:
analyzer = image_analyzer(argv[1])
checks["ratio_correct"] = bool(
analyzer.check_image_ratio(min=0.74, max=0.80))
faces = [f for f in analyzer.faces()]
checks["face_detected"] = bool(len(faces) > 0)
if len(faces) == 1:
checks["single_face"] = True
face = faces[0]
# I: H line almost horizontal:
# (f.eye_left()-f.eye_right())[1] rund 0
# abs( -||- / ana.height() ) < 0.01 # weniger als 1% rotation
h_rotation = (face.eye_left() - face.eye_right()
)[1] / analyzer.height()
checks["h_line_almost_horizontal"] = bool(abs(h_rotation) < 0.01)
checks["h_line_rotation"] = float(h_rotation)
# II: V line almost vertical,
# H line is more important, because V and H don't need to be perpendicular
# (f.eye_center()-f.mouth_center())[0] rund 0
# abs( -||- / ana.width() ) < 0.05 # weniger als 5% rotation
v_rotation = (face.eye_center() - face.mouth_center()
)[0] / analyzer.width()
checks["v_line_almost_vertical"] = bool(abs(v_rotation) < 0.05)
checks["v_line_rotation"] = float(v_rotation)
# III: Midpoint M needs to be in horizontal center and vertically 30%-50% from top
# f.eye_center()/[ana.width(),ana.height()] = [ 0.45 <= x <= 0.55 , 0.30 <= y <= 0.50 ]
m_rel = face.eye_center() / [analyzer.width(), analyzer.height()]
checks["midpoint_in_vertical_center"] = bool(
(m_rel[0] >= 0.45) and (m_rel[0] <= 0.55))
checks["midpoint_in_upper_half"] = bool(
(m_rel[1] >= 0.30) and (m_rel[1] <= 0.50))
checks["midpoint"] = [float(d) for d in m_rel]
# IV: Headwith ratio
head_width_ratio = face.rect().width() / analyzer.width()
checks["head_width_correct"] = bool(
(head_width_ratio >= 0.5) and (head_width_ratio <= 0.75))
checks["head_width_ratio"] = float(head_width_ratio)
if show_rectangle:
img = analyzer.img()
cv2.namedWindow("Bild", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Bild", cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
rect = face.rect()
cv2.rectangle(img, (rect.left(), rect.top()),
(rect.right(), rect.bottom()), (0, 255, 255))
cv2.imshow("Bild", img)
while(1):
k = cv2.waitKey(33)
if k == 27 or k == ord('q'): # Esc key or q to stop
cv2.destroyAllWindows()
break
return checks
if __name__ == '__main__':
check_for_needed_files(datadir)
result = main(sys.argv)
print(json.dumps(result))
sys.exit(0)
| Anaeijon/icao_check | geometry_check/geometry_check.py | geometry_check.py | py | 8,700 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number"... |
26923650558 | from flask import request, jsonify, Blueprint
from flask_jwt_extended import jwt_required, current_user
from models.Containers_model import Container
from models.Image_model import Image
from models.db import db
from controller import Containers_controller as containerctl
from utils import check_authorization
container_bp = Blueprint('container_bp', __name__)
@container_bp.route('/images', methods=["GET"])
@jwt_required()
def get_images_by_user():
images = Image.query.filter_by(author_id=current_user.id).all()
print(images)
responses = [{
"docker_id": image.docker_id,
"name": image.name,
"createdAt": image.createdAt,
"author": image.author.name
} for image in images]
return jsonify(responses), 200
@container_bp.route('/image/<id>', methods=["GET"])
@jwt_required()
def get_image_by_id(id):
image = Image.query.filter_by(docker_id=id).first()
response = {
"docker_id": image.docker_id,
"name": image.name,
"createdAt": image.createdAt,
"author": image.author.name
}
return jsonify(response), 200
@container_bp.route('/image/<id>', methods=["DELETE"])
@jwt_required()
def delete_img(id):
image = Image.query.filter_by(docker_id=id).first()
if not image:
return jsonify({"message": "Image with the given ID is not found"}), 404
auth_check = check_authorization(image.author_id, current_user.id)
if not auth_check:
return jsonify({"success": False, "message": "Unauthorized"}), 401
ok, message = containerctl.delete_image(id, True)
if not ok:
return jsonify({"success": ok, "message": message}), 500
db.session.delete(image)
db.session.commit()
return jsonify({"success": ok, "message": message}), 200
@container_bp.route('/image', methods=["POST"])
@jwt_required()
def create_image():
body = request.get_json()
print(current_user)
result = containerctl.build_image(
current_user.email.split("@")[0].lower(),
body['baseImg'],
body['packages'],
body['desktop'],
body['username'],
body['password'],
body['timezone'],
body.get('memory', 4096)
)
# Insert into Image Table
new_image = Image(
name=result[1],
docker_id=result[2],
author_id=current_user.id
)
try:
db.session.add(new_image)
db.session.commit()
return jsonify({"success": result[0], "image_id": result[2], "image_name": result[1]}), 201
except Exception as e:
db.session.rollback()
return jsonify({"success": False, "message": str(e)}), 400
@container_bp.route('/containers', methods=["GET"])
@jwt_required()
def get_container_by_user():
containers = Container.query.filter_by(author_id=current_user.id).all()
responses = [{
"container_name": container.container_name,
"docker_id": container.docker_id,
"author": container.author.name,
"subdomain": container.subdomain,
"vnc_port": container.vnc_port,
"vnc_host": container.vnc_host,
"vnc_url": container.vnc_url,
"ssh_port": container.ssh_port,
"ws_port": container.ws_port,
"state": container.state,
"image_name": container.image.name,
"image_author": container.image.author.name,
"createdAt": container.createdAt
} for container in containers]
print(containers)
return jsonify(responses), 200
@container_bp.route('/container', methods=["POST"])
@jwt_required()
def create_and_run_container():
body = request.get_json()
image_name = body['image_name']
result = containerctl.run_container(
current_user.email.split("@")[0].lower(),
image_name
)
hostname = request.host.split(":")[0]
subdomain = f"{result[3]}.{hostname}"
# Insert into Container Table
new_container = Container(
docker_id=result[1],
vnc_port=result[2],
author_id=current_user.id,
state=result[4],
container_name=result[3],
vnc_host=hostname,
subdomain=subdomain,
image_id=result[5],
ssh_port=result[7],
ws_port=result[6],
vnc_url=f"ws://{hostname}:{result[6]}"
)
try:
db.session.add(new_container)
db.session.commit()
return jsonify({"success": result[0], "container_id": result[1], "vnc_host": request.host_url.replace("http://", "").replace("/:5000", ""), "vnc_port": result[2], "container_name": result[3], "status": result[4], "ssh_port":result[7],
"ws_port":result[6]}), 201
except Exception as e:
db.session.rollback()
return jsonify({"success": False, "message": str(e)}), 400
@container_bp.route('/container/<container_id>', methods=["GET"])
@jwt_required()
def get_container_by_id(container_id):
container = Container.query.filter_by(docker_id=container_id).first()
if not container:
return jsonify({"message": "Container with the given ID is not found"}), 404
check = check_authorization(container.author_id, current_user.id)
if not check:
return jsonify({"message": "Unauthorized"}), 401
logs = containerctl.get_logs(container.docker_id)
response = {
"container_name": container.container_name,
"docker_id": container.docker_id,
"author": container.author.name,
"subdomain": container.subdomain,
"vnc_port": container.vnc_port,
"vnc_host": container.vnc_host,
"vnc_url": container.vnc_url,
"ssh_port": container.ssh_port,
"ws_port": container.ws_port,
"state": container.state,
"image_name": container.image.name,
"image_author": container.image.author.name,
"createdAt": container.createdAt,
"logs": logs.decode()
}
return jsonify(response), 200
@container_bp.route('/container/<container_id>', methods=["PUT"])
@jwt_required()
def stop_container(container_id):
container_check = Container.query.filter_by(docker_id=container_id).first()
if not container_check:
return jsonify({"message": "Container with the given ID is not found"}), 404
auth_check = check_authorization(container_check.author_id, current_user.id)
if not auth_check:
return jsonify({"message": "Unauthorized"}), 401
ok = containerctl.stop_container(container_id)
container = Container.query.filter_by(docker_id=container_id).first()
container.state = "Stopped"
db.session.commit()
return jsonify({"success": ok}), 200
@container_bp.route('/container/<container_id>', methods=["DELETE"])
@jwt_required()
def delete_container(container_id):
container_check = Container.query.filter_by(docker_id=container_id).first()
if not container_check:
return jsonify({"message": "Container with the given ID is not found"}), 404
auth_check = check_authorization(container_check.author_id, current_user.id)
if not auth_check:
return jsonify({"message": "Unauthorized"}), 401
ok = containerctl.delete_container(container_id)
container = Container.query.filter_by(docker_id=container_id).first()
db.session.delete(container)
db.session.commit()
return jsonify({"success": ok}), 200 | abhirambsn/stuniq-web-desktop | backend/routes/Container_routes.py | Container_routes.py | py | 7,313 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Image_model.Image.query.filter_by",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Image_model.Image.query",
"line_number": 14,
"usage_type": "attribute"
}... |
29720274088 | from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 4
_modified_time = 1224684001.22454
_template_filename='/media/disk/Pylons Book/Code/chapter06/FormExample-01/formexample/templates/derived/form.html'
_template_uri='/derived/form.html'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
from webhelpers.html import escape
_exports = ['person', 'study']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, '/base/index.html', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
h = context.get('h', UNDEFINED)
def study():
return render_study(context.locals_(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n')
# SOURCE LINE 19
__M_writer(u'\n\n')
# SOURCE LINE 51
__M_writer(u'\n\n<h1>Create a Study</h1>\n\n')
# SOURCE LINE 55
__M_writer(escape(h.form(h.url_for(controller='study', action='process'))))
__M_writer(u'\n')
# SOURCE LINE 56
__M_writer(escape(study()))
__M_writer(u'\n')
# SOURCE LINE 57
__M_writer(escape(h.submit(name="action", value="Save")))
__M_writer(u'\n')
# SOURCE LINE 58
__M_writer(escape(h.submit(name="action", value="Add New Person")))
__M_writer(u'\n')
# SOURCE LINE 59
__M_writer(escape(h.end_form()))
__M_writer(u'\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_person(context,id):
context.caller_stack._push_frame()
try:
h = context.get('h', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 21
__M_writer(u'\n <fieldset><legend>Person</legend>\n\n <label for="person-')
# SOURCE LINE 24
__M_writer(escape(id))
__M_writer(u'.title">Title</label><br />\n ')
# SOURCE LINE 25
__M_writer(escape(h.text(name="person-%s.title"%(id), id="person-%s.title"%(id))))
__M_writer(u'<br />\n\n <label for="person-')
# SOURCE LINE 27
__M_writer(escape(id))
__M_writer(u'.firstname">First Name</label><br />\n ')
# SOURCE LINE 28
__M_writer(escape(h.text(
name="person-%s.firstname"%(id),
id="person-%s.firstname"%(id
))))
# SOURCE LINE 31
__M_writer(u'<br />\n\n <label for="person-')
# SOURCE LINE 33
__M_writer(escape(id))
__M_writer(u'.surname">Surname</label><br />\n ')
# SOURCE LINE 34
__M_writer(escape(h.text(name="person-%s.surname"%(id), id="person-%s.surname"%(id))))
__M_writer(u'<br />\n\n <label for="person-')
# SOURCE LINE 36
__M_writer(escape(id))
__M_writer(u'.role">Role</label><br />\n ')
# SOURCE LINE 37
__M_writer(escape(h.select(
"person-%s.role"%(id),
[],
[
['1', 'Chief Investigator'],
['2', 'Assistant'],
['3', 'Student'],
],
id="person-%s.role"%(id),
)))
# SOURCE LINE 46
__M_writer(u'<br />\n\n ')
# SOURCE LINE 48
__M_writer(escape(h.submit(name="action", value="Remove %s"%(id))))
__M_writer(u'\n\n </fieldset><br />\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_study(context):
context.caller_stack._push_frame()
try:
h = context.get('h', UNDEFINED)
range = context.get('range', UNDEFINED)
c = context.get('c', UNDEFINED)
def person(id):
return render_person(context,id)
__M_writer = context.writer()
# SOURCE LINE 3
__M_writer(u'\n <fieldset><legend>Study</legend>\n\n <label for="title">Title</label><br />\n ')
# SOURCE LINE 7
__M_writer(escape(h.text(name="title", id="title")))
__M_writer(u'<br />\n\n <label for="start_date">Start Date</label><br />\n ')
# SOURCE LINE 10
__M_writer(escape(h.text(name="start_date", id="startdate")))
__M_writer(u'<br />\n\n <label for="end_date">End Date</label><br />\n ')
# SOURCE LINE 13
__M_writer(escape(h.text(name="end_date", id="enddate")))
__M_writer(u'<br />\n\n </fieldset><br />\n')
# SOURCE LINE 16
for id in range(c.number_of_people):
# SOURCE LINE 17
__M_writer(u' ')
__M_writer(escape(person(id=id)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
| Apress/def-guide-to-pylons | Code/chapter06/FormExample-01/data/templates/derived/form.html.py | form.html.py | py | 5,241 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "mako.runtime.UNDEFINED",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "mako.runtime",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "mako.cache.Cache",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mako.cache",
... |
21026304473 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 14:13:33 2017
预处理获取小波系数、重构系数、模极大值
对预处理后的序列进行特征提取,包括能量、归一化能量、能量熵、能量矩、排列熵、近似熵、样本熵、最大Lyapunov指数、灰度矩
分形维数、奇异熵
@author: baishuhua
"""
import pywt
import numpy as np
from scipy import stats
import sys
sys.path.append('E:\\大数据\\基础研究\\HilbertHuang变换')
import HilbertHuang
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
def DWT(signal,wavelet,level):
# 实数序列进行dwt变换,返回的是不等长的小波系数,[cAn,cDn,cDn-1,...,cD2,cD1]
# max_level=pywt.dwt_max_level(data_len=len(signal), filter_len=pywt.Wavelet(wavelet).dec_len)
Coeffs=pywt.wavedec(signal,wavelet=wavelet,mode='smooth',level=level)
return Coeffs
def DwtRec(signal,wavelet,level):
# 返回的是每一层的分量,即将原始信号进行了频带分解,时间长度与原始序列signal等长
# 每一层对应一行
CoeffsRev=[]
Coeffs=DWT(signal,wavelet=wavelet,level=level)
for row in range(level+1):
if row==0:
# 重构近似系数,即低频分量
leveltype=level;part='a'
else:
# 重构细节系数,即高频分量
leveltype=level-row+1;part='d'
EachLevelRev=pywt.upcoef(part,Coeffs[row],wavelet=wavelet,level=leveltype,take=len(signal))
CoeffsRev.append(EachLevelRev)
return np.array(CoeffsRev)
def DwtModMax(signal,wavelet,level):
# 返回的是细节系数的模极大值序列
DetailCoeffs=DWT(signal,wavelet=wavelet,level=level)[1:]
ExtremeX=[];ExtremeY=[]
for coeff in DetailCoeffs:
pos=HilbertHuang.findpeaks(coeff)
neg=HilbertHuang.findpeaks(-coeff)
xx=np.concatenate((pos,neg))
yy=np.concatenate((coeff[pos],coeff[neg]))
index=np.argsort(xx)
ExtremeX.append(list(xx[index]));ExtremeY.append(list(yy[index]))
return np.array(ExtremeX),np.array(ExtremeY)
def GetEnergy(coeffs):
# 能量'Energy'
Energy=[];EnergyName=[]
for order,EachLevelCoeff in enumerate(coeffs):
Energy.append(np.sum(EachLevelCoeff**2))
EnergyName.append('Energy-L'+str(len(coeffs)-order))
return np.array(EnergyName),np.array(Energy)
def GetEnergyRatio(coeffs):
# 归一化能量'EnergyRatio'
EnergyName,Energy=GetEnergy(coeffs)
EnergyRatioName=list(map(lambda x:x.replace('Energy','EnergyRatio'),EnergyName))
EnergyRatio=Energy/np.sum(Energy)
return np.array(EnergyRatioName),np.array(EnergyRatio)
def Entropy(pk): # 香农熵,pk参数非负代表的是概率
entropy=stats.entropy(pk,qk=None,base=None)
return entropy
def GetEnergyEn(coeffs):
# 能量熵'EnergyEn'
WEE=[];WEEName=[]
for order,coeff in enumerate(coeffs):
pk=coeff**2/np.sum(coeff**2)
WEE.append(Entropy(pk))
WEEName.append('EnergyEn-L'+str(len(coeffs)-order))
return np.array(WEEName),np.array(WEE)
def GetEnergyTq(coeffs):
# 归一化能量矩'EnergyTq'
WET=[];WETName=[]
for order,coeff in enumerate(coeffs):
weight=np.linspace(1,len(coeff),num=len(coeff))
WET.append(np.dot(weight,coeff**2))
WETName.append('EnergyTq-L'+str(len(coeffs)-order))
WET/=np.sum(WET)
return np.array(WETName),np.array(WET)
def GetGrayTq(coeffs):
# 灰度矩'GrayTq'
m,n=np.shape(coeffs)
M=0
for i,j in zip(np.arange(1,m+1),np.arange(1,n+1)):
M+=abs(coeffs[i-1,j-1])*np.sqrt((i-1)**2+(j-1)**2)
M/=(m*n)
return np.array(['GrayTq']),np.array([M])
def GetSingularEn(coeffs):
# 奇异熵'SingularEn'
U,s,V=np.linalg.svd(coeffs,full_matrices=True)
pk=s/np.sum(s)
WSE=Entropy(pk)
return np.array(['SingularEn']),np.array([WSE])
def GetFractalDims(coeffs):
# 分形维数'FractalDims'
Ref0=np.log2(np.var(np.sum(coeffs,axis=0))) # 原信号方差的对数
temp=coeffs[1:]
xx=np.arange(len(coeffs)-1,0,step=-1).ravel()
yy=np.log2(np.var(temp,axis=1)).ravel()
yy=np.insert(yy,0,Ref0);xx=np.insert(xx,0,0)
index=np.argsort(xx)
xx=xx[index];yy=yy[index]
params=np.polyfit(xx,yy,1)
beta=-params[0];D=1.0/2*(5-beta)
return np.array(['FractalDims']),np.array([D])
def EuclideanDistance(x1,x2):
return np.sqrt(np.dot(x1-x2,x1-x2))
def DistanceMatrix(X,method=EuclideanDistance):
Distance=np.zeros((len(X),len(X)))
for row in np.arange(0,len(X)-1):
Xi=X[row]
for col in np.arange(row,len(X)):
Xj=X[col]
Distance[row,col]=method(Xi,Xj)
Distance+=np.transpose(Distance)
return Distance
def SearchNearest(Distance):
# 按照行,返回与之距离最近的行编号和距离值
XPos0=[];YDist0=[]
for row,dist in enumerate(Distance):
temp=np.argsort(dist)
index=temp[temp!=row][0]
XPos0.append(index)
YDist0.append(dist[index])
return XPos0,YDist0
def GetLyapunov(coeffs):
# 最大Lyapunov指数 'Lyapunov'
if len(coeffs)==np.size(coeffs):
coeffs=coeffs[np.newaxis,:]
m=3;J=5 # 分别对应嵌入维数和延迟参数
N=np.shape(coeffs)[-1] # 对应时间序列的长度
M=N-(m-1)*J # 对应相空间重构矩阵的行数
Lamda=[];LamdaName=[]
for order,coeff in enumerate(coeffs):
X=[] # 对应每层小波系数重构的相空间
for row in range(m):
X.append(coeff[int(row*J):int(row*J+M)])
X=np.transpose(np.array(X))
Distance=DistanceMatrix(X)
XPos0,YDist0=SearchNearest(Distance)
# 第i行代表与第i个样本距离最近的样本索引及其最近距离,公式中的j^与dj(0)
XRef0=np.arange(0,len(Distance))
# 与XPos0对应的参考样本索引,公式中的j
lamda=[]
for indexRef0,indexPos0 in zip(XRef0,XPos0):
xx=[0];yy=[np.log(YDist0[indexRef0]+1e-10)]
for bias in range(min(len(XRef0)-indexRef0,len(XRef0)-indexPos0)-1):
xx.append(bias+1)
yy.append(np.log(Distance[indexRef0+bias+1,indexPos0+bias+1]+1e-10))
if len(xx)>1:
params=np.polyfit(xx,yy,1)
lamda.append(params[0])
else:
continue
Lamda.append(max(lamda))
LamdaName.append('Lyapunov-L'+str(len(coeffs)-order))
return np.array(LamdaName),np.array(Lamda)
def AbsDistance(signal1,signal2):
return np.max(np.abs((signal1-signal2)))
def GetApEn(coeffs):
if len(coeffs)==np.size(coeffs):
coeffs=coeffs[np.newaxis,:]
ApEn=[]; # 对应近似熵
PHY=[];
N=np.shape(coeffs)[-1] # 对应时间序列的长度
for m in [2,3]: # 对应嵌入维数
M=N-m+1 # 对应重构矩阵的行数
PHY_before=[];ApEnName=[]
for order,coeff in enumerate(coeffs):
gama=0.2*np.std(coeff) # 对应相似容限
X=[] # 对应每层小波系数重构的相空间
for row in range(M):
X.append(coeff[int(row):int(row+m)])
Distance=DistanceMatrix(X,method=AbsDistance)
total=list(map(lambda x:sum(x<=gama)-1,Distance))
Ci=np.divide(total,float(M-1))
Ci=Ci[np.nonzero(Ci)]
PHY_before.append(np.mean(np.log(Ci))) # 对应phy-m
ApEnName.append('ApEn-L'+str(len(coeffs)-order))
PHY.append(PHY_before)
PHY=np.array(PHY)
ApEn=PHY[0]-PHY[1]
return np.array(ApEnName),np.array(ApEn)
def GetSampEn(coeffs):
if len(coeffs)==np.size(coeffs):
coeffs=coeffs[np.newaxis,:]
SampEn=[] # 对应样本熵
B=[]
N=np.shape(coeffs)[-1] # 对应时间序列的长度
for m in [2,3]: # 对应嵌入维数
M=N-m+1 # 对应重构矩阵的行数
B_before=[];SampEnName=[]
for order,coeff in enumerate(coeffs):
gama=0.2*np.std(coeff) # 对应相似容限
X=[] # 对应每层小波系数重构的相空间
for row in range(M):
X.append(coeff[int(row):int(row+m)])
Distance=DistanceMatrix(X,method=AbsDistance)
total=list(map(lambda x:sum(x<=gama)-1,Distance))
Ci=np.divide(total,float(M-1))
Ci=Ci[np.nonzero(Ci)]
B_before.append(np.mean(Ci))
SampEnName.append('SampEn-L'+str(len(coeffs)-order))
B.append(B_before)
B=np.array(B)
SampEn=-np.log(B[1]/B[0]) # 分别对应近似熵和样本熵
return np.array(SampEnName),np.array(SampEn)
def GetPermuEn(coeffs):
# 排列熵 'PermutationEn'
if len(coeffs)==np.size(coeffs):
coeffs=coeffs[np.newaxis,:]
m=3;J=5 # 分别对应嵌入维数和延迟参数
N=np.shape(coeffs)[-1] # 对应时间序列的长度
M=N-(m-1)*J # 对应相空间重构矩阵的行数
PermuEn=[];PermuEnName=[]
for order,coeff in enumerate(coeffs):
X=[] # 对应每层小波系数重构的相空间
for row in range(m):
X.append(coeff[int(row*J):int(row*J+M)])
X=np.transpose(np.array(X))
S=list(map(lambda x:np.argsort(x),X))
S=np.array(S)
strS=list(map(str,S))
strCode=list(map(lambda x:'$'.join(x),strS))
Counts={}
for each in strCode:
if each not in Counts.keys():
Counts[each]=1
else:
Counts[each]+=1
pk=np.divide(list(Counts.values()),len(S))
PermuEn.append(Entropy(pk))
PermuEnName.append('PermuEn-L'+str(len(coeffs)-order))
return np.array(PermuEnName),np.array(PermuEn)
# 综合Dwt变换域指标
def FeatureExtractDwt(signal):
# FunctionNames=[GetEnergy,GetEnergyRatio,GetEnergyEn,GetEnergyTq,GetGrayTq,GetSingularEn,\
# GetFractalDims,GetLyapunov,GetApEn,GetSampEn,GetPermuEn]
# 仅提取部分特征
FunctionNames=[GetSingularEn,GetGrayTq,GetFractalDims]
# if np.size(signal)==len(signal):
# signal=signal[:,np.newaxis]
ResultName=[];Result=[]
for functionname in FunctionNames:
featurename,feature=functionname(signal)
ResultName.append(featurename)
Result.append(feature)
FeatureValue=np.concatenate(Result)
FeatureName=np.concatenate(ResultName)
return FeatureName,FeatureValue
# 示例
def ReadRecord(file): # 读取录波数据
ReadFlag=open(file)
ReadFlag.readline();ReadFlag.readline();ReadFlag.readline()
segnum=ReadFlag.readline().strip().split(':')[1]
Time=[]
for seg in range(int(segnum)):
SampleAttr=ReadFlag.readline().strip().split(' ')
[fs,start,terminal]=list(map(int,map(float,SampleAttr)))
if len(Time)<1:
Time.extend(1/fs*(np.linspace(1,terminal-start+1,num=terminal-start+1)))
else:
Time.extend(Time[-1]+1.0/fs*(np.linspace(1,terminal-start+1,num=terminal-start+1)))
FaultIndex=int(ReadFlag.readline().strip().split('fault_index:')[1])
FaultTime=Time[FaultIndex]
SignalNames=ReadFlag.readline().strip().split(' ')
SignalNames=[name for name in SignalNames if name!='']
Data=[];ValidIndex=[];row=0
for record in ReadFlag:
detail=record.strip().split(' ')
if len(detail)==len(SignalNames):
try:
Data.append(list(map(float,detail)))
ValidIndex.append(row)
row=row+1
except ValueError:
row=row+1
else:
row=row+1
continue
ReadFlag.close()
ValidIndex=np.array(ValidIndex)
rows=ValidIndex[ValidIndex<len(Time)]
Time=np.array(Time);Time=Time[rows];Data=np.array(Data);Data=Data[rows];
return Time,Data,np.array(SignalNames),FaultTime
def Interp(x,y,kind='slinear',axis=0): # 沿axis轴拟合x,y,因变量y沿axis维度应等于x维度
from scipy import interpolate
function=interpolate.interp1d(x,y,kind,axis)
return function
if __name__=='__main__':
# 模拟数据
if 0:
# fs=1000
# time=np.arange(0,1,step=1/fs)
# signal=2*np.cos(2*np.pi*50*time)+4+0.25*np.cos(2*np.pi*100*time)+1*np.cos(2*np.pi*150*time)
fs=1000;f0=50;f1=350;f2=150
time=np.arange(0,10,step=1/fs);signal=6*np.cos(2*np.pi*f0*time)+2+0.25*np.cos(2*np.pi*f2*time)
t1=4;t2=6;signal[(time>=t1)&(time<=t2)]+=0.05*np.cos(2*np.pi*f1*time[(time>=t1)&(time<=t2)])
# 仿真数据
if 1:
file = r'E:\关于项目\江苏涌流识别\仿真\bsh\内部故障\5%匝_AB_[0.1 0.12]s.txt'
ReadFlag = open(file)
data = []
for line in ReadFlag:
attr = line.strip().split(' ')
attr = list(map(float,attr))
data.append(attr)
data = np.array(data)
signal = data[:,1]
time = data[:,0]
fs = 1/(time[1]-time[0])
coeffs = DwtRec(signal,wavelet='bior3.5',level=4)
FeatureName,FeatureValue = FeatureExtractDwt(coeffs)
# 实测数据
if 0:
import os
file='E:/大数据/线路故障诊断/解压录波/有效集中128/Test-1/异物_1_0_12_港城站_20140510港遂乙线_SSH850.cfg_sub.dat_AN'
filename=os.path.basename(file)
attr=filename.split('_')
FaultCause=attr[0];FaultPhaseNum=attr[1];FaultPhaseType=attr[-1]
Time,Data,SignalNames,FaultTime=ReadRecord(file)
SelectRow=(Time>=FaultTime-0.06)&(Time<=FaultTime+0.1) # 截取故障前2个周波,故障后10个周波进行分析
Time=Time[SelectRow];Data=Data[SelectRow]
SelectCol=np.array([False,False,False,True,False,False,False,False])
Data=Data[:,SelectCol] # 选择A相电流
fitting=Interp(Time,Data,kind='slinear',axis=0)
fs=1000 # 插值固定的采样率
TimeNew=np.arange(min(Time),max(Time),step=1.0/fs)[:-1]
DataNew=fitting(TimeNew)
time=TimeNew;signal=DataNew.ravel()
if 1:
wavelet='bior3.5';level=4
coeffsRev=DwtRec(signal,wavelet,level)
fig,axes=plt.subplots(level+1,1,figsize=(6,6))
for row,ax in enumerate(axes.ravel(),start=1):
ax.plot(time,coeffsRev[level+1-row]);ax.set_ylabel('尺度'+str(row))
# axes[0].set_title(FaultCause)
plt.show()
if 1:
plt.plot(time,signal);plt.ylabel('原始信号');
yrange=max(signal)-min(signal)
plt.ylim([min(signal)-1/10*yrange,max(signal)+1/10*yrange])
plt.show()
| baishuhuaGitHub/Transformer-Simulink | DwtFeatureExtraction.py | DwtFeatureExtraction.py | py | 14,858 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "matplot... |
40572030080 | #! /usr/bin/env python
# -*- coding : utf-8 -*-
import os
import re
import ujson
from dotmap import DotMap
from subprocess import check_output
from datetime import datetime, timedelta
# output = '(Connected)' in check_output( "scutil --nc list".split( ' ' ) ).decode( 'utf-8' )
# print( output )
refresh_delta = timedelta( seconds = 10 )
now = datetime.utcnow()
refresh_time = now + refresh_delta
print( now )
print( refresh_time )
now_str = now.isoformat()
refresh_time_str = refresh_time.isoformat()
print( now_str )
print( refresh_time_str )
now_reconstituted = datetime.fromisoformat( now_str )
refresh_time_reconstituted = datetime.fromisoformat( refresh_time_str )
print( now_reconstituted )
print( refresh_time_reconstituted )
print( ['DONE'] ) | subokita/dashboard | api/modules/test.py | test.py | py | 801 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetim... |
39624609295 | # author: Jeremy Temple, Eli Manning
import enum
class Tokens(object):
def __init__(self):
self._data = []
def __iter__(self):
return iter(self._data)
def append(self, token):
self._data.append(token)
@property
def lookahead(self):
return self._data[0]
def consume(self):
self._data.pop(0)
def __str__(self):
w = []
for i in self._data:
w.append("Kind: {0}\nValue: {1}\n\n".format(i._kind, i._value))
return "".join(w)
class tokenenum(enum.Enum):
BEGIN = enum.auto()
DO = enum.auto()
END = enum.auto()
FALSE = enum.auto()
HALT = enum.auto()
IF = enum.auto()
PRINT = enum.auto()
PROGRAM = enum.auto()
THEN = enum.auto()
TRUE = enum.auto()
VAR = enum.auto()
WHILE = enum.auto()
class token(object):
def __init__(self, kind, value):
self._kind = kind
self._value = value
def __str__(self):
return "{0} {1}".format(self.kind, self.value)
@property
def kind(self):
return self._kind
@property
def value(self):
return self._value
keywords = {"begin" : tokenenum.BEGIN,
"do" : tokenenum.DO,
"end" : tokenenum.END,
"false" : tokenenum.FALSE,
"halt" : tokenenum.HALT,
"if" : tokenenum.IF,
"print" : tokenenum.PRINT,
"program" : tokenenum.PROGRAM,
"then" : tokenenum.THEN,
"true" : tokenenum.TRUE,
"var" : tokenenum.VAR,
"while" : tokenenum.WHILE}
| ECManning/Computer-Testing-Sictc | token_v1.py | token_v1.py | py | 1,593 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "enum.auto",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 30,... |
26355673371 | import importlib
from pathlib import Path, PurePath
import glob
import os
import logging
import numpy as np
from pydub import AudioSegment
import pandas as pd
import cv2
# TODO: General solution
# TODO: Can string_filtering be called by generate_filenames?
# TODO: output only benign file?
# TODO: think about input output design of generate_filenames
# def generate_filename_list(path, file_key, dir_key='', only_filename=False):
# input_paths, gt_paths = [], []
# for root, dirs, files in os.walk(path):
# for f in files:
# if not only_filename:
# fullpath = os.path.join(root, f)
# else:
# fullpath = f
# if dir_key in fullpath:
# if file_key in fullpath:
# gt_paths.append(fullpath)
# else:
# input_paths.append(fullpath)
# return input_paths, gt_paths
def get_melspec_from_cpp(wav_list_path, out_dir, sampling_rate=None):
# Generate output path list (wav -> csv)
out_path_list = []
wav_list_dir, wav_list_filename = os.path.split(wav_list_path)
with open(wav_list_path, 'r') as fw:
in_path_list = fw.readlines()
in_path_list = [f if f[-3:] == 'wav' else f[:-1] for f in in_path_list]
csv_out_dir = os.path.join(out_dir, 'csv', wav_list_filename[:-4])
os.makedirs(csv_out_dir, exist_ok=True)
data_pair = {}
for in_path in in_path_list:
in_dir, in_file = os.path.split(in_path)
out_file = in_file.replace('wav', 'csv')
out_path = os.path.join(csv_out_dir, out_file)
out_path_list.append(out_path)
key = out_file[:-4]
# label = int(os.path.split(in_dir)[1])
label = 0
data_pair[key] = {'label': label}
csv_list_path = os.path.join(csv_out_dir, wav_list_filename)
with open(csv_list_path, 'w+') as fw:
for path in out_path_list:
fw.write(f'{path}\n')
# Cpp MelSpectrogram
exe_file = Path(r'compute-mfcc.exe')
inputlist = Path(wav_list_path)
outputlist = Path(csv_list_path)
command = (
f'{exe_file.as_posix()} '
f'--inputlist "{inputlist.as_posix()}" '
f'--outputlist "{outputlist.as_posix()}" '
)
if sampling_rate is not None:
command += f'--samplingrate {sampling_rate} '
os.system(command)
# Save CPP feature (csv -> npy)
# csv_list = glob.glob(os.path.join(csv_out_dir, '*.csv'))
csv_list = out_path_list
img_out_dir = os.path.join(out_dir, 'img', wav_list_filename[:-4])
os.makedirs(img_out_dir, exist_ok=True)
img_save_paths = []
for idx, csv_f in enumerate(csv_list):
# if idx>10:break
_, filename = os.path.split(csv_f)
try:
df = pd.read_csv(csv_f)
# XXX: wrong data extraction for temp using
# df = pd.read_csv(csv_f, header=None)
data = df.to_numpy().T
except pd.errors.EmptyDataError:
print(f'- Empty pandas data {csv_f}')
data = np.zeros(1)
save_path = os.path.join(img_out_dir, filename.replace('csv', 'npy'))
img_save_paths.append(save_path)
data_pair[filename[:-4]]['path'] = save_path
np.save(save_path, data)
with open(os.path.join(out_dir, wav_list_filename), 'w+') as fw:
fw.writelines(img_save_paths)
inputs, paths, labels = [], [], []
for k, v in data_pair.items():
inputs.append(k[:-4])
if 'path' in v:
paths.append(v['path'])
else:
paths.append('')
if 'label' in v:
labels.append(v['label'])
else:
labels.append('')
# labels.append(v['label'])
df = pd.DataFrame({
'input': inputs, 'img_path': paths, 'label': labels
})
df.to_csv(os.path.join(out_dir, wav_list_filename).replace('txt', 'csv'))
def get_dir_list(data_path, full_path=True):
dir_list = np.array([], dtype=object)
for f in os.listdir(data_path):
folder_path = os.path.join(data_path, f)
if os.path.isdir(folder_path):
if full_path:
dir_list = np.append(dir_list, folder_path)
else:
dir_list = np.append(dir_list, os.path.split(folder_path)[1])
return list(dir_list)
def get_clips_from_audio(y, clip_time, hop_time):
clips = []
if y.duration_seconds >= clip_time:
for t in range(0, int(y.duration_seconds)-clip_time+1, hop_time):
start_t, end_t = t, t+clip_time
clip = y[1000*start_t:1000*end_t]
clips.append(clip)
return clips
def continuous_split(path, clip_time, hop_time, sr, channels, add_volume, output_format='wav'):
assert clip_time > 0 and hop_time > 0
file_name = os.path.basename(path)
file_format = file_name.split('.')[1]
y = load_audio_waveform(path, file_format, sr, channels)
save_path = os.path.join(os.path.split(path)[0], f'clips_{clip_time}_{hop_time}_{add_volume}dB')
if not os.path.isdir(save_path):
os.makedirs(save_path)
y += add_volume
clips = get_clips_from_audio(y, clip_time, hop_time)
for idx, clip in enumerate(clips, 1):
clip.export(os.path.join(save_path, file_name.replace(f'.{file_format}', f'_{idx:03d}.{output_format}')), output_format)
print('Finish spitting.')
return save_path
def save_aLL_files_name(path, name='file_names', keyword=[], filtering_mode='in', is_fullpath=True, shuffle=True, save_path=None):
# file_names = get_file_names(path, keyword, filtering_mode, is_fullpath, shuffle)
file_names = get_files(path, keys=keyword, return_fullpath=True, sort=True)
if not save_path: save_path = path
save_content_in_txt(
file_names, os.path.join(save_path, f'{name}.txt'), filter_bank=[], access_mode='w+', dir=None)
# with open(os.path.join(save_path, f'{name}.txt'), 'w+') as fw:
# for f in file_names:
# fw.write(f)
# fw.write('\n')
def get_ASUS_snoring_index(save_path, split, balancing=True):
# TODO: Multiple class (miner)
# Load and check dataset
subject_list = []
data_pair = {}
train_pairs, valid_pairs = [], []
df = pd.read_csv(save_path)
for index, row in df.iterrows():
pair = (row['path'], row['label'])
if row['subject'] in data_pair:
data_pair[row['subject']].append(pair)
else:
subject_list.append(row['subject'])
data_pair[row['subject']] = [pair]
# TODO: balancing
# indexing
if balancing:
sample_count = {}
for subject in subject_list:
for sample in data_pair[subject]:
if sample[1] in sample_count[subject]:
sample_count[subject][sample[1]] += 1
else:
sample_count[subject][sample[1]] = 1
else:
sample_count = []
for subject in subject_list:
sample_count.append(len(data_pair[subject]))
sorted_idx = np.argsort(np.array(sample_count))
subject_list = np.take(subject_list, sorted_idx)
# TODO: subject_list[::3]
valid_subjects = subject_list[::3]
train_subjects = list(set(subject_list)-set(valid_subjects))
for subject in train_subjects:
train_pairs.extend(data_pair[subject])
for subject in valid_subjects:
valid_pairs.extend(data_pair[subject])
return train_pairs, valid_pairs
def load_input_data(config):
assert isinstance(config.dataset.data_path, dict)
total_train, total_valid = [], []
for key in config.dataset.data_path:
if key == 'ASUS_snoring':
save_path = save_ASUS_snoring_index(config.dataset.data_path[key])
train, valid = get_ASUS_snoring_index(save_path)
elif key == 'esc-50':
pass
elif key == 'Kaggle_snoring':
pass
total_train.extend(train)
total_valid.extend(valid)
return total_train, total_valid
return datasets_indexed
def get_pydub_sound(filename, audio_format, sr=None, channels=None):
"""
Get Pydub sound object and set the basic params
"""
# sound = AudioSegment.from_file(filename, 'mp4')
sound = AudioSegment.from_file(filename, audio_format)
if sr: sound = sound.set_frame_rate(sr)
if channels: sound = sound.set_channels(channels)
return sound
def load_audio_waveform(filename, audio_format, sr=None, channels=None):
"""
Pydub based waveform loading function
"""
sound = get_pydub_sound(filename, audio_format, sr, channels)
waveform = np.array(sound.get_array_of_samples(), np.float32)
sr = sound.frame_rate
return waveform, sr
def get_files(path, keys=[], return_fullpath=True, sort=True, sorting_key=None):
"""Get all the file name under the given path with assigned keys
Args:
path: (str)
keys: (list, str)
return_fullpath: (bool)
sort: (bool)
sorting_key: (func)
Return:
file_list: (list)
"""
file_list = []
assert isinstance(keys, (list, str))
if isinstance(keys, str): keys = [keys]
# Rmove repeated keys
keys = list(set(keys))
def push_into_filelist(root, f, file_list, is_fullpath):
if is_fullpath:
file_list.append(os.path.join(root, f))
else:
file_list.append(f)
for i, (root, dirs, files) in enumerate(os.walk(path)):
for j, f in enumerate(files):
if keys:
for key in keys:
if key in f:
push_into_filelist(root, f, file_list, return_fullpath)
else:
push_into_filelist(root, f, file_list, return_fullpath)
if file_list:
if sort: file_list.sort(key=sorting_key)
else:
if keys:
logging.warning(f'No file exist with key {keys}.')
else:
logging.warning(f'No file exist.')
return file_list
def generate_filenames(path, keys=None, include_or_exclude=None, is_fullpath=True, loading_formats=None):
"""Get all the file name under the given path with assigned keys and including condition"""
# TODO: index error when keys=['a','a','a'] include_or_exclude=['include', 'exclude', 'exclude']
if len(keys) == 0: keys = None
if len(include_or_exclude) == 0: include_or_exclude = None
if keys and include_or_exclude:
assert len(keys) == len(include_or_exclude)
full_keys = [f'{condition}_{k}' for k, condition in zip(keys, include_or_exclude)]
# TODO: logging instead of print for repeat key (think about raise error or not)
if len(full_keys) != len(list(set(full_keys))):
print('Warning: Repeat keys')
full_keys = list(set(full_keys))
if keys:
filenames = {k: [] for k in full_keys}
else:
filenames = []
for root, dirs, files in os.walk(path):
for f in files:
if loading_formats:
process = False
for format in loading_formats:
if format in f:
process = True
break
else:
process = True
if process:
if is_fullpath:
final_path = os.path.join(root, f)
else:
final_path = f
if keys:
for idx, k in enumerate(keys):
if include_or_exclude[idx] == 'include':
if k in final_path:
filenames[full_keys[idx]].append(final_path)
elif include_or_exclude[idx] == 'exclude':
if k not in final_path:
filenames[full_keys[idx]].append(final_path)
else:
raise ValueError('Unknown key condition')
else:
filenames.append(final_path)
return filenames
def get_class(class_name, modules):
for module in modules:
m = importlib.import_module(module)
clazz = getattr(m, class_name, None)
if clazz:
return clazz
raise RuntimeError(f'Unsupported dataset class: {class_name}')
def load_content_from_txt(path, access_mode='r') -> list:
if isinstance(path, PurePath):
path = str(path)
with open(path, access_mode) as fw:
content = fw.read().splitlines()
return content
def inspect_data_split(data_split):
# TODO: if split in 0.725 0.275
train_split = data_split.get('train', 0)
valid_split = data_split.get('valid', 0)
test_split = data_split.get('test', 0)
if train_split+valid_split+test_split != 1:
raise ValueError('Incorrect splitting of dataset.')
split_code = f'{10*train_split:.0f}{10*valid_split:.0f}{10*test_split:.0f}'
return split_code
def get_data_path(data_path, index_root, data_split, keywords=[]):
# TODO: save in csv
# TODO: save with label
split_code = inspect_data_split(data_split)
dataset_name = os.path.split(data_path)[1]
# Check if an index already exists, create one if not.
index_path = os.path.join(index_root, dataset_name)
if not os.path.isdir(index_path):
os.mkdir(index_path)
file_path_list = get_files(data_path, keys=keywords, return_fullpath=True, sort=True)
train_split = int(data_split.get('train', 0)*len(file_path_list))
valid_split = int(data_split.get('valid', 0)*len(file_path_list))
test_split = int(data_split.get('test', 0)*len(file_path_list))
train_path_list = file_path_list[:train_split]
valid_path_list = file_path_list[train_split:train_split+valid_split]
test_path_list = file_path_list[train_split+valid_split:train_split+valid_split+test_split]
train_path = os.path.join(index_path, f'{dataset_name}_train_{split_code}.txt')
valid_path = os.path.join(index_path, f'{dataset_name}_valid_{split_code}.txt')
test_path = os.path.join(index_path, f'{dataset_name}_test_{split_code}.txt')
save_content_in_txt(train_path_list, train_path)
save_content_in_txt(valid_path_list, valid_path)
save_content_in_txt(test_path_list, test_path)
data_path_dict = {
'train': train_path,
'valid': valid_path,
'test': test_path}
else:
file_path_list = get_files(index_root, return_fullpath=True, sort=True)
data_path_dict
return data_path_dict
def get_data_indices(data_name, data_path, save_path, data_split, mode, generate_index_func):
""""Get dataset indices and create if not exist"""
# # TODO: gt not exist condition
# create index folder and sub-folder if not exist
os.chdir(save_path)
index_dir_name = f'{data_name}_data_index'
sub_index_dir_name = f'{data_name}_{data_split[0]}_{data_split[1]}'
input_data_path = os.path.join(save_path, index_dir_name, sub_index_dir_name)
if not os.path.isdir(input_data_path): os.makedirs(input_data_path)
# generate index list and save in txt file
generate_index_func(data_path, data_split, input_data_path)
# load index list from saved txt
os.chdir(input_data_path)
if os.path.isfile(f'{mode}.txt'):
input_data_indices = load_content_from_txt(f'{mode}.txt')
input_data_indices.sort()
else:
input_data_indices = None
if os.path.isfile(f'{mode}_gt.txt'):
ground_truth_indices = load_content_from_txt(f'{mode}_gt.txt')
ground_truth_indices.sort()
else:
ground_truth_indices = None
return input_data_indices, ground_truth_indices
def generate_kaggle_breast_ultrasound_index(data_path, save_path, data_split):
data_keys = {'input': 'exclude_mask', 'ground_truth': 'include_mask'}
save_input_and_label_index(data_path, save_path, data_split, data_keys, loading_format=['png', 'jpg'])
def generate_kaggle_snoring_index(data_path, save_path, data_split):
# TODO: no ground truth case
# TODO: keys=None, include_or_exclude=None case
data_keys = {'input': None}
save_input_and_label_index(data_path, save_path, data_split, data_keys, loading_format=['wav', 'm4a'])
def save_input_and_label_index(data_path, save_path, data_split, data_keys=None, loading_format=None):
# TODO: test input output
# assert 'input' in data_keys, 'Undefined input data key'
# assert 'ground_truth' in data_keys, 'Undefined ground truth data key'
class_name = os.listdir(data_path)
os.chdir(save_path)
include_or_exclude, keys = [], []
if data_keys:
for v in data_keys.values():
if v:
include_or_exclude.append(v.split('_')[0])
keys.append(v.split('_')[1])
data_dict = generate_filenames(
data_path, keys=keys, include_or_exclude=include_or_exclude, is_fullpath=True, loading_formats=loading_format)
def save_content_ops(data, train_name, valid_name):
# TODO: Is this a solid solution?
data.sort(key=len)
split = int(len(data)*data_split[0])
train_input_data, val_input_data = data[:split], data[split:]
save_content_in_txt(train_input_data, train_name, filter_bank=class_name, access_mode="w+", dir=save_path)
save_content_in_txt(val_input_data, valid_name, filter_bank=class_name, access_mode="w+", dir=save_path)
# if data_keys['input']:
# input_data = data_dict[data_keys['input']]
# else:
# input_data = data_dict
if data_keys:
if 'input' in data_keys:
if data_keys['input']:
input_data = data_dict[data_keys['input']]
else:
input_data = data_dict
save_content_ops(input_data, 'train.txt', 'valid.txt')
if 'ground_truth' in data_keys:
if data_keys['ground_truth']:
ground_truth = data_dict[data_keys['ground_truth']]
else:
ground_truth = data_dict
save_content_ops(ground_truth, 'train_gt.txt', 'valid_gt.txt')
else:
input_data = data_dict
save_content_ops(input_data, 'train.txt', 'valid.txt')
# TODO: mkdir?
def save_content_in_txt(content, path, filter_bank=None, access_mode='a+', dir=None):
# assert isinstance(content, (str, list, tuple, dict))
# TODO: overwrite warning
with open(path, access_mode) as fw:
# def string_ops(s, dir, filter):
# pair = string_filtering(s, filter)
# return os.path.join(dir, list(pair.keys())[0], list(pair.values())[0])
if isinstance(content, str):
# if dir:
# content = string_ops(content, dir, filter=filter_bank)
# # content = os.path.join(dir, content)
fw.write(content)
else:
for c in content:
# if dir:
# c = string_ops(c, dir, filter=filter_bank)
# # c = os.path.join(dir, c)
fw.write(f'{c}\n')
def get_path_generator_in_case_order(data_path, return_fullpath, load_format=[]):
dir_list = get_dir_list(data_path)
for d in dir_list:
file_list = get_files(d, keys=load_format, return_fullpath=return_fullpath)
for file_idx, f in enumerate(file_list):
yield (file_idx, f)
def save_data_label_pair_in_csv(data_path, save_path=None, save_name=None, load_format='wav', return_fullpath=True):
path_loader = get_path_generator_in_case_order(data_path, return_fullpath, load_format=load_format)
nums, ids, labels = [], [], []
for idx, file_and_idx in enumerate(path_loader, 1):
file_idx, f = file_and_idx
file_path, file_name = os.path.split(f)
label = int(file_path[-1])
file_name = file_name.split('.')[0]
print(idx, file_name, label)
nums.append(file_idx)
ids.append(file_name)
labels.append(label)
pair_dict = {'case_index': nums,
'id': ids,
'label': labels}
pair_df = pd.DataFrame(pair_dict)
if not save_name:
save_name = 'train.csv'
if save_path is not None:
pair_df.to_csv(os.path.join(save_path, save_name))
else:
pair_df.to_csv(save_name)
def save_fileanmes_in_txt(glob_path, save_path=None, recursive=True):
files = glob.glob(glob_path, recursive=recursive)
if not save_path:
save_path = 'filenames.txt'
with open(save_path, 'w') as fw:
for file in files:
fw.write(f'{file}\n')
return files
def generate_gt_csv_for_data(id_to_label, save_path):
pass
if __name__ == "__main__":
from dataset.melspec import melspec
melspec()
# data_path = rf'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\raw_final_test\freq6_no_limit\2_21\raw_f_h_2_mono_16k'
# save_data_label_pair_in_csv(data_path, save_name='train1.csv')
# data_path = rf'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\raw_final_test\freq6_no_limit_shift\2_21\raw_f_h_2_mono_16k'
# save_data_label_pair_in_csv(data_path, save_name='train2.csv')
# data_path = rf'C:\Users\test\Desktop\Leon\Datasets\ESC-50\ESC-50_process\esc50_16k\esc50_16k_2'
# save_data_label_pair_in_csv(data_path, save_name='train3.csv')
# # ASUS snoring
# train_wav_list_path = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\index\Freq2\2_21_2s_my2\train.txt'
# test_wav_list_path = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\index\Freq2\2_21_2s_my2\test.txt'
# out_dir = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\preprocess\2_21_2s_my2'
# # out_dir = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_cpp\2_21_2s_my2'
# get_melspec_from_cpp(train_wav_list_path, out_dir)
# get_melspec_from_cpp(test_wav_list_path, out_dir)
# # ESC-50
# # wav_list_path = r'C:\Users\test\Desktop\Leon\Datasets\ESC-50\ESC-50_process\esc50\esc50_2\test.txt'
# wav_list_path = r'C:\Users\test\Desktop\Leon\Datasets\ESC-50\ESC-50_process\esc50\esc50_2\file_names.txt'
# out_dir = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\preprocess\esc50\44100'
# # out_dir = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_cpp\esc50\44100'
# get_melspec_from_cpp(wav_list_path, out_dir, sampling_rate=44100)
# Kaggle snoring
# wav_list_path = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\index\Kaggle_snoring_full\valid.txt'
# out_dir = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_cpp\kaggle'
# get_melspec_from_cpp(wav_list_path, out_dir, sampling_rate=48000)
# # ASUS new
# redmi = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_0727\wave_split\1658889529250_RedmiNote8\0'
# pixel = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_0727\wave_split\1658889531056_Pixel4XL\0'
# iphone = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_0727\wave_split\1658889531172_iPhone11\0'
# sr = 16000
# # for data_path in [redmi, pixel, iphone]:
# for data_path in [redmi]:
# split = os.path.split(os.path.split(data_path)[0])[1]
# glob_path = os.path.join(data_path, '*.wav')
# save_path = os.path.join(data_path, 'filenames.txt')
# save_fileanmes_in_txt(glob_path, recursive=True, save_path=save_path)
# wav_list_path = save_path
# out_dir = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_cpp'
# out_dir = os.path.join(out_dir, split, str(sr))
# get_melspec_from_cpp(wav_list_path, out_dir, sampling_rate=sr)
| wdwlinda/Snoring_Detection_full | dataset/dataset_utils.py | dataset_utils.py | py | 23,868 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.split",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
73034200353 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import mdadm
# Globals
# Salt state modules resolve execution functions via __salt__ and runtime
# options via __opts__; the tests below inject per-case mocks with patch.dict.
mdadm.__salt__ = {}
mdadm.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MdadmTestCase(TestCase):
    '''
    Validate the mdadm state (salt.states.mdadm).

    Every scenario patches mdadm.__salt__ / mdadm.__opts__ with MagicMocks,
    so no real RAID arrays are touched.  The order of the nested
    ``patch.dict`` blocks matters: the ``raid.list`` mock's ``side_effect``
    queue is consumed once per ``present``/``absent`` call.
    '''
    def test_present(self):
        '''
        Test to verify that the raid is present.

        Covers four scenarios: the raid already exists, the device list
        mixes RAID and non-RAID constituents, dry-run (test=True) creation,
        and a failed creation.
        '''
        # Expected state return dicts, in the order the scenarios run.
        ret = [{'changes': {}, 'comment': 'Raid salt already present',
                'name': 'salt', 'result': True},
               {'changes': {},
                'comment': "Devices are a mix of RAID constituents"
                " (['dev0']) and non-RAID-constituents(['dev1']).",
                'name': 'salt', 'result': False},
               {'changes': {},
                'comment': 'Raid will be created with: True', 'name': 'salt',
                'result': None},
               {'changes': {}, 'comment': 'Raid salt failed to be created.',
                'name': 'salt', 'result': False}]
        # raid.list is called once per mdadm.present() invocation below;
        # the first call reports the raid as existing, the rest as absent.
        mock = MagicMock(side_effect=[{'salt': True}, {'salt': False},
                                      {'salt': False}, {'salt': False},
                                      {'salt': False}])
        with patch.dict(mdadm.__salt__, {'raid.list': mock}):
            # Scenario 1: raid already present -> no changes.
            self.assertEqual(mdadm.present("salt", 5, "dev0"), ret[0])
            # Scenario 2: cmd.retcode returns 0 (RAID member) for dev0 and
            # 1 (non-member) for dev1, producing the "mix" error.
            mock = MagicMock(side_effect=[0, 1])
            with patch.dict(mdadm.__salt__, {'cmd.retcode': mock}):
                self.assertDictEqual(mdadm.present("salt", 5,
                                                   ["dev0", "dev1"]),
                                     ret[1])
            # Scenarios 3 and 4 reuse one truthy mock for both cmd.retcode
            # and raid.create.
            mock = MagicMock(return_value=True)
            with patch.dict(mdadm.__salt__, {'cmd.retcode': mock}):
                with patch.dict(mdadm.__opts__, {'test': True}):
                    # Dry run: state reports what it *would* create.
                    with patch.dict(mdadm.__salt__, {'raid.create': mock}):
                        self.assertDictEqual(mdadm.present("salt", 5, "dev0"),
                                             ret[2])
                with patch.dict(mdadm.__opts__, {'test': False}):
                    # raid.create "succeeds" but raid.list still reports the
                    # raid missing, so the state reports a failed creation.
                    with patch.dict(mdadm.__salt__, {'raid.create': mock}):
                        self.assertDictEqual(mdadm.present("salt", 5, "dev0"),
                                             ret[3])
    def test_absent(self):
        '''
        Test to verify that the raid is absent.

        Covers: raid already absent, dry-run destruction, and actual
        destruction.
        '''
        ret = [{'changes': {}, 'comment': 'Raid salt already absent',
                'name': 'salt', 'result': True},
               {'changes': {},
                'comment': 'Raid saltstack is set to be destroyed',
                'name': 'saltstack', 'result': None},
               {'changes': {}, 'comment': 'Raid saltstack has been destroyed',
                'name': 'saltstack', 'result': True}]
        # raid.list always reports only "saltstack" as existing.
        mock = MagicMock(return_value=["saltstack"])
        with patch.dict(mdadm.__salt__, {'raid.list': mock}):
            # "salt" is not in the list -> already absent.
            self.assertDictEqual(mdadm.absent("salt"), ret[0])
            with patch.dict(mdadm.__opts__, {'test': True}):
                # Dry run: destruction only announced.
                self.assertDictEqual(mdadm.absent("saltstack"), ret[1])
            with patch.dict(mdadm.__opts__, {'test': False}):
                mock = MagicMock(return_value=True)
                with patch.dict(mdadm.__salt__, {'raid.destroy': mock}):
                    self.assertDictEqual(mdadm.absent("saltstack"), ret[2])
if __name__ == '__main__':
    # Allow running this module directly via the salt integration test runner;
    # needs_daemon=False because only mocked state calls are exercised.
    from integration import run_tests
    run_tests(MdadmTestCase, needs_daemon=False)
| shineforever/ops | salt/tests/unit/states/mdadm_test.py | mdadm_test.py | py | 3,815 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "salttesting.helpers.ensure_in_syspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "salt.states.mdadm.__salt__",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "salt.states.mdadm",
"line_number": 25,
"usage_type": "name"
},
... |
4154952558 | #
#
# UtmTrav : calculation of an open and cloded traverse
# projected on planeor UTM projected grid.
#
#
from pygeodesy.dms import toDMS, parseDMS
from pygeodesy import Utm,parseUTM5,Ellipsoids
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import yaml,sys
from pathlib import Path
###############################################################
def parse_dms( dms ):
    """Convert a colon-separated DD:MM:SS string to decimal degrees."""
    decimal_degrees = parseDMS( dms, sep=':')
    return decimal_degrees
class UTMTraverse:
    """
    Compute an open (and optionally closed) survey traverse on a UTM grid.

    Input comes from a YAML file holding fixed control points (FIXPNT),
    measured distances (DIST) and turned angles (ANGL).  Successive calls to
    CalcTraverse() propagate coordinates; later iterations refine them with
    line scale factors and arc-to-chord corrections.

    NOTE(review): Clousure(), AdjustCompassRule() and parts of Plot() read
    the module-level globals ``trav``, ``df_Sta`` and ``df_Dis`` created by
    the script body below instead of ``self`` — verify before reusing this
    class outside this script.
    """
    def __init__(self, YAMLFILE):
        # Load the traverse definition (zone, hemisphere, points, obs).
        with open(YAMLFILE,'r') as f:
            YAML = yaml.safe_load( f )
        # Fixed stations: build 'ZONE HEMI E N' strings, parse them to Utm
        # objects, and tag with Iter=-1 (meaning: fixed, not computed).
        self.df_Sta = pd.DataFrame.from_dict(YAML['FIXPNT'] ,
                orient='index', columns=['Easting','Northing'] )
        def UtmStr(row):
            # Prefix each coordinate pair with the grid zone/hemisphere.
            UTM_PRE = f"{YAML['ZONE']} {YAML['HEMI']} "
            return '{} {} {}'.format( UTM_PRE, row.Easting, row.Northing )
        self.df_Sta['UTMSTR'] = self.df_Sta.apply( UtmStr, axis='columns')
        self.df_Sta['Utm'] = self.df_Sta['UTMSTR'].apply(parseUTM5)
        self.df_Sta.drop( columns=['Easting','Northing'], axis='index', inplace=True )
        self.df_Sta['Iter'] = -1
        self.df_Sta.reset_index(inplace=True)
        self.df_Sta.rename( columns={'index':'Name'}, inplace=True)
        self.df_Sta['Name'] = self.df_Sta['Name'].astype( str )
        ##############################
        # Measured distances, keyed 'FROM,TO'; Iter=-1 marks raw input.
        self.df_Dis = pd.DataFrame.from_dict( YAML['DIST'],orient='index',
                          columns=['dist_m'] )
        self.df_Dis['Iter'] = -1
        self.df_Dis['ScalFac'] = 0.0
        self.df_Dis.reset_index(inplace=True)
        self.df_Dis.rename( columns={'index':'Name'}, inplace=True)
        ##############################
        # Turned angles, keyed 'BS,AT,FS', parsed from D:M:S to degrees.
        self.df_Ang = pd.DataFrame.from_dict( YAML['ANGL'],orient='index',
                          columns=['ang_dms'] )
        self.df_Ang['ang_deg'] = self.df_Ang['ang_dms'].apply( parse_dms )
        self.df_Ang['Iter'] = -1
        # ac_BS / ac_FS hold arc-to-chord corrections (seconds) per sight.
        self.df_Ang[['ac_BS','ac_FS']] = [0.,0.]
        self.df_Ang['type'] = 'open'
        self.df_Ang.reset_index(inplace=True)
        self.df_Ang.rename( columns={'index':'Name'}, inplace=True)
        ##############################
        self.YAML = YAML
        #import pdb; pdb.set_trace()
        return
    def CalcTraverse(self,ITER):
        """
        Propagate station coordinates through one traverse iteration.

        ITER 0 uses unit scale factor and zero arc-to-chord corrections;
        subsequent iterations recompute both from the previous coordinates.
        Results are appended (not overwritten) with their Iter number.
        """
        # Work on the most recent coordinate set plus raw observations.
        df_Sta = self.df_Sta[( self.df_Sta['Iter']==self.df_Sta['Iter'].max() )
                             | (self.df_Sta['Iter']==-1) ]
        df_Dis = self.df_Dis[self.df_Dis['Iter']==-1]
        df_Ang = self.df_Ang[self.df_Ang['Iter']==-1]
        for i in range(len(df_Dis)):
            DIS_AT,DIS_TO = self.df_Dis.iloc[i]['Name'].split(',')
            DIST = self.df_Dis.iloc[i]['dist_m']
            ANG_BS,ANG_AT,ANG_FS = self.df_Ang.iloc[i]['Name'].split(',')
            ANGL = self.df_Ang.loc[i]['ang_deg']
            TYPE = self.df_Ang.loc[i]['type']
            # Latest coordinates of backsight and occupied stations.
            UtmBS = self.df_Sta[self.df_Sta['Name']==ANG_BS].iloc[-1]['Utm']
            UtmAT = self.df_Sta[self.df_Sta['Name']==ANG_AT].iloc[-1]['Utm']
            ######################################
            if ITER==0:
                # First pass: foresight coords unknown, so no corrections.
                k = 1.0 ; ac1 = 0.0 ; ac2 = 0.0
            else:
                UtmFS = self.df_Sta[self.df_Sta['Name']==ANG_FS].iloc[-1]['Utm']
                k = self.LineSF( UtmAT, UtmFS)
                ac1 = self.LineAC( UtmAT, UtmBS)
                ac2 = self.LineAC( UtmAT, UtmFS)
                #ac1 = 27.17/3600; #ac2 = 20.67/3600
            # Forward azimuth = back azimuth + corrected turned angle,
            # normalized to [0, 360).
            BS_Azi = self.GridAz( UtmAT, UtmBS )
            corrAng = ANGL + (ac1-ac2 )
            FS_Azi = BS_Azi + corrAng
            _,FS_Azi = divmod( FS_Azi, +360 )
            ######################################
            # Grid distance = measured distance * line scale factor.
            GD = k*DIST
            ETo = UtmAT.easting + GD*np.sin( np.radians(FS_Azi) )
            NTo = UtmAT.northing + GD*np.cos( np.radians(FS_Azi) )
            ######################################
            # Append the newly computed station/distance/angle rows tagged
            # with this iteration number.
            df_StaX = pd.DataFrame( { 'Name': ANG_FS,
                'UTMSTR':None, 'Utm': Utm( self.YAML['ZONE'], self.YAML['HEMI'],
                ETo,NTo),'Iter': ITER }, index=[0] )
            self.df_Sta = pd.concat( [self.df_Sta, df_StaX], axis=0, ignore_index=True )
            df_DisX = pd.DataFrame( { 'Name':f'{DIS_AT},{DIS_TO}',
                'dist_m': GD, 'AdjAz':FS_Azi, 'Iter': ITER, 'ScalFac': k }, index=[0] )
            self.df_Dis = pd.concat( [self.df_Dis,df_DisX], axis=0, ignore_index=True )
            df_AngX= pd.DataFrame( { 'Name': f'{ANG_BS},{ANG_AT},{ANG_FS}',
                'ang_dms': toDMS(corrAng), 'ang_deg': corrAng, 'Iter': ITER,
                'ac_BS': ac1*3600, 'ac_FS': ac2*3600, 'type':TYPE }, index=[0] )
            self.df_Ang = pd.concat( [ self.df_Ang, df_AngX ], axis=0,ignore_index=True )
            #import pdb; pdb.set_trace()
        return self.df_Sta, self.df_Dis, self.df_Ang
    def GridAz( self, UtmFr, UtmTo ):
        """Grid azimuth (degrees, 0-360, from grid north) from UtmFr to UtmTo."""
        dE = UtmTo.easting-UtmFr.easting
        dN = UtmTo.northing-UtmFr.northing
        az = np.arctan2( dE, dN )
        _,az = divmod( az, np.pi*2 ) # 2PI
        return np.degrees( az )
    def LineSF( self,StaFr,StaTo ):
        """
        Line scale factor between two stations via Simpson's rule:
        (k1 + 4*k_mid + k2) / 6 over the endpoint and midpoint point
        scale factors.
        """
        utm_mid = Utm( self.YAML['ZONE'], self.YAML['HEMI'],
                       (StaFr.easting +StaTo.easting)/2,
                       (StaFr.northing+StaTo.northing)/2 )
        sf_to = StaTo.toLatLon().scale
        sf_mid = utm_mid.toLatLon().scale
        sf_fr = StaFr.toLatLon().scale
        sf_line = (1/6)*(sf_fr+4*sf_mid+sf_to) # quadratic mean
        #sf_line = (sf_fr+sf_to)/2 # average
        return sf_line
    def LineAC( self,StaFr,StaTo ):
        """
        Arc-to-chord (t-T) correction in degrees for the line StaFr->StaTo,
        or 0.0 when ARC2CHORD is disabled in the YAML config.
        Uses the Gaussian radius of curvature at the line midpoint;
        eastings are taken relative to the 500 km false easting.
        """
        if 'ARC2CHORD' in self.YAML.keys() and self.YAML['ARC2CHORD'] is True:
            utm_mid = Utm( self.YAML['ZONE'], self.YAML['HEMI'],
                           (StaFr.easting+StaTo.easting)/2,
                           (StaFr.northing+StaTo.northing)/2 )
            rm = Ellipsoids.WGS84.rocGauss(utm_mid.toLatLon().lat)
            N2N1 = StaTo.northing-StaFr.northing
            E2E1 = (StaTo.easting-500_000) + 2*( StaFr.easting-500_000)
            del12 = np.degrees( N2N1*E2E1/(6*rm*rm) )
            return del12
        else:
            return 0.0
    def CloseTrav( self ):
        """
        Adjust the closed-loop portion of the traverse.

        Locates the two angle records at the JUNCTION station, distributes
        the interior-angle excess ((n-2)*180) evenly over the closed loop,
        then merges the pair of junction angles into a single record.
        """
        print('####### Adjust Interior Angles of the Close Traverse ########')
        self.JuncPnt = self.YAML['JUNCTION']
        df_JuncAng = self.df_Ang[self.df_Ang['Name'].str.contains(
                    f',{self.JuncPnt},' )]
        assert( len(df_JuncAng) == 2),\
                f"***ERROR*** {self.JuncPnt} must have 2 records in ANGL..."
        ##### partition open & close travesre #####
        df_AngCloTrv = self.df_Ang[ self.df_Ang.Name==df_JuncAng.iloc[1].Name ]
        nameAng = df_AngCloTrv.Name.str.split(',').to_list()
        nameAng = np.array( nameAng )
        idx = df_AngCloTrv.index.values.astype(int)[0]
        df_AngCloTrv = self.df_Ang.iloc[ idx: ]
        ##### angle correction due to angel excess #####
        sumAng = df_AngCloTrv.ang_deg.sum()
        sumAng_= 180*(len(df_AngCloTrv)-2)
        nAng = len( df_AngCloTrv )
        difAng = sumAng-sumAng_
        print(f'Close traverse number of angles : {nAng}')
        print(f'Close traverse angle excess (n-2)*180 : {difAng*3600:+.0f} sec.')
        # Distribute the excess equally over every angle in the loop.
        self.df_Ang.loc[ idx:idx+nAng, 'ang_deg' ] = \
                self.df_Ang.loc[ idx:idx+nAng, 'ang_deg' ]-difAng/nAng  # ***ANG_ADJ***
        self.df_Ang.loc[ idx:idx+nAng, 'type' ] = 'closed'
        self.df_Ang['ang_dms'] = self.df_Ang['ang_deg'].apply( toDMS )
        assert(np.isclose(self.df_Ang.iloc[idx:].ang_deg.sum(),sumAng_ )),"Not closed!"
        print('Close traverse sum of angels : {:.7f} deg'.format(
                            df_AngCloTrv.ang_deg.sum() ) )
        ##### merge junction angles #####
        # The two sights at the junction share a common point; collapse
        # them into one BS,AT,FS record whose angle is their sum.
        jncAngSum = df_JuncAng.ang_deg.sum()
        jncAngNam = self.df_Ang.iloc[idx-1].Name+','+self.df_Ang.iloc[idx].Name
        jncAngNam = jncAngNam.split(',')
        assert( jncAngNam[1]==jncAngNam[4] or jncAngNam[2]==jncAngNam[3]),\
                f'***ERROR*** cannot merge {jncAngNam}'
        jncAng = jncAngNam[0:2]+ [jncAngNam[-1],]
        jncAng = ",".join( jncAng )
        self.df_Ang.loc[ idx, ['Name', 'ang_deg', 'type' ] ] =\
                                [jncAng,jncAngSum,'junction']
        self.df_Ang.drop( idx-1, axis='index', inplace=True )
        return
    def Clousure( self ):
        """
        Report the linear misclosure at the junction point.

        NOTE(review): reads the module-level globals ``trav`` and ``df_Sta``
        rather than ``self`` — only valid when driven by the script below.
        The re-computed junction is expected under the name '<JPNT>_'.
        """
        JPNT=trav.YAML['JUNCTION']
        jpnt = df_Sta[df_Sta.Name==JPNT].iloc[0].Utm
        jpnt_ = df_Sta[df_Sta.Name==JPNT+'_'].iloc[0].Utm
        self.dE = jpnt.easting -jpnt_.easting
        self.dN = jpnt.northing-jpnt_.northing
        self.dJPNT = np.sqrt( self.dE*self.dE + self.dN*self.dN )
        print(f'Closure at junction {JPNT:6} : {self.dJPNT:+.3f} m.')
        self.idxJPNT =trav.df_Dis[trav.df_Dis.Name.str.startswith(f'{JPNT},')].iloc[-1].name
        self.sumTrv = trav.df_Dis.iloc[self.idxJPNT:].dist_m.sum()
        print(f'Sum of closed traverse : {self.sumTrv:,.3f} m.')
        print(f'Linear closure : 1: {self.sumTrv/self.dJPNT:,.0f} ')
    def AdjustCompassRule( self):
        """
        Distribute the misclosure over the closed loop with the compass
        (Bowditch) rule and print the adjusted coordinates.

        NOTE(review): relies on Clousure() having set self.dE/dN/dJPNT and
        idxJPNT, and reads the module-level globals ``df_Dis`` / ``trav``.
        """
        dE= self.dE ; dN=self.dN ; dS=self.dJPNT
        df_DL = df_Dis.iloc[self.idxJPNT:].copy()
        df_DL.drop( columns=['ScalFac'], axis='index', inplace=True )
        #import pdb; pdb.set_trace()
        # Departures/latitudes per leg, then corrections proportional to
        # each leg's share of the total loop length (compass rule).
        df_DL['Dep'] = df_DL['dist_m']*np.sin(np.radians(df_DL['AdjAz']))
        df_DL['Lat'] = df_DL['dist_m']*np.cos(np.radians(df_DL['AdjAz']))
        sumDep = df_DL['Dep'].sum()
        sumLat = df_DL['Lat'].sum()
        sumDis = df_DL.dist_m.sum()
        df_DL['AdjDep'] = df_DL['Dep'] - sumDep*df_DL['dist_m']/sumDis
        df_DL['AdjLat'] = df_DL['Lat'] - sumLat*df_DL['dist_m']/sumDis
        df_DL_ = df_DL.copy()
        for col in ( 'Dep','Lat','AdjDep','AdjLat' ):
            df_DL_[col] = df_DL_[col].map('{:10.3f}'.format )
        print( df_DL_ )
        #import pdb; pdb.set_trace()
        sumAdjDep = df_DL['AdjDep'].sum()
        sumAdjLat = df_DL['AdjLat'].sum()
        print( f'sumDist={sumDis:.3f}m | sumDep={sumDep:+.3f}m sumLat={sumLat:+.3f}m | '\
               f'sumAdjDep={sumAdjDep:.3f}m sumAdjLat={sumAdjLat:.3f}m' )
        #####################################
        # Walk the loop accumulating adjusted departures/latitudes.
        print( '=== Adjusted Coordiate by Compass Rule ===')
        print( '=Station=====Easting========Northing=====')
        FRutm = None
        for i,row in df_DL.iterrows():
            FR,TO = row.Name.split(',')
            if FRutm is None:
                FRutm = trav.df_Sta[ trav.df_Sta.Name==FR ].iloc[-1].Utm
                E,N = FRutm.easting, FRutm.northing
                print( f'{FR:6s} {E:15,.3f} {N:15,.3f}' )
            E = E + row.AdjDep
            N = N + row.AdjLat
            print( f'{TO:6s} {E:15,.3f} {N:15,.3f}' )
    def Plot(self, TITLE):
        """
        Plot fixed (green) and computed (red) stations plus the iteration-0
        traverse legs, and save the figure to the path given by TITLE.
        """
        df_Sta = self.df_Sta[self.df_Sta.Iter<=0]
        df_Dis = self.df_Dis[self.df_Dis.Iter==0]
        #import pdb; pdb.set_trace()
        for i,row in df_Sta.iterrows():
            #print( i,row )
            # Fixed stations (Iter==-1) are large/green, computed are red.
            if row.Iter==-1:  ms = 20; fc='green'
            else: ms = 10; fc='red'
            plt.plot( [row.Utm.easting], [row.Utm.northing],
                        marker="^", markersize=ms, mfc=fc, mec=fc )
            plt.text( row.Utm.easting,row.Utm.northing, row.Name )
        for i,row in df_Dis.iterrows():
            #print( i, row )
            FR,TO = row.Name.split(',')
            fr = df_Sta[df_Sta.Name==FR].iloc[-1].Utm
            to = df_Sta[df_Sta.Name==TO].iloc[-1].Utm
            plt.plot( [fr.easting,to.easting] , [fr.northing,to.northing], '-r' )
        plt.ticklabel_format(useOffset = False, style = 'plain')
        plt.xticks(rotation=90)
        plt.gca().set_aspect("equal")
        plt.grid()
        plt.tight_layout()
        plt.title( TITLE )
        #plt.show()
        plt.savefig( TITLE )
###########################################################
###########################################################
###########################################################
# Show full DataFrames when printing results.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
# YAML traverse definition: from argv[1], or a default demo file.
if len(sys.argv)==2:
    YAMLFILE = Path(sys.argv[1])
else:
    YAMLFILE = Path( './Dec17_Travese_Compute/P4_Trav.yaml' )
trav = UTMTraverse( YAMLFILE )
### Case closed traverse , adjust interior angles #########
if 'JUNCTION' in trav.YAML.keys():
    trav.CloseTrav()
###########################################################
# Iterate the traverse computation the configured number of times;
# later iterations use scale factors / arc-to-chord corrections.
for i in range(trav.YAML['ITER']):
    print(f'========================= Iter {i} ============================')
    df_Sta,df_Dis,df_Ang = trav.CalcTraverse(ITER=i)
trav.Clousure()
###########################################################
def _toUtmStr(row):
    # Fill in the printable UTM string for computed stations (fixed
    # stations already carry one from the YAML input).
    if row.UTMSTR is None:
        return row.Utm.toStr(prec=-3)
    else:
        return row.UTMSTR
df_Sta['UTMSTR'] = df_Sta.apply( _toUtmStr, axis='columns' )
df_Dis['ScalFac'] = df_Dis['ScalFac'].map( '{:.9f}'.format )
if 1:
    print( df_Sta )
    print( df_Dis )
    print( df_Ang )
# Convergence check: coordinate change of each station between the last
# two iterations.
if trav.YAML['ITER']>=2:
    dE=list(); dN=list()
    for grp,row in df_Sta.groupby('Name'):
        MAX = row.Iter.max()
        if MAX==trav.YAML['ITER']-1:
            dE.append( row.iloc[MAX].Utm.easting- row.iloc[MAX-1].Utm.easting )
            dN.append( row.iloc[MAX].Utm.northing- row.iloc[MAX-1].Utm.northing )
    print( 'Converting@{} dE : {}'.format(trav.YAML['ITER'], np.array( dE ).round(3) ) )
    print( 'Converting@{} dN : {}'.format(trav.YAML['ITER'], np.array( dN ).round(3) ) )
# Save the traverse sketch in several formats under ./CACHE/.
for fmt in ('png','pdf','svg'):
    PLOT = f'./CACHE/{YAMLFILE.stem}.{fmt}'
    print( f'Plotting {PLOT}...')
    trav.Plot( PLOT )
#trav.Plot( (YAMLFILE.stem + '.pdf' ) )
#############################################################
trav.AdjustCompassRule()
print( '@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ End of UtmTrav.py @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#import pdb; pdb.set_trace()
| phisan-chula/Construction_Survey | UtmTraverse/UtmTrav.py | UtmTrav.py | py | 13,935 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pygeodesy.dms.parseDMS",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pand... |
18927725262 | #!/usr/bin/env python3.8
"""
Author : Kashif Khan
This script helps to search Endpoint in following criteria.
1: All results
2: Filter by Node ID
3: Filter by EPG
4: Filter by VLAN ID
5: Filter by Interface Name
6: Filter by Tenant Name
7: Filter by MAC Address
"""
# Imports block
from connectivity import get_aci_token
from credentials import credentials
import json
import requests
from pprint import pprint
from prettytable import PrettyTable
import csv
def get_ep_details(aci_cookie, apic_ip):
    """
    Query the APIC for every endpoint (fvCEp) with its full subtree.

    Parameters:
        aci_cookie (dict): session cookie returned by get_aci_token()
        apic_ip (string): APIC controller base URL/IP

    Returns:
        dict: decoded JSON API response
    """
    query = ('fvCEp.json?rsp-subtree=full'
             '&rsp-subtree-class=fvCEp,fvRsCEpToPathEp,fvIp,fvRsHyper,fvRsToNic,fvRsToVm')
    request_url = f'{apic_ip}/api/node/class/' + query
    response = requests.get(
        request_url,
        headers={'cache-control': "no-cache"},
        cookies=aci_cookie,
        verify=False)  # APIC controllers commonly use self-signed certs
    return response.json()
def get_processed_data(get_ep_details_result):
    """
    Flatten the raw fvCEp API response into one dict per endpoint.

    Parameters:
        get_ep_details_result (dict): JSON payload from get_ep_details();
            'imdata' holds {'fvCEp': {...}} entries whose 'attributes' dict
            carries mac/ip/encap/dn and whose children list may carry an
            fvRsCEpToPathEp record with the interface path (tDn).

    Returns:
        list[dict]: dicts with keys mac_address, vlan, tenant,
        application_profile, epg, switch, interface.
    """
    def _strip_prefix(text, prefix):
        # BUG FIX: str.lstrip(prefix) strips a *character set*, not a
        # prefix — e.g. 'net-Tenant'.lstrip('tn-') -> 'et-Tenant'.
        # Remove the literal prefix instead (py3.8-compatible).
        return text[len(prefix):] if text.startswith(prefix) else text

    data = []
    for each_ep in get_ep_details_result['imdata']:
        line_dict = {}
        for key, value in each_ep['fvCEp'].items():
            if isinstance(value, list):
                # Children list: find the relation carrying the port path.
                for each_tdn in value:
                    if each_tdn.get('fvRsCEpToPathEp'):
                        line_dict.update({'path': each_tdn['fvRsCEpToPathEp']['attributes']['tDn']})
            else:
                # Attributes dict of the endpoint itself.
                line_dict.update({'mac': value['mac']})
                line_dict.update({'ip': value['ip']})
                line_dict.update({'encap': value['encap']})
                line_dict.update({'dn': value['dn']})
        data.append(line_dict)
    processed_data = list()
    for row in data:
        # dn:   'uni/tn-<tenant>/ap-<ap>/epg-<epg>/cep-<mac>'
        # path: 'topology/pod-<n>/paths-<node>/pathep-[<iface>]'
        dn_splitted_list = row['dn'].split("/")
        tdn_splitted_list = row['path'].split("/")[2:]
        switch_splitted = tdn_splitted_list[0].split("-")[1:]
        tdn_splitted_list = "/".join(tdn_splitted_list[1:])
        interface_splitted = tdn_splitted_list.split("-")[1:]
        mac = row['mac']
        vlan = _strip_prefix(row['encap'], "vlan-")
        tenant = _strip_prefix(dn_splitted_list[1], "tn-")
        ap = _strip_prefix(dn_splitted_list[2], "ap-")
        epg = _strip_prefix(dn_splitted_list[3], "epg-")
        switch = "-".join(switch_splitted)
        interface = "-".join(interface_splitted).strip("[]")
        processed_data.append({'mac_address': mac, 'vlan': vlan,
                               'tenant': tenant, 'application_profile': ap,
                               'epg': epg, 'switch': switch,
                               'interface': interface})
    return processed_data
def print_details_onscreen(processed_data):
    """Render the endpoint records as a left-aligned ASCII table on stdout."""
    headers = ['MAC Address', 'VLAN', 'Tenant', 'AP', 'EPG', 'Switch', 'Interface']
    table = PrettyTable()
    table.field_names = headers
    for header in headers:
        table.align[header] = 'l'
    row_total = 0
    for record in processed_data:
        row_total += 1
        table.add_row(list(record.values()))  # one row per endpoint dict
    print(table)
    print(f"Total number of interfaces where Endpoint mac-address learnd are {row_total}")
def get_filtered_data_func(filter_value, filter_type, get_data):
    """Print only the endpoint rows whose *filter_type* field contains *filter_value*."""
    matches = []
    for record in get_data:
        if filter_value in record[filter_type]:
            matches.append(record)
    print_details_onscreen(matches)
def save_to_csv(list_of_all_data, filename='ep_data.csv'):
    """
    Write the endpoint dicts to a CSV file.

    Parameters:
        list_of_all_data (list[dict]): rows to write; the first row's keys
            define the CSV header.
        filename (str): output path; defaults to 'ep_data.csv' so existing
            callers keep their behavior.
    """
    if not list_of_all_data:
        # The original indexed [0] here and crashed on an empty export.
        print("No data to save!")
        return
    keys = list_of_all_data[0].keys()
    # `with` ensures the handle is closed even if writing fails.
    with open(filename, 'w', newline='') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(list_of_all_data)
    print('\n' + '-'*30)
    print("File has been saved!!!")
    print('-'*30 + '\n')
def main():
    """
    Interactive entry point: authenticate to the APIC, fetch and flatten all
    endpoints once, then loop over a text menu offering on-screen (optionally
    filtered) display or a CSV export.
    """
    # One-time fetch: all filtering below runs on this in-memory snapshot.
    aci_cookie = get_aci_token(
        credentials["username"], credentials["password"], credentials["apic_ip"])
    get_ep_details_result = get_ep_details(aci_cookie, credentials["apic_ip"])
    get_data = get_processed_data(get_ep_details_result)
    main_operations_list = ['Exit',
                            'Print Endpoint details on screen',
                            'Save data to CSV']
    while True:
        # Top-level menu; index 0 exits.
        for index,main_items in enumerate(main_operations_list,0):
            print(f"{index}: {main_items}")
        main_operation = input('\nChoose number to select type of operation : ')
        if main_operation == '0':
            break
        elif main_operation == '1':
            sub_operations1_list = ['Exit',
                                    'All results',
                                    'Filter by Node ID',
                                    'Filter by EPG',
                                    'Filter by VLAN ID',
                                    'Filter by Interface Name',
                                    'Filter by Tenant Name',
                                    'Filter by MAC Address',
                                    ]
            while True:
                # Display sub-menu; each option maps to a field of the
                # processed endpoint dicts.
                for index,sub_menu_items in enumerate(sub_operations1_list,0):
                    print(f"{index}: {sub_menu_items}")
                subops1 = input('\nChoose number to select type of operation : ')
                if subops1 == '0':
                    break
                elif subops1 == '1':
                    print_details_onscreen(get_data)
                elif subops1 =='2':
                    filter_value = input("Enter Node ID: ")
                    filter_type='switch'
                    # ACI leaf node IDs are expected as 3-digit strings.
                    if len(filter_value) != 3:
                        print('Wrong Node ID! try again')
                        continue
                    get_filtered_data_func(filter_value,filter_type,get_data)
                elif subops1 =='3':
                    filter_value = input("Enter EPG: ")
                    filter_type='epg'
                    get_filtered_data_func(filter_value,filter_type,get_data)
                elif subops1 =='4':
                    filter_value = input("Enter VLAN ID: ")
                    filter_type='vlan'
                    get_filtered_data_func(filter_value,filter_type,get_data)
                elif subops1 =='5':
                    filter_value = input("Enter Interface ID: ")
                    filter_type='interface'
                    get_filtered_data_func(filter_value,filter_type,get_data)
                elif subops1 =='6':
                    filter_value = input("Enter Tenant Name: ")
                    filter_type='tenant'
                    get_filtered_data_func(filter_value,filter_type,get_data)
                elif subops1 =='7':
                    # MAC addresses are stored upper-case in the APIC data.
                    filter_value = input("Enter MAC Address: ").upper()
                    filter_type='mac_address'
                    get_filtered_data_func(filter_value,filter_type,get_data)
        elif main_operation == '2':
            save_to_csv(get_data)
# Run the interactive menu only when executed as a script.
if __name__ == '__main__':
    main()
| me-kashif/dcops | get_ep_details.py | get_ep_details.py | py | 7,553 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "prettytable.PrettyTable",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "connectivity.get... |
39411332651 | import pyrealsense2 as rs
import numpy as np
from pybot.cmn_structs import *
from pybot.sensor_structs import *
class RealsenseInterface():
    """
    Iterator-style wrapper around an Intel RealSense camera.

    __init__ configures and starts depth/color/accel/gyro streams; each
    __next__ call returns one aligned color+depth frame pair converted to
    the project's Image/CameraInfo structs, or None when a frame is invalid.
    """
    def __init__(self):
        self.pipeline = rs.pipeline()
        config = rs.config()
        # Stream formats/rates: 1280x720 z16 depth and 1920x1080 BGR color
        # at 30 fps, plus IMU streams at their native rates.
        config.enable_stream(rs.stream.depth,1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
        config.enable_stream(rs.stream.accel, rs.format.motion_xyz32f, 250)
        config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, 200)
        # Start streaming
        profile = self.pipeline.start(config)
        depth_sensor = profile.get_device().first_depth_sensor()
        # Align every depth frame into the color camera's frame of reference.
        align_to = rs.stream.color
        self.align = rs.align(align_to)
    def __next__(self):
        # Blocks until a coherent frameset arrives from the device.
        frames = self.pipeline.wait_for_frames()
        # Align the depth frame to color frame
        aligned_frames = self.align.process(frames)
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        # NOTE(review): pose_frame is extracted but never used or returned —
        # confirm whether IMU data was meant to be part of the output.
        for frame in frames:
            if frame.is_motion_frame():
                pose_frame = frame.as_motion_frame().get_motion_data()
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            print("Either color or depth frame is invalid, skipping")
            return
        # NOTE(review): casting images and intrinsics to int64 truncates the
        # fractional parts of fx/fy/ppx/ppy — verify this is intentional.
        depth_image = np.array(aligned_depth_frame.get_data()).astype(np.int64)
        color_image = np.array(color_frame.get_data()).astype(np.int64)
        i = color_frame.profile.as_video_stream_profile().get_intrinsics()
        # 4x4 pinhole intrinsics matrix in homogeneous form.
        intrin = np.array([[i.fx,0,i.ppx,0],[0,i.fy,i.ppy,0],[0,0,1,0],[0,0,0,1]]).astype(np.int64)
        #convert to our structs
        header = make_header()
        height,width=color_image.shape[:2]
        color_image = Image(header,height,width,"BGR8",color_image)
        depth_image = Image(header,height,width,"Z16",depth_image)
        intrin = CameraInfo(height,width,None,None,intrin,None,None)
        return {'color':color_image,'depth':depth_image,'intrin':intrin}
| glebshevchukk/pybot | pybot/interfaces/realsense_interface.py | realsense_interface.py | py | 2,167 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyrealsense2.pipeline",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.stream",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pyre... |
23999295068 | import os
import pickle
import numpy as np
import pandas as pd
import sdv
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from scipy.stats import pearsonr
from sklearn import preprocessing
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model import Lasso
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from hyperparameters import classifiers, param_grids
from settings import file_extension, undersample, oversample, noise, synthetic, lasso
def create_folders():
    """
    Create the per-run output directories: '<file_extension>_results/' and,
    unless this is a validation run, '<file_extension>_models/'.
    """
    folders = ['results', 'models'] if "validate" not in file_extension else ['results']
    for name in folders:
        directory = file_extension + '_' + name + '/'
        # exist_ok replaces the original check-then-create pattern, which
        # was racy and raised if the directory appeared between the two calls.
        os.makedirs(directory, exist_ok=True)
def read_files():
    """Load the radiomics and clinical-event spreadsheets and join them on 'PP'."""
    radiomics_path = 'data/' + file_extension + "_radiomics.xlsx"
    events_path = 'data/' + "db_basis_" + file_extension + ".xlsx"
    radiomics = pd.read_excel(radiomics_path, engine='openpyxl')
    radiomics = radiomics.loc[:, ~radiomics.columns.duplicated()]
    # Keep everything from the first pyradiomics feature column onwards.
    first_feature = radiomics.columns.get_loc("original_shape_Elongation")
    radiomics = radiomics.iloc[:, first_feature:]
    events = pd.read_excel(events_path, engine='openpyxl')
    events = events.rename(columns={"Randomisatie nummer": "PP"})
    events = events.loc[:, ~events.columns.str.contains('^Unnamed')]
    merged = pd.merge(radiomics, events, on='PP', how='outer').drop_duplicates()
    # Only keep patients with a known outcome.
    return merged[merged['Event'].notna()]
def train_test_split(df, test_size):
    """
    Shuffle *df* and split off the first *test_size* rows as the test set.

    When `settings.synthetic` is enabled, a Gaussian-copula model is fitted
    on the training split and 200 synthetic patients are appended to it.

    Parameters:
        df (pd.DataFrame): full dataset, must contain a 'PP' id column.
        test_size (int): number of rows for the test split.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (df_train, df_test)
    """
    df = shuffle(df)
    df_test = df.iloc[:test_size, :].reset_index(drop=True)
    df_train = df.iloc[test_size:, :].reset_index(drop=True)
    if synthetic:
        model = sdv.tabular.GaussianCopula(primary_key='PP')
        model.fit(df_train)
        # BUG FIX: `df_train += model.sample(200)` performed element-wise
        # DataFrame addition (yielding NaNs), not row concatenation.
        df_train = pd.concat([df_train, model.sample(200)], ignore_index=True)
    return df_train, df_test
def normalize_data(df, min_max_scaler=None):
    """
    Min-max scale the feature columns of *df* (everything except 'Event'/'PP').

    Parameters:
        df (pd.DataFrame): data with an 'Event' column and a 'PP' id column.
        min_max_scaler: an already-fitted MinMaxScaler to reuse (e.g. the
            one fitted on the training set); if None a new scaler is fitted.

    Returns:
        tuple: (scaled DataFrame with 'Event' re-attached, the scaler used)
    """
    events = df.Event.values
    vals = df.drop(columns=['Event', 'PP']).values
    cols = df.drop(columns=['Event', 'PP']).columns
    if min_max_scaler is None:
        min_max_scaler = preprocessing.MinMaxScaler()
        vals_scaled = min_max_scaler.fit_transform(vals)
    else:
        # BUG FIX: the original always called fit_transform, silently
        # re-fitting a scaler that was passed in for the test set
        # (train/test leakage). A provided scaler must only transform.
        vals_scaled = min_max_scaler.transform(vals)
    df = pd.DataFrame(vals_scaled, columns=cols)
    df['Event'] = events
    return df, min_max_scaler
def variance_threshold_selector(data, threshold=0.5):
    """Return only the columns of *data* whose variance exceeds *threshold*."""
    selector = VarianceThreshold(threshold)
    selector.fit(data)
    kept_indices = selector.get_support(indices=True)
    return data[data.columns[kept_indices]]
def remove_variance(df):
    """Drop near-constant feature columns (variance below 1e-3)."""
    min_variance = 1e-3
    return variance_threshold_selector(df, min_variance)
def pearson_correlation(df):
    """
    Keep only the features significantly correlated with the outcome.

    Computes the Pearson correlation of every feature column against the
    'Event' column and retains features with p < 0.05.  Assumes 'Event' is
    the last column of *df* (only the first len(columns) - 1 are tested).

    Returns:
        tuple[pd.DataFrame, list[str]]: (reduced frame, kept column names
        including 'Event').
    """
    df_pearson = []
    for index in range(0, len(df.columns) - 1):
        name = df.columns[index]
        # Single pearsonr call per column — the original computed every
        # correlation twice (once for r, once for p).
        correlation, p_value = pearsonr(df.Event, df.iloc[:, index])
        df_pearson.append({'Name': name,
                           'Correlation': correlation,
                           'P-value': p_value})
    df_pearson = pd.DataFrame(df_pearson)
    df_pearson = df_pearson[df_pearson['P-value'] < 0.05]
    features = df_pearson.Name.to_list() + ['Event']
    df_train = df.loc[:, features]
    return df_train, features
def lasso_reduction(df_train, df_test, features):
    """
    Shrink the feature set with an L1 (Lasso) fit whose alpha is grid-searched.

    Features whose Lasso coefficient is exactly zero are dropped from both
    frames; the 'Event' column is always kept.
    """
    x_train = df_train.to_numpy()[:, :-1]
    y_train = df_train.Event.to_numpy()
    pipeline = Pipeline([('scaler', StandardScaler()), ('model', Lasso())])
    search = GridSearchCV(pipeline, {'model__alpha': np.arange(0.1, 10, 0.1)},
                          cv=5, scoring="neg_mean_squared_error", verbose=0)
    search.fit(x_train, y_train)
    coefficients = search.best_estimator_.named_steps['model'].coef_
    importance = np.abs(coefficients)
    # Last entry of *features* is 'Event'; map coefficients to the rest.
    feature_names = np.array(features[0:len(features) - 1])
    selected = list(feature_names[importance > 0]) + ['Event']
    return df_train.loc[:, selected], df_test.loc[:, selected], selected
def preprocess_train_data(df_train):
    """Scale, variance-filter and (unless Lasso is enabled) Pearson-filter the training frame."""
    df_train, min_max_scaler = normalize_data(df_train)
    df_train = remove_variance(df_train)
    if lasso:
        # Lasso does its own selection later; keep every surviving column.
        features = df_train.columns
    else:
        df_train, features = pearson_correlation(df_train)
    return df_train, min_max_scaler, features
def preprocess_test_data(df, min_max_scaler, features):
    """Apply the training scaler to *df* and keep only the selected features."""
    scaled, _ = normalize_data(df, min_max_scaler=min_max_scaler)
    return scaled.loc[:, features]
def preprocess_data(df_train, df_test):
    """Run the full preprocessing pipeline and return numpy splits plus metadata."""
    df_train, min_max_scaler, features = preprocess_train_data(df_train)
    df_test = preprocess_test_data(df_test, min_max_scaler, features)
    if lasso:
        df_train, df_test, features = lasso_reduction(df_train, df_test, features)
    # Last column of each frame is 'Event'; everything before it is a feature.
    x_train = df_train.to_numpy()[:, :-1]
    y_train = df_train.Event.to_numpy()
    x_test = df_test.to_numpy()[:, :-1]
    y_test = df_test.Event.to_numpy()
    return x_train, y_train, x_test, y_test, min_max_scaler, features
def get_best_grid(x_train, y_train, model):
    """
    Optionally resample the training data (per the settings flags) and run a
    5-fold ROC-AUC grid search over the chosen classifier.

    Parameters:
        x_train, y_train: training features and binary labels.
        model (str): key into the `classifiers` / `param_grids` dicts.

    Returns:
        GridSearchCV: fitted grid search, refit on the best parameters.
    """
    minority_count = sum(y_train)
    # BUG FIX: the original built the oversampler only inside the imbalance
    # check but used it unconditionally whenever `oversample` was set,
    # raising NameError on (near-)balanced data. Oversample only when the
    # minority class is below 90% of the majority.
    if oversample and 0.9 * len(y_train) - minority_count > minority_count:
        oversampler = RandomOverSampler(sampling_strategy=0.90)
        x_train, y_train = oversampler.fit_resample(x_train, y_train)
    if undersample:
        undersampler = RandomUnderSampler(sampling_strategy='majority')
        x_train, y_train = undersampler.fit_resample(x_train, y_train)
    if noise:
        # Gaussian feature noise as a simple augmentation step.
        x_train = x_train + np.random.normal(0.0, 0.2, size=x_train.shape)
    param_grid = param_grids[model]
    grid = GridSearchCV(classifiers[model], param_grid=param_grid, scoring="roc_auc",
                        refit=True, cv=5, n_jobs=-1, verbose=0)
    grid.fit(x_train, y_train)
    return grid
def save_model(best_grid, x_test, y_test, features, scaler, model):
    """Pickle the fitted grid plus everything needed to evaluate it later."""
    artefacts = {'model': best_grid,
                 'x_test': x_test,
                 'y_test': y_test,
                 'features': features,
                 'scaler': scaler}
    for suffix, obj in artefacts.items():
        path = file_extension + '_models/' + model + '_' + suffix + '.pkl'
        with open(path, 'wb') as handle:
            pickle.dump(obj, handle)
def jitter(x, scale=0.1):
    """Return *x* plus zero-mean Gaussian noise with standard deviation *scale*."""
    noise_sample = np.random.normal(0, scale, x.shape)
    return x + noise_sample
def jitter_test(classifier, x_test, y_test, scales=np.linspace(0, 0.5, 30), n=5):
    """
    Robustness curve: mean ROC-AUC of *classifier* on *n* jittered copies of
    x_test, evaluated at each noise scale in *scales*.
    """
    out = []
    for noise_scale in scales:
        score_total = 0.0
        for _ in range(n):
            noisy = jitter(x_test, noise_scale)
            score_total += roc_auc_score(y_test, classifier.predict_proba(noisy)[:, 1])
        out.append(score_total / n)
    return out, scales
| PHAIR-Consortium/POPF-Predictor | utils.py | utils.py | py | 6,758 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "settings.file_extension",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "settings.file_extension",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.pat... |
16914709414 | import discord
import os
import json
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import asyncio
import random
from keep_alive import keep_alive
# --- Google Sheets setup -----------------------------------------------------
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_dict(json.loads(os.getenv("SERVICE CREDS")), scope)
# FIX: the gspread client used to be bound to `client` and was then silently
# shadowed by the discord.Client below; give it a distinct name.
gs_client = gspread.authorize(creds)
sheet = gs_client.open("Amber's Secret Santa 2022").worksheet("Gift Reveal")
# NOTE(review): row_count is the worksheet's total grid row count, not the
# number of filled rows — verify it matches the participant list.
participants = sheet.row_count - 1
# Spreadsheet column layout and fixed Discord ids.
AMBER_ID = 607544531042304010
GIFTER_ID_COL = 4
GIFTEE_ID_COL = 7
PARTNER_COL = 8
ARTWORK_COL = 10
GALLERY_ID = 1031026351391645767
# --- Discord client setup ----------------------------------------------------
intents = discord.Intents.default()
intents.members = True  # required for client.get_user on arbitrary members
client = discord.Client(intents=intents)
@client.event
async def on_ready():
    """Announce on stdout that the bot has connected to Discord."""
    print(f'We have logged in as {client.user}')
@client.event
async def on_message(message):
if message.author == client.user:
return
#bot recieves dm
if isinstance(message.channel, discord.DMChannel):
#amber sends testids command
if message.content.startswith(">testids") and message.author.id == AMBER_ID:
msg = ""
for i in range(participants):
rowNum = i+2
gifterID = int(sheet.cell(rowNum, GIFTER_ID_COL).value)
gifter = client.get_user(gifterID)
if gifter == None:
amber = client.get_user(AMBER_ID)
msg = msg + str(rowNum) + " " + str(gifterID) + "\n"
else:
amber = client.get_user(AMBER_ID)
msg = msg + str(rowNum) + " <@" + str(gifterID) + ">\n"
#avoid quota limit
if (i + 1) % 30 == 0:
amber = client.get_user(AMBER_ID)
await amber.send(msg)
msg = ""
await asyncio.sleep(60)
else:
sentAll = False
if not sentAll:
amber = client.get_user(AMBER_ID)
await amber.send(msg)
#amber sends sendpartners command
if message.content.startswith(">sendpartners") and message.author.id == AMBER_ID:
for i in range(participants):
rowNum = i+2
#avoid quota limit
if (i + 1) % 30 == 0:
await asyncio.sleep(60)
gifterID = int(sheet.cell(rowNum, GIFTER_ID_COL).value)
gifteeID = int(sheet.cell(rowNum, GIFTEE_ID_COL).value)
gifter = client.get_user(gifterID)
giftee = client.get_user(gifteeID)
try:
if gifter == None:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Gifter " + str(gifterID) + " not in server")
elif giftee == None:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Giftee " + str(gifteeID) + " not in server")
else:
await gifter.send("Hey <@" + str(gifterID) + ">! \n\nYour giftee for Amber's Secret Santa 2022 is " + giftee.name + "#" + giftee.discriminator + "! Don't tell anyone who your giftee is, it's a surprise! To find your giftee's wishlist, search 'in: wishlists from: " + giftee.name + "#" + giftee.discriminator + "' in the server! \n\nPlease dm a sketch of your artwork to your group leader by **December 14th**! Let your group leader know if you need an extension! \n\nWhenever you're ready to submit your finished artwork, dm '>submit' to this bot and attach your file(s) to the message! Please let your group leader know when you've submitted so that we can double check your submission. To change your submission, send the '>submit' command again and attach your updated file(s), this will replace all previous files. The final artwork deadline is **December 23rd**! \n\nHave fun! :D")
sheet.update_cell(rowNum, PARTNER_COL, "sent")
except:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Unknown error sending giftee " + str(gifteeID) + " to gifter " + str(gifterID))
#anyone sends submit command
if message.content.startswith(">submit"):
correctFileTypes = True
senderID = message.author.id
try:
if message.attachments:
allFiles = ""
confirmMsg = ""
fileCount = 0
senderCell = sheet.find(str(senderID), in_row = None, in_column = GIFTER_ID_COL)
if senderCell:
senderRow = senderCell.row
for i in range(len(message.attachments)):
attachment = message.attachments[i]
if attachment.content_type in ('image/jpeg', 'image/gif', 'image/png', 'image/heic', 'video/mp4'):
file = attachment.url
allFiles = allFiles + file + " "
fileCount += 1
confirmMsg = confirmMsg + ":white_check_mark: File " + str(i+1) + " received: " + file + "\n\n"
else:
correctFileTypes = False
confirmMsg = confirmMsg + ":x: Error: File " + str(i+1) + " is not a jpeg, png, gif, heic or mp4 file!\n\n"
await message.channel.send(confirmMsg)
if correctFileTypes:
try:
sheet.update_cell(senderRow, ARTWORK_COL, allFiles)
await message.channel.send(str(fileCount) + " file(s) submitted successfully!")
except:
await message.channel.send("Failed to add artwork to database. Please dm amber ASAP!")
else:
await message.channel.send("Submission failed. Please double check your file formats and try again!")
else:
await message.channel.send("No user found. Please dm amber ASAP!")
else:
await message.channel.send("No file detected. Please attach your file(s) to your '>submit' message!")
except:
await message.channel.send("Unknown error. Please dm amber ASAP!")
#amber sends testsubmit command
'''if message.content.startswith(">testsubmit") and message.author.id == AMBER_ID:
confirmMsg = "gottem"
fileCount = 1
for i in range(participants):
senderID = i
try:
file = "https://cdn.discordapp.com/attachments/1041724795357773957/1045794420781490326/anya_-_angry.png"
senderCell = sheet.find(str(senderID), in_row = None, in_column = GIFTER_ID_COL)
if senderCell:
senderRow = senderCell.row
await message.channel.send(confirmMsg)
try:
sheet.update_cell(senderRow, ARTWORK_COL, file)
await message.channel.send(str(fileCount) + " file(s) submitted!")
except:
await message.channel.send("Failed to submit artwork. Please dm amber!")
else:
await message.channel.send("No user found. Please dm amber!")
except:
await message.channel.send("Unknown error. Please dm amber ASAP!")'''
#amber sends serversendart command
if message.content.startswith(">serversendart") and message.author.id == AMBER_ID:
allPuns = ["Hold on for deer life, this artwork for @ will blow you away!", "This artwork for @ will make you fall in love at frost sight!", "This artwork for @ is snow amazing!", "This artwork for @ is looking tree-mendous!", "This gift for @ sleighs!", "This artwork for @ is the best thing since sliced (ginger)bread!", "This gorgeous gift for @ graces us with its presents!", "It’s rien-ning gorgeous artwork! This one’s for @!", "Tree-t your eyes to this amazing artwork for @!", "This gift for @ is be-yule-tiful!", "You have no i-deer how dazzling this artwork for @ is!", "This amazing gift for @ will leave you feeling santa-mental!", "This pine art for @ will spruce things up!", "This gift for @ is snow joke!", "This jolly artwork for @ will leave a fa-la-la-la-lasting impression!"]
for i in range(participants):
rowNum = i+2
#avoid quota limit
if (i + 1) % 30 == 0:
await asyncio.sleep(60)
gifteeID = int(sheet.cell(rowNum, GIFTEE_ID_COL).value)
giftee = client.get_user(gifteeID)
links = sheet.cell(rowNum, ARTWORK_COL).value
try:
if links == None:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Giftee <@" + str(gifteeID) + "> no links")
else:
pun = random.choice(allPuns)
allPuns.remove(pun)
pun = pun.replace("@", "<@" + str(gifteeID) + ">")
galleryChannel = client.get_channel(GALLERY_ID)
await galleryChannel.send(pun + " " + links)
if len(allPuns) == 0:
allPuns = ["Hold on for deer life, this artwork for @ will blow you away!", "This artwork for @ will make you fall in love at frost sight!", "This artwork for @ is snow amazing!", "This artwork for @ is looking tree-mendous!", "This gift for @ sleighs!", "This artwork for @ is the best thing since sliced (ginger)bread!", "This gorgeous gift for @ graces us with its presents!", "It’s rien-ning gorgeous artwork! This one’s for @!", "Tree-t your eyes to this amazing artwork for @!", "This gift for @ is be-yule-tiful!", "You have no i-deer how dazzling this artwork for @ is!", "This amazing gift for @ will leave you feeling santa-mental!", "This pine art for @ will spruce things up!", "This gift for @ is snow joke!", "This jolly artwork for @ will leave a fa-la-la-la-lasting impression!"]
except:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + "Unknown error for giftee " + str(gifteeID))
#amber sends dmsendart command
'''if message.content.startswith(">dmsendart") and message.author.id == AMBER_ID:
for i in range(participants):
rowNum = i+2
#avoid quota limit
if (i + 1) % 30 == 0:
await asyncio.sleep(60)
gifteeID = int(sheet.cell(rowNum, GIFTEE_ID_COL).value)
giftee = client.get_user(gifteeID)
links = sheet.cell(rowNum, ARTWORK_COL).value
try:
if giftee == None:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Giftee " + str(gifteeID) + " not in server")
elif links == None:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Giftee <@" + str(gifteeID) + "> no links")
else:
await giftee.send("Hey <@" + str(gifteeID) + ">! Here's the gift you recieved for Amber's Secret Santa 2022! " + links + "\n\nHead over to the #gift-gallery channel in the server to take a look at all the gifts from this event (including what you gifted)! You can also try to guess who your gifter was in the #gifter-guessing channel, the first person to guess correctly will be awarded their own emote in my main discord server, amber's jam jar: <https://discord.gg/hcgwvwApXR>, and everyone who guesses correctly will receive a special role! You have 5 guesses!")
except:
amber = client.get_user(AMBER_ID)
await amber.send(str(rowNum) + " Unknown error for giftee " + str(gifteeID))'''
if message.content.startswith(">editmsg") and message.author.id == AMBER_ID:
channel = client.get_channel(GALLERY_ID)
message = await channel.fetch_message(1056436525002403894)
await message.edit(content="These artworks for <@1044953902065406003> are the best thing since sliced (ginger)bread! https://cdn.discordapp.com/attachments/1056215594644553779/1056602058272292974/ADBDC920-1386-4DC2-B16A-36855DFBE5D6.jpg https://cdn.discordapp.com/attachments/914323249884708965/1056436177206525952/Untitled_Artwork.png (there were a few complications with your gift, so you got 2!)")
keep_alive()
try:
client.run(os.getenv("TOKEN"))
except:
os.system("kill 1")
| amberosia/SecretSantaBot2022 | main.py | main.py | py | 12,031 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_dict",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "json.loads",... |
8139560888 | from collections import defaultdict
class Solution:
def calcEquation(self, equations: list, values: list, queries: list) -> list:
graph = defaultdict(list)
for i in range(len(equations)):
d1, d2 = equations[i]
val = values[i]
graph[d1].append((d2, val))
graph[d2].append((d1, 1 / val))
def dfs(d1, d2, visted, curVal):
if d1 == d2:
return curVal
if d1 in visted:
return -1
visted.add(d1)
tmp = -1
for child, val in graph[d1]:
tmp = max(tmp,dfs(child, d2, visted, curVal * val))
return tmp
ans = []
for d1, d2 in queries:
if d1 in graph and d2 in graph:
ans.append(dfs(d1, d2, set(), 1))
else:
ans.append(-1)
return ans | MinecraftDawn/LeetCode | Medium/399. Evaluate Division.py | 399. Evaluate Division.py | py | 902 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
}
] |
27863867426 | import torch
import torch.nn as nn
import torch.optim as optim
import torchtext
from torchtext.datasets import SST
from torchtext.data import Field, LabelField, BucketIterator
from torchtext.vocab import Vectors, GloVe, CharNGram, FastText
import time
import torch.nn.functional as F
from network import LSTM
import argparse
import numpy as np
from losses import loss
from optimizer import create_optimizer
from engine import train, test, validate
from dataset import data_load
from plot import plot_loss_and_acc
import warnings
warnings.filterwarnings('ignore')
def get_args_parser():
parser = argparse.ArgumentParser('CNN Training', add_help=False)
parser.add_argument('--dataset', default='SST', type=str,
help='choose dataset (default: SST)')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epoch', type=int, default=10)
# Optimization parameters
parser.add_argument('--opt', default='adam', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adam"')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.0001,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=3e-4, metavar='LR',
help='learning rate (default: 4e-3), with total batch size 4096')
parser.add_argument('--layer_decay', type=float, default=1.0)
parser.add_argument('--loss', type=str, default='ce')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--exp', type=str, default='debug')
parser.add_argument('--model', type=str, default='lstm')
parser.add_argument('--tag', type=str, default='debug')
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--pretrained', type=int, default=1)
return parser
def main(args):
# set up fields
TEXT = Field(fix_length=60)
LABEL = Field(sequential=False,dtype=torch.long)
# make splits for data
# DO NOT MODIFY: fine_grained=True, train_subtrees=False
train_data, val_data, test_data = SST.splits(
TEXT, LABEL, fine_grained=True, train_subtrees=False)
# print information about the data
print('train.fields', train_data.fields)
print('len(train)', len(train_data))
print('vars(train[0])', vars(train_data[0]))
# build the vocabulary
# you can use other pretrained vectors, refer to https://github.com/pytorch/text/blob/master/torchtext/vocab.py
TEXT.build_vocab(train_data, vectors=Vectors(name='vector.txt', cache='./data'))
LABEL.build_vocab(train_data)
# We can also see the vocabulary directly using either the stoi (string to int) or itos (int to string) method.
print("itos", TEXT.vocab.itos[:10])
print("stoi", LABEL.vocab.stoi)
print("most_common", TEXT.vocab.freqs.most_common(20))
# print vocab information
print('len(TEXT.vocab)', len(TEXT.vocab))
print('TEXT.vocab.vectors.size()', TEXT.vocab.vectors.size())
batch_size = 64
# make iterator for splits
train_it, val_it, test_it = BucketIterator.splits(
(train_data, val_data, test_data), batch_size=batch_size,
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
# 初始化模型
vocab_size = len(TEXT.vocab)
# print(vocab_size)
# time.sleep(100)
embedding_dim = 300
hidden_dim = args.hidden_dim
output_dim = 5
num_layers = args.num_layers
dropout = args.dropout
model = LSTM(vocab_size, embedding_dim, hidden_dim, output_dim, num_layers, dropout)
# Copy the pre-trained word embeddings we loaded earlier into the embedding layer of our model.
pretrained_embeddings = TEXT.vocab.vectors
print(pretrained_embeddings.shape)
# you should maintain a nn.embedding layer in your network
if args.pretrained:
model.embedding.weight.data.copy_(pretrained_embeddings)
model.embedding.requires_grad = False
# 设置训练设备
device = torch.device(args.device)
# 定义模型
model = model.to(device)
# 打印信息
print(f"使用 {device} device")
print(f"数据集:{args.dataset}")
print(f"模型:{args.model}")
# 定义代价函数
criterion = loss(args)
# 定义优化器
optimizer = create_optimizer(args, model)
# 训练
model_trained, train_los, train_acc, val_los, val_acc = train(model=model,
criterion=criterion,
train_iter=train_it,
val_iter=val_it,
optimizer=optimizer,
device=device,
max_epoch=args.epoch,
disp_freq=100)
# 测试
test(model=model,
criterion=criterion,
test_iter=test_it,
device=device)
# 模型保存
# torch.save(model.state_dict(), 'model/my_model.pth')
# 绘制损失和准确率图
suffix1 = args.tag + args.dataset + '_' + args.model + '_lr' + str(args.lr) + '_' + str(args.opt) + '_wd' + str(args.weight_decay) + '_epoch' + str(args.epoch) + '.png'
path1 = ['los_acc/train_loss_' + suffix1, 'los_acc/train_acc_' + suffix1]
suffix2 = args.tag + args.dataset + '_' + args.model + '_lr' + str(args.lr) + '_' + str(args.opt) + '_wd' + str(args.weight_decay) + '_epoch' + str(args.epoch) + '.png'
path2 = ['los_acc/val_loss_' + suffix2, 'los_acc/val_acc_' + suffix2]
plot_loss_and_acc({'TRAIN': [train_los, train_acc]}, path1)
plot_loss_and_acc({'VAL': [val_los, val_acc]}, path2)
if __name__ == "__main__":
"""
Please design the initial and target state.
"""
parser = argparse.ArgumentParser('Prombelm solver', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
| shiml20/Deep-Learning-Basic-Code | NLP/main.py | main.py | py | 6,336 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchtext.data.Field",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "... |
36585832061 | # Idea was thought of back in June of 2016 in regards to creating a way for qvpython script to easily manipulate an excel spreadsheet. After obtaining help from Vaso Vasich, this script was actually made in an effort by Vaso Vasich under the SDEP business, which is a subset of Arce Enterprises.
# 2016
# First Author: Vaso Vasich
# Second Author: David Arce
# Program is designed to make a fluid way for product to be added to an excel sheet with minimal effort in order to make it quicker to add mass amounts of product. This excel sheet is then uploaded to an ecommerce website with no changes needed.
part_types = {
'Rocker Arms': {
'specs': [
'Stud Diameter(in.):',
'Ratio:'
],
'set': 16,
'images': {
# still need to add more
'Ultra Pro Magnum': [
'https://s25.postimg.org/vd6zj9x8v/ultra_pro_magnum_rocker_arms_1.jpg',
'https://s25.postimg.org/l4eidg973/ultra_pro_magnum_rocker_arms_2.jpg'
],
'Ultra Gold Aluminum': [
'https://s25.postimg.org/k73hqu8b3/ultra_gold_rocker_arms_1.jpg'
],
}
}
}
def inven():
import openpyxl
from openpyxl.styles import Alignment
wb = openpyxl.load_workbook('testSheet.xlsx')
sheet = wb.active
#VARIABLES
running = True
placer = 0
brand = ""
productNum = ""
prodPrice = 0
prodName = ""
trackInventory = "TRUE"
quantity = 4
backorder1 = "FALSE"
prodWeight = ""
taxable= "TRUE"
hidden = "FALSE"
prodCategory = ""
prodCategoryWeb = "http://sandiegoengineparts.com/t/"
prodBrandWeb = "http://sandiegoengineparts.com/t/"
prodNameWeb = "http://sandiegoengineparts.com/products?keywords="
prodImageUrl = ""
imageIndex = 0
while(running):
question = input("New Product to add! \n Is new product similar to last? y or n: ")
if(question == 'y'):
placer = int(placer)
placer += 1
placer = str(placer)
productNum = input("Part #: ")
prodPrice = float(input("Price: "))
prodPrice = round(prodPrice,2)
imageIndex = checkImages(part_types[prodType]['images'][prodLine], prodType, imageIndex)
prodImageUrl = part_types[prodType]['images'][prodLine][imageIndex]
imageIndex += 1
sheet['A'+placer].alignment = Alignment(wrapText=True)
sheet['A'+placer].value = prodName + " " + productNum
sheet['B'+placer].alignment = Alignment(wrapText=True)
sheet['B'+placer].value = productNum
sheet['C'+placer].alignment = Alignment(wrapText=True)
sheet['C'+placer].value = prodPrice
sheet['F'+placer].alignment = Alignment(wrapText=True)
sheet['F'+placer].value = trackInventory
sheet['G'+placer].alignment = Alignment(wrapText=True)
sheet['G'+placer].value = quantity
sheet['H'+placer].alignment = Alignment(wrapText=True)
sheet['H'+placer].value = backorder1
appliesTo = applies()
prodSpecs = getSpecifications(prodType)
sheet['E'+placer].alignment = Alignment(wrapText=True)
sheet['E'+placer].value ="""<div>
<h2>{1} {2} Specifications</h2>
<ul>
<li><b>Brand: </b><a href="{6}">{3}</a></li>
<li><b>Manufacturer's Part Number: </b>{1}</li>
<li><b>Part Type: </b><a href="{7}">{2}</a></li>
<li><b>Product Line: </b><a href="{8}">{0}</a></li>
{9}
<li><b>Sold in Set of {4}</b></li>
</ul>
<h2>Applies to:</h2>
<ul>
{5}
</ul>
</div>""".format(prodName,productNum,prodType,prodBrand,prodSet,appliesTo,prodBrandWeb,prodCategoryWeb,prodNameWeb,prodSpecs)
sheet['I'+placer].alignment = Alignment(wrapText=True)
sheet['I'+placer].value = prodWeight
sheet['J'+placer].alignment = Alignment(wrapText=True)
sheet['J'+placer].value = taxable
sheet['K'+placer].alignment = Alignment(wrapText=True)
sheet['K'+placer].value = hidden
sheet['L'+placer].alignment = Alignment(wrapText=True)
sheet['L'+placer].value = prodCategory
sheet['M'+placer].alignment = Alignment(wrapText=True)
sheet['M'+placer].value = prodImageUrl
sheet['N'+placer].alignment = Alignment(wrapText=True)
sheet['N'+placer].value = prodName + " " + productNum
sheet['O'+placer].alignment = Alignment(wrapText=True)
sheet['O'+placer].value = "Find " + prodName + " " + productNum +" at San Diego Engine Parts where high quality engine parts meet low costs."
wb.save('testSheet.xlsx')
# New Product
else:
if(question == 'n'):
prodType = input("Type of Part(eg. Rocker Arms): ")
# If (prodType in part_types) do all this...don't need check necessarily now
prodBrand = input("Brand (eg. COMP Cams): ")
prodLine = input("Product Line (eg. Ultra Pro Magnum): ")
productNum = input("Part #: ")
prodCategory = makeCategories(prodType, prodBrand)
prodCategoryWeb += prodType.replace(' ', '-').lower()
# prodCategoryWeb += input("Type of Part URL: ")
prodBrandWeb += prodBrand.replace(' ', '-').lower()
# prodBrandWeb += input("Brand URL: ")
prodName = prodBrand + ' ' + prodLine + ' ' + prodType
# prodName = input("(Brand - Product Line - Type): ")
prodNameWeb += prodName.replace(' ', '+').lower()
# prodNameWeb += input("Product Line URL: ")
prodWeight = int(input("Weight: "))
prodSet = part_types[prodType]['set']
placer = input("Row: ")
# prodRAdiameter = input("Diameter: ")
# prodRAratio = input("Ratio: ")
prodPrice = float(input("Price: "))
prodPrice = round(prodPrice,2)
if (prodLine in part_types[prodType]['images']):
prodImageUrl = part_types[prodType]['images'][prodLine][0]
imageIndex += 1
else:
print('No Image was found for ' + prodLine)
# prodImageUrl = input("Image Url: ")
sheet['A'+placer].alignment = Alignment(wrapText=True)
sheet['A'+placer].value = prodName + " " + productNum
sheet['B'+placer].alignment = Alignment(wrapText=True)
sheet['B'+placer].value = productNum
sheet['C'+placer].alignment = Alignment(wrapText=True)
sheet['C'+placer].value = prodPrice
sheet['F'+placer].alignment = Alignment(wrapText=True)
sheet['F'+placer].value = trackInventory
sheet['G'+placer].alignment = Alignment(wrapText=True)
sheet['G'+placer].value = quantity
sheet['H'+placer].alignment = Alignment(wrapText=True)
sheet['H'+placer].value = backorder1
# running = True
appliesTo = applies()
prodSpecs = getSpecifications(prodType)
sheet['E'+placer].alignment = Alignment(wrapText=True)
sheet['E'+placer].value = """<div>
<h2>{1} {2} Specifications</h2>
<ul>
<li><b>Brand: </b><a href="{6}">{3}</a></li>
<li><b>Manufacturer's Part Number: </b>{1}</li>
<li><b>Part Type: </b><a href="{7}">{2}</a></li>
<li><b>Product Line: </b><a href="{8}">{0}</a></li>
{9}
<li><b>Sold in Set of {4}</b></li>
</ul>
<h2>Applies to:</h2>
<ul>
{5}
</ul>
</div>""".format(prodName,productNum,prodType,prodBrand,prodSet,appliesTo,prodBrandWeb,prodCategoryWeb,prodNameWeb,prodSpecs)
# .format(prodName,productNum,prodType,prodBrand,prodSet,appliesTo,prodBrandWeb,prodCategoryWeb,prodNameWeb,prodRAratio,prodRAdiameter)
sheet['I'+placer].alignment = Alignment(wrapText=True)
sheet['I'+placer].value = prodWeight
sheet['J'+placer].alignment = Alignment(wrapText=True)
sheet['J'+placer].value = taxable
sheet['K'+placer].alignment = Alignment(wrapText=True)
sheet['K'+placer].value = hidden
sheet['L'+placer].alignment = Alignment(wrapText=True)
sheet['L'+placer].value = prodCategory
sheet['M'+placer].alignment = Alignment(wrapText=True)
sheet['M'+placer].value = prodImageUrl
sheet['N'+placer].alignment = Alignment(wrapText=True)
sheet['N'+placer].value = prodName + " " + productNum
sheet['O'+placer].alignment = Alignment(wrapText=True)
sheet['O'+placer].value = "Find " + prodName + " " + productNum +" at San Diego Engine Parts where high quality engine parts meet low costs."
wb.save('testSheet.xlsx')
else:
if(question == 'x'):
running = False
wb.save('testSheet.xlsx')
def applies():
running = True
appliesTo = ""
while(running):
question = input("Continue adding Applies to? y or n: ")
if(question == 'y'):
appliesTo = appliesTo + ' <li>' + input("Applies to: ") + '</li>\n'
else:
if(question == 'n'):
return appliesTo
def getSpecifications(product_type):
running = True
prodSpecs = ""
while(running):
for k in part_types[product_type]['specs']:
v = input('Get ' + k)
prodSpecs += ' <li><b>' + k + '</b> ' + v + '</li>\n'
return prodSpecs
def makeCategories(product_type, product_brand):
categories = product_type + ', Brands/' + product_brand + ', Brands/' + product_brand + '/' + product_brand + ' ' + product_type
return categories
def checkImages(product_line, product_type, index):
imageGroupLength = len(part_types[product_type]['images'])
if (imageGroupLength == 1):
index = 0
return index
else:
if (index < imageGroupLength):
return index
else:
index = 0
return index
inven() | serbboy23/SDEP | addProducts.py | addProducts.py | py | 10,350 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 70,
"usage_type": "call"
},
{
"api_na... |
74540222432 | # coding: utf-8
from traitlets import Type, Instance, default
from .baseapp import NbGrader
from ..plugins import ExportPlugin, CsvExportPlugin
from ..api import Gradebook
aliases = {
'log-level' : 'Application.log_level',
'db': 'CourseDirectory.db_url',
'to' : 'ExportPlugin.to',
'exporter': 'ExportApp.plugin_class',
'assignment' : 'ExportPlugin.assignment',
'student': 'ExportPlugin.student',
'course': 'CourseDirectory.course_id'
}
flags = {}
class ExportApp(NbGrader):
name = u'nbgrader-export'
description = u'Export information from the database to another format.'
aliases = aliases
flags = flags
examples = """
The default is to export to a file called "grades.csv", i.e.:
nbgrader export
You can customize the filename with the --to flag:
nbgrader export --to mygrades.csv
You can export the grades for a single (or limited set) of students
or assignments with the --assignment and/or --student flag:
nbgrader export --assignment [assignmentID]
--student [studentID1,studentID2]
Where the studentIDs and assignmentIDs are a list of IDs and
assignments. The assignments or studentIDs need to quoted if they
contain not only numbers. The square brackets are obligatory.
To change the export type, you will need a class that inherits from
nbgrader.plugins.ExportPlugin. If your exporter is named
`MyCustomExporter` and is saved in the file `myexporter.py`, then:
nbgrader export --exporter=myexporter.MyCustomExporter
"""
plugin_class = Type(
CsvExportPlugin,
klass=ExportPlugin,
help="The plugin class for exporting the grades."
).tag(config=True)
plugin_inst = Instance(ExportPlugin).tag(config=False)
def init_plugin(self):
self.log.info("Using exporter: %s", self.plugin_class.__name__)
self.plugin_inst = self.plugin_class(parent=self)
@default("classes")
def _classes_default(self):
classes = super(ExportApp, self)._classes_default()
classes.append(ExportApp)
classes.append(ExportPlugin)
return classes
def start(self):
super(ExportApp, self).start()
self.init_plugin()
with Gradebook(self.coursedir.db_url, self.coursedir.course_id) as gb:
self.plugin_inst.export(gb)
| jupyter/nbgrader | nbgrader/apps/exportapp.py | exportapp.py | py | 2,458 | python | en | code | 1,232 | github-code | 1 | [
{
"api_name": "baseapp.NbGrader",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "traitlets.Type",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "plugins.CsvExportPlugin",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "plugins.E... |
31904422454 | # dash_callbacks.py
from dash.dependencies import Input, Output
from dashboard.dash_app import app
from dashboard.dash_aux import get_pending_html_table
from session.sc_helpers import QuitMode
from dashboard.sc_df_manager import DataframeManager
from binance import enums as k_binance
from datetime import datetime, timedelta
from basics.sc_perfect_trade import PerfectTradeStatus
from basics.sc_asset import Asset
print('dash_callbacks.py')
dfm = DataframeManager()
# first line data
@app.callback(Output('current-time', 'children'),
Output('neb', 'children'), # perfect trade net profit
Output('qty', 'children'), # orders quantity
Output('target', 'children'), # session target net profit
Output('max-negative-profit-allowed', 'children'), # if reached, session ended at price
Input('update', 'n_intervals'))
def display_value(value):
symbol_name = dfm.dashboard_active_symbol.name
quote_name = dfm.dashboard_active_symbol.quote_asset().name()
base_name = dfm.dashboard_active_symbol.base_asset().name()
return \
f'{datetime.now().strftime("%H:%M:%S")}',\
f'n: {dfm.sm.active_sessions[symbol_name].P_NET_QUOTE_BALANCE:,.2f} {quote_name}',\
f'q: {dfm.sm.active_sessions[symbol_name].P_QUANTITY:,.4f} {base_name}',\
f't: {dfm.sm.active_sessions[symbol_name].checks_manager.P_TARGET_TOTAL_NET_PROFIT:,.2f} {quote_name}',\
f'({dfm.sm.active_sessions[symbol_name].checks_manager.P_MAX_NEGATIVE_PROFIT_ALLOWED:,.2f})'
# **********************************
# ********** Session data **********
# **********************************
# elapsed time
@app.callback(Output('session-count', 'children'),
Output('session-cycle-count', 'children'),
Input('update', 'n_intervals'))
def display_value(value):
symbol_name = dfm.dashboard_active_symbol.name
return f'#{dfm.sm.session_count[symbol_name]:03d}', \
f'{timedelta(seconds=dfm.sm.active_sessions[symbol_name].cmp_count)}'
# perfect trade status info
@app.callback(Output('pt-new', 'children'),
Output('pt-buy', 'children'),
Output('pt-sell', 'children'),
Output('pt-end', 'children'),
Input('update', 'n_intervals'))
def display_value(value):
symbol_name = dfm.dashboard_active_symbol.name
ptm = dfm.sm.active_sessions[symbol_name].ptm
return \
len(ptm.get_pt_by_request(pt_status=[PerfectTradeStatus.NEW])),\
len(ptm.get_pt_by_request(pt_status=[PerfectTradeStatus.BUY_TRADED])),\
len(ptm.get_pt_by_request(pt_status=[PerfectTradeStatus.SELL_TRADED])),\
len(ptm.get_pt_by_request(pt_status=[PerfectTradeStatus.COMPLETED]))
# span, depth, momentum & TBD data
@app.callback(Output('pt-span', 'children'),
Output('pt-span-buy', 'children'),
Output('pt-span-sell', 'children'),
Output('pt-depth', 'children'),
Output('pt-depth-buy', 'children'),
Output('pt-depth-sell', 'children'),
Output('pt-mtm', 'children'),
Output('pt-mtm-buy', 'children'),
Output('pt-mtm-sell', 'children'),
Input('update', 'n_intervals'))
def display_value(value):
session_orders = dfm.get_session_orders()
symbol_name = dfm.dashboard_active_symbol.name
active_session = dfm.sm.active_sessions[symbol_name]
cmp = active_session.cmp
gap = active_session.gap
buy_gap_span, sell_gap_span = active_session.helpers.get_gap_span_from_list(orders=session_orders, cmp=cmp, gap=gap)
total_gap_span = buy_gap_span + sell_gap_span
buy_gap_depth, sell_gap_depth = active_session.helpers.get_gap_depth_from_list(orders=session_orders, cmp=cmp, gap=gap)
total_gap_depth = buy_gap_depth + sell_gap_depth
buy_gap_momentum, sell_gap_momentum = active_session.helpers.get_gap_momentum_from_list(orders=session_orders, cmp=cmp, gap=gap)
total_gap_momentum = buy_gap_momentum + sell_gap_momentum
return \
f'{total_gap_span:.2f}', \
f'{buy_gap_span:.2f}', \
f'{sell_gap_span:.2f}', \
f'{total_gap_depth:.2f}', \
f'{buy_gap_depth:.2f}', \
f'{sell_gap_depth:.2f}', \
f'{total_gap_momentum:.2f}', \
f'{buy_gap_momentum:.2f}', \
f'{sell_gap_momentum:.2f}'
# ********** Session STOP profits **********
@app.callback(Output('actual-profit', 'children'),
Output('stop-price-profit', 'children'),
Output('ntc', 'children'),
Output('time-to-next-try', 'children'),
Output('is-active', 'children'),
Input('update', 'n_intervals'))
def display_value(value):
symbol = dfm.dashboard_active_symbol
symbol_name = symbol.name
cmp = dfm.sm.active_sessions[symbol_name].cmp
qp = symbol.quote_asset().pv()
base_ntc = dfm.sm.active_sessions[symbol_name].checks_manager.base_negative_try_count
quote_ntc = dfm.sm.active_sessions[symbol_name].checks_manager.quote_negative_try_count
cycles_count_for_inactivity = dfm.sm.active_sessions[symbol_name].cycles_count_for_inactivity
cycles_to_new_pt = cycles_count_for_inactivity - dfm.sm.active_sessions[symbol_name].cycles_from_last_trade
cycles_to_new_pt = 0.0 if cycles_to_new_pt < 0 else cycles_to_new_pt
time_to_next_try = timedelta(seconds=cycles_to_new_pt)
is_active = 'ON' if dfm.sm.active_sessions[symbol_name].is_active else 'OFF'
return f'{dfm.sm.active_sessions[symbol_name].ptm.get_total_actual_profit_at_cmp(cmp=cmp):,.{qp}f}',\
f'{dfm.sm.active_sessions[symbol_name].ptm.get_stop_price_profit(cmp=cmp):,.{qp}f}', \
f'{base_ntc} - {quote_ntc}', \
f'{time_to_next_try}', \
f'{is_active}'
# @app.callback(Output('cycles-to-new-pt', 'children'), Input('update', 'n_intervals'))
# def display_value(value):
# symbol_name = dfm.dashboard_active_symbol.name
# cycles_count_for_inactivity = dfm.sm.active_sessions[symbol_name].cycles_count_for_inactivity
# cycles_to_new_pt = cycles_count_for_inactivity - dfm.sm.active_sessions[symbol_name].cycles_from_last_trade
# time_to_new_pt = timedelta(seconds=cycles_to_new_pt)
# return f'({cycles_count_for_inactivity}) {time_to_new_pt}'
# **********************************
# ********** Global data **********
# **********************************
# ********** Global elapsed time **********
@app.callback(Output('global-cycle-count', 'children'), Input('update', 'n_intervals'))
def display_value(value):
symbol_name = dfm.dashboard_active_symbol.name
global_cmp = dfm.sm.terminated_sessions[symbol_name]["global_cmp_count"]
session_cmp = dfm.sm.active_sessions[symbol_name].cmp_count
return f'{timedelta(seconds=global_cmp + session_cmp)}'
# isolated orders info
@app.callback(
Output('isol-orders-placed', 'children'),
Output('isol-orders-pending', 'children'),
Output('isol-orders-pending-buy', 'children'),
Output('isol-orders-pending-sell', 'children'),
Input('update', 'n_intervals'))
def display_value(value):
symbol_name = dfm.dashboard_active_symbol.name
placed = dfm.sm.terminated_sessions[symbol_name]['global_placed_orders_count_at_price']
sell = len(
[order for order in dfm.sm.iom.isolated_orders
if order.k_side == k_binance.SIDE_SELL and order.symbol.name == symbol_name])
buy = len(
[order for order in dfm.sm.iom.isolated_orders
if order.k_side == k_binance.SIDE_BUY and order.symbol.name == symbol_name])
pending = buy + sell
return placed, pending, buy, sell
# Global span, depth, momentum & TBD data
@app.callback(Output('is-span', 'children'),
              Output('is-span-buy', 'children'),
              Output('is-span-sell', 'children'),
              Output('is-depth', 'children'),
              Output('is-depth-buy', 'children'),
              Output('is-depth-sell', 'children'),
              Output('is-mtm', 'children'),
              Output('is-mtm-buy', 'children'),
              Output('is-mtm-sell', 'children'),
              Input('update', 'n_intervals'))
def display_value(value):
    """Show gap span / depth / momentum metrics (total, buy, sell) over all orders."""
    all_orders = dfm.get_all_orders()
    symbol_name = dfm.dashboard_active_symbol.name
    active_session = dfm.sm.active_sessions[symbol_name]
    cmp = active_session.cmp
    gap = active_session.gap
    buy_gap_span, sell_gap_span = active_session.helpers.get_gap_span_from_list(orders=all_orders, cmp=cmp, gap=gap)
    total_gap_span = buy_gap_span + sell_gap_span
    buy_gap_depth, sell_gap_depth = active_session.helpers.get_gap_depth_from_list(orders=all_orders, cmp=cmp, gap=gap)
    total_gap_depth = buy_gap_depth + sell_gap_depth
    buy_gap_momentum, sell_gap_momentum = \
        active_session.helpers.get_gap_momentum_from_list(orders=all_orders, cmp=cmp, gap=gap)
    total_gap_momentum = buy_gap_momentum + sell_gap_momentum
    # nine strings, ordered to match the Output() declarations above
    return \
        f'{total_gap_span:.2f}',\
        f'{buy_gap_span:.2f}',\
        f'{sell_gap_span:.2f}', \
        f'{total_gap_depth:.2f}', \
        f'{buy_gap_depth:.2f}', \
        f'{sell_gap_depth:.2f}', \
        f'{total_gap_momentum:.2f}', \
        f'{buy_gap_momentum:.2f}', \
        f'{sell_gap_momentum:.2f}'
# ********** Global STOP profits **********
@app.callback(Output('consolidated-profit', 'children'),
              Output('expected-profit-at-cmp', 'children'),
              Output('expected-profit', 'children'),
              Output('actions-info', 'children'),
              Output('actions-rate', 'children'),
              Output('canceled-count', 'children'),
              Input('update', 'n_intervals'))
def display_value(value):
    """Show global profit figures, action counts/rates and canceled-order counts."""
    symbol = dfm.dashboard_active_symbol
    symbol_name = symbol.name
    cmp = dfm.sm.active_sessions[symbol_name].cmp  # s[-1]
    qp = symbol.quote_asset().pv()  # number of decimals used in the f-string formats below
    # coin_symbol = symbol.quote_asset().name()
    # called the method in session to check buy_count == sell_count
    consolidated = dfm.sm.terminated_sessions[symbol_name]['global_consolidated_profit']
    expected = dfm.sm.terminated_sessions[symbol_name]['global_expected_profit']
    expected_at_cmp = dfm.sm.iom.get_expected_profit_at_cmp(cmp=cmp, symbol_name=symbol_name)
    buy_actions_count, sell_actions_count, actions_balance = \
        dfm.sm.active_sessions[symbol_name].checks_manager.get_actions_balance()
    # +1 guards against ZeroDivisionError before the first action
    buy_actions_rate = consolidated / (buy_actions_count +1)
    sell_actions_rate = consolidated / (sell_actions_count +1)
    canceled_buy_orders = [order for order in dfm.sm.iom.canceled_orders if order.k_side == k_binance.SIDE_BUY]
    canceled_sell_orders = [order for order in dfm.sm.iom.canceled_orders if order.k_side == k_binance.SIDE_SELL]
    if consolidated + expected_at_cmp > 100.0:
        # NOTE(review): deliberately raises to abort the callback once the hard-coded
        # 100.0 profit target is reached — presumably a manual tripwire; confirm intent.
        global_cmp = dfm.sm.terminated_sessions[symbol_name]["global_cmp_count"]
        session_cmp = dfm.sm.active_sessions[symbol_name].cmp_count
        raise Exception(f'TARGET ACHIEVED!!! in {timedelta(seconds=global_cmp + session_cmp)}'
                        f' DONE: {consolidated} ACTUAL AL CMP: {expected_at_cmp}')
    return f'{consolidated:,.{qp}f}',\
        f'{expected_at_cmp:,.{qp}f}',\
        f'{expected:,.{qp}f}', \
        f'{buy_actions_count}/{sell_actions_count} {actions_balance:,.2f}', \
        f'{buy_actions_rate:,.0f} / {sell_actions_rate:,.0f}', \
        f'{len(canceled_buy_orders)} / {len(canceled_sell_orders)}'
# ********** PT count / traded orders count **********
# @app.callback(Output('trade-info', 'children'), Input('update', 'n_intervals'))
# def display_value(value):
# symbol_name = dfm.dashboard_active_symbol.name
# pt_count = len(dfm.sm.active_sessions[symbol_name].ptm.perfect_trades)
# buy_count = dfm.sm.active_sessions[symbol_name].buy_count
# sell_count = dfm.sm.active_sessions[symbol_name].sell_count
# return f'pt: {pt_count} b: {buy_count} s: {sell_count}'
# @app.callback(Output('short-prediction', 'children'),
# Output('long-prediction', 'children'),
# Input('update', 'n_intervals'))
# def display_value(value):
# symbol_name = dfm.dashboard_active_symbol.name
# session = dfm.sm.active_sessions[symbol_name]
# short_prediction = session.strategy_manager.get_tendency(session.cmp_pattern_short) - session.cmp
# long_prediction = session.strategy_manager.get_tendency(session.cmp_pattern_long) - session.cmp
# return f'short: {short_prediction:,.0f}', f'long: {long_prediction:,.0f}'
# @app.callback(Output('accounts-info', 'children'), Input('update', 'n_intervals'))
# def display_value(value):
# accounts_info = [f'{account.name}: {account.free:,.2f} ' for account in dfm.sm.am.accounts.values()]
# accounts_info_s = ' '.join(map(str, accounts_info))
# return accounts_info_s
# ********** symbol & accounts data **********
@app.callback(
    Output('symbol', 'children'),
    Output('cmp-max', 'children'),
    Output('cmp', 'children'),
    Output('cmp-min', 'children'),
    Output('base-asset', 'children'),
    Output('base-asset-locked', 'children'),
    Output('base-asset-alive', 'children'),
    Output('base-asset-free', 'children'),
    Output('base-asset-total', 'children'),
    Output('quote-asset', 'children'),
    Output('quote-asset-locked', 'children'),
    Output('quote-asset-alive', 'children'),
    Output('quote-asset-free', 'children'),
    Output('quote-asset-total', 'children'),
    Output('bnb-locked', 'children'),
    Output('bnb-alive', 'children'),
    Output('bnb-free', 'children'),
    Output('bnb-total', 'children'),
    Input('update', 'n_intervals')
)
def display_value(value):
    """Show active-symbol cmp min/cur/max plus base/quote/BNB balances (locked/alive/free/total)."""
    symbol = dfm.dashboard_active_symbol
    symbol_name = symbol.name
    # am = dfm.sm.active_sessions[symbol_name].am
    am = dfm.sm.am
    base_account = am.get_account(symbol.base_asset().name())
    quote_account = am.get_account(symbol.quote_asset().name())
    bnb_account = am.get_account('BNB')
    quote_pv = symbol.quote_asset().pv()
    # 'alive' = liquidity committed to alive orders; 'free' is shown net of it below
    base_alive = dfm.sm.get_liquidity_for_alive_orders(asset=symbol.base_asset())
    quote_alive = dfm.sm.get_liquidity_for_alive_orders(asset=symbol.quote_asset())
    bnb_alive = dfm.sm.get_liquidity_for_alive_orders(asset=Asset(name='BNB', pv=6))
    # bnb_alive = 1.0
    return \
        symbol_name, \
        f'{dfm.sm.active_sessions[symbol_name].max_cmp:,.{quote_pv}f}', \
        f'{dfm.sm.active_sessions[symbol_name].cmp:,.{quote_pv}f}', \
        f'{dfm.sm.active_sessions[symbol_name].min_cmp:,.{quote_pv}f}', \
        symbol.base_asset().name(),\
        f'{base_account.locked:,.{symbol.base_asset().pv()}f}', \
        f'{base_alive:,.{symbol.base_asset().pv()}f}', \
        f'{base_account.free - base_alive:,.{symbol.base_asset().pv()}f}', \
        f'{base_account.get_total():,.{symbol.base_asset().pv()}f}', \
        symbol.quote_asset().name(), \
        f'{quote_account.locked:,.{symbol.quote_asset().pv()}f}', \
        f'{quote_alive:,.{symbol.quote_asset().pv()}f}', \
        f'{quote_account.free - quote_alive:,.{symbol.quote_asset().pv()}f}',\
        f'{quote_account.get_total():,.{symbol.quote_asset().pv()}f}',\
        f'{bnb_account.locked:,.6f}',\
        f'{bnb_alive:,.6f}', \
        f'{bnb_account.free - bnb_alive:,.6f}', \
        f'{bnb_account.get_total():,.6f}'
# ********** alert message **********
@app.callback(Output('alert-msg', 'children'),
              Input('update', 'n_intervals'))
def display_value(value):
    """Return a liquidity warning when free BNB drops below 1.0, else an empty string."""
    active_symbol = dfm.dashboard_active_symbol
    free_bnb = dfm.sm.active_sessions[active_symbol.name].am.get_account('BNB').free
    # conditional expression replaces the original if/else branches
    return f'BNB LIQUIDITY ALERT {free_bnb:,.6f}' if free_bnb < 1.0 else ''
# ********** symbol selection buttons *********
@app.callback(Output('button-symbols', 'children'),
              Input('button-symbols', 'n_clicks'),
              )
def on_button_click(n):
    """Cycle the dashboard to the next symbol on click; show the current one otherwise."""
    current = dfm.dashboard_active_symbol.name
    if n is None:
        # initial render: no click has happened yet
        return current
    new_active = dfm.get_next_symbol(symbol_name=current)
    dfm.set_dashboard_active_symbol(symbol_name=new_active)
    return new_active
# Stop buttons
@app.callback(Output('button-stop-cmp', 'children'), Input('button-stop-cmp', 'n_clicks'))
def on_button_click(n):
    """Quit the active session, trading all pending orders at the current market price."""
    if n is not None:
        symbol = dfm.dashboard_active_symbol
        symbol_name = symbol.name
        session = dfm.sm.active_sessions[symbol_name]
        session.helpers.quit_particular_session(
            quit_mode=QuitMode.TRADE_ALL_PENDING,
            session_id=session.session_id,
            symbol=session.symbol,
            cmp=session.cmp,
            iom=session.iom,
            cmp_count=session.cmp_count)
    # button label (always returned so the button keeps its caption)
    return 'STOP-CMP'
@app.callback(Output('button-stop-price', 'children'), Input('button-stop-price', 'n_clicks'))
def on_button_click(n):
    """Quit the active session, placing all pending orders at their own price."""
    if n is not None:
        symbol = dfm.dashboard_active_symbol
        symbol_name = symbol.name
        session = dfm.sm.active_sessions[symbol_name]
        session.helpers.quit_particular_session(
            quit_mode=QuitMode.PLACE_ALL_PENDING,
            session_id=session.session_id,
            symbol=session.symbol,
            cmp=session.cmp,
            iom=session.iom,
            cmp_count=session.cmp_count)
    return 'STOP-PRICE'
@app.callback(Output('button-stop-cancel', 'children'), Input('button-stop-cancel', 'n_clicks'))
def on_button_click(n):
    """Quit the active session, canceling every pending order."""
    if n is not None:
        symbol = dfm.dashboard_active_symbol
        symbol_name = symbol.name
        session = dfm.sm.active_sessions[symbol_name]
        session.helpers.quit_particular_session(
            quit_mode=QuitMode.CANCEL_ALL,
            session_id=session.session_id,
            symbol=session.symbol,
            cmp=session.cmp,
            iom=session.iom,
            cmp_count=session.cmp_count)
    return 'STOP-CANCEL'
@app.callback(Output('button-reboot-global-session', 'children'), Input('button-reboot-global-session', 'n_clicks'))
def on_button_click(n):
    """Place all pending orders for every active session, then reboot the global session."""
    if n is not None:
        # as first step, perform STOP-PRICE actions
        symbol = dfm.dashboard_active_symbol
        symbol_name = symbol.name
        for session in dfm.sm.active_sessions.values():
            # session = dfm.sm.active_sessions[symbol_name]
            session.helpers.quit_particular_session(
                quit_mode=QuitMode.PLACE_ALL_PENDING,
                session_id=session.session_id,
                symbol=session.symbol,
                cmp=session.cmp,
                iom=session.iom,
                cmp_count=session.cmp_count)
        # finish app and it will cause a gunicorn workers reboot (reset-like app)
        dfm.sm.reboot_global_session()
    return 'REBOOT-SESSION'
@app.callback(Output('button-new-pt', 'children'), Input('button-new-pt', 'n_clicks'))
def on_button_click(n):
    """On click, manually create a new perfect trade for the active symbol."""
    if n:
        active_name = dfm.dashboard_active_symbol.name
        dfm.sm.active_sessions[active_name].manually_create_new_pt()
    return 'NEW-PT'
@app.callback(Output('button-increase-cmp', 'children'), Input('button-increase-cmp', 'n_clicks'))
def on_button_click(n):
    """Simulator helper: nudge the active symbol's market price up by 10.0."""
    if n:
        dfm.sm.client_manager.on_button_step(
            symbol_name=dfm.dashboard_active_symbol.name, step=10.0)
    return '+ 10.0 €'
@app.callback(Output('button-decrease-cmp', 'children'), Input('button-decrease-cmp', 'n_clicks'))
def on_button_click(n):
    """Simulator helper: nudge the active symbol's market price down by 10.0."""
    if n:
        dfm.sm.client_manager.on_button_step(
            symbol_name=dfm.dashboard_active_symbol.name, step=-10.0)
    return '- 10.0 €'
# ********** others **********
@app.callback(
    Output('new-table', 'children'),
    Input('update', 'n_intervals')
)
def update_table(timer):
    """Build the pending-orders HTML table, price-sorted descending.

    Fix: the filtered frame is materialized with .copy() — the original
    assigned formatted columns into a boolean-mask slice of df1, which is a
    view and triggers pandas' SettingWithCopyWarning (and can silently fail
    to write). The rendered output is unchanged.
    """
    df = dfm.get_all_orders_df_with_cmp()
    # sort by price, highest first
    df1 = df.sort_values(by=['price'], ascending=False)
    # filter by status for each table (monitor-placed & traded); copy so the
    # formatting assignments below write to an independent frame, not a view
    df_pending = df1[df1.status.isin(['monitor', 'active', 'cmp', 'to_be_traded', 'canceled'])].copy()
    qp = dfm.dashboard_active_symbol.quote_asset().pv()
    df_pending['price'] = df_pending['price'].map(f'{{:,.{qp}f}}'.format)  # two {{ }} to escape { in f-string
    df_pending['total'] = df_pending['total'].map(f'{{:,.{qp}f}}'.format)
    return get_pending_html_table(df=df_pending[['pt_id', 'name', 'price', 'amount', 'total', 'status']])
| xavibenavent/scorpius | src/dashboard/dash_callbacks.py | dash_callbacks.py | py | 20,621 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dashboard.sc_df_manager.DataframeManager",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
},
{
"... |
34576692489 | import argparse
import os
import pickle
from datetime import datetime, timedelta
from matplotlib.dates import YearLocator, DateFormatter, MonthLocator
import numpy
from matplotlib import pyplot as plt
from pandas.io.data import get_data_yahoo
class Investor(object):
    """A single market participant with fixed buy/sell CAPE thresholds.

    The investor is all-or-nothing: she buys every share she can afford when
    the P/E ratio is at or below ``buy_at``, and liquidates her whole position
    when it rises above ``sell_at``.

    :param buy_at float if the pe ratio is less than or equal to this
        value, the investor will buy all the shares she can
    :param sell_at float if the pe ratio is greater than this value, the
        investor will sell all her shares (if any); defaults to ``buy_at``
    :param init_cash float initial cash
    :param shares float initial shares
    :param income float cash increase on call to `get_paid`
    """
    def __init__(self, buy_at, sell_at=None, init_cash=10000., shares=0.,
                 income=2000.):
        self.buy_at = buy_at
        # a missing sell threshold collapses onto the buy threshold
        self.sell_at = buy_at if sell_at is None else sell_at
        self.init_cash = self.cash = init_cash
        self.shares = shares
        self.income = income

    def get_paid(self):
        """Add one period's income to the cash balance."""
        self.cash += self.income

    def get_net_worth(self, market_price):
        """Return cash plus the market value of all held shares."""
        holdings_value = self.shares * market_price
        return self.cash + holdings_value

    def sell_all(self, market_price):
        """Liquidate the whole position into cash at *market_price*."""
        self.cash += market_price * self.shares
        self.shares = 0.

    def buy_all(self, market_price):
        """Spend the entire cash balance on shares at *market_price*."""
        self.shares += self.cash / market_price
        self.cash = 0.

    def react_to_pe(self, pe_ratio, market_price):
        """Buy, sell, or hold depending on where *pe_ratio* sits vs the thresholds."""
        if self.shares and pe_ratio > self.sell_at:
            self.sell_all(market_price)
            return
        if self.cash and pe_ratio <= self.buy_at:
            self.buy_all(market_price)
class CapeValidator(object):
    """
    Compares the performance of a suite of investors with different buy/sell
    thresholds for CAPE ratio.
    :param pe_data_file str csv of pe_data. TODO: write script to download
    :param start_date datetime datetime at which to start analysis
    :param buy_thresholds [float, ...] list of buy thresholds for suite
        of investors. Each entry will instantiate a new investor.
    :param sell_thresholds [float, ...] list of sell thresholds
        corresponding to `buy_thresholds`. If None, will be equal to
        `buy_thresholds`.
    :param end_date datetime datetime at which to stop analysis. If None,
        will go to current day.
    :param index str stock symbol of index to invest in.
    """
    def __init__(self, pe_data_file, start_date, buy_thresholds,
                 sell_thresholds=None, end_date=None, index='^GSPC'):
        if sell_thresholds is None:
            # one None per buy threshold; Investor maps None -> sell_at == buy_at
            sell_thresholds = [None] * len(buy_thresholds)
        if len(buy_thresholds) != len(sell_thresholds):
            raise RuntimeError("Buy and Sell Thresholds must be equal length")
        if end_date is None:
            end_date = datetime.now()
        self.investors = []      # one Investor per threshold pair
        self.pe_array = []       # [[date, cape], ...] restricted to the window
        self.index = index
        self.index_cache = {}    # date -> adjusted close, persisted to disk
        self.load_pe_array(pe_data_file, start_date, end_date)
        self.init_investors(buy_thresholds, sell_thresholds)
        self.load_index_cache()
        # one row per investor, one column per CAPE data point
        size = (len(self.investors), len(self.pe_array))
        self.worth_matrix = numpy.empty(size)
        self.shares_matrix = numpy.empty(size)
        self.cash_matrix = numpy.empty(size)
    @property
    def _cache_filename(self):
        # per-index cache file, e.g. '.cache_^GSPC.pkl'
        return '.cache_{}.pkl'.format(self.index)
    def _parse_pe_date(self, date_str):
        # the date_str is the beginning of the month, but we want the end
        # because the CAPE for a given month is the avg of prices for the
        # entire month
        date0 = datetime.strptime(date_str, '%m/%Y')
        date = date0 + timedelta(27)
        # step forward to the first weekday of the following month
        while date.month == date0.month or date.weekday() > 4:
            date += timedelta(1)
        return date
    def load_pe_array(self, pe_data_file, start_date, end_date):
        """Load the CAPE data from the specified file"""
        with open(pe_data_file) as fp:
            for line in fp:
                line = line.strip()
                if line:
                    # expected row format: 'MM/YYYY,<cape>' — TODO confirm against data file
                    date_str, pe = line.split(',')
                    pe = float(pe)
                    date = self._parse_pe_date(date_str)
                    if start_date <= date <= end_date:
                        self.pe_array.append([date, pe])
    def init_investors(self, buy_thresholds, sell_thresholds):
        """Initialize Investor instances from threshold lists"""
        for b, s in zip(buy_thresholds, sell_thresholds):
            self.investors.append(Investor(b, s))
    def load_index_cache(self):
        """Load the cache for the specified index, if available"""
        if os.path.exists(self._cache_filename):
            with open(self._cache_filename, 'rb') as fp:
                self.index_cache = pickle.load(fp)
    def save_index_cache(self):
        """Save the cache to disk"""
        with open(self._cache_filename, 'wb') as fp:
            pickle.dump(self.index_cache, fp)
    def _get_market_price(self, date, try_next=2):
        """Get the market price for the given date from yahoo or the cache.
        Tries to account for holidays by stepping backwards through time when
        the specified date is not found. Intermittent network errors may cause
        unexpected results here.
        """
        # snap weekend dates back to the previous Friday
        while date.weekday() > 4:
            date -= timedelta(1)
        if date in self.index_cache:
            return self.index_cache[date]
        try:
            df = get_data_yahoo(self.index, date, date)
            price = df['Adj Close'][0]
        except (IndexError, OSError):
            # try to account for holidays and whatnot: recurse on the
            # previous day, at most `try_next` more times
            if try_next:
                date1 = date - timedelta(1)
                return self._get_market_price(date1, try_next - 1)
            else:  # pragma no cover
                raise
        self.index_cache[date] = price
        return price
    def calculate_worth_vs_time(self):
        """Calculate the worth, shares, and cash of all the investors across
        the specified time interval
        """
        i = 0  # column index: one per CAPE data point
        for date, pe_ratio in self.pe_array:
            market_price = self._get_market_price(date)
            for j, investor in enumerate(self.investors):
                investor.get_paid()
                investor.react_to_pe(pe_ratio, market_price)
                self.worth_matrix[j][i] = investor.get_net_worth(market_price)
                self.shares_matrix[j][i] = investor.shares
                self.cash_matrix[j][i] = investor.cash
            i += 1
        # persist prices fetched during this run
        self.save_index_cache()
    def plot_worth_vs_time(self, names=None):
        """Plot the worth of each investor vs. time. If names is specified,
        will use these names in the legend. Otherwise, will name the investors
        based off their thresholds.
        """
        if names is None:
            names = [
                'Investor ({:0.2f},{:0.2f})'.format(inv.buy_at, inv.sell_at)
                for inv in self.investors]
        dates = [x[0] for x in self.pe_array]
        year = YearLocator()
        date_fmt = DateFormatter('%Y')
        plt.xkcd()
        # investor worth plots
        fig = plt.figure()
        ax = fig.gca()
        lines = []
        for i in range(len(self.investors)):
            result = ax.plot_date(dates, self.worth_matrix[i], '-')
            lines.append(result[0])
        ax.xaxis.set_major_locator(year)
        ax.xaxis.set_major_formatter(date_fmt)
        # ax.xaxis.set_minor_formatter(MonthLocator())
        ax.autoscale_view()
        ax.legend(lines, names, 'upper left')
        fig.autofmt_xdate()
        return fig
    def plot_pe_ratio(self):
        """
        Plot the CAPE values for the time interval in question.
        """
        dates = [x[0] for x in self.pe_array]
        year = YearLocator()
        date_fmt = DateFormatter('%Y')
        fig = plt.figure()
        ax_pe = fig.gca()
        ax_pe.plot_date(dates, [x[1] for x in self.pe_array], '-')
        ax_pe.xaxis.set_major_locator(year)
        ax_pe.xaxis.set_major_formatter(date_fmt)
        ax_pe.autoscale_view()
        ax_pe.set_title('PE Ratio vs. Time')
        fig.autofmt_xdate()
        return fig
if __name__ == "__main__":
    # CLI entry point: build the validator from the flags, run the simulation,
    # and show the worth-vs-time plot.
    # Fix: removed the leftover `import ipdb; ipdb.set_trace()` debugger
    # breakpoint that halted every run.
    parser = argparse.ArgumentParser(description="CAPE Value determination")
    # default suite: buy/sell thresholds 16..25 plus a 1000 'never sell' investor
    default_thresholds = ','.join(str(i) for i in range(16, 26)) + ',1000'
    parser.add_argument('-t', '--buy_thresholds', default=default_thresholds)
    parser.add_argument('--sell_thresholds', default=None)
    parser.add_argument('--pe_file', default='pe_data.csv')
    parser.add_argument('--index', default='^GSPC')
    parser.add_argument('--start_date', default='01/1980')
    parser.add_argument('--end_date', default=None)
    args = parser.parse_args()
    buys = [float(b) for b in args.buy_thresholds.split(',')]
    sells = args.sell_thresholds
    if sells:
        sells = [float(b) for b in sells.split(',')]
    # dates are given as MM/YYYY; end date defaults to now
    d0 = datetime.strptime(args.start_date, '%m/%Y')
    if args.end_date:
        d1 = datetime.strptime(args.end_date, '%m/%Y')
    else:
        d1 = datetime.now()
    validator = CapeValidator(args.pe_file, d0, buys, sells, d1, args.index)
    validator.calculate_worth_vs_time()
    validator.plot_worth_vs_time()
    # validator.plot_pe_ratio()
    plt.show()
| wgaggioli/capeval | capeval.py | capeval.py | py | 9,742 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "numpy.empty",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
... |
11011921362 | from django.urls import path
from app_feria.views import *
from django.contrib.auth.views import LogoutView
from app_feria import views
"""
urlpatterns = [
path('', inicio, name="Inicio"),
path('vuelo/', vuelo),
path('personal/', personal),
path('pasajero/', pasajero),
path('sobrenostros', sobrenosotros, name="Sobrenosotros"),
#URL DE CREACION
path('formulario1/', formulariovuelo, name="Crear Vuelos"),
path('formulario2/', formulariopersonal, name="Crear Personal"),
path('formulario3/', formulariopasajero, name="Crear Pasajeros"),
#URL DE LEER
path('leerVuelos/',leerVuelos, name="Leer Vuelos"),
path('leerPersonal/',leerPersonal, name="Leer Personal"),
path('leerPasajeros/',leerPasajero, name="Leer Pasajeros"),
#URL DE EDICION
path('editaVuelo/<numVuelo>/', editaVuelo, name = "EditaVuelo"),
path('editarPersonal/<profesionPersonal>/', editarPersonal, name = "EditarPersonal"),
path('editarPasajero/<idvueloPasajero>/', editarPasajero, name = "EditarPasajero"),
#URL DE ELIMINACION
path('eliminaVuelo/<numVuelo>/', eliminaVuelo, name = "EliminaVuelo"),
path('eliminaPersonal/<profesionPersonal>/', eliminaPersonal, name = "EliminaPersonal"),
path('eliminaPasajero/<idvueloPasajero>/', eliminaPasajero, name = "EliminaPasajero"),
#URL DE BUSQUEDAS
path('bus/', bus, name="Buscar"),
path('buscarVuelos/', busquedaVuelos, name="Buscar Vuelos"),
path('buscarPersonal/', busquedaPersonal, name="Buscar Personal"),
path('buscarPasajero/', busquedaPasajero, name="Buscar Pasajeros"),
##
path('buscar_pasa/',buscar_pasa),
path('buscar_per/',buscar_per),
path('buscar/',buscar),
#URL DE LOGIN Y LOGOUT
path('login/',InicioSesion, name="Login"),
path('registro/',registro, name="Registrarse"),
path('logout/', LogoutView.as_view(template_name="app_feria/logout.html"), name="Logout"),
#URL DE EDICIONUSUARIO
path('editUser/',editarUsuario, name="Editar Usuario"),
path('avatar/', agregarAvatar, name="Avatar"),
]
"""
# URL routing table for the clothing-store app: CRUD views per product family
# (jeans, remeras, camisas, calzado, invernal, pantalones, camperas, "todo x 100"),
# plus search, auth and user-edit routes.
urlpatterns = [
    path('', inicio, name="Inicio"),
    path('editar_registro/', editar_registro, name="Editarosalir"),
    path('registro_inicio/', registro_inicio, name="Iniciar_o_registrar"),
    path('sobrenostros/', sobrenosotros, name="Sobrenosotros"),
    path('jean_detail/<int:codigo>/', views.jean_detail, name="Detalle"),
    path('remera_detail/<int:codigo>/', views.remera_detail, name="Detalle_remera"),
    path('camisa_detail/<int:codigo>/', views.camisa_detail, name="Detalle_camisa"),
    path('calzado_detail/<int:codigo>/', views.calzado_detail, name="Detalle_calzado"),
    path('todo100_detail/<int:codigo>/', views.todo100_detail, name="Detalle_todo100"),
    path('invernal_detail/<int:codigo>/', views.invernal_detail, name="Detalle_invernal"),
    path('pantalon_detail/<int:codigo>/', views.pantalon_detail, name="Detalle_pantalon"),
    path('campera_detail/<int:codigo>/', views.campera_detail, name="Detalle_campera"),
    #URL DE CREACION
    path('formulario1/', formulariojean, name="Crear Jeans"),
    path('formulario2/', formularioremera, name="Crear Remera"),
    path('formulario3/', formulariocamisa, name="Crear Camisa"),
    path('formulario4/', formulariotodo, name="Crear todo por 100"),
    path('formulario5/', formulariocalzado, name="Crear Calzado"),
    path('formulario6/', formularioinvernal, name="Crear Invernal"),
    path('formulario7/', formulariopantalon, name="Crear Pantalon"),
    path('formulario8/', formulariocampera, name="Crear Campera"),
    #URL DE LEER
    # NOTE(review): 'leerJeans' and 'leerCampera' lack the trailing slash the
    # sibling routes have — confirm whether links rely on these exact paths
    # before normalizing.
    path('leerJeans',leerJeans, name="Leer Jeans"),
    path('leerRemera/',leerRemera, name="Leer Remera"),
    path('leerCamisas/',leerCamisa, name="Leer Camisas"),
    path('leerTodo100/',leerTodo, name="Leer todo por 100"),
    path('leerCalzado/',leerCalzado, name="Leer Calzado"),
    path('leerInvernal/',leerInvernal, name="Leer Invernal"),
    path('leerPantalon/',leerPantalon, name="Leer Pantalon"),
    path('leerCampera',leerCampera, name="Leer Campera"),
    #URL DE EDICION
    path('editaJean/<numJean>/', editaJean, name = "EditaJean"),
    path('editarRemera/<generoRemera>/', editarRemera, name = "EditarRemera"),
    path('editarCamisa/<generoCamisa>/', editarCamisa, name = "EditarCamisa"),
    path('editarTodo/<generoTodo>/', editarTodo, name = "Editar todo por 100"),
    path('editarCalzado/<generoCalzado>/', editarCalzado, name = "EditarCalzado"),
    path('editarInvernal/<generoInvernal>/', editarInvernal, name = "EditarInvernal"),
    path('editarPantalon/<generoPantalon>/', editarPantalon, name = "EditarPantalon"),
    path('editarCampera/<generoCampera>/', editarCampera, name = "EditarCampera"),
    #URL DE ELIMINACION
    path('eliminaJean/<numJean>/', eliminaJean, name = "EliminaJean"),
    path('eliminaRemera/<generoRemera>/', eliminaRemera, name = "EliminaRemera"),
    path('eliminaCamisa/<generoCamisa>/', eliminaCamisa, name = "EliminaCamisa"),
    path('eliminaTodo/<generoTodo>/', eliminaTodo, name = "Elimina todo por 100"),
    path('eliminaCalzado/<generoCalzado>/', eliminaCalzado, name = "EliminaCalzado"),
    path('eliminaInvernal/<generoInvernal>/', eliminaInvernal, name = "EliminaInvernal"),
    path('eliminaPantalon/<generoPantalon>/', eliminaPantalon, name = "EliminaPantalon"),
    path('eliminaCampera/<generoCampera>/', eliminaCampera, name = "EliminaCampera"),
    #URL DE BUSQUEDAS
    path('bus/', bus, name="Buscar"),
    path('buscarJeans/', busquedaJeans, name="Buscar Jeans"),
    path('buscarRemera/', busquedaRemera, name="Buscar Remera"),
    path('buscarCamisa/', busquedaCamisa, name="Buscar Camisa"),
    path('buscarCalzado/', busquedaCalzado, name="Buscar Calzado"),
    path('buscarInvernal/', busquedaInvernal, name="Buscar Invernal"),
    path('buscarPantalon/', busquedaPantalon, name="Buscar Pantalon"),
    path('buscarCampera/', busquedaCampera, name="Buscar Campera"),
    ##
    path('buscar_campe/',buscar_campe),
    path('buscar_panta/',buscar_panta),
    path('buscar_inve/',buscar_inve),
    path('buscar_calza/',buscar_calza),
    path('buscar_cami/',buscar_cami),
    path('buscar_rem/',buscar_rem),
    path('buscar/',buscar),
    #URL DE LOGIN Y LOGOUT
    path('login/',InicioSesion, name="Login"),
    path('registro/',registro, name="Registrarse"),
    path('logout/', LogoutView.as_view(template_name="app_feria/logout.html"), name="Logout"),
    #URL DE EDICIONUSUARIO
    path('editUser/',editarUsuario, name="Editar Usuario"),
]
"""
<li align="center"; class="nav-item"><a class="m-0 text-center text-white" href="{% url 'Sobrenosotros' %}">About</a></li>
""" | Luciano02-web/feriaweb | app_feria/urls.py | urls.py | py | 6,629 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
74270410272 | ## modified the code: args.batch_size_val == 1
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from visdom_logger import VisdomLogger
from collections import defaultdict
from .dataset.dataset import get_val_loader
from .util import AverageMeter, batch_intersectionAndUnionGPU, get_model_dir, main_process
from .util import find_free_port, setup, cleanup, to_one_hot, intersectionAndUnionGPU
from .classifier import Classifier
from .model.pspnet import get_model
import torch.distributed as dist
from tqdm import tqdm
from .util import load_cfg_from_cfg_file, merge_cfg_from_list
import argparse
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.multiprocessing as mp
import time
from .visu import make_episode_visualization
from typing import Tuple
def parse_args():
    """Parse CLI arguments and build the merged experiment config.

    Fix: the original was annotated ``-> None`` although it returns the
    config object; the misleading annotation is removed.

    Returns the config loaded from --config, optionally overridden by the
    trailing KEY VALUE pairs captured by --opts.
    """
    parser = argparse.ArgumentParser(description='Testing')
    parser.add_argument('--config', type=str, required=True, help='config file')
    # REMAINDER swallows every remaining CLI token as config overrides
    parser.add_argument('--opts', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    assert args.config is not None
    cfg = load_cfg_from_cfg_file(args.config)
    if args.opts is not None:
        cfg = merge_cfg_from_list(cfg, args.opts)
    return cfg
def main_worker(rank: int,
                world_size: int,
                args: argparse.Namespace) -> None:
    """Per-process DDP entry point: build/load the model, run episodic validation.

    :param rank: this process's rank (also used as its CUDA device id)
    :param world_size: total number of DDP processes
    :param args: experiment config produced by parse_args()
    """
    print(f"==> Running DDP checkpoint example on rank {rank}.")
    setup(args, rank, world_size)
    if args.manual_seed is not None:
        # deterministic mode; per-rank offset keeps streams distinct across workers
        cudnn.benchmark = False
        cudnn.deterministic = True
        torch.cuda.manual_seed(args.manual_seed + rank)
        np.random.seed(args.manual_seed + rank)
        torch.manual_seed(args.manual_seed + rank)
        torch.cuda.manual_seed_all(args.manual_seed + rank)
        random.seed(args.manual_seed + rank)
    # ========== Model ==========
    model = get_model(args).to(rank)
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = DDP(model, device_ids=[rank])
    root = get_model_dir(args)
    if args.ckpt_used is not None:
        filepath = os.path.join(root, f'{args.ckpt_used}.pth')
        assert os.path.isfile(filepath), filepath
        print("=> loading weight '{}'".format(filepath))
        checkpoint = torch.load(filepath)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded weight '{}'".format(filepath))
    else:
        print("=> Not loading anything")
    # ========== Data ==========
    episodic_val_loader, _ = get_val_loader(args)
    # ========== Test ==========
    val_Iou = episodic_validate(args=args,
                                val_loader=episodic_val_loader,
                                model=model,
                                use_callback=(args.visdom_port != -1),
                                suffix=f'test')
    if args.distributed:
        # average the IoU across all workers
        dist.all_reduce(val_Iou)
        val_Iou /= world_size
    cleanup()
def episodic_validate(args: argparse.Namespace,
val_loader: torch.utils.data.DataLoader,
model: DDP,
use_callback: bool,
suffix: str = 'test') -> Tuple[torch.tensor, torch.tensor]:
print('==> Start testing')
model.eval()
nb_episodes = int(args.test_num)
# ========== Metrics initialization ==========
H, W = args.image_size, args.image_size
c = model.module.bottleneck_dim
if args.image_size == 473:
h, w = 60, 60
else:
h, w = model.module.feature_res # (53, 53)
runtimes = torch.zeros(args.n_runs)
deltas_init = torch.zeros((args.n_runs, nb_episodes, 1))
deltas_final = torch.zeros((args.n_runs, nb_episodes, 1))
val_IoUs0 = np.zeros(args.n_runs)
# ========== Perform the runs ==========
for run in tqdm(range(args.n_runs)):
# =============== Initialize the metric dictionaries ===============
loss_meter = AverageMeter()
iter_num = 0
cls_intersection0 = defaultdict(int) # Default value is 0
cls_union0 = defaultdict(int)
IoU0 = defaultdict(int)
cls_intersection1 = defaultdict(int) # Default value is 0
cls_union1 = defaultdict(int)
IoU1 = defaultdict(int)
# =============== episode = group of tasks ===============
runtime = 0
for e in tqdm(range(nb_episodes)):
t0 = time.time()
iter_num += 1
try:
q_img, q_label, s_img, s_label, subcls, _, _ = iter_loader.next()
except:
iter_loader = iter(val_loader)
q_img, q_label, s_img, s_label, subcls, _, _ = iter_loader.next()
iter_num += 1
q_img = q_img.to(dist.get_rank(), non_blocking=True) # [1, 3, h, w]
q_label = q_label.to(dist.get_rank(), non_blocking=True) # [1, h, w]
s_img = s_img.to(dist.get_rank(), non_blocking=True) # [1, 1, 3, h, w]
s_label = s_label.to(dist.get_rank(), non_blocking=True) # [1, 1, h, w]
classes = [[class_.item() for class_ in subcls]] # All classes considered in the tasks, list of list
def fit_model(s_img_, s_label_, q_img_, q_label_, classes):
features_s = torch.zeros(1, args.shot * args.meta_aug, c, h, w).to(dist.get_rank()) # [1, 2, c, h, w]
features_q = torch.zeros(1, 1, c, h, w).to(dist.get_rank()) # [1, 1, c, h, w]
gt_s = 255 * torch.ones(1, args.shot * args.meta_aug, args.image_size, args.image_size).long().to(dist.get_rank()) # [1, 2, h, w]
gt_q = 255 * torch.ones(1, 1, args.image_size, args.image_size).long().to(dist.get_rank()) # [1, 1, h, w]
n_shots = torch.zeros(1).to(dist.get_rank())
# =========== Generate tasks and extract features for each task ===============
with torch.no_grad():
f_s = model.module.extract_features(s_img_.squeeze(0)) # [shot, ch, h, w]
f_q = model.module.extract_features(q_img_) # [1, ch, h, w]
shot = f_s.size(0)
n_shots[0] = shot
features_s[0, :shot] = f_s.detach()
features_q[0] = f_q.detach()
gt_s[0, :shot] = s_label_ # [1, 2/shot, h, w] ====
gt_q[i, 0] = q_label_ # [1, h, w] ====
# =========== Normalize features along channel dimension ===============
if args.norm_feat:
features_s = F.normalize(features_s, dim=2)
features_q = F.normalize(features_q, dim=2)
# =========== Create a callback is args.visdom_port != -1 ===============
callback = VisdomLogger(port=args.visdom_port) if use_callback else None
# =========== Initialize the classifier + prototypes + F/B parameter Π ===============
classifier = Classifier(args)
classifier.init_prototypes(features_s, features_q, gt_s, gt_q, classes, callback)
batch_deltas = classifier.compute_FB_param(features_q=features_q, gt_q=gt_q)
deltas_init[run, e, :] = batch_deltas.cpu()
# =========== Perform RePRI inference ===============
batch_deltas = classifier.RePRI(features_s, features_q, gt_s, gt_q, classes, n_shots, callback)
deltas_final[run, e, :] = batch_deltas
logits = classifier.get_logits(features_q) # [n_tasks, shot, h, w]
logits = F.interpolate(logits, size=(H, W), mode='bilinear', align_corners=True)
probas = classifier.get_probas(logits).detach()
intersection, union, _ = batch_intersectionAndUnionGPU(probas, gt_q, 2) # [n_tasks, shot, num_class]
intersection, union = intersection.cpu(), union.cpu()
return intersection, union
intersection0, union0 = fit_model(s_img[:,:1], s_label[:, :1], q_img, q_label, classes) # s_img [1, 1, 3, h, w] s_label # [1, 1, h, w]
intersection1, union1 = fit_model(s_img, s_label, q_img, q_label, classes)
runtime += time.time() - t0
# ================== Log metrics ==================
for i, task_classes in enumerate(classes):
for j, class_ in enumerate(task_classes):
cls_intersection0[class_] += intersection0[i, 0, j + 1] # Do not count background
cls_union0[class_] += union0[i, 0, j + 1]
cls_intersection1[class_] += intersection1[i, 0, j + 1] # Do not count background
cls_union1[class_] += union1[i, 0, j + 1]
for class_ in cls_union0:
IoU0[class_] = cls_intersection0[class_] / (cls_union0[class_] + 1e-10)
IoU1[class_] = cls_intersection1[class_] / (cls_union1[class_] + 1e-10)
if (iter_num % 200 == 0):
mIoU0 = np.mean([IoU0[i] for i in IoU0])
mIoU1 = np.mean([IoU1[i] for i in IoU1])
print('Test: [{}/{}] '
'mIoU0 {:.4f}, mIoU1 {:.4f}'.format(iter_num, args.test_num, mIoU0, mIoU1))
# ================== Visualization ==================
runtimes[run] = runtime
mIoU0 = np.mean(list(IoU0.values()))
mIoU1 = np.mean(list(IoU1.values()))
print('mIoU---Val result: mIoU0 {:.4f}, mIoU1 {:.4f}.'.format(mIoU0, mIoU1))
for class_ in cls_union0:
print("Class {} : IoU0 {:.4f}, IoU1 {:.4f}".format(class_, IoU0[class_], IoU1[class_]))
val_IoUs0[run] = mIoU0
# ================== Save metrics ==================
if args.save_oracle:
root = os.path.join('plots', 'oracle')
os.makedirs(root, exist_ok=True)
np.save(os.path.join(root, 'delta_init.npy'), deltas_init.numpy())
np.save(os.path.join(root, 'delta_final.npy'), deltas_final.numpy())
print('Average mIoU over {} runs --- {:.4f}.'.format(args.n_runs, val_IoUs0.mean()))
print('Average runtime / run --- {:.4f}.'.format(runtimes.mean()))
return val_IoUs0.mean()
def standard_validate(args: argparse.Namespace,
val_loader: torch.utils.data.DataLoader,
model: DDP,
use_callback: bool,
suffix: str = 'test') -> Tuple[torch.tensor, torch.tensor]:
print('==> Standard validation')
model.eval()
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=255)
iterable_val_loader = iter(val_loader)
bar = tqdm(range(len(iterable_val_loader)))
loss = 0.
intersections = torch.zeros(args.num_classes_tr).to(dist.get_rank())
unions = torch.zeros(args.num_classes_tr).to(dist.get_rank())
with torch.no_grad():
for i in bar:
images, gt = iterable_val_loader.next()
images = images.to(dist.get_rank(), non_blocking=True)
gt = gt.to(dist.get_rank(), non_blocking=True)
logits = model(images).detach()
loss += loss_fn(logits, gt)
intersection, union, _ = intersectionAndUnionGPU(logits.argmax(1),
gt,
args.num_classes_tr,
255)
intersections += intersection
unions += union
loss /= len(val_loader.dataset)
if args.distributed:
dist.all_reduce(loss)
dist.all_reduce(intersections)
dist.all_reduce(unions)
mIoU = (intersections / (unions + 1e-10)).mean()
loss /= dist.get_world_size()
return mIoU, loss
if __name__ == "__main__":
args = parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpus)
if args.debug:
args.test_num = 500
args.n_runs = 2
world_size = len(args.gpus)
distributed = world_size > 1
args.distributed = distributed
args.port = find_free_port()
mp.spawn(main_worker,
args=(world_size, args),
nprocs=world_size,
join=True) | glbreeze/RePRI_FSS | src/test_ida.py | test_ida.py | py | 12,481 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "argparse.REMAINDER",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "util.load_cfg_from_cfg_file",
"line_number": 36,
"usage_type": "call"
},
{
"api_n... |
37932378891 | import csv
import datetime
FILE_NAME_READ = "Before Eod.csv"
FILE_NAME_WRITE = "After Eod.csv"
FIELDS_NAME = [
"id",
"Nama",
"Age",
"Balanced",
"No 2b Thread-No",
"No 3 Thread-No",
"Previous Balanced",
"Average Balanced",
"No 1 Thread-No",
"Free Transfer",
"No 2a Thread-No",
]
# This will handle question no 1.
def rataanBalance(currentBalance, previousBalance):
return (currentBalance + previousBalance) / 2
# This will handle question no 3.
def additionalBalanceToTop100(users):
for user in users[:100]:
currentBalance = user[FIELDS_NAME[3]]
user[FIELDS_NAME[3]] = currentBalance + 10
def getDataFromCSV(filename):
print(f"Start Read Data From {FILE_NAME_READ} at {datetime.datetime.now()}")
users = []
with open(filename, 'r') as csv_file:
dataFromCSV = csv.DictReader(csv_file, delimiter=';')
for row in dataFromCSV:
balance = int(row['Balanced'])
prevBalance = int(row['Previous Balanced'])
freeTransfer = int(row['Free Transfer'])
users.append(
{
FIELDS_NAME[0]: row[FIELDS_NAME[0]],
FIELDS_NAME[1]: row[FIELDS_NAME[1]],
FIELDS_NAME[2]: row[FIELDS_NAME[2]],
FIELDS_NAME[3]: 25 + balance if balance > 150 else balance,
FIELDS_NAME[4]: '',
FIELDS_NAME[5]: '',
FIELDS_NAME[6]: prevBalance,
FIELDS_NAME[7]: rataanBalance(balance, prevBalance),
FIELDS_NAME[8]: '',
FIELDS_NAME[9]: 5 + freeTransfer if 100 <= balance <= 150 else freeTransfer,
FIELDS_NAME[10]: '',
}
)
additionalBalanceToTop100(users)
print(f"Finish Read Data from {FILE_NAME_READ} at {datetime.datetime.now()}")
return users
def writeDataToCSV(filename, users):
print(f"Start Write Data to {FILE_NAME_WRITE} at {datetime.datetime.now()}")
with open(filename, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=FIELDS_NAME)
writer.writeheader()
writer.writerows(users)
print(f"Finish Write Data to {FILE_NAME_WRITE} at {datetime.datetime.now()}")
if __name__ == '__main__':
users = getDataFromCSV(FILE_NAME_READ)
writeDataToCSV(FILE_NAME_WRITE, users) | ikhwankhaliddd/Test-Techinical-ALAMI-Backend | Python Solution/without_thread.py | without_thread.py | py | 2,404 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "csv.DictReader",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.... |
4422201464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for :mod:`orion.algo.evolution_es`."""
import copy
import hashlib
import numpy as np
import pytest
from orion.algo.evolution_es import BracketEVES, EvolutionES, compute_budgets
from orion.algo.space import Fidelity, Real, Space
@pytest.fixture
def space():
"""Create a Space with a real dimension and a fidelity value."""
space = Space()
space.register(Real("lr", "uniform", 0, 1))
space.register(Fidelity("epoch", 1, 9, 1))
return space
@pytest.fixture
def space1():
"""Create a Space with two real dimensions and a fidelity value."""
space = Space()
space.register(Real("lr", "uniform", 0, 1))
space.register(Real("weight_decay", "uniform", 0, 1))
space.register(Fidelity("epoch", 1, 8, 2))
return space
@pytest.fixture
def space2():
"""Create a Space with two real dimensions."""
space = Space()
space.register(Real("lr", "uniform", 0, 1))
space.register(Real("weight_decay", "uniform", 0, 1))
return space
@pytest.fixture
def budgets():
"""Return a configuration for a bracket."""
return [(30, 4), (30, 5), (30, 6)]
@pytest.fixture
def evolution(space1):
"""Return an instance of EvolutionES."""
return EvolutionES(space1, repetitions=1, nums_population=4)
@pytest.fixture
def bracket(budgets, evolution, space1):
"""Return a `Bracket` instance configured with `b_config`."""
return BracketEVES(evolution, budgets, 1, space1)
@pytest.fixture
def evolution_customer_mutate(space1):
"""Return an instance of EvolutionES."""
return EvolutionES(
space1,
repetitions=1,
nums_population=4,
mutate="orion.core.utils.tests.customized_mutate_example",
)
@pytest.fixture
def rung_0():
"""Create fake points and objectives for rung 0."""
points = np.linspace(0, 8, 9)
return dict(
n_trials=9,
resources=1,
results={
hashlib.md5(str([point]).encode("utf-8")).hexdigest(): (point, (1, point))
for point in points
},
)
@pytest.fixture
def rung_1(rung_0):
"""Create fake points and objectives for rung 1."""
values = map(
lambda v: (v[0], (3, v[0])), list(sorted(rung_0["results"].values()))[:3]
)
return dict(
n_trials=3,
resources=3,
results={
hashlib.md5(str([value[0]]).encode("utf-8")).hexdigest(): value
for value in values
},
)
@pytest.fixture
def rung_2(rung_1):
"""Create fake points and objectives for rung 2."""
values = map(
lambda v: (v[0], (9, v[0])), list(sorted(rung_1["results"].values()))[:1]
)
return dict(
n_trials=1,
resources=9,
results={
hashlib.md5(str([value[0]]).encode("utf-8")).hexdigest(): value
for value in values
},
)
@pytest.fixture
def rung_3():
"""Create fake points and objectives for rung 3."""
points = np.linspace(1, 4, 4)
return dict(
n_trials=4,
resources=1,
results={
hashlib.md5(str([point]).encode("utf-8")).hexdigest(): (
point,
(np.power(2, (point - 1)), 1.0 / point, 1.0 / (point * point)),
)
for point in points
},
)
@pytest.fixture
def rung_4():
"""Create duplicated fake points and objectives for rung 4."""
points = np.linspace(1, 4, 4)
return dict(
n_trials=4,
resources=1,
results={
hashlib.md5(str([point]).encode("utf-8")).hexdigest(): (
point,
(1, point // 2, point // 2),
)
for point in points
},
)
def test_compute_budgets():
"""Verify proper computation of budgets on a logarithmic scale"""
# Check typical values
assert compute_budgets(1, 3, 1, 2, 1) == [[(2, 1), (2, 2), (2, 3)]]
assert compute_budgets(1, 4, 2, 4, 2) == [[(4, 1), (4, 2), (4, 4)]]
def test_customized_mutate_population(space1, rung_3, budgets):
"""Verify customized mutated candidates is generated correctly."""
customerized_dict = {
"function": "orion.testing.state.customized_mutate_example",
"multiply_factor": 2.0,
"add_factor": 1,
}
algo = EvolutionES(
space1, repetitions=1, nums_population=4, mutate=customerized_dict
)
algo.brackets[0] = BracketEVES(algo, budgets, 1, space1)
red_team = [0, 2]
blue_team = [1, 3]
population_range = 4
for i in range(4):
for j in [1, 2]:
algo.brackets[0].eves.population[j][i] = list(rung_3["results"].values())[
i
][1][j]
algo.brackets[0].eves.performance[i] = list(rung_3["results"].values())[i][0]
org_data = np.stack(
(
list(algo.brackets[0].eves.population.values())[0],
list(algo.brackets[0].eves.population.values())[1],
),
axis=0,
).T
org_data = copy.deepcopy(org_data)
algo.brackets[0]._mutate_population(
red_team, blue_team, rung_3["results"], population_range
)
mutated_data = np.stack(
(
list(algo.brackets[0].eves.population.values())[0],
list(algo.brackets[0].eves.population.values())[1],
),
axis=0,
).T
# Winner team will be [0, 2], so [0, 2] will be remained, [1, 3] will be mutated.
assert org_data.shape == mutated_data.shape
assert (mutated_data[0] == org_data[0]).all()
assert (mutated_data[2] == org_data[2]).all()
assert (mutated_data[1] != org_data[1]).any()
assert (mutated_data[3] != org_data[3]).any()
assert (mutated_data[1] != org_data[0]).any()
assert (mutated_data[3] != org_data[2]).any()
# For each individual, mutation occurs in only one dimension chosen from two.
# Customized test mutation function is divided by 2 for real type.
if mutated_data[1][0] == org_data[0][0] / customerized_dict["multiply_factor"]:
assert mutated_data[1][1] == org_data[0][1]
else:
assert (
mutated_data[1][1] == org_data[0][1] / customerized_dict["multiply_factor"]
)
if mutated_data[3][0] == org_data[2][0] / customerized_dict["multiply_factor"]:
assert mutated_data[3][1] == org_data[2][1]
else:
assert (
mutated_data[3][1] == org_data[2][1] / customerized_dict["multiply_factor"]
)
class TestEvolutionES:
"""Tests for the algo Evolution."""
def test_register(self, evolution, bracket, rung_0, rung_1):
"""Check that a point is registered inside the bracket."""
evolution.brackets = [bracket]
bracket.hyperband = evolution
bracket.eves = evolution
bracket.rungs = [rung_0, rung_1]
point = (1, 0.0)
point_hash = hashlib.md5(str([0.0]).encode("utf-8")).hexdigest()
evolution.observe([point], [{"objective": 0.0}])
assert len(bracket.rungs[0])
assert point_hash in bracket.rungs[0]["results"]
assert (0.0, point) == bracket.rungs[0]["results"][point_hash]
class TestBracketEVES:
"""Tests for `BracketEVES` class.."""
def test_get_teams(self, bracket, rung_3):
"""Test that correct team is promoted."""
bracket.rungs[0] = rung_3
rung, population_range, red_team, blue_team = bracket._get_teams(0)
assert len(list(rung.values())) == 4
assert bracket.search_space_remove_fidelity == [1, 2]
assert population_range == 4
assert set(red_team).union(set(blue_team)) == {0, 1, 2, 3}
assert set(red_team).intersection(set(blue_team)) == set()
def test_mutate_population(self, bracket, rung_3):
"""Verify mutated candidates is generated correctly."""
red_team = [0, 2]
blue_team = [1, 3]
population_range = 4
for i in range(4):
for j in [1, 2]:
bracket.eves.population[j][i] = list(rung_3["results"].values())[i][1][
j
]
bracket.eves.performance[i] = list(rung_3["results"].values())[i][0]
org_data = np.stack(
(
list(bracket.eves.population.values())[0],
list(bracket.eves.population.values())[1],
),
axis=0,
).T
org_data = copy.deepcopy(org_data)
bracket._mutate_population(
red_team, blue_team, rung_3["results"], population_range
)
mutated_data = np.stack(
(
list(bracket.eves.population.values())[0],
list(bracket.eves.population.values())[1],
),
axis=0,
).T
# Winner team will be [0, 2], so [0, 2] will be remained, [1, 3] will be mutated.
assert org_data.shape == mutated_data.shape
assert (mutated_data[0] == org_data[0]).all()
assert (mutated_data[2] == org_data[2]).all()
assert (mutated_data[1] != org_data[1]).any()
assert (mutated_data[3] != org_data[3]).any()
assert (mutated_data[1] != org_data[0]).any()
assert (mutated_data[3] != org_data[2]).any()
# For each individual, mutation occurs in only one dimension chosen from two.
if mutated_data[1][0] != org_data[0][0]:
assert mutated_data[1][1] == org_data[0][1]
else:
assert mutated_data[1][1] != org_data[0][1]
if mutated_data[3][0] != org_data[2][0]:
assert mutated_data[3][1] == org_data[2][1]
else:
assert mutated_data[3][1] != org_data[2][1]
def test_duplicated_mutated_population(self, bracket, rung_4):
"""Verify duplicated candidates can be found and processed correctly."""
red_team = [0, 2]
blue_team = [0, 2] # no mutate occur at first.
population_range = 4
for i in range(4):
for j in [1, 2]:
bracket.eves.population[j][i] = list(rung_4["results"].values())[i][1][
j
]
points, nums_all_equal = bracket._mutate_population(
red_team, blue_team, rung_4["results"], population_range
)
# In this case, duplication will occur, and we can make it mutate one more time.
# The points 1 and 2 should be different, while one of nums_all_equal should be 1.
if points[1][1] != points[2][1]:
assert points[1][2] == points[2][2]
else:
assert points[1][2] != points[2][2]
assert nums_all_equal[0] == 0
assert nums_all_equal[1] == 0
assert nums_all_equal[2] == 1
assert nums_all_equal[3] == 0
def test_mutate_points(self, bracket, rung_3):
"""Test that correct point is promoted."""
red_team = [0, 2]
blue_team = [0, 2]
population_range = 4
for i in range(4):
for j in [1, 2]:
bracket.eves.population[j][i] = list(rung_3["results"].values())[i][1][
j
]
points, nums_all_equal = bracket._mutate_population(
red_team, blue_team, rung_3["results"], population_range
)
assert points[0] == (1.0, 1.0, 1.0)
assert points[1] == (2, 1.0 / 2, 1.0 / 4)
assert (nums_all_equal == 0).all()
| lebrice/orion | tests/unittests/algo/test_evolution_es.py | test_evolution_es.py | py | 11,353 | python | en | code | null | github-code | 1 | [
{
"api_name": "orion.algo.space.Space",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "orion.algo.space.Real",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "orion.algo.space.Fidelity",
"line_number": 20,
"usage_type": "call"
},
{
"api_name":... |
30076925049 | from selenium import webdriver
from selenium.webdriver.common.by import By
from web_test_base import WebTestBase
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
class TestOrder(WebTestBase):
def test_valid_zipcode_works(self):
driver = self.driver
driver.get(self.WEBSITE_URL)
zipcode_field = driver.find_element(By.CLASS_NAME, "zipcode-field")
input_element = zipcode_field.find_element(By.ID, "order-form")
confirm_button = driver.find_element(By.CLASS_NAME, "order-confirm-button")
input_element.send_keys("98139")
confirm_button.click()
self.assertIn("Vi levererar till dig", driver.page_source)
def test_invalid_zipcode_works(self):
driver = self.driver
driver.get(self.WEBSITE_URL)
zipcode_field = driver.find_element(By.CLASS_NAME, "zipcode-field")
input_element = zipcode_field.find_element(By.ID, "order-form")
confirm_button = driver.find_element(By.CLASS_NAME, "order-confirm-button")
input_element.send_keys("123456")
confirm_button.click()
self.assertIn("Tyvärr kör vi inte ut inom detta område", driver.page_source)
def test_keyshortcuts_works(self):
driver = self.driver
driver.get(self.WEBSITE_URL)
zipcode_field = driver.find_element(By.CLASS_NAME, "zipcode-field")
input_element = zipcode_field.find_element(By.ID, "order-form")
input_element.send_keys("98139")
input_element.send_keys(Keys.RETURN)
self.assertIn("Vi levererar till dig", driver.page_source)
| NTI-Gymnasieingenjor/pizza | tests/webtests/test_order.py | test_order.py | py | 1,654 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "web_test_base.WebTestBase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 14,
"usage_type... |
30352443016 | from config import *
from lib import cords, led
from utils.time import sleep_ms
def _is_row(side):
return side == cords.TOP or side == cords.BOTTOM
def lines(gen, source=cords.TOP):
side = DISPLAY_ROWS if _is_row(source) else DISPLAY_COLUMNS
other_side = DISPLAY_COLUMNS if _is_row(source) else DISPLAY_ROWS
for i in range(side):
color = gen.generate()
for j in range(other_side):
pos = cords.Cords(j, i)
if source == cords.RIGHT or source == cords.BOTTOM:
pos.mirror_x()
if source == cords.LEFT or source == cords.RIGHT:
pos.flip()
led._pixel(color, pos)
led._show()
sleep_ms(100)
| LeLuxNet/GridPy | animations/coded/lines.py | lines.py | py | 714 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "lib.cords.TOP",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "lib.cords",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "lib.cords.BOTTOM",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "lib.cords.TOP",
"... |
44962919854 | from tkinter import *
from tkinter import messagebox
from PIL import ImageTk, Image
import pygame
import time
import threading
import serial
pygame.mixer.init()
arduinoData = serial.Serial('COM3', 9600)
test = False
musica = False
'''
Esta función se encarga de crear la ventana principal, donde se muestra la consola de comandos y el robot virtual.
Parámetros:
None
Retorna:
No retorna
'''
def ventana():
ventana = Tk()
ventana.title('Robot Virtual')
ventana.minsize(1280, 720)
ventana.resizable(width=NO, height=NO)
ventana.configure(background='#C3DFF4')
canvas = Canvas(ventana, width=450, height=400, background='#7293C4')
canvas.place(x=40, y=150)
canvas2 = Canvas(ventana, width=650, height=600, background='White')
canvas2.place(x=600, y=50)
'''
Objeto Robot
atributos: ventana, nombre (str), imagen, fecha_creacion (str), felicidad (int), contador (int), anim (boolean)
metódos:
hello_ani(): se encarga de crear la animación del robot saludando
sayhello(): muestra en un messagebox el nombre del robot y llama a la función hello_ani
forward_ani(): se encarga de crear la animación del robot caminando hacia adelante
forward(): cambia la imagen del robot para que este mire hacia adelante
backward_ani(): se encarga de crear la animación del robot caminando hacia atrás
backward(): cambia la imagen del robot para que este mire hacia atrás
stop(): se encarga de detener cualquier animación
backflip_ani(): se encarga de crear la animación del robot haciendo un backflip
backflip(): se encarga de llamar a la funcion backflip_ani
music_on(): carga un archivo mp3 y lo reproduce
music_off(): detiene la reproducción del archivo mp3 y lo cierra
'''
class Robot:
def __init__(self, ventana, nombre, imagen, contador, anim):
self.ventana = ventana
self.nombre = nombre
self.imagen = imagen
self.contador = contador
self.anim = anim
def hello_ani(self):
self.anim = True
canvas2.delete('imgR')
while self.contador != 12:
if self.anim == False:
break
nombre = 'Imagenes/wave/wave'+str(self.contador)+'.jpg'
imgR = ImageTk.PhotoImage(Image.open(nombre).resize((700,400)))
canvas2.create_image(0,100, image=imgR, anchor=NW, tag='imgR')
self.contador+=1
time.sleep(0.15)
canvas2.delete('imgR')
self.contador = 1
canvas2.create_image(0, 100, image=self.imagen, anchor=NW)
def sayhello(self):
messagebox.showinfo(title='Nombre:', message='Hola! Mi nombre es ' + str(self.nombre))
threading.Thread(target=self.hello_ani).start()
def forward_ani(self):
self.anim = True
canvas2.delete('imgR')
while self.contador != 22:
if self.anim == False:
break
nombre = 'Imagenes/walkF/walkF'+str(self.contador)+'.jpg'
imgR = ImageTk.PhotoImage(Image.open(nombre).resize((700,400)))
canvas2.create_image(0, 100, image=imgR, anchor=NW, tag='imgR')
self.contador+=1
time.sleep(0.15)
canvas2.delete('imgR')
self.contador = 1
canvas2.create_image(0, 100, image=self.imagen, anchor=NW)
def forward(self):
canvas2.delete('imgR')
self.imagen = ImageTk.PhotoImage(Image.open('Imagenes/defaultF.png').resize((700,400)))
canvas2.create_image(0,100, image=self.imagen, anchor=NW, tag='imgR')
threading.Thread(target=self.forward_ani).start()
def backward_ani(self):
self.anim = True
canvas2.delete('imgR')
while self.contador != 22:
if self.anim == False:
break
nombre = 'Imagenes/walkB/walkB'+str(self.contador)+'.jpg'
imgR = ImageTk.PhotoImage(Image.open(nombre).resize((700,400)))
canvas2.create_image(0, 100, image=imgR, anchor=NW, tag='imgR')
self.contador+=1
time.sleep(0.15)
canvas2.delete('imgR')
self.contador = 1
canvas2.create_image(0, 100, image=self.imagen, anchor=NW)
def backward(self):
canvas2.delete('imgR')
self.imagen = ImageTk.PhotoImage(Image.open('Imagenes/defaultB.png').resize((700,400)))
canvas2.create_image(0,100, image=self.imagen, anchor=NW, tag='imgR')
threading.Thread(target=self.backward_ani).start()
def stop(self):
self.anim = False
def backflip_ani(self):
self.anim = True
canvas2.delete('imgR')
while self.contador != 38:
if self.anim == False:
break
nombre = 'Imagenes/backflip/backflip ('+str(self.contador)+').jpg'
imgR = ImageTk.PhotoImage(Image.open(nombre).resize((700,400)))
canvas2.create_image(0, 100, image=imgR, anchor=NW, tag='imgR')
self.contador+=1
time.sleep(0.15)
canvas2.delete('imgR')
self.contador = 1
canvas2.create_image(0, 100, image=self.imagen, anchor=NW)
def backflip(self):
threading.Thread(target=self.backflip_ani).start()
def music_on(self):
pygame.mixer.music.load('aespa.mp3')
pygame.mixer.music.play()
def music_off(self):
pygame.mixer.music.stop()
pygame.mixer.music.unload()
'''
Esta función se encarga de recibir un dato por medio de una entry y devuelve uno de los métodos del objeto robot segun lo que se introdujo en la entry
Parámetros:
None
Retorna:
No retorna
'''
def aceptar():
comando = consola.get()
if comando == 'sayhello':
R.sayhello()
if comando == 'forward':
R.forward()
if comando == 'backward':
R.backward()
if comando == 'stop':
R.stop()
if comando == 'backflip':
R.backflip()
if comando == 'music_on':
R.music_on()
if comando == 'music_off':
R.music_off()
'''
Esta funcion se encarga de cambiar la variable test a un valor boolean True
Parametros:
None
Retorna:
wait_for_button(test)
'''
def test1():
global test
test = True
return wait_for_button(test)
'''
Esta funcion se encarga de cambiar la variable test a un valor boolean True
Parametros:
test: se encarga de
Retorna:
Segun el boton que se presiona, retorna uno de los comandos del robot
'''
def wait_for_button(test):
global musica
while test == True:
command = arduinoData.read(2)
if len(command) == 2:
if command == b'B1':
test = False
R.sayhello()
elif command == b'B2':
test = False
R.forward()
elif command == b'B3':
test = False
R.backward()
elif command == b'B4':
test = False
R.backflip()
elif command == b'B5':
if musica == True:
test = False
musica = False
R.music_off()
else:
test = False
musica = True
R.music_on()
Label0 = Label(canvas, text='Consola de comandos', bg='#A2C4EC', fg='#383A3D', font=('Baskerville Old Face', 10))
Label0.place(x=20, y=10)
Label1 = Label(canvas, text='Los comandos disponibles son: ', bg='#A2C4EC', fg='#383A3D', font=('Baskerville Old Face', 10), width=25)
Label2 = Label(canvas, text='- sayhello\n- forward\n- backward\n- stop\n- dance\n- music_on\n- music_off', bg='#A2C4EC', width=10, justify=LEFT, fg='#383A3D', font=('Baskerville Old Face', 10), anchor='w')
Label1.place(x=20, y=120)
Label2.place(x=110, y=150)
consola = Entry(canvas, bg='#A2C4EC')
consola.place(x=20, y=35)
btn = Button(canvas, text='Aceptar', bg='#A2C4EC', fg='#383A3D', font=('Baskerville Old Face', 10), command=test1)
btn.place(x=60, y=60)
imgR = ImageTk.PhotoImage(Image.open('Imagenes/defaultF.png').resize((700,400)))
canvas2.create_image(0,100, image=imgR, anchor=NW, tag='imgR')
R = Robot(ventana, 'Linaria', imgR, 1, False)
ventana.mainloop()
ventana()
| josuect0212/Proyecto-3-Robot-con-Control-CE-1102 | ProyectoIII.py | ProyectoIII.py | py | 8,931 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "pygame.mixer.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "serial.Serial",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImag... |
17847754743 |
from __future__ import nested_scopes
from twisted.internet import defer
import sys
class _DeferredCache:
""" Wraps a call that returns a deferred in a cache. Any subsequent
calls with the same argument will wait for the first call to
finish and return the same result (or errback).
"""
hashableArgs = False
inProgressOnly = True
def __init__(self, op, hashableArgs=None, inProgressOnly=None):
self.op = op
self.cache = {}
if hashableArgs is not None:
self.hashableArgs = hashableArgs
if inProgressOnly is not None:
self.inProgressOnly = inProgressOnly
def cb_triggerUserCallback(self, res, deferred):
#print "triggering", deferred
deferred.callback(res)
return res
def cb_triggerUserErrback(self, failure, deferred):
deferred.errback(failure)
return failure
def _genCache(self, args, kwargs):
# This could be better, probably
try:
arghash = hash(args)
except TypeError:
return None
kwit = kwargs.items()
kwit.sort()
try:
kwhash = hash(tuple(kwit))
except TypeError:
return None
return (arghash, kwhash)
def _removeCacheVal(self, res, cacheVal):
del self.cache[cacheVal]
return res
def clearCache(self):
self.cache = {}
def call(self, *args, **kwargs):
# Currently not in progress - start it
#print "called with", args
cacheVal = self._genCache(args, kwargs)
if cacheVal is None and self.hashableArgs:
raise TypeError('DeferredCache(%s) arguments must be hashable'%(
self.op.func_name))
opdef = self.cache.get(cacheVal)
if not opdef:
# XXX assert that it returns a deferred?
opdef = self.op(*args, **kwargs)
if cacheVal is not None:
self.cache[cacheVal] = opdef
if self.inProgressOnly and cacheVal:
opdef.addCallbacks(lambda x: self._removeCacheVal(x, cacheVal),
lambda x: self._removeCacheVal(x, cacheVal))
userdef = defer.Deferred()
opdef.addCallbacks(lambda x: self.cb_triggerUserCallback(x, userdef),
lambda x: self.cb_triggerUserErrback(x, userdef))
return userdef
def DeferredCache(op=None, hashableArgs=None, inProgressOnly=None):
""" Use this as a decorator for a function or method that returns a
deferred. Any subsequent calls using the same arguments will
be all triggered off the original deferred, all returning the
same result.
"""
if op is None:
return lambda x: DeferredCache(x, hashableArgs, inProgressOnly)
c = _DeferredCache(op, hashableArgs, inProgressOnly)
def func(*args, **kwargs):
return c.call(*args, **kwargs)
if sys.version_info > (2,4):
func.func_name = op.func_name
func.clearCache = c.clearCache
func.cache_hashableArgs = c.hashableArgs
func.cache_inProgressOnly = c.inProgressOnly
return func
| rcarmo/divmod.org | Sine/xshtoom/defcache.py | defcache.py | py | 3,176 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "twisted.internet.defer.Deferred",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "twisted.internet.defer",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "sys.version_info",
"line_number": 89,
"usage_type": "attribute"
}
] |
42698701042 | import socket
from socket import setdefaulttimeout
import ipaddress
import itertools
import json
from multiprocessing.pool import ThreadPool
import subprocess
import argparse
import base64
import os
class IPScan():
def __init__(self):
setdefaulttimeout(0.10)
self.open_ports = []
self.ports = list(range(0,1000))
def scan_ips(self, start_ip, end_ip):
start_ip = ipaddress.IPv4Address(start_ip)
end_ip = ipaddress.IPv4Address(end_ip)
results = []
for ip in range(int(start_ip), int(end_ip)):
results.append(self.host(ipaddress.IPv4Address(ip)))
return results
def network_scan(self, network_expression):
results = []
for ip in ipaddress.IPv4Network(network_expression):
ip = str(ip)
results.append(self.host(ip))
return results
def port_check(self, ip, port):
try:
sock = socket.socket()
sock.settimeout(0.10)
result = sock.connect_ex((ip, port))
try:
if result == 0:
self.open_ports.append("port {0}: open".format(str(port)))
else:
self.open_ports.append("port {0}: closed".format(str(port)))
except Exception as e:
self.open_ports.append("port {0} conn fail: {1}".format(str(port), e))
finally:
sock.close()
except:
self.open_ports.append("networking failed for port {0} : {1}".format(str(port), e))
def scan_host(self, ip):
_host_temp = str(ip)
returns = {}
returns['_hostname'] = socket.getfqdn(_host_temp)
returns['ipAddress'] = _host_temp
self.threads = []
pool = ThreadPool(10)
results = pool.starmap(self.port_check, zip(itertools.repeat(_host_temp), self.ports))
pool.close()
pool.join()
self.open_ports.sort()
returns['openPorts'] = self.open_ports
return returns
def set_ports(self, ports):
self.ports = ports
return None
| JetBlackHackerCat/RedPython | NetworkScan.py | NetworkScan.py | py | 2,104 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "socket.setdefaulttimeout",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ipaddress.IPv4Address",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ipaddress.IPv4Address",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "... |
39951557453 | import pygame
import sys
class UI:
def __init__(self, UIdeck):
self.drawMap = {0:self.drawPlayer1, 1:self.drawPlayer2, 2:self.drawPlayer3, 3:self.drawPlayer4}
self.discardMap = {0:self.discardCardP1, 1:self.discardCardP2, 2:self.discardCardP3, 3:self.discardCardP4}
pygame.init()
width = 800
height = 600
gameOver = False
## makes the screen
self.screen = pygame.display.set_mode((width,height))
pygame.display.set_caption('Uno')
black = (0,0,0)
white = (255,255,255)
self.transparent = (0, 0, 0, 0)
self.cards= ['images/black_+4.png','images/black_+4.png','images/black_+4.png','images/black_+4.png','images/black_wildcard.png','images/black_wildcard.png','images/black_wildcard.png','images/black_wildcard.png',
'images/blue_+2.png','images/blue_0.png', 'images/blue_1.png', 'images/blue_2.png', 'images/blue_3.png', 'images/blue_4.png', 'images/blue_5.png', 'images/blue_6.png', 'images/blue_7.png', 'images/blue_8.png', 'images/blue_9.png', 'images/blue_reverse.png', 'images/blue_skip.png',
'images/green_+2.png','images/green_0.png', 'images/green_1.png', 'images/green_2.png', 'images/green_3.png', 'images/green_4.png', 'images/green_5.png', 'images/green_6.png', 'images/green_7.png', 'images/green_8.png', 'images/green_9.png', 'images/green_reverse.png', 'images/green_skip.png',
'images/red_+2.png','images/red_0.png', 'images/red_1.png', 'images/red_2.png', 'images/red_3.png', 'images/red_4.png', 'images/red_5.png', 'images/red_6.png', 'images/red_7.png', 'images/red_8.png', 'images/red_9.png', 'images/red_reverse.png', 'images/red_skip.png',
'images/yellow_+2.png','images/yellow_0.png', 'images/yellow_1.png', 'images/yellow_2.png', 'images/yellow_3.png', 'images/yellow_4.png', 'images/yellow_5.png', 'images/yellow_6.png', 'images/yellow_7.png', 'images/yellow_8.png', 'images/yellow_9.png', 'images/yellow_reverse.png', 'images/yellow_skip.png',]
for card in UIdeck:
if card.color == 'wild':
card.color = 'black'
if card.value == 'basic':
card.value = 'wildcard'
# print(UIdeck)
self.cardMap = {card:'images/' + card.color + '_' + str(card.value) + '.png' for card in UIdeck}
self.reverseMap = {'images/' + card.color + '_' + str(card.value) + '.png':card for card in UIdeck}
self.deck = []
self.discard = []
self.player1Hand = []
self.player2Hand = []
self.player3Hand = []
self.player4Hand = []
self.player1XPoints = []
self.player2XPoints = []
self.player3XPoints = []
self.player4XPoints = []
self.screen.fill(black)
self.turnCondition = True
self.dealDeck(card)
## deals all the cards to player 1 and loads them onto the screen, keeping track of what cards are in player 1's hand and where they are on the screen
def dealPlayer1(self, hand):
x = 0
y = 10
for card in hand:
image = pygame.image.load(self.cardMap[card])
self.player1Hand.append(self.cardMap[card])
self.screen.blit(image, (x,y))
self.player1XPoints.append(x)
x += 50
## deals all the cards to player 2 and loads them onto the screen, keeping track of what cards are in player 2's hand and where they are on the screen
def dealPlayer2(self, hand):
x = 10
y = 135
for card in hand:
image = pygame.image.load(self.cardMap[card])
self.player2Hand.append(self.cardMap[card])
self.screen.blit(image, (x,y))
self.player2XPoints.append(x)
x += 25
## deals all the cards to player 3 and loads them onto the screen, keeping track of what cards are in player 3's hand and where they are on the screen
def dealPlayer3(self, hand):
x = 10
y = 260
for card in hand:
image = pygame.image.load(self.cardMap[card])
self.player3Hand.append(self.cardMap[card])
self.screen.blit(image, (x,y))
self.player3XPoints.append(x)
x += 25
## deals all the cards to player 4 and loads them onto the screen, keeping track of what cards are in player 4's hand and where they are on the screen
def dealPlayer4(self, hand):
x = 10
y = 385
for card in hand:
image = pygame.image.load(self.cardMap[card])
self.player4Hand.append(self.cardMap[card])
self.screen.blit(image, (x,y))
self.player4XPoints.append(x)
x += 25
## after cards are dealt to each player, the remaining cards are displayed as a stack in the center of the screen
## and flips the first card over to start the game
def dealDeck(self, card = None):
x = 540
y = 260
image = pygame.image.load('images/back.png')
self.screen.blit(image, (x,y))
# image = pygame.image.load(self.cardMap[card])
# self.screen.blit(image, (440,260))
def draw(self, player, card):
self.drawMap[player](card)
def drawPlayer1(self, card):
self.player1Hand.append(self.cardMap[card])
size1X = len(self.player1XPoints)
nextPoint = self.player1XPoints[size1X-1] + 25
self.player1XPoints.append(nextPoint)
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (nextPoint,10))
pygame.display.update()
## when called, will draw a card from the draw pile and add it to player 2's hand
def drawPlayer2(self, card):
self.player2Hand.append(self.cardMap[card])
size2X = len(self.player2XPoints)
nextPoint = self.player2XPoints[size2X-1] + 25
self.player2XPoints.append(nextPoint)
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (nextPoint,135))
pygame.display.update()
## when called, will draw a card from the draw pile and add it to player 3's hand
def drawPlayer3(self, card):
self.player3Hand.append(self.cardMap[card])
size3X = len(self.player3XPoints)
nextPoint = self.player3XPoints[size3X-1] + 25
self.player3XPoints.append(nextPoint)
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (nextPoint,260))
pygame.display.update()
## when called, will draw a card from the draw pile and add it to player 4's hand
def drawPlayer4(self, card):
self.player4Hand.append(self.cardMap[card])
size4X = len(self.player4XPoints)
nextPoint = self.player4XPoints[size4X-1] + 25
self.player4XPoints.append(nextPoint)
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (nextPoint,385))
pygame.display.update()
def discardfoo(self, player, card):
# print(self.player1Hand)
# print(self.player2Hand)
# print(self.player3Hand)
# print(self.player4Hand)
# print(card)
# print(self.cardMap[card])
print('p'+str(player+1), 'discarded', card)
self.discardMap[player](card)
def discardCardP1(self, card):
self.discard.append(self.cardMap[card])
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (440,260))
pygame.display.update()
self.player1Hand.remove(self.cardMap[card])
lastI = len(self.player1XPoints)-1
self.player1XPoints.remove(self.player1XPoints[lastI])
pygame.draw.rect(self.screen, self.transparent,(0,10,600,125))
pygame.display.update()
for x_point in self.player1XPoints:
index = self.player1XPoints.index(x_point)
card = self.player1Hand[index]
image = pygame.image.load(card)
self.screen.blit(image, (x_point,10))
pygame.display.update()
def discardCardP2(self, card):
self.discard.append(self.cardMap[card])
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (440,260))
pygame.display.update()
self.player2Hand.remove(self.cardMap[card])
lastI = len(self.player2XPoints)-1
self.player2XPoints.remove(self.player2XPoints[lastI])
pygame.draw.rect(self.screen, self.transparent,(10,135,600,125))
pygame.display.update()
for x_point in self.player2XPoints:
index = self.player2XPoints.index(x_point)
card = self.player2Hand[index]
image = pygame.image.load(card)
self.screen.blit(image, (x_point,135))
pygame.display.update()
def discardCardP3(self, card):
# print(card)
# print(self.cardMap[card])
self.discard.append(self.cardMap[card])
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (440,260))
pygame.display.update()
self.player3Hand.remove(self.cardMap[card])
lastI = len(self.player3XPoints)-1
self.player3XPoints.remove(self.player3XPoints[lastI])
pygame.draw.rect(self.screen, self.transparent, (10,260,400,125))
pygame.display.update()
for x_point in self.player3XPoints:
index = self.player3XPoints.index(x_point)
card = self.player3Hand[index]
image = pygame.image.load(card)
self.screen.blit(image, (x_point,260))
pygame.display.update()
def discardCardP4(self, card):
self.discard.append(self.cardMap[card])
image = pygame.image.load(self.cardMap[card])
self.screen.blit(image, (440,260))
pygame.display.update()
self.player4Hand.remove(self.cardMap[card])
lastI = len(self.player4XPoints)-1
self.player4XPoints.remove(self.player4XPoints[lastI])
pygame.draw.rect(self.screen, self.transparent,(10,385,600,125))
pygame.display.update()
for x_point in self.player4XPoints:
index = self.player4XPoints.index(x_point)
card = self.player4Hand[index]
image = pygame.image.load(card)
self.screen.blit(image, (x_point,385))
pygame.display.update()
def humanTurn(self, drawnCard = None):
print('v')
events = []
drawn = False
while True:
# print('y')
for event in pygame.event.get():
if event.type == pygame.QUIT:
print('quitted')
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN: ## if there is a mouse click
if not drawn:
print('sdf')
if event.pos[0] > 540 and event.pos[0] < 600:
if event.pos[1] > 265 and event.pos[1] < 365: ## if the click is on the draw pile -> player 1 gets that card in it's hand
if self.turnCondition: return 'draw a card'
drawn = True
self.player1Hand.append(self.cardMap[drawnCard])
size1X = len(self.player1XPoints)
nextPoint = self.player1XPoints[size1X-1] + 50
self.player1XPoints.append(nextPoint)
print(self.cardMap[drawnCard])
image = pygame.image.load(self.cardMap[drawnCard])
self.screen.blit(image, (nextPoint,10))
pygame.display.update()
if event.pos[1] > 10 and event.pos[1] < 110:
for xPos in self.player1XPoints:
if event.pos[0] > xPos and event.pos[0] < xPos + 50:
index = self.player1XPoints.index(xPos)
selectedCard = self.player1Hand[index]
self.discard.append(selectedCard)
image = pygame.image.load(selectedCard)
self.screen.blit(image, (440,260))
pygame.display.update()
self.player1Hand.remove(selectedCard)
lastI = len(self.player1XPoints)-1
self.player1XPoints.remove(self.player1XPoints[lastI])
pygame.draw.rect(self.screen, self.transparent,(0,10,600,125))
pygame.display.update()
for x_point in self.player1XPoints:
index = self.player1XPoints.index(x_point)
card = self.player1Hand[index]
image = pygame.image.load(card)
self.screen.blit(image, (x_point,10))
pygame.display.update()
return ('discard', self.reverseMap[selectedCard])
# deal()
# chooseCardP2(random.choice(player2Hand))
# chooseCardP3(random.choice(player3Hand))
# chooseCardP4(random.choice(player4Hand))
# ##drawPlayer3()
# ##drawPlayer4()
# pygame.display.update()
# while not gameOver:
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# sys.exit()
# if event.type == pygame.MOUSEBUTTONDOWN: ## if there is a mouse click
# if event.pos[0] > 540 and event.pos[0] < 600:
# if event.pos[1] > 265 and event.pos[1] < 365: ## if the click is on the draw pile -> player 1 gets that card in it's hand
# card = deck.pop()
# player1Hand.append(card)
# size1X = len(player1XPoints)
# nextPoint = player1XPoints[size1X-1] + 50
# player1XPoints.append(nextPoint)
# image = pygame.image.load(card)
# screen.blit(image, (nextPoint,10))
# pygame.display.update()
#
# if event.pos[1] > 10 and event.pos[1] < 110:
# for xPos in player1XPoints:
# if event.pos[0] > xPos and event.pos[0] < xPos + 50:
# index = player1XPoints.index(xPos)
# selectedCard = player1Hand[index]
#
# discard.append(selectedCard)
# image = pygame.image.load(selectedCard)
# screen.blit(image, (440,260))
# pygame.display.update()
# player1Hand.remove(selectedCard)
# lastI = len(player1XPoints)-1
# player1XPoints.remove(player1XPoints[lastI])
# pygame.draw.rect(screen, self.transparent,(0,10,600,125))
# pygame.display.update()
#
# for x_point in player1XPoints:
# index = player1XPoints.index(x_point)
# card = player1Hand[index]
# image = pygame.image.load(card)
# screen.blit(image, (x_point,10))
# pygame.display.update()
#
# break
#
#
# pygame.display.update()
##pygame.quit()
##quit()
| mseverinov/Uno | draw.py | draw.py | py | 15,510 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
10786776517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup as bs
def get_historical_data(name, number_of_days):
data = []
url = "https://finance.yahoo.com/quote/" + name + "/history/"
content = requests.get(url).content
rows = bs(content, 'html.parser').findAll('table')[0].tbody.findAll('tr')
for each_row in rows:
divs = each_row.findAll('td')
if divs[1].span.text != 'Dividend':
data.append({'Date': divs[0].span.text, 'Open': float(divs[1].span.text.replace(',', ''))})
return data[:number_of_days]
if __name__ == '__main__':
get_historical_data('AAPL', 10)
| missweetcxx/fragments | projects/yahoo_finance/get_historical_data.py | get_historical_data.py | py | 658 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
}
] |
11131287505 | from urllib.request import Request, urlopen
from fake_useragent import UserAgent
import json
import csv
headers = {"user-agent": UserAgent().chrome, "referer": "https://finance.daum.net/"}
path = "./RPAbasic/crawl/download/"
data = []
try:
url = "https://finance.daum.net/api/search/ranks?limit=10"
res = urlopen(Request(url, headers=headers)).read().decode("UTF-8")
# print(res) # JSON 형태로 반환
# JSON To Dictionary
rank_json = json.loads(res)["data"]
for item in rank_json:
print(
"순위 {}, 금액 {}, 회사명 {}".format(
item["rank"], item["tradePrice"], item["name"]
)
)
data.append(item)
# 크롤링 데이터 저장 ( txt 파일 / csv 파일 )
with open(path + "finance.txt", "a", encoding="UTF-8") as txt:
with open(path + "finance.csv", "a", encoding="UTF-8", newline="") as csvfile:
# 텍스트 저장
txt.write(
"순위 {}, 금액 {}, 회사명 {}\n".format(
item["rank"], item["tradePrice"], item["name"]
)
)
# csv 저장
output = csv.writer(csvfile)
# Column 타이틀 (Header)
output.writerow(data[0].keys())
for row in data:
output.writerow(row.values()) # value 저장
except Exception as e:
print(e)
# 비동기식 데이터 ( Ajax ) 는 값을 가져오지 못하고 있음
# 브라우저 설정 Network 탭 ( F12 )
# DOC : 실제 브라우저 페이지에 대한 요청 정보
# Fetch/XHR : 비동기식 데이터 접근을 위한 요청 정보
# 일부 사이트는 보안상의 문제로 AJax URL 접근을 제한 (Security ...) >> 403 Error 발생
# 데이터 확인 법 ( preview 탭에서 주고받은 데이터 확인 가능 )
# ( headers 탭 ) referer: https://finance.daum.net/ ( 정상적으로 동작하기 위해서는 : https://finance.daum.net 에서 해당 데이터를 요청해야 함 )
# referer : 데이터 요청이 들어온 페이지 ( 크롤링 할 때, fake 주소를 지정해서 Ajax 데이터를 조회해야 한다 )
| hayeong25/Python_Soldesk | rpa/crawl/urllib/5_daum_kosdaq.py | 5_daum_kosdaq.py | py | 2,167 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "fake_useragent.UserAgent",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": ... |
16515330397 | import torch
import pytorch_ssim
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
nc = 3
imsize = 256
def ssim(img, ref,weight_ssim):
img = img.reshape(1, nc, imsize, imsize)
img.requires_grad_()
ssim_value = pytorch_ssim.ssim(ref, img)
ssim_loss = pytorch_ssim.SSIM()
ssim_out = -weight_ssim * ssim_loss(ref, img)
ssim_out.backward()
# del ref
# torch.cuda.empty_cache()
return ssim_value, img.grad.flatten()
def ssim_opt(m0, temp, ref,weight_ssim):
temp = temp.reshape(1, nc, imsize, imsize)
# _, nc, imsize, imsize = temp.shape
temp.requires_grad_()
ssim_value = pytorch_ssim.ssim(ref, temp)
ssim_loss = pytorch_ssim.SSIM()
ssim_out = -weight_ssim * ssim_loss(ref, temp)
comp = ((-weight_ssim * m0) - ssim_out) ** 2
comp.backward()
return comp, temp.grad
| XG196/MAD613 | ssim.py | ssim.py | py | 885 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pytorch_ssim.ssim",... |
28729835092 | from datetime import timedelta
from random import randint, choices
import faker_commerce
from django.core.management.base import BaseCommand
from django_seed import Seed
from faker import Faker
from catalog.models import Category, Discount, Producer, Promocode, Product
class Command(BaseCommand):
help = "Filling database"
__DEFAULT_COUNT_PRODUCERS = 200
__DEFAULT_COUNT_CATEGORIES = 75
__DEFAULT_COUNT_PRODUCTS = 2000
__DEFAULT_COUNT_PROMOCODES = 250
__DEFAULT_COUNT_DISCOUNTS = 20
def __init__(self, *args, **kwargs):
self.seeder = Seed.seeder()
self.fake = Faker()
self.fake.add_provider(faker_commerce.Provider)
super().__init__(*args, **kwargs)
def handle(self, *args, **options):
self.__create_producers()
self.__create_categories()
self.__create_discounts()
self.__create_promocodes()
self.__create_products()
return "Fake data is created"
def __create_producers(self):
self.seeder.add_entity(
model=Producer,
number=self.__DEFAULT_COUNT_PRODUCERS,
customFieldFormatters={
"name": lambda x: self.fake.company(),
"description": lambda x: self.fake.paragraph(nb_sentences=3),
"country": lambda x: self.fake.country(),
},
)
self.seeder.execute()
def __create_categories(self):
self.seeder.add_entity(
model=Category,
number=self.__DEFAULT_COUNT_CATEGORIES,
customFieldFormatters={
"name": lambda x: self.fake.ecommerce_category(),
"description": lambda x: self.fake.paragraph(nb_sentences=2),
},
)
self.seeder.execute()
def __create_discounts(self):
self.seeder.add_entity(
model=Discount,
number=self.__DEFAULT_COUNT_DISCOUNTS,
customFieldFormatters={
"percent": lambda x: randint(1, 50),
"name": lambda x: self.fake.sentence(nb_words=1)[:-1],
"date_start": lambda x: self.fake.date_this_month(),
"date_end": lambda x: self.fake.date_this_month()
+ timedelta(days=randint(5, 100)),
},
)
self.seeder.execute()
def __create_promocodes(self):
self.seeder.add_entity(
model=Promocode,
number=self.__DEFAULT_COUNT_PROMOCODES,
customFieldFormatters={
"name": lambda x: self.fake.swift(length=8),
"percent": lambda x: randint(1, 50),
"date_start": lambda x: self.fake.date_this_month(),
"date_end": lambda x: self.fake.date_this_month()
+ timedelta(days=randint(20, 200)),
"is_cumulative": lambda x: self.fake.pybool(),
},
)
self.seeder.execute()
def __create_products(self):
discounts = Discount.objects.all()
categories = Category.objects.all()
producers = Producer.objects.all()
self.seeder.add_entity(
model=Product,
number=self.__DEFAULT_COUNT_PRODUCTS,
customFieldFormatters={
"name": lambda x: self.fake.ecommerce_name(),
"price": lambda x: self.fake.ecommerce_price(),
"count_on_stock": lambda x: randint(100, 10000),
"articul": lambda x: self.fake.isbn10(),
"description": lambda x: self.fake.paragraph(nb_sentences=2),
"discount": lambda x: choices(discounts)[0],
"category": lambda x: choices(categories)[0],
"producer": lambda x: choices(producers)[0],
},
)
self.seeder.execute()
| maximchikAlexandr/shop | catalog/management/commands/fakedata.py | fakedata.py | py | 3,802 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django_seed.Seed.seeder",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django_seed.Seed",
"line_number": 21,
"usage_type": "name"
},
{
"... |
12579239104 | #defino librerias
import csv
import datetime as ti
#constantes y variables
opciones="""
Bienvenido
__________________________
seleccione una opcion
1.Ingresar datos
2.salir
"""
runTime=True
datetime = ti.date.today()
#funciones
"""Recive el nombre del archivo atraves de un input de usuario y si el archivo existe convierte los datos a valores dentro
de un diccionario donde definimos las claves.
"""
def readFile(urlfile):
cheques=[]
file=open(urlfile+".csv","r")
csvfile=csv.reader(file)
for row in csvfile:
if row != []:
data = {"Nrocheque":row[0],"codigoBanco":row[1],
"codigoSucursal":row[2],"ctaOrigen":row[3],"ctaDestino":row[4],
"valor":row[5],"fechaOrigen":row[6],"fechaPago":row[7],"DNI":row[8],"tipo":row[9],"estado":row[10],}
cheques.append(data)
file.close
return cheques
"""Funcion hecha para devolver los datos requeridos por el usuario, que son filtrados por el dni y el tipo de cheque
solicitado a traves de un input que son verificados en torno a las claves y valores del diccionario realizado en la funcion anterior"""
def buscarDni(dni, tipo):
busqueda=[]
cantidad = 0
cheques = readFile(urlfile)
for cheque in cheques:
if cheque["DNI"]==dni and cheque["tipo"]==tipo:
cantidad += 1
print("cheque encontrado\n")
busqueda.append(cheque)
repetidos = []
for i in busqueda:
repetidos = map(lambda x:x["Nrocheque"], busqueda)
if i["Nrocheque"] in busqueda:
repetidos.append(i["Nrocheque"])
if repetidos !=[]:
print("Error cheques duplicados")
else:
print(f"se encontraron {cantidad} cheques")
print(busqueda)
def CSVDES(dni, busqueda):
file = open(dni+"__"+datetime+"csv","w")
csvfile=csv.writer(file)
for row in busqueda:
csvfile.writerow([row["Nrocheque"],row["codigoBanco"],row[
"codigoSucursal"],row["ctaOrigen"],row["ctaDestino"],row[
"valor"],row["fechaOrigen"],row["fechaPago"],row["DNI"],row["tipo"],row["estado"]])
file.close
print("csv grabado")
#metodo principal
if __name__=="__main__":
while runTime:
print(opciones)
op = input()
if op == "1":
urlfile = input("ingrese el nombre del archivo: \n")
dni = input("ingrese el DNI del usuario a consultar: \n")
tipo = input("tipo de cheque a buscar EMITIDO o DEPOSITADO: \n")
salida = input("desea la salida por pantalla o csv: \n")
print(urlfile,dni,tipo,salida)
try:
resultado = buscarDni(dni, tipo)
if salida =="PANTALLA":
print(resultado)
elif salida == "CSV":
CSVDES(resultado)
else:
print("invalido")
except:
print("invalido")
elif op =="2":
runTime = False
else:
runTime = False
| NicolasGabM/Cheques | listado_cheques.py | listado_cheques.py | py | 3,145 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"l... |
12651433402 | import requests
import re
# Manual PDF: https://www.correios.com.br/a-a-z/pdf/rastreamento-de-objetos/manual_rastreamentoobjetosws.pdf
class Objeto(object):
def __init__(self, *args, **kwargs):
self.cepDestino = ""
self.dataPostagem = ""
self.eventos = list()
self.numero = kwargs.get('numero', '')
self.categoria = kwargs.get('categoria', '')
self.sigla = kwargs.get('sigla', '')
self.nome = kwargs.get('nome', '')
self.json = ""
if 'evento' in kwargs and len(kwargs.get('evento', list())) > 0:
evento = kwargs.get('evento')[0]
self.cepDestino = evento.get('cepDestino', '')
self.dataPostagem = evento.get('dataPostagem', '')
for evento in kwargs.get('evento', list()):
self.eventos.append(Evento(**evento))
class Evento(object):
def __init__(self, *args, **kwargs):
self.tipo = kwargs.get('tipo', '')
self.data = kwargs.get('data', '')
self.hora = kwargs.get('hora', '')
self.criacao = kwargs.get('criacao', '')
self.prazoGuarda = kwargs.get('prazoGuarda', '')
self.diasUteis = kwargs.get('diasUteis', '')
self.descricao = kwargs.get('descricao', '')
self.detalhe = kwargs.get('detalhe', '')
self.status = kwargs.get('status', '')
if 'unidade' in kwargs:
self.unidade = Unidade(**kwargs.get('unidade', dict()))
if 'destino' in kwargs and len(kwargs.get('destino', list())) > 0:
self.destino = Destino(**kwargs.get('destino')[0])
if 'detalheOEC' in kwargs:
self.detalheOEC = OEC(**kwargs.get('detalheOEC', dict()))
class Unidade(object):
def __init__(self, *args, **kwargs):
self.tipounidade = kwargs.get('tipounidade', '')
self.local = kwargs.get('local', '')
self.sto = kwargs.get('sto', '')
self.codigo = kwargs.get('codigo', '')
self.uf = kwargs.get('uf', '')
self.cidade = kwargs.get('cidade', '')
if 'endereco' in kwargs:
self.endereco = Endereco(**kwargs.get('endereco', dict()))
class Endereco(object):
def __init__(self, *args, **kwargs):
self.numero = kwargs.get('numero', '')
self.cep = kwargs.get('cep', '')
self.localidade = kwargs.get('localidade', '')
self.bairro = kwargs.get('bairro', '')
self.codigo = kwargs.get('codigo', '')
self.logradouro = kwargs.get('logradouro', '')
self.uf = kwargs.get('uf', '')
self.latitude = kwargs.get('latitude', '')
self.longitude = kwargs.get('longitude', '')
class Destino(object):
def __init__(self, *args, **kwargs):
self.bairro = kwargs.get('bairro', '')
self.local = kwargs.get('local', '')
self.cidade = kwargs.get('cidade', '')
self.uf = kwargs.get('uf', '')
self.codigo = kwargs.get('codigo', '')
if 'endereco' in kwargs:
self.endereco = Endereco(**kwargs.get('endereco', dict()))
class OEC(object):
def __init__(self, *args, **kwargs):
self.lista = kwargs.get('lista', '')
self.longitude = kwargs.get('longitude', '')
self.latitude = kwargs.get('latitude', '')
self.carteiro = kwargs.get('carteiro', '')
self.distrito = kwargs.get('distrito', '')
self.unidade = kwargs.get('unidade', '')
if 'endereco' in kwargs:
self.endereco = Endereco(**kwargs.get('endereco', dict()))
def generate_valid_code(cod, with_cep=False):
cod = cod.strip()
if len(cod) < 12 or 13 < len(cod):
return ""
prefix = cod[0:2]
number = cod[2:10]
suffix = cod[-2:]
multipliers = [8, 6, 4, 2, 3, 5, 9, 7]
if len(number) < 8 and len(cod) == 12:
diff = 8 - len(number)
zeros = "0" * diff
number = zeros + number
sum_ = sum(int(number[i]) * multipliers[i] for i in range(8))
rest = sum_ % 11
if rest == 0:
verifying_digit = "5"
elif rest == 1:
verifying_digit = "0"
else:
verifying_digit = str(11 - int(rest))
valid_code = prefix + number + verifying_digit + suffix
if with_cep:
obj = track(valid_code)
return valid_code, str(obj.cepDestino)
return valid_code
def track(cod):
if not is_valid(cod):
return None
body = '''
<rastroObjeto>
<usuario>MobileXect</usuario>
<senha>DRW0#9F$@0</senha>
<tipo>L</tipo>
<resultado>T</resultado>
<objetos>{obj}</objetos>
<lingua>101</lingua>
<token>QTXFMvu_Z-6XYezP3VbDsKBgSeljSqIysM9x</token>
</rastroObjeto>
'''
r = requests.post('http://webservice.correios.com.br/service/rest/rastro/rastroMobile',
data=body.format(obj=cod),
headers={'Content-Type': 'application/xml'})
if r.status_code == 200:
result = r.json().get('objeto', list())
if result:
obj = Objeto(**result[0])
obj.json = r.json()
return obj
return None
def is_cod(cod):
if re.match('^[a-zA-Z]{2}[0-9]{9}[a-zA-Z]{2}$', cod):
return True
return False
def is_valid(cod):
return re.match('^[a-zA-Z]{2}[0-9]{9}[a-zA-Z]{2}$', cod) | lmassaoy/docker-rastreio-correios | python/external_communication/correios.py | correios.py | py | 5,318 | python | pt | code | 12 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 169,
"usage_type": "call"
}
] |
26598747074 | import sqlite3 as sql
import json
import os
from datetime import datetime
db = sql.connect('MainScripts/JIRATable.sqlite')
c = db.cursor()
#
# c.execute('drop table if exists JIRAData')
c.execute('''create table if not exists JIRAData (Key text, Status text, Summary text,Reporter text,IssueCreatedOn text, IssueUpdatedOn text, RefreshDate text)''' )
#
ins = 'insert into JIRAData (Key,Status,Summary,Reporter,IssueCreatedOn,IssueUpdatedOn,RefreshDate) values (?,?,?,?,?,?,?)'
#
#
with open('MainScripts/JIRAData.json','r') as f:
JData = json.load(f)
#
i = 0
#
for r in JData:
RefreshDate = JData[i]['RefreshDate']
val=(JData[i]['Key'],JData[i]['Status'],JData[i]['Summary'],JData[i]['Reporter'],JData[i]['IssueCreatedDate'],JData[i]['UpdatedDate'],RefreshDate)
i = i+1
c.execute(ins,val)
print(c.fetchall())
db.commit()
# c.execute('select count(*) From JIRAData')
# res = c.fetchall()
# for row in res:
# print(row)
# db.close()
# os.remove('JIRATable.sqlite')
# print(JData[0]['Key'])
# print(JData[1]['Key'])
| kleban31/Webscrape-JIRA-Python | MainScripts/ins_JIRAData.py | ins_JIRAData.py | py | 1,055 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
}
] |
3516381702 | import stanza
import json,re
# # stanza.download('en') # This downloads the English models for the neural pipeline
nlp = stanza.Pipeline('en') # This sets up a default neural pipeline in English
field = "yugioh"
f = open(field+"_entitys.json")
field_entitys = json.load(f)
f.close()
max_num = 5000
def exist(text,entitys):
flag = False
label_title = None
match_type = None
if text in entitys:#exact match
flag = True
label_title = text
match_type = "exact match"
return flag,label_title,match_type
return flag,label_title,match_type
src = open("./data/zeshel/documents/"+field+".json")
out_f = open("exact_"+field+"_tmp.jsonl",'w')
cnt = 0
succ_cnt = 0
line = src.readline()
finish_flag = False
while line:
if finish_flag:
break
cnt += 1
if cnt%100==0:
print(cnt)
text = json.loads(line)["text"]
from_doc = json.loads(line)["title"]
try:
doc = nlp(text)
except:
line = src.readline()
continue
mention_list = []
entity_list = []
doc_flag = False
for sentence in doc.sentences:
if doc_flag:
break
for ent in sentence.ents:
if doc_flag:
break
if ent.text in mention_list:
continue
if ent.type == "DATE" or ent.type == "CARDINAL" or ent.type == "ORDINAL":#不要日期
continue
flag, label_title,match_type = exist(ent.text.lower(),field_entitys)
if flag:
if from_doc.lower() == label_title:
continue
start,end = ent.start_char,ent.end_char
sample = {}
sample["context_left"] = text[:start]
if len(sample["context_left"].split())<64:
continue
sample["context_left"] = " ".join(sample["context_left"].split()[-128:])
sample["mention"] = ent.text
sample["context_right"] = text[end:]
sample["context_right"] = " ".join(sample["context_right"].split()[:128])
sample["label_title"] = label_title
sample["world"] = field
sample["from_doc"] = from_doc
sample["match_type"] = match_type
out_f.write(json.dumps(sample)+"\n")
succ_cnt += 1
if succ_cnt>=max_num:
finish_flag = True
entity_list.append(label_title)
mention_list.append(ent.text)
doc_flag = True
line = src.readline()
src.close()
out_f.close()
f = open(field+"_entity_index_by_title")
entity_index_by_title = json.load(f)
f.close()
out_f = open("exact_"+field+".jsonl",'w')
f = open("exact_"+field+"_tmp.jsonl")
line = f.readline()
while line:
line = json.loads(line)
new_line = {}
new_line["context_left"] = line["context_left"]
new_line["context_right"] = line["context_right"]
label_title = line["label_title"]
new_line["label"] = entity_index_by_title[label_title][0]
new_line["label_title"] = label_title
new_line["label_id"] = entity_index_by_title[label_title][1]
new_line["mention"] = line["mention"]
new_line["from_doc"] = line["from_doc"]
new_line["world"] = line["world"]
new_line["match_type"] = line["match_type"]
out_f.write(json.dumps(new_line)+"\n")
line = f.readline()
f.close()
out_f.close() | leezythu/MetaBLINK | pseudo_sample.py | pseudo_sample.py | py | 3,469 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "stanza.Pipeline",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 38... |
15976848887 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import librosa
from pathlib import Path
from microphone import record_audio
import matplotlib.mlab as mlab
from os import listdir
from os.path import isfile, join
import song_titles_artists as sta
def load_song_from_path(path: str):
    """
    Loads a song from a path.
    Returns the samples, sampling rate, and duration.
    Parameters
    ------------
    path: str
        The path for the specified audio file
    Returns
    -------
    Tuple[np.ndarray, int, float]
        The list of samples, the sampling rate and the duration of the song
    """
    song_path = Path(path)
    # sr=None keeps the file's native sampling rate instead of resampling.
    samples, sampling_rate = librosa.load(song_path, sr=None)
    duration = librosa.get_duration(y=samples, sr=sampling_rate)
    # Bug fix: astype() returns a NEW array; the original discarded it.
    samples = samples.astype("float32")
    # Normalize by the peak, then scale to the int16 range [-2**15, 2**15).
    # NOTE(review): np.max considers only the positive peak; if true peak
    # normalization is intended, np.max(np.abs(samples)) would be needed
    # — TODO confirm against the matching logic in record_sound.
    samples /= np.max(samples)
    samples *= 2**15
    return samples, sampling_rate, duration
def load_music_files(directory: str):
    """
    Loads the files in a folder.
    Returns the samples from each file.
    Parameters
    ------------
    directory: str
        The path for the specified folder
    Returns
    -------
    List
        Names of all of the songs
    np.ndarray size: (M,N)
        The array of samples for each song
    """
    # Deterministic ordering: sort the plain-file entries of the folder.
    file_names = sorted(
        entry for entry in listdir(directory) if isfile(join(directory, entry))
    )
    # Keep only the sample arrays (index 0 of each loaded tuple).
    sample_arrays = [load_song_from_path(directory + name)[0] for name in file_names]
    music_list = np.array(sample_arrays)
    # Translate file names to song IDs via the title lookup table.
    title_to_id = sta.song_name_to_ID(file_names)
    ids = [title_to_id[name] for name in file_names]
    return ids, music_list
def return_specgram(samples: np.ndarray, sampling_rate: int):
    """
    Creates the spectrogram for a specific set of samples
    Returns the spectrogram, the frequencies, and the times
    Parameters
    ------------
    samples: np.ndarray
        The array of samples
    sampling_rate: int
        The sampling rate
    Returns
    -------
    Tuple[np.narray, np.narray, np.narray]
        The spectrogram 2d array, the frequencies and the timestamps
    """
    # FFT window of 4096 samples with 50% overlap, Hann-windowed,
    # magnitude (not power) output.
    window_size = 4096
    spectrogram, frequencies, timestamps = mlab.specgram(
        samples,
        NFFT=window_size,
        Fs=sampling_rate,
        window=mlab.window_hanning,
        noverlap=window_size // 2,
        mode='magnitude',
    )
    return spectrogram, frequencies, timestamps
def record_sound(time: float):
    """
    Records sound for a specific duration of time
    Returns the samples and the sample rate
    Parameters
    ------------
    time: float
        The duration of the recording in seconds
    Returns
    -------
    Tuple[np.ndarray, int]
        The normalized samples (float32, scaled to the int16 range)
        and the sample rate
    """
    frames, sample_rate = record_audio(time)
    # Decode every raw int16 frame buffer and join them in a single pass;
    # the previous np.append-in-a-loop was quadratic in the frame count.
    if frames:
        samples = np.concatenate([np.frombuffer(frame, np.int16) for frame in frames])
    else:
        samples = np.empty(0)
    samples = samples.astype("float32")
    # Same normalization as load_song_from_path: peak-normalize, then
    # rescale to the int16 amplitude range.
    samples /= np.max(samples)
    samples *= 2 ** 15
    return samples, sample_rate
| andrewyang89/spectrazam | song_loading.py | song_loading.py | py | 2,915 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "librosa.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "librosa.get_duration",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_... |
34707682040 | """A Flask webapp.
Application factory.
From the ``Flask`` docs:
Instead of creating a ``Flask`` instance globally, we will create
it inside a function. This function is known as the application
factory. Any configuration, registration, and other set up the
application needs will happen inside the function, then the
application will be returned.
The app will be passed through to each ``init_app`` declaration, unique
to its respective module. These functions will configure the app
designed in this package.
Registers:
* This app's commandline interface
* Config object from config module
* Exceptions for handling error pages
* This app's registered ``Flask`` extensions
* Loggers from logging
* Blueprints containing routing logic
* This app's interactive shell
"""
from flask import Flask
from app import cli, config, exceptions, extensions, fs, log, shell, views
from app.version import __version__
def create_app() -> Flask:
    """Create ``Flask`` web application object.
    Accessible through wsgi.py.
    :return: Web application initialized and processed through factory
        pattern.
    """
    app = Flask(__name__)
    # Initialization order matters: config and logging come first so the
    # extensions, views, error handlers, shell, CLI, and filesystem setup
    # that follow can rely on them.
    for module in (config, log, extensions, views, exceptions, shell, cli, fs):
        module.init_app(app)
    return app
__all__ = ["__version__", "create_app"]
| jshwi/jss | app/__init__.py | __init__.py | py | 1,447 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "app.config.init_app",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "app.config",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "app.log.init_app",
"li... |
15007520934 | """Handles different kind of browser streams."""
import copy
import logging
from typing import Dict
import cv2
import imutils
import tornado.ioloop
import tornado.web
from tornado.queues import Queue
from viseron.config.config_camera import MJPEG_STREAM_SCHEMA
from viseron.const import TOPIC_FRAME_PROCESSED, TOPIC_STATIC_MJPEG_STREAMS
from viseron.data_stream import DataStream
from viseron.helpers import draw_contours, draw_mask, draw_objects, draw_zones
from viseron.nvr import FFMPEGNVR
LOGGER = logging.getLogger(__name__)
class StreamHandler(tornado.web.RequestHandler):
    """Base handler with the shared frame-processing step for MJPEG streams."""
    async def process_frame(self, nvr: FFMPEGNVR, frame, mjpeg_stream_config):
        """Return JPG with drawn objects, zones etc."""
        width = mjpeg_stream_config["width"]
        height = mjpeg_stream_config["height"]
        if width and height:
            # Caller requested an explicit output size: resize first.
            resolution = width, height
            frame.resize("tornado", width, height)
            # TODO move this to a preprocess
            canvas = frame.get_preprocessed_frame("tornado").get()  # Convert to Mat
        else:
            # No size requested: draw on the camera-native frame.
            resolution = nvr.camera.resolution
            canvas = frame.decoded_frame_mat_rgb
        # Overlay layers in a fixed order: mask, motion, zones, objects.
        motion_mask = nvr.config.motion_detection.mask
        if mjpeg_stream_config["draw_motion_mask"] and motion_mask:
            draw_mask(
                canvas,
                motion_mask,
            )
        if mjpeg_stream_config["draw_motion"] and frame.motion_contours:
            draw_contours(
                canvas,
                frame.motion_contours,
                resolution,
                nvr.config.motion_detection.area,
            )
        if mjpeg_stream_config["draw_zones"]:
            draw_zones(canvas, nvr.zones)
        if mjpeg_stream_config["draw_objects"]:
            draw_objects(
                canvas,
                frame.objects,
                resolution,
            )
        # Geometric transforms come last so overlays are transformed too.
        if mjpeg_stream_config["rotate"]:
            canvas = imutils.rotate_bound(canvas, mjpeg_stream_config["rotate"])
        if mjpeg_stream_config["mirror"]:
            canvas = cv2.flip(canvas, 1)
        # Encode the annotated frame as JPEG at quality 100.
        ret, jpg = cv2.imencode(
            ".jpg", canvas, [int(cv2.IMWRITE_JPEG_QUALITY), 100]
        )
        return ret, jpg
class DynamicStreamHandler(StreamHandler):
    """Represents a dynamic stream using query parameters."""
    async def get(self, camera):
        """Handle a GET request."""
        # Build the per-request stream options from the query string and
        # validate them against the MJPEG stream schema.
        request_arguments = {
            key: self.get_argument(key) for key in self.request.arguments
        }
        LOGGER.debug(request_arguments)
        mjpeg_stream_config = MJPEG_STREAM_SCHEMA(request_arguments)
        LOGGER.debug(mjpeg_stream_config)
        nvr = FFMPEGNVR.nvr_list.get(camera, None)
        if not nvr:
            self.set_status(404)
            self.write(f"Camera {camera} not found.")
            self.finish()
            return
        self.set_header(
            "Content-Type", "multipart/x-mixed-replace;boundary=--jpgboundary"
        )
        self.set_header("Connection", "close")
        # Subscribe to this camera's processed-frame topic.
        frame_queue = Queue(maxsize=10)
        frame_topic = f"{nvr.config.camera.name_slug}/{TOPIC_FRAME_PROCESSED}/*"
        unique_id = DataStream.subscribe_data(frame_topic, frame_queue)
        # Relay frames until the client disconnects.
        while True:
            try:
                queued = await frame_queue.get()
                # Shallow copy so drawing does not mutate the frame
                # shared with other subscribers.
                frame = copy.copy(queued.frame)
                ret, jpg = await self.process_frame(nvr, frame, mjpeg_stream_config)
                if not ret:
                    continue
                self.write("--jpgboundary")
                self.write("Content-type: image/jpeg\r\n")
                self.write("Content-length: %s\r\n\r\n" % len(jpg))
                self.write(jpg.tobytes())
                await self.flush()
            except tornado.iostream.StreamClosedError:
                # Client went away: drop the subscription and stop.
                DataStream.unsubscribe_data(frame_topic, unique_id)
                LOGGER.debug(f"Stream closed for camera {nvr.config.camera.name_slug}")
                break
class StaticStreamHandler(StreamHandler):
    """Represents a static stream defined in config.yaml."""
    # Viewer refcount per stream name, shared by all handler instances.
    # The producer coroutine in ``stream`` keeps running while the count
    # for its stream is non-zero. (Annotation tightened from ``object``:
    # values are always ints — set to 1, incremented, decremented.)
    active_streams: Dict[str, int] = {}
    @tornado.gen.coroutine
    def stream(self, nvr, mjpeg_stream, mjpeg_stream_config, publish_frame_topic):
        """Subscribe to frames, draw on them, then publish processed frame."""
        frame_queue = Queue(maxsize=10)
        subscribe_frame_topic = (
            f"{nvr.config.camera.name_slug}/{TOPIC_FRAME_PROCESSED}/*"
        )
        unique_id = DataStream.subscribe_data(subscribe_frame_topic, frame_queue)
        # Run until the last viewer disconnects (refcount reaches 0).
        while self.active_streams[mjpeg_stream]:
            item = yield frame_queue.get()
            # Shallow copy so drawing does not mutate the shared frame.
            frame = copy.copy(item.frame)
            ret, jpg = yield self.process_frame(nvr, frame, mjpeg_stream_config)
            if ret:
                # Fan the encoded JPEG out to every connected viewer.
                DataStream.publish_data(publish_frame_topic, jpg)
        DataStream.unsubscribe_data(subscribe_frame_topic, unique_id)
        LOGGER.debug(f"Closing stream {mjpeg_stream}")
    async def get(self, camera, mjpeg_stream):
        """Handle GET request.
        Validates the camera and stream name, registers this viewer, and
        relays JPEGs published by the single shared producer coroutine.
        """
        nvr = FFMPEGNVR.nvr_list.get(camera, None)
        if not nvr:
            self.set_status(404)
            self.write(f"Camera {camera} not found.")
            self.finish()
            return
        mjpeg_stream_config = nvr.config.camera.static_mjpeg_streams.get(
            mjpeg_stream, None
        )
        if not mjpeg_stream_config:
            self.set_status(404)
            self.write(f"Stream {mjpeg_stream} not defined.")
            self.finish()
            return
        frame_queue = Queue(maxsize=10)
        frame_topic = (
            f"{TOPIC_STATIC_MJPEG_STREAMS}/{nvr.config.camera.name_slug}/{mjpeg_stream}"
        )
        unique_id = DataStream.subscribe_data(frame_topic, frame_queue)
        if self.active_streams.get(mjpeg_stream, False):
            # A producer is already running; just bump the viewer count.
            self.active_streams[mjpeg_stream] += 1
            # Bug fix: the first literal was missing its f-prefix, so the
            # text "{mjpeg_stream}" was logged verbatim.
            LOGGER.debug(
                f"Stream {mjpeg_stream} already active, number of streams: "
                f"{self.active_streams[mjpeg_stream]}"
            )
        else:
            LOGGER.debug(f"Stream {mjpeg_stream} is not active, starting")
            self.active_streams[mjpeg_stream] = 1
            # First viewer: start the producer coroutine on the IOLoop.
            tornado.ioloop.IOLoop.current().spawn_callback(
                lambda: self.stream(
                    nvr, mjpeg_stream, mjpeg_stream_config, frame_topic
                ),
            )
        self.set_header(
            "Content-Type", "multipart/x-mixed-replace;boundary=--jpgboundary"
        )
        self.set_header("Connection", "close")
        # Relay published JPEGs until the client disconnects.
        while True:
            try:
                jpg = await frame_queue.get()
                self.write("--jpgboundary")
                self.write("Content-type: image/jpeg\r\n")
                self.write("Content-length: %s\r\n\r\n" % len(jpg))
                self.write(jpg.tobytes())
                await self.flush()
            except tornado.iostream.StreamClosedError:
                DataStream.unsubscribe_data(frame_topic, unique_id)
                LOGGER.debug(
                    f"Stream {mjpeg_stream} closed for camera "
                    f"{nvr.config.camera.name_slug}"
                )
                break
        # This viewer is gone; when the count hits 0 the producer stops.
        self.active_streams[mjpeg_stream] -= 1
| patmosxx-v2/viseron | viseron/webserver/stream_handler.py | stream_handler.py | py | 7,556 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.web",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "viseron.nvr.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.