seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
29752726253 | import sys
# Make the sibling `cnvfc` package importable when running from Scripts/.
sys.path.append('../')
import cnvfc
import numpy as np
import pandas as pd
import pathlib as pal

# --- Input / output locations ---
root_p = pal.Path('../data/')
pheno_p = root_p / 'pheno/Pheno.csv'
connectome_p = root_p / 'preprocessed/connectome/sample_connectome/python/'
connectome_t = 'connectome_s{}_mist64.npy'
label_p = root_p / 'parcellation/Parcel_Information/MIST_64.csv'
out_p = root_p / 'processed/fc_profiles/'
if not out_p.is_dir():
    out_p.mkdir()

# Boolean mask selecting the lower triangle (diagonal included) of a 64x64 matrix.
conn_mask = np.tril(np.ones((64, 64))).astype(bool)

# Phenotype table; the unnamed first column holds the NIAK subject id.
pheno = pd.read_csv(pheno_p)
pheno.rename(columns={'Unnamed: 0': 'niak_id'}, inplace=True)
labels = pd.read_csv(label_p, sep=';')
roi_labels = labels.label.values

# Read in the paths to the pre-computed individual seed by seed FC matrices
# and stack the masked lower-triangle values into one (subjects, edges) array.
connectome_paths = [(connectome_p / connectome_t.format(row.Subject)).resolve()
                    for _, row in pheno.iterrows()]
conn_stack = np.array([np.load(path)[conn_mask] for path in connectome_paths])

group = 'DX_GROUP'
regressors = ' + '.join(['SITE_ID', 'FD_scrubbed', 'AGE_AT_SCAN'])
# Define case-control contrast
glm = cnvfc.stats.glm_wrap_cc(conn_stack, pheno, group, case='Diagnosed',
                              control='Control', regressors=regressors, report=True)
table, table_stand_beta, table_qval = cnvfc.tools.summarize_glm(glm, conn_mask, roi_labels)
# Store the results
table.to_csv(out_p / 'icc_case_vs_con.tsv', sep='\t')
table_stand_beta.to_csv(out_p / 'icc_case_vs_con_standardized_betas.tsv', sep='\t')
table_qval.to_csv(out_p / 'icc_case_vs_con_fdr_corrected_pvalues.tsv', sep='\t')
| surchs/Neuropsychiatric_CNV_code_supplement | Scripts/FC_case_control_contrast.py | FC_case_control_contrast.py | py | 1,540 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.tril",
"line_number... |
71534665953 | import json
def get_utterance_indices(utterance):
    """Print each whitespace-delimited token of *utterance* with its start index,
    followed by the last character index of the padded utterance."""
    padded = utterance + " "
    start = 0
    for pos in range(len(padded)):
        if padded[pos] == ' ':
            print(padded[start:pos] + " " + str(start))
            start = pos + 1
    print("Last index : {}".format(len(padded) - 1))
# Load the Rasa NLU training data and coerce each example's first entity
# start/end offsets to integers; the repaired dict is written back out below.
with open('final_result.json') as json_file:
    data = json.load(json_file)
for example in data['rasa_nlu_data']['common_examples']:
    print(example)
    if len(example['entities']) > 0:
        first_entity = example['entities'][0]
        first_entity['start'] = int(first_entity['start'])
        first_entity['end'] = int(first_entity['end'])
with open('final_result2.json', 'w') as fp:
json.dump(data, fp,indent=4) | guptaSneha31/Bot-Assignment- | json_repair.py | json_repair.py | py | 903 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 22,
"usage_type": "call"
}
] |
20623982774 | from sympy import symbols, sin, tan, cos, limit, pi, oo, latex
def main():
    """Evaluate trigonometric limits No.1-No.6 and print each as a LaTeX line."""
    def limnote(expr, n):
        r"""Return the LaTeX string ``\lim_{x \rightarrow n} expr``.

        Expr and n must be pure sympy expressions."""
        lat = r"\lim_{x \rightarrow " + str(latex(n)) + r"} " + latex(expr)
        return str(lat)
    x = symbols('x')
    expr = [
        # Dummy entry so that problem numbers line up with list indices (index 0 unused)
        (1),
        # No.1
        ((x * sin(5*x))/(tan(2*x)**2)),
        # No.2
        ((12*x-4*x**2)/sin(4*x)),
        # No.3
        ((cos(7*x) - cos(3*x))) / ((cos(4*x) - 1)),
        # No.4
        ((1 - sin(6*x))/(cos(6*x)**2)),
        # No. 5
        (x * tan(6*x))/(sin(2*x)**2),
        # No.6
        ((5*x**2-10*x)/(sin(5*x))),
    ]
    n = [
        None, # 0th index
        0, 0, 0, pi/12, 0, 0, # Actual n value
    ]
    # Bug fix: the original used range(1, 6), silently skipping problem No.6
    # even though both expr and n contain an entry for it.
    for i in range(1, len(expr)):
        result = limit(expr[i], x, n[i])
        print(f"\\text{{No.{i}}}\t {limnote(expr[i], n[i])} = {latex(result)}" + "\\\\")
main() | David-Sirait01/SCnLM-Informatika-23-24 | Limit & Integral/Limit/Short/main.py | main.py | py | 1,372 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sympy.latex",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sympy.symbols",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sympy.sin",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sympy.tan",
"line_number": 21,
... |
37213121317 | import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import mlflow
from ydata_profiling import ProfileReport
from trail import Trail
import inspect
def dataloader():
    """Load the Titanic train/test CSVs from ./input and return them as DataFrames.

    Also prints every file found under ./input as a sanity check of the layout.
    """
    for dirname, _, filenames in os.walk('./input'):
        for filename in filenames:
            print(os.path.join(dirname, filename))
    train_data = pd.read_csv('./input/train.csv')
    train_data.head()  # NOTE(review): return value unused — presumably a notebook leftover
    test_data = pd.read_csv('./input/test.csv')
    return train_data, test_data
def split(train_data, test_data):
    """Split *train_data* 80/20, persist both halves to CSV, and return the train half.

    NOTE(review): the *test_data* argument is never used here — confirm whether
    it was meant to be persisted as well.
    """
    train, test = train_test_split(train_data, test_size=0.2, random_state=42)
    train.head()  # NOTE(review): no-op, return value discarded
    test.head()   # NOTE(review): no-op, return value discarded
    train.to_csv('./input/train_split.csv')
    test.to_csv('./input/test_split.csv')
    return train
def trainer(train_data):
    """Train a RandomForest survival classifier and log the run via mlflow/Trail.

    Side effects: writes a profiling HTML report, logs metrics and parameters
    to the active mlflow run, and uploads two artifacts through the Trail client.
    """
    #train_data = train_data[:400]
    y = train_data["Survived"]
    features = ["Pclass", "SibSp", "Parch"]
    X = pd.get_dummies(train_data[features])
    PROFILE_PATH = "./Metadata/Exp1_ML/train_data_report.html"
    profile = ProfileReport(train_data, title="train_data Profiling Report")
    profile.to_file(PROFILE_PATH)
    model = RandomForestClassifier(n_estimators=100, max_depth=10, random_state=1)
    with mlflow.start_run():
        with Trail("myProjectAlias") as trail:
            trail.put_hypothesis("Baseline RandomForest training on all samples without the feature `sex`")
            trail.put_artifact(PROFILE_PATH, "profiling_result.html", "data")
            model.fit(X, y)
            # NOTE: all metrics below are computed on the training set itself
            # (no hold-out evaluation happens in this function).
            mlflow.log_metric("accuracy", model.score(X, y))
            precision = precision_score(y, model.predict(X))
            recall = recall_score(y, model.predict(X))
            mlflow.log_metric("precision", precision)
            mlflow.log_metric("recall", recall)
            mlflow.log_param("n_estimators", 100)
            mlflow.log_param("max_depth", 10)
            mlflow.log_param("random_state", 1)
            get_file()
            trail.put_artifact("./Metadata/Exp1_ML/code.txt", "code", "code")
def precision_score(y, y_pred):
    """Return precision = TP / (TP + FP) for binary labels.

    Bug fix: the original computed ``np.sum(y == y_pred) / len(y)``, which is
    accuracy, not precision. Returns 0.0 when nothing is predicted positive.
    """
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    true_pos = np.sum((y == 1) & (y_pred == 1))
    pred_pos = np.sum(y_pred == 1)
    return true_pos / pred_pos if pred_pos > 0 else 0.0
def recall_score(y, y_pred):
    """Return recall = TP / (TP + FN) for binary labels.

    Bug fix: the original computed ``np.sum(y == y_pred) / len(y)``, which is
    accuracy, not recall. Returns 0.0 when there are no positive labels.
    """
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    true_pos = np.sum((y == 1) & (y_pred == 1))
    actual_pos = np.sum(y == 1)
    return true_pos / actual_pos if actual_pos > 0 else 0.0
def get_file():
    """Dump the full source code of this module to ./Metadata/Exp1_ML/code.txt."""
    # inspect.currentframe() -> this module -> its source text.
    code_txt = inspect.getsource(inspect.getmodule(inspect.currentframe()))
    with open("./Metadata/Exp1_ML/code.txt", "w") as f:
        f.write(code_txt)
if __name__ == '__main__':
    # End-to-end run: load the CSVs, persist an 80/20 split, then train and log.
    a, b = dataloader()
    train_data = split(a, b)
    trainer(train_data)
| NikolausPinger/titanic | baseline.py | baseline.py | py | 2,499 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number"... |
11026928784 | from bs4 import BeautifulSoup
import requests
import numpy as np
import pandas as pd
import re
import warnings
warnings.simplefilter(action='ignore')
# Raw tag accumulators (one entry per listing, possibly None when a tag is absent).
titles = list()
locations = list()
pap = list()
dau = list()
serv = list()
desc = list()
bbt = list()
info = list()
# Cleaned text accumulators extracted from the tags above.
title_text = list()
location_text = list()
pap_text = list()
dau_text = list()
desc_text = list()
serv_text = list()
bbt_text = list()
# Update the loop range as required when scraping this website.
for i in range(801, 896):
    url = f'https://www.propertypro.ng/property-for-rent/in/lagos?page={i}'
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    lists = soup.find_all('div', class_='single-room-text')
    for item in lists:
        title = item.find('h4', class_='listings-property-title')
        location = item.findNext('h4', class_='')
        price_and_period = item.find('div', class_='n50')
        date_added_updated = item.find('h5', class_='')
        serviced = item.find('div', class_='furnished-btn')
        description = item.find('p', class_='d-none d-sm-block')
        bed_bath_toilet = item.find('div', class_='fur-areea')
        info.append([title, location, price_and_period, date_added_updated,
                     serviced, description, bed_bath_toilet])
# Fan the per-listing rows out into per-field lists.
for item in info:
    titles.append(item[0])
    locations.append(item[1])
    pap.append(item[2])
    dau.append(item[3])
    serv.append(item[4])
    desc.append(item[5])
    bbt.append(item[6])
for title in titles:
    if title is not None:
        title_text.append(title.text)
for location in locations:
    if location is not None:
        location_text.append(location.text)
for i in pap:
    if i is not None:
        # Strip the leading newline + naira sign, then take the price token.
        pap_t = i.text.replace('\n₦', '').split(' ')[2]
        pap_text.append(pap_t)
for i in dau:
    if i is None:
        pass
    else:
        dau_text.append(i.text)
for i in serv:
    if i is None:
        pass
    else:
        serv_text.append(i.text.replace('\n', ''))
for i in desc:
    if i is None:
        pass
    else:
        desc_text.append(i.text)
for i in bbt:
    if i is None:
        pass
    else:
        bbt_text.append(i.text.replace('\n', ''))
# NOTE(review): title_text is collected but never used below, and the columns
# are only equal-length if the same listings were missing each tag — a length
# mismatch here would make the DataFrame constructor raise. Confirm upstream.
lag_rents_pp = pd.DataFrame(
    {'Location': location_text,
     'Price_Period': pap_text,
     'Date_Added_Updated': dau_text,
     'Description': desc_text,
     'Serviced': serv_text,
     'Bed_Bath_Toilet': bbt_text})
lag_rents_pp.drop_duplicates(keep='last', inplace=True)
lag_rents_pp.to_csv('lag_rents_pp_8.csv', index=False)
# %%
# Reload the written file and summarize it as a quick sanity check.
df = pd.read_csv('lag_rents_pp_8.csv')
df.info()
| Williamz4lyf/My_Projects | lagos_listings/scrape_propertypro.py | scrape_propertypro.py | py | 2,743 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.simplefilter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame"... |
72153311074 | """Model serialization/deserialization schema."""
import inspect
from typing import Type
from pydent.marshaller.descriptors import DataAccessor
from pydent.marshaller.exceptions import CallbackValidationError
from pydent.marshaller.exceptions import MultipleValidationError
from pydent.marshaller.exceptions import SchemaException
from pydent.marshaller.fields import Callback
from pydent.marshaller.fields import Field
from pydent.marshaller.registry import SchemaRegistry
from pydent.marshaller.utils import make_signature_str
class DynamicSchema(metaclass=SchemaRegistry):
    """A dynamically added schema.
    Should be added using '@add_schema` decorator.
    """
    ignore = ()
    fields = dict()
    @classmethod
    def _get_model_fields(cls):
        # Hook point: subclasses may override how the field mapping is obtained.
        return cls.fields
    @classmethod
    def init_field_accessors(cls):
        """Initializes callback accessors.

        Registers each declared field on the model class so attribute access
        goes through the field's descriptor.

        :return: None
        :rtype: None
        """
        for field_name, field in cls._get_model_fields().items():
            field.register(field_name, cls.model_class)
    @classmethod
    def init_data_accessors(cls, instance, data, add_extra=True):
        """Initializes data accessors.

        Copies *data* onto *instance*, dropping ignored keys and installing a
        DataAccessor descriptor on the model class for any key that does not
        already have one.

        :param instance: model instance being populated (must be of cls.model_class)
        :type instance: object
        :param data: raw data to load onto the instance
        :type data: dict
        :param add_extra: when False, raise AttributeError for undeclared keys
        :return: None
        :rtype: None
        """
        if cls.model_class is not instance.__class__:
            raise SchemaException("Instance and model class are different")
        # Iterate over a copy since ignored keys are popped from *data* itself.
        for k, v in dict(data).items():
            if k in cls.ignore:
                data.pop(k, None)
                continue
            if not add_extra and k not in cls.model_class.fields:
                raise AttributeError(
                    "Expected field missing. Cannot initialize accessor for '{}' for "
                    "'{}' because it is not a field."
                    " It may be missing from the 'field' dictionary.".format(
                        k, cls.model_class
                    )
                )
            if k not in cls.model_class.__dict__:
                setattr(cls.model_class, k, DataAccessor(k, cls.model_class._data_key))
            try:
                setattr(instance, k, v)
            except AttributeError as e:
                raise e
    @classmethod
    def validate_callbacks(cls):
        """Validates expected callback signature found in any callback in the
        registered model fields.
        :return: None
        :rtype: None
        :raises: ModelValidationError
        """
        missing_callbacks = []
        not_callable = []
        wrong_signature = []
        model_class = cls.model_class
        for rname, callback_field in cls.grouped_fields[Callback.__name__].items():
            callback_func = callback_field.callback
            if callback_func is None:
                missing_callbacks.append(
                    "Callback for {model}.{name} cannot be None".format(
                        model=model_class.__name__, name=rname
                    )
                )
                continue
            error_prefix = "Callback '{callback}' for relationship '{model}.{name}'".format(
                callback=callback_func, model=model_class.__name__, name=rname
            )
            if not callable(callback_func) and not hasattr(model_class, callback_func):
                if callback_func not in missing_callbacks:
                    missing_callbacks.append(error_prefix + " is missing.")
            else:
                args_to_send = list(callback_field.callback_args)
                kwargs_to_send = dict(callback_field.callback_kwargs)
                if isinstance(callback_func, str):
                    # String callbacks resolve to an attribute of the model, so
                    # 'self' becomes the implicit first positional argument.
                    callback_func = getattr(model_class, callback_func)
                    args_to_send = ["self"] + args_to_send
                if not callable(callback_func):
                    not_callable.append(error_prefix + " is not callable.")
                else:
                    signature = inspect.getfullargspec(callback_func)
                    expected_args = signature.args
                    expected_kwargs = {}
                    if signature.defaults:
                        # Split positional args from defaulted (keyword) args.
                        default_args = expected_args[-len(signature.defaults) :]
                        expected_args = expected_args[: -len(signature.defaults)]
                        expected_kwargs = dict(zip(default_args, signature.defaults))
                    if (
                        len(expected_args) != len(args_to_send)
                        and not signature.varargs
                    ):
                        wrong_signature.append(
                            error_prefix
                            + " expects arguments {receive_args} but would receive "
                            "arguments {sent_args}.".format(
                                receive_args=make_signature_str(
                                    expected_args, expected_kwargs
                                ),
                                sent_args=make_signature_str(
                                    list(args_to_send), kwargs_to_send
                                ),
                            )
                        )
                    else:
                        for k in kwargs_to_send:
                            invalid_keys = []
                            if k not in expected_kwargs and not signature.varkw:
                                invalid_keys.append(k)
                            if invalid_keys:
                                wrong_signature.append(
                                    error_prefix
                                    + " does not recognize named key(s) {invalid_keys} "
                                    "from signature {receive_args}.".format(
                                        invalid_keys=", ".join(
                                            ['"{}"'.format(key) for key in invalid_keys]
                                        ),
                                        receive_args=make_signature_str(
                                            expected_args, expected_kwargs
                                        ),
                                    )
                                )
                                # NOTE(review): this appends the callback object
                                # itself, which is later wrapped in
                                # CallbackValidationError(w) — confirm this is
                                # intentional and not leftover debugging.
                                wrong_signature.append(callback_func)
        # MultipleValidationError collects the individual errors and raises on exit.
        with MultipleValidationError("") as e:
            if missing_callbacks:
                for w in missing_callbacks:
                    e.r(CallbackValidationError(w))
            if not_callable:
                for w in not_callable:
                    e.r(CallbackValidationError(w))
            if wrong_signature:
                for w in wrong_signature:
                    e.r(CallbackValidationError(w))
    @classmethod
    def register(cls, model_class: Type):
        """Registers the schema to a model class. Saves the schema class to the
        class attribute.
        :param model_class: a model class
        :type model_class: type
        :return: None
        :rtype: None
        """
        setattr(model_class, "_model_schema", cls)
        schema_fields = {}
        ignored_fields = {}
        fields_key = model_class._fields_key
        if hasattr(model_class, fields_key):
            model_fields = getattr(model_class, fields_key)
            ignore = model_fields.pop("ignore", ())
            # A single ignored name may be given as a bare string.
            if isinstance(ignore, str):
                ignore = (ignore,)
            cls.ignore = ignore
            for field_name, field in model_fields.items():
                if field_name in cls.ignore:
                    ignored_fields[field_name] = field
                elif issubclass(type(field), Field):
                    # Default the serialized data key to the attribute name.
                    if field.data_key is None:
                        field.data_key = field_name
                if issubclass(type(field), Field):
                    schema_fields[field_name] = field
            setattr(model_class, fields_key, schema_fields)
        setattr(model_class, "_ignored_fields", ignored_fields)
        setattr(cls, "_fields", schema_fields)
        setattr(cls, "_ignored_fields", ignored_fields)
        cls.init_field_accessors()
        cls.validate_callbacks()
| aquariumbio/pydent | pydent/marshaller/schema.py | schema.py | py | 8,155 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "pydent.marshaller.registry.SchemaRegistry",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pydent.marshaller.exceptions.SchemaException",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pydent.marshaller.descriptors.DataAccessor",
"line_num... |
11337631853 |
import pandas as pd
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
import control as con
import scipy.fftpack
def FFT(x, fs):
    """Return (freq, magnitude, phase) of the zero-centred FFT of *x* sampled at *fs* Hz.

    Phase entries whose magnitude is numerically negligible are zeroed out so
    floating-point noise does not produce meaningless angles.
    """
    num_samples = len(x)
    spectrum = scipy.fftpack.fftshift(scipy.fftpack.fft(x))
    freq = np.arange(-num_samples / 2, num_samples / 2) * fs / num_samples
    magnitude = np.abs(spectrum) / num_samples
    phase = np.angle(spectrum)
    phase[np.abs(magnitude) < 1e-10] = 0
    return freq, magnitude, phase
def make_stem(ax, x, y, color = 'k', style = 'solid', label ='', linewidths = 2.5,** kwargs):
    """Fast stem plot: a red baseline plus vertical lines on *ax*, avoiding
    matplotlib's slow per-marker stem().

    NOTE(review): axhline's signature is (y, xmin, xmax); passing (x[0], x[-1], 0)
    draws the baseline at y = x[0] — confirm this is intentional.
    """
    ax.axhline(x[0], x[-1], 0, color = 'r')
    ax.vlines(x, 0, y, color = color, linestyles = style, label = label, linewidths = linewidths)
    # Pad the y-limits by 5% so stems do not touch the axes frame.
    ax.set_ylim([1.05*y.min(), 1.05*y.max()])
#Get a noisy signal
X = pd.read_csv('NoisySignal.csv')
t = X['0'].values
Signal = X['1'].values
plt.figure(figsize = (7, 5))
plt.plot(t, Signal)
plt.xlabel('t (s)')
plt.ylabel('Amp(V)')
plt.grid()
plt.title('Noisy Signal')
# Sampling frequency inferred from the first two time stamps.
fs = 1/(t[1]-t[0])
Sig_F, Sig_Mag, Sig_Phi = FFT(Signal, fs)
# --- Spectrum of the raw signal, full band and three zoomed-in regions ---
fig, ax = plt.subplots(figsize=(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.ylabel('Magnitude')
plt.xlabel('F(Hz)')
plt.title("Frequency Responce of The Noisy Signal")
fig, ax = plt.subplots(figsize=(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.xlim(0, 1800)
plt.ylabel('Magnitude')
plt.xlabel('F(Hz)')
plt.title("Frequency Responce of Low Frequency Noise")
fig, ax = plt.subplots(figsize =(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.xlim(1790, 2010)
plt.ylabel('Magnitude')
plt.xlabel('F(Hz)')
plt.title("Frequency Responce of Position Signal")
fig, ax = plt.subplots(figsize =(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.xlim(40000, 60000)
plt.ylabel('Magnitude')
plt.xlabel('Freq(Hz)')
plt.title("Frequency Responce of High Frequency Noise")
#Filter Paramaters
# Series RLC band-pass: H(s) = (R/L)s / (s^2 + (R/L)s + 1/(LC))
R = 3e3
L = 0.8
C = 9e-9
Filter_Num = [R/L, 0]
Filter_Den = [1, R/L, 1/(L*C)]
step = 0.5
w = np.arange(100, 6e5+step, step)
# --- Bode plots of the analog filter, full band and the same zoom regions ---
plt.figure(figsize = (7, 4))
sys = con.TransferFunction(Filter_Num, Filter_Den)
_ = con.bode(sys, w, dB = True, Hz = True, deg = True, Plot = True)
plt.figure(figsize = (7, 4))
sys = con.TransferFunction(Filter_Num, Filter_Den)
_ = con.bode(sys, w, dB = True, Hz = True, deg = True, Plot = True)
plt.xlim(0, 1800)
plt.figure(figsize = (7, 4))
sys = con.TransferFunction(Filter_Num, Filter_Den)
_ = con.bode(sys, w, dB = True, Hz = True, deg = True, Plot = True)
plt.xlim(1600, 2200)
plt.figure(figsize = (7, 4))
sys = con.TransferFunction(Filter_Num, Filter_Den)
_ = con.bode(sys, w, dB = True, Hz = True, deg = True, Plot = True)
plt.xlim(40000, 60000)
# Discretize the analog filter (bilinear transform) and apply it to the signal.
F_Num, F_Den = sig.bilinear(Filter_Num, Filter_Den, fs=fs)
Filtered_Sig = sig.lfilter(F_Num, F_Den, Signal)
Sig_F, Sig_Mag, Sig_Phi = FFT(Filtered_Sig, fs)
plt.figure(figsize = (7, 3))
plt.subplot(1, 1, 1)
plt.plot(t, Filtered_Sig)
plt.xlabel('t(s)')
plt.ylabel('Amp (V)')
plt.title('Filtered Signal')
plt.grid()
# --- Spectrum of the filtered signal, full band and the same zoom regions ---
fig, ax = plt.subplots(figsize=(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.title("Frequency Responce of the Filtered Signal")
plt.ylabel('Magnitude')
plt.xlabel('F(Hz)')
fig, ax = plt.subplots(figsize=(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.title("Frequency Responce of the Filtered Low Frequency Noise")
plt.ylabel('Magnitude')
plt.xlabel('F(Hz)')
plt.xlim(0, 1800)
fig, ax = plt.subplots(figsize =(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.title("Frequency Responce of the Filtered Position Signal")
plt.ylabel('Magnitude')
plt.xlabel('F(Hz)')
plt.xlim(1790, 2010)
fig, ax = plt.subplots(figsize =(7, 3))
make_stem(ax, Sig_F, Sig_Mag)
plt.title("Frequency Responce of the Filtered High Frequency Noise")
plt.ylabel('Magnitude')
plt.xlabel('Freq(Hz)')
plt.xlim(40000, 60000)
plt.show()
| Shujaea/ECE351_CODE | Lab12 Project.py | Lab12 Project.py | py | 3,866 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.signal.fftpack.fft",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "scipy.signal.fftpack",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "scipy... |
12307570528 |
import math
import numpy as np
from scipy.special import erf
from scipy import stats
norm = stats.norm
class BSOPM_Class:
    """Black-Scholes option pricing model for a European call or put.

    Construction computes d1/d2, the CDF terms N(d1)/N(d2) (sign-flipped for
    puts), delta, gamma, the Black-Scholes price, and the intrinsic payoff.
    """
    def disc_function(self, FV, r, T):
        """Discount future value *FV* at continuously compounded rate *r* over *T*."""
        return FV * np.exp(-r * T)
    def bs_d1_d2(self, St, r, t, K, call, sig):
        """Return the Black-Scholes d1 and d2 terms (*t* is time to expiry)."""
        numerator = np.log(St / K) + (sig * sig / 2 + r) * t
        with np.errstate(divide='ignore', invalid='ignore'):
            d1 = numerator / (sig * t ** 0.5)
        d2 = d1 - sig * t ** 0.5
        return d1, d2
    def cdf_approx(self, dn, call):
        """Standard normal CDF via the error function; puts evaluate N(-dn)."""
        z = dn if call else -dn
        return 0.50 * (1.0 + erf(z / math.sqrt(2.0)))
    def bs_delta(self, d1, d2, call):
        """Return the pair (N(d1), N(d2)), with signs flipped internally for puts."""
        return self.cdf_approx(dn=d1, call=call), self.cdf_approx(dn=d2, call=call)
    def bs_gamma(self, d1, St, sig, t):
        """Black-Scholes gamma: pdf(d1) / (S * sigma * sqrt(t))."""
        pdf_d1 = norm.pdf(d1)
        with np.errstate(divide='ignore', invalid='ignore'):
            return pdf_d1 / (St * sig * np.sqrt(t))
    def bs_price(self, St, r, t, K, call, Nd1, Nd2, T):
        """Black-Scholes price from precomputed (possibly put-flipped) N(d1), N(d2)."""
        pvk = self.disc_function(K, r, T - t)
        if call:
            price = St * Nd1 - pvk * Nd2
        else:
            price = pvk * Nd2 - St * Nd1
        if (abs(St - pvk) <= 0.0000000000001) and (T - t <= 0.0000000000001):
            price = 0  # expires at-the-money aka worthless
        return price
    def opt_payoff(self, ST, K, call=True):
        """Intrinsic payoff at expiry: max(ST-K, 0) for a call, max(K-ST, 0) for a put."""
        if call == True:
            return np.maximum(ST - K, 0)
        return np.maximum(K - ST, 0)
    def __init__(self, S0, r, sigma, t, T, K, call=True):
        self.S0 = S0
        self.r = r
        self.sigma = sigma
        self.T = T
        self.K = K
        self.call = call
        self.t = t
        self.d1, self.d2 = self.bs_d1_d2(St=self.S0, r=self.r, t=self.T - self.t,
                                         K=self.K, call=self.call, sig=self.sigma)
        self.Nd1, self.Nd2 = self.bs_delta(d1=self.d1, d2=self.d2, call=self.call)
        self.delta = self.Nd1
        self.gammas = self.bs_gamma(d1=self.d1, St=self.S0, sig=self.sigma, t=self.T - self.t)
        self.price = self.bs_price(St=self.S0, r=self.r, t=self.t, K=self.K,
                                   call=self.call, Nd1=self.Nd1, Nd2=self.Nd2, T=self.T)
        self.payoff = self.opt_payoff(self.S0, self.K, self.call)
| aclime/vix | bsopm.py | bsopm.py | py | 2,041 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.stats.norm",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_numbe... |
35157210724 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
import pickle
# Step 1: Load the dataset
df = pd.read_csv('Pose.csv')
# Step 2: Split the dataset into features (X) and target (y)
X = df.drop('class', axis=1)
y = df['class']
# Step 3: Split the data into training and testing sets (70/30, fixed seed)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1234)
# Step 4: Define the pipelines for different models
# Each pipeline standardizes the features before fitting its classifier.
pipelines = {
    'lr': make_pipeline(StandardScaler(), LogisticRegression()),
    'rc': make_pipeline(StandardScaler(), RidgeClassifier()),
    'rf': make_pipeline(StandardScaler(), RandomForestClassifier()),
    'gb': make_pipeline(StandardScaler(), GradientBoostingClassifier()),
}
# Step 5: Train the models and store them in a dictionary
fit_models = {}
for algo, pipeline in pipelines.items():
    model = pipeline.fit(X_train, y_train)
    fit_models[algo] = model
# Step 6: Print accuracy scores for each model (evaluated on the held-out 30%)
for algo, model in fit_models.items():
    yhat = model.predict(X_test)
    print(algo, accuracy_score(y_test, yhat))
# Step 7: Save the trained Random Forest model using pickle
# NOTE: only the 'rf' pipeline is persisted, regardless of which scored best.
with open('body_language.pkl', 'wb') as f:
    pickle.dump(fit_models['rf'], f)
| ManishSinghh/Major_project | train_model.py | train_model.py | py | 1,517 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.make_pipeline",
"line_number": 22,
"usage_type": "call"
},
... |
40377723310 | import re
import requests
from tasks.m3u.record import M3uItemType, M3URecord
class M3UDeserializer( object ):
    """Deserialize an M3U playlist, read from a URL or a local file, into
    M3URecord items.

    The raw playlist text is fetched eagerly in the constructor; iterating the
    instance yields one populated M3URecord per ``#EXTINF`` entry.
    """
    __RE_ITEM = re.compile( r"(?:^|\n)#EXTINF:((?:-)\d+(\.\d+)?)([^,]+)?,([A-Z].*?)[\r\n]+(.*)" )
    __RE_ATTRIBUTE = re.compile( r"(\w*-\w*)=([\"'].*?[\"'])" )
    __RE_SERIE = re.compile( r'([\w\s&!-_]+)(([Ss]\d{1,2})([ -]+|)([EeXx]\d{1,2}))' )
    def __init__( self, url_filename, media_files = None, **kwargs ):
        """
        :param url_filename: http(s):// URL, file:// URL, or a plain filesystem path
        :param media_files: optional extra media-file extensions to recognize
        """
        self.__DATA = ''
        self.__MEDIA_FILES = [ '.mp4', '.avi', '.mkv', '.flv' ]
        self.__kwargs = kwargs
        if isinstance( media_files, ( list, tuple ) ):
            for item in media_files:
                if item not in self.__MEDIA_FILES:
                    self.__MEDIA_FILES.append( item )
        if url_filename.startswith( ( 'http://', 'https://' ) ):
            self.__downloadUrl( url_filename )
        elif url_filename.startswith( 'file://' ):
            self.__openFile( url_filename[ 7: ] )
        else:
            self.__openFile( url_filename )
        return
    def __openFile( self, filename ):
        """Read the playlist text from a local file.

        Bug fix: the original called open() without ever closing the handle,
        leaking a file descriptor; a context manager now guarantees the close.
        """
        with open( filename, 'r' ) as stream:
            self.__DATA = stream.read()
        return
    def __downloadUrl( self, url ):
        """Fetch the playlist text over HTTP; non-200 responses leave data empty."""
        r = requests.get( url )
        if r.status_code == 200:
            self.__DATA = r.text
        return
    def __iter__(self):
        """This iterate through the M3U data, and yields ( <type>, <title>, <record> )
        where
        <type> M3uItemType
        <title> str
        <record> dict
        :return:
        """
        # Conversion needed as enswith() only accepts str or tuple
        # NOTE(review): the same M3URecord instance is reused for every yield;
        # consumers that retain records must copy them.
        record = M3URecord()
        for item in self.__RE_ITEM.findall( self.__DATA ):
            record.clear()
            record.set( item )
            yield record
        return
| pe2mbs/iptv-server | tasks/m3u/reader.py | reader.py | py | 1,832 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
... |
9512230126 | from datetime import timedelta
from feast import FeatureView, Field
from feast.types import Float64, String
import pandas as pd
from entities import properties_entity
from data_sources import properties_source
properties_fv = FeatureView(
name="properties_fv",
entities=[properties_entity],
ttl=timedelta(days=36500),
schema=[
Field(name="area", dtype=Float64, description="Area of the property"),
Field(name="width", dtype=Float64, description="Width of the property"),
Field(name="length", dtype=Float64, description="Length of the property"),
Field(name="num_bedrooms", dtype=Float64, description="Number of bedrooms in the property"),
Field(name="num_bathrooms", dtype=Float64, description="Number of bathrooms in the property"),
Field(name="district", dtype=String, description="The property located district name"),
Field(name="city", dtype=String, description="The property located city name"),
Field(name="legal_document", dtype=String, description="The legal document of the property"),
],
source=properties_source,
tags={},
owner="phancaothng@gmail.com"
) | Thangphan0102/RealEstateProject | feature_repo/feature_views.py | feature_views.py | py | 1,165 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "feast.FeatureView",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "entities.properties_entity",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "feast... |
25058294272 | import torch, vtk
import numpy as np
import h5py
from vtk_utils import *
import os
from scipy.stats import zscore
# Experiment folder containing per-subject weight pickles
# (<subject>_gyri_weight.pkl and <subject>_sulci_weight.pkl).
path = 'Twin_Transformers'
folder_name = '/media/shawey/SSD8T/GyraSulci_Motor/'+path+'/figs'
weis = os.listdir(folder_name)
# Reference subject HDF5 providing the cortical ROI mask.
filename = "/media/shawey/SSD8T/GyraSulci_Motor/H5_vtk/100408.h5"
f1 = h5py.File(filename,'r+')
roi = f1['roi']
len(roi[roi[:] == True])  # NOTE(review): result discarded — presumably a leftover sanity check
reco_subs = []
for wei in weis:
    print(wei)
    sub_id = wei.split('_')[0]
    # Process each subject only once, even if several files share the prefix.
    if sub_id in reco_subs:
        continue
    reco_subs.append(sub_id)
    ws = [sub_id + '_gyri_weight.pkl',sub_id + '_sulci_weight.pkl' ]
    #write this signal
    for w in ws:
        part = w.split('_')[1]  # 'gyri' or 'sulci'
        scalars = []
        labels = []
        #w1 = torch.load(folder_name+"/" + w).cpu().numpy() #122317_gyri_weight.pkl
        #w1 = zscore(w1) #!!! WITH OR WITHOUT zscore
        # Load weights on CPU, transpose, z-score, and keep the first 59412 rows
        # (presumably the in-mask cortical vertices — confirm against roi).
        temp = torch.load(folder_name+"/" + w,torch.device("cpu")).t()
        w1 = zscore(temp)[:59412,:]
        w1 = np.transpose(w1)
        for i in range(100):
            # Scatter each component back onto the full surface (64984 vertices).
            signal = np.zeros(64984)
            signal[roi[:] == True] = w1[i][:59412]
            #write this signal
            scalars.append(signal)
            labels.append("label{}".format(i))
        rewrite_scalars("/media/shawey/SSD8T/GyraSulci_Motor/InflatedSurface/InflatedSurface.vtk", \
                        "/media/shawey/SSD8T/GyraSulci_Motor/"+path+"/generated_vtks/"+\
                        sub_id+"_"+part+"_zscore.vtk",new_scalars=scalars,new_scalar_names=labels)
        #break
| Shawey94/Gyral_Sulci_Project | Code/pkl2vtkV2.py | pkl2vtkV2.py | py | 1,535 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 33,... |
8479598404 | import sqlalchemy
from fastapi import Depends
from sqlalchemy.orm import Session
from src.data_layer.bot_io import OrderInput
from src.data_layer.db_connector import get_db
from src.models.order import OrderModel
from fastapi import APIRouter
router = APIRouter()
@router.get("/order/{hospital_id}")
def get_order_by_hospital_id(hospital_id: str, db: Session = Depends(get_db)):
    """
    Return all orders destined for the given hospital.

    Bug fix: the original body was a copy of create_order that referenced an
    undefined name ``order`` (a guaranteed NameError on every call) and
    re-registered the same ``POST /order`` route as create_order. It now
    performs the lookup its name and signature promise, on its own GET route.

    :param hospital_id: hospital whose incoming orders should be returned
    :param db: DB session
    :return: list of OrderModel rows addressed to the hospital
    """
    return db.query(OrderModel).filter(OrderModel.to_hospital_id == hospital_id).all()
@router.post("/order")
def create_order(order: OrderInput, db: Session = Depends(get_db)):
    """
    Create order by hospital ID, items and emergency.

    One OrderModel row is created per unit of each requested item
    (``item.quantity`` rows per item).

    Bug fix: the original rebound the name ``order`` (the request payload) to a
    plain dict inside the loop, so any request with more than one item crashed
    with AttributeError on the second iteration.

    :param order: request payload (user, destination hospital, items, emergency flag)
    :param db: DB session
    :return: the list of persisted OrderModel rows
    """
    order_item_list = []
    for item in order.items:
        order_fields = {"user_id": order.user_id,
                        "to_hospital_id": order.to_hospital_id,
                        "item_id": item.id,
                        "emergency": order.emergency}
        order_item_list.extend([OrderModel(**order_fields) for _ in range(item.quantity)])
    for order_model in order_item_list:
        # Commit to DB
        db.add(order_model)
        db.commit()
        db.refresh(order_model)
    return order_item_list
@router.put("/order")
def put_one_order(order_model: OrderInput, db: Session = Depends(get_db)):
    """
    Update one order.

    Finds the order whose id matches the request payload and copies every
    truthy field of the payload onto it.
    :param order_model: payload containing the order id and the fields to update
    :param db: DB session
    :return: dict with the updated order's id, processed and approved flags
    """
    try:
        # Get the order by ID (raises NoResultFound if absent)
        order_to_put = db.query(OrderModel).filter(OrderModel.id == order_model.id).one()
        # Update model class variable for requested fields
        for var, value in vars(order_model).items():
            # Falsy values (None, 0, "", False) are skipped, not written.
            setattr(order_to_put, var, value) if value else None
        # Commit to DB
        db.add(order_to_put)
        db.commit()
        db.refresh(order_to_put)
        return {"id": str(order_to_put.id), "processed": order_to_put.processed, "approved": order_to_put.approved}
    except sqlalchemy.orm.exc.NoResultFound:
        # NOTE(review): a bare Exception surfaces as a 500; consider an
        # HTTPException(404) — confirm the desired API behavior.
        raise Exception(f"{order_model.id} does not exist")
| AshikaInnovate/LocAid_Project | src/endpoints/order.py | order.py | py | 2,646 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "src.data_laye... |
43198314648 | """
✘ Commands Available -
• `{i}fullpromote <reply user/ username>`
Promote User With All rights
"""
import asyncio
from telethon.errors import BadRequestError
from telethon.errors.rpcerrorlist import ChatNotModifiedError, UserIdInvalidError
from telethon.tl.functions.channels import DeleteUserHistoryRequest, EditAdminRequest
from telethon.tl.functions.channels import ExportMessageLinkRequest as ExpLink
from telethon.tl.functions.messages import SetHistoryTTLRequest
from telethon.tl.types import Chat, ChatAdminRights, InputMessagesFilterPinned
from . import *
@ultroid_cmd(
    pattern="fullpromote ?(.*)",
    admins_only=True,
)
async def prmte(ult):
    """Promote the replied-to/named user to admin with every right.

    Usage: ``{i}fullpromote <reply user/username> [custom title]``.
    """
    # Progress placeholder while we work (presumably a "processing..."
    # string from the language pack — verify key "com_1").
    xx = await eor(ult, get_string("com_1"))
    await ult.get_chat()
    user, rank = await get_user_info(ult)
    if not rank:
        # Default admin title when none was supplied.
        rank = "••Aᴅᴍɪɴ••"
    if not user:
        return await xx.edit("`Reply to a user to promote him with all rights!`")
    try:
        # Grant the full set of admin rights in the current chat.
        await ult.client(
            EditAdminRequest(
                ult.chat_id,
                user.id,
                ChatAdminRights(
                    add_admins=True,
                    invite_users=True,
                    change_info=True,
                    ban_users=True,
                    delete_messages=True,
                    pin_messages=True,
                    manage_call=True,
                ),
                rank,
            ),
        )
        await xx.edit(
            f"**{inline_mention(user)}** `is now an admin with full rights in` **{ult.chat.title}** `with title` **{rank}**.",
        )
    except BadRequestError:
        # The promoting account lacks the add-admins right itself.
        return await xx.edit("`I don't have the right to promote you.`")
    # Keep the confirmation visible briefly, then clean up.
    await asyncio.sleep(5)
    await xx.delete()
{
"api_name": "telethon.tl.functions.channels.EditAdminRequest",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "telethon.tl.types.ChatAdminRights",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "telethon.errors.BadRequestError",
"line_number": 54,
"u... |
22593934162 | # -*- coding: utf-8 -*-
import cv2
import numpy as np
from PIL import Image
import glob
import os
from tqdm import tqdm
def color_map(N=256, normalized=False):
    """Build the PASCAL VOC label color palette.

    Each index i is turned into an RGB triple by spreading the bits of i
    (three at a time) over the high bits of the three channels.

    :param N: number of palette entries
    :param normalized: if True return float32 values in [0, 1],
                       otherwise uint8 values in [0, 255]
    :return: (N, 3) numpy array
    """
    palette = np.zeros((N, 3), dtype='float32' if normalized else 'uint8')
    for idx in range(N):
        channels = [0, 0, 0]
        code = idx
        for shift in range(7, -1, -1):
            # Bits 0/1/2 of the current code feed R/G/B respectively.
            for ch in range(3):
                channels[ch] |= ((code >> ch) & 1) << shift
            code >>= 3
        palette[idx] = channels
    return palette / 255 if normalized else palette
def im2index(im,rgb2idx):
"""
turn a 3 channel RGB image to 1 channel index image
"""
assert len(im.shape) == 3
height, width, ch = im.shape
assert ch == 3
m_lable = np.zeros((height, width, 1), dtype=np.uint8)
for w in range(width):
for h in range(height):
b, g, r = im[h, w, :]
m_lable[h, w, :] = rgb2idx[(r, g, b)]
return np.squeeze(m_lable)
def convert(source_filename, target_filename, cmap, rgb2idx):
    """
    Convert an RGB-coded label image into a palettized ('P' mode) PNG.

    :param source_filename: path of the RGB label image (read with cv2, BGR)
    :param target_filename: path to write the palettized image to
    :param cmap: (N, 3) palette array, flattened into the PIL palette
    :param rgb2idx: mapping (r, g, b) tuple -> class index
    """
    # PIL expects the palette as a flat [r0, g0, b0, r1, g1, b1, ...] list.
    palette = list(cmap.reshape(-1))
    cv_img_rgb = cv2.imread(source_filename)
    # Collapse the colors to class indices, then attach the palette so the
    # image still renders with the original colors.
    idx_img = im2index(cv_img_rgb, rgb2idx)
    pil_img = Image.fromarray(idx_img, mode='P')
    pil_img.putpalette(palette)
    pil_img.save(target_filename)
if __name__ == '__main__':
    # Collect every RGB class-label image and mirror each filename into the
    # output directory (assumed to exist already).
    src_files = glob.glob('test/comp6_test_cls/*.png')
    target_dir = 'test/comp6_test_cls_voc'
    des_files = [os.path.join(target_dir, os.path.basename(f)) for f in src_files]
    print(src_files[0:3])
    print(des_files[0:3])
    # VOC palette and its inverse lookup (RGB tuple -> class index).
    cmap = color_map()
    rgb2idx = {tuple(v): idx for idx, v in enumerate(cmap)}
    for src, des in tqdm(zip(src_files, des_files)):
        convert(src, des, cmap, rgb2idx)
| ISCAS007/torchseg | tools/covert_voc_format.py | covert_voc_format.py | py | 1,890 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": ... |
13428488153 | import torch
from torch.utils.data import Dataset, DataLoader, SequentialSampler, RandomSampler
from utils_MTL import (str2bool,
init_seed,
load_json_data,
get_pretrained_tokenizer,
get_pretrained_model,
get_optimizer_and_scheduler)
from tokenizer_correct_baselines import SPTokenizer
from dataset_MTL import MTLDataset
from model_MTL import MultiTaskLearner
from trainer_MTL import iteration, predict_detection, predict_correction, predict_beam1
import os
import re, ast
import argparse
if __name__ == '__main__':
    # Evaluation entry point: find the best epoch from the training log and
    # run beam-search prediction on the test split with that checkpoint.
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # Disabling parallelism to avoid deadlocks
    torch.multiprocessing.set_start_method('spawn')  # to use CUDA with multiprocessing

    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str,
                        default=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
    parser.add_argument('--data_path', type=str, default='/HDD/seunguk/KGECdataset/kgec_kowiki_0907')
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--with_pretrained', type=str2bool, default='true')
    #parser.add_argument('--tokenizer_path', type=str, default='/home/seunguk/KGEC/0821/correct_baselines/tokenizer/')
    parser.add_argument('--vocab_size', type=int, default=30000)
    #parser.add_argument('--vocab_with', type=str, choices=['bpe', 'char'])
    parser.add_argument('--save_path', type=str)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_workers', type=int, default=10)
    parser.add_argument('--max_tokens_per_sent', type=int, default=64)
    #parser.add_argument('--batch_size', type=int)
    parser.add_argument('--beam_size', type=int)

    print('========== Loading All Parse Arguments\n')
    args = parser.parse_args()
    init_seed(args.seed)

    print('========== Loading Tokenizer\n')
    #tokenizer = SPTokenizer(args)
    tokenizer = get_pretrained_tokenizer(args.model_path)
    pad_num = tokenizer('[PAD]', add_special_tokens=False)['input_ids'][0]
    # Keep the arg in sync with the tokenizer actually loaded.
    args.vocab_size = tokenizer.vocab_size

    # Recover the best epoch from the saved training log:
    # the epoch with the minimal validation loss.
    def only_text(text):
        # Strip digits, punctuation and brackets so a log line can be
        # matched by its label text alone.
        text = re.sub(r'[=:\n.,]+', '', text)
        text = re.sub(r'[0-9]+', '', text)
        text = re.sub(r'[\[\]]', '', text)
        return text.strip()

    with open(args.save_path + '_result.txt', 'r') as f:
        result = f.readlines()
    result_ = [only_text(t) for t in result]
    valid_epoch_loss = result[result_.index('Valid Epoch Loss')]

    def preprocess_result(text):
        # Parse a "Name = [v0, v1, ...]" log line into a Python list.
        text = re.sub(r'[a-zA-Z]+', '', text)
        text = re.sub(r'[=:\n]+', '', text)
        text = text.strip()
        array = ast.literal_eval(text)
        return array

    valid_epoch_loss = preprocess_result(valid_epoch_loss)
    best_epoch = valid_epoch_loss.index(min(valid_epoch_loss))

    print(f'========== Loading Test Dataset & DataLoader\n')
    test_data = load_json_data(args.data_path + 'test.json')
    test_dataset = MTLDataset(args, test_data, tokenizer)
    test_loader = DataLoader(test_dataset, batch_size=1,
                             sampler=RandomSampler(test_dataset), num_workers=args.num_workers)

    print(f'beam size: {args.beam_size}')
    #test_word_metrics, test_char_metrics = predict(args, tokenizer, best_epoch, test_dataset)
    predict_beam1(args, tokenizer, best_epoch, test_loader)
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.multiprocessing.set_start_method",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.multiprocessing",
"line_number": 20,
"usage_type": "attribute"
},
{
... |
44569908079 |
import heapq
import config
import logbot
log = logbot.getlogger("ASTAR")
class PathNode(object):
    """Graph node used by the A* search.

    Wraps a coordinate tuple together with the usual A* scores
    (g = cost from start, h = heuristic, f = g + h) and a parent link
    from which the path length ``step`` is derived.  Hashing and
    equality are based solely on the coordinates; ordering on ``f``.
    """

    def __init__(self, coords, cost=1):
        self.coords = coords
        self.cost = cost
        self.g = 0
        self.h = 0
        self.f = 0
        self.step = 0
        self._parent = None
        # Cache the coordinate hash: used by both __hash__ and __eq__.
        self.hash = hash(self.coords)

    def __str__(self):
        return "%s:%s:g%s:h%s:f%s" % \
            (str(self.coords), self.cost, self.g, self.h, self.f)

    __repr__ = __str__

    def __lt__(self, other):
        # Heap ordering: lower total estimate first.
        return self.f < other.f

    def __eq__(self, other):
        return self.hash == hash(other)

    def __hash__(self):
        return self.hash

    def __getitem__(self, axis):
        # Allow node[0] / node[2] access to the coordinates.
        return self.coords[axis]

    @property
    def parent(self):
        return self._parent

    @parent.setter
    def parent(self, node):
        # Adopting a parent also fixes our distance-from-start in steps.
        self._parent = node
        self.step = node.step + 1

    def set_score(self, g, h):
        """Record path cost ``g`` and heuristic ``h``; f = g + h."""
        self.g = g
        self.h = h
        self.f = g + h
class Path(object):
    """An ordered sequence of PathNodes from start to goal.

    Built by walking the ``parent`` chain back from ``goal``.  Iterating
    yields the nodes start-first; ``next_step()``/``has_next()`` provide a
    separate, independently-positioned consumer cursor.
    """

    def __init__(self, goal=None):
        self.goal = goal
        self.nodes = []
        # nodes ends up goal-first; iteration walks it backwards.
        self.reconstruct_path(self.goal)
        self.step_index = len(self.nodes)

    def __str__(self):
        return "Path nodes %s" % [str(n) for n in self.nodes]

    def reconstruct_path(self, current):
        """Append ``current`` and all its ancestors (goal -> start order)."""
        self.nodes.append(current)
        while current.parent is not None:
            self.nodes.append(current.parent)
            current = current.parent

    def __iter__(self):
        self.iter_index = len(self.nodes)
        return self

    def next(self):
        self.iter_index -= 1
        if self.iter_index < 0:
            raise StopIteration()
        return self.nodes[self.iter_index]

    # Bug fix: Python 3's iterator protocol calls __next__, not next().
    # Without this alias, `for node in path` / list(path) raise TypeError
    # on Python 3. The Py2-style name is kept for existing callers.
    __next__ = next

    def has_next(self):
        """True while next_step() can still be called."""
        return self.step_index > 0

    def next_step(self):
        """Consume and return the next node (start-first order)."""
        self.step_index -= 1
        if self.step_index < 0:
            raise Exception("Path consumed")
        return self.nodes[self.step_index]
class AStar(object):
    """Incremental A* search over the navgrid graph.

    Drive the search by calling ``next()`` repeatedly.  ``StopIteration``
    signals termination: on success ``self.path`` holds a ``Path``; on
    failure (open set exhausted, or a node exceeded ``max_cost`` steps)
    it stays None.
    """

    def __init__(self, navgrid, start, goal, max_cost=config.PATHFIND_LIMIT):
        self.navgrid = navgrid
        self.succesors = self.navgrid.graph.get_succ
        self.get_node = self.navgrid.graph.get_node
        self.start_node = PathNode(start)
        self.goal_node = PathNode(goal)
        self.max_cost = max_cost
        self.path = None
        # Standard A* bookkeeping: the open set is mirrored by a heap
        # ordered on f (via PathNode.__lt__).
        self.closed_set = set()
        self.open_heap = [self.start_node]
        self.open_set = set([self.start_node])
        self.start_node.set_score(
            0, self.heuristic_cost_estimate(self.start_node, self.goal_node))

    def reconstruct_path(self, current):
        """Return the node chain from start to ``current`` (start first)."""
        nodes = []
        nodes.append(current)
        while current.parent is not None:
            nodes.append(current.parent)
            current = current.parent
        nodes.reverse()
        return nodes

    def get_edge_cost(self, node_from, node_to):
        """Cost of the graph edge between two nodes' coordinates."""
        return self.navgrid.graph.get_edge(node_from.coords, node_to.coords)

    def neighbours(self, start):
        """Yield successor PathNodes of ``start`` not yet expanded."""
        for node, cost in self.succesors(start.coords):
            if node not in self.closed_set:
                yield PathNode(node, cost=cost)

    def heuristic_cost_estimate(self, start, goal):
        # Diagonal-distance heuristic on the horizontal plane
        # (coordinate axes 0 and 2); the vertical axis is ignored.
        h_diagonal = min(abs(start[0] - goal[0]), abs(start[2] - goal[2]))
        h_straight = (abs(start[0] - goal[0]) + abs(start[2] - goal[2]))
        h = config.COST_DIAGONAL * h_diagonal + \
            config.COST_DIRECT * (h_straight - 2 * h_diagonal)
        return h

    def next(self):
        """Expand one node; raises StopIteration when done (see class doc).

        NOTE(review): Py2-style iterator method name; callers appear to
        invoke .next() directly rather than via the iterator protocol.
        """
        if not self.open_set:
            log.err("Did not find path between %s and %s" % (self.start_node.coords, self.goal_node.coords))
            raise StopIteration()
        x = heapq.heappop(self.open_heap)
        if x == self.goal_node:
            # Goal reached: materialize the path and stop.
            self.path = Path(goal=x)
            raise StopIteration()
        self.open_set.remove(x)
        self.closed_set.add(x)
        for y in self.neighbours(x):
            if y in self.closed_set:
                continue
            tentative_g_core = x.g + self.get_edge_cost(x, y)
            # Relaxation: adopt x as parent if y is new or reached cheaper.
            if y not in self.open_set or tentative_g_core < y.g:
                y.set_score(tentative_g_core,
                            self.heuristic_cost_estimate(y, self.goal_node))
                y.parent = x
                if y not in self.open_set:
                    heapq.heappush(self.open_heap, y)
                    self.open_set.add(y)
                # Abort searches that wander too far from the start.
                if y.step > self.max_cost:
                    log.err("Finding path over limit between %s and %s" % (self.start_node.coords, self.goal_node.coords))
                    raise StopIteration()
| FrederickGeek8/TwistedBot | twistedbot/pathfinding.py | pathfinding.py | py | 4,625 | python | en | code | null | github-code | 1 | [
{
"api_name": "logbot.getlogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.PATHFIND_LIMIT",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "config.COST_DIAGONAL",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": ... |
9640228574 | from openpyxl.styles import NamedStyle, Font, Border, Side, Alignment, PatternFill
from openpyxl.formatting.rule import ColorScaleRule
# Pale green fill with border (mainly for header rows)
style_00 = NamedStyle(name="style_00",
                      font=Font(bold=True, size=10.5),
                      fill=PatternFill(patternType='solid', start_color='E6EDDD', end_color='E6EDDD'),
                      border=Border(left=Side(border_style='thin', color='000000'),
                                    right=Side(border_style='thin', color='000000'),
                                    top=Side(border_style='thin', color='000000'),
                                    bottom=Side(border_style='thin', color='000000')),
                      alignment=Alignment(horizontal='center',
                                          vertical='center'))

# Grey fill with border (mainly for the index column)
style_01 = NamedStyle(name="style_01",
                      font=Font(bold=True, size=10.5),
                      fill=PatternFill(patternType='solid', start_color='F2F2F2', end_color='F2F2F2'),
                      border=Border(left=Side(border_style='thin', color='000000'),
                                    right=Side(border_style='thin', color='000000'),
                                    top=Side(border_style='thin', color='000000'),
                                    bottom=Side(border_style='thin', color='000000')),
                      alignment=Alignment(horizontal='center',
                                          vertical='center'))

# No fill, bordered (mainly for plain data cells)
style_02 = NamedStyle(name="style_02",
                      font=Font(bold=False, size=10.5),
                      border=Border(left=Side(border_style='thin', color='000000'),
                                    right=Side(border_style='thin', color='000000'),
                                    top=Side(border_style='thin', color='000000'),
                                    bottom=Side(border_style='thin', color='000000')),
                      alignment=Alignment(horizontal='center',
                                          vertical='center'))

# Light red fill, no border (for cells to highlight)
style_03 = NamedStyle(name="style_03",
                      border=Border(left=Side(border_style=None),
                                    right=Side(border_style=None),
                                    top=Side(border_style=None),
                                    bottom=Side(border_style=None)),
                      font=Font(bold=True, size=10.5),
                      fill=PatternFill(patternType='solid', start_color='EEDDDC', end_color='EEDDDC'),
                      alignment=Alignment(horizontal='center',
                                          vertical='center'))

# Light blue fill, no border (for cells to highlight)
style_04 = NamedStyle(name="style_04",
                      border=Border(left=Side(border_style=None),
                                    right=Side(border_style=None),
                                    top=Side(border_style=None),
                                    bottom=Side(border_style=None)),
                      font=Font(bold=True, size=10.5),
                      fill=PatternFill(patternType='solid', start_color='DEE6F0', end_color='DEE6F0'),
                      alignment=Alignment(horizontal='center',
                                          vertical='center'))

# Light green fill, no border (for cells to highlight)
style_05 = NamedStyle(name="style_05",
                      border=Border(left=Side(border_style=None),
                                    right=Side(border_style=None),
                                    top=Side(border_style=None),
                                    bottom=Side(border_style=None)),
                      font=Font(bold=True, size=10.5),
                      fill=PatternFill(patternType='solid', start_color='E6EDDD', end_color='E6EDDD'),
                      alignment=Alignment(horizontal='center',
                                          vertical='center'))

# Conditional-formatting color scale: white-ish -> green (min to max)
colorscale_00 = ColorScaleRule(start_type='min',
                               start_color='e5f5f9',
                               end_type='max',
                               end_color='2ca25f')

# Conditional-formatting color scale: white-ish -> red (min to max)
colorscale_01 = ColorScaleRule(start_type='min',
                               start_color='fff7ec',
                               end_type='max',
                               end_color='990000')

# Diverging color scale: blue (-1) through white (0) to red (+1)
colorscale_02 = ColorScaleRule(start_type='num',
                               start_value=-1,
                               start_color='0571b0',
                               mid_type='num',
                               mid_value=0,
                               mid_color='f7f7f7',
                               end_type='num',
                               end_value=1,
                               end_color='ca0020')
{
"api_name": "openpyxl.styles.NamedStyle",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 8,
"usage_type": "call"
},
{
"api_name... |
16190921784 | import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import chain
class GaussianActorCriticNet(nn.Module):
    """Actor-critic network with a diagonal Gaussian policy head.

    A shared MLP trunk feeds two heads: an actor producing the action mean
    (tanh-squashed) and a critic producing a scalar state value.  The
    per-dimension std is a learnable parameter passed through softplus.

    Refactor note: the three near-identical MLP builders now share
    ``_build_mlp``; the produced module structure is unchanged
    (shared keeps a trailing ReLU, actor ends in Tanh, critic ends bare).
    """

    def __init__(self, state_size, action_size, shared_layers, actor_layers, critic_layers, std_init=0):
        """
        :param state_size: dimensionality of the input state
        :param action_size: dimensionality of the action
        :param shared_layers: hidden sizes of the shared trunk (may be empty)
        :param actor_layers: hidden sizes of the actor head
        :param critic_layers: hidden sizes of the critic head
        :param std_init: initial (pre-softplus) value of the std parameter
        """
        super(GaussianActorCriticNet, self).__init__()
        self.shared_network = GaussianActorCriticNet._create_shared_network(state_size, shared_layers)
        # With no shared layers the trunk is an identity Sequential,
        # so the heads consume the raw state.
        shared_output_size = state_size if len(shared_layers) == 0 else shared_layers[-1]
        self.actor_network = GaussianActorCriticNet._create_actor_network(shared_output_size, actor_layers, action_size)
        self.critic_network = GaussianActorCriticNet._create_critic_network(shared_output_size, critic_layers)
        # Learnable per-action-dim std (softplus is applied in forward()).
        self.std = nn.Parameter(torch.ones(action_size) * std_init)

    @staticmethod
    def _build_mlp(layer_sizes, final_activation=None):
        """Build nn.Sequential of Linear+ReLU pairs over ``layer_sizes``;
        the trailing ReLU is replaced by ``final_activation`` (or dropped
        when None).  An empty/singleton size list yields an empty module."""
        modules = []
        for in_size, out_size in zip(layer_sizes, layer_sizes[1:]):
            modules.append(nn.Linear(in_size, out_size))
            modules.append(nn.ReLU())
        if modules:
            modules.pop()  # remove the trailing ReLU
            if final_activation is not None:
                modules.append(final_activation)
        return nn.Sequential(*modules)

    @staticmethod
    def _create_shared_network(state_size, shared_layers):
        """Shared trunk: Linear+ReLU stack, ReLU after the last layer too."""
        return GaussianActorCriticNet._build_mlp(
            [state_size] + list(shared_layers), final_activation=nn.ReLU())

    @staticmethod
    def _create_actor_network(input_size, actor_layers, action_size):
        """Actor head: hidden ReLU layers, tanh on the action output."""
        return GaussianActorCriticNet._build_mlp(
            [input_size] + list(actor_layers) + [action_size],
            final_activation=nn.Tanh())

    @staticmethod
    def _create_critic_network(input_size, critic_layers):
        """Critic head: hidden ReLU layers, bare linear scalar output."""
        return GaussianActorCriticNet._build_mlp(
            [input_size] + list(critic_layers) + [1], final_activation=None)

    def forward(self, states, action=None, std_scale=1.0):
        """Run the network.

        :param states: batch of states, shape (batch, state_size)
        :param action: optional actions to score; sampled when None
                       (deterministic mean when ``std_scale`` <= 0)
        :param std_scale: multiplier on the policy std
        :return: dict with 'v' (batch,), 'action', 'log_prob' and
                 'entropy' (batch, 1)
        """
        shared_output = self.shared_network(states)
        mu = self.actor_network(shared_output)
        value = self.critic_network(shared_output)
        distribution = torch.distributions.Normal(mu, std_scale * F.softplus(self.std))
        if action is None:
            action = distribution.sample() if std_scale > 0 else mu
        log_prob = distribution.log_prob(action)
        entropy = distribution.entropy().sum(-1).unsqueeze(-1)
        return {
            'v': value.squeeze(1),
            'action': action,
            'log_prob': log_prob,
            'entropy': entropy
        }
| telmo-correa/DRLND-project-2 | model.py | model.py | py | 2,913 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
16845484949 | import errno
import os
import re
import threading
import unittest
try:
from unittest import mock
except ImportError:
import mock
import ptracer
# Shared mock: makes any patched ptrace primitive fail with EPERM.
eperm_mock = mock.Mock(
    side_effect=OSError(errno.EPERM, 'Operation not permitted'))


class TestPtracer(unittest.TestCase):
    """Tests for ptracer.context: error propagation when the underlying
    ptrace primitives fail, plus basic tracing, filtering and threading.

    NOTE(review): assertRaisesRegexp is the deprecated Python-2-era alias
    of assertRaisesRegex; presumably kept for Py2 compatibility (the
    module falls back to the standalone ``mock`` package on ImportError).
    """

    # Each fail test patches one ptrace primitive to raise EPERM and
    # checks the error surfaces as PtracerError with the OS message.
    @mock.patch('ptracer.ptrace.attach_and_wait', eperm_mock)
    def test_ptracer__fail_01(self):
        with self.assertRaisesRegexp(ptracer.PtracerError,
                                     'Operation not permitted'):
            with ptracer.context(lambda s: None):
                f = open('/dev/zero', 'r')
                f.close()

    @mock.patch('ptracer.ptrace.syscall', eperm_mock)
    def test_ptracer__fail_02(self):
        with self.assertRaisesRegexp(ptracer.PtracerError,
                                     'Operation not permitted'):
            with ptracer.context(lambda s: None):
                f = open('/dev/zero', 'r')
                f.close()

    @mock.patch('ptracer.ptrace.syscall_exit', eperm_mock)
    def test_ptracer__fail_03(self):
        with self.assertRaisesRegexp(ptracer.PtracerError,
                                     'Operation not permitted'):
            with ptracer.context(lambda s: None):
                f = open('/dev/zero', 'r')
                f.close()

    @mock.patch('ptracer.ptrace.ptrace.getsiginfo', eperm_mock)
    def test_ptracer__fail_04(self):
        with self.assertRaisesRegexp(ptracer.PtracerError,
                                     'Operation not permitted'):
            with ptracer.context(lambda s: None):
                f = open('/dev/zero', 'r')
                f.close()

    def test_ptracer_basic(self):
        # Unfiltered tracing of a simple open/close must record something.
        syscalls = []
        with ptracer.context(syscalls.append):
            f = open('/dev/zero', 'r')
            f.close()
        self.assertGreater(len(syscalls), 0)

    def test_ptracer_filter_01(self):
        syscalls = []

        def _trace(pattern):
            # Re-run the same three open() calls under the given filter
            # and collect only the matching syscalls.
            syscalls[:] = []
            with ptracer.context(syscalls.append, filter=pattern):
                f = open('/dev/null', 'w')
                f.close()
                f = open('/dev/zero', 'r')
                f.close()
                try:
                    open('/dev/nonexistent', 'r')
                except IOError:
                    pass

        # Name regex alone matches all three open calls.
        _trace([
            ptracer.SysCallPattern(name=re.compile('op.*'))
        ])
        self.assertEqual(len(syscalls), 3)

        # Exact path argument (bytes literal) matches only /dev/null.
        _trace([
            ptracer.SysCallPattern(
                name=re.compile('openat'),
                args=[
                    None,
                    b'/dev/null'
                ]
            )
        ])
        self.assertEqual(len(syscalls), 1)

        _trace([
            ptracer.SysCallPattern(
                name=re.compile('openat'),
                args=[
                    None,
                    b'/dev/null'
                ]
            )
        ])
        self.assertEqual(len(syscalls), 1)

        # Regex on the path argument.
        _trace([
            ptracer.SysCallPattern(
                name=re.compile('openat'),
                args=[
                    None,
                    re.compile(b'.*/null'),
                ]
            )
        ])
        self.assertEqual(len(syscalls), 1)

        # Callable arg predicate: only the write-mode open matches.
        _trace([
            ptracer.SysCallPattern(
                name=re.compile('openat'),
                args=[
                    None,
                    None,
                    lambda arg: arg.value & os.O_WRONLY
                ]
            )
        ])
        self.assertEqual(len(syscalls), 1)

        # Result predicate: only the failed open (negative result) matches.
        _trace([
            ptracer.SysCallPattern(
                name=re.compile('op.*'),
                result=lambda res: res.value < 0
            )
        ])
        self.assertEqual(len(syscalls), 1)

    def test_ptracer_threading(self):
        # Syscalls made on a worker thread must be captured too.
        syscalls = []

        def _thread():
            f = open('/dev/zero', 'r')
            f.close()

        flt = ptracer.SysCallPattern(
            name='openat',
            args=[
                None,
                b'/dev/zero'
            ]
        )

        with ptracer.context(syscalls.append, filter=flt):
            thread = threading.Thread(target=_thread)
            thread.start()
            thread.join()

        self.assertEqual(len(syscalls), 1)
| pinterest/ptracer | tests/test_ptracer.py | test_ptracer.py | py | 4,300 | python | en | code | 150 | github-code | 1 | [
{
"api_name": "mock.Mock",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "errno.EPERM",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "ptracer.PtracerErro... |
71681309794 | import json
import datetime
import requests
from common.logger import warning, debug
def attribute_test(test: dict, attributes: list, kind: type) -> None:
    """Walk ``attributes`` as a key path into ``test`` and verify that the
    final value has exactly type ``kind``; log and exit(1) on any failure.
    """
    visited = []
    node = test
    try:
        for key in attributes:
            visited.append(key)
            node = node[key]
    except KeyError:
        warning('fatal error, key ' + '.'.join(visited) + ' not exist')
        exit(1)
    if type(node) != kind:
        warning('fatal error, type of ' + '.'.join(visited) + ' unexpected')
        exit(1)
def date_varify(test: dict) -> None:
    """Check that test['utils']['date'] parses as YYYY-MM-DD;
    log and exit(1) when it does not.
    """
    try:
        datetime.datetime.strptime(test['utils']['date'], '%Y-%m-%d')
    except ValueError as e:
        warning('fatal error, date parse failed: ' + str(e))
        exit(1)
def update_id(info: dict) -> None:
    """Fill the ``auth`` section of ``info`` with account ids fetched from
    the welove520 "love tree" API, authenticated by the configured token.

    Exits the process when the token is rejected.
    """
    debug('start update app id, open id, user id, lover id')
    request = requests.post('https://tree.welove520.com/v2/game/tree/getInfo', data={
        'access_token': config['auth']['token']
    })
    # The API reports an invalid token inside the (Chinese) response body.
    if 'token无效' in request.text:
        warning('fatal error, invalid token')
        exit(1)
    debug(request.text + '\n')
    parse = json.loads(request.text)
    # Map remote field names onto our auth section.
    # NOTE(review): app_id <- lover_qq_open_id while open_id <- qq_open_id
    # looks possibly swapped — confirm against the API response schema.
    info['auth']['app_id'] = parse['lover_qq_open_id']
    info['auth']['open_id'] = parse['qq_open_id']
    info['auth']['user_id'] = parse['cocos_user_id']
    info['auth']['lover_id'] = parse['cocos_lover_id']
def love_days() -> int:
    """Days elapsed since the configured anniversary date, counting the
    first day as day 1."""
    start = datetime.datetime.strptime(config['utils']['date'], '%Y-%m-%d')
    return (datetime.datetime.now() - start).days + 1
# --- Module initialisation: load, validate and enrich the config ------------
try:
    with open('common/config.json', 'r', encoding='utf-8') as file:
        config = json.loads(file.read())
except IOError:
    warning('fatal error, config file failed to open')
    exit(1)
except json.decoder.JSONDecodeError:
    warning('fatal error, json file failed to parse')
    exit(1)
# Required fields: auth.token (str), utils.date (str, YYYY-MM-DD), enable (dict).
attribute_test(config, ['auth', 'token'], str)
attribute_test(config, ['utils', 'date'], str)
attribute_test(config, ['enable'], dict)
date_varify(config)
# Fetch account ids from the remote API (fills config['auth']).
update_id(config)
debug('config init succeed\n')
| Chenrt-ggx/ObjectOriented | common/config.py | config.py | py | 1,970 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "common.logger.warning",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "common.logger.warning",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name":... |
2422288587 | import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError, MissingError
_logger = logging.getLogger(__name__)
class ProcurementGroup(models.Model):
    """Scheduler extension: detect confirmed make-to-stock moves whose
    product has no reordering rule and log a follow-up activity for them."""

    _inherit = 'procurement.group'

    @api.model
    def _run_scheduler_tasks(self, use_new_cursor=False, company_id=False):
        """Hook into the standard scheduler run; check MTS moves first."""
        # Check confirmed moves in MO or pickings
        self.sudo()._try_mts_moves_without_orderpoint(
            use_new_cursor=use_new_cursor, company_id=company_id
        )
        super(ProcurementGroup, self)._run_scheduler_tasks(
            use_new_cursor=use_new_cursor, company_id=company_id
        )

    @api.model
    def _try_mts_moves_without_orderpoint(
        self, use_new_cursor=False, company_id=False
    ):
        """For each product with a stuck MTS move and no orderpoint, raise
        the "cannot reorder" error in a savepoint and turn it into a
        scheduled activity on the product."""
        if company_id and self.env.user.company_id.id != company_id:
            # To ensure that the company_id is taken into account for
            # all the processes triggered by this method
            # i.e. If a PO is generated by the run of the procurements the
            # sequence to use is the one for the specified company not the
            # one of the user's company
            self = self.with_context(
                company_id=company_id, force_company=company_id
            )
        # Find and select only valid moves
        moves_to_reorder = self._get_mts_moves_to_reorder()
        for product_id in moves_to_reorder.mapped('product_id'):
            try:
                # Savepoint keeps the overall transaction usable after the
                # deliberate UserError below.
                with self._cr.savepoint():
                    self._action_cannot_reorder_product(product_id)
            except UserError as error:
                # Convert the error into a next-activity on the product.
                self.env['stock.rule']._log_next_activity(
                    product_id, error.name
                )
        if use_new_cursor:
            self._cr.commit()

    @api.model
    def _get_mts_moves_to_reorder(self):
        """Collect stuck MTS moves from both pickings and manufacturing
        orders; returns a stock.move recordset."""
        # Search all active pickings
        picking_ids = self.env['stock.picking'].search(
            [('state', 'in', ('waiting', 'confirmed', 'assigned'))]
        )
        # Select only valid moves
        picking_res = self._filter_mts_picking_moves_to_reorder(picking_ids)
        # Search all active manufacturing orders
        production_ids = self.env['mrp.production'].search(
            [('state', 'not in', ('done', 'cancel'))]
        )
        # Select only valid moves
        production_res = self._filter_mts_production_moves_to_reorder(
            production_ids
        )
        return picking_res + production_res

    @api.model
    def _filter_mts_picking_moves_to_reorder(self, picking_ids):
        """Keep only confirmed MTS picking moves, pulling from main stock,
        on products without any reordering rule and with nothing already
        created to resupply them.

        NOTE(review): the ``== False`` / ``.ids == []`` comparisons are the
        Odoo idiom for empty many2one/one2many fields.
        """
        return picking_ids.mapped('move_lines').filtered(
            lambda x: \
            x.state in ('confirmed') and \
            x.procure_method == 'make_to_stock' and \
            x.product_id.nbr_reordering_rules == 0 and \
            x.location_id == self.env.ref('stock.stock_location_stock') and \
            x.created_purchase_line_id.id == False and \
            x.created_production_id.id == False and \
            x.orderpoint_created_production_ids.ids == [] and \
            x.orderpoint_created_purchase_line_ids.ids == []
        )

    @api.model
    def _filter_mts_production_moves_to_reorder(self, production_ids):
        """Same filter as the picking variant, applied to the raw-material
        moves of manufacturing orders."""
        return production_ids.mapped('move_raw_ids').filtered(
            lambda x: \
            x.state in ('confirmed') and \
            x.procure_method == 'make_to_stock' and \
            x.product_id.nbr_reordering_rules == 0 and \
            x.location_id == self.env.ref('stock.stock_location_stock') and \
            x.created_purchase_line_id.id == False and \
            x.created_production_id.id == False and \
            x.orderpoint_created_production_ids.ids == [] and \
            x.orderpoint_created_purchase_line_ids.ids == []
        )

    @api.model
    def _action_cannot_reorder_product(self, product_id):
        """Always raise: marker error consumed by the caller to schedule an
        activity on ``product_id``."""
        product_id.ensure_one()
        error_msg = _('Not enough stock and no minimum orderpoint rule')
        raise UserError(error_msg)
| decgroupe/odoo-addons-dec | procurement_run_mts/models/procurement_group.py | procurement_group.py | py | 3,969 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
... |
20994301540 | from __future__ import absolute_import, division, print_function, unicode_literals
from typing import List, Dict, Tuple, Iterable, Type, Any
import fidia
# Python Standard Library Imports
# from collections import OrderedDict, Mapping
from copy import deepcopy
# Other Library Imports
# import pandas as pd
import sqlalchemy as sa
from sqlalchemy.sql import and_
from sqlalchemy.orm import relationship, reconstructor, object_session
from sqlalchemy.orm.collections import attribute_mapped_collection
# FIDIA Imports
import fidia.base_classes as bases
from ..exceptions import *
from ..utilities import fidia_classname, MultiDexDict, MappingMixin
from ..database_tools import database_transaction
from fidia.sample import SampleLikeMixin
import fidia.traits as traits
import fidia.column as fidia_column
# Other modules within this package
# from fidia.base_classes import BaseArchive
# Set up logging
import fidia.slogging as slogging
log = slogging.getLogger(__name__)
log.setLevel(slogging.WARNING)
log.enable_console_logging()
__all__ = ['Archive', 'KnownArchives', 'ArchiveDefinition']
class Archive(SampleLikeMixin, MappingMixin, bases.Archive, bases.Sample, bases.SQLAlchemyBase, bases.PersistenceBase):
# noinspection PyUnresolvedReferences
"""An archive of data in FIDIA.
Instances of `.Archive` class are created by calling the constructor for an
`.ArchiveDefinition`, which defines the objects, data, and schema of the
Archive. `.Archive` and it's sub-classes are generic across all specific
Archives in the system.
An `.ArchiveDefinition` can define Traits and TraitCollections, which are
checked and registered when the corresponding archive is created. As part of
the registration, each TraitMapping is validated. This validation checks
each Trait's slots have been correctly filled (e.g. with another trait or a
column of a particular type).
An `.Archive` also behaves like a `.Sample` in that its objects can be
looked up looked up by subscripting. So these two are equivalent:
>>> ea = fidia.archive.example_archive.ExampleArchive(basepath=test_data_dir) # type: fidia.Archive
>>> sample = fidia.Sample.new_from_archive(ea)
>>> mass = sample['Gal1'].dmu['StellarMasses'].table['StellarMasses'].stellar_mass
and
>>> ea = fidia.ExampleArchive(basepath=test_data_dir)
>>> mass = ea['Gal1'].dmu['StellarMasses'].table['StellarMasses'].stellar_mass
"""
# Set up how Archive objects will appear in the MappingDB
__tablename__ = "archives"
_db_id = sa.Column(sa.Integer, sa.Sequence('archive_seq'), primary_key=True)
_db_archive_class = sa.Column(sa.String)
_db_archive_id = sa.Column(sa.String)
_db_calling_arguments = sa.Column(sa.PickleType) # type: Dict[str, Any]
# _db_contents = sa.Column(sa.PickleType)
__mapper_args__ = {
'polymorphic_on': '_db_archive_class',
'polymorphic_identity': 'Archive'}
_mappings = relationship('TraitMapping',
cascade="all, delete, delete-orphan"
) # type: List[traits.TraitMapping]
columns = relationship('FIDIAColumn',
collection_class=attribute_mapped_collection('id'),
cascade="all, delete, delete-orphan",
back_populates="_archive"
) # type: Dict[str, fidia_column.FIDIAColumn]
def __init__(self, **kwargs):
"""Pass through initializer. Initialization is handled by `ArchiveDefinition.__new__()`
Warnings:
This should be empty, or nearly so. It will be called when an
Archive is reconstructed from the database (which is non-standard
behavior for SQLAlchemy). See `Archive.__db_init__`
"""
self._local_trait_mappings = None
super(Archive, self).__init__()
@reconstructor
def __db_init__(self):
"""Initializer called when the object is reconstructed from the database."""
super(Archive, self).__db_init__()
# Since this archive is being recovered from the database, it must have
# requested persistence.
# self._db_session = Session()
self._local_trait_mappings = None
# Call initializers of subclasses so they can reset attributes stored in _db_calling_args
self.__init__(**self._db_calling_arguments)
def register_mapping(self, mapping):
# type: (traits.TraitMapping) -> None
"""Register a new TraitMapping to this Archive."""
self._register_mapping_locally(mapping)
self._mappings.append(mapping)
self._update_trait_pointers()
def _register_mapping_locally(self, mapping):
"""Add a TraitMapping to the `_local_trait_mappings`."""
if isinstance(mapping, traits.TraitMapping):
mapping.validate()
key = mapping.mapping_key
log.debug("Registering mapping for key %s", key)
# Check if key already exists in this database
if key in self._local_trait_mappings:
raise FIDIAException("Attempt to add/change an existing mapping")
self._local_trait_mappings[key] = mapping
# @TODO: Also link up superclasses of the provided Trait to the FIDIA level.
else:
raise ValueError("TraitManager can only register a TraitMapping, got %s"
% mapping)
@property
def contents(self):
# type: () -> List[str]
from ..astro_object import AstronomicalObject
# Get a valid database session:
#
# I'm not sure if the object_session is required, but it guarantees
# that we are working with the session that this archive belongs to.
# In theory, fidia.mappingdb_session should be the only session present.
session = object_session(self)
if session is None:
session = fidia.mappingdb_session
query = session.query(AstronomicalObject._identifier)
query_results = query.filter_by(_db_archive_id=self._db_archive_id).all()
# Results will contain a list of tuples, so we must get the first column out so we have a simple list.
contents = [i[0] for i in query_results] # type: List[str]
return contents
@contents.setter
def contents(self, value):
        # type: (Iterable) -> None
        """Data Objects contained in this archive.

        Warnings
        --------
        The order of these objects is not preserved currently, because of how
        the data are stored in the persistence database. This may be valuable in
        future. In the short term, efforts have been made to make sure
        everything is checked and ensured to be done in the order of the
        contents for each instance, regardless of the original ordering.
        """
        from ..astro_object import AstronomicalObject
        # Get a valid database session:
        #
        # I'm not sure if the object_session is required, but it guarantees
        # that we are working with the session that this archive belongs to.
        # In theory, fidia.mappingdb_session should be the only session present.
        session = object_session(self)
        if session is None:
            session = fidia.mappingdb_session
        # Work out what's changed
        new_contents = set(value)
        existing_contents = set(self.contents)
        to_add = new_contents.difference(existing_contents)
        to_remove = existing_contents.difference(new_contents)
        # Make those changes to the underlying Objects table in the database
        object_table = AstronomicalObject.__table__
        if len(to_add) > 0:
            session.execute(object_table.insert(),
                            [{"_identifier": i, "_db_archive_id": self._db_archive_id} for i in to_add])
        if len(to_remove) > 0:
            # BUG FIX: the original used the Python ``in`` operator
            # (``object_table.c._identifier in to_remove``), which tests set
            # membership of the Column object itself and therefore always
            # evaluates to False, so removals were silently skipped.  Use
            # SQLAlchemy's ``Column.in_()`` to emit a proper SQL ``IN`` clause.
            session.execute(object_table.delete().where(
                and_(object_table.c._db_archive_id == self._db_archive_id,
                     object_table.c._identifier.in_(to_remove))))
@property
def archive_id(self):
        """Unique identifier of this archive (the mapping-database key)."""
        return self._db_archive_id
@property
def trait_mappings(self):
        # type: () -> Dict[Tuple[str, str], fidia.traits.TraitMapping]
        """Mapping-key -> TraitMapping for this archive, lazily initialised.

        On first access after a database reload, rebuilds the local mapping
        dictionary from the persisted ``_mappings`` list and refreshes the
        trait pointers.
        """
        if self._local_trait_mappings is None:
            # Have not been initialized
            # Confirm that the database has loaded its data, and if not, skip initialization.
            if len(self._mappings) == 0:
                return dict()
            # Otherwise, initialize the trait_mappings from the mappings stored in the database.
            self._local_trait_mappings = MultiDexDict(2)  # type: Dict[Tuple[str, str], fidia.traits.TraitMapping]
            for mapping in self._mappings:
                self._register_mapping_locally(mapping)
            self._update_trait_pointers()
        return self._local_trait_mappings
def get_archive_id(self, archive, id):
        """Return *id* unchanged after checking *archive* is this archive.

        Part of the "sample-like" interface: a Sample translates object ids
        between archives, but an Archive only knows about its own ids.
        """
        if archive is self:
            return id
        raise FIDIAException("Object in Archive cannot get id's for other archives.")
def archive_for_column(self, column_id):
        # type: (str) -> fidia.Archive
        """The `.Archive` instance that that has the column id given.

        This is part of the sample-like interface for Archives.  Raises
        ``FIDIAException`` for a malformed column id or one that belongs to a
        different archive; otherwise returns ``self``.

        See Also
        --------
        `Sample.archive_for_column`
        """
        # NOTE: changes to the logic here may also need to be made in `Sample.archive_for_column`
        column_id = fidia_column.ColumnID.as_column_id(column_id)
        log.debug("Column requested: %s", column_id)
        column_type = column_id.type  # Cache locally to avoid recalculating.
        if column_type != 'full':
            # This column is not fully defined in the FIDIA sense. Either:
            #   (1) there was an error or problem in associating the column with
            #       this archive--check the execution of `replace_aliases_trait_mappings`
            #       and `expand_column_ids_in_trait_mappings` in `ArchiveDefinition.__new__`
            #   (2) the column id string does not conform to the FIDIA standard, presumably
            #       because the data access layer recognises a special column id. In this
            #       we assume that the column is associated with this Archive (what else
            #       can we do?).
            if column_type == 'non-conformant':
                # Case (2) above.
                return self
            else:
                # Case (1) above.
                raise FIDIAException("Column %s does not seem to have been correctly associated with any archive" %
                                     column_id)
        # Fully-qualified ids embed the owning archive's id; cross-archive
        # requests are an error at this level.
        if column_id.archive_id != self.archive_id:
            log.error("Archive ID mismatch for column %s. This archive: %s, column: %s",
                      column_id, self.archive_id, column_id.archive_id)
            raise FIDIAException("Object in Archive cannot get columns from other archives.")
        return self
def find_column(self, column_id):
        """Look up one of this archive's columns by id (sample-like interface).

        Raises ``KeyError`` if the (normalised) id is not in ``self.columns``.
        """
        normalised_id = fidia.column.ColumnID.as_column_id(column_id)
        return self.columns[normalised_id]
def validate(self, raise_exception=False):
        """Run all archive consistency checks.

        Verifies that every column referenced by the trait mappings is defined,
        and that every defined column is referenced by at least one mapping.
        With ``raise_exception=True`` the first failing check raises
        ``ValidationError``; otherwise failures are reported via return values
        of the individual checks.
        """
        self._validate_mapping_column_ids(raise_exception=raise_exception)
        self._validate_all_columns_mapped(raise_exception=raise_exception)
def _validate_mapping_column_ids(self, raise_exception=False):
        """Collect column ids referenced by mappings but absent from ``self.columns``.

        @TODO: This is draft code and not well tested.
        See also SubTraitMapping.check_columns and TraitMapping.check_columns
        """
        missing_columns = [column
                           for mapping in self._mappings
                           for column in mapping.check_columns(self.columns)]
        if raise_exception and len(missing_columns) > 0:
            raise ValidationError("Trait Mappings of this archive reference Columns not defined in the Archive.")
        return missing_columns
def _validate_all_columns_mapped(self, raise_exception=False):
        """Find columns defined in this archive that no trait mapping references.

        @TODO: This is draft code and not well tested.

        Returns the set of unmapped column ids, or None when every column is
        referenced by at least one mapping.
        """
        mapped_columns = set()
        for mapping in self._mappings:
            mapped_columns.update(mapping.referenced_column_ids)
        unmapped_columns = set(self.columns) - mapped_columns
        if raise_exception and len(unmapped_columns) > 0:
            # BUG FIX: the original raised with a message copy-pasted from
            # `_validate_mapping_column_ids`, which describes the opposite
            # failure mode.  Report the actual problem: defined columns that
            # no Trait Mapping references.
            raise ValidationError("This archive defines Columns that are not referenced by any Trait Mapping.")
        elif len(unmapped_columns) > 0:
            return unmapped_columns
        else:
            return
def __getattr__(self, item):
        """Backup get-attr that will handle cases like a freshly loaded archive.

        Only invoked when normal attribute lookup fails.  For a public name on
        an archive whose mappings have not yet been initialised, accessing
        ``trait_mappings`` forces initialisation (which may attach trait
        pointers as instance attributes), after which the lookup is retried.

        NOTE(review): the retry via ``getattr`` can re-enter this method once;
        on that second entry ``_local_trait_mappings`` is no longer None, so
        the ``AttributeError`` branch terminates the recursion.
        """
        if not item.startswith("_") and self._local_trait_mappings is None and len(self._mappings) > 0:
            # Trigger lazy initialisation of the trait mappings/pointers.
            self.trait_mappings
            return getattr(self, item)
        else:
            raise AttributeError("Unknown attribute %s" % item)
def __getitem__(self, key):
        # type: (str) -> AstronomicalObject
        """Dictionary-style access to this archive's astronomical objects.

        Mirrors the Sample interface so archives and samples are
        interchangeable to calling code.
        """
        from ..astro_object import AstronomicalObject
        if key not in self.contents:
            raise NotInSample("Archive '%s' does not contain object '%s'" % (self, key))
        return AstronomicalObject(self, identifier=key)
def __iter__(self):
        """Iterate over the identifiers of the objects in this archive.

        Part of the Mapping interface (collections.abc.Mapping).

        NOTE: This could be better implemented by integrating more carefully
        with the code at `self.contents`
        """
        return iter(self.contents)
def __len__(self):
        """Number of objects in the Archive.

        Part of the Mapping interface (collections.abc.Mapping).

        NOTE: This could be better implemented by integrating more carefully
        with the code at `self.contents` (each call re-queries the database).
        """
        return len(self.contents)
def __repr__(self):
        """Terse representation: ``FIDIAArchive:`` followed by the archive id."""
        return "FIDIAArchive:" + self.archive_id
def __str__(self):
        """Human-readable name of this archive."""
        return "FIDIA Archive \"{}\"".format(self.archive_id)
def _repr_pretty_(self, p, cycle):
        """IPython pretty-printer hook.

        On a cyclic reference just print the short form; otherwise print the
        archive name followed by its sub-trait summary.
        NOTE(review): ``_sub_trait_repr_pretty`` is not defined in this view —
        presumably inherited from a mixin; confirm.
        """
        # p.text(self.__str__())
        if cycle:
            p.text(self.__str__())
        else:
            p.text("FIDIA Archive \"{}\"".format(str(self.archive_id)))
            self._sub_trait_repr_pretty(p, cycle)
class BasePathArchive(Archive):
    """Archive whose data is addressed relative to a filesystem base path."""

    __mapper_args__ = {'polymorphic_identity': 'BasePathArchive'}

    def __init__(self, **kwargs):
        """Initializer.  Requires a ``basepath`` keyword argument.

        Note: Normally, this initialiser would not be called when reconstructing
        form the database, but for Archives, it is. See `Archive.__db_init__`.
        """
        # Record the base path before delegating the rest of the set-up.
        self.basepath = kwargs['basepath']
        super(BasePathArchive, self).__init__(**kwargs)
class DatabaseArchive(Archive):
    """Archive whose data lives in an external database, addressed by URL."""

    __mapper_args__ = {'polymorphic_identity': 'DatabaseArchive'}

    def __init__(self, **kwargs):
        """Initializer.  Requires a ``database_url`` keyword argument.

        Note: Normally, this initialiser would not be called when reconstructing
        form the database, but for Archives, it is. See `Archive.__db_init__`.
        """
        # Record the connection URL before delegating the rest of the set-up.
        self.database_url = kwargs['database_url']
        super(DatabaseArchive, self).__init__(**kwargs)
def replace_aliases_trait_mappings(mappings, alias_mappings):
    """Recursively rewrite aliased column ids in a tree of trait mappings.

    Leaf nodes (``TraitPropertyMapping``) whose ``id`` appears in
    *alias_mappings* are rewritten in place; any other mapping is assumed to
    be iterable and is descended into.
    """
    for mapping in mappings:
        if not isinstance(mapping, fidia.traits.TraitPropertyMapping):
            # Not a leaf: descend into the child mappings.
            log.debug("Recursing on mapping %s", mapping)
            replace_aliases_trait_mappings(mapping, alias_mappings)
        elif mapping.id in alias_mappings:
            log.debug("Replacing alias %s with actual ID %s", mapping.id, alias_mappings[mapping.id])
            mapping.id = alias_mappings[mapping.id]
def expand_column_ids_in_trait_mappings(mappings, archive_columns):
    # type: (List[Any], Dict[str, fidia_column.FIDIAColumn]) -> None
    """Recursively replace 'short' column ids in *mappings* with full ids.

    The short->long lookup table is built lazily from *archive_columns* on the
    first short id encountered, keyed by ``"<column_type>:<column_name>"``.

    NOTE(review): a short id with no matching archive column raises KeyError
    at the ``short_to_long[mapping.id]`` lookup — confirm whether that is the
    intended failure mode.
    """
    short_to_long = None
    for mapping in mappings:
        if isinstance(mapping, fidia.traits.TraitPropertyMapping):
            mapping_column_id = fidia_column.ColumnID.as_column_id(mapping.id)
            if mapping_column_id.type == 'short':
                if short_to_long is None:
                    # Create a database of short to long IDs to make updating quick.
                    short_to_long = dict()
                    for colid in archive_columns:
                        colid = fidia_column.ColumnID.as_column_id(colid)
                        short_to_long[colid.column_type + ":" + colid.column_name] = colid
                log.debug("Replacing short ColumnID %s with full ID %s", mapping.id, short_to_long[mapping.id])
                mapping.id = short_to_long[mapping.id]
            else:
                continue
        else:
            # Not a leaf: descend into the child mappings.
            log.debug("Recursing on mapping %s", mapping)
            expand_column_ids_in_trait_mappings(mapping, archive_columns)
class ArchiveDefinition(object):
    """A definition of the columns (data), objects, and traits (schema) making up an archive.

    This class should be subclassed to define a new Archive for use in FIDIA.
    Typically, the subclass needs to override all of the attributes.
    """

    archive_id = None
    """The ID uniquely identifying this archive (str).

    Typically, this will be
    composed of the Survey/Archive name. If individual instances of the
    archive may need to refer to different data (e.g., because it is stored
    in different directories, then the ID should contain this distinction in
    the form of a path or similar. If the data to be referred to is always
    the same (more typical) then the ID should not contain the path.
    """

    archive_type = Archive  # type: Type[Archive]
    """The class of the `.Archive` to be constructed.

    `.BasePathArchive` is the most common example, which provides a base path
    abstraction to remove details of the particular local path from the rest of
    the definition of the archive.
    """

    writable = False

    contents = []  # type: Iterable
    """An iterable containing the names of the objects to be included in this Archive.

    May be instance specific (but only if the `.archive_id` is also
    instance specific).
    """

    trait_mappings = []  # type: List[traits.TraitMapping]
    """List of TraitMapping objects defining the schemas of the data in this archive."""

    column_definitions = fidia_column.ColumnDefinitionList()  # type: Dict[str, fidia_column.ColumnDefinition]
    """List of definitions of the columns of data to be included in this archive.

    When the `.ArchiveDefinition` factory creates an instances of
    an `.Archive`, these definitions will be interpreted into actual
    references to columns of data.
    """

    is_persisted = True
    """Does FIDIA write Archive instances from this definition into the MappingDB.

    Typically only set to False for testing.
    """

    def __init__(self, **kwargs):
        """Subclass hook; this factory's real work happens in ``__new__``."""
        # __init__ of superclasses not called.
        pass

    # noinspection PyProtectedMember
    def __new__(cls, **kwargs):
        # type: (Any) -> Archive
        """Factory: build (or fetch from the DB) the `.Archive` this definition describes."""
        from fidia import known_archives

        definition = object.__new__(cls)
        definition.__init__(**kwargs)

        # Allow an archive not to (individually) be persisted in the database
        is_persisted = kwargs.pop("persist", True) and definition.is_persisted

        # @TODO: Validate definition

        if is_persisted:
            # Check if archive already exists:
            try:
                return known_archives.by_id[definition.archive_id]
            except KeyError:
                pass

        # Archive doesn't exist, so it must be created
        archive = definition.archive_type.__new__(definition.archive_type)  # type: Archive

        # I n i t i a l i s e   t h e   A r c h i v e
        archive.__init__(**kwargs)

        # We wrap the rest of the initialisation in a database transaction, so
        # if the archive cannot be initialised, it will not appear in the
        # database.
        with database_transaction(fidia.mappingdb_session):

            # Basics
            archive._db_archive_id = definition.archive_id
            # NOTE(review): attribute is spelled "writeable" while the
            # definition attribute is "writable" — confirm intentional.
            archive.writeable = definition.writable
            archive.contents = definition.contents
            archive._db_calling_arguments = kwargs

            # TraitMappings
            # Note: To ensure that this instance of the archive has local copies
            # of all of the Trait Mappings and Column definitions, we make
            # copies of the originals. This is necessary so that e.g. if they
            # are defined on a class instead of an instance, the copies
            # belonging to an instance are unique. Without this, SQLAlchemy will
            # complain that individual TraitMappings are owned by multiple
            # archives.
            archive._local_trait_mappings = MultiDexDict(2)  # type: Dict[Tuple[str, str], traits.TraitMapping]
            archive._mappings = deepcopy(definition.trait_mappings)
            for mapping in archive._mappings:
                archive._register_mapping_locally(mapping)
            archive._update_trait_pointers()

            archive._db_archive_class = fidia_classname(archive)

            # Columns
            column_definitions = deepcopy(definition.column_definitions)  # type: List[Tuple[str, fidia_column.ColumnDefinition]]

            # Associate column instances with this archive instance
            alias_mappings = dict()
            for alias, column in column_definitions:
                log.debug("Associating column %s with archive %s", column, archive)
                instance_column = column.associate(archive)
                archive.columns[instance_column.id] = instance_column
                alias_mappings[alias] = instance_column.id

            # Update any columns that have been referred to by an alias:
            replace_aliases_trait_mappings(archive._mappings, alias_mappings)
            # Update short column ids in mappings to point to local columns where possible
            expand_column_ids_in_trait_mappings(archive._mappings, archive.columns)

        # self._db_session.add(self.trait_manager)
        if is_persisted:
            fidia.mappingdb_session.add(archive)

        return archive
class KnownArchives(object):
    """Singleton registry of all Archives persisted in the mapping database."""

    _instance = None

    @property
    def _query(self):
        # type: () -> sa.orm.query.Query
        """A fresh query over all persisted `Archive` rows."""
        return fidia.mappingdb_session.query(Archive)

    # The following __new__ function implements a Singleton.
    def __new__(cls, *args, **kwargs):
        if KnownArchives._instance is None:
            instance = object.__new__(cls)
            KnownArchives._instance = instance
        return KnownArchives._instance

    @property
    def all(self):
        # type: () -> List[Archive]
        """All known archives, ordered by archive id."""
        return self._query.order_by('_db_archive_id').all()

    def remove_archive(self, archive_id):
        """Delete the archive with *archive_id* from the persistence database.

        Raises KeyError (via ``by_id``) if no such archive exists.
        """
        archive = self.by_id[archive_id]
        log.info("Deleting Archive \"%s\" from the persistence database", str(archive))
        fidia.mappingdb_session.delete(archive)

    class by_id(object):
        """Indexable accessor: ``KnownArchives().by_id["some_id"]``."""

        def __getitem__(self, item):
            # type: (str) -> Archive
            log.debug("Retrieving archive with id \"%s\"...", item)
            log.debug("Query object: %s", KnownArchives()._query)
            try:
                archive = KnownArchives()._query.filter_by(_db_archive_id=item).one()
            # BUG FIX: the original used a bare ``except:``, which also
            # swallows KeyboardInterrupt/SystemExit.  Catch Exception instead
            # (SQLAlchemy raises NoResultFound/MultipleResultsFound here).
            except Exception:
                # ``Logger.warn`` is deprecated; use ``warning``.
                log.warning("Request for unknown archive %s.", item)
                raise KeyError("No archive with id '%s'" % item)
            else:
                log.debug("...archive %s found.", item)
                return archive
    by_id = by_id()
| astrogreen/fidia | fidia/archive/archive.py | archive.py | py | 24,238 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fidia.slogging.getLogger",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fidia.slogging",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "fidia.slogging.WARNING",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "f... |
2341825486 | from fastapi import APIRouter, WebSocket
from ..wallets import router as walletRouter
# Router grouping the websocket endpoints; unknown resources return 404.
router = APIRouter(
    tags=['Websocket endpoints'],
    responses={
        404: {"description": "Resource does not exist"}
    },
)

# Mount the wallet sub-router under the /wallets prefix.
router.include_router(
    router=walletRouter,
    prefix='/wallets',
)


@router.websocket('/')
async def ping(websocket: WebSocket):
    """Websocket echo endpoint: replies to each text message with its content."""
    await websocket.accept()
    # Loop until the client disconnects (receive_text then raises).
    while True:
        data = await websocket.receive_text()
        await websocket.send_text(f"Message text was: {data}")
{
"api_name": "fastapi.APIRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "wallets.router",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "fastapi.WebSocket",
"line_number": 22,
"usage_type": "name"
}
] |
2420058577 | from odoo import api, models, fields, tools
import logging
_logger = logging.getLogger(__name__)


class MailActivityTeam(models.AbstractModel):
    """Extend mail.activity.team with resized image fields.

    Images are resized on create/write via ``tools.image_resize_images``.
    NOTE(review): ``@api.multi`` exists only in Odoo <= 13 — confirm target
    version.
    """
    _inherit = 'mail.activity.team'

    # image: all image fields are base64 encoded and PIL-supported
    image = fields.Binary(
        string="Image",
        attachment=True,
        help="This field holds the image used for this team, limited "
             "to 1024x1024px",
    )
    # Automatically derived 128x128 variant of `image`.
    image_medium = fields.Binary(
        string="Medium-sized image",
        attachment=True,
        help="Medium-sized image of this team. It is automatically "
             "resized as a 128x128px image, with aspect ratio preserved. "
             "Use this field in form views or some kanban views."
    )
    # Automatically derived 64x64 variant of `image`.
    image_small = fields.Binary(
        string="Small-sized image",
        attachment=True,
        help="Small-sized image of this team. It is automatically "
             "resized as a 64x64px image, with aspect ratio preserved. "
             "Use this field anywhere a small image is required."
    )

    @api.model_create_multi
    def create(self, vals_list):
        """Resize the image fields of each record before creation."""
        for vals in vals_list:
            tools.image_resize_images(vals)
        return super(MailActivityTeam, self).create(vals_list)

    @api.multi
    def write(self, vals):
        """Resize the image fields before writing the update."""
        tools.image_resize_images(vals)
        return super(MailActivityTeam, self).write(vals)
| decgroupe/odoo-addons-dec | mail_activity_team_image/models/mail_activity_team.py | mail_activity_team.py | py | 1,369 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "odoo.models.AbstractModel",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "odoo.fields... |
10353210307 | import cv2
from tracker import KCFTracker
def tracker(cam, frame, bbox):
    """Track *bbox* through the video stream *cam* with a KCF tracker.

    Draws the tracked box and FPS on each frame until ESC is pressed or the
    stream ends.

    Parameters
    ----------
    cam : cv2.VideoCapture
        Open capture device to read frames from.
    frame : ndarray
        First frame, used to initialise the tracker.
    bbox : sequence
        Initial (x, y, w, h) region of interest.
    """
    # FIX: the local used to shadow this function's own name
    # (``tracker = KCFTracker(...)``), which prevents re-entrant use.
    kcf = KCFTracker(True, True, True)  # (hog, fixed_window, multi_scale)
    kcf.init(bbox, frame)

    while True:
        ok, frame = cam.read()
        # FIX: the original ignored the read status; stop cleanly when the
        # stream ends instead of crashing inside the tracker update.
        if not ok:
            break
        timer = cv2.getTickCount()
        bbox = list(map(int, kcf.update(frame)))
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw the tracked box.
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        # Overlay the frame rate.
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        if cv2.waitKey(1) & 0xff == 27:
            break

    cam.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Open the default camera (device 0).
    video = cv2.VideoCapture(0)
    # ok, frame = video.read()
    ok, frame = video.read()
    # Let the user draw the initial region of interest.
    bbox = cv2.selectROI('Select ROI', frame, False)
    # An all-zero/empty selection means the user cancelled.
    if min(bbox) == 0: exit(0)
    tracker(video, frame, bbox)
| ryanfwy/KCF-DSST-py | run.py | run.py | py | 1,159 | python | en | code | 68 | github-code | 1 | [
{
"api_name": "tracker.KCFTracker",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tracker.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.getTickCount",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tracker.update",
"... |
20868560186 | from struct import unpack_from
from collections import namedtuple
from smbus2 import SMBus
# One sensor reading: temperature (deg C), relative humidity (%), dust (ug/m^3).
Measurement = namedtuple("Measurement", "temperature,humidity,dust")


class DrsDust:
    """Driver for a combined dust/temperature/humidity sensor on the I2C bus."""

    REGISTER=0  # register/command offset from which the 12-byte sample is read

    def __init__(self, bus=1, i2c_address=0x08):
        """Open I2C *bus* and remember the device *i2c_address*."""
        self.address = i2c_address
        self.bus = SMBus(bus)

    def measure(self):
        """Read one 12-byte sample and unpack it into a `Measurement`.

        The payload is decoded as three native-endian 32-bit floats
        (temperature, humidity, dust) — assumption based on the 'fff'
        format; TODO confirm against the device firmware.
        """
        raw_data = self.bus.read_i2c_block_data(self.address, self.REGISTER, 12)
        return Measurement(*unpack_from('fff', bytes(raw_data)))
if __name__ == '__main__':
    # Take a single reading and print it with units.
    ddust = DrsDust()
    m = ddust.measure()
    print("Temperature: {:0.2f} \u2103".format(m.temperature))
    print("Humidity: {:0.2f} %".format(m.humidity))
    print("Dust density: {:0.2f} ug/m\u00B3".format(m.dust))
| satanowski/smog-o-meter | smogometer/services/DrsDust.py | DrsDust.py | py | 752 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "smbus2.SMBus",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "struct.unpack_from",
"line_number": 18,
"usage_type": "call"
}
] |
74003467553 | import pygame, random
from functools import reduce
class Meteor(pygame.sprite.Sprite):
    """Sprite representing a meteor, loaded from ``meteor.png``.

    NOTE(review): relies on the module-level constant ``BLACK`` defined
    later in the file; this works at runtime because instances are only
    created after the constant exists.
    """

    def __init__(self) -> None:
        super().__init__()
        self.image = pygame.image.load("meteor.png").convert()
        # Make the black background transparent.
        self.image.set_colorkey(BLACK)
        # Rect carrying the sprite's position and size.
        self.rect = self.image.get_rect()
class Player(pygame.sprite.Sprite):
    """Sprite representing the player, loaded from ``player.png``.

    NOTE(review): like `Meteor`, depends on the module-level ``BLACK``
    constant defined later in the file.
    """

    def __init__(self) -> None:
        super().__init__()
        self.image = pygame.image.load("player.png").convert()
        # Make the black background transparent.
        self.image.set_colorkey(BLACK)
        # Rect carrying the sprite's position and size.
        self.rect = self.image.get_rect()
# Initialise pygame.
pygame.init()
# Window size.
SCREEN_WIDTH = 900
SCREEN_HEIGHT = 600
SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)
# Create the window.
screen = pygame.display.set_mode(SIZE)
# Window title.
pygame.display.set_caption("Sprite")
# FPS control.
clock = pygame.time.Clock()
# Hide the mouse cursor.
pygame.mouse.set_visible(0)
# Colour constants (RGB).
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
BLUE = ( 0, 0, 255)
# Exit flag and score.
gameOver = False
score = 0
# Sprite groups: meteors only, and everything drawn each frame.
meteors = pygame.sprite.Group()
allSprites = pygame.sprite.Group()
# Create the meteors at random positions.
for i in range(50):
    meteor = Meteor()
    meteor.rect.x = random.randrange(900)
    meteor.rect.y = random.randrange(600)
    # Add the Meteor instance to both sprite groups.
    meteors.add(meteor)
    allSprites.add(meteor)
# Player.
player = Player()
allSprites.add(player)
# Main loop.
while not gameOver:
    # Process all pending events.
    for event in pygame.event.get():
        # Stop when the window close event arrives.
        if event.type == pygame.QUIT:
            gameOver = True
    ### Game logic start ###
    # Move the player to the mouse position.
    mousePosition = pygame.mouse.get_pos()
    player.rect.x = mousePosition[0]
    player.rect.y = mousePosition[1]
    # Collisions: remove hit meteors and count them.
    meteorsHit = pygame.sprite.spritecollide(player, meteors, True)
    for meteor in meteorsHit:
        score += 1
        print(score)
    ### Game logic end ###
    # Background.
    screen.fill(WHITE)
    ### Drawing start ###
    # Draw all sprites.
    allSprites.draw(screen)
    ### Drawing end ###
    # Flip the display buffer, capped at 60 FPS.
    pygame.display.flip()
    clock.tick(60)
# Quit the game.
pygame.quit()
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",... |
19466962586 | # -*- coding: utf-8 -*-
"""
@file
@brief Function solving the TSP problem
"""
import random
def distance_point(p1, p2):
    """
    Returns the Euclidian distance between two points.
    Retourne la distance euclidienne entre deux points.

    @param      p1      point 1
    @param      p2      point 2
    @return             distance
    """
    squared = sum((a - b) ** 2 for a, b in zip(p1, p2))
    return squared ** 0.5
def distance_circuit(points):
    """
    Computes the distance of this circuit.
    Calcule la longueur d'un circuit.

    @param      points      list of points, the circuit assumes they are giving in that order
    @return                 distance
    """
    # Sum the consecutive legs, then add the closing leg back to the start.
    total = sum(distance_point(a, b) for a, b in zip(points, points[1:]))
    return total + distance_point(points[0], points[-1])
def permutation(points, i, j):
    """
    Switches two points and returns a new path.
    Echange deux points et retourne le nouveau circuit.

    @param      points      circuit
    @param      i           first index
    @param      j           second index (< len(points))
    @return                 new circuit
    """
    # Work on a copy so the caller's list is never modified.
    swapped = list(points)
    swapped[i], swapped[j] = swapped[j], swapped[i]
    return swapped
def reverse(points, i, j):
    """
    Reverses a sub part of circuit.
    Retourne une partie du circuit.

    @param      points      circuit
    @param      i           first index
    @param      j           second index (<= len(points))
    @return                 new circuit
    """
    if i > j:
        i, j = j, i
    # Build a new list with the [i:j) segment reversed (slice form instead of
    # an in-place list.reverse()); the input list is left untouched.
    return points[:i] + points[i:j][::-1] + points[j:]
def voyageur_commerce_simple(points):
    """
    Solves the TSP using basic permutations,
    points are 2D coordinates.

    Résoud le problème du voyageur de commerce.

    Hill-climbing heuristic: repeat until a full pass brings no improvement —
    first a randomised pass of point swaps and segment reversals, then an
    exhaustive pass of all segment reversals (2-opt style).

    @param      points      list of points
    """
    d0 = distance_circuit(points)
    dnew = d0
    n = len(points) - 1
    first = True
    # Loop while the last pass improved the circuit ("first" forces one pass).
    while dnew < d0 or first:
        first = False
        d0 = dnew
        # first pass : random
        for i in range(len(points)):
            # Random swap of two (possibly equal) positions.
            h1 = random.randint(0, n)
            h2 = random.randint(0, n)
            p = permutation(points, h1, h2)
            d = distance_circuit(p)
            if d < dnew:
                dnew = d
                points = p
            # Random reversal of a segment [h1, h2).
            h1 = random.randint(0, n)
            h2 = random.randint(h1 + 1, n + 1)
            p = reverse(points, h1, h2)
            d = distance_circuit(p)
            if d < dnew:
                dnew = d
                points = p
        # second pass : no reverse
        # NOTE(review): despite the comment, this pass tries every segment
        # reversal exhaustively — presumably "no random" was meant.
        for i in range(len(points)):
            for j in range(i + 1, len(points) + 1):
                p = reverse(points, i, j)
                d = distance_circuit(p)
                if d < dnew:
                    dnew = d
                    points = p
    return points
def plot_circuit(points, ax=None, **kwargs):
    """
    Plots the circuit on a graph.
    Dessine la solution du voyageur de commerce.

    @param      points      points
    @param      ax          axe
    @param      kwargs      sent to ``plt.subplots``
    @return                 ax
    """
    if ax is None:
        import matplotlib.pyplot as plt
        _, ax = plt.subplots(**kwargs)
    # Scatter the points, then draw the closed tour on top.
    ax.plot([p[0] for p in points], [p[1] for p in points], "o")
    closed = points + [points[0]]
    ax.plot([p[0] for p in closed], [p[1] for p in closed], "-")
    return ax
| sdpython/code_beatrix | src/code_beatrix/algorithm/tsp.py | tsp.py | py | 3,498 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "random.randint",
"li... |
30685906080 | """
验证码识别
1.简单的图像文字可以用tesseract或CNN神经网络训练数据集预测,再不行就云打码平台
2.极验(滑动)验证码要先计算窗口偏移量大小然后selenium模拟拖动按钮
tesseract是一个将图像翻译成文字的OCR库(optical character recognition) --> 识别验证码效果一般,还是用云打码平台吧
windows安装tesseract-ocr并配置环境变量
from PIL import Image
import pytesseract
img = Image.open("./test.jpg")
# 此处可能需要做降噪和二值化处理,去除干扰线等
print(pytesseract.image_to_string(img))
"""
import requests
import json
from aip import AipOcr
from lxml import etree
import base64
import urllib.request
import urllib.parse
from PIL import Image
class GetCode(object):
    """Fetch a captcha image from a registration page and OCR it via Baidu's API."""

    def __init__(self):
        # API_Key and Secret_Key generated when the Baidu application was created.
        self.API_Key = "s8GHTluI1Xy1OvM7UU0wx4wl"
        self.Secret_Key = "lnUFZRN05rMYshbmRGcZvYsrZnMbtXro"
        # URL for obtaining the access_token.
        self.url = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={}&client_secret={}'
        # URL of the captcha-recognition (web image OCR) endpoint.
        self.api = "https://aip.baidubce.com/rest/2.0/ocr/v1/webimage?access_token={}"
        self.headers = {
            "Content-Type": 'application/x-www-form-urlencoded'
        }

    def get_access_token(self):
        """Request an OAuth access_token from the Baidu API."""
        response = requests.post(self.url.format(self.API_Key, self.Secret_Key), headers=self.headers)
        access_token = json.loads(response.text)['access_token']
        return access_token

    def get_img_src(self):
        """Download the captcha image from the registration page to ./code.jpg."""
        # Registration page of the target site.
        url = 'https://id.ifeng.com/user/register/'
        response = requests.get(url, headers=self.headers)
        html = etree.HTML(response.text)
        # Captcha image URL.
        img_src = html.xpath('//img[@id="js-mobile-reg-code-pic"]/@src')[0]
        urllib.request.urlretrieve(img_src, './code.jpg')
        return img_src

    def init_table(self, threshold=155):
        """Build a 256-entry lookup table mapping grey levels to 0/1 at *threshold*."""
        table = []
        for i in range(256):
            if i < threshold:
                table.append(0)
            else:
                table.append(1)
        return table

    def opt_image(self):
        """Denoise: convert ./code.jpg to greyscale, binarise it, save as ./code1.jpg."""
        im = Image.open("./code.jpg")
        im = im.convert('L')
        im = im.point(self.init_table(), '1')
        im.save('./code1.jpg')
        return "./code1.jpg"

    def get_file_content(self, file_path):
        """Read *file_path*, base64-encode it and return it url-encoded for the API."""
        with open(file_path, 'rb') as f:
            base64_data = base64.b64encode(f.read())
            data = {'images': base64_data.decode()}
            decoded_data = urllib.parse.urlencode(data)
            return decoded_data

    def show_code(self):
        """POST the cleaned-up captcha to the OCR endpoint and print the result."""
        image = self.get_file_content(self.opt_image())
        headers = {
            "Content-Type": "application/x-www-form-urlencoded"
        }
        res = requests.post(self.api.format(self.get_access_token()), headers=headers, data=image)
        print(res.text)

    # Alternative implementation using the AipOcr SDK, kept for reference:
    # def main(self):
    #     APP_ID = '16721750'
    #     API_KEY = 's8GHTluI1Xy1OvM7UU0wx4wl'
    #     SECRET_KEY = 'lnUFZRN05rMYshbmRGcZvYsrZnMbtXro'
    #     client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    #     url = 'https://id.ifeng.com/public/authcode'
    #     # Optional parameters.
    #     options = {
    #         "language_type": "CHN_ENG",
    #         "detect_direction": "true",
    #     }
    #     # Invoke generic text recognition.
    #     # code = client.webImage(self.get_file_content(), options)
    #     code = client.enhancedGeneralUrl(self.get_img_src(), options)
    #     print(code)
# print(code)
if __name__ == '__main__':
    # Download the captcha, then clean it up and print the OCR result.
    gc = GetCode()
    gc.get_img_src()
    gc.show_code()
{
"api_name": "requests.post",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_num... |
7417790842 | #!/usr/bin/env python3
import os
import traceback
## Not sure what this was originally intended for. Leaving for now. BLF
# for key, val in gemsModules.deprecated.delegator.settings.subEntities:
# if val in gemsModules.deprecated.delegator.settings.deprecated:
# pass
# prototype: need to build import statement
# from gemsModules.deprecated.deprecated_20221212. + val + import
from gemsModules.deprecated.sequence import io as sequenceio
from gemsModules.deprecated.sequence import receiver_tasks
from gemsModules.deprecated.common.loggingConfig import loggers, createLogger
from gemsModules.deprecated.common.logic import writeStringToFile
# Module logger: reuse the one already registered for this module if present,
# otherwise create it.
# BUG FIX: the original did ``if loggers.get(__name__): pass`` and only
# assigned ``log`` in the else-branch, so ``log`` was left unbound (NameError
# on first use) whenever a logger had already been registered.
existing_logger = loggers.get(__name__)
if existing_logger:
    log = existing_logger
else:
    log = createLogger(__name__)
# @brief The main way Delegator interacts with this module. Request handling.
# @param Transaction receivedTransactrion
def receive(receivedTransaction: sequenceio.Transaction) -> sequenceio.Transaction:
log.info("sequence.receive() was called:\n")
# log.debug("The received transaction contains this incoming string: ")
# log.debug(receivedTransaction.incoming_string)
# log.debug("request dict: ")
# log.debug(receivedTransaction.request_dict)
# ## Initialize the transaction
from pydantic import ValidationError
thisTransaction = sequenceio.Transaction(receivedTransaction.incoming_string)
try:
thisTransaction.populate_transaction_in()
except ValidationError as e:
log.error(e)
log.error(traceback.format_exc())
thisTransaction.generateCommonParserNotice(
noticeBrief='JsonParseEror',
additionalInfo={'hint': str(e)})
return thisTransaction
try:
thisTransaction.initialize_transaction_out_from_transaction_in()
# For convenience, make a short alias for the entity in the transaction_in
if thisTransaction.transaction_out.timestamp is None:
from datetime import datetime
thisTransaction.transaction_out.timestamp=datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
thisSequenceEntity = thisTransaction.transaction_out.entity
except Exception as error:
log.error(
"There was a problem initializing the outgoing transaction : " + str(error))
log.error(traceback.format_exc())
raise # not sure if this will work as desired - BLFoley 2023-03-15
# it might be necessary to generate a transaction to return and
# populate it with an error message.
## If there is a sequence, ensure that it is valid
the_sequence = receiver_tasks.get_sequence(thisSequenceEntity)
if the_sequence is not None:
from gemsModules.deprecated.sequence import build
#Ok Oliver, here we should try/catch:
try:
carbBuilder = build.getCbBuilderForSequence(the_sequence)
except Exception as error:
log.debug("Just about to call generateCommonParserNotice with the outgoing project. The transaction_out is : ")
log.debug(thisTransaction.transaction_out.json(indent=2))
thisTransaction.generateCommonParserNotice(noticeBrief='InvalidInputPayload', exitMessage=str(error))
thisTransaction.build_outgoing_string()
return thisTransaction # do this or no? # if the process has altered this Transaction, then yes
log.debug("Sequence is valid.")
# If there are no explicit services
if thisSequenceEntity.services == []:
log.debug("'services' was not present in the request. Do the default.")
thisTransaction = receiver_tasks.doDefaultService(thisTransaction)
return thisTransaction
# ## Initialize the project
try:
need_new_project = receiver_tasks.we_should_start_new_project(thisTransaction)
if need_new_project:
log.debug("We need a new project. Starting a new one.")
from gemsModules.deprecated.project import io as projectio
thisProject = projectio.CbProject()
# log.debug("Initializing the non-filesystem-writing parts of the outgoing project")
if thisTransaction.transaction_in.project is not None:
if thisTransaction.transaction_in.project.filesystem_path is not None:
thisProject.setFilesystemPath(thisTransaction.transaction_in.project.filesystem_path, noClobber=False)
else:
thisProject.setFilesystemPath()
log.debug("About to load the version info")
thisProject.loadVersionsFileInfo()
thisTransaction.transaction_out.project=thisProject.copy(deep=True)
need_filesystem_writes = receiver_tasks.we_need_filesystem_writes(thisSequenceEntity)
log.debug("Initializing the filesystem parts of the outgoing project, if any")
if need_filesystem_writes:
# ## TODO - this will probably fail if more than one service needs filesystem
# ## access ~AND~ each service needs a different location.
# ## Project generation / filling needs to be put down into the
# ## services. The new design will facilitate this fix.
return_value = receiver_tasks.set_up_filesystem_for_writing(thisTransaction)
if return_value != 0:
thisTransaction.generateCommonParserNotice(
noticeBrief='GemsError',
additionalInfo={'Message': 'Something went wrong while setting up the filesystem.'})
except Exception as error:
log.error(
"There was a problem initializing the outgoing project: " + str(error))
log.error(traceback.format_exc())
raise Exception
# log.debug("Just initialized the outgoing project. The transaction_out is : ")
# log.debug(thisTransaction.transaction_out.json(indent=2))
###################################################################
#
# these are for logging/debugging and can go if they get heavy
#
log.debug("The entity type is : " + thisSequenceEntity.entityType)
# log.debug("The services are: ")
# log.debug(thisSequenceEntity.services)
vals = thisSequenceEntity.services.values()
for j in vals:
if 'Build3DStructure' in j.typename:
log.debug("Found a build 3d request.")
elif 'Evaluate' in j.typename:
log.debug("Found an evaluation request.")
elif 'Validate' in j.typename:
log.debug("Found a validation request.")
elif 'Status' in j.typename:
log.debug("Found a status request.")
elif 'Marco' in j.typename:
log.debug("Found a marco request.")
else:
log.debug("Found an unknown service: '" + str(j.typename))
# log.debug("The Seqence Entity's inputs looks like:")
# log.debug(thisSequenceEntity.inputs)
# log.debug("The Seqence Entity's inputs.Sequence looks like:")
# log.debug(thisSequenceEntity.inputs.sequence.payload)
###################################################################
# for each explicit service:
for currentService in thisSequenceEntity.services:
log.debug("service, currentService: " + str(currentService))
thisService = thisSequenceEntity.services[currentService]
if 'Evaluate' in thisService.typename:
log.debug("Evaluate service requested from sequence entity.")
try:
thisTransaction.evaluateCondensedSequence()
except Exception as error:
log.error(
"There was a problem evaluating the condensed sequence: " + str(error))
log.error(traceback.format_exc())
noticeMsg="There was a problem evaluating the condensed sequence. Was a default build attempted? "
thisTransaction.generateCommonParserNotice(
noticeBrief='InvalidInputPayload',
additionalInfo={'hint': noticeMsg})
elif 'Build3DStructure' in thisService.typename:
log.debug("Build3DStructure service requested from sequence entity.")
# Sequence was validated above. Should not be needed again.
# An evaluation is needed for checking other things.
try:
thisTransaction.evaluateCondensedSequence()
thisTransaction.setIsEvaluationForBuild(True)
thisTransaction.manageSequenceBuild3DStructureRequest()
except Exception as error:
log.error(
"There was a problem with manageSequenceBuild3DStructureRequest(): " + str(error))
raise error
elif "Validate" in thisService.typename:
# this should be able to become part of previous validation, but leaving in for now
log.debug("Validate service requested from sequence entity.")
try:
thisTransaction.evaluateCondensedSequence(validateOnly=True)
except Exception as error:
log.error(
"There was a problem validating the condensed sequence: " + str(error))
thisTransaction.generateCommonParserNotice(
noticeBrief='InvalidInputPayload')
elif "Status" in thisService.typename:
# this should be able to become part of previous validation, but leaving in for now
log.debug("Status service requested from sequence entity.")
try:
thisTransaction = receiver_tasks.do_status(thisTransaction)
except Exception as error:
log.error(
"There was a problem getting status for sequence: " + str(error))
thisTransaction.generateCommonParserNotice(
noticeBrief='InvalidInputPayload')
elif "Marco" in thisService.typename:
# this should be able to become part of previous validation, but leaving in for now
log.debug("Marco service requested from sequence entity.")
try:
thisTransaction = receiver_tasks.do_marco(thisTransaction)
except Exception as error:
log.error(
"There was a problem running marco for sequence: " + str(error))
thisTransaction.generateCommonParserNotice(
noticeBrief='InvalidInputPayload')
else:
log.error("got to the else, so something is wrong")
thisTransaction.generateCommonParserNotice(
noticeBrief='ServiceNotKnownToEntity')
# prepares the transaction for return to the requestor, success or fail.
# NOTE!!! This uses the child method in sequence.io - a better method!
thisTransaction.build_outgoing_string()
if need_filesystem_writes:
thisProject=thisTransaction.transaction_out.project
outgoingResponse = thisTransaction.transaction_out.json(indent=2, by_alias=True)
outgoingPath = os.path.join(thisProject.logs_dir, "response.json")
log.debug("Writing the outgoing response to: " + outgoingPath)
writeStringToFile(outgoingResponse, outgoingPath)
return thisTransaction
def main():
    """Minimal entry point that only logs its invocation; this module is
    normally imported for its `receive` machinery rather than run directly."""
    log.info("main() was called.\n")
# Standard script guard: run main() only when this file is executed directly.
if __name__ == "__main__":
    main()
| GLYCAM-Web/gems | gemsModules/deprecated/sequence/receive.py | receive.py | py | 11,290 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "gemsModules.deprecated.common.loggingConfig.loggers.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gemsModules.deprecated.common.loggingConfig.loggers",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "gemsModules.deprecated.common.logging... |
"""The medical image object.
This module defines :class:`MedicalVolume`, which is a wrapper for nD volumes.
"""
import warnings
from copy import deepcopy
from mmap import mmap
from numbers import Number
from typing import Sequence, Tuple, Union
import nibabel as nib
import numpy as np
import pydicom
from nibabel.spatialimages import SpatialFirstSlicer as _SpatialFirstSlicerNib
from numpy.lib.mixins import NDArrayOperatorsMixin
from packaging import version
from dosma.core import orientation as stdo
from dosma.core.device import Device, cpu_device, get_array_module, get_device, to_device
from dosma.core.io.format_io import ImageDataFormat
from dosma.defaults import SCANNER_ORIGIN_DECIMAL_PRECISION
from dosma.utils import env
if env.sitk_available():
import SimpleITK as sitk
if env.cupy_available():
import cupy as cp
if env.package_available("h5py"):
import h5py
__all__ = ["MedicalVolume"]
# PyTorch version introducing complex tensor support.
_TORCH_COMPLEX_SUPPORT_VERSION = version.Version("1.5.0")
class MedicalVolume(NDArrayOperatorsMixin):
"""The class for medical images.
Medical volumes use ndarrays to represent medical data. However, unlike standard ndarrays,
these volumes have inherent spatial metadata, such as pixel/voxel spacing, global coordinates,
rotation information, all of which can be characterized by an affine matrix following the
RAS+ coordinate system. The code below creates a random 300x300x40 medical volume with
scanner origin ``(0, 0, 0)`` and voxel spacing of ``(1,1,1)``:
>>> mv = MedicalVolume(np.random.rand(300, 300, 40), np.eye(4))
Medical volumes can also store header information that accompanies pixel data
(e.g. DICOM headers). These headers are used to expose metadata, which can be fetched
and set using :meth:`get_metadata()` and :meth:`set_metadata()`, respectively. Headers are
also auto-aligned, which means that headers will be aligned with the slice(s) of data from
which they originated, which makes Python slicing feasible. Currently, medical volumes
support DICOM headers using ``pydicom`` when loaded with :class:`dosma.DicomReader`.
>>> mv.get_metadata("EchoTime") # Returns EchoTime
>>> mv.set_metadata("EchoTime", 10.0) # Sets EchoTime to 10.0
Standard math and boolean operations are supported with other ``MedicalVolume`` objects,
numpy arrays (following standard broadcasting), and scalars. Boolean operations are performed
elementwise, resulting in a volume with shape as ``self.volume.shape``.
If performing operations between ``MedicalVolume`` objects, both objects must have
the same shape and affine matrix (spacing, direction, and origin). Header information
is not deep copied when performing these operations to reduce computational and memory
overhead. The affine matrix (``self.affine``) is copied as it is lightweight and
often modified.
2D images are also supported when viewed trivial 3D volumes with shape ``(H, W, 1)``:
>>> mv = MedicalVolume(np.random.rand(10,20,1), np.eye(4))
Many operations are in-place and modify the instance directly (e.g. `reformat(inplace=True)`).
To allow chaining operations, operations that are in-place return ``self``.
>>> mv2 = mv.reformat(ornt, inplace=True)
>>> id(mv2) == id(mv)
True
Medical volumes can interface with the gpu using the :mod:`cupy` library.
Volumes can be moved between devices (see :class:`Device`) using the ``.to()`` method.
Only the volume data will be moved to the gpu. Headers and affine matrix will remain on
the cpu. The following code moves a MedicalVolume to gpu 0 and back to the cpu:
>>> from dosma import Device
>>> mv = MedicalVolume(np.random.rand((10,20,30)), np.eye(4))
>>> mv_gpu = mv.to(Device(0))
>>> mv_cpu = mv.cpu()
Note, moving data across devices results in a full copy. Above, ``mv_cpu.volume`` and
``mv.volume`` do not share memory. Saving volumes and converting to other images
(e.g. ``SimpleITK.Image``) are only supported for cpu volumes. Volumes can also only
be compared when on the same device. For example, both commands below will raise a
RuntimeError:
>>> mv_gpu == mv_cpu
>>> mv_gpu.is_identical(mv_cpu)
While CuPy requires the current device be set using ``cp.cuda.Device(X).use()`` or inside
the ``with`` context, ``MedicalVolume`` automatically sets the appropriate context
for performing operations. This means the CuPy current device need to be the same as the
``MedicalVolume`` object. For example, the following still works:
>>> cp.cuda.Device(0).use()
>>> mv_gpu = MedicalVolume(cp.ones((3,3,3)), np.eye(4))
>>> cp.cuda.Device(1).use()
>>> mv_gpu *= 2
MedicalVolumes also have a limited NumPy/CuPy-compatible interface.
Standard numpy/cupy functions that preserve array shapes can be performed
on MedicalVolume objects:
>>> log_arr = np.log(mv)
>>> type(log_arr)
<class 'dosma.io.MedicalVolume'>
>>> exp_arr_gpu = cp.exp(mv_gpu)
>>> type(exp_arr_gpu)
<class 'dosma.io.MedicalVolume'>
**ALPHA**: MedicalVolumes are also interoperable with popular image data structures
with zero-copy, meaning array data will not be copied. Formats currently include the
SimpleITK Image, Nibabel Nifti1Image, and PyTorch tensors:
>>> sitk_img = mv.to_sitk() # Convert to SimpleITK Image
>>> mv_from_sitk = MedicalVolume.from_sitk(sitk_img) # Convert back to MedicalVolume
>>> nib_img = mv.to_nib() # Convert to nibabel Nifti1Image
>>> mv_from_nib = MedicalVolume.from_nib(nib_img)
>>> torch_tensor = mv.to_torch() # Convert to torch tensor
>>> mv_from_tensor = MedicalVolume.from_torch(torch_tensor, affine)
**ALPHA**: MedicalVolumes can also be used with memmapped arrays.
This makes loading much faster and allows interaction with larger-than-memory
arrays. Only when the volume is modified will the volume be loaded
into memory and modified. If you take a slice of the memmaped array, the underlying
array will also remain memmapped:
>>> arr = np.load("/path/to/volume.npy", mmap_mode="r")
>>> mv = MedicalVolume(arr, np.eye(4))
>>> mv.is_mmap # returns True
We also preserve Nibabel's memmapping of certain file types (e.g. ``.nii``):
>>> nib_img = nibabel.load("path/to/volume.nii")
>>> mv = MedicalVolume.from_nib(nib_img, mmap=True)
Args:
volume (array-like): nD medical image.
affine (array-like): 4x4 array corresponding to affine matrix transform in RAS+ coordinates.
Must be on cpu (i.e. no ``cupy.ndarray``).
headers (array-like[pydicom.FileDataset]): Headers for DICOM files.
"""
def __init__(self, volume, affine, headers=None):
if not isinstance(volume, np.memmap):
xp = get_array_module(volume)
volume = xp.asarray(volume)
self._volume = volume
self._affine = np.array(affine)
self._headers = self._validate_and_format_headers(headers) if headers is not None else None
def save_volume(self, file_path: str, data_format: ImageDataFormat = ImageDataFormat.nifti):
"""Write volumes in specified data format.
Args:
file_path (str): File path to save data. May be modified to follow convention
given by the data format in which the volume will be saved.
data_format (ImageDataFormat): Format to save data.
"""
import dosma.core.io.format_io_utils
device = self.device
if device != cpu_device:
raise RuntimeError(f"MedicalVolume must be on cpu, got {self.device}")
writer = dosma.core.io.format_io_utils.get_writer(data_format)
writer.save(self, file_path)
    def reformat(self, new_orientation: Sequence, inplace: bool = False) -> "MedicalVolume":
        """Reorients volume to a specified orientation.

        Flipping and transposing the volume array (``self.volume``) returns a view if possible.

        Reorientation method:
        ---------------------
        - Axis transpose and flipping are linear operations and therefore can be treated
          independently.
        - working example: ('AP', 'SI', 'LR') --> ('RL', 'PA', 'SI')
        1. Transpose volume and RAS orientation to appropriate column in matrix
           eg. ('AP', 'SI', 'LR') --> ('LR', 'AP', 'SI') - transpose_inds=[2, 0, 1]
        2. Flip volume across corresponding axes
           eg. ('LR', 'AP', 'SI') --> ('RL', 'PA', 'SI') - flip axes 0,1

        Reorientation method implementation:
        ------------------------------------
        1. Transpose: Switching (transposing) axes in volume is the same as switching columns
           in affine matrix
        2. Flipping: Negate each column corresponding to pixel axis to flip (i, j, k) and
           reestablish origins based on flipped axes

        Args:
            new_orientation (Sequence): New orientation.
            inplace (bool, optional): If `True`, do operation in-place and return ``self``.

        Returns:
            MedicalVolume: The reformatted volume. If ``inplace=True``, returns ``self``.
        """
        xp = self.device.xp
        device = self.device
        headers = self._headers
        new_orientation = tuple(new_orientation)
        # Fast path: nothing to do if orientation already matches.
        if new_orientation == self.orientation:
            if inplace:
                return self
            return self._partial_clone(volume=self._volume)
        temp_orientation = self.orientation
        temp_affine = np.array(self._affine)
        # Step 1: transpose spatial axes. Non-spatial trailing axes (>=3) keep
        # their positions.
        transpose_inds = stdo.get_transpose_inds(temp_orientation, new_orientation)
        all_transpose_inds = transpose_inds + tuple(range(3, self._volume.ndim))
        with device:
            volume = xp.transpose(self.volume, all_transpose_inds)
        # Headers live on the cpu as a numpy array and are permuted in lockstep
        # with the data so each header keeps tracking its slice.
        if headers is not None:
            headers = np.transpose(headers, all_transpose_inds)
        # Transposing volume axes == permuting the corresponding affine columns.
        for i in range(len(transpose_inds)):
            temp_affine[..., i] = self._affine[..., transpose_inds[i]]
        temp_orientation = tuple([self.orientation[i] for i in transpose_inds])
        # Step 2: flip whichever axes still point the wrong way.
        flip_axs_inds = list(stdo.get_flip_inds(temp_orientation, new_orientation))
        with device:
            volume = xp.flip(volume, axis=tuple(flip_axs_inds))
        if headers is not None:
            headers = np.flip(headers, axis=tuple(flip_axs_inds))
        a_vecs = temp_affine[:3, :3]
        a_origin = temp_affine[:3, 3]
        # phi is a vector of 1s and -1s, where 1 indicates no flip, and -1 indicates flip
        # phi is used to determine which columns in affine matrix to flip
        phi = np.ones([1, len(a_origin)]).flatten()
        phi[flip_axs_inds] *= -1
        b_vecs = np.array(a_vecs)
        for i in range(len(phi)):
            b_vecs[:, i] *= phi[i]
        # get number of pixels to shift by on each axis.
        # Should be 0 when not flipping - i.e. phi<0 mask
        vol_shape_vec = (
            (np.asarray(volume.shape[:3]) - 1) * (phi < 0).astype(np.float32)
        ).transpose()
        # New origin: the old origin shifted to the voxel that is now index 0
        # along every flipped axis, rounded to the configured precision.
        b_origin = np.round(
            a_origin.flatten() - np.matmul(b_vecs, vol_shape_vec).flatten(),
            SCANNER_ORIGIN_DECIMAL_PRECISION,
        )
        temp_affine = np.array(self.affine)
        temp_affine[:3, :3] = b_vecs
        temp_affine[:3, 3] = b_origin
        temp_affine[temp_affine == 0] = 0  # get rid of negative 0s
        if inplace:
            self._affine = temp_affine
            self._volume = volume
            self._headers = headers
            mv = self
        else:
            mv = self._partial_clone(volume=volume, affine=temp_affine, headers=headers)
        # Sanity check that the achieved orientation is the requested one.
        assert (
            mv.orientation == new_orientation
        ), f"Orientation mismatch: Expected: {self.orientation}. Got {new_orientation}"
        return mv
def reformat_as(self, other, inplace: bool = False) -> "MedicalVolume":
"""Reformat this to the same orientation as ``other``.
Equivalent to ``self.reformat(other.orientation, inplace)``.
Args:
other (MedicalVolume): The result volume has the same orientation as ``other``.
inplace (bool, optional): If `True`, do operation in-place and return ``self``.
Returns:
MedicalVolume: The reformatted volume. If ``inplace=True``, returns ``self``.
"""
return self.reformat(other.orientation, inplace=inplace)
def is_identical(self, mv):
"""Check if another medical volume is identical.
Two volumes are identical if they have the same pixel_spacing, orientation,
scanner_origin, and volume.
Args:
mv (MedicalVolume): Volume to compare with.
Returns:
bool: `True` if identical, `False` otherwise.
"""
if not isinstance(mv, MedicalVolume):
raise TypeError("`mv` must be a MedicalVolume.")
idevice = self.device
odevice = mv.device
if idevice != odevice:
raise RuntimeError(f"Expected device {idevice}, got {odevice}.")
with idevice:
return self.is_same_dimensions(mv) and (mv.volume == self.volume).all()
    def _allclose_spacing(self, mv, precision: int = None, ignore_origin: bool = False):
        """Check if spacing between self and another medical volume is within tolerance.

        Tolerance is `10 ** (-precision)`.

        Args:
            mv (MedicalVolume): Volume to compare with.
            precision (`int`, optional): Number of significant figures after the decimal.
                If not specified, check that affine matrices between two volumes are identical.
                Defaults to `None`.
            ignore_origin (bool, optional): If ``True``, ignore matching origin in the affine
                matrix.

        Returns:
            bool: `True` if spacing between two volumes within tolerance, `False` otherwise.
        """
        if precision is not None:
            tol = 10 ** (-precision)
            # NOTE(review): the direction/spacing block uses an absolute
            # tolerance (atol) while the origin comparison uses a relative
            # tolerance (rtol) — confirm this asymmetry is intentional.
            return np.allclose(mv.affine[:3, :3], self.affine[:3, :3], atol=tol) and (
                ignore_origin or np.allclose(mv.scanner_origin, self.scanner_origin, rtol=tol)
            )
        else:
            # Exact comparison when no precision is given; with ignore_origin,
            # only the first three affine columns (direction/spacing) must match.
            return (mv.affine == self.affine).all() or (
                ignore_origin and (mv.affine[:, :3] == self.affine[:, :3]).all()
            )
def is_same_dimensions(self, mv, precision: int = None, err: bool = False):
"""Check if two volumes have the same dimensions.
Two volumes have the same dimensions if they have the same pixel_spacing,
orientation, and scanner_origin.
Args:
mv (MedicalVolume): Volume to compare with.
precision (`int`, optional): Number of significant figures after the decimal.
If not specified, check that affine matrices between two volumes are identical.
Defaults to `None`.
err (bool, optional): If `True` and volumes do not have same dimensions,
raise descriptive ValueError.
Returns:
bool: ``True`` if pixel spacing, orientation, and scanner origin
between two volumes within tolerance, ``False`` otherwise.
Raises:
TypeError: If ``mv`` is not a MedicalVolume.
ValueError: If ``err=True`` and two volumes do not have same dimensions.
"""
if not isinstance(mv, MedicalVolume):
raise TypeError("`mv` must be a MedicalVolume.")
is_close_spacing = self._allclose_spacing(mv, precision)
is_same_orientation = mv.orientation == self.orientation
is_same_shape = mv.volume.shape == self.volume.shape
out = is_close_spacing and is_same_orientation and is_same_shape
if err and not out:
tol_str = f" (tol: 1e-{precision})" if precision else ""
if not is_close_spacing:
raise ValueError(
"Affine matrices not equal{}:\n{}\n{}".format(tol_str, self._affine, mv._affine)
)
if not is_same_orientation:
raise ValueError(
"Orientations not equal: {}, {}".format(self.orientation, mv.orientation)
)
if not is_same_shape:
raise ValueError(
"Shapes not equal: {}, {}".format(self._volume.shape, mv._volume.shape)
)
assert False # should not reach here
return out
def match_orientation(self, mv):
"""Reorient another MedicalVolume to orientation specified by self.orientation.
Args:
mv (MedicalVolume): Volume to reorient.
"""
warnings.warn(
"`match_orientation` is deprecated and will be removed in v0.1. "
"Use `mv.reformat_as(self, inplace=True)` instead.",
DeprecationWarning,
)
if not isinstance(mv, MedicalVolume):
raise TypeError("`mv` must be a MedicalVolume.")
mv.reformat(self.orientation, inplace=True)
def match_orientation_batch(self, mvs): # pragma: no cover
"""Reorient a collection of MedicalVolumes to orientation specified by self.orientation.
Args:
mvs (list[MedicalVolume]): Collection of MedicalVolumes.
"""
warnings.warn(
"`match_orientation_batch` is deprecated and will be removed in v0.1. "
"Use `[x.reformat_as(self, inplace=True) for x in mvs]` instead.",
DeprecationWarning,
)
for mv in mvs:
self.match_orientation(mv)
def clone(self, headers=True):
"""Clones the medical volume.
Args:
headers (bool, optional): If `True`, clone headers.
If `False`, headers have shared memory.
Returns:
mv (MedicalVolume): A cloned MedicalVolume.
"""
return MedicalVolume(
self.volume.copy(),
self.affine.copy(),
headers=deepcopy(self._headers) if headers else self._headers,
)
def to(self, device):
"""Move to device.
If on same device, no-op and returns ``self``.
Args:
device: The device to move to.
Returns:
MedicalVolume
"""
device = Device(device)
if self.device == device:
return self
return self._partial_clone(volume=to_device(self._volume, device))
    def cpu(self):
        """Move the volume data to the cpu (shorthand for ``self.to("cpu")``)."""
        return self.to("cpu")
def astype(self, dtype, **kwargs):
"""Modifies dtype of ``self._volume``.
Note this operation is done in place. ``self._volume`` is modified, based
on the ``astype`` implementation of the type associated with ``self._volume``.
No new MedicalVolume is created - ``self`` is returned.
Args:
dtype (str or dtype): Typecode or data-type to which the array is cast.
Returns:
self
"""
if (
env.package_available("h5py")
and isinstance(self._volume, h5py.Dataset)
and version.parse(env.get_version(h5py)) < version.parse("3.0.0")
):
raise ValueError("Cannot cast h5py.Dataset to dtype for h5py<3.0.0")
self._volume = self._volume.astype(dtype, **kwargs)
return self
def to_nib(self):
"""Converts to nibabel Nifti1Image.
Returns:
nibabel.Nifti1Image: The nibabel image.
Raises:
RuntimeError: If medical volume is not on the cpu.
Examples:
>>> mv = MedicalVolume(np.ones((10,20,30)), np.eye(4))
>>> mv.to_nib()
<nibabel.nifti1.Nifti1Image>
"""
device = self.device
if device != cpu_device:
raise RuntimeError(f"MedicalVolume must be on cpu, got {self.device}")
return nib.Nifti1Image(self.A, self.affine.copy())
    def to_sitk(self, vdim: int = None, transpose_inplane: bool = False):
        """Converts to SimpleITK Image.

        SimpleITK Image objects support vector pixel types, which are represented
        as an extra dimension in numpy arrays. The vector dimension can be specified
        with ``vdim``.

        MedicalVolume must be on cpu. Use ``self.cpu()`` to move.

        SimpleITK loads DICOM files as individual slices that get stacked in ``(z, x, y)``
        order. Thus, ``sitk.GetArrayFromImage`` returns an array in ``(y, x, z)`` order.
        To return a SimpleITK Image that will follow this convention, set
        ``transpose_inplane=True``. If you have been using SimpleITK to load DICOM files,
        you will likely want to specify this parameter.

        Args:
            vdim (int, optional): The vector dimension.
            transpose_inplane (bool, optional): If ``True``, transpose inplane axes.
                Recommended to be ``True`` for users who are familiar with SimpleITK's
                DICOM loading convention.

        Returns:
            SimpleITK.Image

        Raises:
            ImportError: If `SimpleITK` is not installed.
            RuntimeError: If MedicalVolume is not on cpu.

        Note:
            Header information is not currently copied.
        """
        if not env.sitk_available():
            raise ImportError("SimpleITK is not installed. Install it with `pip install simpleitk`")
        device = self.device
        if device != cpu_device:
            raise RuntimeError(f"MedicalVolume must be on cpu, got {self.device}")
        arr = self.volume
        ndim = arr.ndim
        # SimpleITK expects axes in reversed (z, y, x) order; when a vector
        # dimension is given, it is normalized to a positive index and moved
        # to the last position.
        if vdim is not None:
            if vdim < 0:
                vdim = ndim + vdim
            axes = tuple(i for i in range(ndim) if i != vdim)[::-1] + (vdim,)
        else:
            axes = range(ndim)[::-1]
        arr = np.transpose(arr, axes)
        affine = self.affine.copy()
        affine[:2] = -affine[:2]  # RAS+ -> LPS+ (SimpleITK uses LPS+)
        origin = tuple(affine[:3, 3])
        spacing = self.pixel_spacing
        # Direction cosines are the affine columns normalized by spacing.
        direction = affine[:3, :3] / np.asarray(spacing)
        img = sitk.GetImageFromArray(arr, isVector=vdim is not None)
        img.SetOrigin(origin)
        img.SetSpacing(spacing)
        img.SetDirection(tuple(direction.flatten()))
        if transpose_inplane:
            # Swap in-plane axes to match SimpleITK's DICOM loading convention.
            pa = sitk.PermuteAxesImageFilter()
            pa.SetOrder([1, 0, 2])
            img = pa.Execute(img)
        return img
    def to_torch(
        self, requires_grad: bool = False, contiguous: bool = False, view_as_real: bool = False
    ):
        """Zero-copy conversion to torch tensor.

        If torch version supports complex tensors (i.e. torch>=1.5.0), complex MedicalVolume
        arrays will be converted into complex tensors (torch.complex64/torch.complex128).
        Otherwise, tensors will be returned as the real view, where the last dimension has
        two channels (`tensor.shape[-1]==2`). `[..., 0]` and `[..., 1]` correspond to the
        real/imaginary channels, respectively.

        Args:
            requires_grad (bool, optional): Set ``.requires_grad`` for output tensor.
            contiguous (bool, optional): Make output tensor contiguous before returning.
            view_as_real (bool, optional): If ``True`` and underlying array is complex,
                returns a real view of a complex tensor.

        Returns:
            torch.Tensor: The torch tensor.

        Raises:
            ImportError: If ``torch`` is not installed.

        Note:
            This method does not convert affine matrices and headers to tensor types.
        """
        if not env.package_available("torch"):
            raise ImportError(  # pragma: no cover
                "torch is not installed. Install it with `pip install torch`. "
                "See https://pytorch.org/ for more information."
            )
        import torch
        from torch.utils.dlpack import from_dlpack
        device = self.device
        array = self.A
        # Complex arrays need special handling: older torch versions cannot
        # represent them natively, and view_as_real forces the 2-channel view.
        if any(np.issubdtype(array.dtype, dtype) for dtype in (np.complex64, np.complex128)):
            torch_version = env.get_version(torch)
            supports_cplx = version.Version(torch_version) >= _TORCH_COMPLEX_SUPPORT_VERSION
            if not supports_cplx or view_as_real:
                with device:
                    # Reinterpret the complex buffer as its real dtype and
                    # append a trailing (real, imag) channel axis — zero-copy.
                    shape = array.shape
                    array = array.view(dtype=array.real.dtype)
                    array = array.reshape(shape + (2,))
        if device == cpu_device:
            # Zero-copy bridge from numpy on cpu.
            tensor = torch.from_numpy(array)
        else:
            # Zero-copy bridge from cupy on gpu via DLPack.
            tensor = from_dlpack(array.toDlpack())
        tensor.requires_grad = requires_grad
        if contiguous:
            tensor = tensor.contiguous()
        return tensor
def headers(self, flatten=False):
"""Returns headers.
If headers exist, they are currently stored as an array of
pydicom dataset headers, though this is subject to change.
Args:
flatten (bool, optional): If ``True``, flattens header array
before returning.
Returns:
Optional[ndarray[pydicom.dataset.FileDataset]]: Array of headers (if they exist).
"""
if flatten and self._headers is not None:
return self._headers.flatten()
return self._headers
def get_metadata(self, key, dtype=None, default=np._NoValue):
"""Get metadata value from first header.
The first header is defined as the first header in ``np.flatten(self._headers)``.
To extract header information for other headers, use ``self.headers()``.
Args:
key (``str`` or pydicom.BaseTag``): Metadata field to access.
dtype (type, optional): If specified, data type to cast value to.
By default for DICOM headers, data will be in the value
representation format specified by pydicom. See
``pydicom.valuerep``.
default (Any): Default value to return if `key`` not found in header.
If not specified and ``key`` not found in header, raises a KeyError.
Examples:
>>> mv.get_metadata("EchoTime")
'10.0' # this is a number type ``pydicom.valuerep.DSDecimal``
>>> mv.get_metadata("EchoTime", dtype=float)
10.0
>>> mv.get_metadata("foobar", default=0)
0
Raises:
RuntimeError: If ``self._headers`` is ``None``.
KeyError: If ``key`` not found and ``default`` not specified.
Note:
Currently header information is tied to the ``pydicom.FileDataset`` implementation.
This function is synonymous to ``dataset.<key>`` in ``pydicom.FileDataset``.
"""
if self._headers is None:
raise RuntimeError("No headers found. MedicalVolume must be initialized with `headers`")
headers = self.headers(flatten=True)
if key not in headers[0] and default != np._NoValue:
return default
else:
element = headers[0][key]
val = element.value
if dtype is not None:
val = dtype(val)
return val
    def set_metadata(self, key, value, force: bool = False):
        """Sets metadata for all headers.

        Args:
            key (str or pydicom.BaseTag): Metadata field to access.
            value (Any): The value.
            force (bool, optional): If ``True``, force the header to
                set key even if key does not exist in header. If no headers
                exist, a minimal header is fabricated.

        Raises:
            ValueError: If ``self._headers`` is ``None`` and ``force=False``.
        """
        if self._headers is None:
            if not force:
                raise ValueError(
                    "No headers found. To generate headers and write keys, `force` must be True."
                )
            # force=True with no headers: fabricate a minimal single header so
            # the key can still be written.
            self._headers = self._validate_and_format_headers([pydicom.Dataset()])
            warnings.warn(
                "Headers were generated and may not contain all attributes "
                "required to save the volume in DICOM format."
            )
        # Fallback value-representation codes used when pydicom cannot infer
        # the VR from attribute assignment.
        # NOTE(review): "LS" is not a standard DICOM VR (long string is "LO") —
        # confirm against pydicom's accepted VR codes.
        VR_registry = {float: "DS", int: "IS", str: "LS"}
        for h in self.headers(flatten=True):
            if force and key not in h:
                try:
                    setattr(h, key, value)
                except TypeError:
                    # Keyword not recognized by pydicom; add an explicit data
                    # element with a VR guessed from the Python type.
                    h.add_new(key, VR_registry[type(value)], value)
            else:
                h[key].value = value
    def materialize(self):
        """Return ``self`` when the volume is not memory-mapped.

        NOTE(review): this implementation appears truncated — when
        ``self.is_mmap`` is True the method implicitly returns ``None``
        instead of loading the mmapped data into memory. Confirm against
        the upstream implementation.
        """
        if not self.is_mmap:
            return self
def round(self, decimals=0, affine=False) -> "MedicalVolume":
"""Round array (and optionally affine matrix).
Args:
decimals (int, optional): Number of decimals to round to.
affine (bool, optional): The rounded medical volume.
Returns:
MedicalVolume: MedicalVolume with rounded.
"""
from dosma.core.numpy_routines import around
return around(self, decimals, affine)
def sum(
self,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=np._NoValue,
where=np._NoValue,
) -> "MedicalVolume":
"""Compute the arithmetic sum along the specified axis. Identical to :meth:`sum_np`.
See :meth:`sum_np` for more information.
Args:
axis: Same as :meth:`sum_np`.
dtype: Same as :meth:`sum_np`.
out: Same as :meth:`sum_np`.
keepdims: Same as :meth:`sum_np`.
initial: Same as :meth:`sum_np`.
where: Same as :meth:`sum_np`.
Returns:
Union[Number, MedicalVolume]: If ``axis=None``, returns a number or a scalar type of
the underlying ndarray. Otherwise, returns a medical volume containing sum
values.
"""
from dosma.core.numpy_routines import sum_np
# `out` is required for cupy arrays because of how cupy calls array.
if out is not None:
raise ValueError("`out` must be None")
return sum_np(self, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, where=where)
def mean(
    self, axis=None, dtype=None, out=None, keepdims=False, where=np._NoValue
) -> Union[Number, "MedicalVolume"]:
    """Compute the arithmetic mean along the specified axis. Identical to :meth:`mean_np`.

    See :meth:`mean_np` for more information.

    Args:
        axis: Same as :meth:`mean_np`.
        dtype: Same as :meth:`mean_np`.
        out: Same as :meth:`mean_np`. Must be ``None``.
        keepdims: Same as :meth:`mean_np`.
        where: Same as :meth:`mean_np`.

    Returns:
        Union[Number, MedicalVolume]: If ``axis=None``, returns a number or a scalar type of
            the underlying ndarray. Otherwise, returns a medical volume containing mean
            values.

    Raises:
        ValueError: If ``out`` is not ``None``.
    """
    # Imported lazily to avoid a circular import with numpy_routines.
    from dosma.core.numpy_routines import mean_np

    # `out` is required for cupy arrays because of how cupy calls array.
    if out is not None:
        raise ValueError("`out` must be None")
    return mean_np(self, axis=axis, dtype=dtype, keepdims=keepdims, where=where)
@property
def A(self):
"""The pixel array. Same as ``self.volume``.
Examples:
>>> mv = MedicalVolume([[[1,2],[3,4]]], np.eye(4))
>>> mv.A
array([[[1, 2],
[3, 4]]])
"""
return self.volume
@property
def volume(self):
    """ndarray: ndarray representing volume values."""
    return self._volume

@volume.setter
def volume(self, value):
    """Set the underlying pixel array.

    Reorientations done through MedicalVolume keep headers consistent, so they
    are permitted. Externally setting a volume of a different shape invalidates
    the per-slice headers, so they are dropped in that case.

    Raises:
        ValueError: If ``value`` does not have the same number of dimensions
            as the current volume.
    """
    if value.ndim != self._volume.ndim:
        # The check is on ndim, not shape -- say so in the error message.
        raise ValueError("New volume must have same number of dimensions as current volume")
    if self._volume.shape != value.shape:
        # Shape changed -> per-slice headers no longer correspond to slices.
        self._headers = None
    self._volume = value
    self._device = get_device(self._volume)
@property
def pixel_spacing(self):
    """tuple[float]: Pixel spacing in order of current orientation."""
    # Spacing along each axis equals the Euclidean length of the
    # corresponding direction column of the affine matrix.
    direction_columns = self._affine[:3, :3]
    spacing = tuple(np.sqrt(np.sum(direction_columns ** 2, axis=0)))
    assert len(spacing) == 3, "Pixel spacing must have length of 3"
    return spacing
@property
def orientation(self):
"""tuple[str]: Image orientation in standard orientation format.
See orientation.py for more information on conventions.
"""
nib_orientation = nib.aff2axcodes(self._affine)
return stdo.orientation_nib_to_standard(nib_orientation)
@property
def scanner_origin(self):
"""tuple[float]: Scanner origin in global RAS+ x,y,z coordinates."""
return tuple(self._affine[:3, 3])
@property
def affine(self):
"""np.ndarray: 4x4 affine matrix for volume in current orientation."""
return self._affine
@property
def shape(self) -> Tuple[int, ...]:
"""The shape of the underlying ndarray."""
return self._volume.shape
@property
def ndim(self) -> int:
"""int: The number of dimensions of the underlying ndarray."""
return self._volume.ndim
@property
def device(self) -> Device:
"""The device the object is on."""
return get_device(self._volume)
@property
def dtype(self):
"""The ``dtype`` of the ndarray. Same as ``self.volume.dtype``."""
return self._volume.dtype
@property
def is_mmap(self) -> bool:
"""bool: Whether the volume is a memory-mapped array."""
# important to check if .base is a python mmap object, since a view of a mmap
# is also a memmap object, but should not be symlinked or copied
return isinstance(self.A, np.memmap) and isinstance(self.A.base, mmap)
@classmethod
def from_nib(
cls, image, affine_precision: int = None, origin_precision: int = None, mmap: bool = False
) -> "MedicalVolume":
"""Constructs MedicalVolume from nibabel images.
Args:
image (nibabel.Nifti1Image): The nibabel image to convert.
affine_precision (int, optional): If specified, rounds the i/j/k coordinate
vectors in the affine matrix to this decimal precision.
origin_precision (int, optional): If specified, rounds the scanner origin
in the affine matrix to this decimal precision.
mmap (bool, optional): If True, memory map the image.
Returns:
MedicalVolume: The medical image.
Examples:
>>> import nibabel as nib
>>> nib_img = nib.Nifti1Image(np.ones((10,20,30)), np.eye(4))
>>> MedicalVolume.from_nib(nib_img)
MedicalVolume(
shape=(10, 20, 30),
ornt=('LR', 'PA', 'IS')),
spacing=(1.0, 1.0, 1.0),
origin=(0.0, 0.0, 0.0),
device=Device(type='cpu')
)
"""
affine = np.array(image.affine) # Make a copy of the affine matrix.
if affine_precision is not None:
affine[:3, :3] = np.round(affine[:3, :3], affine_precision)
if origin_precision:
affine[:3, 3] = np.round(affine[:3, 3], origin_precision)
data = image.dataobj.__array__() if mmap else image.get_fdata()
mv = cls(data, affine)
if mmap and not mv.is_mmap:
raise ValueError(
"Underlying array in the nibabel image is not mem-mapped. " "Please set mmap=False."
)
return mv
@classmethod
def from_sitk(cls, image, copy=False, transpose_inplane: bool = False) -> "MedicalVolume":
"""Constructs MedicalVolume from SimpleITK.Image.
Use ``transpose_inplane=True`` if the SimpleITK image was loaded with SimpleITK's
DICOM reader or if ``transpose_inplace=True`` was used to create the Image
with :meth:`to_sitk`. See the discussion of SimpleITK's data ordering convention
in :meth:`to_sitk` for more information.
If you are getting a segmentation fault, try using ``copy=True``.
Args:
image (SimpleITK.Image): The image.
copy (bool, optional): If ``True``, copies array.
transpose_inplane (bool, optional): If ``True``, transposes the inplane axes.
Set this to ``True`` if the SimpleITK image was loaded with SimpleITK's
DICOM reader. May need to set ``copy=True`` to avoid segmentation fault.
Returns:
MedicalVolume
Note:
Metadata information is not copied.
"""
if not env.sitk_available():
raise ImportError("SimpleITK is not installed. Install it with `pip install simpleitk`")
if len(image.GetSize()) < 3:
raise ValueError("`image` must be 3D.")
is_vector_image = image.GetNumberOfComponentsPerPixel() > 1
if transpose_inplane:
pa = sitk.PermuteAxesImageFilter()
pa.SetOrder([1, 0, 2])
image = pa.Execute(image)
if copy:
arr = sitk.GetArrayFromImage(image)
else:
arr = sitk.GetArrayViewFromImage(image)
ndim = arr.ndim
if is_vector_image:
axes = tuple(range(ndim)[-2::-1]) + (ndim - 1,)
else:
axes = range(ndim)[::-1]
arr = np.transpose(arr, axes)
origin = image.GetOrigin()
spacing = image.GetSpacing()
direction = np.asarray(image.GetDirection()).reshape(-1, 3)
affine = np.zeros((4, 4))
affine[:3, :3] = direction * np.asarray(spacing)
affine[:3, 3] = origin
affine[:2] = -affine[:2] # LPS+ -> RAS+
affine[3, 3] = 1
return cls(arr, affine)
@classmethod
def from_torch(cls, tensor, affine, headers=None, to_complex: bool = None) -> "MedicalVolume":
"""Zero-copy construction from PyTorch tensor.
Args:
tensor (torch.Tensor): A PyTorch tensor where first three dimensions correspond
to spatial dimensions.
affine (np.ndarray): See class parameters.
headers (np.ndarray[pydicom.FileDataset], optional): See class parameters.
to_complex (bool, optional): If ``True``, interprets tensor as real view of complex
tensor and attempts to restructure it as a complex array.
Returns:
MedicalVolume: A medical image.
Raises:
RuntimeError: If ``affine`` is not on the cpu.
ValueError: If ``tensor`` does not have at least three spatial dimensions.
ValueError: If ``to_complex=True`` and shape is not size ``(..., 2)``.
ImportError: If ``tensor`` on GPU and ``cupy`` not installed.
Examples:
>>> import torch
>>> tensor = torch.ones((2,2,2))
>>> MedicalVolume.from_torch(tensor, affine=np.eye(4))
MedicalVolume(
shape=(2, 2, 2),
ornt=('LR', 'PA', 'IS')),
spacing=(1.0, 1.0, 1.0),
origin=(0.0, 0.0, 0.0),
device=Device(type='cpu')
)
>>> tensor = torch.ones((2,2,2), device="cuda") # zero-copy from GPU 0
>>> MedicalVolume.from_torch(tensor, affine=np.eye(4))
MedicalVolume(
shape=(2, 2, 2),
ornt=('LR', 'PA', 'IS')),
spacing=(1.0, 1.0, 1.0),
origin=(0.0, 0.0, 0.0),
device=Device(type='cuda', index=0)
)
>>> tensor = torch.ones((3,4,5,2)) # treat this tensor as view of complex tensor
>>> mv = MedicalVolume.from_torch(tensor, affine=np.eye(4), to_complex=True)
>>> print(mv)
MedicalVolume(
shape=(3,4,5),
ornt=('LR', 'PA', 'IS')),
spacing=(1.0, 1.0, 1.0),
origin=(0.0, 0.0, 0.0),
device=Device(type='cuda', index=0)
)
>>> mv.dtype
np.complex128
"""
if not env.package_available("torch"):
raise ImportError( # pragma: no cover
"torch is not installed. Install it with `pip install torch`. "
"See https://pytorch.org/ for more information."
)
import torch
from torch.utils.dlpack import to_dlpack
torch_version = env.get_version(torch)
supports_cplx = version.Version(torch_version) >= _TORCH_COMPLEX_SUPPORT_VERSION
# Check if tensor needs to be converted to np.complex type.
# If tensor is of torch.complex64 or torch.complex128 dtype, then from_numpy will take
# care of conversion to appropriate numpy dtype, and we do not need to do the to_complex
# logic.
to_complex = to_complex and (
not supports_cplx
or (supports_cplx and tensor.dtype not in (torch.complex64, torch.complex128))
)
if isinstance(affine, torch.Tensor):
if Device(affine.device) != cpu_device:
raise RuntimeError("Affine matrix must be on the cpu")
affine = affine.numpy()
if (not to_complex and tensor.ndim < 3) or (to_complex and tensor.ndim < 4):
raise ValueError(
f"Tensor must have three spatial dimensions. Got shape {tensor.shape}."
)
if to_complex and tensor.shape[-1] != 2:
raise ValueError(
f"tensor.shape[-1] must have shape 2 when to_complex is specified. "
f"Got shape {tensor.shape}."
)
device = Device(tensor.device)
if device == cpu_device:
array = tensor.detach().numpy()
else:
if env.cupy_available():
array = cp.fromDlpack(to_dlpack(tensor))
else:
raise ImportError( # pragma: no cover
"CuPy is required to convert a GPU torch.Tensor to array. "
"Follow instructions at https://docs.cupy.dev/en/stable/install.html to "
"install the correct binary."
)
if to_complex:
with get_device(array):
if array.dtype == np.float32:
array = array.view(np.complex64)
elif array.dtype == np.float64:
array = array.view(np.complex128)
array = array.reshape(array.shape[:-1])
return cls(array, affine, headers=headers)
def _partial_clone(self, **kwargs) -> "MedicalVolume":
"""Copies constructor information from ``self`` if not available in ``kwargs``."""
if kwargs.get("volume", None) is False:
# special use case to not clone volume
kwargs["volume"] = self._volume
for k in ("volume", "affine"):
if k not in kwargs or (kwargs[k] is True):
kwargs[k] = getattr(self, f"_{k}").copy()
if "headers" not in kwargs:
kwargs["headers"] = self._headers
elif isinstance(kwargs["headers"], bool) and kwargs["headers"]:
kwargs["headers"] = deepcopy(self._headers)
return self.__class__(**kwargs)
def _validate_and_format_headers(self, headers):
"""Validate headers are of appropriate shape and format into standardized shape.
Headers are stored an ndarray of dictionary-like objects with explicit dimensions
that match the dimensions of ``self._volume``. If header objects are not
Assumes ``self._volume`` and ``self._affine`` have been set.
"""
headers = np.asarray(headers)
if headers.ndim > self._volume.ndim:
raise ValueError(
f"`headers` has too many dimensions. "
f"Got headers.ndim={headers.ndim}, but volume.ndim={self._volume.ndim}"
)
for dim in range(-headers.ndim, 0)[::-1]:
if headers.shape[dim] not in (1, self._volume.shape[dim]):
raise ValueError(
f"`headers` must follow standard broadcasting shape. "
f"Got headers.shape={headers.shape}, but volume.shape={self._volume.shape}"
)
ndim = self._volume.ndim
shape = (1,) * (ndim - len(headers.shape)) + headers.shape
headers = np.reshape(headers, shape)
return headers
def _extract_input_array_ufunc(self, input, device=None):
if device is None:
device = self.device
device_err = "Expected device {} but got device ".format(device) + "{}"
if isinstance(input, Number):
return input
elif isinstance(input, np.ndarray):
if device != cpu_device:
raise RuntimeError(device_err.format(cpu_device))
return input
elif env.cupy_available() and isinstance(input, cp.ndarray):
if device != input.device:
raise RuntimeError(device_err.format(Device(input.device)))
return input
elif isinstance(input, MedicalVolume):
if device != input.device:
raise RuntimeError(device_err.format(Device(input.device)))
assert self.is_same_dimensions(input, err=True)
return input._volume
else:
return NotImplemented
def _check_reduce_axis(self, axis: Union[int, Sequence[int]]) -> Tuple[int]:
if axis is None:
return None
is_sequence = isinstance(axis, Sequence)
if not is_sequence:
axis = (axis,)
axis = tuple(x if x >= 0 else self.volume.ndim + x for x in axis)
assert all(x >= 0 for x in axis)
if any(x < 3 for x in axis):
raise ValueError("Cannot reduce MedicalVolume along spatial dimensions")
if not is_sequence:
axis = axis[0]
return axis
def _reduce_array(self, func, *inputs, **kwargs) -> "MedicalVolume":
"""
Assumes inputs have been verified.
"""
device = self.device
xp = device.xp
keepdims = kwargs.get("keepdims", False)
reduce_axis = self._check_reduce_axis(kwargs["axis"])
kwargs["axis"] = reduce_axis
if not isinstance(reduce_axis, Sequence):
reduce_axis = (reduce_axis,)
with device:
volume = func(*inputs, **kwargs)
if xp.isscalar(volume) or volume.ndim == 0:
return volume
if self._headers is not None:
headers_slices = tuple(
slice(None) if x not in reduce_axis else slice(0, 1) if keepdims else 0
for x in range(self._headers.ndim)
)
headers = self._headers[headers_slices]
else:
headers = None
return self._partial_clone(volume=volume, headers=headers)
def __getitem__(self, _slice):
if isinstance(_slice, MedicalVolume):
_slice = _slice.reformat_as(self).A
slicer = _SpatialFirstSlicer(self)
try:
_slice = slicer.check_slicing(_slice)
except ValueError as err:
raise IndexError(*err.args)
volume = self._volume[_slice]
if any(dim == 0 for dim in volume.shape):
raise IndexError("Empty slice requested")
headers = self._headers
if headers is not None:
_slice_headers = []
for idx, x in enumerate(_slice):
if headers.shape[idx] == 1 and not isinstance(x, int):
_slice_headers.append(slice(None))
elif headers.shape[idx] == 1 and isinstance(x, int):
_slice_headers.append(0)
else:
_slice_headers.append(x)
headers = headers[_slice_headers]
affine = slicer.slice_affine(_slice)
return self._partial_clone(volume=volume, affine=affine, headers=headers)
def __setitem__(self, _slice, value):
"""
Note:
When ``value`` is a ``MedicalVolume``, the headers from that value
are not copied over. This may be changed in the future.
"""
if isinstance(value, MedicalVolume):
image = self[_slice]
assert value.is_same_dimensions(image, err=True)
value = value._volume
with self.device:
self._volume[_slice] = value
if self.is_mmap and self._volume.mode == "c":
self._volume = np.asarray(self._volume)
def __repr__(self) -> str:
    """Return a multi-line summary of shape, orientation, spacing, origin, and device."""
    indent = "\n  "
    fields = (
        f"shape={self.shape},",
        f"ornt={self.orientation}),",
        f"spacing={self.pixel_spacing},",
        f"origin={self.scanner_origin},",
        f"device={self.device}",
    )
    return f"{type(self).__name__}(" + indent + indent.join(fields) + "\n)"
def _iops(self, other, op):
    """Helper function for i-type ops (__iadd__, __isub__, etc.)

    Args:
        other: A scalar, ndarray, or MedicalVolume of matching dimensions.
        op: A bound in-place method of ``self._volume``, or its name as a str.

    Returns:
        MedicalVolume: ``self``, mutated in place.
    """
    if isinstance(other, MedicalVolume):
        assert self.is_same_dimensions(other, err=True)
        other = other.volume
    if isinstance(op, str):
        # Allow callers to pass the dunder name instead of a bound method.
        op = getattr(self._volume, op)
    op(other)
    # numpy memmaps opened in copy-on-write mode ("c") do not write back to
    # disk; materialize into a regular in-memory array so the result persists.
    if self.is_mmap and self._volume.mode == "c":
        self._volume = np.asarray(self._volume)
    return self
def __iadd__(self, other):
return self._iops(other, self._volume.__iadd__)
def __ifloordiv__(self, other):
return self._iops(other, self._volume.__ifloordiv__)
def __imul__(self, other):
return self._iops(other, self._volume.__imul__)
def __ipow__(self, other):
return self._iops(other, self._volume.__ipow__)
def __isub__(self, other):
return self._iops(other, self._volume.__isub__)
def __itruediv__(self, other):
return self._iops(other, self._volume.__itruediv__)
def __array__(self):
"""Wrapper for performing numpy operations on MedicalVolume array.
Examples:
>>> a = np.asarray(mv)
>>> type(a)
<class 'numpy.ndarray'>
Note:
This is not valid when ``self.volume`` is a ``cupy.ndarray``.
All CUDA ndarrays must first be moved to the cpu.
"""
try:
return np.asarray(self.volume)
except TypeError:
raise TypeError(
"Implicit conversion to a NumPy array is not allowed. "
"Please use `.cpu()` to move the array to the cpu explicitly "
"before constructing a NumPy array."
)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
def _extract_inputs(inputs, device):
_inputs = []
for input in inputs:
input = self._extract_input_array_ufunc(input, device)
if input is NotImplemented:
return input
_inputs.append(input)
return _inputs
if method not in ["__call__", "reduce"]:
return NotImplemented
device = self.device
_inputs = _extract_inputs(inputs, device)
if _inputs is NotImplemented:
return NotImplemented
if method == "__call__":
with device:
volume = ufunc(*_inputs, **kwargs)
if volume.shape != self._volume.shape:
raise ValueError(
f"{self.__class__.__name__} does not support operations that change shape. "
f"Use operations on `self.volume` to modify array objects."
)
return self._partial_clone(volume=volume)
elif method == "reduce":
return self._reduce_array(ufunc.reduce, *_inputs, **kwargs)
def __array_function__(self, func, types, args, kwargs):
from dosma.core.numpy_routines import _HANDLED_NUMPY_FUNCTIONS
if func not in _HANDLED_NUMPY_FUNCTIONS:
return NotImplemented
# Note: this allows subclasses that don't override
# __array_function__ to handle MedicalVolume objects.
if not all(issubclass(t, (MedicalVolume, self.__class__)) for t in types):
return NotImplemented
return _HANDLED_NUMPY_FUNCTIONS[func](*args, **kwargs)
@property
def __cuda_array_interface__(self):
"""Wrapper for performing cupy operations on MedicalVolume array."""
if self.device == cpu_device:
raise TypeError(
"Implicit conversion to a CuPy array is not allowed. "
"Please use `.to(device)` to move the array to the gpu explicitly "
"before constructing a CuPy array."
)
return self.volume.__cuda_array_interface__
class _SpatialFirstSlicer(_SpatialFirstSlicerNib):
    """Spatial-first slicer backed by a MedicalVolume.

    Used only internally by ``MedicalVolume.__getitem__`` to validate slices
    (``check_slicing``) and compute the sliced affine (``slice_affine``);
    indexing the slicer itself is deliberately disabled.
    """

    def __init__(self, img):
        # `img` is the MedicalVolume being sliced (see MedicalVolume.__getitem__).
        self.img = img

    def __getitem__(self, slicer):
        raise NotImplementedError("Slicing should be done by `MedicalVolume`")
| ad12/DOSMA | dosma/core/med_volume.py | med_volume.py | py | 54,208 | python | en | code | 49 | github-code | 1 | [
{
"api_name": "dosma.utils.env.sitk_available",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dosma.utils.env",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "dosma.utils.env.cupy_available",
"line_number": 26,
"usage_type": "call"
},
{
"api... |
72401623715 | import cv2
import numpy as np
import os
from os.path import join
from time import time
from os import listdir
import matplotlib.pyplot as plt
from src.data.utils.make_dir import make_dir
import src.data.constants as c
# Set working directory to script location
c.setcwd(__file__)

files = c.RAW_FILES_GENERALIZE  # NOTE(review): unused below -- confirm whether still needed
kernel = c.MEDIAN_FILTER_KERNEL
threshold = c.SIMPLE_THRESHOLD
data_dir = c.DATA_DIR
mode = 'train'

# Create folder in case it doesn't exist yet
folder_name = c.MASK_DIR
folder = join(data_dir, mode, folder_name)
make_dir(folder)

# How often to print out with matplotlib
debug_every = c.DBG_EVERY  # NOTE(review): unused while the debug guard below stays commented out

# Name of the folder in which the images will reside
imgs_path = join(c.DATA_DIR, mode, c.IMG_DIR)
masks_path = join(c.DATA_DIR, mode, c.MASK_DIR)

# List of filenames of the .npy images
# .jpg files are for visualizing the process
images = [image for image in listdir(imgs_path) if '.npy' in image]
masks = [mask for mask in listdir(masks_path) if '.npy' in mask]

# Get full image paths from filename list `images`
image_paths = sorted([join(imgs_path, image) for image in images])

# This is the index we will start on, in case there are already
# data files in there
# So, we are only adding to the existing list of files in /imgs/
# -1 for zero-indexing, +1 because we want to start at the next free index
img_idx = len(masks) - 1 + 1
idx = img_idx if img_idx > 0 else 0  # numbering for images
thresholds = []  # NOTE(review): never populated -- confirm it can be removed
idx = 0  # NOTE(review): this overrides the resume index computed above -- confirm intent

tic = time()
for i, path in enumerate(image_paths):
    img = np.int16(np.load(path))
    # Convert to PIL image; requirement for the model
    img = cv2.normalize(img, img, alpha=0, beta=255,
                        dtype=cv2.CV_8UC1, norm_type=cv2.NORM_MINMAX)
    name = f'{idx:05d}'
    # Folder to save image to
    save = join(folder, name)
    idx = idx + 1
    # Median-filter, then binarize with a fixed threshold to produce the mask.
    filtered = cv2.medianBlur(img, kernel)
    _, thresh = cv2.threshold(filtered,
                              threshold, 255, cv2.THRESH_BINARY)
    # np.save(save, thresh)
    # Save as .jpg for debugging
    # if i % debug_every == 1:
    dirs = os.path.dirname(save)
    file = os.path.basename(save)
    plt.imsave(f'{dirs}/_{file}.{c.IMG_EXT}', thresh, cmap='gray')

toc = time()
print(f'annotate.py complete after {(toc-tic)/60: .1f} minutes.')
| gummz/cell | src/data/annotate_old.py | annotate_old.py | py | 2,259 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.data.constants.setcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "src.data.constants",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "src.data.constants.RAW_FILES_GENERALIZE",
"line_number": 14,
"usage_type": "attribute"
},
... |
74262851234 | from app.models import Product,db,SCHEMA,environment
from sqlalchemy.sql import text
from datetime import date
from faker import Faker
fake = Faker()
def seed_products():
    """Insert six demo products, each with a randomized creation date."""
    catalog = [
        ('Shirt', 30.00, 'Workout shirt'),
        ('Shorts', 25.00, 'Workout shorts'),
        ('Whey protein', 40.00, 'Whey'),
        ('Wrist wraps', 60.00, 'Supprt wraps'),
        ('Pre workout', 15.00, 'Supplement'),
        ('Creatine', 50.00, 'Supplement'),
    ]
    for name, price, description in catalog:
        db.session.add(Product(
            name=name,
            price=price,
            description=description,
            created_at=fake.date_between(start_date='-5y', end_date='today'),
        ))
    db.session.commit()
def undo_products():
    """Remove all seeded products (TRUNCATE in production, DELETE elsewhere)."""
    if environment == "production":
        statement = f"TRUNCATE table {SCHEMA}.products RESTART IDENTITY CASCADE;"
    else:
        statement = text("DELETE FROM products")
    db.session.execute(statement)
    db.session.commit()
| xuantien93/IronReligion | app/seeds/products.py | products.py | py | 1,558 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "faker.Faker",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "app.models.Product",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.models.Product",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.models.Product",... |
70646935393 | import re, nltk, bs4
import pandas as pd
import numpy as np
from numpy.linalg import norm
import scipy as sp
from scipy.sparse import csr_matrix as csr
import random
from string import punctuation
from nltk.corpus import stopwords
from collections import defaultdict
file_path = 'C:\\path\\to\\both_files\\'
def tf(n=5, source='reuters'):
    """Build top-``n`` TF-IDF keyword profiles per document and return the
    pairwise cosine-similarity matrix together with the raw documents.

    Args:
        n: Number of top-ranked terms kept per document.
        source: 'reuters' (parses ./reut.html) or 'imdb' (reads ./imdb.csv).

    Returns:
        tuple: ``(ref, data)`` where ``ref`` is a dense document-by-document
        similarity matrix and ``data`` is the analysed document collection.
    """
    # define number of relevant words n
    nltk.download('stopwords')
    punc = list(punctuation)  # NOTE(review): unused -- confirm it can be removed
    stop_words = set(stopwords.words('english'))

    if source == 'reuters':
        file_path = './reut.html'

        def extract_reuters_news(path_file):
            # Parse the Reuters dump and collect all article bodies.
            file = open(path_file , 'r').read()
            soup = bs4.BeautifulSoup(file)
            all_bodies = [el.text for el in soup.find_all('content')] # replace body with content, otherwise bs4 wont find any body other then main body
            return all_bodies

        data = extract_reuters_news(file_path)
        NO_OF_ENTRIES = len(data)
    if source == 'imdb':
        data = pd.read_csv('./imdb.csv')
        # Only the first half of the IMDB reviews is analysed.
        NO_OF_ENTRIES = len(data) //2
        NO_OF_ENTRIES  # NOTE(review): no-op expression statement -- confirm it can be removed
        data = data.review.iloc[:NO_OF_ENTRIES]

    ## GLOBAL DICTS
    ## contains overall count of the term among all documents
    maximum_per_document = defaultdict(int) # maximum a term occurs in one doc. denominator for first equation
    number_docs_containing_term = defaultdict(int) ## How many documents contain a term --> denominator for second equation

    # bow_count will clean the input, create sets for every sentence and return a dict {word:count} & int(maximum count per doc)
    def bow_count(sentences):
        new_sentence = ''
        # Strip HTML line breaks / whitespace and expand a few contractions.
        sentences = re.sub(r'<\s*br\s*\/s*>', '', sentences)
        sentences = re.sub(r'\n>', ' ', sentences)
        sentences = re.sub(r'\s+', ' ', sentences)
        sentences = re.sub(r'\.+\s*', '.', sentences)
        sentences = re.sub(r'who\'ll', 'who will', sentences)
        sentences = re.sub(r'[IiyouYousheSHE]\'ll', 'i will', sentences)
        sentences = re.sub(r'[wW]ouldn\'t', 'would not', sentences)
        sentences = re.sub(r'[mM]mustn\'t', 'must not', sentences)
        sentences = re.sub(r'[tT]hat\'s', 'that is', sentences)
        # Keep only letters, whitespace and sentence-ending periods, lowercased.
        for el in sentences:
            if el.isspace() or el.isalpha() or el == '.': #or el.isnumeric():
                new_sentence += el.lower()
        new_sentences = new_sentence.split('.')
        # One set of non-stopword terms per sentence (presence, not frequency).
        new_sentences = [set(e for e in el.split() if e not in stop_words) for el in new_sentence.split('.')]
        temp_set = set()
        temp_count = defaultdict(int)
        for el in new_sentences:
            for l in el:
                temp_count[l] += 1
                temp_set.add(l)
        # Highest per-sentence occurrence count of any term in this document.
        doc_max_term_count = [v for k,v in sorted(temp_count.items(), key= lambda x : x[1], reverse=True)][0]
        # Side effect: update the document-frequency table shared by all docs.
        for term in temp_set:
            number_docs_containing_term[term] += 1
        return temp_count, doc_max_term_count ## returning a list of sets, where every set is a sentence

    docs = []
    for i,doc in enumerate(data):
        counted_terms, m = bow_count(doc)
        maximum_per_document[i] = m
        docs.append(counted_terms)

    def get_tf_idf(w,doc_index):
        # Classic TF-IDF: tf = count / max-count-in-doc, idf = log(N / df).
        tf_idf = {}
        tf = {}
        for k,v in w.items():
            tf[k] = v / maximum_per_document[doc_index]
            ni = number_docs_containing_term[k]
            from math import log  # NOTE(review): import inside the loop -- consider hoisting to module level
            idf = log(NO_OF_ENTRIES / ni)
            tf_idf[k] = tf[k] * idf
        return tf_idf

    result = []
    words_vector = set()
    # Keep only the n highest-scoring terms per document; union them into the
    # global vocabulary used for the similarity vectors.
    for ind, words in enumerate(docs):
        ranked_words = get_tf_idf(words, ind)
        top_n = {k:v for k,v in sorted(ranked_words.items(), key=lambda x: (-x[1]) )[:n] }
        result.append(top_n)
        top_set = set([el for el in top_n.keys()])
        words_vector |= top_set

    all_word_vector = np.zeros(len(words_vector))
    ## global list that will then be stacked to sparse matrix
    similarity_to_stack = []

    ## create a similarity vector of all words -> which is then used to create per-result-datapoint-vectors --> stacking those to matrix
    def similarity_vector(words):
        # NOTE(review): iterating a set gives a fixed but arbitrary vocabulary
        # order within one run; all vectors share it, so similarity is consistent.
        doc_vec = all_word_vector.copy()
        for i,word in enumerate(words_vector):
            if word in words:
                doc_vec[i] = 1
        doc_vec_norm = np.linalg.norm(doc_vec)
        doc_vec /= doc_vec_norm
        return doc_vec # which is a vector that is normalized and can be compared to all others

    for progress,r in enumerate(result):
        similarity_to_stack.append(similarity_vector(list(r.keys())))
        if progress%1000 == 0:
            print(progress, ' records analysed.')

    # stack all results similarity vectors to one matrix
    m = csr(np.vstack(similarity_to_stack))
    m.shape  # NOTE(review): no-op expression statement -- confirm it can be removed
    # print the stacked matrix:
    # m: number of datapoints, n: number of words in all_word_vector
    # NOTE(review): `plt` is never imported in this module, so this call raises
    # NameError at runtime; add `import matplotlib.pyplot as plt` or remove it.
    plt.spy(m, marker='.', markersize=1)
    # Cosine similarity of every document against every other (vectors are unit-norm).
    ref = m.dot(m.T).toarray()
    return ref, data
ref,data = tf()

# quick & dirty: identify similar articles/blogs/reviews
# NOTE(review): the matrix is symmetric, so each pair prints twice ((i, j) and
# (j, i)); `break` only stops after the first match found in each row.
for ind,ary in enumerate(ref):
    for i,el in enumerate(ary):
        if el > .6 and ind != i :
            print(ind, ' and ', i)
            break
| guenter-r/knn | tf_idf.py | tf_idf.py | py | 5,501 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "nltk.download",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk... |
35889571199 | from contextlib import closing
import wolframalpha
import boto3
import sys
import os
# WolframAlpha API key used to answer the question passed on the command line.
wolfram_app_id = "APPID-WOLFRAM"
wolfram_client = wolframalpha.Client(wolfram_app_id)

query = str(sys.argv[1])
print(":: Question: " + query + "\n")
print("++ Getting Answer from WolframAlpha...\n")

wolfram_response = wolfram_client.query(query)
# NOTE(review): .encode() returns `bytes`; the str+bytes concatenation below
# only works on Python 2 -- confirm the target interpreter, or decode for Py3.
wolfram_answer = next(wolfram_response.results).text.encode('ascii','ignore')
print(":: Answer:")
print(wolfram_answer)

cwd = os.getcwd()
aws_client = boto3.client('polly')
print("\n++ Getting synthesized audio file from Amazon Polly...\n")
# Request raw 16 kHz PCM so the result can be played with sox's `play` below.
aws_response = aws_client.synthesize_speech(
    OutputFormat='pcm',
    SampleRate='16000',
    Text="for the question " + query + ", the answer is: " + wolfram_answer + ".",
    TextType='text',
    VoiceId='Joanna',
)

if "AudioStream" in aws_response:
    # closing() guarantees the botocore stream is released after reading.
    with closing(aws_response["AudioStream"]) as stream:
        output = cwd + "/answer.pcm"
        try:
            with open(output, "wb") as file:
                file.write(stream.read())
        except IOError as error:
            print(error)
            sys.exit(-1)

print("++ Playing the synthesized audio file..\n")
os.system("play -t raw -r 16k -e signed -b 16 -c 1 " + cwd + "/answer.pcm")
| panggi/question-answering-wolfram-polly | wolframsynth.py | wolframsynth.py | py | 1,229 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "wolframalpha.Client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_nu... |
11637370478 | from flask import Flask
from . import provedores
def create_app():
    """Application factory: build and configure the Flask app.

    Registers the home/about/pdt blueprints and pre-populates the provider
    chart cache inside an application context.
    """
    app = Flask(
        __name__,
        static_folder="assets",
        instance_relative_config=False,
    )

    with app.app_context():
        app.secret_key = '12345'

        # Blueprint modules are imported here, inside the app context.
        from .home import home
        from .about import about
        from .provedores.pdt import pdt

        for blueprint in (home.home_bp, about.about_bp, pdt.pdt_bp):
            app.register_blueprint(blueprint)

        # Compute provider charts once at startup and cache them on the app.
        app.charts_cache = dict()
        app.charts_cache.update(pdt.grafico_despesas())

        return app
| brasil-em-numeros/brasil-em-numeros | dashboard/__init__.py | __init__.py | py | 833 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "home.home.home_bp",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "home.home",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "about.about.about_bp",
... |
38572335275 | from numpy import sign
from functools import cmp_to_key
INPUT_FILENAME ="input"
TEST1_FILENAME ="test1.txt"
TEST2_FILENAME ="test2.txt"
PART = 1
DEBUG = False
DIVIDER_PACKETS = [[[2]], [[6]]]
def debug_print(s: str):
    """Print ``s`` only when the module-level DEBUG flag is enabled."""
    if not DEBUG:
        return
    print(s)
def parse_pair(pair: str):
    """Parse a two-line packet pair into a ``(left, right)`` tuple of lists.

    Args:
        pair: Two newline-separated packet literals, e.g. two lines such as
            ``[1,2]`` and ``[[3],4]``.

    Returns:
        tuple: The two parsed packets.
    """
    # literal_eval only accepts Python literals, unlike eval() which would
    # execute arbitrary code read from the puzzle input file.
    from ast import literal_eval

    left, right = pair.splitlines()
    return literal_eval(left), literal_eval(right)
def compare(left, right):
    """Compare two packet values per the AoC 2022 day-13 ordering rules.

    Args:
        left: An int or (nested) list of ints.
        right: An int or (nested) list of ints.

    Returns:
        int: ``1`` if ``left`` sorts before ``right`` (correctly ordered),
        ``-1`` if it sorts after, ``0`` if the values are equal.
    """
    # Two ints: direct three-way comparison.
    if isinstance(left, int) and isinstance(right, int):
        return (left < right) - (left > right)

    # Mixed int/list: promote the int to a one-element list.
    if isinstance(left, int):
        left = [left]
    if isinstance(right, int):
        right = [right]

    # Two lists: the first element-wise difference decides; if one list is a
    # prefix of the other, the shorter list sorts first.
    for left_item, right_item in zip(left, right):
        outcome = compare(left_item, right_item)
        if outcome != 0:
            return outcome
    return (len(left) < len(right)) - (len(left) > len(right))
if __name__ == "__main__":
    with open(INPUT_FILENAME) as input_f:
        pairs = input_f.read().split("\n\n")

    sum_of_indices = 0
    # Copy so appending packets below does not mutate the module-level constant.
    full_list = list(DIVIDER_PACKETS)
    for index, pair in enumerate(pairs):
        left, right = parse_pair(pair)
        full_list.append(left)
        full_list.append(right)
        # Part 1: sum the 1-based indices of correctly ordered pairs.
        if compare(left, right) == 1:
            sum_of_indices += (index + 1)
    print(f"Sum of indices is {sum_of_indices}")

    # Part 2: sort all packets (including dividers) and multiply the 1-based
    # positions of the two divider packets.
    full_list.sort(key=cmp_to_key(compare), reverse=True)
    decoder_key = (full_list.index([[2]]) + 1) * (full_list.index([[6]]) + 1)
    print(f"Decoder key is {decoder_key}")
{
"api_name": "numpy.sign",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "functools.cmp_to_key",
"line_number": 83,
"usage_type": "call"
}
] |
38946099835 | import logging
from json import load
from os.path import isdir
from time import time
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeRegressor
def get_setting(arg_setting_name, arg_settings):
    """Return the value for ``arg_setting_name`` from the settings mapping.

    If the key is missing, log a warning and terminate the process: every
    required setting must be present for the script to run.
    """
    if arg_setting_name in arg_settings:  # direct membership test; .keys() is redundant
        return arg_settings[arg_setting_name]
    logger.warning('required key %s is not in the settings. Quitting.' % arg_setting_name)
    quit()
def check_exists(arg_folder_name, arg_descriptor):
folder_exists = isdir(arg_folder_name)
if folder_exists:
logger.debug('using %s as the %s folder' % (arg_folder_name, arg_descriptor))
else:
logger.warning('%s %s does not exist. Quitting.' % (arg_descriptor, arg_folder_name))
quit()
if __name__ == '__main__':
start_time = time()
formatter = logging.Formatter('%(asctime)s : %(name)s :: %(levelname)s : %(message)s')
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
console_handler.setLevel(logging.DEBUG)
logger.debug('started')
with open('./settings-predict.json') as settings_fp:
settings = load(settings_fp)
logger.debug(settings)
input_folder = get_setting('input_folder', settings)
check_exists(input_folder, 'input')
output_folder = get_setting('output_folder', settings)
check_exists(output_folder, 'output')
training_data_file = get_setting('training_data_file', settings)
full_training_data_file = input_folder + training_data_file
logger.debug('loading data from %s' % full_training_data_file)
train_df = pd.read_csv(full_training_data_file)
logger.debug('training data has shape %d x %d' % train_df.shape)
test_data_file = get_setting('test_data_file', settings)
full_test_data_file = input_folder + test_data_file
logger.debug('loading data from %s' % full_test_data_file)
test_df = pd.read_csv(full_test_data_file)
logger.debug('training data has shape %d x %d' % test_df.shape)
# get the target before we do any feature engineering
target = train_df['TARGET'].values
# get the fields where we want to do label encoding
fields_to_label_encode = get_setting('fields_to_label_encode', settings)
logger.debug('we will use the label encoder for the following fields: %s' % fields_to_label_encode)
logger.debug(train_df.dtypes)
for field in fields_to_label_encode:
if train_df.dtypes[field] == 'object':
train_df[field].replace(np.nan, '', regex=True, inplace=True)
test_df[field].replace(np.nan, '', regex=True, inplace=True)
encoder = LabelEncoder()
logger.debug('field %s has unique values %s' % (field, train_df[field].unique()))
encoder.fit(train_df[field])
train_df[field] = encoder.transform(train_df[field])
logger.debug('done transforming the training data, field %s' % field)
test_df[field] = encoder.transform(test_df[field])
logger.debug('done transforming the test data, field %s' % field)
logger.debug(train_df.dtypes)
# look for infinite values
for column in train_df.columns.values:
if train_df.dtypes[column] == 'float64':
train_df[column].replace(np.inf, -1.0, inplace=True)
# after feature engineering align the two data frames
train_df, test_df = train_df.align(test_df, join='inner', axis=1)
logger.debug('after alignment the training data has shape %d x %d' % train_df.shape)
logger.debug('after alignment the test data has shape %d x %d' % test_df.shape)
random_state = get_setting('random_state', settings)
# build the model
model = DecisionTreeRegressor(criterion='mse', splitter='best', max_depth=None, min_impurity_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None,
random_state=random_state, max_leaf_nodes=None, presort=False,
min_impurity_decrease=0.0)
model.fit(X=train_df, y=target, sample_weight=None, check_input=True, X_idx_sorted=None)
y_pred = model.predict(X=test_df, check_input=True)
logger.debug('done')
finish_time = time()
elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
logger.info("Time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
console_handler.close()
logger.removeHandler(console_handler)
| mikedelong/animated-kaggle | code/predict.py | predict.py | py | 4,735 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.isdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"li... |
1601838586 | import requests, re
res = requests.get('http://langlang2017.com')
html = res.text.encode('ISO-8859-1').decode(res.apparent_encoding)
# print(html)
pattern = re.compile('<li><img.*?src="img/(.*?)"', re.S)
con = re.findall(pattern, html)
print(con)
for i in con[0]:
ur = 'http://langlang2017.com/img/' + i
r = requests.get(ur)
with open(i, 'wb') as f:
f.write(r.content)
| Lousm/Python | 04_爬虫/week1/day03/07zhengze.py | 07zhengze.py | py | 391 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 7,
... |
32796762532 | import cv2
import numpy as np
import sys
if (len(sys.argv) > 1):
filename = sys.argv[1]
else:
print('Pass a filename as first argument')
sys.exit(0)
img1 = cv2.imread(filename, cv2.IMREAD_COLOR)
img2 = img1.copy()
DEFAULT = img1.copy()
DEFAULT2 = img2.copy()
def get_points_img1(event, x, y, flags, param):
global img1_points, img1_click_n, img1, DEFAULT
if event == cv2.EVENT_LBUTTONDOWN:
if img1_click_n == 3:
img1 = DEFAULT.copy()
img1_points = [(-1,-1), (-1,-1), (-1,-1)]
img1_click_n = 0
if img1_click_n == 0:
img1_points[0] = (x, y)
elif img1_click_n == 1:
img1_points[1] = (x, y)
elif img1_click_n == 2:
img1_points[2] = (x, y)
cv2.circle(img1, (x, y), 5, (255,0,200), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img1, str(img1_click_n+1), (x+5, y+4), font, 0.5, (0,0,0), 1, cv2.LINE_AA)
img1_click_n += 1
if event == cv2.EVENT_RBUTTONDOWN:
img1 = DEFAULT.copy()
img1_points = [(-1,-1), (-1,-1), (-1,-1)]
img1_click_n = 0
def get_points_img2(event, x, y, flags, param):
global img2_points, img2_click_n, img2, DEFAULT2
if event == cv2.EVENT_LBUTTONDOWN:
if img2_click_n == 3:
img2 = DEFAULT.copy()
img2_points = [(-1,-1), (-1,-1), (-1,-1)]
img2_click_n = 0
if img2_click_n == 0:
img2_points[0] = (x, y)
elif img2_click_n == 1:
img2_points[1] = (x, y)
elif img2_click_n == 2:
img2_points[2] = (x, y)
cv2.circle(img2, (x, y), 5, (255,0,200), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img2, str(img2_click_n+1), (x+5, y+4), font, 0.5, (0,0,0), 1, cv2.LINE_AA)
img2_click_n += 1
if event == cv2.EVENT_RBUTTONDOWN:
img2 = DEFAULT2.copy()
img2_points = [(-1,-1), (-1,-1), (-1,-1)]
img2_click_n = 0
img1_click_n = 0
img1_points = [(-1,-1), (-1,-1), (-1,-1)]
img2_click_n = 0
img2_points = [(-1,-1), (-1,-1), (-1,-1)]
cv2.namedWindow('image1')
cv2.namedWindow('image2')
cv2.setMouseCallback('image1', get_points_img1)
cv2.setMouseCallback('image2', get_points_img2)
while(1):
cv2.imshow('image1', img1)
cv2.imshow('image2', img2)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
if (img1_click_n == 3 & img2_click_n == 3):
pts1 = np.float32([img1_points[0], img1_points[1], img1_points[2]])
pts2 = np.float32([img2_points[0], img2_points[1], img2_points[2]])
M = cv2.getAffineTransform(pts1, pts2)
img1_h, img1_w, img1_c = np.shape(img1)
cv2.warpAffine(DEFAULT2, M, (img1_w, img1_h), img1)
cv2.destroyAllWindows()
| mtsafur/vision-por-computadora | practicas-clase/clase-4/practica-2/practica_2.py | practica_2.py | py | 2,879 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 11,... |
14194255928 | import hashlib
import inspect
import operator
from typing import Callable, Optional, Union
import cloudpickle
import numpy as np
class _FakeArgSpec:
def __init__(
self,
args=None,
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=None,
kwonlydefaults=None,
annotations=None,
):
self.args = args
self.varargs = varargs
self.varkw = varkw
self.defaults = defaults
self.kwonlyargs = kwonlyargs
self.kwonlydefaults = kwonlydefaults
self.annotations = annotations
def function_repr(
func: Callable,
argspec: Optional[Union[_FakeArgSpec, inspect.FullArgSpec]] = None,
) -> str:
"""Returns a human-readable string representation for a function."""
if argspec is None:
argspec = inspect.getfullargspec(func)
args = [str(arg) for arg in argspec.args]
if argspec.defaults:
for i, val in enumerate(argspec.defaults[::-1]):
args[-(i + 1)] = args[-(i + 1)] + f"={val!r}"
if argspec.varargs:
args.append("*" + argspec.varargs)
if argspec.kwonlyargs:
if not argspec.varargs:
args.append("*")
args.extend(argspec.kwonlyargs)
if argspec.kwonlydefaults:
for i, name in enumerate(args):
if name in argspec.kwonlydefaults:
args[i] = args[i] + f"={argspec.kwonlydefaults[name]!r}"
if argspec.varkw:
args.append("**" + argspec.varkw)
if argspec.annotations:
for i, name in enumerate(args):
if name in argspec.annotations:
args[i] = args[i] + f": {argspec.annotations[name].__name__!r}"
return func.__name__ + "(" + ", ".join(args) + ")"
class Parameter:
"""A callable object that computes a scalar or vector quantity
as a function of position coordinates x, y (and optionally z and time t).
Addition, subtraction, multiplication, and division
between multiple Parameters and/or real numbers (ints and floats)
is supported. The result of any of these operations is a
``CompositeParameter`` object.
Args:
func: A callable/function that actually calculates the parameter's value.
The function must take x, y (and optionally z) as the first and only
positional arguments, and all other arguments must be keyword arguments.
Therefore func should have a signature like
``func(x, y, z, a=1, b=2, c=True)``, ``func(x, y, *, a, b, c)``,
``func(x, y, z, *, a, b, c)``, or ``func(x, y, z, *, a, b=None, c=3)``.
For time-dependent Parameters, ``func`` must also take time ``t`` as a
keyword-only argument.
time_dependent: Specifies that ``func`` is a function of time ``t``.
kwargs: Keyword arguments for func.
"""
__slots__ = ("func", "kwargs", "time_dependent", "_cache")
def __init__(self, func: Callable, time_dependent: bool = False, **kwargs):
argspec = inspect.getfullargspec(func)
args = argspec.args
num_args = 2
if args[:num_args] != ["x", "y"]:
raise ValueError(
"The first function arguments must be x and y, "
f"not {', '.join(args[:num_args])!r}."
)
if "z" in args:
if args.index("z") != num_args:
raise ValueError(
"If the function takes an argument z, "
"it must be the third argument (x, y, z)."
)
num_args = 3
defaults = argspec.defaults or []
if len(defaults) != len(args) - num_args:
raise ValueError(
"All arguments other than x, y, z must be keyword arguments."
)
self.time_dependent = time_dependent
defaults_dict = dict(zip(args[num_args:], defaults))
kwonlyargs = set(kwargs) - set(argspec.args[num_args:])
if not kwonlyargs.issubset(set(argspec.kwonlyargs or [])):
raise ValueError(
f"Provided keyword-only arguments ({kwonlyargs!r}) "
f"do not match the function signature: {function_repr(func)}."
)
defaults_dict.update(argspec.kwonlydefaults or {})
self.func = func
self.kwargs = defaults_dict
self.kwargs.update(kwargs)
self._cache = {}
if self.time_dependent and "t" not in argspec.kwonlyargs:
raise ValueError(
"A time-dependent Parameter must take time t as a keyword argument."
)
def _hash_args(self, x, y, z, t) -> str:
def _coerce_to_tuple(a):
try:
return tuple(_coerce_to_tuple(i) for i in a)
except TypeError:
return a
def _to_tuple(items):
results = []
for key, value in items:
if isinstance(value, dict):
value = _to_tuple(value.items())
elif isinstance(value, (list, np.ndarray)):
value = _coerce_to_tuple(value)
results.append((key, value))
return tuple(results)
return (
hex(hash(_to_tuple(self.kwargs.items())))
+ hashlib.sha1(np.ascontiguousarray(x)).hexdigest()
+ hashlib.sha1(np.ascontiguousarray(y)).hexdigest()
+ hashlib.sha1(np.ascontiguousarray(z)).hexdigest()
+ hex(hash(t))
)
def __call__(
self,
x: Union[int, float, np.ndarray],
y: Union[int, float, np.ndarray],
z: Optional[Union[int, float, np.ndarray]] = None,
t: Optional[float] = None,
) -> Union[int, float, np.ndarray]:
cache_key = self._hash_args(x, y, z, t)
if cache_key not in self._cache:
kwargs = self.kwargs.copy()
if t is not None:
kwargs["t"] = t
x, y = np.atleast_1d(x, y)
if z is not None:
kwargs["z"] = np.atleast_1d(z)
result = np.asarray(self.func(x, y, **kwargs)).squeeze()
if result.ndim == 0:
result = result.item()
self._cache[cache_key] = result
return self._cache[cache_key]
def _get_argspec(self) -> _FakeArgSpec:
if self.kwargs:
kwargs, kwarg_values = list(zip(*self.kwargs.items()))
else:
kwargs = []
kwarg_values = []
kwargs = list(kwargs)
kwarg_values = list(kwarg_values)
if self.time_dependent:
kwargs.insert(0, "time_dependent")
kwarg_values.insert(0, True)
return _FakeArgSpec(args=kwargs, defaults=kwarg_values)
def __repr__(self) -> str:
func_repr = function_repr(self.func, argspec=self._get_argspec())
return f"{self.__class__.__name__}<{func_repr}>"
def __add__(self, other) -> "CompositeParameter":
"""self + other"""
return CompositeParameter(self, other, operator.add)
def __radd__(self, other) -> "CompositeParameter":
"""other + self"""
return CompositeParameter(other, self, operator.add)
def __sub__(self, other) -> "CompositeParameter":
"""self - other"""
return CompositeParameter(self, other, operator.sub)
def __rsub__(self, other) -> "CompositeParameter":
"""other - self"""
return CompositeParameter(other, self, operator.sub)
def __mul__(self, other) -> "CompositeParameter":
"""self * other"""
return CompositeParameter(self, other, operator.mul)
def __rmul__(self, other) -> "CompositeParameter":
"""other * self"""
return CompositeParameter(other, self, operator.mul)
def __truediv__(self, other) -> "CompositeParameter":
"""self / other"""
return CompositeParameter(self, other, operator.truediv)
def __rtruediv__(self, other) -> "CompositeParameter":
"""other / self"""
return CompositeParameter(other, self, operator.truediv)
def __pow__(self, other) -> "CompositeParameter":
"""self ** other"""
return CompositeParameter(self, other, operator.pow)
def __rpow__(self, other) -> "CompositeParameter":
"""other ** self"""
return CompositeParameter(other, self, operator.pow)
def __eq__(self, other) -> bool:
if other is self:
return True
if not isinstance(other, Parameter):
return False
# Check if function bytecode is the same
if self.func.__code__ != other.func.__code__:
return False
if set(self.kwargs) != set(other.kwargs):
return False
def array_safe_equals(a, b) -> bool:
"""Check if a and b are equal, even if they are numpy arrays."""
if a is b:
return True
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
return a.shape == b.shape and np.allclose(a, b)
try:
return a == b
except TypeError:
return NotImplemented
for key in self.kwargs:
if not array_safe_equals(self.kwargs[key], other.kwargs[key]):
return False
return True
class CompositeParameter(Parameter):
"""A callable object that behaves like a Parameter
(i.e. it computes a scalar or vector quantity as a function of
position coordinates x, y, z). A CompositeParameter object is created as
a result of mathematical operations between Parameters, CompositeParameters,
and/or real numbers.
Addition, subtraction, multiplication, division, and exponentiation
between Parameters, CompositeParameters and real numbers (ints and floats)
are supported. The result of any of these operations is a new
CompositeParameter object.
Args:
left: The object on the left-hand side of the operator.
right: The object on the right-hand side of the operator.
operator_: The operator acting on left and right (or its string representation).
"""
VALID_OPERATORS = {
operator.add: "+",
operator.sub: "-",
operator.mul: "*",
operator.truediv: "/",
operator.pow: "**",
}
def __init__(
self,
left: Union[int, float, Parameter, "CompositeParameter"],
right: Union[int, float, Parameter, "CompositeParameter"],
operator_: Union[Callable, str],
):
valid_types = (int, float, complex, Parameter, CompositeParameter)
if not isinstance(left, valid_types):
raise TypeError(
f"Left must be a number, Parameter, or CompositeParameter, "
f"not {type(left)!r}."
)
if not isinstance(right, valid_types):
raise TypeError(
f"Right must be a number, Parameter, or CompositeParameter, "
f"not {type(right)!r}."
)
if isinstance(left, (int, float)) and isinstance(right, (int, float)):
raise TypeError(
"Either left or right must be a Parameter or CompositeParameter."
)
if isinstance(operator_, str):
operators = {v: k for k, v in self.VALID_OPERATORS.items()}
operator_ = operators.get(operator_.strip(), None)
if operator_ not in self.VALID_OPERATORS:
raise ValueError(
f"Unknown operator, {operator_!r}. "
f"Valid operators are {list(self.VALID_OPERATORS)!r}."
)
self.left = left
self.right = right
self.operator = operator_
self.time_dependent = False
if isinstance(self.left, Parameter) and self.left.time_dependent:
self.time_dependent = True
if isinstance(self.right, Parameter) and self.right.time_dependent:
self.time_dependent = True
def __call__(
self,
x: Union[int, float, np.ndarray],
y: Union[int, float, np.ndarray],
z: Optional[Union[int, float, np.ndarray]] = None,
t: Optional[float] = None,
) -> Union[int, float, np.ndarray]:
kwargs = dict() if t is None else dict(t=t)
values = []
for operand in (self.left, self.right):
if isinstance(operand, Parameter):
if operand.time_dependent:
value = operand(x, y, z, **kwargs)
else:
value = operand(x, y, z)
else:
value = operand
values.append(value)
return self.operator(*values)
def _bare_repr(self) -> str:
op_str = self.VALID_OPERATORS[self.operator]
if isinstance(self.left, CompositeParameter):
left_repr = self.left._bare_repr()
elif isinstance(self.left, Parameter):
left_argspec = self.left._get_argspec()
left_repr = function_repr(self.left.func, left_argspec)
else:
left_repr = str(self.left)
if isinstance(self.right, CompositeParameter):
right_repr = self.right._bare_repr()
elif isinstance(self.right, Parameter):
right_argspec = self.right._get_argspec()
right_repr = function_repr(self.right.func, right_argspec)
else:
right_repr = str(self.right)
return f"({left_repr} {op_str} {right_repr})"
def __eq__(self, other) -> bool:
if other is self:
return True
if not isinstance(other, type(self)):
return False
return (
self.left == other.left
and self.right == other.right
and self.operator is other.operator
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}<{self._bare_repr()}>"
def __getstate__(self):
state = self.__dict__.copy()
state["left"] = cloudpickle.dumps(state["left"])
state["right"] = cloudpickle.dumps(state["right"])
return state
def __setstate__(self, state):
state["left"] = cloudpickle.loads(state["left"])
state["right"] = cloudpickle.loads(state["right"])
self.__dict__.update(state)
class Constant(Parameter):
"""A Parameter whose value doesn't depend on position or time."""
def __init__(self, value: Union[int, float, complex], dimensions: int = 2):
if dimensions not in (2, 3):
raise ValueError(f"Dimensions must be 2 or 3, got {dimensions}.")
if dimensions == 2:
def constant(x, y, value=0):
return value * np.ones_like(x)
else:
def constant(x, y, z, value=0):
return value * np.ones_like(x)
super().__init__(constant, value=value)
| loganbvh/py-tdgl | tdgl/parameter.py | parameter.py | py | 14,810 | python | en | code | 24 | github-code | 1 | [
{
"api_name": "typing.Callable",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "inspect.FullArgSpec",
... |
12934811377 | from __future__ import annotations
import operator
from copy import copy
from itertools import chain
from collections.abc import Callable
from dataclasses import replace
from functools import partialmethod
from random import sample
from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
from enum import Enum
import numpy as np
from .context import CacheKey, Context
class Empty(Enum):
# See https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
empty = None
_empty = Empty.empty
class Resolveable:
def __invert__(self):
return self._resolve()
def _resolve(self) -> Union[Any, Iterable[Any], Empty]:
raise NotImplementedError
@property
def cache_key(self) -> Hashable:
raise NotImplementedError
class BaseValue(Resolveable):
constant: Optional[bool] = None # Whether the value depends on independent variables
name: Optional[str] = None
def _operation(
self,
function: Callable[..., Union[float, bool]],
other: Union[float, Value, Empty] = _empty,
reverse: bool = False,
):
if not isinstance(other, Resolveable):
other = Value(other)
if reverse:
assert other.value is not _empty
return Value(Operation(function, other, self))
return Value(Operation(function, self, other))
__neg__ = partialmethod(_operation, operator.neg)
__pos__ = partialmethod(_operation, operator.pos)
__abs__ = partialmethod(_operation, operator.abs)
__add__ = partialmethod(_operation, operator.add)
__floordiv__ = partialmethod(_operation, operator.floordiv)
__mod__ = partialmethod(_operation, operator.mod)
__mul__ = partialmethod(_operation, operator.mul)
__pow__ = partialmethod(_operation, operator.pow)
__sub__ = partialmethod(_operation, operator.sub)
__truediv__ = partialmethod(_operation, operator.truediv)
__radd__ = partialmethod(_operation, operator.add, reverse=True)
__rfloordiv__ = partialmethod(_operation, operator.floordiv, reverse=True)
__rmod__ = partialmethod(_operation, operator.mod, reverse=True)
__rmul__ = partialmethod(_operation, operator.mul, reverse=True)
__rpow__ = partialmethod(_operation, operator.pow, reverse=True)
__rsub__ = partialmethod(_operation, operator.sub, reverse=True)
__rtruediv__ = partialmethod(_operation, operator.truediv, reverse=True)
# TODO: Decide whether the implementation of the below six methods is really what we want
__lt__ = partialmethod(_operation, operator.lt)
__le__ = partialmethod(_operation, operator.le)
__eq__ = partialmethod(_operation, operator.eq) # type: ignore
__ne__ = partialmethod(_operation, operator.ne) # type: ignore
__ge__ = partialmethod(_operation, operator.ge)
__gt__ = partialmethod(_operation, operator.gt)
class Value(BaseValue):
def __init__(
self,
value: Union[float, Resolveable, Empty],
constant: Optional[bool] = None,
name: Optional[str] = None,
):
self.value = value
self.constant = constant
self.name = name
def __repr__(self):
if self.name:
return self.name
return str(self.value)
@property
def cache_key(self):
if isinstance(self.value, Resolveable):
return self.value.cache_key
return self.value
def _resolve(self):
if isinstance(self.value, Resolveable):
return ~self.value
return self.value
class Operation(Resolveable):
FORMATS = {
operator.neg: "-{this}",
operator.pos: "{this}",
operator.abs: "abs({this})",
operator.add: "{this} + {other}",
operator.floordiv: "{this} // {other}",
operator.mod: "{this} % {other}",
operator.mul: "{this} * {other}",
operator.pow: "{this} ** {other}",
operator.sub: "{this} - {other}",
operator.truediv: "{this} / {other}",
operator.lt: "{this} < {other}",
operator.le: "{this} <= {other}",
operator.eq: "{this} == {other}",
operator.ne: "{this} != {other}",
operator.ge: "{this} >= {other}",
operator.gt: "{this} > {other}",
}
PRECEDENCE = {
operator.lt: 6,
operator.le: 6,
operator.eq: 6,
operator.ne: 6,
operator.ge: 6,
operator.gt: 6,
operator.add: 11,
operator.sub: 11,
operator.floordiv: 12,
operator.mod: 12,
operator.mul: 12,
operator.truediv: 12,
operator.neg: 13,
operator.pos: 13,
operator.pow: 14,
operator.abs: 16,
}
format: Optional[Callable[..., str]] = None
precedence: int = -1
def __init__(
self,
function: Callable[..., Any],
this: BaseValue,
other: BaseValue = Value(_empty),
):
self.function = function
self.this = this
self.other = other
if function in self.FORMATS:
self.format = self.FORMATS[function].format
if function in self.PRECEDENCE:
self.precedence = self.PRECEDENCE[function]
@property
def cache_key(self):
this_key = self.this.cache_key
other_key = self.other.cache_key
return CacheKey(function=self.function, nested=(this_key, other_key))
def __repr__(self):
if self.format:
if self.other is _empty:
return self.format(this=self.this)
this, other = str(self.this), str(self.other)
if (
isinstance(self.this, Value)
and isinstance(self.this.value, Operation)
and self.this.value.precedence < self.precedence
):
this = f"({this})"
if (
isinstance(self.other, Value)
and isinstance(self.other.value, Operation)
and self.other.value.precedence <= self.precedence
):
other = f"({other})"
return self.format(this=this, other=other)
return f"{type(self).__name__}({self.function.__name__}, {self.this}, {self.other})"
def _resolve(self):
this, other = ~self.this, ~self.other
if other is _empty:
return self.function(this)
return self.function(this, other)
class Distribution(BaseValue):
constant = True
def __init__(
self,
function: Callable[..., Iterable[float]],
*args: float,
name: Optional[str] = None,
**kwargs: Empty,
): # pylint: disable=super-init-not-called
self.function = function
self.name = name
self.args = args
self.kwargs = kwargs
def __repr__(self):
if self.name:
return self.name
name = self.function.__name__
args = ", ".join(map(str, self.args))
kwargs = ", ".join(f"{key}={value}" for key, value in self.kwargs.items())
return f'{name}({", ".join(part for part in (args, kwargs) if part)})'
@property
def cache_key(self):
with Context() as context:
return CacheKey(
sample_count=context.sample_count,
function=self.function,
args=self.args,
kwargs=tuple(sorted(self.kwargs.items())),
)
def _resolve(self):
with Context() as context:
if self.cache_key in context.cache:
return context.cache[self.cache_key]
value = self.function(*self.args, size=context.sample_count, **self.kwargs)
context.cache[self.cache_key] = value
return value
class Mixture(BaseValue):
def __init__(
self, values: Sequence[BaseValue], name: Optional[str] = None
): # pylint: disable=super-init-not-called
self.values = values
self.name = name
def __repr__(self):
if self.name:
return self.name
return f"{type(self).__name__}({self.values})"
@property
def cache_key(self):
with Context() as context:
return CacheKey(
sample_count=context.sample_count,
nested=tuple(replace(value.cache_key, sample_count=None) for value in self.values),
)
@staticmethod
def _sample(*values: BaseValue):
with Context() as context:
remainder = context.sample_count % len(values)
sample_counts = [context.sample_count // len(values)] * len(values)
sample_counts = [
sample_count + int(i < remainder) for i, sample_count in enumerate(sample_counts)
]
sample_counts = sample(sample_counts, len(sample_counts)) # Shuffling
samples = []
for value, sample_count in zip(values, sample_counts):
with Context(sample_count=sample_count):
samples.append(~value)
return np.concatenate(samples)
def _resolve(self):
with Context() as context:
if self.cache_key in context.cache:
return context.cache[self.cache_key]
value = self._sample(*self.values)
context.cache[self.cache_key] = value
return value | Telofy/SquigglyPy | squigglypy/tree.py | tree.py | py | 9,286 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number"... |
72540925474 | # Test app main components.
# python -m pytest tests/app/test_main.py
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from app.main import app
from app.models import models
def test_index():
with TestClient(app) as client:
response = client.get("/")
assert response.status_code == status.HTTP_200_OK
assert response.json()["message"] == "Successful operation"
def test_construct_response():
with TestClient(app) as client:
response = client.get("/")
assert response.request.method == "GET"
assert response.status_code == status.HTTP_200_OK
@pytest.fixture
def example_user():
user = {
"email": "adminnxns@gmxail.com",
"first_name": "Juan",
"last_name": "Perez",
"password": "123456",
}
return user
def test_not_exists_user(example_user):
assert models.User.objects(email=example_user["email"]).first() is None
def test_create_user(example_user):
with TestClient(app) as client:
response = client.post("/users/", data=example_user)
assert response.request.method == "POST"
assert response.status_code == status.HTTP_201_CREATED
def test_exits_user(example_user):
assert models.User.objects(email=example_user["email"]).first() is not None
def test_login(example_user):
data = {"username": example_user["email"], "password": example_user["password"]}
with TestClient(app) as client:
response = client.post("/login/", data=data)
assert response.request.method == "POST"
assert response.status_code == status.HTTP_200_OK
global token
token = response.json()["access_token"]
assert len(token) > 0
def test_remove_user(example_user):
models.User.objects(email=example_user["email"]).delete()
assert models.User.objects(email=example_user["email"]).first() is None
def test_not_authorized_nlp():
with TestClient(app) as client:
response = client.post("/nlp/")
assert response.request.method == "POST"
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_authorized_nlp():
params = {
"sequence": "One day I will see the world",
"candidate_labels": ["travel", "cooking", "dancing"],
}
with TestClient(app) as client:
response = client.post("/nlp/", json=params, headers={"Authorization": f"Bearer {token}"})
assert response.request.method == "POST"
assert response.status_code == status.HTTP_200_OK
| igmalta/ml-classification-api | tests/app/test_main.py | test_main.py | py | 2,543 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.main.app",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 15,
"usage_type": "attribute"
},
{
"ap... |
36239962708 | from services.serve import db
from datetime import datetime
from typing import List, Tuple
from sqlalchemy import func, desc
class Visit(db.Model):
    """Records unique visits (one per IP) to 'visitable' resources.

    Each row stores the visitor IP plus a polymorphic (id, type) pair
    identifying the visited resource.
    """
    __tablename__ = 'visits'

    id = db.Column(db.Integer, primary_key=True)
    ip = db.Column(db.String(20), nullable=False)
    visitable_id = db.Column(db.Integer, nullable=False)
    visitable_type = db.Column(db.String(30), nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.now)

    def __init__(self, ip: str, visitable_id: int, visitable_type: str):
        self.ip = ip
        self.visitable_id = visitable_id
        self.visitable_type = visitable_type

    @classmethod
    def set_visit(cls, ip: str, visitable_id: int, visitable_type: str) -> None:
        """Store a visit unless this IP has already visited the resource."""
        seen = cls.query.filter(cls.ip == ip,
                                cls.visitable_id == visitable_id,
                                cls.visitable_type == visitable_type).first()
        if seen is None:
            Visit(ip, visitable_id, visitable_type).save_to_db()

    @classmethod
    def visit_popular_by(cls, visit_type: str, limit: int) -> List[Tuple[int, int]]:
        """Return up to `limit` (visitable_id, visit_count) pairs, most visited first."""
        query = (db.session.query(cls.visitable_id.label('visit_id'),
                                  func.count(cls.visitable_id).label('count_total'))
                 .group_by('visit_id')
                 .order_by(desc('count_total'))
                 .filter(cls.visitable_type == visit_type)
                 .limit(limit))
        return query.all()

    @classmethod
    def get_seen_activity(cls, visit_type: str, visit_id: int) -> int:
        """Count the stored visits for the given resource."""
        return cls.query.filter(cls.visitable_id == visit_id,
                                cls.visitable_type == visit_type).count()

    def save_to_db(self) -> None:
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self) -> None:
        db.session.delete(self)
        db.session.commit()
| mentimun-mentah/zooka-watersports | restapi/services/models/VisitModel.py | VisitModel.py | py | 1,765 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "services.serve.db.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "services.serve.db",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "services.serve.db.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "... |
#!/usr/bin/env python
# coding: utf-8
"""Run RoBO Bayesian optimisation over a posterior defined by a config script.

Usage: python bo_run.py CONFIGURATION_FILE_PATH SEED_VALUE
"""
import numpy as np
import pickle  # kept: may be used by the exec'd configuration script
import sys

import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

from robo.fmin import bayesian_optimization

# Global Configuration: read config path and seed from the command line.
try:
    CFG = sys.argv[1]
    CFG = CFG.replace(".py", "")
    SEED = int(sys.argv[2])
except (IndexError, ValueError):
    # Too few arguments, or a seed that is not an integer.
    logger.info("USAGE: python run.py CONFIGURATION_FILE_PATH SEED_VALUE")
    sys.exit(-1)

PREFIX = CFG + ("_%i" % SEED)
CFG = CFG + ".py"
OUTFILE = "%s_bo_posterior" % PREFIX
logger.info("OPENING CODE FROM CFG=%s (SEED=%i) => OUTPUT:%s" % (CFG, SEED, OUTFILE))

# WARNING: exec() runs arbitrary code from the configuration file. The config
# is expected to define objective_function, lower, upper, num_iterations,
# X_init, Y_init and n_init. Only use trusted configuration files.
with open(CFG) as cfg_file:
    exec(cfg_file.read())

# BlackBox BO over posterior
rng = np.random.RandomState(SEED)
res = bayesian_optimization(objective_function, lower, upper,
                            num_iterations=num_iterations, X_init=X_init,
                            Y_init=Y_init, n_init=n_init, rng=rng,)
| zehsilva/prior-predictive-specification | bo_optimization/bo_run.py | bo_run.py | py | 833 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
... |
42272214993 | """
Module for building and manipulating astronomical catalogues.
@author: A.Ruiz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip, range
from io import open
import os
import warnings
import tempfile
import subprocess
from copy import deepcopy
from string import ascii_uppercase
import numpy as np
from astropy import log
from astropy import units as u
from astropy.coordinates import SkyCoord
#from astropy.utils.misc import ShapedLikeNDArray
from astropy.table import Table, join, setdiff, unique, vstack
from astropy.units.quantity import Quantity
from astropy.utils.misc import indent
from astropy.utils.exceptions import AstropyUserWarning
from mocpy import MOC
# Global
ALLSKY_AREA_DEG = (4*np.pi * u.rad**2).to(u.deg**2)
class SkyCoordErr(object):
"""
A class for the positional errors of a SkyCoord object
"""
# TODO: Use ShapedLikeNDArray as base object
ERRTYPE = ['circle', 'ellipse', 'rcd_dec_ellipse',
'cov_ellipse', 'cor_ellipse']
def __init__(self, data, errtype='circle', unit=None, errsys=None, check=True):
self.errtype = self._set_errtype(errtype)
self.components = self._set_components(data, unit)
if errsys is not None:
self.add_syserr(errsys)
if check:
self._check_components()
def __repr__(self):
comp_str = ', '.join(self.components.colnames)
unit_str = ', '.join([str(col.unit) for col in self.components.itercols()])
data_str = indent(str(self.components.as_array()))
err_str = '<SkyCoordErr ({}): ({}) in {}\n{}>'
return err_str.format(self.errtype, comp_str, unit_str, data_str)
def __getitem__(self, key):
item_data = self.components[key]
if not isinstance(item_data, Table):
# We do this because when key is an integer and the components
# only have one column, it returns components[key] returns a row
# instead of a Table.
item_data = self.components[key:key+1]
return SkyCoordErr(item_data, errtype=self.errtype, check=False)
def __len__(self):
return len(self.components)
def transform_to(self, errtype='ellipse'):
"""
Transform errors to `errtype`
"""
not_implemented_errtypes = ['rcd_dec_ellipse',
'cov_ellipse',
'cor_ellipse']
covmatrix = self.covariance_matrix()
if errtype == 'circle':
errs = self._to_circular(covmatrix)
elif errtype == 'ellipse':
errs = self._to_ellipse(covmatrix)
elif errtype in not_implemented_errtypes:
# TODO: implement remaining transformations
raise NotImplementedError
else:
raise ValueError('Unknown error type: {}'.format(errtype))
return errs
def as_array(self):
"""
Return error values as a numpy array.
"""
errs = self.components
if self.errtype == 'circle':
#err_arrays = errs.columns[0].data << errs.columns[0].unit
err_arrays = errs.columns[0].data * errs.columns[0].unit
else:
err_arrays = []
for col in errs.itercols():
#err_arrays.append(col.data << col.unit)
err_arrays.append(col.data * u.Unit(col.unit))
err_arrays = np.array(err_arrays)
return err_arrays
def covariance_matrix(self, inverse=False):
"""
Returns the corresponding covariance matrix. If `inverse` is True,
returns the inverse of the covariance matrix.
"""
sigma_x, sigma_y, rhoxy = self._covariance_components()
if inverse:
V = self._inverse_covariance_matrix(sigma_x, sigma_y, rhoxy)
else:
V = self._covariance_matrix(sigma_x, sigma_y, rhoxy)
return V
def add_syserr(self, syserr):
"""
Add systematic to the error components. Only works for circular errors.
"""
if self.errtype == 'circle':
data = self.components.columns[0].data
unit = self.components.columns[0].unit
err = data * u.Unit(unit)
errcol = self.components.colnames[0]
self.components[errcol] = np.sqrt(syserr**2 + err**2)
else:
raise NotImplementedError
def _set_errtype(self, errtype):
"""
Check that `errtype` is a valid value.
"""
if errtype not in self.ERRTYPE:
raise ValueError('Unknown error type: {}'.format(errtype))
else:
return errtype
def _set_components(self, data, unit=None):
"""
Define an astropy table with statistical positional errors
(no systematic errors applied here). The number of columns depends
on what kind of errors are defined
"""
if unit is None:
unit = self._get_default_units()
poserr = Table()
for col, col_unit in zip(data.colnames, unit):
if data[col].unit is None:
poserr[col] = data[col]*col_unit
else:
poserr[col] = data[col].to(col_unit)
# # Set bad values to zero
# good_mask = np.isfinite(poserr[col])
# poserr[col][~good_mask] = 0.0
#
# negative_mask = poserr[col] < 0
# poserr[col][negative_mask] = 0.0
return poserr
def _check_components(self):
"""
Check that all errors are positive and finite (not nan or inf)
"""
for i, col in enumerate(self.components.colnames):
if i >= 2:
break
if not all(np.isfinite(self.components[col])):
raise ValueError('Some positional errors are not finite!')
if not all(self.components[col] > 0):
raise ValueError('Some positional errors are non positive!')
def _get_default_units(self):
"""
Define default units depending on the error type
"""
if self.errtype == "circle":
# RADEC_ERR (e.g. 3XMM)
units = [u.arcsec]
elif self.errtype == "ellipse":
# major axis, minor axis, position angle (e.g. 2MASS)
units = [u.arcsec, u.arcsec, u.deg]
elif self.errtype == "rcd_dec_ellipse":
# ra error, dec error (e.g. SDSS)
units = [u.arcsec, u.arcsec]
elif self.errtype == "cov_ellipse":
# sigma_x, sigma_y, covariance
units = [u.arcsec, u.arcsec, u.arcsec**2]
elif self.errtype == "cor_ellipse":
# sigma_x, sigma_y, correlation
units = [u.arcsec, u.arcsec, u.arcsec/u.arcsec]
else:
raise ValueError('Wrong errtype!')
return units
def _to_ellipse(self, covmatrix):
"""
Calculate components of the ellipse error from the covariance
matrix and define a SkyCoordErr object with those components.
"""
a, b, PA = self._covariance_to_ellipse(covmatrix)
errs = Table([a, b, PA], names=['eeMaj', 'eeMin', 'eePA'])
return SkyCoordErr(errs, errtype='ellipse')
def _to_circular(self, covmatrix):
"""
Estimate equivalent circular errors from the covariance matrix
and define a SkyCoordErr object with those components.
"""
if self.errtype != 'circle':
message = ('Converting non-circular to circular errors! '
'New errors will preserve the area.')
warnings.warn(message, AstropyUserWarning)
# The determinat of the covariance matrix is related to the
# 1 sigma area covered by the positional errors: A = pi * sqrt(|V|)
# If we want a circular error that preserves the area:
# r = |V|^(1/4)
r = np.power(np.linalg.det(covmatrix), 0.25)
errs = Table([r], names=['RADEC_ERR'])
return SkyCoordErr(errs, errtype='circle')
else:
return self
def _covariance_components(self):
"""
Calculate the components of the covariance matrix from the errors
"""
npars = len(self.components.colnames)
errs = self.components
if self.errtype == "circle":
if npars != 1:
raise ValueError('Wrong error type!')
else:
sigma_x = np.array(errs.columns[0])*errs.columns[0].unit
sigma_y = np.array(errs.columns[0])*errs.columns[0].unit
rhoxy = np.zeros(len(sigma_x))*errs.columns[0].unit**2
elif self.errtype == "ellipse":
if npars != 3:
raise ValueError('Wrong error type!')
else:
err0 = np.array(errs.columns[0])*errs.columns[0].unit
err1 = np.array(errs.columns[1])*errs.columns[1].unit
err2 = np.array(errs.columns[2])*errs.columns[2].unit
sigma_x = np.sqrt((err0*np.sin(err2))**2 +
(err1*np.cos(err2))**2)
sigma_y = np.sqrt((err0*np.cos(err2))**2 +
(err1*np.sin(err2))**2)
rhoxy = np.cos(err2)*np.sin(err2)*(err0**2 - err1**2)
elif self.errtype == "rcd_dec_ellipse":
if npars != 2:
raise ValueError('Wrong error type!')
else:
sigma_x = np.array(errs.columns[0])*errs.columns[0].unit
sigma_y = np.array(errs.columns[1])*errs.columns[1].unit
rhoxy = np.zeros(len(sigma_x))*errs.columns[0].unit**2
elif self.errtype == "cov_ellipse":
if npars != 3:
raise ValueError('Wrong error type!')
else:
sigma_x = np.array(errs.columns[0])*errs.columns[0].unit
sigma_y = np.array(errs.columns[1])*errs.columns[1].unit
rhoxy = np.array(errs.columns[2])*errs.columns[2].unit
elif self.errtype == "cor_ellipse":
if npars != 3:
raise ValueError('Wrong error type!')
else:
err0 = np.array(errs.columns[0])*errs.columns[0].unit
err1 = np.array(errs.columns[1])*errs.columns[1].unit
err2 = np.array(errs.columns[2])*errs.columns[2].unit
sigma_x = err0
sigma_y = err1
rhoxy = err2*err0*err1
else:
raise ValueError('Unknown error type: {}'.format(self.errtype))
return sigma_x, sigma_y, rhoxy
@staticmethod
def _covariance_matrix(sigma_x, sigma_y, rhoxy):
    """
    Build the (n, 2, 2) stack of covariance matrices
    [[sigma_x**2, rhoxy], [rhoxy, sigma_y**2]]
    (Eq. 6 of Pineau+2017).
    """
    n_src = len(sigma_x)
    V = np.full((n_src, 2, 2), np.nan)
    # Diagonal: the two variances; off-diagonal: the covariance term.
    V[:, 0, 0] = sigma_x**2
    V[:, 1, 1] = sigma_y**2
    V[:, 0, 1] = V[:, 1, 0] = rhoxy
    return V
@staticmethod
def _inverse_covariance_matrix(sigma_x, sigma_y, rhoxy):
    """
    Build the (n, 2, 2) stack of inverses of the covariance matrices
    with elements sigma_x, sigma_y and rhoxy (Eq. 7 of Pineau+2017).
    """
    # Determinant of V: sx^2*sy^2 - rho^2.
    K = (sigma_x * sigma_y)**2 - rhoxy**2
    Vinv = np.full((len(sigma_x), 2, 2), np.nan)
    # Analytic 2x2 inverse: swap variances on the diagonal,
    # negate the covariance, divide everything by the determinant.
    Vinv[:, 0, 0] = sigma_y**2 / K
    Vinv[:, 1, 1] = sigma_x**2 / K
    Vinv[:, 0, 1] = Vinv[:, 1, 0] = -rhoxy / K
    return Vinv
@staticmethod
def _covariance_to_ellipse(V):
    """
    Given the stack of covariance matrices V, return the corresponding
    ellipse errors: semi-major axis `a`, semi-minor axis `b` and
    position angle `PA` in degrees, folded into [0, 180).
    """
    trace = V[:, 0, 0] + V[:, 1, 1]   # sigma_x**2 + sigma_y**2
    diff = V[:, 1, 1] - V[:, 0, 0]    # sigma_y**2 - sigma_x**2
    cov = V[:, 1, 0]                  # rho*sigma_x*sigma_y
    # The eigenvalues of V are the squared semi-axes of the ellipse.
    disc = np.sqrt(diff**2 + 4*cov**2)
    a = np.sqrt((trace + disc) / 2)
    b = np.sqrt((trace - disc) / 2)
    # Orientation of the major axis.
    PA = np.arctan2(2*cov, diff) / 2
    PA[PA < 0] += np.pi
    return a, b, PA * (180 / np.pi)
class Catalogue(object):
"""
A class for catalogue objects.
Parameters
----------
data_table : Astropy ``Table`` or ``str``
Astropy ``Table`` with the catalogue data. Alternatively, the path
to a file containing the catalogue data in a format compatible with
Astropy (fits, csv, VOTable, etc) can be passed. It should contain at
least three columns: the identification labels of the sources and their
coordinates (e.g. RA and Dec).
area : ``str``, ``MOC`` or ``Quantity``
Sky area covered by the catalogue. the area can be defined as a path
to the catalogue MOC, a mocpy ``MOC`` object or an Astropy ``Quantity``
with units consistents with square deg.
name : ``str` or ``None``, optional
Catalogue identification label. If None, it uses the file name of
`data_table`. Defaults to ``None``.
id_col : ``str`` or ``None``, optional
Name of the column in `data_table` with the identification labels. If
``None``, it assumes that the first column contains the id labels.
coord_cols : ``list``, optional
Two element list with the column names for the coordinates. Defaults
to ['RA', 'DEC'].
frame : ``str`` or Astropy ``BaseCoordinateFrame``, optional
Coordinates reference frame of `coord_cols`. Defaults to 'icrs'.
poserr_cols : ``list``, optional
List with the column names for the psotional errors. The size of
the list depend on the error type. See the SkyCoordErr documentation
for details. Defaults to ['RADEC_ERR'].
poserr_type : ``str``, optional
Type of the positional errors. It can be 'circle', 'ellipse',
'rcd_dec_ellipse', 'cov_ellipse' or 'cor_ellipse'. See the SkyCoordErr
documentation for details. Defaults to 'circle'.
mag_cols : ``list``, optional
List with the column names for the magnitudes.
Attributes
----------
name : ``str``
Catalogue identification label.
ids : ``str`` or ``int``
Source identification labels.
coords : Astropy ``SkyCoord``
Catalogue coordinates in ICRS frame.
poserr : Astropy ``Quantity`` or ``None``
Average positional error coords in units consistent with arcsec.
moc : mocpy ``MOC`` or ``None``
MOC of the catalogue.
area : Astropy ``Quantity``
Sky area covered by the catalogue in square deg.
mags : Astropy ``Table`` or ``None``
Source magnitudes.
"""
def __init__(self, data_table, area, name=None, id_col=None,
coord_cols=['RA', 'DEC'], frame='icrs',
poserr_cols=['RADEC_ERR'], poserr_type='circle',
mag_cols=None):
self.name = self._set_name(name, data_table)
# if data_table is a string, assumes it is the path to the data file
if isinstance(data_table, str):
data_table = Table.read(data_table)
self.ids = self._set_ids(data_table, id_col)
self.coords = self._set_coords(data_table, coord_cols, frame)
self.mags = self._set_mags(data_table, mag_cols)
self.area, self.moc = self._set_area(area)
self.poserr = self._set_poserr(data_table, poserr_cols, poserr_type)
self._self_apply_moc() # keep only sources within self.moc, if exists
def __len__(self):
return len(self.ids)
def __repr__(self):
return str(self.save(filename=None))
def __getitem__(self, key):
newcat = deepcopy(self)
newcat.ids = self.ids[key]
newcat.coords = self.coords[key]
newcat.poserr = self.poserr[key]
if self.mags is not None:
newcat.mags = self.mags[key]
return newcat
@property
def poserr_type(self):
return self.poserr.errtype
def apply_moc(self, moc, outside=False):
"""
Returns a new ``Catalogue`` including only sources
within the area defined by `moc`.
Parameters
----------
moc : mocpy ``MOC``
MOC to be applied to the catalogue.
outside : ``bolean``, optional
If True, it returns the id labels of the sources outside `moc`.
Defaults to False.
"""
idx = moc.contains(self.coords.ra, self.coords.dec)
if len(idx) > 0:
newcat = self[idx]
if self.moc is None:
newcat.moc = moc
else:
newcat.moc = moc.intersection(self.moc)
newcat.area = newcat.moc.sky_fraction * ALLSKY_AREA_DEG
else:
warnings.warn('No sources in moc!!!', AstropyUserWarning)
newcat = None
if outside:
idx_out = moc.contains(self.coords.icrs.ra, self.coords.icrs.dec,
keep_inside=False)
return newcat, self.ids[idx_out]
else:
return newcat
# def to_moc(self, radius=1*u.arcmin, moc_order=12):
# """
# Returns a moc defining the areas around the sources
# of the Catalogue. It can be used as a source mask.
#
# Parameters
# ----------
# radius : Astropy ``Quantity``, optional
# Radius of the circular area to be selected around Catalogue
# `coords` in units consistent with arcsec. Defaults to one arcmin
# moc_order : ``int``
# Maximum order of the resulting moc.
# """
# # PYMOC!!!
# moc_srcs = catalog_to_moc(self.coords, radius, moc_order, inclusive=True)
#
# # Convert PYMOC to MOCPY
# mocdict = {order: list(cells) for order, cells in moc_srcs}
# moc_srcs = MOC.from_json(mocdict)
#
# return moc_srcs
def select_by_id(self, ids):
    """
    Returns a new ``Catalogue`` including only sources with ids equal
    to `ids`. Sources in the new catalogue are ordered as in `ids`.

    Parameters
    ----------
    ids : ``list``
        List of ids to be selected.
    """
    # Index table for the current catalogue: id -> row position.
    cat_index = Table()
    cat_index['ID'] = self.ids
    cat_index['IDX'] = range(len(self.ids))

    # Requested ids, remembering the order in which they were asked for.
    requested = Table()
    requested['ID'] = ids
    requested['newIDX'] = range(len(ids))

    matched = join(requested, cat_index, keys='ID', join_type='left')
    matched.sort('newIDX')  # preserve the ordering given by `ids`

    return self[matched['IDX'].data]
def remove_by_id(self, ids):
"""
Returns a new ``Catalogue`` with `ids` sources removed
Parameters
----------
ids : ``list`` or ``Column``
List of ids to be selected.
"""
catids = Table()
catids['ID'] = self.ids
catids['IDX'] = range(len(self.ids))
rmids = Table()
rmids['ID'] = ids
rmids['newIDX'] = range(len(ids))
rmcat_ids = setdiff(catids, rmids, keys='ID')
rmcat_ids.sort('IDX')
return self.select_by_id(rmcat_ids['ID'])
def join(self, cat, name=None):
"""
Returns a new ``Catalogue`` joining the current catalogue with 'cat'. Both
catalogue must be consistent: same coordinates, positional errors and
magnitudes, if they are included.
If the original catalogues have areas defined through MOCs, the final area is
the union of their MOCs, otherwise the area of the current catalogue is used.
If the original catalogues have common sources, repeated entries will be
remove from the final catalogue.
"""
if name is None:
name = self.name
join_cat_data = vstack([self.save(), cat.save()])
join_cat_data = unique(join_cat_data)
try:
area = self.moc.union(cat.moc)
except:
area = self.area
mag_cols = None
if self.mags is not None:
mag_cols = self.mags.colnames
join_cat = Catalogue(
join_cat_data,
poserr_cols=self.poserr.components.colnames,
poserr_type=self.poserr.errtype,
area=area,
name=self.name,
mag_cols=mag_cols
)
return join_cat
def randomise(self, r_min=20*u.arcsec, r_max=120*u.arcsec,
numrepeat=10, seed=None):
"""
Returns a ``Catalogue`` object with random coordinates away
from the positions of the original catalogue.
Parameters
----------
r_min : Astropy ``Quantity``, optional
Minimum distance from original catalogue coordinates in angular
units. Defaults to 20 arcsec.
r_max : Astropy ``Quantity``, optional
Maximum distance from original catalogue coordinates in angular
units. Defaults to 120 arcsec.
numrepeat : ``int``, optional
The total number of sources in the new catalogue is `numrepeat`
times the number of sources in the original catalogue. Defaults to
10. If `numrepeat` is 1, the nway library is used to create a
random catalogue with the same number of sources and preserving
the spatial structure.
"""
if self.moc is None:
area = self.area
else:
area = self.moc
if numrepeat == 1:
# Use nway tool to generate a random catalogue:
# good balance between reproducing local structures
# and filling the field.
r_min = r_min.to(u.arcsec).value
poserr_cols = self.poserr.components.colnames
with tempfile.NamedTemporaryFile() as input_file:
filename = input_file.name
self.save(filename)
rnd_cat_data = self._nway_fake_catalogue(filename, radius=r_min)
rnd_cat = Catalogue(
rnd_cat_data, area=area, poserr_cols=poserr_cols, name=self.name
)
else:
# Use seed != None only for testing, to obtain the same random catalogue
ra, dec = self._random_coords(
0*u.deg, 360*u.deg, r_min, r_max, numrepeat, seed
)
ids = ['RND{:06d}'.format(i) for i in range(len(ra))]
rnd_cat_data = Table()
rnd_cat_data['SRCID'] = ids
rnd_cat_data['RA'] = ra
rnd_cat_data['DEC'] = dec
# This catalogue have positional errors set to zero and hence it
# shows a warning when the random catalogue is created. We use
# this context manager to avoid showing the warning, which could
# be misleading for the user.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=AstropyUserWarning)
rnd_cat = Catalogue(
rnd_cat_data, area=area, poserr_cols=None, name=self.name
)
return rnd_cat
def set_fake_counterparts(self, candidates):
from scipy.stats import rayleigh
# Assign fake counterparts
idx_fake = np.random.choice(len(candidates), len(self))
cat_fake = candidates[idx_fake]
# Calculate coordinates for fake candidates
# We randomize the positions of the fake counterpart around the
# positions of the primary sources using a Rayleigh distribution
mean_ra_fake = self.coords.ra.deg
mean_dec_fake = self.coords.dec.deg
# To estimate the variance for the Rayleigh distribution, we
# circularize the errors of both catalogues:
pcat_poserr_circ = self.poserr.transform_to('circle')
cat_fake_poserr_circ = cat_fake.poserr.transform_to('circle')
sig_fake = np.sqrt(
(pcat_poserr_circ.components.columns[0].to(u.deg))**2 +
(cat_fake_poserr_circ.components.columns[0].to(u.deg))**2
)
dr = rayleigh.rvs(loc=0.0, scale=sig_fake.value)
theta = 2 * np.pi * np.random.random_sample(size=len(cat_fake))
coords_ra_fake = mean_ra_fake + dr * np.cos(theta)
coords_dec_fake = mean_dec_fake + dr * np.sin(theta)
cat_fake.coords = SkyCoord(coords_ra_fake, coords_dec_fake, unit="deg")
cat_fake.moc = self.moc
cat_fake.area = self.area
# We set the ids of the fake counterparts as the ids of this catalogue
# for an easy identification of true counterparts
cat_fake.ids = self.ids
return cat_fake
def save(self, filename=None, format='fits', include_mags=True):
"""
Dump Catalogue to an Astropy Table and save it to a file.
Parameters
----------
filename : ``str``
File path. If ``None``, only returns an Astropy Table.
format : ``str``, optional
Format of the output file (compatible with Astropy tables).
Defaults to 'fits'.
include_mags : ``bolean``, optional
If ``True``, magnitudes are also included in the Astropy Table.
Defaults to ``True``.
"""
data_table = Table()
try:
data_table['SRCID_' + self.name] = self.ids
except TypeError:
# We do this in case len(ids) = 1
data_table['SRCID_' + self.name] = [self.ids]
data_table['RA'] = self.coords.ra
data_table['DEC'] = self.coords.dec
for errcol in self.poserr.components.colnames:
data_table[errcol] = self.poserr.components[errcol]
if self.mags and include_mags:
for magcol in self.mags.colnames:
data_table[magcol] = self.mags[magcol]
if filename:
data_table.write(filename, format=format, overwrite=True)
return data_table
def nway_dict(self, use_mags=True):
"""
Converts the Catalogue object into a python dictionary
with a structure compatible with the nway library.
Parameters
----------
use_mags : ``bolean``, optional
If True, magnitudes are also included in the dictionary.
Defaults to True.
"""
# Code adapted from https://github.com/JohannesBuchner/nway/blob/api/nway-apitest.py
# area in square degrees
# poserr_col: error column name or numerical value (in arcsec)
# coord_cols: ra/dec columns (in degrees)
# magnitude_columns: list with (mag, magfile) sequence or empty list []
# mag: column of something
# magfile: file with mag histogram (bin, sel, all) or None (for auto)
if self.poserr_type != 'circle':
raise ValueError('Nway catalogues must have circular positional errors!')
cat_dict = {}
cat_dict['name'] = self.name
cat_dict['srcid'] = self.ids.data
cat_dict['ra'] = self.coords.ra.deg
cat_dict['dec'] = self.coords.dec.deg
cat_dict['area'] = self.area.value # sky coverage in square degrees
# Astrometrical errors in arcsec
poserr = self.poserr.as_array()
cat_dict['error'] = poserr.to(u.arcsec).value
# magnitude columns
# maghists: either (bin, sel, all) tuple or None (for auto)
mags, magnames = [], []
if use_mags and self.mags is not None:
for magcol in self.mags.itercols():
mag_all = magcol.data
# mark negative magnitudes (e.g. -99 or -9.9999949E8) as undefined
mag_all[mag_all < 0] = np.nan
mags.append(mag_all)
magnames.append(magcol.name)
cat_dict['mags'] = mags
cat_dict['maghists'] = []
cat_dict['magnames'] = magnames
return cat_dict
def _set_name(self, name, data_table):
if name is not None:
return name
if isinstance(data_table, str):
# We assume that data_table is the path to the catalogue data.
# We use as name of the catalogue the name of the file, without extension
filename = os.path.basename(data_table)
filename, ext = os.path.splitext(filename)
return filename
def _set_ids(self, data_table, id_col):
if id_col is None:
# Assume first column is the SRCID
id_col = data_table.colnames[0]
# set ids as strings
ids = data_table[id_col].astype(str)
#ids = np.array(data_table[id_col].data, dtype=str)
# Workaround for a bug in hdf5 with Python 3
# In python 3 strings are unicode by default,
# and hdf5 doesn't handle that well
# if ids.dtype.kind == 'U':
# ids = Column([iid.encode('utf8') for iid in ids], name=id_col)
return ids
def _set_coords(self, data_table, coord_cols, frame):
coords = SkyCoord(ra=data_table[coord_cols[0]],
dec=data_table[coord_cols[1]],
unit='deg', frame=frame)
return coords.icrs
def _set_mags(self, data_table, mag_cols):
# If data_table is a masked table, we convert it to a normal table
# by filling masked values with -99 (assuming that they mask non-valid
# magnitude values). This solves the problem of using a masked ndarray
# in scipy interpolate. Then, we search for non-finite values in
# the table (e.g. nan or inf) and change it to -99. This solves some
# problems when using numpy histogram in python 3 (e.g. it fails to
# automatically define a finite range if there are nans in the input,
# even when the edges of the bins are passed).
if mag_cols is not None:
mags = data_table[mag_cols].filled(-99)
for column in mag_cols:
good_mask = np.isfinite(mags[column])
mags[column][~good_mask] = -99
return mags
def _set_moc(self, mocfile):
if mocfile is not None:
return MOC.from_fits(mocfile)
def _set_area(self, area):
"""
Returns the area covered by the catalogue and the corresponding
MOC, if defined.
Parameters
----------
area : ``str``, ``MOC`` or ``Quantity``
area can be defined as a path to the catalogue MOC, a mocpy
``MOC`` object or an Astropy ``Quantity`` with units consistents
with square deg.
"""
# If area is a string, we assume is the path for a moc file
if isinstance(area, str):
moc = MOC.from_fits(area)
area = moc.sky_fraction * ALLSKY_AREA_DEG
elif isinstance(area, MOC):
moc, area = area, area.sky_fraction * ALLSKY_AREA_DEG
elif isinstance(area, Quantity):
area = area.to(u.deg**2)
moc = None
else:
raise ValueError('Invalid `area` value!')
return area, moc
def _set_poserr(self, data, columns, errtype):
"""
Define a SkyCoordErr object with statistical positional errors
(no systematic errors applied here). The number of components depends
on what kind of errors are defined, given by `errtype`.
"""
if columns is not None:
errs = data[columns]
check = True
else:
message = 'Positional errors are set to zero!!!'
warnings.warn(message, AstropyUserWarning)
r = np.zeros([len(data)], dtype=float) * u.arcsec
errs = Table([r], names=['RADEC_ERR'])
errtype = 'circle'
check = False
return SkyCoordErr(errs, errtype=errtype, check=check)
def _self_apply_moc(self):
if self.moc is not None:
cat_inmoc = self.apply_moc(self.moc)
self.ids = cat_inmoc.ids
self.coords = cat_inmoc.coords
self.mags = cat_inmoc.mags
self.poserr = cat_inmoc.poserr
def _random_coords(self, a_min, a_max, r_min, r_max, numrepeat, seed):
# a_min, a_max, r_min, r_max: Quantity type
num_rand = numrepeat * len(self)
np.random.seed(seed)
r = r_min + (r_max - r_min)*np.random.random_sample(num_rand) # large kick
a = a_min + (a_max - a_min)*np.random.random_sample(num_rand)
dra = r.to(self.coords.ra.unit) * np.cos(a) # offset in RA
ddec = r.to(self.coords.dec.unit) * np.sin(a) # offset in DEC
rnd_dec = np.repeat(self.coords.dec, numrepeat) + ddec
rnd_ra = np.repeat(self.coords.ra, numrepeat) \
+ dra/np.cos(rnd_dec)
if self.moc is not None:
idx = self.moc.contains(rnd_ra, rnd_dec)
rnd_ra = rnd_ra[idx]
rnd_dec = rnd_dec[idx]
return rnd_ra, rnd_dec
@staticmethod
def _nway_fake_catalogue(input_file, radius=20):
# Create a fake catalogue based on the positions of input_file.
# No fake sources closer to `radius` arcsec with respect to the
# original sources
root, ext = os.path.splitext(input_file)
output_file = '{}_fake{}'.format(root, ext)
command = ('nway-create-fake-catalogue.py --radius {} {} {}')
command = command.format(radius, input_file, output_file)
subprocess.check_output(command, shell=True)
fake_data = Table.read(output_file)
os.remove(output_file)
return fake_data
def xmatch_mock_catalogues(xmatchserver_user=None, seed=None, **kwargs):
    """
    Create mock catalogues using the tool provided by the XMatch service.

    Parameters
    ----------
    xmatchserver_user : ``str`` or ``None``, optional
        User name for the XMatch server. If ``None``, it uses anonymous access.
        Default is ``None``.
    seed : ``long`` or ``None``, optional
        Long integer to be used as seed for the random generator in the XMatch
        server. Default is `None`.
    **kwargs :
        Check the XMatch documentation to see all accepted arguments.

    Returns
    -------
    catalogues : ``list``
        List of `Catalogue` objects with the mock catalogues created
        by XMatch.
    """
    from .xmatch import XMatchServer

    if 'nTab' not in kwargs:
        raise ValueError('nTab parameter is missing!')

    catalogues = []
    cat_prefix = 'tmp_mock'
    cat_fmt = 'fits'
    area = _mockcat_area(**kwargs)

    xms = XMatchServer(user=xmatchserver_user)
    # try/finally replaces the previous bare `except: logout; raise`
    # pattern: the session is always closed, on success and on failure.
    try:
        files_in_server = []
        # Upload the per-catalogue positional-error histograms, if given.
        for tag in ascii_uppercase[:kwargs['nTab']]:
            histfile_key = 'poserr{}file'.format(tag)
            if histfile_key in kwargs:
                files_in_server.append(os.path.basename(kwargs[histfile_key]))
                xms.put(kwargs[histfile_key])

        log.info('Creating mock catalogues in XMatch server...')
        with tempfile.NamedTemporaryFile() as xms_file:
            _make_xms_file(
                xms_file.name, prefix=cat_prefix, fmt=cat_fmt, seed=seed, **kwargs
            )
            xms.run(xms_file.name)

        log.info('Downloading results...')
        for tag in ascii_uppercase[:kwargs['nTab']]:
            cat_file = '{}{}.{}'.format(cat_prefix, tag, cat_fmt)
            files_in_server.append(cat_file)
            xms.get(cat_file)
            _mockcat_idcol_padwithzeros(cat_file)

            cat = Catalogue(
                cat_file,
                area=area,
                id_col='id',
                coord_cols=['posRA', 'posDec'],
                poserr_cols=['ePosA', 'ePosB', 'ePosPA'],
                poserr_type='ellipse',
                name=tag + 'mock',
            )
            catalogues.append(cat)
            os.remove(cat_file)

        # The server also keeps the combined output file; schedule it
        # for removal together with the per-table files.
        cat_file = '{}.{}'.format(cat_prefix, cat_fmt)
        files_in_server.append(cat_file)

        log.info('Delete data from the server...')
        xms.remove(*files_in_server)
    finally:
        xms.logout()

    return catalogues
def _mockcat_area(**kwargs):
    """Return the sky area (Quantity or MOC) implied by the geometry kwargs."""
    geometry = kwargs['geometry']
    if geometry == 'allsky':
        return ALLSKY_AREA_DEG
    if geometry == 'cone':
        radius = kwargs['r'] * u.deg
        return np.pi * radius**2
    if geometry == 'moc':
        return MOC.from_fits(kwargs['mocfile'])
    raise ValueError('Unknown geometry: {}'.format(geometry))
def _mockcat_idcol_padwithzeros(catfile, len_idstr=None):
    """Zero-pad the 'id' column of `catfile` to a fixed width, in place."""
    cat = Table.read(catfile)
    if not len_idstr:
        # Default to the width of the first id in the table.
        len_idstr = len(cat['id'][0])
    cat['id'] = [src_id.strip().zfill(len_idstr) for src_id in cat['id']]
    cat.write(catfile, overwrite=True)
def _make_xms_file(filename, prefix='tmp_mock', fmt='fits', seed=None, **kwargs):
    """Write an XMatch-server script that builds and saves mock catalogues."""
    # Files already uploaded to the server are referenced by basename only.
    if 'mocfile' in kwargs:
        kwargs['mocfile'] = os.path.basename(kwargs['mocfile'])

    for tag in ascii_uppercase[:kwargs['nTab']]:
        key = 'poserr{}file'.format(tag)
        if key in kwargs:
            kwargs[key] = os.path.basename(kwargs[key])

    params = ['{}={}'.format(key, value) for key, value in kwargs.items()]
    if seed is not None:
        params.insert(0, 'seed={}'.format(seed))

    lines = [
        'synthetic {}\n'.format(' '.join(params)),
        'save prefix={0} suffix=.{1} common={0}.{1} format={1}'.format(prefix, fmt),
    ]
    with open(filename, 'w') as f:
        f.writelines(lines)
| ruizca/astromatch | astromatch/catalogues.py | catalogues.py | py | 37,736 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "numpy.pi",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.rad",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "astropy.units.deg"... |
22020722236 | from . import app
from twilio.twiml.messaging_response import MessagingResponse
from .models import Question
from flask import url_for, request, session
@app.route('/question/<question_id>')
def question(question_id):
    """Look up a question by id, remember it in the session, render it as TwiML."""
    record = Question.query.get(question_id)
    session['question_id'] = record.id
    return sms_twiml(record)
def is_sms_request():
    """Return True when the current request carries Twilio's MessageSid field."""
    return 'MessageSid' in request.values.keys()
def sms_twiml(question):
    """Render *question* as a Twilio SMS (TwiML) response string."""
    twiml = MessagingResponse()
    twiml.message(question.content)
    # Per-kind reply instructions (SMS_INSTRUCTIONS) are currently disabled.
    return str(twiml)
# Per-question-type hints telling the user how to format an SMS reply.
SMS_INSTRUCTIONS = {
    Question.TEXT: 'Please type your answer',
    Question.BOOLEAN: 'Please type yes or no (or y or n)',
    Question.NUMERIC: 'Please type a number between one and seven'
}
| picsul/short-message-survey | sms_app/question_view.py | question_view.py | py | 798 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "models.Question.query.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Question.query",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "fla... |
18607397296 | # -*- coding: utf-8 -*-
"""
@Datetime: 2019/3/4
@Author: Zhang Yafei
"""
from django.urls import path, re_path
from organization.views import OrgView, AddUserAskView, OrgHomeView, OrgCoueseView, OrgTeacherView, OrgDescView
from organization.views import AddFavView, TeacherListView, TeacherDetailView
# Namespace for reverse() look-ups, e.g. reverse('org:org_list').
app_name = 'org'

urlpatterns = [
    # Course-organization list page
    re_path(r'^list/', OrgView.as_view(), name='org_list'),
    re_path(r'^add_ask/', AddUserAskView.as_view(), name='add_ask'),
    path('home/<int:org_id>/', OrgHomeView.as_view(), name='org_home'),
    path('course/<int:org_id>/', OrgCoueseView.as_view(), name='org_course'),
    path('desc/<int:org_id>/', OrgDescView.as_view(), name='org_desc'),
    path('org_teacher/<int:org_id>/', OrgTeacherView.as_view(), name='org_teacher'),
    # Favorite an organization
    re_path(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),
    # Teacher list page
    re_path(r'^teacher/list/$', TeacherListView.as_view(), name="teacher_list"),
    # Teacher detail page
    re_path(r'^teacher/detail/(?P<teacher_id>\d+)/$', TeacherDetailView.as_view(), name="teacher_detail"),
]
| zhangyafeii/mxonline | apps/organization/urls.py | urls.py | py | 1,124 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.re_path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "organization.views.OrgView.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "organization.views.OrgView",
"line_number": 16,
"usage_type": "name"
},
{
... |
25411012625 | import logging
import threading
from devil.android import device_errors
from devil.utils import reraiser_thread
from devil.utils import watchdog_timer
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_collection
DEFAULT_TIMEOUT = 7 * 60 # seven minutes
class _ThreadSafeCounter(object):
"""A threadsafe counter."""
def __init__(self):
self._lock = threading.Lock()
self._value = 0
def GetAndIncrement(self):
"""Get the current value and increment it atomically.
Returns:
The value before incrementing.
"""
with self._lock:
pre_increment = self._value
self._value += 1
return pre_increment
class _Test(object):
"""Holds a test with additional metadata."""
def __init__(self, test, tries=0):
"""Initializes the _Test object.
Args:
test: The test.
tries: Number of tries so far.
"""
self.test = test
self.tries = tries
def _RunTestsFromQueue(runner, collection, out_results, watcher,
                       num_retries, tag_results_with_device=False):
  """Runs tests from the collection until empty using the given runner.

  Adds TestRunResults objects to the out_results list and may add tests to the
  out_retry list.

  Args:
    runner: A TestRunner object used to run the tests.
    collection: A TestCollection from which to get _Test objects to run.
    out_results: A list to add TestRunResults to.
    watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout.
    num_retries: Number of retries for a test.
    tag_results_with_device: If True, appends the name of the device on which
      the test was run to the test name. Used when replicating to identify
      which device ran each copy of the test, and to ensure each copy of the
      test is recorded separately.
  """

  def TagTestRunResults(test_run_results):
    """Tags all results with the last 4 digits of the device id.

    Used when replicating tests to distinguish the same tests run on different
    devices. We use a set to store test results, so the hash (generated from
    name and tag) must be unique to be considered different results.
    """
    new_test_run_results = base_test_result.TestRunResults()
    for test_result in test_run_results.GetAll():
      test_result.SetName('%s_%s' % (runner.device_serial[-4:],
                                     test_result.GetName()))
      new_test_run_results.AddResult(test_result)
    return new_test_run_results

  for test in collection:
    # Each dequeued test resets the shared watchdog.
    watcher.Reset()
    try:
      if not runner.device.IsOnline():
        # Device is unresponsive, stop handling tests on this device.
        msg = 'Device %s is unresponsive.' % runner.device_serial
        logging.warning(msg)
        raise device_errors.DeviceUnreachableError(msg)
      result, retry = runner.RunTest(test.test)
      if tag_results_with_device:
        result = TagTestRunResults(result)
      test.tries += 1
      if retry and test.tries <= num_retries:
        # Retry non-passing results, only record passing results.
        pass_results = base_test_result.TestRunResults()
        pass_results.AddResults(result.GetPass())
        out_results.append(pass_results)
        logging.warning('Will retry test %s, try #%s.', retry, test.tries)
        collection.add(_Test(test=retry, tries=test.tries))
      else:
        # All tests passed or retry limit reached. Either way, record results.
        out_results.append(result)
    except:
      # An unhandleable exception, ensure tests get run by another device and
      # reraise this exception on the main thread.
      collection.add(test)
      raise
    finally:
      # Retries count as separate tasks so always mark the popped test as done.
      collection.test_completed()
def _SetUp(runner_factory, device, out_runners, threadsafe_counter):
  """Creates a test runner for each device and calls SetUp() in parallel.

  Note: if a device is unresponsive the corresponding TestRunner will not be
  added to out_runners.

  Args:
    runner_factory: Callable that takes a device and index and returns a
      TestRunner object.
    device: The device serial number to set up.
    out_runners: List to add the successfully set up TestRunner object.
    threadsafe_counter: A _ThreadSafeCounter object used to get shard indices.
  """
  try:
    index = threadsafe_counter.GetAndIncrement()
    logging.warning('Creating shard %s for device %s.', index, device)
    runner = runner_factory(device, index)
    if runner:
      runner.SetUp()
      out_runners.append(runner)
    else:
      logging.info('Device %s is not active. Will not create shard %s.',
                   str(device), index)
  except (device_errors.CommandFailedError,
          device_errors.CommandTimeoutError,
          device_errors.DeviceUnreachableError):
    # Device-level failures are logged and swallowed: the device simply does
    # not contribute a runner.
    logging.exception('Failed to create shard for %s', str(device))
def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
                 tag_results_with_device=False):
  """Run all tests using the given TestRunners.

  Args:
    runners: A list of TestRunner objects.
    test_collection_factory: A callable to generate a TestCollection object for
      each test runner.
    num_retries: Number of retries for a test.
    timeout: Watchdog timeout in seconds.
    tag_results_with_device: If True, appends the name of the device on which
      the test was run to the test name. Used when replicating to identify
      which device ran each copy of the test, and to ensure each copy of the
      test is recorded separately.

  Returns:
    A tuple of (TestRunResults object, exit code)
  """
  logging.warning('Running tests with %s test %s.',
                  len(runners), 'runners' if len(runners) != 1 else 'runner')
  results = []
  exit_code = 0
  run_results = base_test_result.TestRunResults()
  watcher = watchdog_timer.WatchdogTimer(timeout)
  test_collections = [test_collection_factory() for _ in runners]
  # One worker thread per runner, each draining its (possibly shared)
  # collection; thread names carry the device-serial suffix for logs.
  threads = [
      reraiser_thread.ReraiserThread(
          _RunTestsFromQueue,
          [r, tc, results, watcher, num_retries, tag_results_with_device],
          name=r.device_serial[-4:])
      for r, tc in zip(runners, test_collections)]
  workers = reraiser_thread.ReraiserThreadGroup(threads)
  workers.StartAll()
  try:
    workers.JoinAll(watcher)
  except device_errors.CommandFailedError:
    logging.exception('Command failed on device.')
  except device_errors.CommandTimeoutError:
    logging.exception('Command timed out on device.')
  except device_errors.DeviceUnreachableError:
    logging.exception('Device became unreachable.')
  if not all((len(tc) == 0 for tc in test_collections)):
    # Some tests never ran; record them as UNKNOWN results.
    logging.error('Only ran %d tests (all devices are likely offline).',
                  len(results))
    for tc in test_collections:
      run_results.AddResults(base_test_result.BaseTestResult(
          t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())
  for r in results:
    run_results.AddTestRunResults(r)
  if not run_results.DidRunPass():
    exit_code = constants.ERROR_EXIT_CODE
  return (run_results, exit_code)
def _CreateRunners(runner_factory, devices, timeout=None):
  """Creates a test runner for each device and calls SetUp() in parallel.

  Note: if a device is unresponsive the corresponding TestRunner will not be
  included in the returned list.

  Args:
    runner_factory: Callable that takes a device and index and returns a
      TestRunner object.
    devices: List of device serial numbers as strings.
    timeout: Watchdog timeout in seconds, defaults to the default timeout.

  Returns:
    A list of TestRunner objects.
  """
  logging.warning('Creating %s test %s.', len(devices),
                  'runners' if len(devices) != 1 else 'runner')
  runners = []
  counter = _ThreadSafeCounter()
  # _SetUp appends to `runners` from each worker thread.
  threads = reraiser_thread.ReraiserThreadGroup(
      [reraiser_thread.ReraiserThread(_SetUp,
                                      [runner_factory, d, runners, counter],
                                      name=str(d)[-4:])
       for d in devices])
  threads.StartAll()
  threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
  return runners
def _TearDownRunners(runners, timeout=None):
  """Calls TearDown() for each test runner in parallel.

  Args:
    runners: A list of TestRunner objects.
    timeout: Watchdog timeout in seconds, defaults to the default timeout.
  """
  threads = reraiser_thread.ReraiserThreadGroup(
      [reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])
       for r in runners])
  threads.StartAll()
  threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
def ApplyMaxPerRun(tests, max_per_run):
  """Rearrange the tests so that no group contains more than max_per_run tests.

  Colon-joined strings are treated as test groups and re-chunked; any
  non-string entry is passed through untouched.

  Args:
    tests: List of tests, where strings may be ':'-joined test groups.
    max_per_run: Maximum number of tests allowed in any one group.

  Returns:
    A list of tests with no more than max_per_run per run.
  """
  tests_expanded = []
  for test_group in tests:
    # isinstance (rather than an exact type() comparison) also accepts
    # str subclasses; non-string test objects are never split.
    if not isinstance(test_group, str):
      tests_expanded.append(test_group)
    else:
      test_split = test_group.split(':')
      for i in range(0, len(test_split), max_per_run):
        tests_expanded.append(':'.join(test_split[i:i + max_per_run]))
  return tests_expanded
def RunTests(tests, runner_factory, devices, shard=True,
             test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
             num_retries=2, max_per_run=256):
  """Run all tests on attached devices, retrying tests that don't pass.

  Args:
    tests: List of tests to run.
    runner_factory: Callable that takes a device and index and returns a
      TestRunner object.
    devices: List of attached devices.
    shard: True if we should shard, False if we should replicate tests.
      - Sharding tests will distribute tests across all test runners through a
        shared test collection.
      - Replicating tests will copy all tests to each test runner through a
        unique test collection for each test runner.
    test_timeout: Watchdog timeout in seconds for running tests.
    setup_timeout: Watchdog timeout in seconds for creating and cleaning up
      test runners.
    num_retries: Number of retries for a test.
    max_per_run: Maximum number of tests to run in any group.

  Returns:
    A tuple of (base_test_result.TestRunResults object, exit code).
  """
  if not tests:
    logging.critical('No tests to run.')
    return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)

  tests_expanded = ApplyMaxPerRun(tests, max_per_run)
  if shard:
    # Generate a shared TestCollection object for all test runners, so they
    # draw from a common pool of tests.
    shared_test_collection = test_collection.TestCollection(
        [_Test(t) for t in tests_expanded])
    test_collection_factory = lambda: shared_test_collection
    tag_results_with_device = False
    log_string = 'sharded across devices'
  else:
    # Generate a unique TestCollection object for each test runner, but use
    # the same set of tests.
    test_collection_factory = lambda: test_collection.TestCollection(
        [_Test(t) for t in tests_expanded])
    tag_results_with_device = True
    log_string = 'replicated on each device'

  logging.info('Will run %d tests (%s): %s',
               len(tests_expanded), log_string, str(tests_expanded))
  runners = _CreateRunners(runner_factory, devices, setup_timeout)
  try:
    return _RunAllTests(runners, test_collection_factory,
                        num_retries, test_timeout, tag_results_with_device)
  finally:
    try:
      _TearDownRunners(runners, setup_timeout)
    except device_errors.DeviceUnreachableError as e:
      logging.warning('Device unresponsive during TearDown: [%s]', e)
    except Exception:  # pylint: disable=broad-except
      logging.exception('Unexpected exception caught during TearDown')
| hanpfei/chromium-net | build/android/pylib/base/test_dispatcher.py | test_dispatcher.py | py | 11,922 | python | en | code | 289 | github-code | 1 | [
{
"api_name": "threading.Lock",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pylib.base.base_test_result.TestRunResults",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pylib.base.base_test_result",
"line_number": 74,
"usage_type": "name"
},
{
... |
18653186375 | from django.conf.urls import url
from django.urls import path
from .views import (TicketAPIView, TicketAPIDetailView, ProjectTicketAPIDetailView, TicketReadAPIView,
OrderTicketAPIDetailView, VisitAPIView, VisitAPIDetailView, TicketVisitAPIDetailView)
app_name = "api-tickets"
# app_name enables namespaced reverse() look-ups, e.g. reverse('api-tickets:detail').
urlpatterns = [
    url(r'^tickets/(?P<id>\d+)/$', TicketAPIDetailView.as_view(), name='detail'),
    url(r'^visits/(?P<id>\d+)/$', VisitAPIDetailView.as_view(), name='vdetail'),
    path('create/', TicketAPIView.as_view(), name='create'),
    path('tickets/', TicketReadAPIView.as_view(), name='list'),
    path('visits/', VisitAPIView.as_view(), name='vlist'),
    # Filtered collections: tickets by project/order, visits by ticket.
    url(r'^prtickets/(?P<project>\d+)/$', ProjectTicketAPIDetailView.as_view(), name='p-list'),
    url(r'^odtickets/(?P<order>\d+)/$', OrderTicketAPIDetailView.as_view(), name='o-list'),
    url(r'^ticvisits/(?P<ticket>\d+)/$', TicketVisitAPIDetailView.as_view(), name='t-list'),
]
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.TicketAPIDetailView.as_view",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.TicketAPIDetailView",
"line_number": 12,
"usage_type": "name"
},
{
"... |
15684526765 | from aiogram import Bot, Dispatcher, types, executor
import requests
import json
# Reply keyboard with one button per supported currency pair.
btn = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
btn.add("USD-UZS", "RUB-UZS", "EURO-UZS", "CNY-UZS", "WON-UZS", "DINOR-UZS")

# NOTE(review): "Bot_token" is a placeholder; supply the real token via
# configuration, not source code.
token = "Bot_token"
bot = Bot(token=token)
dp = Dispatcher(bot)
@dp.message_handler(commands=["start"])
async def first(message: types.Message):
    """Greet the user on /start with a welcome photo and the currency keyboard."""
    # Context manager guarantees the image file handle is closed (the
    # original left it open).
    with open("images/pht.jpg", "rb") as p:
        await bot.send_photo(message.chat.id, p, caption="<b>Assalomu-alaykum, ValyutaBotga Hush Kelibsiz!</b>", reply_markup=btn, parse_mode="HTML")
@dp.message_handler(content_types=["text"])
async def second(message: types.Message):
    """Handle a currency-pair button press or a numeric amount to convert."""
    # NOTE(review): module-level mutable state is shared by ALL chats —
    # concurrent users overwrite each other's selected pair.
    global inputs, outputs, result, photo, caps
    text = message.text
    if text == "USD-UZS":
        inputs = "USD"
        outputs = "UZS"
        # NOTE(review): file handles opened here are never closed.
        photo = open("images/USD-UZS.jpg", "rb")
        caps = "Dollorning Sumdagi qiymati 👇"
    if text == "RUB-UZS":
        inputs = "RUB"
        outputs = "UZS"
        photo = open("images/RUB-UZS.jpg", "rb")
        caps = "Rublning Sumdagi qiymati 👇"
    if text == "EURO-UZS":
        inputs = "EUR"
        outputs = "UZS"
        photo = open("images/EUR-UZS.jpg", "rb")
        caps = "Euorning Sumdagi qiymati 👇"
    if text == "CNY-UZS":
        inputs = "CNY"
        outputs = "UZS"
        photo = open("images/CNY-UZS.jpg", "rb")
        caps = "Yenaning Sumdagi qiymati 👇"
    if text == "DINOR-UZS":
        inputs = "KWD"
        outputs = "UZS"
        photo = open("images/DINOR-UZS.jpg", "rb")
        caps = "Dinorning Sumdagi qiymati 👇"
    if text == "WON-UZS":
        inputs = "KRW"
        outputs = "UZS"
        photo = open("images/WON-UZS.jpg", "rb")
        caps = "Vonning Sumdagi qiymati 👇"
    # NOTE(review): if no pair button was ever pressed, `inputs` is undefined
    # here and this raises NameError on the first free-text message.
    url = "https://v6.exchangerate-api.com/v6/5f6d43916b52307ea4aed1f3/latest/" + inputs
    # NOTE(review): no timeout — a slow API response blocks this handler.
    responses = requests.get(url)
    rest = json.loads(responses.text)
    result = rest["conversion_rates"]["UZS"]
    if message.text.isdigit():
        # Convert the typed amount with the most recently fetched rate.
        can = int(message.text) * result
        await bot.send_photo(message.chat.id, photo, caption=caps)
        await bot.send_message(message.chat.id, round(can, 4))
if __name__ == '__main__':
    # Start aiogram long-polling; blocks until the process is stopped.
    executor.start_polling(dp)
| Sardor746/Valutchik | Valyuta/Valyuta.py | Valyuta.py | py | 2,272 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aiogram.types.ReplyKeyboardMarkup",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "aiogram.types",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "aiogram.Bot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "aiogram.Disp... |
30640759983 | import discord
from discord.ext import commands
class MyClient(discord.Client):
    """Discord client that tallies "dislike" points sent as \u203D-prefixed messages.

    The running total is persisted in a plain-text counter file.
    """

    async def on_ready(self):
        """Log the bot user once the gateway connection is established."""
        print('Logged on as {0}!'.format(self.user))

    async def on_message(self, message):
        """Dispatch on the \u203D command prefix and update the running total."""
        print('Message from {0.author}: {0.content}'.format(message))
        if message.content.startswith('\u203Dreset'):
            if str(message.author) == "Micah#2740":
                # FIX: text-mode write() requires a str, not the int 0.
                # `with` also guarantees the handle is closed.
                with open("C:/Users/Micah/Desktop/Personal/Python/count.txt", "w") as add_to:
                    add_to.write("0")
            # NOTE(review): indentation in the source is ambiguous — confirm
            # whether this reply belongs inside the author check above.
            await message.reply("I'm afraid I can't do that, Dave")
        elif message.content.startswith('\u203Doff'):
            await message.reply("Wrong")
        elif message.content.startswith('\u203D'):
            with open("C:/Users/Micah/Desktop/Personal/Python/count.txt", "r") as count:
                try:
                    current_count = int(count.read())
                except ValueError:
                    # Empty or corrupt counter file: start from zero.
                    current_count = 0
            user_content = message.content[1:]
            try:
                user_content = int(user_content)
            # FIX: int() raises ValueError on a non-numeric string; the
            # original caught TypeError, which never fires here.
            except ValueError:
                return
            current_count += user_content
            with open("C:/Users/Micah/Desktop/Personal/Python/count.txt", "w") as add_to:
                add_to.write(str(current_count))
            await message.reply(f'The total dislike of the new Discord is {current_count}', mention_author=True)
        print(message.content)
client = MyClient()
# The bot token lives in a local file so it stays out of the source tree.
keyfile = open("C:/Users/Micah/Desktop/Personal/Python/key.txt", "r")
logon_key = str(keyfile.read())
client.run(logon_key)
{
"api_name": "discord.Client",
"line_number": 5,
"usage_type": "attribute"
}
] |
32422131788 | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
# Simulate mouse actions: drag the slider of a slide CAPTCHA.
driver = webdriver.Chrome()
driver.get("https://reg.taobao.com/member/reg/fill_mobile.htm")
driver.maximize_window()
sleep(3)
# Click the agreement/confirm button.
element1 = driver.find_element_by_css_selector("#J_AgreementBtn")
element1.click()
sleep(4)
# Get the size of the slider track.
span_background = driver.find_element_by_css_selector("#nc_1__scale_text > span")
sleep(2)
span_background_size = span_background.size
print(span_background_size)
# Get the position of the slider knob.
button = driver.find_element_by_css_selector("#nc_1_n1z")
sleep(2)
button_location = button.location
print(button_location)
# Drag operation: drag_and_drop_by_offset.
# Move the knob right by the full track width (target x = knob x + track
# width; y stays at the knob's y coordinate).
x_location = button_location["x"] + span_background_size["width"]
sleep(2)
y_location = button_location["y"]
sleep(2)
ActionChains(driver).drag_and_drop_by_offset(button, x_location, y_location).perform()
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
10990304604 | import torch
from torch import nn
from torch.autograd import Function
import pywt
from functools import partial
def get_configs(model_name="wave_vit_s"):
    """Return constructor keyword arguments for a named Wave-ViT variant.

    Args:
        model_name (str): one of "wave_vit_s", "wave_vit_b", "wave_vit_l".

    Returns:
        dict: embed dims, head counts, MLP ratios, norm layer, depths and
        spatial-reduction ratios for the requested model size.

    Raises:
        KeyError: if *model_name* is not a known variant.
    """
    norm = partial(nn.LayerNorm, eps=1e-6)
    configs = {
        "wave_vit_s": dict(
            stem_hidden_dim=32, embed_dims=[64, 128, 320, 448],
            num_heads=[2, 4, 10, 14], mlp_ratios=[8, 8, 4, 4],
            norm_layer=norm, depths=[3, 4, 6, 3], sr_ratios=[4, 2, 1, 1]),
        "wave_vit_b": dict(
            stem_hidden_dim=64, embed_dims=[64, 128, 320, 512],
            num_heads=[2, 4, 10, 16], mlp_ratios=[8, 8, 4, 4],
            norm_layer=norm, depths=[3, 4, 12, 3], sr_ratios=[4, 2, 1, 1]),
        "wave_vit_l": dict(
            stem_hidden_dim=64, embed_dims=[96, 192, 384, 512],
            num_heads=[3, 6, 12, 16], mlp_ratios=[8, 8, 4, 4],
            norm_layer=norm, depths=[3, 6, 18, 3], sr_ratios=[4, 2, 1, 1]),
    }
    return configs[model_name]
class DWTFunction(Function):
    """Autograd function for a one-level 2-D discrete wavelet transform.

    forward() applies the four analysis filters (LL/LH/HL/HH) as depthwise
    stride-2 convolutions and stacks the sub-bands along channels;
    backward() inverts this with a grouped transposed convolution.
    """

    @staticmethod
    def forward(ctx, x, w_ll, w_lh, w_hl, w_hh):
        x = x.contiguous()
        # Keep the analysis filters and input shape for the backward pass.
        ctx.save_for_backward(w_ll, w_lh, w_hl, w_hh)
        ctx.shape = x.shape
        dim = x.shape[1]
        # Depthwise (groups=dim) stride-2 convolutions, one per sub-band.
        x_ll = torch.nn.functional.conv2d(x, w_ll.expand(dim, -1, -1, -1), stride=2, groups=dim)
        x_lh = torch.nn.functional.conv2d(x, w_lh.expand(dim, -1, -1, -1), stride=2, groups=dim)
        x_hl = torch.nn.functional.conv2d(x, w_hl.expand(dim, -1, -1, -1), stride=2, groups=dim)
        x_hh = torch.nn.functional.conv2d(x, w_hh.expand(dim, -1, -1, -1), stride=2, groups=dim)
        # Output channels are band-major: [LL, LH, HL, HH].
        x = torch.cat([x_ll, x_lh, x_hl, x_hh], dim=1)
        return x

    @staticmethod
    def backward(ctx, dx):
        if ctx.needs_input_grad[0]:
            w_ll, w_lh, w_hl, w_hh = ctx.saved_tensors
            b, c, h, w = ctx.shape
            # Regroup the gradient from band-major to channel-major ordering.
            dx = dx.view(b, 4, -1, h//2, w//2)
            dx = dx.transpose(1, 2).reshape(b, -1, h//2, w//2)
            filters = torch.cat([w_ll, w_lh, w_hl, w_hh], dim=0).repeat(c, 1, 1, 1)
            dx = torch.nn.functional.conv_transpose2d(dx, filters, stride=2, groups=c)
        # Only the input receives a gradient; the filters are fixed buffers.
        return dx, None, None, None, None
class IDWTFunction(Function):
    """Autograd function for the inverse one-level 2-D discrete wavelet transform."""

    @staticmethod
    def forward(ctx, x, filters):
        ctx.save_for_backward(filters)
        ctx.shape = x.shape
        b, _, h, w = x.shape
        # Regroup (b, 4*c, h, w) from band-major to channel-major ordering.
        x = x.view(b, 4, -1, h, w).transpose(1, 2)
        c = x.shape[1]
        x = x.reshape(b, -1, h, w)
        filters = filters.repeat(c, 1, 1, 1)
        # Grouped transposed convolution reconstructs the full-resolution map.
        x = torch.nn.functional.conv_transpose2d(x, filters, stride=2, groups=c)
        return x

    @staticmethod
    def backward(ctx, dx):
        if ctx.needs_input_grad[0]:
            filters = ctx.saved_tensors
            filters = filters[0]
            _, c, _, _ = ctx.shape
            # ctx.shape holds the 4*c input channels; recover c per band.
            c = c // 4
            dx = dx.contiguous()
            w_ll, w_lh, w_hl, w_hh = torch.unbind(filters, dim=0)
            # Gradient of a transposed conv is the matching forward conv,
            # applied per sub-band.
            x_ll = torch.nn.functional.conv2d(dx, w_ll.unsqueeze(1).expand(c, -1, -1, -1), stride=2, groups=c)
            x_lh = torch.nn.functional.conv2d(dx, w_lh.unsqueeze(1).expand(c, -1, -1, -1), stride=2, groups=c)
            x_hl = torch.nn.functional.conv2d(dx, w_hl.unsqueeze(1).expand(c, -1, -1, -1), stride=2, groups=c)
            x_hh = torch.nn.functional.conv2d(dx, w_hh.unsqueeze(1).expand(c, -1, -1, -1), stride=2, groups=c)
            dx = torch.cat([x_ll, x_lh, x_hl, x_hh], dim=1)
        return dx, None
class IDWT2D(nn.Module):
    """Inverse 2-D DWT module built from a named pywt wavelet."""

    def __init__(self, wave):
        super().__init__()
        w = pywt.Wavelet(wave)
        rec_hi = torch.Tensor(w.rec_hi)
        rec_lo = torch.Tensor(w.rec_lo)
        # Outer products of the 1-D reconstruction filters yield the four
        # separable 2-D synthesis filters (LL/LH/HL/HH).
        w_ll = rec_lo.unsqueeze(0) * rec_lo.unsqueeze(1)
        w_lh = rec_lo.unsqueeze(0) * rec_hi.unsqueeze(1)
        w_hl = rec_hi.unsqueeze(0) * rec_lo.unsqueeze(1)
        w_hh = rec_hi.unsqueeze(0) * rec_hi.unsqueeze(1)
        w_ll = w_ll.unsqueeze(0).unsqueeze(1)
        w_lh = w_lh.unsqueeze(0).unsqueeze(1)
        w_hl = w_hl.unsqueeze(0).unsqueeze(1)
        w_hh = w_hh.unsqueeze(0).unsqueeze(1)
        filters = torch.cat([w_ll, w_lh, w_hl, w_hh], dim=0)
        self.register_buffer("filters", filters)
        # NOTE(review): unconditional half-precision cast — assumes the model
        # always runs under fp16/AMP; confirm before reusing elsewhere.
        self.filters = self.filters.to(dtype=torch.float16)

    def forward(self, x):
        return IDWTFunction.apply(x, self.filters)
class DWT2D(nn.Module):
    """Forward 2-D DWT module built from a named pywt wavelet."""

    def __init__(self, wave):
        super().__init__()
        w = pywt.Wavelet(wave)
        # Decomposition filters are time-reversed so that conv2d implements
        # the correlation expected by the DWT.
        dec_hi = torch.Tensor(w.dec_hi[::-1])
        dec_lo = torch.Tensor(w.dec_lo[::-1])
        # Separable 2-D analysis filters from outer products of the 1-D ones.
        w_ll = dec_lo.unsqueeze(0) * dec_lo.unsqueeze(1)
        w_lh = dec_lo.unsqueeze(0) * dec_hi.unsqueeze(1)
        w_hl = dec_hi.unsqueeze(0) * dec_lo.unsqueeze(1)
        w_hh = dec_hi.unsqueeze(0) * dec_hi.unsqueeze(1)
        self.register_buffer("w_ll", w_ll.unsqueeze(0).unsqueeze(0))
        self.register_buffer("w_lh", w_lh.unsqueeze(0).unsqueeze(0))
        self.register_buffer("w_hl", w_hl.unsqueeze(0).unsqueeze(0))
        self.register_buffer("w_hh", w_hh.unsqueeze(0).unsqueeze(0))
        # NOTE(review): unconditional half-precision cast — assumes fp16/AMP
        # inference; confirm before reusing elsewhere.
        self.w_ll = self.w_ll.to(dtype=torch.float16)
        self.w_lh = self.w_lh.to(dtype=torch.float16)
        self.w_hl = self.w_hl.to(dtype=torch.float16)
        self.w_hh = self.w_hh.to(dtype=torch.float16)

    def forward(self, x):
        return DWTFunction.apply(x, self.w_ll, self.w_lh, self.w_hl, self.w_hh)
| towhee-io/towhee | towhee/models/wave_vit/wave_vit_utils.py | wave_vit_utils.py | py | 5,531 | python | en | code | 2,843 | github-code | 1 | [
{
"api_name": "functools.partial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "functools.partial"... |
42589540928 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 19:59:37 2017
@author: saber_master
"""
import requests
from bs4 import BeautifulSoup
import bs4
UA = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36'
headers = {'User-Agent':UA}
def getHTMLText(url):
    """Fetch *url* and return its decoded text, or '' on any request failure."""
    try:
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()  # raise HTTPError for 4xx/5xx responses
        # Use the sniffed encoding so CJK pages decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    # FIX: the original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; only network/HTTP errors should degrade to "".
    except requests.RequestException:
        return ""
def fillUnivList(ulist, html):
    """Parse the ranking table in *html*, appending [rank, name, province] rows to *ulist*."""
    soup = BeautifulSoup(html, 'html.parser')
    for row in soup.find('tbody').children:
        # Skip NavigableString whitespace nodes between table rows.
        if not isinstance(row, bs4.element.Tag):
            continue
        cells = row('td')
        ulist.append([cells[0].string, cells[1].string, cells[2].string])
def printUnivList(ulist, num):
    """Pretty-print the first *num* rows as an aligned table.

    chr(12288) (the ideographic full-width space) is passed as argument 3 and
    selected by the nested `{3}` format spec as the fill character, so columns
    of Chinese text stay visually aligned.
    """
    row_fmt = "{0:10}\t{1:{3}^10}\t{2:^10}"
    print(row_fmt.format("排名", "学校名称", "省份", chr(12288)))
    for idx in range(num):
        rank, name, province = ulist[idx][0], ulist[idx][1], ulist[idx][2]
        print(row_fmt.format(rank, name, province, chr(12288)))
def main():
    """Download the 2016 ranking page, parse it and print the top 20 rows."""
    uinfo = []
    url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'  # the 2017 page layout differs from 2016's
    html = getHTMLText(url)
    fillUnivList(uinfo, html)
    printUnivList(uinfo, 20)

main()
| saberly/qiyuezaixian | spider/bilibili/beautifulsoup.py | beautifulsoup.py | py | 1,546 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bs4.element",
"line_number": 31,
"usage_type": "attribute"
}
] |
33529844024 | import numpy as np
from sympy.physics.mechanics import ReferenceFrame,Point,Vector
from sympy import symbols
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import proj3d
from matplotlib.text import Text
from stl import mesh
from mpl_toolkits.mplot3d.art3d import Line3D,Line3DCollection
from stl import mesh
import copy
def transformationMatrix(rotation, translation):
    """Build a 4x4 homogeneous transform from a 3x3 rotation and a 3-vector translation."""
    t_col = np.reshape(translation, (3, 1))
    top = np.hstack((rotation, t_col))
    bottom = np.array([[0, 0, 0, 1]])
    return np.vstack((top, bottom))
class Visualizer:
    """3-D matplotlib scene for sympy.physics.mechanics frames, points and STL meshes.

    Symbolic position/orientation expressions are stored per actor and
    re-evaluated with a substitution dict on every `plot()` call.
    """

    # Offset (in data units) applied to text labels so they sit next to,
    # not on top of, their points.
    textoffset = np.transpose(np.array([[0.1, 0.1, 0.1]]))

    def __init__(self, baseFrame, origin):
        """Create the figure and 3-D axes; *origin*/*baseFrame* root the scene."""
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(projection='3d')
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_zlabel('Z')
        self.baseFrame = baseFrame
        self.origin = origin
        self.objs = []  # drawables: dicts holding symbolic eqs, artists, params
        self.xrange = [-1, 1]
        self.yrange = [-1, 1]
        self.zrange = [-1, 1]
        plt.show()

    def add(self, frame, point, shape=None, frame_scale=1, mesh_scale=1):
        """Add an actor consisting of a frame, a point and optionally an STL shape.

        Without *shape*, three drawables are registered: a point marker, a
        text label and a 3-arrow frame quiver. With *shape*, a scaled STL
        mesh is registered instead.
        """
        p = point.pos_from(self.origin).to_matrix(self.baseFrame)
        f = frame.dcm(self.baseFrame)
        if shape is None:
            # Point marker
            actor, = self.ax.plot3D(0, 0, 0, 'b.')
            obj = dict()
            obj['actor'] = actor
            obj['eq'] = p
            self.objs.append(obj)
            # Text label for the point
            actor = self.ax.text2D(0, 0, point.name)
            obj = dict()
            obj['actor'] = actor
            obj['eq'] = p
            self.objs.append(obj)
            # Quiver (three arrows) for the frame axes
            actor = list()
            for i in range(0, 3):
                actor.append(self.ax.quiver3D(0, 0, 0, 0, 0, 0))
            obj = dict()
            obj['actor'] = actor
            obj['eq'] = (p, f)
            obj['scale'] = frame_scale
            self.objs.append(obj)
        else:
            # STL mesh actor, scaled once at load time
            obj = dict()
            obj['mesh'] = None
            shape_mesh = mesh.Mesh.from_file(shape)
            shape_mesh.vectors = shape_mesh.vectors * mesh_scale
            obj['actor'] = shape_mesh
            obj['eq'] = (p, f)
            obj['scale'] = mesh_scale
            self.objs.append(obj)

    def plot(self, replacements=dict()):
        """Re-evaluate every actor's symbolic pose with *replacements* and redraw.

        Text labels are placed in a second pass because their 2-D positions
        depend on the projection, which needs the final axis limits.
        """
        for obj in self.objs:
            # First pass skips text since projection needs the autoscaled box.
            if isinstance(obj['actor'], Text):
                continue
            elif isinstance(obj['actor'], Line3D):
                p = obj['eq'].subs(replacements)
                p = np.array(p, dtype=np.float64)
                obj['actor'].set_data_3d(p[0], p[1], p[2])
                self.autoscale(p, boundary=0)
            elif isinstance(obj['actor'], list):  # Frame quiver
                colors = ['r', 'g', 'b']
                p = np.array(obj['eq'][0].subs(replacements), dtype=np.float64)
                f = np.array(obj['eq'][1].subs(replacements), dtype=np.float64)
                # Quivers cannot be updated in place: remove and recreate.
                for i in range(0, 3):
                    obj['actor'][i].remove()
                    obj['actor'][i] = self.ax.quiver(p[0], p[1], p[2], f[i, 0], f[i, 1], f[i, 2], length=obj['scale'], normalize=False, color=colors[i])
                self.autoscale(np.squeeze(p), boundary=obj['scale'])
            elif isinstance(obj['actor'], mesh.Mesh):  # STL mesh
                if 'surf' in obj:
                    obj['surf'].remove()
                p = np.array(obj['eq'][0].subs(replacements), dtype=np.float64)
                f = np.array(obj['eq'][1].subs(replacements), dtype=np.float64)
                H = transformationMatrix(np.transpose(f), p)
                # Transform a copy so the reference mesh stays at the origin.
                transformedMesh = copy.deepcopy(obj['actor'])
                transformedMesh.transform(H)
                obj['surf'] = self.ax.add_collection3d(mplot3d.art3d.Poly3DCollection(transformedMesh.vectors))
                obj['surf'].set_edgecolor(np.array([0.2, 0.2, 0.2, 0.05], dtype=np.float64))
                obj['surf'].set_facecolor(np.array([0.2, 0.2, 0.2, 0.2], dtype=np.float64))
                self.autoscale_mesh(transformedMesh)
        # Apply the accumulated ranges to the axes box.
        self.ax.set_xlim(xmin=self.xrange[0], xmax=self.xrange[1])
        self.ax.set_ylim(ymin=self.yrange[0], ymax=self.yrange[1])
        self.ax.set_zlim(zmin=self.zrange[0], zmax=self.zrange[1])
        self.ax.set_box_aspect([self.xrange[1] - self.xrange[0], self.yrange[1] - self.yrange[0], self.zrange[1] - self.zrange[0]])
        # Second pass: project text labels now that limits are final.
        for obj in self.objs:
            if isinstance(obj['actor'], Text):
                p = obj['eq'].subs(replacements)
                p = np.array(p, dtype=np.float64) + Visualizer.textoffset
                x, y, _ = proj3d.proj_transform(p[0], p[1], p[2], self.ax.get_proj())
                obj['actor'].set_position((x, y))
            else:
                continue

    def autoscale_mesh(self, mesh):
        """Grow the axis ranges to contain every vertex of *mesh*."""
        pmin = np.min(mesh.vectors, axis=(0, 1))
        pmax = np.max(mesh.vectors, axis=(0, 1))
        self.autoscale(pmin)
        self.autoscale(pmax)

    def autoscale(self, p, boundary=0):
        """Expand x/y/z ranges so point *p* (padded by *boundary*) stays visible."""
        if p[0] + boundary > self.xrange[1]:
            self.xrange[1] = p[0] + boundary
        elif p[0] - boundary < self.xrange[0]:
            self.xrange[0] = p[0] - boundary
        if p[1] + boundary > self.yrange[1]:
            self.yrange[1] = p[1] + boundary
        # FIX: the original compared against self.zrange[0] here (copy-paste
        # typo) while updating yrange; compare against yrange instead.
        elif p[1] - boundary < self.yrange[0]:
            self.yrange[0] = p[1] - boundary
        if p[2] + boundary > self.zrange[1]:
            self.zrange[1] = p[2] + boundary
        elif p[2] - boundary < self.zrange[0]:
            self.zrange[0] = p[2] - boundary
        # Make scaling equal on xyz (disabled)
        #self.xrange[0]=np.min([self.xrange[0],self.yrange[0],self.xrange[0]])
        #self.xrange[1]=np.max([self.xrange[1],self.yrange[1],self.xrange[1]])
        #self.yrange=self.xrange
        #self.zrange=self.xrange
| JonathanCamargo/Dinamica_Mecanica_Material_Interactivo | tools/vis.py | vis.py | py | 6,914 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.reshape",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"... |
27592126496 | import sys
from collections import deque
input = sys.stdin.readline
# n = node count, m = edge count; each of the next m lines is
# (v1, v2, weight). The final line gives the two endpoint nodes.
n, m = map(int, input().split())
arr = [list(map(int, input().split())) for x in range(m)]
f1, f2 = map(int, input().split())
def limit_weight(f1, f2):
    """Return the largest weight limit that still lets goods travel between
    factories ``f1`` and ``f2`` (binary search on the answer, BFS check).

    Relies on the module-level ``n`` (vertex count) and ``arr`` (edge list
    of ``(v1, v2, w)`` triples), as read from stdin above.
    """
    # Undirected adjacency list over vertices 1..n.
    adjacency = [[] for _ in range(n + 1)]
    for a, b, w in arr:
        adjacency[a].append((b, w))
        adjacency[b].append((a, w))

    def reachable(limit):
        # BFS from f1 using only edges whose capacity is >= limit.
        seen = [False] * (n + 1)
        queue = deque([f1])
        while queue:
            vertex = queue.popleft()
            if vertex == f2:
                return True
            if seen[vertex]:
                continue
            seen[vertex] = True
            for neighbor, capacity in adjacency[vertex]:
                if not seen[neighbor] and capacity >= limit:
                    queue.append(neighbor)
        return False

    # Binary search for the greatest feasible limit.
    low, high, best = 1, 1e9, -1
    while low <= high:
        mid = int((low + high) // 2)
        if reachable(mid):
            best = mid
            low = mid + 1
        else:
            high = mid - 1
    return best
print(limit_weight(f1, f2))
| ChoHon/Algorithm | week 03/Test/1939_2.py | 1939_2.py | py | 1,103 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 19,
"usage_type": "call"
}
] |
11225620250 | import os, sys
# DIR PATH: D:\Environments\practices of the python pro\Bark app\presentation
dir_path = os.path.dirname(os.path.realpath(__file__))
# PARENT DIR PATH: D:\Environments\practices of the python pro\Bark app
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
# the previous three commands were added because it doesn't recognize imports
# reason is because __main__ doesn't recognize that is part of a package
# so we add it's path to sys
from collections import OrderedDict
from business_logic import commands
class Option:
    """A single menu entry: a label, a command, and an optional prompt step.

    ``prep_call`` gathers user input before the command runs; a falsy
    result (or no prep_call at all) makes the command execute without
    arguments.
    """

    def __init__(self, name, command, prep_call=None):
        self.name = name
        self.command = command
        self.prep_call = prep_call

    def __str__(self):
        return self.name

    def choose(self):
        """Run the prep step (if any), execute the command, print its result."""
        data = None
        if self.prep_call:
            data = self.prep_call()
        if data:
            message = self.command.execute(data)
        else:
            message = self.command.execute()
        print(message)
def print_options(options):
    """Print each menu entry as "(KEY) label", followed by a blank line."""
    rendered = [f"({key}) {entry}" for key, entry in options.items()]
    rendered.append("")  # trailing blank line, as the menu layout expects
    print("\n".join(rendered))
def option_choice_is_valid(choice, options):
    """True when the typed choice matches an option key, accepting
    lowercase input for the uppercase shortcut keys."""
    if choice in options:
        return True
    return choice.upper() in options
def get_option_choice(options):
    """Prompt until the user types a valid option key; return that Option.

    Lookup uses the uppercased key, so lowercase input selects the same
    entry.
    """
    choice = input("Choose an option: ")
    while not option_choice_is_valid(choice, options):
        print("Invalid choice")
        choice = input("Choose an option: ")
    return options[choice.upper()]
def get_user_input(label, required=True):
    """Prompt for a single value; empty input becomes None.

    When ``required`` is True, keep re-prompting until something is typed.
    """
    value = input(f"{label}: ") or None
    while required and not value:
        value = input(f"{label}: ") or None
    return value
def get_new_bookmark():
    """Prompt for the fields of a new bookmark (notes are optional)."""
    return {
        "title": get_user_input("Title"),
        "url": get_user_input("URL"),
        "notes": get_user_input("Notes", required=False),
    }
def get_bookmark_id_for_deletion():
    """Prompt for the ID of the bookmark to delete."""
    return get_user_input("Enter bookmark ID to delete: ")
def clear_screen():
    """Clear the terminal, picking the right shell command per platform."""
    clear = "cls" if os.name == "nt" else "clear"
    os.system(clear)
def get_github_import_options():
    """Prompt for the GitHub username and timestamp-preservation flag.

    An empty answer (or Y/y) to the timestamp question counts as "yes".
    """
    return {
        "github_username": get_user_input("Github username"),
        "preserve_timestamps": get_user_input(
            "Preserve timestamps [Y/n]", required=False
        )
        in {"Y", "y", None,},
    }
def get_new_bookmark_info():
    """Prompt for a bookmark ID, the field to change, and its new value."""
    bookmark_id = get_user_input("Enter a bookmark ID to edit")
    field = get_user_input("Choose a value to edit (title, URL, notes)")
    new_value = get_user_input(f"Enter the new value for {field}")
    return {
        "id": bookmark_id,
        "update": {field: new_value},
    }
def loop():
    """One pass of the interactive menu: show options, run the chosen one,
    then wait for Enter so the user can read the output."""
    clear_screen()
    # OrderedDict keeps the declared shortcut order in the printed menu.
    options = OrderedDict(
        {
            "A": Option(
                "Add a bookmark",
                commands.AddBookMarkCommand(),
                prep_call=get_new_bookmark,
            ),
            "B": Option("List bookmarks by date", commands.ListBookmarksCommand()),
            "C": Option(
                "List bookmarks by title",
                commands.ListBookmarksCommand(order_by="title"),
            ),
            "D": Option(
                "Delete a bookmark",
                commands.DeleteBookmarkCommand(),
                prep_call=get_bookmark_id_for_deletion,
            ),
            "E": Option(
                "Update single bookmark",
                commands.EditBookmarkCommand(),
                prep_call=get_new_bookmark_info,
            ),
            "G": Option(
                "Import github stars",
                commands.ImportGitHubStarsCommand(),
                prep_call=get_github_import_options,
            ),
            "Q": Option("Quit", commands.QuitCommand(),),
        }
    )
    print_options(options)
    chosen_option = get_option_choice(options)
    clear_screen()
    chosen_option.choose()
    _ = input("Press Enter to return to menu")
if __name__ == "__main__":
commands.CreateBookmarksTableCommand().execute()
while True:
loop()
| Zoki92/Practices-of-python-pro | Bark app/presentation/__main__.py | __main__.py | py | 3,957 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"lin... |
33475013754 | bl_info = {
"name": "HECL",
"author": "Jack Andersen <jackoalan@gmail.com>",
"version": (1, 0),
"blender": (2, 80, 0),
"tracker_url": "https://github.com/AxioDL/hecl/issues/new",
"location": "Properties > Scene > HECL",
"description": "Enables blender to gather meshes, materials, and textures for hecl",
"category": "System"}
# Package import
from . import hmdl, sact, srea, swld, armature, mapa, mapu, frme, path, Nodegrid, Patching
Nodegrid = Nodegrid.Nodegrid
parent_armature = sact.SACTSubtype.parent_armature
import bpy, os, sys, struct, math
from mathutils import Vector
# Appendable list allowing external addons to register additional resource types
hecl_typeS = [
('NONE', "None", "Active scene not using HECL", None),
('MESH', "Mesh", "Active scene represents an HMDL Mesh", hmdl.draw),
('CMESH', "Collision Mesh", "Active scene represents a Collision Mesh", None),
('ARMATURE', "Armature", "Active scene represents an Armature", armature.draw),
('ACTOR', "Actor", "Active scene represents a HECL Actor", sact.draw),
('AREA', "Area", "Active scene represents a HECL Area", srea.draw),
('WORLD', "World", "Active scene represents a HECL World", swld.draw),
('MAPAREA', "Map Area", "Active scene represents a HECL Map Area", mapa.draw),
('MAPUNIVERSE', "Map Universe", "Active scene represents a HECL Map Universe", mapu.draw),
('FRAME', "Gui Frame", "Active scene represents a HECL Gui Frame", frme.draw),
('PATH', "Path Mesh", "Active scene represents a HECL Path Mesh", path.draw)]
# Main Scene Panel
class hecl_scene_panel(bpy.types.Panel):
    """Scene-properties panel exposing the HECL export type plus the
    shader-model/bloom preview toggles for renderable types."""
    bl_idname = "SCENE_PT_hecl"
    bl_label = "HECL"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "scene"
    @classmethod
    def poll(cls, context):
        # Panel is available whenever a scene exists.
        return (context.scene is not None)
    def draw(self, context):
        layout = self.layout
        type_row = layout.row(align=True)
        type_row.prop_menu_enum(context.scene, 'hecl_type', text='Export Type')
        # Shader model / bloom controls only apply to renderable types.
        if context.scene.hecl_type == 'MESH' or context.scene.hecl_type == 'AREA' or context.scene.hecl_type == 'ACTOR':
            sm_row = layout.row(align=True)
            sm_row.prop_enum(context.scene, 'hecl_shader_model', 'ORIGINAL')
            sm_row.prop_enum(context.scene, 'hecl_shader_model', 'PBR')
            layout.prop(context.scene, 'hecl_mp3_bloom', text='View MP3 Bloom')
        # Delegate the rest of the panel to the active type's draw callback
        # (4th element of the hecl_typeS tuple), when one is registered.
        for exp_type in hecl_typeS:
            if exp_type[0] == context.scene.hecl_type and callable(exp_type[3]):
                exp_type[3](self.layout, context)
                break
# Light Panel
class hecl_light_panel(bpy.types.Panel):
    """Light data panel exposing the custom HECL falloff coefficients."""
    bl_idname = "DATA_PT_hecl_light"
    bl_label = "HECL"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"
    @classmethod
    def poll(cls, context):
        # Only shown when a light datablock is active.
        return context.light
    def draw(self, context):
        layout = self.layout
        layout.prop(context.light, 'hecl_falloff_constant')
        layout.prop(context.light, 'hecl_falloff_linear')
        layout.prop(context.light, 'hecl_falloff_quadratic')
# Blender export-type registration
def register_export_type_enum():
    """(Re)register the per-scene export-type enum from the current
    hecl_typeS list; called again after add_export_type so new types
    become selectable."""
    bpy.types.Scene.hecl_type = bpy.props.EnumProperty(items=
                                                       [tp[:3] for tp in hecl_typeS],
                                                       name="HECL Export Type",
                                                       description="Selects how active scene is exported by HECL")
# Function for external addons to register export types with HECL
def add_export_type(type_tuple):
    """Register an additional export type with HECL (for external addons).

    ``type_tuple`` is ``(id, label, description, draw_callback)``; raises
    RuntimeError when the id is already registered, then refreshes the
    scene enum so the new type is selectable.
    """
    type_tup = tuple(type_tuple)
    for tp in hecl_typeS:
        if tp[0] == type_tup[0]:
            raise RuntimeError("Type already registered with HECL")
    # BUGFIX: this previously appended to the misspelled name ``hecl_types``
    # (lowercase s), which raised NameError and made external registration
    # impossible; the module-level list is ``hecl_typeS``.
    hecl_typeS.append(type_tup)
    register_export_type_enum()
# Shell command receiver (from HECL driver)
def command(cmdline, writepipeline, writepipebuf):
    """Shell command receiver invoked by the HECL driver (currently a no-op)."""
    pass
def mesh_aabb(writepipebuf):
    """Compute the axis-aligned bounding box of the scene's mesh(es) and
    stream it to the HECL driver as two packed little float3 values
    (min corner, then max corner) via ``writepipebuf``."""
    scene = bpy.context.scene
    # Start with an inverted box so any real point replaces it.
    total_min = Vector((99999.0, 99999.0, 99999.0))
    total_max = Vector((-99999.0, -99999.0, -99999.0))
    if bpy.context.scene.hecl_type == 'ACTOR':
        # Actors: union the bounds of every subtype's linked mesh.
        sact_data = bpy.context.scene.hecl_sact_data
        for subtype in sact_data.subtypes:
            if subtype.linked_mesh in bpy.data.objects:
                mesh = bpy.data.objects[subtype.linked_mesh]
                # bound_box corners 0 / 6 hold the min / max extremes.
                minPt = mesh.bound_box[0]
                maxPt = mesh.bound_box[6]
                for comp in range(3):
                    if minPt[comp] < total_min[comp]:
                        total_min[comp] = minPt[comp]
                for comp in range(3):
                    if maxPt[comp] > total_max[comp]:
                        total_max[comp] = maxPt[comp]
    elif bpy.context.scene.hecl_type == 'MESH':
        # Single-mesh scenes: just that object's bounds.
        meshName = bpy.context.scene.hecl_mesh_obj
        if meshName in bpy.data.objects:
            mesh = bpy.data.objects[meshName]
            minPt = mesh.bound_box[0]
            maxPt = mesh.bound_box[6]
            for comp in range(3):
                if minPt[comp] < total_min[comp]:
                    total_min[comp] = minPt[comp]
            for comp in range(3):
                if maxPt[comp] > total_max[comp]:
                    total_max[comp] = maxPt[comp]
    writepipebuf(struct.pack('fff', total_min[0], total_min[1], total_min[2]))
    writepipebuf(struct.pack('fff', total_max[0], total_max[1], total_max[2]))
def shader_model_update(self, context):
    """Property-update callback: push the chosen shader model and the MP3
    bloom preview flag into the shared Retro node groups, when present."""
    value = 0.0
    if self.hecl_shader_model == 'PBR':
        value = 1.0
    bloom_value = 0.0
    if self.hecl_mp3_bloom:
        bloom_value = 1.0
    # These shaders expose a 'NewShaderModel' node whose first output
    # toggles between the original and PBR paths.
    for shad in ('RetroShader', 'RetroDynamicShader', 'RetroDynamicAlphaShader', 'RetroDynamicCharacterShader'):
        if shad in bpy.data.node_groups and 'NewShaderModel' in bpy.data.node_groups[shad].nodes:
            bpy.data.node_groups[shad].nodes['NewShaderModel'].outputs[0].default_value = value
    # The MP3 shader blends bloom through a Mix Shader factor input.
    for shad in ('RetroShaderMP3',):
        if shad in bpy.data.node_groups and 'Mix Shader' in bpy.data.node_groups[shad].nodes:
            bpy.data.node_groups[shad].nodes['Mix Shader'].inputs[0].default_value = bloom_value
# Load scene callback
from bpy.app.handlers import persistent
@persistent
def scene_loaded(dummy):
    """load_post handler: normalize scene state after a .blend file loads.

    Hides externally-linked objects, wires up path previews, re-parents
    actor meshes to their armatures, and re-applies the shader model.
    """
    # Hide everything from an external library
    if bpy.context.scene.hecl_type != 'FRAME':
        for o in bpy.context.scene.objects:
            if o.library or (o.data and o.data.library):
                o.hide_set(True)
    # Show PATH library objects as wireframes
    if bpy.context.scene.hecl_type == 'PATH':
        if bpy.context.scene.background_set:
            for o in bpy.context.scene.background_set.objects:
                o.display_type = 'WIRE'
        if bpy.context.scene.hecl_path_obj in bpy.context.scene.objects:
            path_obj = bpy.context.scene.objects[bpy.context.scene.hecl_path_obj]
            path_obj.show_wire = True
    # Linked-Child Detection: re-establish mesh->armature parenting for
    # every actor subtype (and its overlays) across all scenes.
    for scene in bpy.data.scenes:
        if scene.hecl_type == 'ACTOR':
            actor_data = scene.hecl_sact_data
            for subtype in actor_data.subtypes:
                if subtype.linked_mesh in bpy.data.objects:
                    mesh_obj = bpy.data.objects[subtype.linked_mesh]
                    if subtype.linked_armature in bpy.data.objects:
                        arm_obj = bpy.data.objects[subtype.linked_armature]
                        parent_armature(mesh_obj, arm_obj)
                        for overlay in subtype.overlays:
                            if overlay.linked_mesh in bpy.data.objects:
                                mesh_obj = bpy.data.objects[overlay.linked_mesh]
                                parent_armature(mesh_obj, arm_obj)
    # Show only the active mesh and action
    if sact.SACTSubtype.SACTSubtype_load.poll(bpy.context):
        bpy.ops.scene.sactsubtype_load()
    if sact.SACTAction.SACTAction_load.poll(bpy.context):
        bpy.ops.scene.sactaction_load()
    shader_model_update(bpy.context.scene, bpy.context)
shader_model_update(bpy.context.scene, bpy.context)
def power_of_distance(context, light, dist):
    """Light power required for ``light`` to reach Eevee's visibility
    threshold at distance ``dist`` (inverse-square falloff, scaled by the
    brightest color channel)."""
    brightest = max(light.color[0], light.color[1], light.color[2])
    threshold = context.scene.eevee.light_threshold
    return dist * dist * threshold / brightest
def power_of_coefficients(context, light):
    """Translate the custom HECL falloff coefficients into an Eevee light
    power, preferring the quadratic term over the linear one.

    Returns 0.0 when both coefficients (or, in the quadratic case, the
    light intensity) are effectively zero.
    """
    EPS = 1.19e-07  # ~float32 machine epsilon: treat tiny values as zero
    linear = light.hecl_falloff_linear
    quadratic = light.hecl_falloff_quadratic
    if linear < EPS and quadratic < EPS:
        return 0.0
    intensity = max(light.color[0], light.color[1], light.color[2])
    if quadratic > EPS:
        if intensity <= EPS:
            return 0.0
        reach = math.sqrt(intensity / (0.0588235 * quadratic))
        return power_of_distance(context, light, reach)
    if linear > EPS:
        reach = intensity / (0.0588235 * linear)
        return power_of_distance(context, light, reach)
    return 0.0
def set_light_falloff(self, context):
    # Property-update callback: recompute the light's energy whenever one
    # of the HECL falloff coefficients changes.
    self.energy = power_of_coefficients(context, self)
# Registration
def register():
    """Addon entry point: register submodules, UI panels, custom scene and
    light properties, and the load_post handler."""
    register_export_type_enum()
    hmdl.register()
    sact.register()
    srea.register()
    frme.register()
    mapa.register()
    mapu.register()
    path.register()
    armature.register()
    bpy.utils.register_class(hecl_scene_panel)
    bpy.utils.register_class(hecl_light_panel)
    bpy.types.Scene.hecl_auto_select = bpy.props.BoolProperty(name='HECL Auto Select', default=True)
    # The three falloff coefficients all funnel into set_light_falloff,
    # which converts them to an Eevee light energy.
    bpy.types.Light.hecl_falloff_constant = bpy.props.FloatProperty(
        name="HECL Falloff Constant",
        description="Constant falloff coefficient",
        update=set_light_falloff,
        default=1.0,
        min=0.0)
    bpy.types.Light.hecl_falloff_linear = bpy.props.FloatProperty(
        name="HECL Falloff Linear",
        description="Linear falloff coefficient",
        update=set_light_falloff,
        default=0.0,
        min=0.0)
    bpy.types.Light.hecl_falloff_quadratic = bpy.props.FloatProperty(
        name="HECL Falloff Quadratic",
        description="Quadratic falloff coefficient",
        update=set_light_falloff,
        default=0.0,
        min=0.0)
    bpy.types.Scene.hecl_shader_model = bpy.props.EnumProperty(name="HECL Shader Model",
                                                               description="Which shader model to use for rendering",
                                                               items=[
                                                                   ('ORIGINAL', "Original", "Close approximation of GameCube materials"),
                                                                   ('PBR', "PBR", "Hybrid PBR materials replacing original reflection")],
                                                               update=shader_model_update,
                                                               default='ORIGINAL')
    bpy.types.Scene.hecl_mp3_bloom = bpy.props.BoolProperty(name="HECL View MP3 Bloom",
                                                            description="Preview MP3 bloom factors of model",
                                                            update=shader_model_update,
                                                            default=False)
    bpy.app.handlers.load_post.append(scene_loaded)
    Patching.register()
def unregister():
    """Tear down the handler, submodules, and panels (reverse of register)."""
    bpy.app.handlers.load_post.remove(scene_loaded)
    hmdl.unregister()
    sact.unregister()
    srea.unregister()
    path.unregister()
    bpy.utils.unregister_class(hecl_scene_panel)
    bpy.utils.unregister_class(hecl_light_panel)
    Patching.unregister()
if __name__ == "__main__":
register()
| AxioDL/hecl | blender/hecl/__init__.py | __init__.py | py | 11,246 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "bpy.types",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.EnumProperty",... |
from bs4 import BeautifulSoup
from jinja2 import Environment, FileSystemLoader
# Parse the exported Instagram "pending follow requests" HTML page.
with open('pending_follow_requests.html', 'r') as f:
    html = f.read()
soup = BeautifulSoup(html, 'html.parser')
users = []
# Each request lives in a div with this (export-specific) class list; the
# first anchor holds the username/profile link, the 4th inner div the date.
for element in soup.find_all('div', {'class': 'pam _3-95 _2ph- _a6-g uiBoxWhite noborder'}):
    user = {}
    user['username'] = element.find('a').text
    user['link'] = element.find('a')['href']
    user['date'] = element.find_all('div')[3].text
    users.append(user)
# Write a comma-separated text dump of all pending requests.
with open('pending.txt', 'w') as f:
    for user in users:
        f.write(user['username'] + ',' + user['link'] + ',' + user['date'] + '\n')
print('Usernames and links saved to pending.txt')
# Render an HTML report via Jinja2; enumerate is exposed to the template.
env = Environment(loader=FileSystemLoader('.'))
env.globals.update(enumerate=enumerate)
template = env.get_template('template.html')
output = template.render(users=users)
with open('pending.html', 'w') as f:
    f.write(output)
print('HTML file saved to pending.html')
| yassindaboussi/Pending-follow-requests | FromHtml/ExtractAndRender.py | ExtractAndRender.py | py | 952 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 24,
"usage_type": "call"
}
] |
74708557793 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2016-12-05 10:27:37
# @Author : Arms (526923945@qq.com)
# @Link : https://armszhou.github.io
# @Version : $Id$
from xlrd import open_workbook
import re
import os
workbook_name = '钱端(定制版)接口汇总.xlsx'
result_file_name = 'IID_Data'
result_file_path = '/'.join([os.getcwd(), result_file_name])
def build_IID_Data():
    """Extract the first three columns of the workbook's first sheet and
    write them to ``result_file_path`` as '|'-separated lines."""
    wb = open_workbook(workbook_name)
    # Grab the sheet holding the regular interfaces (the first sheet).
    sheet = wb.sheets()[0]
    with open(result_file_path, 'w') as f:
        for rownum in range(sheet.nrows):
            # Join the first 3 cells of each row.
            rowvalue = '|'.join([sheet.cell(rownum, 0).value, sheet.cell(
                rownum, 1).value, sheet.cell(rownum, 2).value]) + '\n'
            f.writelines(rowvalue)
if __name__ == '__main__':
    build_IID_Data()
| ArmsZhou/Python3-Practice | 实战/read_data_from_excel.py | read_data_from_excel.py | py | 856 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 18,
"usage_type": "call"
}
] |
28905968047 | """Add hospitalizedDischarged column
Revision ID: 58ea38a64c64
Revises: bc309f70af25
Create Date: 2021-01-12 15:41:17.174849
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '58ea38a64c64'
down_revision = 'bc309f70af25'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``hospitalizedDischarged`` integer column to coreData."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('coreData', sa.Column('hospitalizedDischarged', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``hospitalizedDischarged`` column (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('coreData', 'hospitalizedDischarged')
    # ### end Alembic commands ###
| COVID19Tracking/covid-publishing-api | migrations/versions/58ea38a64c64_add_hospitalizeddischarged_column.py | 58ea38a64c64_add_hospitalizeddischarged_column.py | py | 708 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer... |
12157928524 | import torch.nn as nn
from torch.nn import functional as F
class Encoder(nn.Module):
    """Convolutional encoder mapping a (3, 64, 64) image to a flat (1024,)
    embedding via four stride-2 conv + ReLU stages
    (channels 3 -> 32 -> 64 -> 128 -> 256)."""

    def __init__(self):
        super().__init__()
        # Attribute names cv1..cv4 are preserved so existing checkpoints
        # (state dicts) keep loading.
        channels = (3, 32, 64, 128, 256)
        for idx, (c_in, c_out) in enumerate(zip(channels, channels[1:]), start=1):
            setattr(self, f"cv{idx}", nn.Conv2d(c_in, c_out, kernel_size=4, stride=2))

    def forward(self, obs):
        """Encode a batch of observations; returns shape (batch, 1024)."""
        x = obs
        for conv in (self.cv1, self.cv2, self.cv3, self.cv4):
            x = F.relu(conv(x))
        return x.reshape(x.size(0), -1)
| chika-sawa/WorldModels | src/model/encoder.py | encoder.py | py | 754 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
12212610838 | """Defines base classes"""
import abc
from collections import namedtuple
from typing import Dict, List, Optional, Type, Union
from convtools.base import BaseConversion
_none = BaseConversion._none
class BaseStep:
    """Common base for conversion-pipeline steps; subclasses override
    STEP_TYPE and may promise an output type via ``ensures_type``."""
    STEP_TYPE = "base_step"
    # Type this step guarantees for its output, when known.
    ensures_type: Optional[Type] = None
# Bag of arguments threaded through type-to-code generation; grouping them
# in one namedtuple keeps the recursive codegen signatures manageable.
TypeValueCodeGenArgs = namedtuple(
    "TypeValueCodeGenArgs",
    [
        "code_suffix",
        "code",
        "type_value",
        "name_code",
        "data_code",
        "errors_code",
        "base_conversion",
        "ctx",
        "level",
        "type_var_to_type_value",
        "type_to_model_meta",
        "cast",
        "cast_overrides_stack",
        # to track top level unions, because order matters
        "path_before_model",
        "model_depth",
        "union_paths",
    ],
)
class BaseCaster(BaseStep, abc.ABC):
    """Defines base caster (object which casts input data to a necessary type
    or describes errors)"""
    STEP_TYPE = "cast"
    # Human-readable caster name (subclasses override).
    name = "base_caster"
    @abc.abstractmethod
    def to_code(self, args: TypeValueCodeGenArgs):
        """Generate this caster's casting code; concrete casters implement."""
        raise NotImplementedError
# Per-type override map: each type maps to a single caster or a chain of
# casters; None means "no overrides".
CastOverrides = Union[
    None,
    Dict[Type, BaseCaster],
    Dict[Type, List[BaseCaster]],
]
class BaseModel:
    """Base for generated models: item access is sugar for attribute
    access (``model["field"]`` == ``model.field``)."""

    def __getitem__(self, key):
        return getattr(self, key)

    def to_dict(self):
        """Serialize the model to a dict; concrete models implement this."""
        raise NotImplementedError
class ProxyObject:
    """Forwarding proxy: attribute reads fall through to the object stored
    in ``wrapped_object__`` (assigned after construction)."""

    def __init__(self):
        self.wrapped_object__ = None

    def __getattr__(self, name):
        # Only invoked for names not found on the proxy itself.
        return getattr(self.wrapped_object__, name)
class DictModel(BaseModel):
    """Marker base for models built from dict input."""
    pass
class ObjectModel(BaseModel):
    """Marker base for models built from object-attribute input."""
    pass
class BaseError(Exception):
    """Root of this package's exception hierarchy."""
    pass
class ValidationError(BaseError):
    """Raised when input data fails model validation."""
    pass
# Cache the raw dict operations so ErrorsDict can call them without going
# through its own overridden methods.
dict_getitem = dict.__getitem__
dict_contains = dict.__contains__
dict_delitem = dict.__delitem__
class ErrorsDict(dict):
    """This is a dict which acts like a defaultdict(dict) of any depth.
    It also supports lazy entries."""
    # False until children exist / lock() is called; afterwards a dict
    # object shared by every nested ErrorsDict so locking propagates
    # through the whole tree (empty dict == unlocked, non-empty == locked).
    locked = False
    def lock(self):
        # First lock on a childless dict just sets True; otherwise mark the
        # shared state non-empty (truthy) to lock the whole tree.
        if self.locked is False:
            self.locked = True
        else:
            self.locked[None] = None
    def __getitem__(self, k):
        # defaultdict-like: missing keys auto-create nested ErrorsDicts
        # until the tree is locked; "__ROOT" always resolves to self.
        try:
            return dict_getitem(self, k)
        except KeyError:
            if self.locked:
                raise
            if k == "__ROOT":
                return self
            self[k] = value = ErrorsDict()
            # Share a single "locked" marker object across the tree.
            if self.locked is False:
                value.locked = self.locked = {}
            else:
                value.locked = self.locked
            return value
    def __contains__(self, k):
        # NOTE(review): returns self (truthy) for "__ROOT" rather than a
        # strict bool -- callers appear to rely on truthiness only.
        if k == "__ROOT":
            return self
        return dict_contains(self, k)
    def __delitem__(self, k):
        # Deleting "__ROOT" empties the dict instead of removing a key.
        if k == "__ROOT":
            return self.clear()
        dict_delitem(self, k)
    def get_lazy_item(self, k):
        """Return self[k] without creating it: existing keys are returned
        directly, missing ones as a LazyErrorsDict placeholder."""
        if k == "__ROOT":
            return self
        if k in self:
            return self[k]
        return LazyErrorsDict(self, k)
class LazyErrorsDict:
    """A helper class which is a lazy entry of
    :py:obj:`ErrorsDict<convtools.contrib.models.base.ErrorsDict>`"""
    __slots__ = ("parent_errors_dict", "key", "value")
    def __init__(self, parent_errors_dict, key):
        self.parent_errors_dict = parent_errors_dict
        self.key = key
        # The real nested dict; created in the parent only on first access.
        self.value = None
    def __getitem__(self, k):
        # Materialize the parent entry lazily.
        if self.value is None:
            self.value = self.parent_errors_dict[self.key]
        return self.value[k]
    def __setitem__(self, k, v):
        if self.value is None:
            self.value = self.parent_errors_dict[self.key]
        self.value[k] = v
    def __contains__(self, key):
        # An unmaterialized entry contains nothing.
        if self.value is None:
            return False
        return key in self.value
    def __delitem__(self, k):
        if self.value is not None:
            del self.value[k]
            # Drop the now-empty entry from the parent so empty error dicts
            # don't linger in the tree.
            if not self.value:
                del self.parent_errors_dict[self.key]
                self.value = None
    def __bool__(self):
        # Truthy only once materialized and non-empty.
        return bool(self.value)
    def get_lazy_item(self, k):
        """Chain lazy access one level deeper (mirrors ErrorsDict)."""
        if k == "__ROOT":
            return self
        return LazyErrorsDict(self, k)
| simrit1/convtools | src/convtools/contrib/models/base.py | base.py | py | 4,078 | python | en | code | null | github-code | 1 | [
{
"api_name": "convtools.base.BaseConversion._none",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "convtools.base.BaseConversion",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
... |
33198834237 | import datetime
import datetime
from django.core.paginator import Paginator
from django.db.models import Q
from django.utils import timezone
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from .models import Message
from .forms import MessageForm
from django.views.generic import UpdateView, CreateView, DeleteView, DetailView, ListView
import json
from django.core.serializers import serialize
from House.models import House, Section, Floor
from Appartament.models import Appartament
from User.models import User
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.contrib.auth.tokens import default_token_generator
UserModel = get_user_model()
# Create your views here.
class MessagesList(ListView):
    """Plain HTML listing of all messages."""
    model = Message
    template_name = 'Messages/message_list.html'
    queryset = Message.objects.all()
class MessageCreate(CreateView):
    """Create a message and e-mail it to every user of the chosen apartment."""
    model = Message
    template_name = 'Messages/message_create.html'
    form_class = MessageForm
    success_url = reverse_lazy('message_list')
    def form_valid(self, form):
        """Stamp the message with the send date/author, then notify each
        apartment user by e-mail before saving."""
        users = User.objects.filter(appartament=form.instance.appartament.id)
        form.instance.date_send = timezone.now()
        form.instance.user_send = self.request.user.get_full_name()
        print(form.cleaned_data)  # NOTE(review): leftover debug output
        for user in users:
            # One rendered e-mail per recipient.
            current_site = get_current_site(self.request)
            mail_subject = f'Від MyHouse24:{form.instance.title_mess}'
            message = render_to_string('Messages/send_message_to_email.html', {
                'form': form,
                'domain': current_site.domain,
            })
            to_email = user.email
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            email.send()
            print('Sending')  # NOTE(review): leftover debug output
        return super().form_valid(form=form)
class MessageDetail(DetailView):
    """Detail page for a single message."""
    model = Message
    template_name = 'Messages/message_detail.html'
class MessageDelete(DeleteView):
    """Delete a message; GET is routed straight to delete so plain links
    (no confirmation form) can trigger the deletion."""
    model = Message
    success_url = reverse_lazy('message_list')
    def get(self, request, *args, **kwargs):
        # BUGFIX: previously called ``self.delete(self, request, ...)``,
        # passing the view instance where DeleteView.delete expects the
        # request and shifting every argument by one. ``self.delete`` is
        # already bound, so only the request and the URL args belong here.
        return self.delete(request, *args, **kwargs)
def message_list(request):
    """DataTables-style JSON endpoint: searchable, paginated message list."""
    messages = Message.objects.all()
    search_text = request.GET.get('search_text')
    query = Q()
    if search_text:
        # Match either the title or the sender name, case-insensitively.
        query |= Q(title_mess__icontains=search_text)
        query |= Q(user_send__icontains=search_text)
        messages = messages.filter(query)
    # DataTables sends an offset/limit pair; convert to a page number.
    start = int(request.GET.get('start', 0))
    length = int(request.GET.get('length', 10))
    paginator = Paginator(messages, length)
    messages = paginator.get_page(start // length + 1)
    data = []
    for message in messages:
        data.append({
            'id': message.id,
            'user': message.user_send,
            'text': message.title_mess,
            'date': message.date_send.strftime("%d.%m.%Y")
        })
    response = {
        'draw': request.GET.get('draw'),
        'recordsTotal': Message.objects.all().count(),
        'recordsFiltered': messages.paginator.count,
        'data': data
    }
    return JsonResponse(response)
def filter_message(request):
    """AJAX helper: given a house id, return its sections and floors as
    serialized JSON (zeros when no house is selected)."""
    if request.GET.get('house_id') != '' and request.GET.get('house_id') is not None:
        print(request.GET.get('section_id'))  # NOTE(review): debug output
        sections = serialize('json', Section.objects.filter(house_id=request.GET.get('house_id')))
        floors = serialize('json', Floor.objects.filter(house_id=request.GET.get('house_id')))
        return JsonResponse({'sections': sections, 'floors': floors}, status=200)
    else:
        print('Hallo')  # NOTE(review): debug output
        return JsonResponse({'sections': 0, 'floors': 0}, status=200)
def filter_appartament_message(request):
    """AJAX helper: return the apartments matching house + section + floor
    as serialized JSON (zero when any selector is empty)."""
    print(request.GET.get('house_id'), request.GET.get('section_id'), request.GET.get('floor_id'))  # NOTE(review): debug output
    if request.GET.get('house_id') != '' and request.GET.get('section_id') != '' and request.GET.get('floor_id') != '':
        appartaments = serialize('json', Appartament.objects.filter(house_id=request.GET.get('house_id'), section_id=request.GET.get('section_id'), floor_id=request.GET.get('floor_id')))
        return JsonResponse({'appartaments': appartaments}, status=200)
    else:
        return JsonResponse({'appartaments': 0}, status=200)
def delete_selected_message(request):
    """Bulk-delete the messages whose ids arrive as 'selectedItems[]' in
    the POST body, then return to the message list."""
    message_items = request.POST.getlist('selectedItems[]')
    # One DELETE query covering all selected rows, instead of the previous
    # one-query-per-id loop.
    Message.objects.filter(id__in=message_items).delete()
    return redirect('message_list')
| Sampleeeees/MyHouse24 | Messages/views.py | views.py | py | 4,814 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "models.Message",
"line_number": 29,
"usage_type": "name"
},
{
"a... |
70647530913 | #!/usr/bin/env python3
#https://github.com/syakoo/galois-field
#pip install git+https://github.com/syakoo/galois-field
import argparse, sys, os.path, math
from galois_field import GFpn
from lib.wg_gf_lib import SimuGF
from galois_field.core import validator
#----------------------------
def show_examples():
    '''Print sample command lines for this script.'''
    scriptname = os.path.basename(__file__)
    sample=f"""
samples:
    {scriptname} 11
    {scriptname} 11 -s3
    {scriptname} 11 -s3 -rc
    {scriptname} 10
    {scriptname} 10 -s5 -c2
    {scriptname} 10 -s5 -c2
"""
    print(sample)
#----------------------------
class ArgumentParser(argparse.ArgumentParser):
    """argparse subclass that shows the full help plus usage examples on
    any CLI error before exiting."""
    def error(self, message):
        # Print help and examples, then exit with the usual argparse code 2.
        self.print_help(sys.stderr)
        show_examples()
        self.exit(2, '%s: error: %s\n' % (self.prog, message))
#----------------------------
def parseargs():
    """Build and evaluate the CLI for the workshop planner.

    Returns the parsed argparse namespace.
    """
    parser = ArgumentParser(description='List lexical sorted workshops without pair repetition')
    parser.add_argument("person_count", help="number of participants", type=int)
    parser.add_argument("-s", "--maxsize", nargs='?', help="max teamsize", type=int)
    parser.add_argument("-c", "--groupcount", nargs='?', help="nr of teams ", type=int)
    parser.add_argument("-i", "--irr_poly", help="irregular polynom for construction of Galoisfield (advanced option)", required=False, type=str)
    parser.add_argument("-r", "--representation", help="'c','m' for coefficients/modulo (where supported). Default is 'n' for index", default='n', required=False)
    parser.add_argument("-p", "--procedure", help="procedure 'w','op','wst' wg/optables/ws-test. Default is ws for workshop", default='ws', required=False)
    # BUGFIX: --ortho used default='False' (a truthy *string*) with
    # action="store_true", so args.ortho was effectively always truthy.
    # store_true flags must default to the boolean False.
    parser.add_argument("-o", "--ortho", help="orthogonal squares in procedure ws", action="store_true", default=False, required=False)
    parser.add_argument("-v", "--verbose", help="verbose",
                        action="store_true")
    parser.add_argument("-d", "--debug", help="debug",
                        action="store_true")
    args = parser.parse_args()
    return args
# Prime-power group counts -> GF(basis^power) construction parameters.
# irr_poly is the coefficient string of an irreducible polynomial over
# GF(basis) used to build the extension field.
conductor = {
    4 : {'basis' :2, 'power' :2, 'irr_poly' :'111'},
    8 : {'basis' :2, 'power' :3, 'irr_poly' :'1101'},
    16 : {'basis' :2, 'power' :4, 'irr_poly' :'10011'},
    32 : {'basis' :2, 'power' :5, 'irr_poly' :'100101'},
    9 : {'basis' :3, 'power' :2, 'irr_poly' :'101'},
    25 : {'basis' :5, 'power' :2, 'irr_poly' :'102'},
}
#----------------------------
def show_lookup_and_exit(lookup, person_count, comment):
    """Print ``comment`` plus every supported partition, then terminate."""
    header = f'Supported partitions for person_count {person_count} are:'
    print(comment)
    print(header)
    for candidate in lookup:
        print(candidate)
    sys.exit()
#----------------------------
def evaluate_some_args(args):
    """Resolve (maxsize, groupcount) from the CLI args.

    Enumerates every feasible partition of ``person_count`` into teams
    whose count is a supported prime power or small prime, filters by any
    user-given --groupcount/--maxsize, and either returns the unique match,
    lists the alternatives and exits, or exits when nothing fits.
    """
    class MatchObject:
        # One feasible partition: groupcount teams of minsize/maxsize
        # members, with ``rest`` teams holding the larger size.
        #----------------------------
        def __init__(self, person_count, groupcount, maxsize, minsize, rest):
            self.person_count = person_count
            self.groupcount = groupcount
            self.maxsize = maxsize
            self.minsize = minsize
            self.rest = rest
            assert person_count > 0, person_count
            assert groupcount > 0, groupcount
            assert maxsize > 0, maxsize
            assert minsize > 0, minsize
        #----------------------------
        def __str__(self):
            person_count = self.person_count
            groupcount = self.groupcount
            minsize = self.minsize
            maxsize = self.maxsize
            rest = self.rest
            nmin = groupcount - rest
            nmax = rest
            partition = nmin * [minsize] + nmax * [maxsize]
            partition = '-'.join([str(x) for x in partition])
            assert nmin * minsize + nmax * maxsize == person_count, f'person_count={person_count} partition={partition}, {nmin} {nmax} gc{groupcount}'
            return f'--groupcount {groupcount:>2} --maxsize {maxsize:>2}    # {partition}'
    maxsize = args.maxsize
    groupcount = args.groupcount
    person_count = args.person_count
    verbose = args.verbose
    debug = args.debug
    person_param_info=f'person_count={person_count}, groupcount={groupcount}, maxsize={maxsize}'
    # Feasible groupcounts lie between ceil(sqrt(n)) and n//2, and must be
    # a prime < 100 or a supported prime power from ``conductor``.
    lower_groupcount = math.ceil(math.sqrt(person_count))
    upper_groupcount = person_count // 2
    primes100 = {x for x in range(2, 101) if all(x%y for y in range(2, min(x, 11)))}
    all_valid_groupcounts = set(conductor.keys()).union(primes100)
    groupcount_range = range(lower_groupcount, upper_groupcount+1)
    valid_groupcounts = sorted(all_valid_groupcounts.intersection(set(groupcount_range)))
    if verbose:
        print(f'lower_groupcount {lower_groupcount}')
        print(f'upper_groupcount {upper_groupcount}')
        print(f'all_valid_groupcounts {all_valid_groupcounts}')
        print(f'valid_groupcounts {valid_groupcounts}')
    lookup=[]
    if len(valid_groupcounts) > 0:
        for valid_groupcount in valid_groupcounts:
            # Split person_count as evenly as possible across the teams.
            valid_minsize, rest = divmod(person_count, valid_groupcount)
            if rest == 0:
                valid_maxsize = valid_minsize
            else:
                valid_maxsize = valid_minsize + 1
            valid_minsize0 = math.floor(person_count/valid_groupcount)
            valid_maxsize0 = math.ceil(person_count/valid_groupcount)
            assert valid_minsize0 == valid_minsize, f'valid_minsize0={valid_minsize0}, valid_minsize={valid_minsize0}'
            assert valid_maxsize0 == valid_maxsize, f'valid_maxsize0={valid_maxsize0}, valid_maxsize={valid_maxsize0}'
            match_object = MatchObject(person_count=person_count, groupcount = valid_groupcount, maxsize = valid_maxsize, minsize = valid_minsize, rest=rest)
            lookup.append(match_object)
    if not lookup:
        print(f'No Workshops for {person_count} participants found.')
        sys.exit(0)
    # Keep partitions compatible with the user-supplied filters; None means
    # "the user did not constrain this dimension".
    founds = []
    for match_object in lookup:
        match_groupcount=None
        match_maxsize=None
        if groupcount:
            match_groupcount = (groupcount == match_object.groupcount)
            if debug: print(111, f'match_groupcount={match_groupcount}, match_object.groupcount={match_object.groupcount}, groupcount={groupcount}')
        if maxsize:
            match_maxsize = (maxsize == match_object.maxsize)
            if debug: print(222, f'match_maxsize={match_maxsize}, match_object.maxsize={match_object.maxsize}, maxsize={maxsize}')
        match_pair = (match_groupcount, match_maxsize)
        if match_pair in [(True,True),(True,None),(None, True), (None,None)]: #, (None,None)] show workshop direct if only one found
            founds.append(match_object)
            if debug: print(f'333 match_pair={match_pair}, len(founds)={len(founds)}, match_object={match_object}')
        else:
            if debug: print(f'000 match_pair={match_pair}, len(founds)={len(founds)}, match_object={match_object}')
    #if not founds:
    #    show_lookup_and_exit(lookup, person_count, f'No Workshop found for {person_param_info}.')
    if founds:
        if len(founds) > 1:
            # Ambiguous request: show the candidates and exit.
            show_lookup_and_exit(founds, person_count, f'Several Workshops found for person_count {person_count}.')
        elif len(founds) == 1:
            # Unique match: fill in whichever parameter the user omitted.
            found = founds[0]
            if debug:print(f'678 found = {found}')
            if not groupcount:
                groupcount = found.groupcount
            if not maxsize:
                maxsize = found.maxsize
    if args.debug:
        print(4711, args)
        print(4720, f'person_count={args.person_count}, maxsize={maxsize}, groupcount={groupcount}')
    return maxsize, groupcount
#----------------------------
def main():
    """Entry point: parse CLI arguments, derive the field parameters for the
    requested group count, and run the workshop-group simulation."""
    args = parseargs()
    maxsize, groupcount = evaluate_some_args(args)
    if args.debug:
        print(10453, f'maxsize={maxsize}, groupcount={groupcount}')
    if validator.is_prime(groupcount):
        # A prime group count is handled directly as GF(p): no extension
        # field and therefore no irreducible polynomial is needed.
        basis, power, irr_poly = groupcount, 1, None
        if args.representation == 'c':
            print(f"Representation 'c' is not supported for prime groupcount {groupcount}")
    else:
        # Composite group counts must be prime powers listed in the
        # conductor table, which also supplies a default irreducible poly.
        assert groupcount in conductor, groupcount
        entry = conductor[groupcount]
        basis = entry['basis']
        power = entry['power']
        coefficients = args.irr_poly or entry['irr_poly']
        irr_poly = [int(c) for c in coefficients]
        assert groupcount == basis**power, (groupcount, basis, power)
    if args.verbose:
        print(args)
    SimuGF().work(args.person_count, basis, power, maxsize, groupcount, irr_poly,
                  representation=args.representation, procedure=args.procedure,
                  ortho=args.ortho, verbose=args.verbose, debug=args.debug)
#----------------------------
def demo():
    """Ad-hoc demonstration: exercise the simulation over a few small
    Galois fields (GF(2^3) and GF(3^2) with three irreducible polynomials).

    NOTE(review): main() invokes SimuGF.work(person_count, basis, power,
    maxsize, groupcount, irr_poly, ...), while this demo calls it as
    work(basis, power, irr_poly, representation, show). The signatures do
    not match, so this demo looks stale -- confirm against SimuGF.work
    before relying on it.
    """
    simu = SimuGF()
    representation='n'
    show='ws'
    # Generating the field GF(2^3)
    basis=2
    power=3
    irr_poly=[1, 1, 0 ,1] #y^3=1+y^2 y^3-y^2-1=0
    simu.work(basis, power, irr_poly, representation, show)
    # NOTE(review): the next three assignments are dead code -- they are
    # overwritten by the GF(3^2) block below before being used anywhere.
    basis=2
    power=3
    irr_poly=[1, 0, 1 ,1] #x^3=1+1 x^3-x-1=0
    #GF(3^2)
    basis=3
    power=2
    irr_poly_f=[1, 0 ,1] #f=x^2+1
    irr_poly_g=[1, 1 ,2] #g=x^2+x-1
    irr_poly_h=[1, 2 ,2] #h=x^2-x-1
    simu.work(basis, power, irr_poly_f, representation, show)
    simu.work(basis, power, irr_poly_g, representation, show)
    simu.work(basis, power, irr_poly_h, representation, show)
if __name__ == '__main__':
main()
#demo()
| guenterjantzen/workshop-groups | wg_gf.py | wg_gf.py | py | 9,532 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.path.basename",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentPars... |
74913147553 | import requests
import os
import sys
import json
import argparse
import shlex
import urllib.parse
import xml.etree.ElementTree as ET
import configparser
from typing import List, Any, Union, Dict, Tuple, Optional
import readline
class apimodule:
    """Wrapper around the JSON self-description of a single API module.

    Arguments
        modulename: The name of the module (e.g. 'sshkey')
        modulejson: The JSON string as returned by
            curl -H "Accept: application/json" https://api.glesys.com/sshkey
    """
    def __init__(self, modulename: str, modulejson: str):
        self.modulename = modulename
        self.json = modulejson
        self.moduledata = json.loads(self.json)

    def _toboolean(self, indata: Union[bool, str]) -> bool:
        """Convert the API's 'true'/'false' strings (or a real bool) to bool.

        Any other type yields False; the original implicitly returned None
        here, breaking the declared -> bool contract.
        """
        if isinstance(indata, bool):
            return indata
        if isinstance(indata, str):
            return indata == 'true'
        return False

    def module(self) -> Dict[str, Any]:
        """Return the module description dict ({} if absent)."""
        if 'module' not in self.moduledata['response']:
            return dict()
        return self.moduledata['response']['module']

    def function(self, item: str) -> Dict[str, Any]:
        """Return the description dict of function `item` ({} if unknown)."""
        for oneitem in self.module().get('allowed_functions', []):
            if oneitem['function'] == item:
                return oneitem
        return dict()

    def functions(self) -> List[str]:
        """Return the names of all functions this module exposes
        (see https://api.glesys.com/api/listfunctions)."""
        return [item['function']
                for item in self.module().get('allowed_functions', [])]

    def documentation(self, item: str) -> str:
        """Return the documentation string of function `item` ('' if unknown)."""
        if item not in self.functions():
            return str()
        return self.function(item).get('documentation', str())

    def required_arguments(self, item) -> List[str]:
        """Return the list of required arguments of function `item`."""
        if item not in self.functions():
            return list()
        return self.function(item).get('required_arguments', list())

    def optional_arguments(self, item) -> List[str]:
        """Return the list of optional arguments of function `item`."""
        if item not in self.functions():
            return list()
        return self.function(item).get('optional_arguments', list())

    def post_allowed(self, item) -> bool:
        """Return True when function `item` may be called via HTTP POST."""
        if item not in self.functions():
            return False
        return bool(self.function(item).get('post'))

    def get_allowed(self, item) -> bool:
        """Return True when function `item` may be called via HTTP GET."""
        if item not in self.functions():
            return False
        return bool(self.function(item).get('get'))

    def authentication(self) -> Dict[str, Any]:
        """Return the module's authentication dict ({} if absent)."""
        if 'authentication' not in self.module():
            return dict()
        return self.module()['authentication']

    def _userfield(self, fieldname: str) -> str:
        """Return a field from authentication['user'] ('' if absent);
        shared by username/cloudaccount/customernumber."""
        user = self.authentication().get('user', dict())
        return user.get(fieldname, str())

    def username(self) -> str:
        """Return the authenticated username ('' if unknown)."""
        return self._userfield('username')

    def cloudaccount(self) -> str:
        """Return the authenticated cloudaccount ('' if unknown)."""
        return self._userfield('cloudaccount')

    def customernumber(self) -> str:
        """Return the authenticated customernumber ('' if unknown)."""
        return self._userfield('customernumber')

    def _authflag(self, flagname: str) -> bool:
        """Return an authentication flag as bool (False if absent)."""
        return self._toboolean(self.authentication().get(flagname, False))

    def auth_required(self) -> bool:
        """Return True when authentication is required to use this module."""
        return self._authflag('required')

    def allowed_with_apikey(self) -> bool:
        """Return True when authentication with an API key is allowed."""
        return self._authflag('apikey')

    def anonymous(self) -> bool:
        """Return True when anonymous use is allowed."""
        return self._authflag('anonymous')

    def data(self, modulecommands: List[str]) -> Dict[str, str]:
        """Parse CLI tokens into a parameter dict.

        Arguments:
            modulecommands: the tokens after the module name, e.g.
                ['add', 'sshkey', 'xxxxxxx', 'description', 'mykey']
        Returns:
            {'sshkey': 'xxxxxxx', 'description': 'mykey'}
        """
        postdata: Dict[str, str] = dict()
        currentarg = str()
        currentfunction = str()
        functionexists = False
        for cli in modulecommands:
            if cli in self.functions():
                functionexists = True
                currentfunction = cli
                continue
            if (cli in self.required_arguments(currentfunction)
                    or cli in self.optional_arguments(currentfunction)):
                currentarg = cli
                postdata[currentarg] = ''
                continue
            if functionexists:
                # JSON-looking values are decoded so nested structures work.
                if cli.startswith('{') or cli.startswith('['):
                    postdata[currentarg] = json.loads(cli)
                else:
                    postdata[currentarg] = cli
        return postdata

    def url(self, functioncommands: List[str]) -> str:
        """Return '/<module>/<function>' for the first token naming a known
        function, else '/<module>'.

        Built with literal '/' joins: the original used os.path.join, whose
        separator is OS dependent and would corrupt URL paths on Windows.
        """
        for cli in functioncommands:
            if cli in self.functions():
                return '/' + self.modulename + '/' + cli
        return '/' + self.modulename

    def requestdata(self, functioncommands: List[str]) -> Tuple[str, Union[Dict[str, str], None]]:
        """Build (url, body) for a request.

        Arguments:
            functioncommands: e.g. ['add', 'description', 'my_key']
        Returns:
            ('/sshkey/add', {...}) for POST-able functions,
            ('/sshkey/list', None) for GET-only functions,
            ('/', None) when the function supports neither.
        """
        if self.post_allowed(functioncommands[0]):
            return self.url(functioncommands), self.data(functioncommands)
        if self.get_allowed(functioncommands[0]):
            return self.url(functioncommands), None
        return '/', None
class apimodules:
    """Discovers and calls the remote API's modules.

    The constructor fetches api/listfunctions/ from the server, so creating
    an instance performs network I/O.
    """
    def __init__(self, apiserver: str, apiuser: str, apikey: str):
        self.apiserver = apiserver
        self.apiuser = apiuser
        self.apikey = apikey
        # Module catalogue, fetched once up front.
        self.modulesjson = json.loads(self.apicall_get('api/listfunctions/'))

    def _headers(self, acceptjson: bool) -> Dict[str, str]:
        """Accept header for the wanted response format."""
        return {'Accept': 'application/json' if acceptjson else 'application/xml'}

    def apicall_get(self, function: str, acceptjson: bool = True) -> str:
        """HTTP GET <apiserver>/<function>; return the decoded body."""
        url = urllib.parse.urljoin(self.apiserver, function)
        auth = requests.auth.HTTPBasicAuth(self.apiuser, self.apikey)
        request = requests.get(url=url, timeout=(5, 14), auth=auth,
                               headers=self._headers(acceptjson))
        return request.content.decode()

    def apicall_post(self, function: str, body: Dict[str, Any],
                     acceptjson: bool = False) -> str:
        """HTTP POST a JSON body to <apiserver>/<function>; return the body."""
        url = urllib.parse.urljoin(self.apiserver, function)
        auth = requests.auth.HTTPBasicAuth(self.apiuser, self.apikey)
        request = requests.post(url=url, timeout=(5, 14), auth=auth,
                                json=body, headers=self._headers(acceptjson))
        return request.content.decode()

    def module(self, modulename: str) -> 'apimodule':
        """Return an apimodule for `modulename`, caching its JSON
        description in ./<modulename>.json between calls."""
        if os.path.exists(modulename + '.json'):
            jsondata = self.readfile(modulename + '.json')
        else:
            jsondata = self.apicall_get(modulename)
            with open(modulename + '.json', 'w') as jsonfile:
                jsonfile.write(jsondata)
        return apimodule(modulename, jsondata)

    def readfile(self, path: str) -> str:
        """Read a whole file as text."""
        with open(path, 'r') as jsonfile:
            return jsonfile.read()

    def listmodules(self) -> List[str]:
        """Return the names of all modules advertised by the server."""
        return list(self.modulesjson['response']['modules'].keys())

    def suboptions(self, modulename: str, text: str) -> List[str]:
        """Tab-completion candidates: functions of `modulename` starting
        with `text`.

        Returns [] for an unknown module; the original fell through to an
        unbound local (`current`) here and raised NameError.
        """
        if modulename not in self.listmodules():
            return list()
        current = self.module(modulename)
        return [x + ' ' for x in current.functions() if x.startswith(text)]

    def subrequired(self, modulename: str, suboption: str, text: str) -> List[str]:
        """Completion candidates for a function's required and optional
        arguments starting with `text`.

        Returns [] when `suboption` is not a function of the module; the
        original fell through to an unbound local and raised NameError.
        """
        current = self.module(modulename)
        if suboption not in current.functions():
            return list()
        required = [x + ' ' for x in current.required_arguments(suboption) if x.startswith(text)]
        optional = [x + ' ' for x in current.optional_arguments(suboption) if x.startswith(text)]
        return required + optional

    def complete(self, text: str, state: int) -> Union[str, None]:
        """readline completer entry point: module names, then functions,
        then argument names, depending on how many tokens are typed."""
        try:
            tokens = readline.get_line_buffer().split()
            commands = self.listmodules()
            results = [x + ' ' for x in commands if x.startswith(text)] + [None]
            if len(tokens) in (1, 2) and tokens[0] in commands:
                results = self.suboptions(tokens[0], text) + [None]
                if len(tokens) == 2 and text == '' \
                        and tokens[1] in self.module(tokens[0]).functions():
                    results = self.subrequired(tokens[0], tokens[1], text) + [None]
            if len(tokens) > 2:
                results = self.subrequired(tokens[0], tokens[1], text) + [None]
            return results[state]
        except Exception as e:
            print(e)
            # None tells readline there are no (more) completions; the
            # original returned '' here, which readline treats as a match.
            return None
def main():
    """CLI entry point: read credentials from gapicli.conf, take commands
    from argv or an interactive tab-completed prompt, issue the API call
    and print the raw response."""
    parser = argparse.ArgumentParser(description='G API explorer')
    parser.add_argument('commands',
                        nargs='*',
                        type=str,
                        help='Commands to execute instead of interactive'
                        )
    args = parser.parse_args()

    config = configparser.ConfigParser()
    if os.path.exists('gapicli.conf'):
        config.read('gapicli.conf')
    else:
        print("Configuration file was not found")
        sys.exit(0)
    apiserver = config['main']['apiserver']
    apiuser = config['main']['apiuser']
    apikey = config['main']['apikey']

    readline.parse_and_bind("tab: complete")
    api = apimodules(apiserver, apiuser, apikey)

    cmds = list()
    if not args.commands:
        # Interactive mode with tab completion.
        readline.set_completer(api.complete)
        try:
            line = input(f"{apiuser}@{apiserver}> ")
            cmds = shlex.split(line)
        except KeyboardInterrupt:
            pass
    else:
        cmds = args.commands
    if not cmds:
        sys.exit(0)

    module = api.module(cmds.pop(0))
    remoteurl, data = module.requestdata(cmds)
    if data is None:
        # BUG FIX: apicall_get has no 'json' parameter; the original call
        # passed json=True and raised TypeError on every GET request.
        print(api.apicall_get(remoteurl, acceptjson=True))
    else:
        print(api.apicall_post(remoteurl, data, acceptjson=True))
    # Drop the per-module JSON cache written by api.module().
    if os.path.exists(module.modulename + '.json'):
        os.remove(module.modulename + '.json')
if __name__ == '__main__':
main()
| thka2315/gapicli | gapicli.py | gapicli.py | py | 14,177 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 4... |
23165249908 | # -*- coding: utf-8 -*-
# =============================================================================
# Author : Ahmadreza Farmahini Farahani
# Created Date : 2023/4
# Project : This project is developed for "Machine Learning and Pattern Recognition" course
# Supervisor : Prof. Sandro Cumani
# =============================================================================
import numpy as np
from utils import vrow
from utils import single_fold
from classifiers import LR
class DCF:
    """Binary Detection Cost Function utilities.

    `scores` is flattened to 1-D via vrow; `labels` is expected to be a 2-D
    row vector of shape (1, N) -- the confusion-matrix code indexes
    labels[0].  `cfn` / `cfp` are the costs of a false negative / false
    positive.
    """
    def __init__(self, cfn, cfp, scores, labels):
        self.scores = vrow(scores)[0]
        self.cfn = cfn
        self.cfp = cfp
        self.labels = labels

    def assign_labels(self, pi, th=None):
        """Threshold the scores (strictly greater); the default `th` is the
        Bayes-optimal threshold for prior `pi` and costs cfn/cfp."""
        if th is None:
            th = -np.log(pi * self.cfn) + np.log((1 - pi) * self.cfp)
        return np.int32(self.scores > th)

    def compute_conf_matrix_binary(self, pred):
        """Return the 2x2 confusion matrix c[predicted, actual]."""
        c = np.zeros((2, 2))
        c[0, 0] = ((pred == 0) * (self.labels[0] == 0)).sum()
        c[0, 1] = ((pred == 0) * (self.labels[0] == 1)).sum()
        c[1, 0] = ((pred == 1) * (self.labels[0] == 0)).sum()
        c[1, 1] = ((pred == 1) * (self.labels[0] == 1)).sum()
        return c

    def compute_emp_bayes_binary(self, cm, pi):
        """Unnormalized empirical Bayes risk from a confusion matrix."""
        fnr = cm[0, 1] / (cm[0, 1] + cm[1, 1])
        fpr = cm[1, 0] / (cm[0, 0] + cm[1, 0])
        return pi * self.cfn * fnr + (1 - pi) * self.cfp * fpr

    def compute_normalized_emp_bayes(self, cm, pi):
        """Bayes risk normalized by the cost of the best dummy classifier."""
        emp_bayes = self.compute_emp_bayes_binary(cm, pi)
        return emp_bayes / min(pi * self.cfn, (1 - pi) * self.cfp)

    def compute_act_dcf(self, pi, th=None):
        """Actual (normalized) DCF at a given threshold."""
        pred = self.assign_labels(pi, th=th)
        cm = self.compute_conf_matrix_binary(pred)
        return self.compute_normalized_emp_bayes(cm, pi)

    def _candidate_thresholds(self):
        """Sorted score values extended with -inf/+inf sentinels.

        BUG FIX: the original called np.concatenate but discarded its
        result, so the +/-inf endpoint thresholds were never evaluated in
        compute_min_dcf / compute_min_dcf_threshold.
        """
        t = np.sort(np.array(self.scores))
        return np.concatenate([np.array([-np.inf]), t, np.array([np.inf])])

    def compute_min_dcf(self, pi):
        """Minimum DCF over all candidate thresholds."""
        dcfarray = np.array([self.compute_act_dcf(pi, th=_th)
                             for _th in self._candidate_thresholds()])
        return dcfarray.min()

    def compute_min_dcf_threshold(self, pi):
        """Threshold value(s) at which the minimum DCF is attained."""
        t = self._candidate_thresholds()
        dcfarray = np.array([self.compute_act_dcf(pi, th=_th) for _th in t])
        return t[np.where(dcfarray == dcfarray.min())]

    def bayes_error_plot(self, p_array, min_cost=False, scores=None):
        """DCF values over effective-prior log-odds `p_array`.

        NOTE: passing `scores` replaces self.scores for all later calls on
        this instance as well (preserved from the original behavior).
        """
        if scores is not None:
            self.scores = scores
        y = []
        for p in p_array:
            pi = 1 / (1 + np.exp(-p))
            y.append(self.compute_min_dcf(pi) if min_cost
                     else self.compute_act_dcf(pi))
        return np.array(y)

    def calibrated_plot(self, p_array, th):
        """Actual DCF at a fixed threshold, over prior log-odds `p_array`."""
        y = []
        for p in p_array:
            pi = 1 / (1 + np.exp(-p))
            y.append(self.compute_act_dcf(pi, th=th))
        return np.array(y)

    def det_plot(self):
        """FNR/FPR pairs (in percent) for a DET curve; points with both
        rates below 5% are skipped, matching the original plotting range."""
        fnrs = []
        fprs = []
        sorted_threasholds = self.scores.copy()
        sorted_threasholds.sort()
        for th in sorted_threasholds:
            pred = self.assign_labels(pi=0.5, th=th)
            cm = self.compute_conf_matrix_binary(pred)
            fnr = cm[0, 1] / (cm[0, 1] + cm[1, 1]) * 100
            fpr = cm[1, 0] / (cm[0, 0] + cm[1, 0]) * 100
            if fpr < 5 and fnr < 5:
                continue
            fnrs.append(fnr)
            fprs.append(fpr)
        return fnrs, fprs
class calibration:
    """Score calibration backed by a prior-weighted, class-balanced
    logistic regression (LR)."""

    def __init__(self, pi=0.5, lambd=0.00001):
        # Target prior and L2 regularization strength for the LR.
        self.pi = pi
        self.lambd = lambd
        self.model = LR()

    def fit(self, scores, labels):
        """Train the calibration LR on raw scores."""
        self.model.fit(scores, labels, self.lambd, pi=self.pi, balance=True)

    def predict(self, scores):
        """Return calibrated scores from the trained LR."""
        return self.model.predict(scores)
class fusion:
    """Score-level fusion of multiple classifiers via a prior-weighted,
    class-balanced logistic regression (LR).

    NOTE: intentionally identical in behavior to `calibration`; the two are
    kept as separate classes for naming clarity at the call sites.
    """

    def __init__(self, pi=0.5, lambd=0.00001):
        # Target prior and L2 regularization strength for the LR.
        self.pi = pi
        self.lambd = lambd
        self.model = LR()

    def fit(self, scores, labels):
        """Train the fusion LR on the stacked classifier scores."""
        self.model.fit(scores, labels, self.lambd, pi=self.pi, balance=True)

    def predict(self, scores):
        """Return fused scores from the trained LR."""
        return self.model.predict(scores)
def thresh_calibration(scores_train, labels_train, scores_test, labels_test, pi):
    """Compare validation-set DCFs for prior `pi` (unit costs): the minimum
    DCF, the actual DCF at the theoretical Bayes threshold, and the actual
    DCF at the threshold that minimized DCF on the training scores.

    Returns (emprical_min, emprical_act, threshold_act).
    """
    dcf_train = DCF(1, 1, scores_train, labels_train)
    dcf_valid = DCF(1, 1, scores_test, labels_test)
    # Threshold estimated on training data, then applied to validation data.
    estimated_th = dcf_train.compute_min_dcf_threshold(pi)
    theoretical_th = -np.log(pi / (1 - pi))
    return (dcf_valid.compute_min_dcf(pi),
            dcf_valid.compute_act_dcf(pi, th=theoretical_th),
            dcf_valid.compute_act_dcf(pi, th=estimated_th))
| ahmadrezafrh/Gender-Identification-MLPR | postprocess.py | postprocess.py | py | 4,786 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.vrow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 31,... |
5099702172 | import json
from flask import Blueprint, request
from werkzeug.datastructures import ImmutableMultiDict
from api import http_status
from api.errors import ApiException, ValidationException
from api.forms.module_set_config import ModuleSetConfigForm
from api.forms.module_set_value import ModuleSetValueForm
from core.models import Module, DeviceTypeDatapoint, Protocol, Device, ModuleDeviceType, DataType
from mqtt.client import mqtt_client
from mqtt.errors import MQTTException
blueprint = Blueprint('blueprint', __name__, template_folder='templates', static_folder='static')
# DEVICE_TYPE_DATAPOINT
@blueprint.route('/datapoints', methods=['GET'])
def list_datapoints():
datapoints = DeviceTypeDatapoint.query.all()
return json.dumps(
[item.summary() for item in datapoints]
), 200, {'ContentType': 'application/json'}
@blueprint.route('/datapoints/<datapoint_id>', methods=['GET'])
def get_datapoint(datapoint_id):
datapoint = DeviceTypeDatapoint.query.filter(DeviceTypeDatapoint.id == datapoint_id).first()
return json.dumps(
datapoint.summary()
), 200, {'ContentType': 'application/json'}
# DATATYPE
@blueprint.route('/datatypes', methods=['GET'])
def list_datatypes():
datatypes = DataType.query.all()
return json.dumps(
[item.summary() for item in datatypes]
), 200, {'ContentType': 'application/json'}
@blueprint.route('/datatypes/<datatypes_id>', methods=['GET'])
def get_datatype(datatypes_id):
datatype = DataType.query.filter(DataType.id == datatypes_id).first()
return json.dumps(
datatype.summary()
), 200, {'ContentType': 'application/json'}
# MODULE_DEVICE_TYPE
@blueprint.route('/devicetypes', methods=['GET'])
def list_device_types():
module_device_types = ModuleDeviceType.query.all()
return json.dumps(
[item.summary() for item in module_device_types]
), 200, {'ContentType': 'application/json'}
@blueprint.route('/devicetypes/<device_type_id>', methods=['GET'])
def get_device_type(device_type_id):
module_device_type = ModuleDeviceType.query.filter(ModuleDeviceType.id == device_type_id).first()
return json.dumps(
module_device_type.summary()
), 200, {'ContentType': 'application/json'}
# PROTOCOLS
@blueprint.route('/protocols', methods=['GET'])
def list_protocols():
protocols = Protocol.query.all()
return json.dumps(
[item.summary() for item in protocols]
), 200, {'ContentType': 'application/json'}
@blueprint.route('/protocols/<protocol_id>', methods=['GET'])
def get_protocol(protocol_id):
protocol = Protocol.query.filter(Protocol.id == protocol_id).first()
return json.dumps(
protocol.summary()
), 200, {'ContentType': 'application/json'}
# DEVICES
@blueprint.route('/devices', methods=['GET'])
def list_devices():
devices = Device.query.all()
return json.dumps(
[item.summary() for item in devices]
), 200, {'ContentType': 'application/json'}
@blueprint.route('/devices/<devices_id>', methods=['GET'])
def get_device(devices_id):
device = Device.query.filter(Device.id == devices_id).first()
return json.dumps(
device.summary()
), 200, {'ContentType': 'application/json'}
# MODULES
@blueprint.route('/modules', methods=['GET'])
def list_modules():
modules = Module.query.all()
return json.dumps(
[item.summary() for item in modules]
), 200, {'ContentType': 'application/json'}
@blueprint.route('/modules/<module_id>', methods=['GET'])
def get_module(module_id):
module = Module.query.filter(Module.id == module_id).first()
return json.dumps(
module.summary()
), 200, {'ContentType': 'application/json'}
# MODULES_OPERATIONS
@blueprint.route('/modules/request', methods=['POST'])
def request_all():
    """Validate the posted payload against the module list and echo it back.

    BUG FIX: the route '/modules/request' defines no <module_id> URL
    placeholder, yet the view function required a module_id parameter, so
    Flask raised TypeError on every request. The unused parameter is
    removed to match the route.
    """
    json_data = ImmutableMultiDict(request.get_json(force=True))
    form = ModuleSetValueForm(json_data, meta={'csrf': False})
    if not form.validate():
        raise ValidationException(form.errors)
    modules = Module.query.all()
    if not modules:
        # No modules registered at all.
        raise ApiException('Daný modul sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
    data = form.data
    return json.dumps(data), 200, {'ContentType': 'application/json'}
@blueprint.route('/modules/<module_id>/request', methods=['POST'])
def request_module(module_id):
json_data = ImmutableMultiDict(request.get_json(force=True))
form = ModuleSetValueForm(json_data, meta={'csrf': False})
if not form.validate():
raise ValidationException(form.errors)
module = Module.query.get(module_id)
if not module:
raise ApiException('Daný modul sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
data = form.data
return json.dumps(data), 200, {'ContentType': 'application/json'}
@blueprint.route('/modules/<module_id>/update', methods=['POST'])
def update_module(module_id):
json_data = ImmutableMultiDict(request.get_json(force=True))
form = ModuleSetValueForm(json_data, meta={'csrf': False})
if not form.validate():
raise ValidationException(form.errors)
module = Module.query.get(module_id)
if not module:
raise ApiException('Daný modul sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
data = form.data
return json.dumps(data), 200, {'ContentType': 'application/json'}
@blueprint.route('/modules/<module_id>/set_value', methods=['POST'])
def set_value_module(module_id):
json_data = ImmutableMultiDict(request.get_json(force=True))
form = ModuleSetValueForm(json_data, meta={'csrf': False})
if not form.validate():
raise ValidationException(form.errors)
data = form.data
module = Module.query.get(module_id)
if not module:
raise ApiException('Daný modul sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
device = module.devices.filter_by(id=data.get('device_id')).first()
del data['device_id']
if not device:
raise ApiException('Dané zariadenie sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
datapoint = module.module_device_type.device_type_datapoints.filter_by(code=data.get('datapoint')).first()
if not datapoint:
raise ApiException('Daný datapoint sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
data['device_uuid'] = str(device.uuid)
# TODO: Ziskat sequence number (odniekial)
data['sequence_number'] = 123
try:
response = mqtt_client.send_message(module.mac, 'SET_VALUE', json.dumps(data))
except MQTTException as e:
raise ApiException(e.message, status_code=http_status.HTTP_400_BAD_REQUEST, previous=e)
return json.dumps(response), 200, {'ContentType': 'application/json'}
@blueprint.route('/modules/<module_id>/config', methods=['POST'])
def config_module(module_id):
json_data = ImmutableMultiDict(request.get_json(force=True))
form = ModuleSetConfigForm(json_data, meta={'csrf': False})
if not form.validate():
raise ValidationException(form.errors)
request_data = form.data
module = Module.query.get(module_id)
if not module:
raise ApiException('Daný modul sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
device = module.devices.filter_by(id=request_data.get('device_id')).first()
if not device:
raise ApiException('Dané zariadenie sa nepodarilo nájsť.', status_code=http_status.HTTP_404_NOT_FOUND)
device.poll_rate = request_data.get('poll_rate')
device.save()
data = {
str(device.uuid): {
'address': request_data.get('address'),
'poll_rate': request_data.get('poll_rate')
}
}
try:
response = mqtt_client.send_message(module.mac, 'SET_CONFIG', json.dumps(data))
except MQTTException as e:
raise ApiException(e.message, status_code=http_status.HTTP_400_BAD_REQUEST, previous=e)
return json.dumps(response), 200, {'ContentType': 'application/json'}
| brewmajsters/brewmaster-backend | api/routes.py | routes.py | py | 8,055 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "core.models.DeviceTypeDatapoint.query.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "core.models.DeviceTypeDatapoint.query",
"line_number": 18,
"usage_type": "attri... |
72744196835 | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name='firstpage'),
path("posts", views.posts, name='posts'),
path("posts/<slug>", views.posts_detail, name='info'), #slug - posts/myfirstpost
]
| StrixPO/Django_blog_skeleton | my_site/blog/urls.py | urls.py | py | 246 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
}
] |
27393257484 | #!/usr/bin/env python
from tasks import *
from utils.functions import get_folder
from pathlib import Path
import config
# Run all tasks here, can also comment out those not wanted
def main():
    """Run the full analysis pipeline in dependency order.

    Each stage writes CSV/plot files that later stages read (see the
    REQUIRED comments below); a stage may be commented out only if every
    stage depending on its output is commented out as well.
    """
    # Create output.csv in output folder using files in data (starting with ASO)
    create_output() # get correct exp, zscore by bio replicate and mean of exp zscore (renormalization here)
    # # REQUIRED: output.csv
    create_avg_exp() # Create avg_exp.csv containing Exp zscore range and histograms from output.csv (unfiltered)
    create_control_hist() # Make control histograms
    # # REQUIRED: avg_exp_renormalized_to_neg.csv
    create_tiers(drop_output=False) # Create tiers.csv
    create_r_squared() # make r squared plots from WT, MT
    create_error_bars() # Make error bars
    # # REQUIRED: tiers.csv
    create_tier_plots() # Make plots showing tiers of samples
if __name__=="__main__":
# Check all directories are created
# renormalized
folder = get_folder()
Path(folder).mkdir(parents=True, exist_ok=True)
Path(folder + "Control Plots").mkdir(parents=True, exist_ok=True)
Path(folder + "Tiers").mkdir(parents=True, exist_ok=True)
Path(folder + "Z Score Range Plots").mkdir(parents=True, exist_ok=True)
# run tasks
main() | tyulab/ixCells | run.py | run.py | py | 1,309 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.functions.get_folder",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
746501982 | # Flask-TurnKey Version 0.0.1
# App.py
# Flask
from flask import Flask
# Flask-TurboDuck
from flask_turboduck.db import Database
app = Flask(__name__)
app.config.from_object('config.Configuration')
db = Database(app)
def create_tables():
    # Create the database table(s) backing the app's models.
    # NOTE(review): `User` is neither defined nor imported anywhere in this
    # module, so calling this function as-is raises NameError -- confirm
    # where the User model should be imported from before using it.
    User.create_table()
| DommertTech/flask-turnkey | flask_turnkey/app.py | app.py | py | 266 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_turboduck.db.Database",
"line_number": 12,
"usage_type": "call"
}
] |
17358841868 | import yaml
from pprint import pprint
from netmiko import ConnectHandler
# Device inventory: a YAML mapping of device names to netmiko kwargs.
inventory_path = "/home/mwhite/.netmiko.yml"
with open(inventory_path) as f:
    # BUG FIX: yaml.load() without an explicit Loader is unsafe for
    # untrusted input, deprecated in PyYAML 5.x and a TypeError in 6.x;
    # safe_load is the correct call for plain configuration data.
    yaml_out = yaml.safe_load(f)

device = yaml_out["cisco3"]
# Connect to the device and print its CLI prompt.
with ConnectHandler(**device) as connected:
    output = connected.find_prompt()
    print(output)
| mickdcsw/PyPlus | class3/class3_task5.py | class3_task5.py | py | 287 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "netmiko.ConnectHandler",
"line_number": 12,
"usage_type": "call"
}
] |
10681643242 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SRCNN(nn.Module):
    """Super-Resolution CNN (Dong et al., 2014): three conv stages that map
    a (bicubic-upscaled) low-resolution image to a high-resolution one.

    Args:
        c: number of input channels.
        n1, n2, n3: output channels of the three stages (n3 is the number
            of output channels, typically equal to c).
        f1, f2, f3: kernel sizes of the three stages (expected odd).
    """
    def __init__(self, c, n1, n2, n3, f1, f2, f3):
        super(SRCNN, self).__init__()
        # 'Same' padding (f // 2 for odd kernels) keeps the spatial size
        # constant through each stage.  The original hard-coded padding
        # 4/1/2, which is only correct for the classic 9-3-5 kernel
        # configuration; deriving padding from the kernel size generalizes
        # to other odd kernel sizes while producing identical layers for
        # the 9-3-5 default.
        # patch extraction and representation
        self.F1 = nn.Conv2d(
            in_channels=c,
            out_channels=n1,
            kernel_size=f1,
            stride=1,
            padding=f1 // 2,
        )
        # non-linear mapping
        self.F2 = nn.Conv2d(
            in_channels=n1,
            out_channels=n2,
            kernel_size=f2,
            stride=1,
            padding=f2 // 2,
        )
        # reconstruction (no final activation: output is a residual image)
        self.F3 = nn.Conv2d(
            in_channels=n2,
            out_channels=n3,
            kernel_size=f3,
            stride=1,
            padding=f3 // 2,
        )

    def forward(self, low_res_img):
        """Map a low-resolution batch (N, c, H, W) to (N, n3, H, W)."""
        patches = F.relu(self.F1(low_res_img))
        mapping = F.relu(self.F2(patches))
        high_res = self.F3(mapping)
        return high_res
return high_res | thepooons/SRCNN | src/models.py | models.py | py | 833 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
73176238435 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pexen",
version="0.0.3",
author="Jiri Jaburek",
author_email="comps@nomail.dom",
description="Python EXecution ENvironment, scheduler included",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/comps/pexen",
packages=setuptools.find_packages(),
setup_requires=["pytest-runner"],
tests_require=["pytest", "psutil"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| comps/pexen | setup.py | setup.py | py | 792 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 15,
"usage_type": "call"
}
] |
72449217314 | #!/usr/bin/python3
"""Gets user, their tasks done, undone and total
and prints the done tasks"""
from requests import get
from sys import argv
if __name__ == '__main__':
url1 = get('https://jsonplaceholder.typicode.com/todos?userId=' + argv[1])
url2 = get('https://jsonplaceholder.typicode.com/users/' + argv[1])
d_todos = url1.json()
d_users = url2.json()
completed = 0
name = d_users.get('name')
for task in d_todos:
if task.get('completed'):
completed += 1
print("Employee {} is done with tasks({}/{}):".format(
name, completed, len(d_todos)))
for task in d_todos:
if task.get('completed'):
print("\t {}".format(task.get('title')))
| AyshaMuss/holberton-system_engineering-devops | 0x15-api/0-gather_data_from_an_API.py | 0-gather_data_from_an_API.py | py | 725 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 10,
... |
34902152990 | """DjangoBBSForum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from bbs import views
urlpatterns = [
path('', views.index, name="bbs"),
path('category/<int:category_id>/', views.category),
path('article/<int:article_id>/', views.article_detail, name="article_detail"),
path('comment/', views.comment, name="post_comment"),
path('comment_list/<int:article_id>/', views.get_comments, name="get_comments"),
path('new_article/', views.new_article, name="new_article"),
path('latest_article_count/', views.get_latest_article_count, name="get_latest_article_count"),
]
| solost23/DjangoBBSForum | bbs/urls.py | urls.py | py | 1,196 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bbs.views.index",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "bbs.views",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
... |
28666281069 | from urllib.request import urlopen
from bs4 import BeautifulSoup
# 텍스트 읽기 - 영어
url = 'https://www.pythonscraping.com/pages/warandpeace/chapter1.txt'
textPage = urlopen(url)
print(textPage.read()[:1000])
print('='*100)
# 러시아어+프랑스어
url = 'https://www.pythonscraping.com/pages/warandpeace/chapter1-ru.txt'
textPage = urlopen(url)
print(str(textPage.read(), 'utf-8')[:1000]) # 명시적으로 인코딩 지정
print('='*100)
# BeautifulSoup 를 이용하는 경우, byte string 으로 전환 후 decode!
html = urlopen('https://en.wikipedia.org/wiki/Python_(programming_language)')
bs = BeautifulSoup(html, 'html.parser')
content = bs.find('div', {'id':'mw-content-text'}).get_text()
content = bytes(content, 'UTF-8')
content = content.decode('UTF-8')
print(content[:1000])
| japark/PythonWebScraper | ReadFiles/read_txt.py | read_txt.py | py | 807 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "b... |
45737922134 | from turtle import Screen
from paddle import Paddle
from ball import Ball
from scoreboard import Scoreboard
import time
# Create the game window
screen = Screen()
screen.bgcolor('black')
screen.setup(800, 600)
screen.title('My Pong Game')
screen.tracer(0)
# Create the right paddle and left paddle
r_paddle = Paddle(350, 0)
l_paddle = Paddle(-350, 0)
# Listen for keyboard events
screen.listen()
screen.onkey(r_paddle.go_up, 'Up')
screen.onkey(r_paddle.go_down, 'Down')
screen.onkey(l_paddle.go_up, 'w')
screen.onkey(l_paddle.go_down, 's')
# Game state flag
game_on = True
# Create the pong ball
ball = Ball()
scoreboard = Scoreboard()
# Game loop
while game_on:
# Add a short delay to control the frame rate
time.sleep(ball.move_speed)
# Update the game screen
screen.update()
# Move the pong ball
ball.move_ball()
# Change vertical direction if the ball hits the top or bottom
if ball.ycor() > 280 or ball.ycor() < -280:
ball.bounce_y()
# Change horizontal direction if the ball hits a paddle within position range
if (ball.distance(r_paddle) < 40 and ball.xcor() > 320) or (ball.distance(l_paddle) < 40 and ball.xcor() < -320):
ball.bounce_x()
# Reset ball's position if it goes beyond the right edge
if ball.xcor() > 380:
ball.reset_position()
scoreboard.l_point()
# Reset ball's position if it goes beyond the left edge
if ball.xcor() < -380:
ball.reset_position()
scoreboard.r_point()
# Click the game window to exit the game
screen.exitonclick()
| LJW92/PythonProjects | GUI Projects/PongGame/main.py | main.py | py | 1,570 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "turtle.Screen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "paddle.Paddle",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "paddle.Paddle",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ball.Ball",
"line_number... |
11496524767 | import praw
from praw.models import MoreComments
import datetime
sarcComments = []
def readReplies(comment, regFile, sarcFile):
replies = comment.replies
for reply in replies:
if isinstance(reply, MoreComments):
continue
body = reply.body.lower()
tokens = body.split(". ")
if ("/s" in reply.body and "https" not in reply.body):
pos = reply.body.index("/s")
if (pos + 2 < len(reply.body) and reply.body[pos + 2].isalpha()):
#dropped.write(reply.body + "\n")
print("IN IF STATMENT")
continue
sarcFile.write(" ~cmt~ " + reply.body + " ~end~ \n")
#else:
#regFile.write(" ~cmt~ " + reply.body + " ~end~ \n")
readReplies(reply, regFile, sarcFile)
def readComments(submission, regFile, sarcFile):
#file = open("comments.txt", "w+")
comments = submission.comments
for comment in comments:
if isinstance(comment, MoreComments):
continue
body = comment.body.lower()
tokens = body.split(". ")
if("/s" in comment.body and "https" not in comment.body):
pos = comment.body.index("/s")
if(pos+2 < len(comment.body) and comment.body[pos+2].isalpha()):
#dropped.write(comment.body + "\n")
print("IN IF STATMENT")
continue
sarcFile.write(" ~cmt~ " + comment.body + " ~end~ \n")
else:
regFile.write(" ~cmt~ " + comment.body + " ~end~ \n")
readReplies(comment, regFile, sarcFile)
#file.close()
def main():
reddit = praw.Reddit(client_id='qGIyN9bTIN9q9Q',
client_secret='8Ikzq1_l4Nrx0bFtMYRXlalj0JE',
user_agent='SarcasmDetector',
username='NLPSarcasm',
password='CSCInlp404')
sub = reddit.subreddit('politics')
print(sub.title)
# reg = open("regJokeComments.txt", "w+")
# sarc = open("sarcJokeComments.txt", "w+")
# currTime = datetime.datetime.now()
# for submission in sub.top(limit=5000):
# print("================================")
# print(submission.title)
# print(submission.score)
# print(submission.id)
# print(submission.url)
# # reg.write(submission.title + "\n")
# # sarc.write(submission.title + "\n")
# readComments(submission, reg, sarc)
# reg.close()
# sarc.close()
reg = open("regTestComments.txt", "w+")
sarc = open("sarcTestComments.txt", "w+")
currTime = datetime.datetime.now()
for submission in sub.hot(limit=50):
print("================================")
print(submission.title)
print(submission.score)
print(submission.id)
print(submission.url)
# reg.write(submission.title + "\n")
# sarc.write(submission.title + "\n")
readComments(submission, reg, sarc)
reg.close()
sarc.close()
print("Data retrieved at: " + str(currTime))
main()
| chrisw2529/Natural-Language-Processing | finalProj/collectData.py | collectData.py | py | 3,077 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "praw.models.MoreComments",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "praw.models.MoreComments",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "praw.Reddit",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": ... |
70520052195 | import json
from django.shortcuts import render, redirect
from .forms import *
from .models import *
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.core import serializers
# Create your views here.
# @login_required(login_url="login")
def home(request):
items = Item.objects.all()
# items_json = serializers.serialize('json', items)
return render(request, "home.html", {"items": items})
@login_required(login_url="login")
def item_detail(request, pk):
item = Item.objects.get(id=pk)
flag = True
if item.owner == request.user:
flag = False
for i in item.users.all():
if i == request.user:
flag = False
print(flag)
if request.method == "POST":
item.users.add(request.user)
item.save()
return redirect("/")
return render(request, "item_detail.html", {"item": item, "flag": flag})
# def getItems(request):
# items = Item.objects.all().values()
# data = list(items)
# return JsonResponse({"items": data}, safe=False)
@login_required(login_url="login")
def myItems(request):
user = request.user
items = user.owned_items.all()
context = {
"user": user,
"items": items,
}
return render(request, "myItems.html", context)
@login_required(login_url="login")
def electronics(request):
electronic_items = Item.objects.filter(category="Electronics")
return render(request, "electronics.html", {"items": electronic_items})
@login_required(login_url="login")
def fashion(request):
return render(
request, "fashion.html", {"items": Item.objects.filter(category="Fashion")}
)
@login_required(login_url="login")
def games(request):
return render(
request, "games.html", {"items": Item.objects.filter(category="Toys and Games")}
)
@login_required(login_url="login")
def sports(request):
return render(
request,
"sports.html",
{"items": Item.objects.filter(category="Sports and Outdoors")},
)
def signup(req):
form = UserForm()
if req.method == "POST":
form = UserForm(req.POST)
if form.is_valid():
user = form.save()
UserProfile.objects.create(user=user)
messages.success(req, "Account was created for " + user.username)
return redirect("login")
return render(req, "signup.html", {"form": form})
def loginUser(req):
if req.method == "POST":
username = req.POST.get("username")
password = req.POST.get("password")
user = authenticate(req, username=username, password=password)
if user is not None:
login(req, user)
return redirect("home")
else:
messages.info(req, "username or password is inccorect")
return render(req, "login.html")
return render(req, "login.html")
def logoutUser(req):
logout(req)
return redirect("login")
@login_required(login_url="login")
def create_item(request):
if request.method == "POST":
form = ItemForm(request.POST, request.FILES)
if form.is_valid():
item = form.save()
item.owner = request.user
item.save()
return redirect("/")
else:
form = ItemForm()
return render(request, "newItem.html", {"form": form})
| Badr-Mohammed9/shopuProject | shop/views.py | views.py | py | 3,527 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 36,
"usage_type": "call"
},
{
"api_nam... |
19578363552 | import os
import shutil
import subprocess
import tempfile
from functools import wraps
import click
from PIL import ImageOps
from tqdm import tqdm
from .outliner import outliner
def xml_wrap(tag, inner, **kwargs):
kw = ' '.join('%s="%s"' % (k, str(v)) for k,v in kwargs.items())
if inner is None:
return f'<%s %s/>' % (tag, kw)
return '<%s %s>%s</%s>\n' % (tag, kw, inner, tag)
def path(polys, xdim, ydim, em, par):
d = '\n'.join(
'M ' + 'L '.join(
'%d %d ' % (int(x * par * em / ydim), int(y * em / ydim)) for (x, y) in poly
) + 'Z' for poly in polys
)
return xml_wrap('path', None, d=d, fill='currentColor')
def path_to_svg(polys, xdim, ydim, em, par):
return '\n'.join([
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>',
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/SVG/DTD/svg10.dtd">',
xml_wrap('svg', path(polys, xdim, ydim, em, par), width=int(par * xdim * em) / ydim, height=em),
])
def convert(glyphs, ascent, descent, name, par=1, keep=False):
em = 1000
scale = em / (ascent + descent)
print(ascent, descent)
ttf = name
path = tempfile.mkdtemp()
pe = open(os.path.join(path, ttf+'.pe'), 'w')
pe.write('New()\n')
pe.write('SetFontNames("%s", "%s", "%s")\n' % (name, name, name))
pe.write('SetTTFName(0x409, 1, "%s")\n' % name)
pe.write('SetTTFName(0x409, 2, "Medium")\n')
pe.write('SetTTFName(0x409, 4, "%s")\n' % name)
pe.write('SetTTFName(0x409, 5, "1.0")\n')
pe.write('SetTTFName(0x409, 6, "%s")\n' % name)
pe.write('ScaleToEm(%d, %d)\n' % (int(ascent*scale), int(descent*scale)))
pe.write('Reencode("unicode")\n')
for i,v in tqdm(glyphs.items()):
img = ImageOps.invert(v.convert("L"))
polygons = outliner(img)
(xdim, ydim) = img.size
pe.write('SelectSingletons(UCodePoint(%d))\n' % i)
if polygons:
# Only generate the svg file for non-empty glyph (ie not SPACE and similar).
svg = path_to_svg(polygons, xdim, ydim, em, par)
open(os.path.join(path, '%04x.svg' % i), 'w').write(svg)
# FontForge does not like empty SVG files, but if we just don't import anything
# then we get a blank glyph for this codepoint.
pe.write('Import("%s/%04x.svg", 0)\n' % (path, i))
pe.write('SetWidth(%d)\n' % int(par*xdim*em/ydim))
pe.write('CanonicalStart()\n')
pe.write('CanonicalContours()\n')
pe.write('Generate("%s")\n' % ttf)
pe.close()
subprocess.check_call(['fontforge', '-script', os.path.join(path, ttf+'.pe')])
if keep:
print(path)
else:
shutil.rmtree(path)
def converter(f):
@click.argument('ttf', type=click.Path(exists=False), required=True)
@click.option('-k', '--keep', is_flag=True, help='Keep intermediate files.')
@click.option('-a', '--ascent', type=int, default=None, help='Override input ascent.')
@click.option('-d', '--descent', type=int, default=None, help='Override input descent.')
@click.option('-x', '--xscale', type=float, default=1.0, help='X scale.')
@click.option('-y', '--yscale', type=float, default=1.0, help='Y scale.')
@wraps(f)
def _convert(ttf, keep, ascent, descent, xscale, yscale, *args, **kwargs):
glyphs, _ascent, _descent = f(*args, **kwargs)
if ascent is not None:
_ascent = ascent
if descent is not None:
_descent = descent
convert(glyphs, _ascent, _descent, ttf, keep=keep, par=xscale/yscale)
return _convert
| ali1234/bitmap2ttf | bitmap2ttf/convert.py | convert.py | py | 3,625 | python | en | code | 95 | github-code | 1 | [
{
"api_name": "tempfile.mkdtemp",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_numb... |
4246703255 | import pandas as pd
import numpy as np
import umap
import json
from loguru import logger
def convert_str_emb_to_float(emb_list):
float_emb = []
for str_emb in emb_list:
emb = json.loads(str_emb)
float_emb.append(np.array(emb))
return float_emb
def reduce_embedding_dimension(
data,
dimension,
):
float_emb = convert_str_emb_to_float(data)
logger.info(f"reduction to {dimension} dimensions...")
transformer = umap.UMAP(
n_components=dimension,
random_state=42,
n_neighbors=10,
transform_seed=42,
verbose=False,
).fit(float_emb)
emb_reduced = transformer.embedding_.astype(np.float32)
return emb_reduced.tolist()
| pass-culture/data-gcp | jobs/ml_jobs/embeddings/tools/dimension_reduction.py | dimension_reduction.py | py | 717 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "loguru.logger.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_n... |
11819620665 | import cv2
fps = 24
size = (274,512)
videowriter = cv2.VideoWriter("result.avi",cv2.cv.FOURCC('M','J','P','G'),fps,size)
for i in range(1,150):
img = cv2.imread('../results/result%d.png' % i)
if i!=70:
videowriter.write(img)
| Hajiren/Parametric-human-shape-reshaping-for-video | code/videoMake/videoMake.py | videoMake.py | py | 243 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.VideoWriter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cv.FOURCC",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number"... |
41283212288 | from hypothesis import given
from hypothesis.strategies import lists, builds
from cim.collection_validator import validate_collection_unordered
from cim.iec61968.common.test_document import document_kwargs, verify_document_constructor_default, verify_document_constructor_kwargs, \
verify_document_constructor_args, document_args
from zepben.evolve import OperationalRestriction, Equipment
operational_restriction_kwargs = {
**document_kwargs,
"equipment": lists(builds(Equipment), max_size=2),
}
operational_restriction_args = [*document_args, [Equipment()]]
def test_operational_restriction_constructor_default():
or_ = OperationalRestriction()
verify_document_constructor_default(or_)
assert not list(or_.equipment)
@given(**operational_restriction_kwargs)
def test_operational_restriction_constructor_kwargs(equipment, **kwargs):
# noinspection PyArgumentList
or_ = OperationalRestriction(
equipment=equipment,
**kwargs
)
verify_document_constructor_kwargs(or_, **kwargs)
assert list(or_.equipment) == equipment
def test_operational_restriction_constructor_args():
# noinspection PyArgumentList
or_ = OperationalRestriction(*operational_restriction_args)
verify_document_constructor_args(or_)
assert list(or_.equipment) == operational_restriction_args[-1]
def test_equipment_collection():
# noinspection PyArgumentList
validate_collection_unordered(OperationalRestriction,
lambda mrid, _: Equipment(mrid),
OperationalRestriction.num_equipment,
OperationalRestriction.get_equipment,
OperationalRestriction.equipment,
OperationalRestriction.add_equipment,
OperationalRestriction.remove_equipment,
OperationalRestriction.clear_equipment)
| zepben/evolve-sdk-python | test/cim/iec61968/operations/test_operational_restriction.py | test_operational_restriction.py | py | 1,975 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "cim.iec61968.common.test_document.document_kwargs",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "hypothesis.strategies.lists",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "hypothesis.strategies.builds",
"line_number": 11,
"usage_ty... |
74541383072 | # Local imports
import json
import os
# Paths
JLAB_ROOT = os.getcwd()
PACKAGE_JSON = os.path.join(JLAB_ROOT, "dev_mode", "package.json")
EXTERNAL_DEPENDENCIES = {
"@jupyterlab-benchmarks/table-render": "0.1.1"
}
def main():
with open(PACKAGE_JSON, "r") as fh:
data = json.loads(fh.read())
jlab_data = data["jupyterlab"]
if "externalExtensions" in jlab_data:
external_extensions = jlab_data["externalExtensions"]
else:
external_extensions = {}
external_extensions.update(EXTERNAL_DEPENDENCIES)
jlab_data["externalExtensions"] = external_extensions
data["jupyterlab"] = jlab_data
print(json.dumps(data, indent=4, sort_keys=True))
with open(PACKAGE_JSON, "w") as fh:
fh.write(json.dumps(data, indent=4, sort_keys=True))
if __name__ == "__main__":
main()
| jupyterlab/benchmarks | docker/add_table_render.py | add_table_render.py | py | 835 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 15,
... |
34886502654 | import time
from pika import BlockingConnection, ConnectionParameters
from pika.adapters.blocking_connection import BlockingChannel
QUEUE_NAME = "basic_channel"
def send_message(channel: BlockingChannel, body: str):
"""
Send a basic message to the given channel
:param channel: The channel to send to.
:param body: The body to send.
"""
print(f"Sending message: {body}")
channel.basic_publish(
exchange="",
routing_key=QUEUE_NAME,
body=body
)
def run_distributer(connection):
"""
Runs the distributer after we have established a connection.
:param connection: The established connection.
"""
channel = connection.channel()
channel.queue_declare(queue=QUEUE_NAME)
for counter in range(10000):
send_message(channel, f"Message number: {counter}")
connection.close()
if __name__ == "__main__":
while True:
try:
time.sleep(1)
connection = BlockingConnection(ConnectionParameters('rabbitmq'))
break
except Exception:
print("Rabbitmq is down!")
run_distributer(connection) | gnir-work/dockerized-rabbitmq | distributer/distributer/distributer.py | distributer.py | py | 1,161 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pika.adapters.blocking_connection.BlockingChannel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pika.BlockingConnection",
"line_number": 41,
"usage_type": "call"
},
{
... |
30922015541 | """Misc. utilities."""
def endless_range(start=0, step=1):
i = start
while True:
yield i
i += step
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda t: NotImplemented
def mixined(cls, *mixin_clses):
return type(cls.__name__ + "+Mixins", mixin_clses + (cls,), {})
import sys
import imp
from os import walk, path
def load_whole_package(package_modname):
package = __import__(package_modname, fromlist=[""], level=0)
suffixes = imp.get_suffixes()
origin = path.dirname(package.__file__)
for dirn, subdirns, fnames in walk(origin):
subdirns[:] = filter(lambda dn: not dn.startswith("."), subdirns)
curr_package = package_modname + dirn[len(origin):].replace(path.sep, ".")
found_mods = {}
# Phase one: collect modules to load in this directory.
for fname in fnames:
for suffix, mode, type_ in suffixes:
if fname.endswith(suffix):
fpath = path.join(dirn, fname)
desc = suffix, mode, type_
found_mods[fname[:-len(suffix)]] = (fpath, desc)
# Phase two: load modules in order (to get __init__ first and some
# predictability.)
for modname in sorted(found_mods):
fpath, desc = found_mods[modname]
if modname == "__init__":
modname = curr_package
else:
modname = curr_package + "." + modname
# Only load modules if they aren't already.
if modname not in sys.modules:
suffix, mode, type_ = desc
fp = open(fpath, mode)
try:
mod = imp.load_module(modname, fp, fpath, desc)
finally:
fp.close()
imp.acquire_lock()
sys.modules[modname] = mod
imp.release_lock()
# DSN Parsing
import urlparse
class DSNParseError(Exception): pass
def parse_dsn(dsn_url, scheme_map, default_scheme=None):
"""Parse *dsn_url* into a tuple of `(cls, opts)`."""
dsn = urlparse.urlsplit(dsn_url)
scheme = dsn.scheme or default_scheme
if scheme not in scheme_map:
raise DSNParseError("scheme must be one of %r, not %r" %
(scheme_map.keys(), dsn.scheme))
return scheme_map[scheme], dsn
| yostudios/ams | ams/utils.py | utils.py | py | 2,390 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "setproctitle.setproctitle",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "imp.get_suffixes",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
12855297661 | from __future__ import annotations
from typing import Callable, Type, TypeVar
T = TypeVar("T")
Instance = TypeVar("Instance")
MapValidateSub = TypeVar("MapValidateSub", bound = "MapValidate")
class MapValidate:
"""
A data descriptor that will apply the provided mapper functions to its data to transform it and then validate it
using the provided validator functions before completing assignment. If any of the validators return false, a
ValueError is raised.
The on_get and on_set functions are invoked when the data associated with the descriptor is accessed and set
respectively
"""
__slots__ = "p_name", "s_name", "mappers", "validators", "on_get", "on_set"
def __init__(
self,
mappers: list[Callable[[MapValidateSub, Instance, T], T]] | None = None,
validators: list[Callable[[MapValidateSub, Instance, T], tuple[bool, str]]] | None = None,
on_get: list[Callable[[MapValidateSub, Instance], None]] | None = None,
on_set: list[Callable[[MapValidateSub, Instance], None]] | None = None,
):
"""
:param mappers: A list of functions that can mutate the value that is assigned to this property
:param validators:
A list of functions that can validate the value that is assigned to this property. A ValueError is raised if
validation from any one of these functions fails
:param on_get: A list of functions that are called when this property is accessed
:param on_set: A list of functions that are called when this property is set
"""
self.mappers = mappers or []
self.validators = validators or []
self.on_get = on_get or []
self.on_set = on_set or []
def __set_name__(self, owner: Type[Instance], name: str) -> None:
self.p_name = name
self.s_name = f"_{name}"
def __get__(self, instance: Instance, owner: Type[Instance]) -> MapValidate | T:
if instance is None:
return self
for func in self.on_get:
func(self, instance)
return getattr(instance, self.s_name)
def __set__(self, instance: Instance, value: T) -> None:
val = value
for mapper in self.mappers:
val = mapper(self, instance, val)
for validator in self.validators:
valid, msg = validator(self, instance, val)
if not valid:
raise ValueError(msg % repr(self.p_name))
setattr(instance, self.s_name, val)
for func in self.on_set:
func(self, instance)
| Divy1211/BinaryFileParser | src/binary_file_parser/retrievers/MapValidate.py | MapValidate.py | py | 2,582 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line... |
4467571006 | import database
import databasebuilder
import idlparser
import logging.config
import os.path
import sys
_logger = logging.getLogger('fremontcutbuilder')
FEATURE_DISABLED = [
'ENABLE_BATTERY_STATUS',
'ENABLE_CSS3_CONDITIONAL_RULES',
'ENABLE_CSS_DEVICE_ADAPTATION',
'ENABLE_CUSTOM_SCHEME_HANDLER',
'ENABLE_ENCRYPTED_MEDIA_V2',
'ENABLE_MEDIA_CAPTURE', # Only enabled on Android.
'ENABLE_ORIENTATION_EVENTS', # Only enabled on Android.
'ENABLE_SPEECH_SYNTHESIS',
'ENABLE_WEBVTT_REGIONS',
'ENABLE_XHR_TIMEOUT',
]
FEATURE_DEFINES = [
'ENABLE_CALENDAR_PICKER',
'ENABLE_CANVAS_PROXY',
'ENABLE_CSS_REGIONS',
'ENABLE_CUSTOM_ELEMENTS',
'ENABLE_DATALIST_ELEMENT',
'ENABLE_DIALOG_ELEMENT',
'ENABLE_ENCRYPTED_MEDIA',
'ENABLE_FONT_LOAD_EVENTS',
'ENABLE_GAMEPAD',
'ENABLE_INPUT_SPEECH',
'ENABLE_LEGACY_NOTIFICATIONS',
'ENABLE_MEDIA_STREAM',
'ENABLE_NAVIGATOR_CONTENT_UTILS',
'ENABLE_NOTIFICATIONS',
'ENABLE_PAGE_POPUP',
'ENABLE_SHARED_WORKERS',
'ENABLE_SVG',
'ENABLE_SVG_FONTS',
'ENABLE_VIDEO',
'ENABLE_WEB_AUDIO',
'ENABLE_WEBGL',
]
def build_database(idl_files, database_dir, feature_defines=None, parallel=False):
"""This code reconstructs the FremontCut IDL database from W3C,
WebKit and Dart IDL files."""
current_dir = os.path.dirname(__file__)
logging.config.fileConfig(os.path.join(current_dir, "logging.conf"))
db = database.Database(database_dir)
# Delete all existing IDLs in the DB.
db.Delete()
builder = databasebuilder.DatabaseBuilder(db)
# TODO(vsm): Move this to a README.
# This is the Dart SVN revision.
webkit_revision = '1060'
# TODO(vsm): Reconcile what is exposed here and inside WebKit code
# generation. We need to recheck this periodically for now.
webkit_defines = [ 'LANGUAGE_DART', 'LANGUAGE_JAVASCRIPT' ]
if feature_defines is None:
feature_defines = FEATURE_DEFINES
webkit_options = databasebuilder.DatabaseBuilderOptions(
idl_syntax=idlparser.WEBKIT_SYNTAX,
# TODO(vsm): What else should we define as on when processing IDL?
idl_defines=webkit_defines + feature_defines,
source='WebKit',
source_attributes={'revision': webkit_revision})
# Import WebKit IDLs.
builder.import_idl_files(idl_files, webkit_options, parallel)
# Import Dart idl:
dart_options = databasebuilder.DatabaseBuilderOptions(
idl_syntax=idlparser.FREMONTCUT_SYNTAX,
source='Dart',
rename_operation_arguments_on_merge=True)
builder.import_idl_files(
[ os.path.join(current_dir, '..', 'idl', 'dart', 'dart.idl') ],
dart_options,
parallel)
# Merging:
builder.merge_imported_interfaces()
builder.fetch_constructor_data(webkit_options)
builder.fix_displacements('WebKit')
# Cleanup:
builder.normalize_annotations(['WebKit', 'Dart'])
conditionals_met = set(
'ENABLE_' + conditional for conditional in builder.conditionals_met)
known_conditionals = set(FEATURE_DEFINES + FEATURE_DISABLED)
unused_conditionals = known_conditionals - conditionals_met
if unused_conditionals:
_logger.warning('There are some unused conditionals %s' %
sorted(unused_conditionals))
unknown_conditionals = conditionals_met - known_conditionals
if unknown_conditionals:
_logger.warning('There are some unknown conditionals %s' %
sorted(unknown_conditionals))
db.Save()
return db
def main(parallel=False):
current_dir = os.path.dirname(__file__)
idl_files = []
webcore_dir = os.path.join(current_dir, '..', '..', '..', 'third_party',
'WebCore')
if not os.path.exists(webcore_dir):
raise RuntimeError('directory not found: %s' % webcore_dir)
DIRS_TO_IGNORE = [
'bindings', # Various test IDLs
'testing', # IDLs to expose testing APIs
'networkinfo', # Not yet used in Blink yet
'vibration', # Not yet used in Blink yet
]
def visitor(arg, dir_name, names):
if os.path.basename(dir_name) in DIRS_TO_IGNORE:
names[:] = [] # Do not go underneath
for name in names:
file_name = os.path.join(dir_name, name)
(interface, ext) = os.path.splitext(file_name)
if ext == '.idl':
idl_files.append(file_name)
os.path.walk(webcore_dir, visitor, webcore_dir)
database_dir = os.path.join(current_dir, '..', 'database')
return build_database(idl_files, database_dir, parallel=parallel)
if __name__ == '__main__':
sys.exit(main())
| MarkBennett/dart | tools/dom/scripts/fremontcutbuilder.py | fremontcutbuilder.py | py | 4,498 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "logging.config.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.pa... |
5416694372 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Downblock(nn.Module):
def __init__(self, channels, kernel_size=3):
super(Downblock, self).__init__()
self.dwconv = nn.Conv2d(channels, channels, groups=channels, stride=2,
kernel_size=kernel_size, padding=1, bias=False)
self.bn = nn.BatchNorm2d(channels)
def forward(self, x):
return self.bn(self.dwconv(x))
class GEBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, spatial, extent=0, extra_params=True, mlp=True, dropRate=0.0):
super(GEBlock, self).__init__()
self.bnrelu = nn.Sequential(nn.BatchNorm2d(in_planes), nn.ReLU(inplace=True))
self.conv = nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
nn.Dropout(p=dropRate),
nn.Conv2d(out_planes, out_planes, kernel_size=3, padding=1, bias=False))
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes,
out_planes, kernel_size=1, stride=stride, bias=False) or None
if extra_params:
if extent: modules = [Downblock(out_planes)]
for i in range((extent-1) // 2): modules.append(nn.Sequential(nn.ReLU(inplace=True), Downblock(out_planes)))
self.downop = nn.Sequential(*modules) if extent else Downblock(out_planes, kernel_size=spatial)
else:
self.downop = nn.AdaptiveAvgPool2d(spatial // extent) if extent else nn.AdaptiveAvgPool2d(1)
self.mlp = nn.Sequential(nn.Conv2d(out_planes, out_planes // 16, kernel_size=1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(out_planes // 16, out_planes, kernel_size=1, bias=False)) if mlp else lambda x: x
def forward(self, x):
bnrelu = self.bnrelu(x)
out = self.conv(bnrelu)
map = self.mlp(self.downop(out))
# Assuming squares because lazy.
map = F.interpolate(map, out.shape[-1])
if not self.equalInOut: x = self.convShortcut(bnrelu)
return torch.add(x, out * torch.sigmoid(map))
| cuihu1998/GENet-Res50 | pytorch-GENet/models/blocks.py | blocks.py | py | 2,353 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
72514206114 | import torch
import torch.nn as nn
import torch.nn.functional as F
#TODO: focal loss index over error
class FocalLoss(nn.Module):
    """Focal loss for class-imbalanced classification (Lin et al., 2017).

    loss = -alpha_t * (1 - p_t) ** gamma * log(p_t)

    Args:
        gamma: focusing parameter; gamma == 0 reduces to (alpha-weighted)
            cross-entropy.
        alpha: per-class weight. A float/int is interpreted as the weight of
            class 0 (1 - alpha for class 1); a list gives one weight per class.
        size_average: if True return the mean loss, otherwise the sum.
    """

    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1 - alpha])
        if isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        self.size_average = size_average

    def forward(self, input, target):
        """Compute the focal loss of logits `input` against class indices `target`."""
        if input.dim() > 2:
            # Flatten spatial dimensions so input becomes (N*H*W, C).
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        # Fix: pass `dim` explicitly -- the implicit-dim form of log_softmax
        # is deprecated and ambiguity-prone; after the reshape above the class
        # dimension is always dim 1.
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        # p_t is detached so the modulating factor (1 - p_t)**gamma is treated
        # as a constant, matching the original `.data`-based implementation.
        pt = logpt.detach().exp()

        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * at

        loss = -1 * (1 - pt) ** self.gamma * logpt
        if self.size_average:
            return loss.mean()
        return loss.sum()
def multi_focal_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
    """Apply focal loss to all seven side outputs against the same labels.

    Returns (loss of the fused output d0, sum of all seven losses).
    """
    criterion = FocalLoss(size_average=True)
    losses = [criterion(side, labels_v) for side in (d0, d1, d2, d3, d4, d5, d6)]
    #print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item()))
    return losses[0], sum(losses)
def multi_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
    """Apply mean BCE loss to all seven side outputs against the same labels.

    Args:
        d0..d6: sigmoid probability maps (d0 is the fused output).
        labels_v: ground-truth mask in [0, 1].

    Returns:
        (loss of d0, sum of all seven losses).
    """
    # `reduction='mean'` replaces the long-deprecated `size_average=True`
    # argument; the computed value is identical.
    bce_loss = nn.BCELoss(reduction='mean')
    losses = [bce_loss(side, labels_v) for side in (d0, d1, d2, d3, d4, d5, d6)]
    #print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item()))
    return losses[0], sum(losses)
# def CB_loss(labels, logits, samples_per_cls, no_of_classes, loss_type, beta, gamma):
# """Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
# Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
# where Loss is one of the standard losses used for Neural Networks.
# Args:
# labels: A int tensor of size [batch].
# logits: A float tensor of size [batch, no_of_classes].
# samples_per_cls: A python list of size [no_of_classes].
# no_of_classes: total number of classes. int
# loss_type: string. One of "sigmoid", "focal", "softmax".
# beta: float. Hyperparameter for Class balanced loss.
# gamma: float. Hyperparameter for Focal loss.
# Returns:
# cb_loss: A float tensor representing class balanced loss
# """
# effective_num = 1.0 - np.power(beta, samples_per_cls)
# weights = (1.0 - beta) / np.array(effective_num)
# weights = weights / np.sum(weights) * no_of_classes
#
# labels_one_hot = F.one_hot(labels, no_of_classes).float()
#
# weights = torch.tensor(weights).float()
# weights = weights.unsqueeze(0)
# weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
# weights = weights.sum(1)
# weights = weights.unsqueeze(1)
# weights = weights.repeat(1,no_of_classes)
#
# if loss_type == "focal":
# cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)
# elif loss_type == "sigmoid":
# cb_loss = F.binary_cross_entropy_with_logits(input = logits,target = labels_one_hot, weights = weights)
# elif loss_type == "softmax":
# pred = logits.softmax(dim = 1)
# cb_loss = F.binary_cross_entropy(input = pred, target = labels_one_hot, weight = weights)
# return cb_loss | alswlsghd320/u2net_pytorch | loss.py | loss.py | py | 4,463 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_num... |
43438190115 | import numpy as np
import mediapipe as mp
import matplotlib.pyplot as plt
from utils import (
poseDetector, poseUtils
)
def main():
    """Run MediaPipe pose detection on one sample image and save an annotated copy."""
    fileName = 'data/Question 19 - 8AE44065-F2F5-4D85-9A96-F69471837F7A.jpeg'
    image = poseUtils.getImage(fileName)
    pDetector = poseDetector.PoseDetector()
    results = pDetector.getResults( image )
    annotatedImage = poseUtils.generateAnnotatedImage( image, results )
    # Debug output of the segmentation mask produced by the detector.
    print(results.segmentation_mask)
    print(results.segmentation_mask.shape)
    plt.figure()
    plt.imshow(image)
    plt.figure()
    plt.imshow(annotatedImage)
    # NOTE(review): savefig writes only the current (second) figure; the first
    # figure showing the raw image is never saved -- confirm this is intended.
    plt.savefig('results/temp.png')
    return
if __name__ == '__main__':
main()
| sankhaMukherjee/legalUserImage | main.py | main.py | py | 713 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.poseUtils.getImage",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.poseUtils",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "utils.poseDetector.PoseDetector",
"line_number": 14,
"usage_type": "call"
},
{
"api_name... |
12837436067 | from datetime import datetime, timedelta
from django.db import models
from django.db.models import Q
from django.contrib.postgres.fields import JSONField
from simple_history.models import HistoricalRecords
from web.sns import notify_productcategory_change
from web.managers import ArticleManager
from web.gs1_code_lists import Allergen, FishingZone, Origin, T3780
class Storage():
    """Namespace for the storage-condition choices shared by article models."""

    # (stored value, human-readable label) pairs, as Django `choices` expect;
    # here value and label are intentionally identical.
    TYPE = tuple(
        (condition, condition)
        for condition in ("Colonial", "Refrigerated", "Frozen", "Unspecified")
    )
class Article(models.Model):
    """GS1-style trade item imported from external data sources.

    Articles form packaging hierarchies linked by GTIN: `child_gtin` points at
    the contained article and `quantity_of_lower_layer` says how many units a
    package holds. Saving an article re-links orphaned images and re-runs
    validation on every Product built on it.
    """

    TYPES = (
        ("BASE_UNIT_OR_EACH", "BASE_UNIT_OR_EACH"),
        ("CASE", "CASE"),
        ("1/4", "1/4"),
        ("1/2", "1/2"),
        ("PALLET", "PALLET")
    )
    # VAT rates (percent) accepted for consumer units (see is_valid()).
    VATS = [6.0, 12.0, 25.0]

    consumer_unit = models.BooleanField(
        default=False, verbose_name="Consumer unit",
        help_text="Should this article be available to consumers?")
    descriptor_code = models.CharField(
        choices=TYPES, max_length=64, verbose_name="Type",
        help_text="What type of article is this?")
    gtin = models.CharField(max_length=14, unique=True,
                            default=0, verbose_name="GTIN")
    child_gtin = models.CharField(
        blank=True, max_length=14, default="",
        verbose_name="Child GTIN",
        help_text="If this is a package, what GTIN does the package contain?")
    quantity_of_lower_layer = models.IntegerField(
        blank=True, default=0, verbose_name="Quantity of Lower Layer",
        help_text="How many of that GTIN does it contain?")
    category = models.CharField(
        max_length=100, verbose_name="Category",
        help_text="Industri category (GS1)")
    weight_g = models.FloatField(default=0, verbose_name="Weight (g)")
    height_mm = models.FloatField(default=0, verbose_name="Height (mm)")
    length_mm = models.FloatField(default=0, verbose_name="Length (mm)")
    width_mm = models.FloatField(default=0, verbose_name="Depth (mm)")
    volume_dm3 = models.FloatField(max_length=10, default=0,
                                   verbose_name="Volume (dm³)")
    net_content = models.FloatField(null=True, verbose_name="Net Content")
    net_content_unit_code = models.CharField(null=True,
        max_length=10, verbose_name="Net Content Unit Code", choices=list(T3780.UNIT_CODES.items()))
    creation_date = models.DateTimeField(verbose_name="Creation date")
    name = models.CharField(max_length=100, verbose_name="Name")
    description = models.TextField(blank=True, verbose_name="Description")
    recycle_fee = models.FloatField(default=0.0, verbose_name="Recycle Fee")
    last_modified = models.DateTimeField(
        auto_now=False, verbose_name="Last modified")
    brand_name = models.CharField(max_length=256, verbose_name="Brand")
    source = models.CharField(max_length=40, verbose_name="Source", blank=True)
    # Bug fix: `zip(...)` is a one-shot iterator in Python 3 and would be
    # exhausted the first time Django traverses the choices; materialize it.
    vat = models.FloatField(
        default=0, verbose_name="VAT",
        choices=list(zip([0.0] + VATS, [0.0] + VATS)))
    adult_product = models.BooleanField(
        default=False, verbose_name="Adult Product")
    marketing_message = models.TextField(
        blank=True, verbose_name="Marketing message")
    nutrition_description = models.TextField(
        blank=True, verbose_name="Nutrition Description")
    ingredient_description = models.TextField(
        blank=True, verbose_name="Ingredient Description")
    allergen_statement = models.TextField(
        verbose_name="Allergen Statement", blank=True, default='')
    allergens = JSONField(
        verbose_name="Allergens", blank=True, default=dict)
    origin = models.IntegerField(
        blank=True, verbose_name="Origin", choices=Origin.COUNTRY, default=0)
    fishing_zone = models.IntegerField(
        blank=True, choices=FishingZone.AREA, default=0)
    # Whitelisted articles bypass the completeness checks in is_valid().
    whitelisted = models.BooleanField(
        default=False, verbose_name="Whitelisted")
    history = HistoricalRecords(
        excluded_fields=['creation_date', 'last_modified'])
    objects = ArticleManager()
    storage_type = models.CharField(
        max_length=64, choices=Storage.TYPE,
        default="Unspecified", verbose_name="Storage Type")
    storage_temperature_range = models.CharField(
        max_length=64, blank=True,
        default="", verbose_name="Storage Temperature Range")

    class Meta:
        indexes = [
            models.Index(fields=['category', 'name', 'gtin', 'source'])
        ]
        ordering = ['-id']

    def is_valid(self):
        """Return True when the article carries all data needed to publish it.

        Whitelisted articles and non-consumer units are always valid; consumer
        units must also have an image, physical dimensions, descriptive texts
        and an approved VAT rate.
        """
        if self.whitelisted or not self.consumer_unit:
            return True
        if (self.articleimage_set.count() > 0 and
                self.weight_g > 0 and
                self.height_mm > 0 and
                self.length_mm > 0 and
                self.width_mm > 0 and
                self.volume_dm3 > 0 and
                self.name != '' and
                self.brand_name != '' and
                self.marketing_message != '' and
                self.nutrition_description != '' and
                self.ingredient_description != ''):
            if ((self.consumer_unit and self.vat in Article.VATS) or
                    not self.consumer_unit):
                return True
        return False

    def get_allergens(self):
        """Return the allergen codes as a comma-separated readable string."""
        return ", ".join(Allergen.get_text(code) for code in self.allergens)

    def save(self, *args, **kwargs):
        """Persist the article, then repair links and dependent products."""
        super(Article, self).save(*args, **kwargs)
        # Link any images that were imported before this article existed.
        images = ArticleImage.objects.filter(
            article=None,
            gtin=self.gtin
        )
        for image in images:
            image.article = self
            image.save()
        # Re-save products to trigger their validation checks.
        products = Product.objects.filter(article__gtin=self.gtin)
        for product in products:
            product.save_without_historical_record()

    def post_delete(sender, instance, using):
        # NOTE(review): signal-receiver signature (no `self`); presumably
        # connected to the post_delete signal elsewhere -- confirm.
        products = Product.objects.filter(article__gtin=instance.gtin)
        for product in products:
            product.save_without_historical_record()

    def get_image(self):
        """Return a placeholder keyword for packages, else the first image."""
        if self.descriptor_code == "PALLET":
            return "PALLET"
        if (self.descriptor_code in ["CASE", "1/4", "1/2"] and
                not self.consumer_unit):
            return "CASE"
        return self.articleimage_set.first()

    def get_related(self):
        """Return every other package in this article's hierarchy, by GTIN."""
        related = self.get_related_rec()
        related = filter(lambda x: x.id != self.id, related)
        return sorted(related, key=lambda package: package.gtin)

    def get_related_rec(self, packages=None):
        """Depth-first collect the package hierarchy around this article.

        Bug fix: the accumulator previously used a mutable default argument
        (`packages=[]`), so results leaked between successive top-level calls.
        """
        if packages is None:
            packages = []
        child = Article.objects.filter(gtin=self.child_gtin).first()
        parents = list(Article.objects.filter(child_gtin=self.gtin))
        if child is not None:
            parents.append(child)
        for p in parents:
            if p not in packages:
                packages.append(p)
                p.get_related_rec(packages)
        return packages
class ArticleImage(models.Model):
    """Image asset for an Article, matched to its article by GTIN."""

    gtin = models.CharField(max_length=14, default=0, verbose_name="GTIN")
    angle = models.CharField(max_length=6, blank=True, verbose_name="Angle")
    filename = models.CharField(max_length=256, verbose_name="Filename")
    creation_date = models.DateTimeField(auto_now_add=True,
                                         verbose_name="Created")
    source = models.CharField(max_length=40, verbose_name="Source", blank=True)
    history = HistoricalRecords(excluded_fields=['creation_date'])
    article = models.ForeignKey(Article, on_delete=models.DO_NOTHING,
                                blank=True, null=True)
    last_modified = models.DateTimeField(auto_now=True, blank=True,
                                         verbose_name='Last Modified')

    def save(self, *args, **kwargs):
        """Resolve the Article link from the GTIN (if unlinked) before saving."""
        if (self.article is None and
                Article.objects.filter(gtin=self.gtin).exists()):
            article = Article.objects.get(gtin=self.gtin)
            self.article = article
        super(ArticleImage, self).save(*args, **kwargs)
class ProductCategory(models.Model):
    """Hierarchical, optionally public product category with SEO metadata."""

    name = models.CharField(max_length=128,
                            verbose_name='Category Name', unique=True)
    parentcategory = models.ForeignKey("self",
                                       null=True, blank=True,
                                       on_delete=models.DO_NOTHING,
                                       verbose_name='Parent Category')
    url = models.CharField(max_length=256, verbose_name='Category URL')
    is_public = models.BooleanField(verbose_name='Is Public')
    introduction = models.TextField(verbose_name='Intro')
    title = models.CharField(max_length=256, verbose_name='Title')
    metadescription = models.CharField(max_length=1024,
                                       null=True,
                                       blank=True,
                                       verbose_name='Meta Description')
    removed_date = models.DateTimeField(null=True,
                                        blank=True,
                                        verbose_name='Removed')
    creation_date = models.DateTimeField(auto_now_add=True,
                                         verbose_name='Created')
    last_modified = models.DateTimeField(auto_now=True, blank=True,
                                         verbose_name='Last Modified')
    tags = models.ManyToManyField(
        'Tag', related_name='categories', blank=True)

    class Meta:
        ordering = ['-id']

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Announce the change over SNS, then persist the category.

        Bug fix: forward ``*args``/``**kwargs`` (e.g. ``using=``,
        ``update_fields=``) to ``Model.save()``; the original silently
        dropped them. The pre-save notification order is preserved -- note the
        event therefore fires even if the database save subsequently fails.
        """
        notify_productcategory_change(self)
        super(ProductCategory, self).save(*args, **kwargs)
class MerchantArticle(models.Model):
    """A merchant's listing (price/availability) of an Article, keyed by the
    merchant's own external id.
    """

    article_gtin = models.CharField(null=True, max_length=14,
                                    verbose_name="Article GTIN")
    article = models.ForeignKey(
        Article, null=True, blank=True, on_delete=models.DO_NOTHING,
        related_name='merchant_articles')
    merchant_name = models.CharField(
        max_length=256, verbose_name='Merchant name')
    external_id = models.CharField(max_length=256, verbose_name='External ID')
    availability_status = models.CharField(
        max_length=100, default='', verbose_name='Availability status')
    listed = models.CharField(max_length=20, blank=True, verbose_name='Listed')
    last_date_to_order = models.CharField(
        max_length=20, blank=True, verbose_name='Last date to order')
    price = models.FloatField(max_length=10, default=0, verbose_name="Price")
    currency = models.CharField(max_length=5, default="SEK", verbose_name="Currency")
    sales_price = models.FloatField(max_length=10, default=0, verbose_name="Sales Price")
    sales_price_valid_from = models.CharField(
        max_length=20, blank=True, verbose_name='Sales Price Valid From')
    sales_price_valid_to = models.CharField(
        max_length=20, blank=True, verbose_name='Sales Price Valid To')
    creation_date = models.DateTimeField(
        auto_now_add=True, verbose_name='Creation date')
    last_modified = models.DateTimeField(
        auto_now=True, verbose_name='Last modified')
    history = HistoricalRecords(excluded_fields=['creation_date',
                                                 'last_modified'])

    class Meta:
        unique_together = (("external_id", "merchant_name"))
        ordering = ['-id']

    def save(self, *args, **kwargs):
        """Re-resolve the Article link from the GTIN on every save, then
        re-validate the products that depend on it."""
        self.article = Article.objects.filter(gtin=self.article_gtin).first()
        super(MerchantArticle, self).save(*args, **kwargs)
        if self.article:
            self.save_related_products()

    def post_delete(sender, instance, using):
        # NOTE(review): signal-receiver signature (no `self`); presumably
        # wired to the post_delete signal elsewhere -- confirm.
        products = Product.objects.filter(article__gtin=instance.article.gtin)
        for product in products:
            product.save_without_historical_record()

    def save_related_products(self):
        """Trigger re-validation of every product built on this article."""
        products = Product.objects.filter(article__gtin=self.article.gtin)
        for product in products:
            product.save_without_historical_record()

    def __str__(self):
        return self.external_id + " (" + self.merchant_name + ")"
class Product(models.Model):
    """Sellable product backed by an Article.

    Carries optional per-product overrides of the article's data plus a set of
    `valid_*` flags recomputed on every save; `valid_products_query` selects
    the products considered publishable.
    """

    TYPES = (
        ("Normal", "Normal"),
        ("Crossdocking", "Crossdocking"),
        ("Nightorder", "Nightorder"),
    )
    marketing_message = models.TextField(blank=True)
    nutrition_description = models.TextField(blank=True)
    ingredient_description = models.TextField(blank=True)
    name = models.CharField(max_length=256, blank=True)
    brand_name = models.CharField(max_length=256, verbose_name="Brand",
                                  blank=True)
    description = models.TextField(blank=True, verbose_name="Description")
    creation_date = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    product_id = models.BigIntegerField(default=0, unique=True)
    article = models.ForeignKey(Article, on_delete=models.DO_NOTHING)
    prefered_merchantarticle = models.ForeignKey(
        MerchantArticle, on_delete=models.DO_NOTHING,
        blank=True, null=True, default=None)
    product_category = models.ForeignKey(
        ProductCategory, on_delete=models.DO_NOTHING, blank=True, null=True)
    tags = models.ManyToManyField(
        'Tag', related_name='products', blank=True)
    adult_product = models.BooleanField(
        default=False, verbose_name="Adult Product")
    origin = models.IntegerField(blank=True,
                                 verbose_name="Origin",
                                 choices=Origin.COUNTRY,
                                 default=0)
    fishing_zone = models.IntegerField(blank=True,
                                       choices=FishingZone.AREA,
                                       default=0)
    allergen_statement = models.TextField(
        verbose_name="Allergen Statement", blank=True, default='')
    allergens = JSONField(
        verbose_name="Allergens", blank=True, default=dict)
    # Physical attributes are nullable overrides; when None the Article's
    # values are used instead (see validate_weight / validate_volume).
    weight_g = models.FloatField(
        blank=True, null=True, verbose_name="Weight (g)")
    height_mm = models.FloatField(
        blank=True, null=True, verbose_name="Height (mm)")
    length_mm = models.FloatField(
        blank=True, null=True, verbose_name="Length (mm)")
    width_mm = models.FloatField(
        blank=True, null=True, verbose_name="Depth (mm)")
    volume_dm3 = models.FloatField(
        max_length=10, blank=True, null=True, verbose_name="Volume (dm³)")
    net_content = models.FloatField(
        blank=True, null=True, verbose_name="Net Content")
    net_content_unit_code = models.CharField(
        blank=True, null=True, max_length=10, verbose_name="Net Content Unit Code", choices=list(T3780.UNIT_CODES.items()))
    recycle_fee = models.FloatField(default=0.0, verbose_name="Recycle Fee")
    # NOTE(review): `zip(...)` used as `choices` is a one-shot iterator in
    # Python 3 and is exhausted after its first traversal -- it should most
    # likely be materialized with `list(...)` (compare Article.vat).
    vat = models.FloatField(
        default=0, verbose_name="VAT",
        choices=zip([0.0] + Article.VATS, [0.0] + Article.VATS))
    last_receipt_day = models.IntegerField(blank=True, default=0)
    last_sales_day = models.IntegerField(blank=True, default=0)
    product_type = models.CharField(
        max_length=128, blank=True, choices=TYPES,
        default="Normal", verbose_name="Type")
    # Validation flags recomputed on every save() via validate_all().
    valid_weight = models.BooleanField(blank=True, default=False)
    valid_volume = models.BooleanField(blank=True, default=False)
    valid_image = models.BooleanField(blank=True, default=False)
    valid_brand = models.BooleanField(blank=True, default=False)
    valid_price = models.BooleanField(blank=True, default=False)
    valid_merchantarticle = models.BooleanField(blank=True, default=False)
    history = HistoricalRecords(
        excluded_fields=['creation_date', 'last_modified'])
    # Query used elsewhere to select publishable products; the weight, volume
    # and merchant-article checks are deliberately disabled here.
    valid_products_query = Q(
        # valid_weight=True,
        # valid_volume=True,
        valid_image=True,
        valid_brand=True,
        valid_price=True,
        # valid_merchantarticle=True,
        article__vat__in=Article.VATS)

    class Meta:
        indexes = [
            models.Index(fields=['product_id', 'name', 'brand_name'])
        ]
        ordering = ['-id']

    def validate_weight(self):
        """Cross-check the unit weight against the CASE package weight.

        Valid when a CASE package exists, has a non-zero weight, and
        unit_weight * quantity is within 20% of the case weight.
        """
        packages = self.article.get_related()
        case = list(filter(
            lambda package: package.descriptor_code == 'CASE',
            packages
        ))
        if len(case) == 0:
            self.valid_weight = False
            return self.valid_weight
        else:
            case = case[0]
        if int(case.weight_g) == 0:
            self.valid_weight = False
            return self.valid_weight
        # Product-level override wins; fall back to the article's weight.
        weight = self.weight_g or self.article.weight_g
        # Relative deviation (percent) of units-per-case weight vs case weight.
        weight_diff = (
            weight *
            case.quantity_of_lower_layer /
            case.weight_g - 1) * 100
        if abs(weight_diff) >= 20.0:
            self.valid_weight = False
            return self.valid_weight
        self.valid_weight = True
        return self.valid_weight

    def validate_volume(self):
        """Cross-check the unit volume against the CASE package volume
        (same 20% tolerance as validate_weight)."""
        packages = self.article.get_related()
        case = list(filter(
            lambda package: package.descriptor_code == 'CASE',
            packages
        ))
        if len(case) == 0:
            self.valid_volume = False
            return self.valid_volume
        else:
            case = case[0]
        if int(case.volume_dm3) == 0:
            self.valid_volume = False
            return self.valid_volume
        volume = self.volume_dm3 or self.article.volume_dm3
        volume_diff = (
            volume *
            case.quantity_of_lower_layer /
            case.volume_dm3 - 1) * 100
        if abs(volume_diff) >= 20.0:
            self.valid_volume = False
            return self.valid_volume
        self.valid_volume = True
        return self.valid_volume

    def validate_image(self):
        """Valid when the product has at least one image."""
        if self.productimage_set.count() == 0:
            self.valid_image = False
            return self.valid_image
        self.valid_image = True
        return self.valid_image

    def validate_brand(self):
        """Valid when either the product or its article names a brand."""
        if self.brand_name == '' and self.article.brand_name == '':
            self.valid_brand = False
            return self.valid_brand
        self.valid_brand = True
        return self.valid_brand

    def validate_price(self):
        """Valid when every required store has a positive price.

        NOTE(review): the store ids duplicate ProductDetail.STORES -- keep
        them in sync if stores change.
        """
        store_ids_to_check = {10, 14, 16}
        store_ids_with_price = set(
            self
            .product_detail
            .filter(price__gt=0)
            .values_list('store', flat=True)
        )
        store_ids_missing_price = store_ids_to_check - store_ids_with_price
        if len(store_ids_missing_price) > 0:
            self.valid_price = False
            return self.valid_price
        self.valid_price = True
        return self.valid_price

    def validate_merchantarticle(self):
        """Valid when the article or any related package has a merchant listing."""
        orderable = True
        if self.article.merchant_articles.count() == 0:
            orderable = False
            packages = self.article.get_related()
            for package in packages:
                if package.merchant_articles.count() > 0:
                    orderable = True
                    break
        self.valid_merchantarticle = orderable
        return self.valid_merchantarticle

    def validate_all(self):
        """Recompute every valid_* flag (called from save())."""
        self.validate_weight()
        self.validate_volume()
        self.validate_image()
        self.validate_brand()
        self.validate_price()
        self.validate_merchantarticle()

    def save(self, *args, **kwargs):
        self.validate_all()
        super(Product, self).save(*args, **kwargs)

    def get_allergens(self):
        """Return the allergen codes as a comma-separated readable string."""
        allergen_list = []
        for allergen in self.allergens:
            allergen_list.append(
                Allergen.get_text(allergen)
            )
        return ", ".join(allergen_list)
class ProductImage(models.Model):
    """Image asset for a Product; the main image sorts first."""

    angle = models.CharField(max_length=6, blank=True, verbose_name="Angle")
    filename = models.CharField(max_length=256, verbose_name="Filename")
    creation_date = models.DateTimeField(auto_now_add=True,
                                         verbose_name="Created")
    source = models.CharField(max_length=40, verbose_name="Source", blank=True)
    history = HistoricalRecords(excluded_fields=['creation_date'])
    product = models.ForeignKey(Product, on_delete=models.CASCADE, blank=True,
                                null=True)
    active = models.BooleanField(default=True)
    main = models.BooleanField(default=False)

    class Meta:
        ordering = ['-main', '-creation_date']

    def save(self, *args, **kwargs):
        # Re-validate the owning product (its valid_image flag) after saving.
        super(ProductImage, self).save(*args, **kwargs)
        if self.product is not None:
            self.product.save_without_historical_record()

    def post_delete(sender, instance, using):
        # NOTE(review): signal-receiver signature (no `self`) -- confirm wiring.
        if instance.product is not None:
            instance.product.save_without_historical_record()
class ProductDetail(models.Model):
    """Per-store price/availability state of a Product."""

    STORES = (
        (10, "Stockholm"),
        (14, "Göteborg"),
        (16, "Malmö")
    )
    STATUSES = (
        (1, "Open"),
        (2, "Out of stock"),
        (3, "Replacement"),
        (4, "Missing info"),
        (5, "Sample")
    )
    store = models.IntegerField(choices=STORES)
    price = models.FloatField(max_length=10, default=0)
    enabled = models.BooleanField()
    first_enabled = models.DateTimeField(blank=True, null=True)
    status = models.IntegerField(choices=STATUSES, default=4)
    orderfactor = models.BooleanField(
        default=False, verbose_name="Orderfactor")
    product = models.ForeignKey(Product, on_delete=models.DO_NOTHING,
                                related_name='product_detail')
    prefered_merchantarticle = models.ForeignKey(
        MerchantArticle, on_delete=models.DO_NOTHING,
        blank=True, null=True, default=None)
    history = HistoricalRecords()

    def save(self, *args, **kwargs):
        # Saving a detail re-validates the owning product (e.g. valid_price).
        super(ProductDetail, self).save(*args, **kwargs)
        if self.product is not None:
            self.product.save_without_historical_record()

    def post_delete(sender, instance, using):
        # NOTE(review): signal-receiver signature (no `self`) -- confirm wiring.
        if instance.product is not None:
            instance.product.save_without_historical_record()

    def is_tagged_as_new(self):
        """True when the detail was first enabled within the last 30 days."""
        if self.first_enabled is None:
            return False
        cutoff_date = datetime.now(
            self.first_enabled.tzinfo) - timedelta(days=30)
        return self.first_enabled > cutoff_date
class Tag(models.Model):
    """Free-form label attachable to products and categories."""

    name = models.CharField(max_length=200, unique=True, verbose_name="Name")

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['-id']
| hackcasa/zappa_final | web/models.py | models.py | py | 22,739 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 32,
"usage_type": "call"
},
{
"api_na... |
35700198066 |
from struct import pack, unpack
import logging
from handleclient import common
from handleclient import utils
from handleclient import message
from handleclient import handlevalue
from handleclient.handlevalue import HandleValue
from handleclient.message import Message, Envelope, Header, Body, Credential
# Module logger configured from the shared handleclient settings
# (level and handler come from the `common` module).
logger = logging.getLogger(__name__)
logger.setLevel(common.LOG_LEVEL)
logger.addHandler(common.ch)
class ErrorResponseBody(Body):
    """Body of a handle-protocol error response: a single error message."""

    def __init__(self):
        self.errMsg = ""

    # NOTE(review): declared @classmethod but the parameter is named `self`
    # and the parsed message is stored on the *class*, not on an instance --
    # confirm whether callers rely on this, or whether parse() should build
    # and return an instance instead.
    @classmethod
    def parse(self, body):
        assert isinstance(body, bytes)
        # utils.uba presumably decodes a length-prefixed byte array -- TODO confirm.
        self.errMsg = utils.uba(body)
        # The 4-byte length prefix plus the message should consume the body.
        assert 4 + len(self.errMsg) == len(body)

    def __str__(self):
        res = ""
        res += f"{self.errMsg}"
        return res
class ReferralResponseBody(Body):
    """https://tools.ietf.org/html/rfc3652#section-3.4

    Parsing is incomplete: the value count is read, but the individual
    handle values are not decoded yet (the loop body is a placeholder).
    """

    def __init__(self):
        self.valueList = []

    # NOTE(review): declared @classmethod but mutates class state via a
    # parameter named `self` (see ErrorResponseBody) -- confirm intent.
    @classmethod
    def parse(self, body):
        assert isinstance(body, bytes)
        logger.debug(body[:])
        offset = 0
        # First 4 bytes: number of handle values that follow.
        self.valueCnt = utils.u32(body[offset:])
        offset += 4
        for _i in range(self.valueCnt):
            pass

    # def __str__(self):
    #     res = ""
    #     res += f"{self.errMsg}\n"
    #     return res
class SomeResponseBody(Body):
    """Placeholder response body; parsing is not implemented yet."""

    def __init__(self):
        pass

    @classmethod
    def parse(cls, body):
        assert isinstance(body, bytes)
        logger.warning("todo")

    def __str__(self):
        return "unimplemented"
| pullp/HandleClient | handleclient/response.py | response.py | py | 1,554 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "handleclient.common.LOG_LEVEL",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "handleclient.common",
"line_number": 14,
"usage_type": "name"
},
{
"api_name... |
16150940400 |
# Name: John Kelly
# ID: C00176932
from flask import Flask, render_template, request, session
from operator import itemgetter
from collections import Counter
import pickle
import random
import time
import os.path
import parse_words
app = Flask(__name__)  # WSGI application; SECRET_KEY is set in the __main__ guard.
@app.route('/')
def start_app():
    """Serve the entry page with a freshly selected source word.

    Also records the player's start time (in milliseconds) in the session.

    Bug fix: when the pickle was missing, the original only *built* it via
    parse_words.create_selected_word_dictionary() and never assigned
    `source_words`, raising NameError on the next line. Build first (the
    helper presumably writes the pickle -- mirrors
    validate_input_in_dictionary), then always load from the file.
    """
    if not os.path.isfile('pickle/source_words.pickle'):
        parse_words.create_selected_word_dictionary()
    with open('pickle/source_words.pickle', 'rb') as ph:
        source_words = pickle.load(ph)
    session.setdefault('selected_word', '')
    session.setdefault('current_user', {})
    session['selected_word'] = random.choice(source_words)
    session['current_user']['start_time'] = time.time() * 1000
    return render_template('entries.html',
                           title=session['selected_word'] + ' | Word Game')
@app.route('/checkscore', methods=['POST'])
def process_the_data():
    """Validate the submitted words and render the success or failure page.

    Records the end time, checks for duplicate words first, then validates
    each word; on success stores the elapsed time in the session.
    """
    session['current_user']['end_time'] = time.time() * 1000
    session.setdefault('user_input', {})
    session['user_input'] = request.form.to_dict()
    valid_word = validate_input_duplicates(session['user_input'])
    if valid_word:
        # validate_input annotates each entry with a display colour.
        valid_word, session['user_input'] = validate_input(session['user_input'], session['selected_word'])
    if valid_word is False:
        return render_template('wrong_results.html',
                               sourceWord=session['selected_word'],
                               words=session['user_input'])
    session['current_user']['total_time'] = session['current_user']['end_time'] -session['current_user']['start_time']
    return render_template('right_answer.html')
@app.route('/topscorerslist', methods=['POST'])
def join_the_scoreboard():
    """Add the current player to the persisted score list and render the top ten."""
    user_input = request.form.to_dict()
    if os.path.isfile('pickle/top_scorers_list.pickle'):
        with open('pickle/top_scorers_list.pickle', 'rb') as ph:
            top_scorers_list = pickle.load(ph)
    else:
        top_scorers_list = []
    session['current_user']['name'] = user_input['name']
    session['current_user']['display_time'] = calculate_display_time(session['current_user']['total_time'])
    top_scorers_list.append(session['current_user'])
    with open('pickle/top_scorers_list.pickle', 'wb') as ph:
        pickle.dump(top_scorers_list, ph)
    # Fastest total time first; ties broken by who started earlier.
    sorted_list = sorted(top_scorers_list, key=itemgetter('total_time', 'start_time'), reverse=False)
    return render_template('top_ten.html',
                           words=session['user_input'],
                           selected_word=session['selected_word'],
                           top_ten_scorers=sorted_list[:10],
                           position=sorted_list.index(session['current_user']) + 1)
def validate_input(user_input, selected_word):
    """Check each submitted word and annotate entries with a display colour.

    Each dict value is replaced by {'word': ..., 'color': ...}: green for a
    valid word, orange for input containing spaces, red otherwise.

    Returns:
        (all_valid, annotated_input) where all_valid is False as soon as any
        single word fails validation.
    """
    valid_word = True
    for user_input_key, user_input_value in user_input.items():
        # A word is valid if long enough, buildable from the source word's
        # letters, found in the dictionary, and not the source word itself.
        if validate_input_length(user_input_value) and validate_input_characters(user_input_value.lower(), selected_word) and validate_input_in_dictionary(user_input_value) and user_input_value != selected_word:
            user_input[user_input_key] = {
                'word': user_input_value,
                'color': 'green'
            }
        else:
            if ' ' in user_input_value:
                user_input[user_input_key] = {
                    'word': "Spaces aren't allowed",
                    'color': 'orange'
                }
            else:
                user_input[user_input_key] = {
                    'word': user_input_value,
                    'color': 'red'
                }
            valid_word = False
    return valid_word, user_input
def validate_input_length(word):
    """A guess counts only when it is at least three letters long."""
    return len(word) >= 3
def validate_input_characters(word, selected_word):
    """Return True if `word` can be built from the letters of `selected_word`.

    Each letter may be used at most as many times as it occurs in
    `selected_word` (multiset containment). Counter subtraction keeps only
    the letters (with counts) that `word` needs beyond what `selected_word`
    supplies; an empty result means the word is buildable. This also avoids
    the original's redundant full scan after a mismatch was already found.
    """
    return not (Counter(word) - Counter(selected_word))
def validate_input_duplicates(all_values):
    """Return True when all seven submitted words are distinct.

    Bug fix: iterating a dict yields its *keys* (the form field names, which
    are always unique), so `len(set(all_values)) == 7` could never detect a
    duplicated word. Compare the submitted *values* instead; non-dict
    iterables keep the original behaviour.
    """
    words = all_values.values() if isinstance(all_values, dict) else all_values
    return len(set(words)) == 7
def validate_input_in_dictionary(word):
    """Return True when `word` exists in the pickled dictionary.

    Bug fix: when the pickle was missing, the original only *built* it via
    parse_words.create_dictionary() and never assigned `dictionary`, raising
    NameError on the return line. Build first (the helper presumably writes
    the pickle file -- TODO confirm), then always load from the file.
    """
    if not os.path.isfile('pickle/dictionary.pickle'):
        parse_words.create_dictionary()
    with open('pickle/dictionary.pickle', 'rb') as ph:
        dictionary = pickle.load(ph)
    return word in dictionary
def calculate_display_time(milliseconds):
    """Format a duration given in milliseconds as 'H:M:S' (no zero padding).

    Hours wrap at 24, matching the original formatting.

    Bug fix: the original ignored its `milliseconds` parameter and read
    session['current_user']['total_time'] instead. Since the only caller
    passes exactly that session value, computing from the parameter preserves
    behaviour while making the function pure and testable.
    """
    seconds = int((milliseconds / 1000) % 60)
    minutes = int((milliseconds / (1000 * 60)) % 60)
    hours = int((milliseconds / (1000 * 60 * 60)) % 24)
    return str(hours) + ':' + str(minutes) + ':' + str(seconds)
if __name__ == '__main__':
    # NOTE(review): hard-coded SECRET_KEY committed to source -- move it to an
    # environment variable or config file for any non-local deployment.
    app.config['SECRET_KEY'] = 'THISISMYSECRETKEY'
    app.run(debug=True)
| ItsJohn/Word-Game | webapp.py | webapp.py | py | 4,798 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line... |
7814140733 | import cv2
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
import argparse
# Command-line options for assembling the attention-map visualisation grid.
# Fixed help texts: --root and --token_idx carried copy-pasted, unrelated
# help strings, and "attnetion" was misspelled.
parser = argparse.ArgumentParser()
parser.add_argument( "--root", type=str, default="./atten_bear_visualization/", help="directory containing the visualization .png files" )
parser.add_argument( "--num_sample", type=int, default=3, help="number of samples generated in the ldm model" )
parser.add_argument( "--num_level", type=int, default=4, help="number of attention resolution levels (8, 16, 32, 64)" )
parser.add_argument( "--num_att_comp", type=int, default=3, help="attention components per level: down, up, down&up = 3" )
parser.add_argument( "--num_att_maps", type=int, default=10, help="number of attention maps generated in the model." )
parser.add_argument( "--token_idx", type=int, default=1, help="index of the text token whose attention maps are visualized" )
args = parser.parse_args()
# Unpack into the module-level names used throughout the script.
root = args.root
num_sample = args.num_sample
num_level = args.num_level # (8, 16, 32, 64)
num_att_comp = args.num_att_comp #level= 00,22,02
num_att_maps = args.num_att_maps
# Sorted filename order drives all index arithmetic below: first
# num_sample*num_att_maps attention maps (sample-major), then the bundles.
# NOTE(review): assumes the directory contains exactly that layout -- confirm.
imgs = sorted(glob( os.path.join(root, "*.png") ))#os.listdir(root)
attmap_list = []  # per-sample attention maps, each resized to (h, h)
bundle_list = []  # bundle strips, each resized to (h*num_sample, h)
""" 1. bundle image 읽어오는 과정 """
cur_idx = num_sample*num_att_maps #i*num_att_maps + k +1
for m in range(cur_idx, cur_idx+num_att_maps):
if m == cur_idx:
h,w,c = cv2.imread(imgs[m]).shape
white_img = np.ones((h, h*num_sample, 3))*255
bundle_list.append( cv2.resize(cv2.imread(imgs[m]), (h*num_sample,h)) ) # (516, 1548, 3)
""" 2. attention map 읽어오는 과정 """
for i in range(num_sample): # num_sample
for k in range(num_att_maps): # num attention maps in each sample
attmap_list.append( cv2.resize(cv2.imread(imgs[i*num_att_maps + k]), (h,h)) )
""" 3. make bundle composition """
last_bundle = None
for mm in range(num_level):
if mm==0:
init_bundle = np.concatenate( ( bundle_list[mm], white_img, white_img), axis=1 )
else:
cur_idx = mm*num_att_comp-(num_sample-1)
cur_bundle = np.concatenate( bundle_list[ cur_idx : cur_idx+num_att_comp ], axis=0 ) # down, downup, up
if last_bundle is None:
last_bundle = cur_bundle
else:
last_bundle = np.concatenate( (last_bundle, cur_bundle), axis=1 )
last_bundle = np.concatenate( (init_bundle, last_bundle), axis=0 )
cv2.imwrite( os.path.join(root, "./last_bundle_imgs_idx:{0}.png".format(args.token_idx) ), last_bundle)
""" 4. make attenmap composition
0: 0 10 20
1: 1 11 21/ 2 12 22 /3 13 23
2: 4 14 24 /5 15 25 /6 16 26
....
"""
last_bundle = None
for r in range(num_level): # 4
cur_att_map_list = []
if r ==0:
for i in range(num_sample): # 3
cur_attmap = attmap_list[i*num_att_maps]
cur_att_map_list.append(cur_attmap)
cur_att_map_list = np.concatenate(cur_att_map_list, axis=1)
white_img = np.ones((h, h*num_sample*2, 3))*255
init_bundle = np.concatenate( (cur_att_map_list, white_img), axis=1 )
else:
cur_idx = r*(num_level-1)-(num_sample-1) #-2
for i in range(num_sample): # 3
cur_subatt_map_list = []
for j in range(num_sample):
cur_attmap = attmap_list[i+cur_idx + j*num_att_maps]
cur_subatt_map_list.append(cur_attmap)
cur_att_map_list.append( np.concatenate(cur_subatt_map_list, axis=1) )
cur_bundle = np.concatenate( cur_att_map_list, axis=0 )#.reshape(h*num_sample, h*num_sample, 3)
if last_bundle is None:
last_bundle = cur_bundle
else:
last_bundle = np.concatenate( (last_bundle, cur_bundle), axis=1 )
last_bundle = np.concatenate( (init_bundle, last_bundle), axis=0 )
cv2.imwrite( os.path.join(root, "./last_bundle_attnmaps_idx:{0}.png".format(args.token_idx)), last_bundle) | sunwoo76/CrossAttentionControl-stablediffusion | visualize_comp.py | visualize_comp.py | py | 3,773 | python | en | code | 73 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
38989922806 | import argparse
import time
import absim.world as world
from absim.bayesian_agent import BayesianAgent
from absim.prob_agent import ProbAgent
from absim.prox_agent import ProxAgent
from absim.prob_prox_agent import ProbProxAgent
from absim.salesman_agent import SalesmanAgent
# Maps the CLI ``agent`` argument to the strategy class that implements it.
agent_lookup = {
    "prob" : ProbAgent,
    "prox" : ProxAgent,
    "prob_prox" : ProbProxAgent,
    "salesman" : SalesmanAgent,
    "bayes" : BayesianAgent
}
def parse_world(fname):
    """Parses a scavenger hunt world from a datfile.

    Parameters
    ----------
    fname : str
        source file name

    Returns
    -------
    world.World
        finalized scavenger hunt world
    list of str
        scavenger hunt
    str
        start location name

    Raises
    ------
    ValueError
        if a line is malformed or appears outside a known section
        (the original used ``assert``, which is stripped under ``-O``)
    """
    sec = None
    start_loc = None
    conns = []   # 3-tuples (from, to, cost)
    nodes = {}   # User-specified loc names -> integer IDs
    node_count = 0
    distrs = []
    hunt = []
    # ``with`` guarantees the handle is closed even if parsing raises
    # (the original leaked the file on any parse error).
    with open(fname, "r") as src:
        for line in src:
            # Blank lines and comments
            line = line.strip()
            if len(line) == 0 or line[0] == '#':
                continue
            # Section header line, e.g. [map] or [distr]
            if line[0] == '[':
                sec = line[1:line.find(']')]
                continue
            # Map section: "<from> <to> <cost>"; '*' marks the start node
            if sec == "map":
                args = line.split()
                if len(args) != 3:
                    raise ValueError("malformed map line: %s" % line)
                n_from, n_to = args[0], args[1]
                # Parse for starting location
                if '*' in n_from:
                    n_from = n_from.replace('*', '')
                    if start_loc is None:
                        start_loc = n_from
                elif '*' in n_to:
                    n_to = n_to.replace('*', '')
                    if start_loc is None:
                        start_loc = n_to
                cost = float(args[2])
                if n_from not in nodes:
                    nodes[n_from] = node_count
                    node_count += 1
                if n_to not in nodes:
                    nodes[n_to] = node_count
                    node_count += 1
                conns.append((n_from, n_to, cost))
            # Distribution section: "<obj> (<loc>... <prob>)..."
            elif sec == "distr":
                args = line.split()
                if len(args) <= 2:
                    raise ValueError("malformed distribution line: %s" % line)
                obj = args[0]
                events = []
                ind = 1
                if obj not in hunt:
                    hunt.append(obj)
                while ind < len(args):
                    locs = []
                    prob_ind = ind
                    # Consume location names until the probability token.
                    while args[prob_ind] in nodes:
                        locs.append(nodes[args[prob_ind]])
                        prob_ind += 1
                    prob_arg = args[prob_ind]
                    # Probabilities may be fractions ("1/3") or floats.
                    if '/' in prob_arg:
                        frac = prob_arg.split('/')
                        prob = float(frac[0]) / float(frac[1])
                    else:
                        prob = float(prob_arg)
                    events.append(world.Event(obj, locs, prob))
                    ind = prob_ind + 1
                distrs.append(world.Distribution(events))
            else:
                raise ValueError("line outside of a known section: %s" % line)
    # Build graph
    g = world.Graph(node_count)
    for conn in conns:
        id_from, id_to, cost = nodes[conn[0]], nodes[conn[1]], conn[2]
        g.connect(id_from, id_to, cost)
        g.name_ids[conn[0]] = id_from
        g.name_ids[conn[1]] = id_to
    g.finalize()
    # Build world
    w = world.World(g, distrs)
    w.finalize()
    return w, hunt, start_loc
def simulate(world, hunt, start_loc, args):
    """Run ``args.trials`` scavenger hunts and return the mean distance.

    Parameters
    ----------
    world : world.World
        scavenger hunt world
    hunt : list of str
        objects to find
    start_loc : str
        starting node name
    args : Namespace
        cmdline args parsed by argparse

    Returns
    -------
    float
        average distance traveled across all trials
    """
    agent = agent_lookup[args.agent](world, hunt, world.node_id(start_loc))
    agent.epoch()
    trials = args.trials
    if not args.suppress:
        print(">>> Running %s trials of %s" % (trials, agent.__class__.__name__))
    distance_total = 0
    runtime_total = 0
    for trial in range(trials):
        world.populate()
        agent.setup()
        started = time.time()
        # Step the agent until it has found every hunt object.
        while not agent.done():
            agent.objs_at_loc = world.objs_at(agent.loc)
            agent.run()
        runtime_total += time.time() - started
        distance_total += agent.travel_distance
        if not args.suppress:
            print("Progress: {:2.1%}".format(trial / trials), end="\r")
    avg_distance = distance_total / trials
    avg_runtime = runtime_total / trials
    if not args.suppress:
        print("Average distance: %s" % avg_distance)
        if args.runtime:
            print("Average runtime: %ss" % avg_runtime)
    return avg_distance
if __name__ == "__main__":
# Parse cmdline args
ap = argparse.ArgumentParser()
ap.add_argument("datfile", help="scavenger hunt world file", type=str)
ap.add_argument("agent", help="algorithm to run", type=str)
ap.add_argument("-t", "--trials", help="number of trials to run", type=int,
default=1)
ap.add_argument("-s", "--suppress", help="silence output",
action='store_true')
ap.add_argument("-r", "--runtime", help="show average runtime",
action='store_true')
args = ap.parse_args()
# Generate scavenger hunt problem and simulate
world, hunt, start_loc = parse_world(args.datfile)
simulate(world, hunt, start_loc, args)
| utexas-bwi/scavenger_hunt | bwi_scavenger/scripts/absim/hunt.py | hunt.py | py | 5,517 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "absim.prob_agent.ProbAgent",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "absim.prox_agent.ProxAgent",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "absim.prob_prox_agent.ProbProxAgent",
"line_number": 15,
"usage_type": "name"
},
... |
8691148159 | from typing import List
# Definition for a Node.
class Node:
    """Node of an N-ary tree: a value plus a list of child nodes."""

    def __init__(self, val=None, children=None):
        self.val = val
        # Give every node its own fresh list; a shared mutable default
        # would leak children between instances.
        if children is None:
            children = []
        self.children = children
class Solution:
    def findRoot(self, tree: List['Node']) -> 'Node':
        """Return the root of the N-ary tree whose nodes are listed (in
        any order) in *tree*: the one node that never appears in any
        node's children. Returns None for an empty list.
        """
        if not tree:
            return None
        # Collect every value that occurs as a child; the root's value
        # is the only one missing from this set.
        child_vals = set()
        for node in tree:
            for child in node.children:
                child_vals.add(child.val)
        for node in tree:
            if node.val not in child_vals:
                return node
        return None
| songkuixi/LeetCode | Python/Find Root of N-Ary Tree.py | Find Root of N-Ary Tree.py | py | 631 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.