seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7667261447 | import pandas as pd
import numpy as np
import pickle
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score, cohen_kappa_score, accuracy_score, recall_score, precision_score, roc_auc_score, confusion_matrix
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score, RandomizedSearchCV
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
def performance(y_true, y_pred, id_model):
    """
    Calculate performance metrics
    :param y_true: array with true values
    :param y_pred: array with predicted values
    :param id_model: model identifier
    :return: DataFrame with one row per metric ('metric', 'score_<id_model>')
    """
    # Collect every metric under its name; insertion order fixes the row order.
    scores = {
        'accuracy': accuracy_score(y_true, y_pred),
        'recall': recall_score(y_true, y_pred),
        'precision': precision_score(y_true, y_pred),
        'kappa': cohen_kappa_score(y_true, y_pred),
        'f1': f1_score(y_true, y_pred),
        'auc': roc_auc_score(y_true, y_pred),
    }
    # One row per metric, labelled 'a'..'f' to match the historical layout.
    return pd.DataFrame(
        [(metric, value) for metric, value in scores.items()],
        index=list('abcdef'),
        columns=['metric', 'score_' + id_model],
    )
def logistic_reg(df, class_column, id_model):
    """
    Train Logistic Regression
    :param df: dataframe with features and class
    :param class_column: column name for class
    :param id_model: model identifier
    :return model_rlog: trained model
    :return performance_rlog: dataframe with performance metrics
    :return cm_log: confusion matrix
    """
    # Partition in training and testing
    x = df.loc[:, df.columns != class_column]  # select all columns except the class
    y = df[class_column]
    x_train, x_test, y_train, y_test = train_test_split(
        x,
        y,
        stratify=y,
        test_size=0.20,
        random_state=1234
    )
    # Scaling: fit on the TRAINING split only.  Bug fix: the original fitted
    # the scaler on the full dataset, leaking test-set statistics into training.
    scaler = StandardScaler().fit(x_train)
    x_train_sc = scaler.transform(x_train)
    x_test_sc = scaler.transform(x_test)
    # Generate LR model (elastic-net penalty, balanced classes)
    log_model = LogisticRegression(solver='saga',
                                   penalty='elasticnet',  # for elastic net penalty
                                   l1_ratio=0.5,
                                   class_weight='balanced',  # class balance
                                   max_iter=5000)
    # Fitting
    model_rlog = log_model.fit(x_train_sc, y_train)
    # Testing
    y_pred = model_rlog.predict(x_test_sc)
    # Evaluating performance
    performance_rlog = performance(y_test, y_pred, id_model)
    # Confusion Matrix
    cm_log = confusion_matrix(y_test, y_pred)
    return model_rlog, performance_rlog, cm_log
def random_forest(df, class_column, id_model):
    """
    Train Random Forest Classifier
    :param df: dataframe with features and class
    :param class_column: column name for class
    :param id_model: model identifier
    :return rf_random: fitted RandomizedSearchCV (exposes the best RF model)
    :return performance_randomf: dataframe with performance metrics
    :return cm_rf: confusion matrix
    """
    # Partition in training and testing
    x = df.loc[:, df.columns != class_column]  # select all columns except the class
    y = df[class_column]
    # Bug fix: the split was missing entirely, so x_train/y_train/x_test/y_test
    # were undefined names.  Same split settings as the sibling trainers.
    x_train, x_test, y_train, y_test = train_test_split(
        x,
        y,
        stratify=y,
        test_size=0.20,
        random_state=1234
    )
    # Prepare hyperparameters to test
    random_grid = {
        'n_estimators': [250, 500, 1000, 1500, 2000],  # Number of trees in random forest
        'max_features': ['sqrt'],  # Features per split ('auto' was removed in sklearn 1.3; 'sqrt' is its classifier equivalent)
        'max_depth': [10, 25, 50, 75, 100],  # Maximum number of levels in tree
        'min_samples_split': [5, 10, 15, 20, 30, 40],  # Minimum number of samples required to split a node
        'min_samples_leaf': [5, 10, 15, 20]  # Minimum number of samples required at each leaf node
    }
    # Create a base model to adjust
    rf = RandomForestClassifier()
    # Perform a Random Search of parameters (3 cross validation, 100 different combinations, use all available cores)
    rf_random = RandomizedSearchCV(
        estimator=rf,
        scoring='f1',
        param_distributions=random_grid,
        n_iter=100,
        cv=3,  # Bug fix: was the invalid keyword 'v=3'
        verbose=2,
        random_state=42,
        n_jobs=-1
    )
    # Fitting
    rf_random.fit(x_train, y_train)
    # Testing
    y_pred = rf_random.predict(x_test)
    # Evaluating performance (bug fix: 'modelo' was an undefined name)
    performance_randomf = performance(y_test, y_pred, id_model)
    # Confusion Matrix
    cm_rf = confusion_matrix(y_test, y_pred)
    return rf_random, performance_randomf, cm_rf
def xgboost(df, class_column, id_model):
    """
    Train XGBoost Classifier
    :param df: dataframe with features and class
    :param class_column: column name for class
    :param id_model: model identifier
    :return xgb_random: fitted RandomizedSearchCV (exposes the best XGB model)
    :return performance_xgb: dataframe with performance metrics
    :return cm_xgb: confusion matrix
    """
    # Partition in training and testing
    x = df.loc[:, df.columns != class_column]  # select all columns except the class
    y = df[class_column]
    # Bug fix: the split was missing entirely, so x_train/y_train/x_test/y_test
    # were undefined names.  Same split settings as the sibling trainers.
    x_train, x_test, y_train, y_test = train_test_split(
        x,
        y,
        stratify=y,
        test_size=0.20,
        random_state=1234
    )
    # prepare the hyperparameters to test
    random_grid_xgb = {
        # Parameters that we are going to tune.
        'learning_rate': [0.01, 0.02, 0.04, 0.06, 0.08, 0.1],
        'n_estimators': [500, 750, 1000, 1250, 1500],
        'max_depth': [5, 10, 20, 30],
        'subsample': [0.8, 0.9, 1],
        'colsample_bytree': [0.4, 0.6, 0.8, 1],
        'gamma': [0.1, 0.5, 0.7, 1]
    }
    xgb = XGBClassifier()  # base model
    xgb_random = RandomizedSearchCV(
        estimator=xgb,
        scoring='f1',
        param_distributions=random_grid_xgb,
        n_iter=100,
        cv=3,
        verbose=2,
        random_state=42,
        n_jobs=-1
    )
    # Fitting
    xgb_random.fit(x_train, y_train)
    # Testing
    y_pred = xgb_random.predict(x_test)
    # Evaluating performance (bug fix: 'modelo' was an undefined name)
    performance_xgb = performance(y_test, y_pred, id_model)
    # Confusion Matrix
    cm_xgb = confusion_matrix(y_test, y_pred)
    return xgb_random, performance_xgb, cm_xgb
| Nicolas-Ferreira/ml-helper-functions | src/models/train_models.py | train_models.py | py | 6,420 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 34,
"usage_type": "call"
}... |
3687742693 | import torch
def nanminmax(tensor, operation='min', dim=None, keepdim=False):
    """
    NaN-ignoring min/max reduction.

    :param tensor: floating-point input tensor
    :param operation: 'min' or 'max'
    :param dim: dimension to reduce; None reduces over all elements
                (bug fix: the original crashed on dim=None because
                torch.min/torch.all reject dim=None as a keyword)
    :param keepdim: retain the reduced dimension (ignored when dim is None)
    :return: reduced values; positions where every input is NaN yield NaN
    """
    if operation not in ('min', 'max'):
        raise ValueError("Operation must be 'min' or 'max'.")
    mask = torch.isnan(tensor)
    # Neutral element so NaNs never win the reduction.
    replacement = float('-inf') if operation == 'max' else float('inf')
    replacement = torch.tensor(replacement, dtype=tensor.dtype, device=tensor.device)
    tensor_masked = torch.where(mask, replacement, tensor)
    if dim is None:
        values = tensor_masked.min() if operation == 'min' else tensor_masked.max()
        all_nan = mask.all()
    else:
        reducer = torch.min if operation == 'min' else torch.max
        values, _ = reducer(tensor_masked, dim=dim, keepdim=keepdim)
        all_nan = torch.all(mask, dim=dim, keepdim=keepdim)
    # Restore NaN where the whole reduced slice was NaN.
    nan_fill = torch.tensor(float('nan'), dtype=tensor.dtype, device=tensor.device)
    return torch.where(all_nan, nan_fill, values)
def nanstd(tensor, dim=None):
    """NaN-ignoring population standard deviation along *dim* (dim is kept)."""
    centre = torch.nanmean(tensor, dim=dim, keepdim=True)
    # NaN deviations stay NaN and are skipped again by nanmean.
    variance = torch.nanmean((tensor - centre) ** 2, dim=dim, keepdim=True)
    return torch.sqrt(variance)
def zscore(tensor, dim=None):
    """Standard score using NaN-aware mean/std; NaN inputs stay NaN."""
    centre = torch.nanmean(tensor, dim=dim, keepdim=True)
    return (tensor - centre) / nanstd(tensor, dim=dim)
def torch_nan_to_num(input_tensor, nan=0.0, posinf=None, neginf=None):
    """
    Replace NaN / +inf / -inf entries (a torch.nan_to_num work-alike).

    :param input_tensor: input tensor
    :param nan: value substituted for NaN entries
    :param posinf: value substituted for +inf entries (None leaves them)
    :param neginf: value substituted for -inf entries (None leaves them)
    :return: new tensor with the replacements applied
    """
    def _fill(value):
        # Bug fix: build replacements on the input's dtype/device; the
        # original used a default CPU float tensor, which fails for CUDA
        # inputs and can silently promote dtypes.
        return torch.tensor(value, dtype=input_tensor.dtype, device=input_tensor.device)

    output_tensor = torch.where(torch.isnan(input_tensor), _fill(nan), input_tensor)
    if posinf is not None:
        output_tensor = torch.where(torch.isposinf(output_tensor), _fill(posinf), output_tensor)
    if neginf is not None:
        output_tensor = torch.where(torch.isneginf(output_tensor), _fill(neginf), output_tensor)
    return output_tensor
class MinMaxScaler:
    """Column-wise min-max scaler that ignores NaNs while fitting."""

    def __init__(self):
        # Per-column extrema; populated by fit().
        self.min = None
        self.max = None

    def fit(self, data):
        """Record NaN-aware per-column minima and maxima (reduced over dim 0)."""
        self.min = nanminmax(data, 'min', dim=0)
        self.max = nanminmax(data, 'max', dim=0)

    def transform(self, data):
        """Map values into [0, 1]; the epsilon guards constant columns."""
        span = self.max - self.min + 1e-10
        return (data - self.min) / span

    def fit_transform(self, data):
        """Convenience wrapper: fit() then transform() on the same tensor."""
        self.fit(data)
        return self.transform(data)
class QuantileTransformer:
    """Map values to [0, 1] by their position among fitted quantiles."""

    def __init__(self, n_quantiles=100):
        self.n_quantiles = n_quantiles
        self.quantiles = None  # sorted 1-D tensor, set by fit()

    def fit(self, data):
        """Store n_quantiles evenly spaced quantiles of *data*."""
        self.quantiles = torch.quantile(data, torch.linspace(0, 1, self.n_quantiles))

    def transform(self, data):
        """
        Rank each element of *data* against the fitted quantiles.

        Bug fix: the original sorted *data* before ranking, so the output no
        longer corresponded element-wise to the input; we now rank the data
        in its original order.
        """
        ranks = torch.searchsorted(self.quantiles, data)
        return ranks / (self.n_quantiles - 1)

    def fit_transform(self, data):
        """Fit on *data* and return its transformed values."""
        self.fit(data)
        return self.transform(data)
| zzy99/torch_preprocessing | torch_preprocessing.py | torch_preprocessing.py | py | 2,744 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.isnan",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.where",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 14... |
21680265073 | import torch
import numpy as np
'''
목표 MobileNet v2에 빠르게 QIL 적용
1. alexnet training <- success
2. apply QIL to alexnet (= reproduce)
- know How to quantize ex ) aware quantization - > pytorch source
- know How to apply QIL on alexnet
- conduct training
3. Apply QIL to MobileNet v2
'''
class Quantizer:
    """
    QIL-style quantizer: prune/clip/transform a 4-D tensor in place, then
    discretize it to a fixed number of levels.
    """

    # Number of bits used by the discretizer.  NOTE(review): 32 means
    # q_D = 2**32, i.e. effectively no rounding for float32 values —
    # confirm the intended bit width.
    discretization_level = 32

    def __init__(self):
        torch.manual_seed(0)
        # Interval centers (c*) and half-widths (d*) for weights (w) and
        # activations (x), plus the gamma exponent for the weight mapping.
        self.cw, self.dw, self.cx, self.dx, self.gamma = torch.randn(5) - 0.5
        print(self.cw, self.dw, self.cx, self.dx, self.gamma)

    def quantize(self, target, param):
        """Transform then discretize *param* in place.

        :param target: 'weight' or 'activation' (anything else only warns)
        :param param: 4-D tensor modified in place
        """
        # Bug fix: the original compared strings with `is`, which only works
        # by accident of CPython string interning; use `==`.
        if target == 'weight':
            self.transfomer(param, self.cw, self.dw, self.gamma)
        elif target == 'activation':
            self.transfomer(param, self.cx, self.dx)
        else:
            print("Warning : Target is not weight or activation")
        self.discretizer(param)

    def transfomer(self, tensor, c_delta, d_delta, r=1):
        """Piecewise map each element: prune values below the interval,
        clip values above it to +/-1, and linearly map the interval itself
        (raised to the power r)."""
        outplane, inplane, kh, kw = tensor.size()
        for o in range(outplane):
            for i in range(inplane):
                for h in range(kh):
                    for w in range(kw):
                        t = tensor[o, i, h, w]
                        if abs(t) < c_delta - d_delta:
                            value = 0.0
                        elif abs(t) > c_delta + d_delta:
                            value = t.sign()
                        else:
                            a, b = (0.5 / d_delta), (-0.5 * c_delta / d_delta + 0.5)
                            # Bug fix: the original computed a*abs(w) + b using
                            # the loop index w instead of the element value t.
                            value = (a * abs(t) + b) ** r * t.sign()
                        tensor[o, i, h, w] = value

    def discretizer(self, tensor):
        """Round *tensor* in place onto a grid of 2**discretization_level steps."""
        q_D = pow(2, Quantizer.discretization_level)
        torch.round_(tensor.mul_(q_D))
        tensor.div_(q_D)
if __name__ == '__main__':
    # Demo: quantize a random weight tensor in place and show the effect.
    demo_weights = torch.rand(2, 2, 2, 2).sub_(0.5)
    demo_activation = torch.rand(4, 4, 4, 4)
    quantizer = Quantizer()
    print("before \n", demo_weights)
    quantizer.quantize('weight', demo_weights)
    print("after \n", demo_weights)
| su9262/2020 | QIL.py | QIL.py | py | 2,120 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.manual_seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.round_",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_num... |
13310547888 | import plotly.offline as py
import plotly.graph_objs as go
import sensor_input
import random
from pandas import Series
def plot_spectrum_from_files(name, files):
    """Read one spectrum per file and plot them together under *name*."""
    spectrums = [sensor_input.read_spectrum_from_file(path) for path in files]
    plot_spectrums(name, spectrums)
def plot_data_from_pandas(pandas):
    """
    Build one blue Scatter trace per pandas object.

    Bug fix: the original read the y-values from an undefined name
    ``spectrum`` (a NameError on the first call); it now reads the
    'power' values from the loop variable itself.
    NOTE(review): x=panda plots the whole object on the x axis — confirm
    the intended x column against get_plot_data, which uses 'wavelength'.
    """
    data = []
    for panda in pandas:
        data.append(go.Scatter(
            x=panda,
            y=panda["power"],
            marker=dict(
                color='rgba(10, 10, 240, .9)'
            )))
    return data
def get_plot_data(name, spectrums):
    """Build named Scatter traces; one random RGB colour shared by all of them."""
    r = random.randrange(0, 255)
    g = random.randrange(0, 255)
    b = random.randrange(0, 255)
    colour = 'rgba(%s, %s, %s, .9)' % (r, g, b)
    traces = []
    for spectrum in spectrums:
        traces.append(go.Scatter(
            x=spectrum["wavelength"],
            y=spectrum["power"],
            name=name,
            marker=dict(color=colour)))
    return traces
def read_plot_data_from_files(name, files):
    """Load spectra from *files* and convert them to plotly traces."""
    loaded = [sensor_input.read_spectrum_from_file(path) for path in files]
    return get_plot_data(name, loaded)
def plot_spectrum_from_files_group(file_groups):
    """Plot every named group of spectrum files on one 'all_ploted' figure."""
    data = []
    for group_name, group_files in file_groups.items():
        data.extend(read_plot_data_from_files(group_name, group_files))
    py.plot(data, filename="all_ploted")
def plot_spectrums(name,spectrums):
    """Render the given spectra to an offline plotly file named *name*."""
    data = get_plot_data(name,spectrums)
py.plot(data, filename=name) | zalum/tum-milk | sensor/sensor_output.py | sensor_output.py | py | 1,564 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sensor_input.read_spectrum_from_file",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 17,
"usage_type": "name"
},
{
"... |
18187534019 | import pytest
from unittest.mock import sentinel, Mock
import bokeh.palettes
import pandas as pd
import pandas.testing as pdt
import datetime as dt
import numpy as np
import glob
import forest.drivers
from forest.drivers import earth_networks
# Two raw Earth Networks flash records (type, timestamp, lat, lon, ...)
# used as the on-disk fixture by the loader test below.
LINES = [
    "1,20190417T000001.440,+02.7514400,+031.9206400,-000001778,000,15635,007,001",
    "1,20190417T000001.093,+02.6388400,+031.9008800,+000002524,000,14931,007,012",
]
def test_earth_networks(tmpdir):
    """Loader parses a two-record flash file into typed columns."""
    sample = str(tmpdir / "sample.txt")
    with open(sample, "w") as stream:
        stream.writelines(LINES)
    loader = earth_networks.Loader()
    frame = loader.load([sample])
    first = frame.iloc[0]
    atol = 0.000001
    if isinstance(first["date"], dt.datetime):
        # Pandas <0.25.x
        assert first["date"] == dt.datetime(2019, 4, 17, 0, 0, 1, 440000)
    else:
        # Pandas 1.0.x
        assert first["date"] == np.datetime64("2019-04-17T00:00:01.440000000")
    assert first["flash_type"] == "IC"
    assert abs(first["latitude"] - 2.75144) < atol
    assert abs(first["longitude"] - 31.92064) < atol
def test_dataset():
    """The driver registry hands back an earth_networks Dataset."""
    result = forest.drivers.get_dataset("earth_networks")
    assert isinstance(result, forest.drivers.earth_networks.Dataset)
def get_navigator(settings):
    """Helper: build an earth_networks navigator from driver settings."""
    return forest.drivers.get_dataset("earth_networks", settings).navigator()
def test_dataset_navigator():
    """A pattern-configured dataset exposes a Navigator."""
    result = get_navigator({"pattern": "*.txt"})
    assert isinstance(result, forest.drivers.earth_networks.Navigator)
def test_navigator_variables():
    """Navigator advertises the density and time-since-flash variables."""
    navigator = earth_networks.Navigator([])
    expected = {
        "Strike density (cloud-ground)",
        "Strike density (intra-cloud)",
        "Strike density (total)",
        "Time since flash (cloud-ground)",
        "Time since flash (intra-cloud)",
        "Time since flash (total)",
    }
    assert set(navigator.variables(None)) == expected
def test_view_render_density():
    """Rendering a strike-density variable selects the Spectral palette."""
    locator = Mock(specs=["find"])
    loader = Mock(specs=["load"])
    loader.load.return_value = pd.DataFrame({
        "flash_type": [],
        "longitude": [],
        "latitude": [],
    })
    view = earth_networks.View(loader, locator)
    view.render({
        "variable": "Strike density (cloud-ground)",
        "valid_time": "1970-01-01T00:00:00Z",
    })
    expected = bokeh.palettes.all_palettes["Spectral"][8]
    assert view.color_mappers["image"].palette == expected
def test_view_render_time_since_flash():
    """Rendering a time-since-flash variable selects the RdGy palette."""
    locator = Mock(specs=["find"])
    loader = Mock(specs=["load"])
    loader.load.return_value = pd.DataFrame({
        "date": [],
        "flash_type": [],
        "longitude": [],
        "latitude": [],
    })
    view = earth_networks.View(loader, locator)
    view.render({
        "variable": "Time since flash (cloud-ground)",
        "valid_time": "1970-01-01T00:00:00Z",
    })
    expected = bokeh.palettes.all_palettes["RdGy"][8]
    assert view.color_mappers["image"].palette == expected
@pytest.mark.parametrize(
    "variable, expect",
    [
        pytest.param(
            "Time since flash (intra-cloud)",
            [
                ("Variable", "@variable"),
                ("Time window", "@window{00:00:00}"),
                ("Period start", "@date{%Y-%m-%d %H:%M:%S}"),
                ("Since start", "@image{00:00:00}"),
            ],
            id="time since flash",
        ),
        pytest.param(
            "Strike density (cloud-ground)",
            [
                ("Variable", "@variable"),
                ("Time window", "@window{00:00:00}"),
                ("Period start", "@date{%Y-%m-%d %H:%M:%S}"),
                ("Value", "@image @units"),
            ],
            id="strike density",
        ),
    ],
)
def test_view_tooltips(variable, expect):
    """Hover tooltips differ between density and time-since-flash views."""
    assert earth_networks.View.tooltips(variable) == expect
@pytest.mark.parametrize(
    "variable, expect",
    [
        pytest.param(
            "Time since flash (intra-cloud)",
            {"@date": "datetime", "@window": "numeral", "@image": "numeral"},
            id="time since flash",
        ),
        pytest.param(
            "Strike density (cloud-ground)",
            {"@date": "datetime", "@window": "numeral"},
            id="strike density",
        ),
    ],
)
def test_view_formatters(variable, expect):
    """Hover formatters differ between density and time-since-flash views."""
    assert earth_networks.View.formatters(variable) == expect
def test_view_since_flash():
    """since_flash returns seconds elapsed from the period start."""
    view = earth_networks.View(Mock(), Mock())
    strike_times = ["2020-01-01T00:00:00Z", "2020-01-01T01:00:00Z"]
    result = view.since_flash(strike_times, "2020-01-01T00:00:00Z")
    pdt.assert_series_equal(result, pd.Series([0.0, 3600.0]))
| MetOffice/forest | test/test_earth_networks.py | test_earth_networks.py | py | 4,847 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "forest.drivers.earth_networks.Loader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "forest.drivers.earth_networks",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "attribute"
},
... |
34529138843 | from pprint import pprint
from azdev.operations.regex import (
get_all_tested_commands_from_regex,
search_argument,
search_argument_context,
search_command,
search_command_group)
# pylint: disable=line-too-long
# one line test
def test_one_line_regex():
    """Single-line self.cmd / *_cmd patterns are all detected (15 matches)."""
    lines = [
        # start with self.cmd.
        'self.cmd(\'image builder create -n {tmpl_02} -g {rg} --identity {ide} --scripts {script} --image-source {img_src} --build-timeout 22\')\n',
        # start with blanks, match self.cmd("").
        '        self.cmd("role assignment create --assignee {assignee} --role {role} --scope {scope}")\n',
        # start with blanks, match self.cmd('').
        '        self.cmd(\'role assignment create --assignee {assignee} --role {role} --scope {scope}\')\n',
        # start with multiple blanks, characters, self.cmd
        '        identity_id = self.cmd(\'identity create -g {rg} -n {ide}\').get_output_in_json()[\'clientId\']\n',
        # start with blanks, match self.cmd, use fstring, ''
        '        self.cmd(f\'afd profile usage -g {resource_group} --profile-name {profile_name}\', checks=usage_checks)',
        # start with blanks, match self.cmd, use fstring, ""
        '        self.cmd(f"afd profile usage -g {resource_group} --profile-name {profile_name}", checks=usage_checks)',
        # one line docstring '''
        '        self.cmd("""afd profile usage -g {resource_group} --profile-name {profile_name}""", checks=usage_checks)',
        # one line docstring """
        '        self.cmd("""afd profile usage -g {resource_group} --profile-name {profile_name}""", checks=usage_checks)',
        # .format
        '        self.cmd("afd profile usage -g {} --profile-name {}".format(group, name)',
        # %s
        '        self.cmd("afd profile usage -g %s --profile-name %s", group, name)',
        # end with hashtag, should match.
        '        self.cmd(f"afd profile usage -g {resource_group} --profile-name {profile_name}", checks=usage_checks)  # xxx',
        # start with hashtag, shouldn't match.
        '        # self.cmd(f"afd profile usage -g {resource_group} --profile-name {profile_name}", checks=usage_checks)',
        # start with blanks, match *_cmd = ''.
        '        stop_cmd = \'aks stop --resource-group={resource_group} --name={name}\'\n',
        # start with blanks, match *_cmd = "".
        '        enable_cmd = "aks enable-addons --addons confcom --resource-group={resource_group} --name={name} -o json"\n',
        # start with blanks, match *_cmd = f''.
        '        disable_cmd = f\'aks disable-addons --addons confcom --resource-group={resource_group} --name={name} -o json\'\n',
        # start with blanks, match *_cmd = f"".
        '        browse_cmd = f"aks browse --resource-group={resource_group} --name={name} --listen-address=127.0.0.1 --listen-port=8080 --disable-browser"\n',
    ]
    detected = get_all_tested_commands_from_regex(lines)
    pprint(detected, width=1000)
    assert len(detected) == 15
# multiple lines test
def test_multiple_lines_regex():
    """Multi-line self.cmd / *_cmd continuations are all detected (22 matches)."""
    lines = [
        # start with blanks, self.cmd, one cmd line, multiple checks.
        '        self.cmd(\'aks list -g {resource_group}\', checks=[\n',
        '            self.check(\'[0].type\', \'{resource_type}\'),\n',
        '            StringContainCheck(aks_name),\n',
        '            StringContainCheck(resource_group)\n',
        '        ])\n',
        # start with blanks, self.cmd, multiple cmd lines, multiple checks.
        '        self.cmd(\'image builder create -n {tmpl_02} -g {rg} --identity {ide} --scripts {script} --image-source {img_src} --build-timeout 22\'\n',
        '                 \' --managed-image-destinations img_1=westus \' + out_3,\n',
        '                 checks=[\n',
        '                     self.check(\'name\', \'{tmpl_02}\'), self.check(\'provisioningState\', \'Succeeded\'),\n',
        '                     self.check(\'length(distribute)\', 2),\n',
        '                     self.check(\'distribute[0].imageId\', \'/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/images/img_1\'),\n',
        '                     self.check(\'distribute[0].location\', \'westus\'),\n',
        '                     self.check(\'distribute[0].runOutputName\', \'img_1\'),\n',
        '                     self.check(\'distribute[0].type\', \'ManagedImage\'),\n',
        '                     self.check(\'distribute[1].imageId\', \'/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/images/img_2\'),\n',
        '                     self.check(\'distribute[1].location\', \'centralus\'),\n',
        '                     self.check(\'distribute[1].runOutputName\', \'img_2\'),\n',
        '                     self.check(\'distribute[1].type\', \'ManagedImage\'),\n',
        '                     self.check(\'buildTimeoutInMinutes\', 22)\n',
        '                 ])\n',
        # start with blanks, characters, self.cmd, but have line break at self.cmd(
        '        ipprefix_id = self.cmd(\n',
        '            \'az network public-ip prefix create -g {rg} -n {ipprefix_name} --location {location} --length 29\'). \\n',
        '            get_output_in_json().get("id")\n',
        # start with blanks, match *_cmd = '', multiple lines.
        '        create_cmd = \'aks create -g {resource_group} -n {name} -p {dns_name_prefix} --ssh-key-value {ssh_key_value} \' \\n',
        '                     \'-l {location} --service-principal {service_principal} --client-secret {client_secret} -k {k8s_version} \' \\n',
        '                     \'--node-vm-size {vm_size} \' \\n',
        '                     \'--tags scenario_test -c 1 --no-wait\'\n',
        '        update_cmd = \'aks update --resource-group={resource_group} --name={name} \' \\n',
        '                     \'--aad-admin-group-object-ids 00000000-0000-0000-0000-000000000002 \' \\n',
        '                     \'--aad-tenant-id 00000000-0000-0000-0000-000000000003 -o json\'\n',
        '        enable_autoscaler_cmd = \'aks update --resource-group={resource_group} --name={name} \' \\n',
        '                                \'--tags {tags} --enable-cluster-autoscaler --min-count 2 --max-count 5\'\n',
        '        disable_autoscaler_cmd = \'aks update --resource-group={resource_group} --name={name} \' \\n',
        '                                 \'--tags {tags} --disable-cluster-autoscaler\'\n',
        '        create_spot_node_pool_cmd = \'aks nodepool add \' \\n',
        '                                    \'--resource-group={resource_group} \' \\n',
        '                                    \'--cluster-name={name} \' \\n',
        '                                    \'-n {spot_node_pool_name} \' \\n',
        '                                    \'--priority Spot \' \\n',
        '                                    \'--spot-max-price {spot_max_price} \' \\n',
        '                                    \'-c 1\'\n',
        '        create_ppg_node_pool_cmd = \'aks nodepool add \' \\n',
        '                                   \'--resource-group={resource_group} \' \\n',
        '                                   \'--cluster-name={name} \' \\n',
        '                                   \'-n {node_pool_name} \' \\n',
        '                                   \'--ppg={ppg} \'\n',
        '        upgrade_node_image_only_cluster_cmd = \'aks upgrade \' \\n',
        '                                              \'-g {resource_group} \' \\n',
        '                                              \'-n {name} \' \\n',
        '                                              \'--node-image-only \' \\n',
        '                                              \'--yes\'\n',
        '        upgrade_node_image_only_nodepool_cmd = \'aks nodepool upgrade \' \\n',
        '                                               \'--resource-group={resource_group} \' \\n',
        '                                               \'--cluster-name={name} \' \\n',
        '                                               \'-n {node_pool_name} \' \\n',
        '                                               \'--node-image-only \' \\n',
        '                                               \'--no-wait\'\n',
        '        get_nodepool_cmd = \'aks nodepool show \' \\n',
        '                           \'--resource-group={resource_group} \' \\n',
        '                           \'--cluster-name={name} \' \\n',
        '                           \'-n {node_pool_name} \'\n',
        # start with blanks, match *_cmd = '', multiple lines, use .format
        '        install_cmd = \'aks install-cli --client-version={} --install-location={} --base-src-url={} \' \\n',
        '                      \'--kubelogin-version={} --kubelogin-install-location={} --kubelogin-base-src-url={}\'.format(version,\n',
        '                                                                                                                   ctl_temp_file,\n',
        '                                                                                                                   "",\n',
        '                                                                                                                   version,\n',
        '                                                                                                                   login_temp_file,\n',
        '                                                                                                                   "")\n',
        # start with blanks, match *_cmd, use docstring """
        '        create_cmd = """storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds --domain-name\n',
        '        {domain_name} --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid\n',
        '        {domain_guid} --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""\n',
        '        update_cmd = """storage account update -n {sc} -g {rg} --enable-files-adds --domain-name {domain_name}\n',
        '        --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid {domain_guid}\n',
        '        --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""\n',
        # start with blanks, match *_cmd, use docstring '''
        '        update_cmd = \'\'\'storage account update -n {sc} -g {rg} --enable-files-adds --domain-name {domain_name}\n',
        '        --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid {domain_guid}\n',
        '        --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}\'\'\'\n',
        # start with blanks, match *_cmd*, .format
        '        create_cmd1 = \'az storage account create -n {} -g {} --routing-choice MicrosoftRouting --publish-microsoft-endpoint true\'.format('
        '            name1, resource_group)',
        # start with blanks, match .cmd
        '        test.cmd(\'az billing account list \'',
        '                 \'--expand "soldTo,billingProfiles,billingProfiles/invoiceSections"\',',
        '                 checks=[])',
        # start with blanks, match *Command
        '        runCommand = \'aks command invoke -g {resource_group} -n {name} -o json -c "kubectl get pods -A"\'',
        '        self.cmd(runCommand, [',
        '            self.check(\'provisioningState\', \'Succeeded\'),',
        '            self.check(\'exitCode\', 0),',
        '        ])',
        # start with blanks, match *command, use fstring, single quotation marks
        '        command = f\'afd origin-group update -g {resource_group_name} --profile-name {profile_name} \' ',
        '                  f\'--origin-group-name {origin_group_name}\'',
        # string splicing (+)
        '        self.cmd(\'spring-cloud app deployment create -g {resourceGroup} -s {serviceName} --app {app} -n green\'\n',
        '                 + \' --container-image {containerImage} --registry-username PLACEHOLDER --registry-password PLACEHOLDER\',\n',
        '                 checks=[\n',
        '                     self.check(\'name\', \'green\'),\n',
        '                 ])\n',
        # --format vs .format
        '        self.cmd(\n',
        '            \'appconfig kv import -n {config_store_name} -s {import_source} --path "{strict_import_file_path}" --format {imported_format} --profile {profile} --strict -y\')',
    ]
    detected = get_all_tested_commands_from_regex(lines)
    pprint(detected, width=1000)
    assert len(detected) == 22
def test_detect_new_command():
    """Added (+) command-registration lines resolve to full command names."""
    detected = []
    lines = [
        'with self.command_group(\'disk\', compute_disk_sdk, operation_group=\'disks\', min_api=\'2017-03-30\') as g:',
        # 1.`+ g.command(xxx)`
        '+ g.command(\'list-instances\', \'list\', command_type=compute_vmss_vm_sdk)',
        # 2.`+ g.custom_command(xxx)`
        '+ g.custom_command(\'create\', \'create_managed_disk\', supports_no_wait=True, table_transformer=transform_disk_show_table_output, validator=process_disk_or_snapshot_create_namespace)',
        # 3.`+ g.custom_show_command(xxx)`
        '+ g.custom_show_command(\'show\', \'get_vmss\', table_transformer=get_vmss_table_output_transformer(self, False))',
        # 4.`+ g.wait_command(xxx)`
        '+ g.wait_command(\'wait\', getter_name=\'get_vmss\', getter_type=compute_custom)',
    ]
    for row_num, line in enumerate(lines):
        command = search_command(line)
        if command:
            full_name = search_command_group(row_num, lines, command)
            if full_name:
                detected.append(full_name)
    pprint(detected)
    assert detected == ['disk list-instances', 'disk create', 'disk show', 'disk wait']
def test_detect_new_params():
    """Added (+) c.argument lines resolve to (command, option-list) pairs."""
    parameters = []
    lines = [
        # without scope
        '    with self.argument_context(\'disk\') as c:',
        '+        c.argument(\'network_policy\')',
        '+        c.argument(\'zone\', zone_type, min_api=\'2017-03-30\', options_list=[\'--zone\']) ',
        # scope
        '    for scope in [\'disk\', \'snapshot\']:',
        '        with self.argument_context(scope) as c:',
        '+            c.argument(\'size_gb\', options_list=[\'--size-gb\', \'-z\'], help=\'size in GB. Max size: 4095 GB (certain preview disks can be larger).\', type=int)',
        # scope with multi args
        '    for scope in [\'signalr create\', \'signalr update\']:',
        '        with self.argument_context(scope, arg_group=\'Network Rule\') as c:',
        '+            c.argument(\'default_action\', arg_type=get_enum_type([\'Allow\', \'Deny\']), help=\'Default action to apply when no rule matches.\', required=False)',
        # scope AND format
        '    for scope in [\'create\', \'update\']:',
        '        with self.argument_context(\'vmss run-command {}\'.format(scope)) as c:',
        '+            c.argument(\'vmss_name\', run_cmd_vmss_name)',
        # scope AND format
        '    for scope in [\'vm\', \'vmss\']:',
        '        with self.argument_context(\'{} stop\'.format(scope)) as c:',
        '+            c.argument(\'skip_shutdown\', action=\'store_true\', help=\'Skip shutdown and power-off immediately.\', min_api=\'2019-03-01\')',
        # multiple `[]`
        '    with self.argument_context(\'acr connected-registry create\') as c:',
        '+        c.argument(\'client_token_list\', options_list=[\'--client-tokens\'], nargs=\'+\', help=\'Specify the client access to the repositories in the connected registry. It can be in the format [TOKEN_NAME01] [TOKEN_NAME02]...\')',
        '+        c.argument(\'notifications\', options_list=[\'--notifications\'], nargs=\'+\', help=\'List of artifact pattern for which notifications need to be generated. Use the format "--notifications [PATTERN1 PATTERN2 ...]".\')',
        # multiple lines
        '    with self.argument_context(\'webapp update\') as c:',
        '+        c.argument(\'skip_custom_domain_verification\',',
        '+                   help="If true, custom (non *.azurewebsites.net) domains associated with web app are not verified",',
        '+                   arg_type=get_three_state_flag(return_label=True), deprecate_info=c.deprecate(expiration=\'3.0.0\'))',
        # options_list=[""] double quotes
        '    with self.argument_context(\'webapp update\') as c:',
        '+        c.argument(\'minimum_elastic_instance_count\', options_list=["--minimum-elastic-instance-count", "-i"], type=int, is_preview=True, help="Minimum number of instances. App must be in an elastic scale App Service Plan.")',
        '+        c.argument(\'prewarmed_instance_count\', options_list=["--prewarmed-instance-count", "-w"], type=int, is_preview=True, help="Number of preWarmed instances. App must be in an elastic scale App Service Plan.")',
        # self.argument_context with multi args
        '    with self.argument_context(\'appconfig kv import\', arg_group=\'File\') as c:',
        '+        c.argument(\'strict\', validator=validate_strict_import, arg_type=get_three_state_flag(), help="Delete all other key-values in the store with specified prefix and label", is_preview=True)',
        '    with self.argument_context(\'snapshot\', resource_type=ResourceType.MGMT_COMPUTE, operation_group=\'snapshots\') as c:',
        '+        c.argument(\'snapshot_name\', existing_snapshot_name, id_part=\'name\', completer=get_resource_name_completion_list(\'Microsoft.Compute/snapshots\'))',
    ]
    # pattern = r'\+\s+c.argument\((.*)\)?'
    for row_num, line in enumerate(lines):
        params, _ = search_argument(line)
        if params:
            cmds = search_argument_context(row_num, lines)
            for cmd in cmds:
                parameters.append([cmd, params])
            continue
    pprint(parameters)
    assert parameters[0] == ['disk', ['--network-policy']]
    assert parameters[1] == ['disk', ['--zone']]
    assert parameters[2] == ['disk', ['--size-gb', '-z']]
    assert parameters[3] == ['snapshot', ['--size-gb', '-z']]
    assert parameters[4] == ['signalr create', ['--default-action']]
    assert parameters[5] == ['signalr update', ['--default-action']]
    assert parameters[6] == ['vmss run-command create', ['--vmss-name']]
    assert parameters[7] == ['vmss run-command update', ['--vmss-name']]
    assert parameters[8] == ['vm stop', ['--skip-shutdown']]
    assert parameters[9] == ['vmss stop', ['--skip-shutdown']]
    assert parameters[10] == ['acr connected-registry create', ['--client-tokens']]
    assert parameters[11] == ['acr connected-registry create', ['--notifications']]
    assert parameters[12] == ['webapp update', ['--skip-custom-domain-verification']]
    assert parameters[13] == ['webapp update', ['--minimum-elastic-instance-count', '-i']]
    assert parameters[14] == ['webapp update', ['--prewarmed-instance-count', '-w']]
    assert parameters[15] == ['appconfig kv import', ['--strict']]
    assert parameters[16] == ['snapshot', ['--snapshot-name']]
if __name__ == '__main__':
    # Allow running this module directly, outside the pytest runner.
    test_one_line_regex()
    test_multiple_lines_regex()
    test_detect_new_command()
    test_detect_new_params()
| Azure/azure-cli-dev-tools | azdev/operations/tests/test_cmdcov.py | test_cmdcov.py | py | 19,031 | python | en | code | 71 | github-code | 1 | [
{
"api_name": "azdev.operations.regex.get_all_tested_commands_from_regex",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "azdev.operations.regex.get_all_tested_commands_from_regex",
"line_num... |
72765977315 | #!/usr/bin/env python
# list of packages that should be imported for this code to work
import cobra.mit.access
import cobra.mit.session
import cobra.mit.request
import cobra.mit.naming
import yaml
import argparse
import warnings
warnings.filterwarnings("ignore")
def get_args():
    """Parse the command-line arguments.

    Returns:
        argparse.Namespace with a single ``port`` attribute (int or None).
    """
    arg_parser = argparse.ArgumentParser(description="Looking for a ports in filter. Return the contract using the filter")
    arg_parser.add_argument('-p', dest='port', help='TCP/UDP port', type=int)
    return arg_parser.parse_args()
# since some of the port values are encoded as string I need to map them back to the port interger value
# since some of the port values are encoded as string I need to map them back to the port interger value
def port_to_int(port):
    """Convert an APIC port value to an integer.

    The APIC encodes some well-known ports as names (e.g. 'https'); map those
    back to their numeric value, otherwise parse the value as an int.

    :param port: int, numeric string, or well-known port name
    :return: int port number
    :raises KeyError: if the value is a name not present in the mapping
    """
    port_map = {'unspecified': 0, 'https': 443, 'http': 80, 'smtp': 25, 'ftpData': 21, 'dns': 53, 'pop3': 110, 'rtsp': 554}
    try:
        return int(port)
    except (TypeError, ValueError):
        # Only catch conversion failures; the original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit and hid programming errors.
        return port_map[port]
# Get command line arguments
args = get_args()
# Open the yaml file with the credentials.
# safe_load avoids arbitrary object construction and works on PyYAML >= 5,
# where plain yaml.load without an explicit Loader is deprecated/removed;
# the context manager guarantees the file is closed even on parse errors.
with open('credentials_camfab.yaml', 'r') as f:
    credentials = yaml.safe_load(f)
# log into an APIC and create a directory object
ls = cobra.mit.session.LoginSession(credentials['host'], credentials['user'], credentials['pass'], secure=False, timeout=180)
md = cobra.mit.access.MoDirectory(ls)
md.login()
#Qury the APIC for all the filters
q = cobra.mit.request.ClassQuery('vzFilter')
q.subtree = 'children'
# Run the query
filters = md.query(q)
flt_list = ""
#iterate over the filters
for filt in filters:
    # print filt.dn
    # "uni/tn-common/flt-arp/e-arp" ==> E-arp == Entry
    # iterate over the ACLs
    for acl in filt.e:
        # get the filter name withiht the "E" class preposition
        flt_name = filt.name.split('/e-')[0]
        # Collect the filter if the requested port falls in the entry's
        # source port range...
        if port_to_int(acl.sFromPort) <= args.port <= port_to_int(acl.sToPort):
            flt_list += flt_name + '|'
            break
        # ...or in its destination port range.
        if port_to_int(acl.dFromPort) <= args.port <= port_to_int(acl.dToPort) :
            flt_list += flt_name + '|'
#Remove the trailing | character
flt_list = flt_list[:-1]
# Get all the contracts in the fabric
q = cobra.mit.request.ClassQuery('vzBrCP')
q.subtree = 'full'
# From the subtree return only the child that are of class vzRsSubjFiltAtt --> relationship between filter ans subject
q.subtreeClassFilter = 'vzRsSubjFiltAtt'
# Return only the Subjects that are using one of the filters that are matching the port we need.
# Basicaly we check that the DN of the vzRsSubjFiltAtt contains the name of the filter. That means that we do get back all the contracts
# however only the one that match our filter have a subject attached
q.subtreePropFilter = 'wcard(vzRsSubjFiltAtt.dn, "{}")'.format(flt_list)
# Run the query
contracts = md.query(q)
for contract in contracts:
    # If the returned contract has a subject
    if contract.subj:
        # NOTE(review): str.strip('/uni') removes any leading/trailing characters
        # from the set {'/', 'u', 'n', 'i'}, not the literal '/uni' prefix -
        # confirm the intended output (str.removeprefix is the exact operation).
        print("Contract {} is using the following filter matching the port".format(str(contract.dn).strip('/uni')))
        for subjects in contract.subj:
            for flt in subjects.rssubjFiltAtt:
                print("\t {}".format(str(flt.dn).strip('/uni')))
        print("\n")
| camrossi/aci-scripts | contractLookup.py | contractLookup.py | py | 3,057 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cobra.mit.a... |
37441147060 | """
the simulation, simulates the motors and work in the same coordinate system.
"""
import pygame
import numpy as np
import time
import math
import matplotlib.pyplot as plt
import ArduinoCommunication as Ac
import Algorithmics as Algo
# ---------- CONSTANTS -------------
# COLORS
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
# MECHANICS
SCREEN = (16, 12) # dimensions of 10'' screen
ARMS = (15, 10) # length of arm links in cm
d = 15 # distance from screen in cm
STEPS_PER_REVOLUTION = 200 # number of full steps to make a full round
STEPS_FRACTION = 8 # resolution of micro-stepping
MINIMAL_ANGLE = 2 * np.pi / (STEPS_PER_REVOLUTION * STEPS_FRACTION) # the minimal angle step of the motor in rad (it is
BITS_PER_BYTE = 8
LENGTH_OF_COMMAND = 6 # how many chars are in one command
# TIME
WANTED_RPS = 0.5 # revolutions per second of motors
ONE_STEP_DELAY = 5.0 / WANTED_RPS / STEPS_FRACTION / 1000.0 # in sec
SERIAL_BPS = 19200 # bits per second the serial can read and write
WRITE_DELAY = 1.0/(SERIAL_BPS/BITS_PER_BYTE/LENGTH_OF_COMMAND) # delay in sec after writing to prevent buffer overload
T = 1 # time of one slice in sec
dt_serial = WRITE_DELAY * 4 # time between 2 readings from serial in sec
dt_motor = ONE_STEP_DELAY * 4 # time of writing to the serial in sec
times_ideal = int(T / dt_motor) # the size of the vectors for the simulation
times_serial = int(T / dt_serial) # the amount of different values for the
TIME_TO_QUIT_SIMULATION = 2 # time to quit the simulation after finished in sec
first_point = 0, 0 # theta, phi - will be updated in the code
theta_simulation, phi_simulation = [], []
# ---------- ALGORITHMIC FUNCTION ---------------
def get_xy_by_t_line_acceleration(t):  # gets time in sec
    """
    Example trajectory: a horizontal straight line with an acceleration phase
    at the beginning and a symmetric deceleration phase at the end.
    :param t: time in seconds
    :return: tuple (x, y)
    """
    acc = 1800.0
    start_x = -SCREEN[0] / 2
    start_y = 0.8 * SCREEN[1]
    accel_dist = SCREEN[0] / 4.0
    accel_time = math.sqrt(2 * accel_dist / acc)
    peak_speed = acc * accel_time
    x, y = start_x, start_y
    if t < accel_time:
        # speeding up from rest
        x = start_x + 0.5 * acc * t ** 2
    elif accel_time < t < T - accel_time:
        # cruising at constant speed
        x = start_x + accel_dist + peak_speed * (t - accel_time)
    elif t > T - accel_time:
        # slowing back down to rest
        x = (start_x + accel_dist + peak_speed * (T - 2 * accel_time)
             + peak_speed * (t - (T - accel_time)) - 0.5 * acc * (t - (T - accel_time)) ** 2)
    return x, y
def get_xy_by_t_line_const_speed(t):  # gets time in sec
    """
    Example trajectory: a horizontal straight line traversed at constant speed
    across the full screen width during one slice time T.
    :param t: time in seconds
    :return: tuple (x, y)
    """
    origin_x = -SCREEN[0] / 2
    origin_y = 0.5 * SCREEN[1]
    return origin_x + SCREEN[0] * t / T, origin_y
# ----------- PLOTS AND GRAPHS FUNCTIONS -----------
def plot_screen(screen):
    """
    Draw the tablet screen outline (four border lines) and the arm base point.
    :param screen: pygame screen object - pygame.display.set_mode((WIDTH, HEIGHT))
    """
    left = SCREEN[0] / 2
    right = SCREEN[0] * 3 / 2
    top = d
    bottom = d + SCREEN[1]
    draw_line([left, top], [right, top], screen)
    draw_line([left, bottom], [right, bottom], screen)
    draw_line([left, top], [left, bottom], screen)
    draw_line([right, top], [right, bottom], screen)
    draw_circle([SCREEN[0], 0], 2, screen, RED)
def draw_line(start_pos, end_pos, screen):
    """
    Draw a one-pixel-wide blue line between two points given in cm.
    :param start_pos: tuple (x, y) in cm
    :param end_pos: tuple (x, y) in cm
    :param screen: pygame screen object - pygame.display.set_mode((WIDTH, HEIGHT))
    """
    start_px = [cm_to_pixels(start_pos[0]), cm_to_pixels(start_pos[1])]
    end_px = [cm_to_pixels(end_pos[0]), cm_to_pixels(end_pos[1])]
    pygame.draw.line(screen, BLUE, start_px, end_px, 1)
def draw_circle(pos, radius, screen, color):
    """
    Draw a filled circle at a position given in cm.
    :param pos: tuple (x, y) in cm
    :param radius: double - radius in pixels
    :param screen: pygame screen object - pygame.display.set_mode((WIDTH, HEIGHT))
    :param color: RGB tuple for the fill color
    """
    center_px = [cm_to_pixels(pos[0]), cm_to_pixels(pos[1])]
    pygame.draw.circle(screen, color, center_px, radius, 0)
def cm_to_pixels(length):
    """
    Convert a physical length in cm to the corresponding pixel count
    (20 pixels per cm, truncated to an int).
    :param length: double in cm
    :return: int - number of pixels
    """
    pixels_per_cm = 20
    return int(pixels_per_cm * length)
def draw_graph(x, y, title, xlabel, ylabel):
    """
    draws a graph in matplotlib (blocks until the plot window is closed)
    :param x: vector for x axis
    :param y: vector for y axis
    :param title: string for title
    :param xlabel: string for x axis label
    :param ylabel: string for y axis label
    """
    plt.plot(x, y)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()
# width and height of the window of the pygame simulation
WIDTH = cm_to_pixels(2 * SCREEN[0])
HEIGHT = cm_to_pixels(2 * (SCREEN[1] + d))
# ------------- HELPING FUNCTIONS ------------
def modulo_by_1(num):
    """
    Sign-preserving fractional part: positive inputs map into [0, 1) and
    non-positive inputs into [-1, 0) (Python's % is always non-negative,
    hence the -1 correction for the non-positive branch).
    :param num: double
    """
    frac = num % 1
    return frac if num > 0 else frac - 1
def xy_by_theta_phi(theta, phi):
    """
    Forward kinematics for the pen tip (end of the 2nd arm link): convert the
    joint angles (theta, phi) into board coordinates (x, y).
    :param theta: double - first link angle in rad
    :param phi: double - second link angle in rad
    :return: tuple of doubles (x, y)
    """
    pen_x = ARMS[0] * np.cos(theta) + ARMS[1] * np.cos(phi)
    pen_y = ARMS[0] * np.sin(theta) + ARMS[1] * np.sin(phi) - d
    return pen_x, pen_y
def xy_by_theta(theta, x_0):
    """
    Forward kinematics for the elbow (end of the 1st arm link): convert the
    joint angle theta into coordinates (x, y) relative to the base at x_0.
    :param theta: double - first link angle in rad
    :param x_0: double - x coordinate of the arm base
    :return: tuple of doubles (x, y)
    """
    return x_0 + ARMS[0] * np.cos(theta), ARMS[0] * np.sin(theta)
def unite_vector(a):
    """
    Flatten a list of lists into a single list (one level deep), preserving
    element order.
    :param a: list of lists
    :return: flat list containing the elements of all sub-lists
    """
    # Comprehension replaces the original O(n^2) repeated `united += i`
    # list concatenation while producing the identical result.
    return [item for sub in a for item in sub]
def draw_points(points_to_draw):
    """
    Open a pygame window, draw the given board points once (color shifting
    from red toward yellow along the list order), then close the window
    after TIME_TO_QUIT_SIMULATION seconds.
    :param points_to_draw: list of (x, y) tuples in board coordinates (cm)
    """
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    x_0, y_0 = SCREEN[0], 0
    green = 0
    # quiting option
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        pygame.quit()
    # plotting the screen
    screen.fill(WHITE)
    plot_screen(screen)
    for point in points_to_draw:
        x_point = point[0]
        y_point = point[1]
        x_point_simulation, y_point_simulation = xy_board_to_xy_simulation(x_point, y_point)
        color = (255, green, 0)
        draw_circle([x_point_simulation, y_point_simulation], 2, screen, color)
        # change the color to be more blue
        green += 255 // len(points_to_draw)
    # display the simulation
    pygame.display.flip()
    # quiting the simulation
    time.sleep(TIME_TO_QUIT_SIMULATION)
    pygame.display.quit()
def xy_board_to_xy_simulation(x_board, y_board):
    """Translate board coordinates (cm) into simulation-window coordinates
    by shifting the origin to the window frame."""
    return SCREEN[0] + x_board, d + y_board
# -------------- SLICE TRAJECTORY ------------------
def make_slice_by_trajectory(points, invert=True):
    """
    Sends commands to Arduino according to the given route from the algorithmic module.
    :param points: list of tuples, each tuple is a point the arm should go through
    :param invert: if true then make also invert slice
    """
    steps_theta, steps_phi = list(), list()
    # Convert each consecutive pair of route points into motor step lists
    # and concatenate them into one long step sequence per motor.
    for i in range(len(points)-1):
        current_point = Ac.xy2angles(points[i])  # in (theta,phi)
        next_point = Ac.xy2angles(points[i+1])  # in (theta,phi)
        current_steps_theta, current_steps_phi = Ac.generate_steps_list(Ac.rad2steps(next_point[0] - current_point[0]),
                                                                       Ac.rad2steps(next_point[1] - current_point[1]))
        for j in range(len(current_steps_theta)):
            steps_theta.append(current_steps_theta[j])
            steps_phi.append(current_steps_phi[j])
    # The simulation integrates angles starting from the module-level
    # first_point, so it must be set before running the motor simulation.
    global first_point
    if len(points) != 0:
        first_point = Ac.xy2angles(points[0])
    move_2_motors_simulation(steps_theta, steps_phi)
    if invert:
        # Trace the same slice backwards, starting from the route's last point.
        i_steps_theta, i_steps_phi = Ac.invert_slice(steps_theta, steps_phi)
        if len(points) != 0:
            first_point = Ac.xy2angles(points[-1])
        move_2_motors_simulation(i_steps_theta, i_steps_phi, True)
def duplicate_theta_and_phi_values_for_simulation(theta_vector, phi_vector):
    """
    returns theta vector and phi vector to show in simulation - with the dt of the simulation
    (each serial-rate angle is repeated so the vectors are sampled at the
    faster simulation/motor rate)
    :param theta_vector: list of theta angles - double list
    :param phi_vector: list of phi angles - double list
    :return: tuple of 2 lists (theta vector, phi vector) in the right dt interval
    """
    # Each angle is held for 'factor' simulation ticks (ratio of the two rates).
    factor = int(times_ideal/times_serial)
    theta_mul = [[angle]*factor for angle in theta_vector]
    phi_mul = [[angle]*factor for angle in phi_vector]
    # These locals deliberately shadow the module-level globals of the same
    # name; the caller assigns the returned values to the globals itself.
    theta_simulation = unite_vector(theta_mul)
    phi_simulation = unite_vector(phi_mul)
    return theta_simulation, phi_simulation
def get_theta_and_phi_vectors_by_steps_vectors(steps_theta, steps_phi):
    """
    Integrate motor step lists into absolute angle vectors, starting from the
    module-level first_point (theta, phi) and advancing MINIMAL_ANGLE per step.
    :param steps_theta: steps for the theta motor - int list
    :param steps_phi: steps for the phi motor - int list
    :return: tuple (theta vector, phi vector), each one element longer than
             the step lists
    """
    theta_vector = [first_point[0]]
    phi_vector = [first_point[1]]
    for i, theta_step in enumerate(steps_theta):
        theta_vector.append(theta_vector[-1] + theta_step * MINIMAL_ANGLE)
        phi_vector.append(phi_vector[-1] + steps_phi[i] * MINIMAL_ANGLE)
    return theta_vector, phi_vector
def move_2_motors_simulation(steps_theta, steps_phi, inverse=False):
    """
    Update the global simulation angle vectors from the given motor step lists.
    :param steps_theta: steps for the theta motor - int list
    :param steps_phi: steps for the phi motor - int list
    :param inverse: flag marking an inverted slice - not used in the body;
        TODO confirm whether it is still needed by callers
    """
    global theta_simulation, phi_simulation
    theta_vector, phi_vector = get_theta_and_phi_vectors_by_steps_vectors(steps_theta, steps_phi)
    theta_simulation, phi_simulation = duplicate_theta_and_phi_values_for_simulation(theta_vector, phi_vector)
# ------------- FRUIT TRAJECTORY ------------------
def xy_by_fruit_trajectory(trajectory, total_time, dt):
    """
    returns vector of x and vector of y in the simulation dt interval in cm
    :param trajectory: function that gets double t and returns a tuple (x, y) of the fruit location by the estimated
    trajectory
    :param total_time: the total time of the fruit on the screen (from the moment that the trajectory was calculated)
    :param dt: the dt of the simulation
    :return: tuple of 2 lists (x of the fruit, y of the fruit) in the simulation dt interval in cm
    """
    # calculation of the right dt interval in order to show it in the simulation
    dt_trajectory = total_time / (T / dt)
    times = range(int(T / dt))
    x_fruit, y_fruit = [0 for _ in times], [0 for _ in times]
    for i in times:
        # Sample the fruit trajectory, then convert from the algorithmic
        # coordinate frame to the mechanical one used by the simulation.
        x_fruit[i], y_fruit[i] = trajectory(i * dt_trajectory)
        x_fruit[i], y_fruit[i] = Algo.algo_to_mech((x_fruit[i], y_fruit[i]))
    # # TODO the next 2 lines are a bit "fishy". check why it is necessary to add values for the conversion
    # x_fruit[i] += SCREEN[0] / 2
    # y_fruit[i] += d
    return x_fruit, y_fruit
def get_fruit_xy_vectors(fruits):
    """
    Build, for every given fruit, its (x vector, y vector) location lists
    sampled at the simulation's motor dt.
    :param fruits: list of fruit objects, each exposing a .trajectory object
        with calc_trajectory() and calc_life_time() - assumed interface,
        defined in another module; TODO confirm
    :return: list of (x_vector, y_vector) tuples, one per fruit
    """
    def zero_trajectory(_):
        # Fallback trajectory that pins a fruit to the origin.
        # NOTE(review): currently unused - kept from an earlier code path.
        return 0, 0
    fruit_trajectories = [fruit.trajectory for fruit in fruits]
    # get the trajectory of the first fruit - (x,y) by t
    if len(fruit_trajectories) > 0:
        first_trajectory = []
        first_trajectory_total_time = []
        for i in range(len(fruit_trajectories)):
            # first_trajectory_object.append(fruit_trajectories[i])
            first_trajectory.append(fruit_trajectories[i].calc_trajectory())
            first_trajectory_total_time.append(fruit_trajectories[i].calc_life_time())
        # # do not have to get into the 2 else down
        # else:
        #     first_trajectory = zero_trajectory
        #     first_trajectory_total_time = 1
    else:
        first_trajectory = []
        first_trajectory_total_time = 1
    xy_of_fruits_list = []
    for j in range(len(first_trajectory)):
        xy_of_fruits_list.append(xy_by_fruit_trajectory(first_trajectory[j],first_trajectory_total_time[j], dt_motor ))
    return xy_of_fruits_list
def init_multi():
    """Switch the module constants to the multi-player geometry:
    a smaller screen and a larger distance from the arm base."""
    global SCREEN, d
    SCREEN = (12.0,8.0)
    d = 17.8
# ------------- CALCULATE LOCATIONS -------------
def run_simulation(points_to_go_through, fruits_sliced):
    """
    Runs the simulation: computes the slice motion for the given route, then
    animates the two-link arm and the fruit trajectories in a pygame window.
    :param points_to_go_through: list of (x, y) points - the route of the slice
    :param fruits_sliced: list of fruit objects whose trajectories are drawn
    """
    # print("points to go through:")
    # print(points_to_go_through)
    # draw_points(points_to_go_through)
    global theta_simulation, phi_simulation
    make_slice_by_trajectory(points_to_go_through, False)
    xy_of_fruits_list = get_fruit_xy_vectors(fruits_sliced)
    # ------------- PLOT -------------------
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    x_0, y_0 = SCREEN[0], 0
    times = len(theta_simulation)
    x_practical_vector = [0 for _ in range(times)]
    y_practical_vector = [0 for _ in range(times)]
    # loop of plot
    for i in range(times):
        # quiting option
        event = pygame.event.poll()
        if event.type == pygame.QUIT:
            pygame.quit()
        # plotting the screen
        screen.fill(WHITE)
        plot_screen(screen)
        # draw fruits locations
        for k in range(len(xy_of_fruits_list)):
            x_fruit, y_fruit = xy_board_to_xy_simulation(xy_of_fruits_list[k][0][i], xy_of_fruits_list[k][1][i])
            draw_circle([x_fruit, y_fruit], 10, screen, GREEN)
        # real locations
        x_practical, y_practical = xy_by_theta_phi(theta_simulation[i], phi_simulation[i])
        x_practical, y_practical = xy_board_to_xy_simulation(x_practical, y_practical)
        x_practical_vector[i], y_practical_vector[i] = x_practical, y_practical
        x_link_practical, y_link_practical = xy_by_theta(theta_simulation[i], x_0)
        draw_circle([x_practical, y_practical], 2, screen, RED)
        draw_line([x_link_practical, y_link_practical], [x_0, y_0], screen)
        draw_line([x_link_practical, y_link_practical], [x_practical, y_practical], screen)
        # display the simulation
        pygame.display.flip()
        # sleep for the simulation dt (dt_motor is the simulation dt)
        time.sleep(dt_motor)
    # quiting the simulation
    time.sleep(TIME_TO_QUIT_SIMULATION)
    pygame.display.quit()
if __name__ == '__main__':
    # Demo run: trace a triangular slice once with no fruits on screen.
    for i in range(1):
        run_simulation([(0.6,0.0), (-7.0,4.0), (7.0,4.0), (0.6,0.0)], [])
        time.sleep(1)
| tomerpeled1/Projecton | Simulation.py | Simulation.py | py | 14,618 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.draw.line",
"line_number"... |
6213017763 | from functools import lru_cache
import csv
@lru_cache
def read(path):
    """Read a CSV file and return its rows as a list of dicts.

    The result is memoized per path via lru_cache, so each file is parsed at
    most once per process. Note that all callers therefore share the same
    list object - mutating it would corrupt the cache.

    :param path: path to the CSV file (first row is the header)
    :return: list of dict rows, one per data line
    """
    with open(path) as file:
        jobs_reader = csv.DictReader(file, delimiter=",", quotechar='"')
        # Materialize inside the `with` block so the file handle is closed
        # before returning; list() replaces the manual append loop.
        return list(jobs_reader)
| oelithon/estudo-projeto-job-insights | src/jobs.py | jobs.py | py | 265 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.DictReader",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 5,
"usage_type": "name"
}
] |
2136965668 | import pygame
import time
pygame.mixer.init()
pygame.init()
pygame.mixer.music.set_volume(1)
window = pygame.display.set_mode((1800, 450))
pygame.display.set_caption('Guitar -> Piano Visualizer')
#### Maybe use this later for adding text to pygame screen #####
font = pygame.font.Font('freesansbold.ttf', 30)
eText = font.render('RED : E', True, (255,0,0), (127,0,0))
eTextRect = eText.get_rect()
eTextRect.center = (150, 425)
window.blit(eText, eTextRect)
aText = font.render('ORANGE : A', True, (255,165,0), (127,84,0))
aTextRect = aText.get_rect()
aTextRect.center = (450, 425)
window.blit(aText, aTextRect)
dText = font.render('YELLOW : D', True, (255,255,0), (127,127,0))
dTextRect = dText.get_rect()
dTextRect.center = (750, 425)
window.blit(dText, dTextRect)
gText = font.render('GREEN : G', True, (0,255,0), (0,127,0))
gTextRect = gText.get_rect()
gTextRect.center = (1050, 425)
window.blit(gText, gTextRect)
bText = font.render('BLUE : B', True, (0,0,255), (0,0,127))
bTextRect = bText.get_rect()
bTextRect.center = (1350, 425)
window.blit(bText, bTextRect)
EText = font.render('PURPLE : High E', True, (255,0,255), (127,0,127))
ETextRect = EText.get_rect()
ETextRect.center = (1650, 425)
window.blit(EText, ETextRect)
#key colors
white = (255,255,255)
black = (0,0,0)
#used for calculations
scottBases = [7, 0, 5, 10, 2, 7]
willBases = [0, 5, 10, 15, 19, 24]
#active colors
red = (255,0,0)
orange = (255,165,0)
yellow = (255,255,0)
green = (0,255,0)
blue = (0,0,255)
purple = (255,0,255)
# used for coloring keyboard
activeColors = [red, orange, yellow, green, blue, purple]
#passive colors
red2 = (255/2,0,0)
orange2 = (255/2,165/2,0)
yellow2 = (255/2,255/2,0)
green2 = (0,255/2,0)
blue2 = (0,0,255/2)
purple2 = (255/2,0,255/2)
#used for coloring keyboard
passiveColors = [red2, orange2, yellow2, green2, blue2, purple2]
class Key:
    """A single piano key: its rectangle on the pygame window plus the piano
    and guitar sound sample filenames associated with its pitch."""

    # Registry of every key created, in construction order (low E upward).
    individualKeys = []

    def __init__(self, sharp, num, xPos, yPos, width, height, pianoSound, guitarSound):
        """
        :param sharp: 1 for a black (sharp) key, 0 for a white key
        :param num: semitone index of the key (0 = low E)
        :param xPos: left edge of the key rectangle in window pixels
        :param yPos: top edge of the key rectangle in window pixels
        :param width: key rectangle width in pixels
        :param height: key rectangle height in pixels
        :param pianoSound: filename of the piano sample for this pitch
        :param guitarSound: filename of the guitar sample for this pitch
        """
        self.__class__.individualKeys.append(self)
        self.self = self
        self.sh = sharp
        self.n = num
        self.x = xPos
        self.y = yPos
        self.w = width
        self.h = height
        self.ps = pianoSound
        self.gs = guitarSound

    def whiten(self):
        # Draw the key in its idle state: outlined for sharps, filled otherwise.
        if self.sh == 1:
            pygame.draw.rect(window, white, (self.x, self.y, self.w, self.h), 5)
        else:
            pygame.draw.rect(window, white, (self.x, self.y, self.w, self.h), 0)

    def activate(self, colornum):
        # Fill the key with the bright color assigned to the given string.
        pygame.draw.rect(window, activeColors[colornum], (self.x, self.y, self.w, self.h), 0)

    def makePassive(self, colornum):
        # Mark a previously played key: dim colored tab on top, outlined body.
        pygame.draw.rect(window, passiveColors[colornum], (self.x, 0, self.w, 50), 0)
        pygame.draw.rect(window, black, (self.x, self.y, self.w, self.h), 0)
        pygame.draw.rect(window, white, (self.x, self.y, self.w, self.h), 5)
        #pygame.draw.rect(window, passiveColors[colornum], (self.x, self.y, self.w, self.h), 0)

    def displayNumber(self):
        # Render the key's semitone number near the key (offset for sharps).
        font = pygame.font.Font('freesansbold.ttf', 15)
        numText = font.render(str(self.n), True, (255,255,255), (0,0,0))
        numTextRect = numText.get_rect()
        if self.sh == 1:
            numTextRect.center = ((self.x + 12.5), 360)
        else:
            numTextRect.center = (self.x + 25, 375)
        window.blit(numText, numTextRect)

    def playGuitarSound(self):
        # Play this key's guitar sample and refresh the display.
        pygame.mixer.Sound(self.gs).play()
        pygame.display.update()
### SET KEYS ###
e1 = Key(0,0,0,50,50,300,'e1.mp3','Ge1.mp3')
f1 = Key(0,1,50,50,50,300,'f1.mp3','Gf1.mp3')
fsharp1 = Key(1,2,100, 0,25,300,'fsharp1.mp3','Gfsharp1.mp3')
g1 = Key(0,3,125,50,50,300,'g1.mp3','Gg1.mp3')
gsharp1 = Key(1,4,175, 0,25,300,'gsharp1.mp3','Ggsharp1.mp3')
a1 = Key(0,5,200,50,50,300,'a1.mp3','Ga1.mp3')
asharp1 = Key(1,6,250, 0,25,300,'asharp1.mp3','Gasharp1.mp3')
b1 = Key(0,7,275,50,50,300,'b1.mp3','Gb1.mp3')
c1 = Key(0,8,325,50,50,300,'c1.mp3','Gc1.mp3')
csharp1 = Key(1,9,375, 0,25,300,'csharp1.mp3','Gcsharp1.mp3')
d1 = Key(0,10,400,50,50,300,'d1.mp3','Gd1.mp3')
dsharp1 = Key(1,11,450, 0,25,300,'dsharp1.mp3','Gdsharp1.mp3')
e2 = Key(0,12,475,50,50,300,'e2.mp3','Ge2.mp3')
f2 = Key(0,13,525,50,50,300,'f2.mp3','Gf2.mp3')
fsharp2 = Key(1,14,575, 0,25,300,'fsharp2.mp3','Gfsharp2.mp3')
g2 = Key(0,15,600,50,50,300,'g2.mp3','Gg2.mp3')
gsharp2 = Key(1,16,650, 0,25,300,'gsharp2.mp3','Ggsharp2.mp3')
a2 = Key(0,17,675,50,50,300,'a2.mp3','Ga2.mp3')
asharp2 = Key(1,18,725, 0,25,300,'asharp2.mp3','Gasharp2.mp3')
b2 = Key(0,19,750,50,50,300,'b2.mp3','Gb2.mp3')
c2 = Key(0,20,800,50,50,300,'c2.mp3','Gc2.mp3')
csharp2 = Key(1,21,850, 0,25,300,'csharp2.mp3','Gcsharp2.mp3')
d2 = Key(0,22,875,50,50,300,'d2.mp3','Gd2.mp3')
dsharp2 = Key(1,23,925, 0,25,300,'dsharp2.mp3','Gdsharp2.mp3')
e3 = Key(0,24,950,50,50,300,'e3.mp3','Ge3.mp3')
f3 = Key(0,25,1000,50,50,300,'f3.mp3','Gf3.mp3')
fsharp3 = Key(1,26,1050, 0,25,300,'fsharp3.mp3','Gfsharp3.mp3')
g3 = Key(0,27,1075,50,50,300,'g3.mp3','Gg3.mp3')
gsharp3 = Key(1,28,1125, 0,25,300,'gsharp3.mp3','Ggsharp3.mp3')
a3 = Key(0,29,1150,50,50,300,'a3.mp3','Ga3.mp3')
asharp3 = Key(1,30,1200, 0,25,300,'asharp3.mp3','Gasharp3.mp3')
b3 = Key(0,31,1225,50,50,300,'b3.mp3','Gb3.mp3')
c3 = Key(0,32,1275,50,50,300,'c3.mp3','Gc3.mp3')
csharp3 = Key(1,33,1325, 0,25,300,'csharp3.mp3','Gcsharp3.mp3')
d3 = Key(0,34,1350,50,50,300,'d3.mp3','Gd3.mp3')
dsharp3 = Key(1,35,1400, 0,25,300,'dsharp3.mp3','Gdsharp3.mp3')
e4 = Key(0,36,1425,50,50,300,'e4.mp3','Ge4.mp3')
f4 = Key(0,37,1475,50,50,300,'f4.mp3','Gf4.mp3')
fsharp4 = Key(1,38,1525, 0,25,300,'fsharp4.mp3','Gfsharp4.mp3')
g4 = Key(0,39,1550,50,50,300,'g4.mp3','Gg4.mp3')
gsharp4 = Key(1,40,1600, 0,25,300,'gsharp4.mp3','Ggsharp4.mp3')
a4 = Key(0,41,1625,50,50,300,'a4.mp3','Ga4.mp3')
asharp4 = Key(1,42,1675, 0,25,300,'asharp4.mp3','Gasharp4.mp3')
b4 = Key(0,43,1700,50,50,300,'b4.mp3','Gb4.mp3')
c4 = Key(0,44,1750,50,50,300,'c4.mp3','Gc4.mp3')
scottNotes = []
willNotes = []
totalNumberOfChords = 0
### RUN THIS TO ENTER FRETS AND SEE KEYS ###
def chooseNewChord():
    """Prompt the user for the fret played on each of the six strings and
    append the resulting note numbers to the scottNotes / willNotes lists."""
    print("Hello!")
    stringOfFrets = input("For each string, say which fret is played, seperated by spaces (E A D G B E): ")
    print()
    frets = stringOfFrets.split()
    fretInts = map(int, frets)
    listFretInts = list(fretInts) #int list of fret nums
    for i in range(6):
        scottNotes.append(scottBases[i] + listFretInts[i])
    for i in range(6):
        # Negative fret marks a muted string and is stored as -1.
        # NOTE(review): the scottNotes loop above has no such guard -
        # confirm whether that asymmetry is intentional.
        if listFretInts[i] >= 0:
            willNotes.append(willBases[i] + listFretInts[i])
        else:
            willNotes.append(-1)
def displayCurrentchord():
    """Light up and sound the keys of the chord chosen by chooseNewChord():
    first as piano notes (bright colors, one at a time), then as guitar notes,
    and finally refresh the string/note legend at the bottom of the window."""
    j = -1
    for i in range(44):
        if (Key.individualKeys[i].n in willNotes) and (Key.individualKeys[i].n >= 0):
            j += 1
            Key.individualKeys[i].activate(j)
            pygame.mixer.Sound(Key.individualKeys[i].ps).play()
            pygame.display.update()
            time.sleep(0.3)
    time.sleep(3)
    for i in range(44):
        if (Key.individualKeys[i].n in willNotes) and (Key.individualKeys[i].n >= 0):
            pygame.mixer.Sound(Key.individualKeys[i].gs).play()
            pygame.display.update()
            time.sleep(0.1)
    #### Maybe use this later for adding text to pygame screen #####
    font = pygame.font.Font('freesansbold.ttf', 30)
    eText = font.render('RED : E - ' + str(scottNotes[0]), True, (255,0,0), (127,0,0))
    eTextRect = eText.get_rect()
    eTextRect.center = (150, 425)
    window.blit(eText, eTextRect)
    aText = font.render('ORANGE : A - ' + str(scottNotes[1]), True, (255,165,0), (127,84,0))
    aTextRect = aText.get_rect()
    aTextRect.center = (450, 425)
    window.blit(aText, aTextRect)
    dText = font.render('YELLOW : D - ' + str(scottNotes[2]), True, (255,255,0), (127,127,0))
    dTextRect = dText.get_rect()
    dTextRect.center = (750, 425)
    window.blit(dText, dTextRect)
    gText = font.render('GREEN : G - ' + str(scottNotes[3]), True, (0,255,0), (0,127,0))
    gTextRect = gText.get_rect()
    gTextRect.center = (1050, 425)
    window.blit(gText, gTextRect)
    bText = font.render('BLUE : B - ' + str(scottNotes[4]), True, (0,0,255), (0,0,127))
    bTextRect = bText.get_rect()
    bTextRect.center = (1350, 425)
    window.blit(bText, bTextRect)
    EText = font.render('PURPLE : High E - ' + str(scottNotes[5]), True, (255,0,255), (127,0,127))
    ETextRect = EText.get_rect()
    ETextRect.center = (1650, 425)
    window.blit(EText, ETextRect)
    pygame.display.update()
def displayPreviousChord(listKeys):
    """Re-draw a previously played chord in its passive (dimmed) colors.
    :param listKeys: list of key numbers that were part of the chord
    """
    j = -1
    for i in range(44):
        if Key.individualKeys[i].n in listKeys:
            j += 1
            Key.individualKeys[i].makePassive(j)
    pygame.display.update()
    print('previous wills notez' , listKeys)
##########################################
# Makes Chord Class #
##########################################
class Chord:
    """A guitar chord: six key numbers, one per string (low E to high E).

    Every instance is also recorded in the class-level ``bank`` list.
    """

    bank = []

    def __init__(self, listKeyInts):
        """
        :param listKeyInts: list of at least six key numbers in string order
            E, A, D, G, B, high E
        """
        self.__class__.bank.append(self)
        self.noteList = listKeyInts
        # Per-string key numbers, low strings first.
        self.e, self.a, self.d = listKeyInts[0], listKeyInts[1], listKeyInts[2]
        self.g, self.b, self.E = listKeyInts[3], listKeyInts[4], listKeyInts[5]
####################################
#### Initilize the White Piano #####
####################################
for i in range(45):
Key.individualKeys[i].whiten()
Key.individualKeys[i].displayNumber()
pygame.display.update()
def play():
    """Main interactive loop: choose a chord, display and play it, and repeat
    until the user types 'e'.

    Uses iteration instead of the original self-recursion so that long
    sessions cannot grow the call stack toward the recursion limit.
    """
    while True:
        chooseNewChord()
        displayCurrentchord()
        yayNay = input("Start fresh? press 'e' to exit: ")
        if yayNay == 'e':
            exit()
        else:
            print("u tried!")
| wjudy/ForFunProjects | music/GUITARPIANO/guitarpiano.py | guitarpiano.py | py | 9,652 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.mixer.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music.set_v... |
1091161094 | import cv2
print(cv2.__version__)

cam = cv2.VideoCapture(0)
while True:
    # read() returns (success_flag, frame); on failure (no camera, stream
    # ended) the frame is None and cvtColor would crash - bail out instead.
    ok, frame = cam.read()
    if not ok:
        break
    greyFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("CameraWindow", greyFrame)
    cv2.moveWindow("CameraWindow", 500, 0)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xff == ord("q"):
        break
cam.release()
# Close the preview window now that the capture is released.
cv2.destroyAllWindows()
| kseeger-code/AI_for_Everyone | openCV-1.py | openCV-1.py | py | 321 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.__version__",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",... |
44344510292 | import os
import zipfile

# Package the GCP dumper Cloud Function into dist/gcp-dumper-function.zip.
if not os.path.isdir("dist"):
    os.mkdir("dist")

# The context manager guarantees the archive is finalized (central directory
# written) even if one of the writes raises.
with zipfile.ZipFile("dist/gcp-dumper-function.zip", "w") as zf:
    # Bundle the vendored libs tree, skipping compiled-bytecode caches.
    for dirname, subdirs, files in os.walk("libs"):
        if not dirname.endswith("__pycache__"):
            zf.write(dirname)
            for filename in files:
                zf.write(os.path.join(dirname, filename))
    # Cloud Functions expects the entry point to be named main.py.
    zf.write("gcp_dumper_function.py", "main.py")
    zf.write("requirements.txt")
    zf.write("indexGroup-by-provider.csv")
{
"api_name": "os.path.isdir",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number":... |
36902412685 | """For each root element in a XML file, count how many lines in the element.
The print out a list of the elements and the number of lines they have.
Optionally pass a flag to sort the elements by length
"""
from lxml import etree
import argparse
import re
import os
from pathlib import Path
from lxml.etree import XMLParser
from typing import Dict
replaceRegex = re.compile(r'/xs:schema/xs:element\[.\]/')
def countElements(infile: str, sort: bool = False) -> Dict[str, int]:
    """Count number of lines for each root element.

    Args:
        infile: File to read in from
        sort: Whether or not to sort the element by size

    Returns:
        A dict of all of the root elements and the number of lines they have.
    """
    elementCounts: Dict[str, int] = {}
    # Load in the XSD file and then get the root elements.
    # NOTE(review): no comment-removing parser is passed to etree.parse here,
    # despite the original comment claiming one - confirm whether comments
    # can still skew the line counts.
    schema: etree._ElementTree = etree.parse(infile)
    rootElements = list(schema.getroot())
    # Count the pretty-printed lines of each root element.
    for rootElement in rootElements:
        asText = etree.tostring(rootElement, pretty_print=True)
        newLineCount = len(re.findall(br"\n", asText))
        try:
            elementCounts[rootElement.attrib['name']] = newLineCount
        except KeyError:
            # Elements without a 'name' attribute are skipped.
            pass
    if sort:
        # Re-build the dict in ascending order of line count.
        elementCounts = {k: v for k, v in sorted(elementCounts.items(), key=lambda item: item[1])}
    return elementCounts
def main():
    """Function to run when called directly from CLI."""
    # Use the argparse library to process arguments from the command line.
    cli = argparse.ArgumentParser(description='Count lines in each root element')
    cli.add_argument("infile", type=str, help="Path to the file to read")
    cli.add_argument("-s", "--sorted", action='store_true', help="Sort by line number")
    parsed = cli.parse_args()
    counts = countElements(parsed.infile, parsed.sorted)
    for element_name, line_count in counts.items():
        print(f"{line_count}:", element_name)
    print(f'{sum(counts.values())}: Total')
if __name__ == "__main__":
main()
| marsfan/Farming-Simulator-Mod-Doc | utils/rootSizeCount.py | rootSizeCount.py | py | 2,187 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "lxml.etree._ElementTree",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "lxml.etree",
... |
24296607499 | ### stats.nba.com scraping ###
#region Imports
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import requests
import datetime as dt
import os
import json
import time
from datetime import timedelta, date
#selenium imports
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
#endregion
#region stats.nba.com scraper class
class nba_stats:
def __init__(self,start_date,end_date, season,season_type = "Regular", date_list = None):
    """
    :param start_date: first day to scrape (inclusive) - assumed datetime.date; TODO confirm
    :param end_date: end of the scraping range (exclusive, per daterange)
    :param season: int - starting year of the season (e.g. 2019 for 2019-20)
    :param season_type: str season type, defaults to "Regular"
    :param date_list: optional explicit list of dates that overrides the range
    """
    self.start_date = start_date
    self.end_date = end_date
    self.season = season
    self.season_type = season_type
    self.date_list = date_list
def daterange(self,start_date,end_date):
    """Yield each date from start_date (inclusive) up to end_date (exclusive)."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
def defensive_rebounding(self):
    """
    WIP - Contended DREB, DREB Chances, AVG DREB dist
    https://stats.nba.com/players/defensive-rebounding/
    """
    # Placeholder: scraper not implemented yet.
    return None
def play_type(self):
    """
    WIP - FG%/PPP by isolation, P&R (BH and RM), post-up, spot-up, handoff,
    off-screen, putbacks
    https://stats.nba.com/players/isolation/
    """
    # Placeholder: scraper not implemented yet.
    return None
def defensive_dashboard(self, shot_category = 'overall'):
"""
Similar to opponent shooting, but includes an expected FG% --> something we
can't calculate - this might be more useful than opponent_shooting
https://stats.nba.com/players/defense-dash-overall/
"""
acceptable_categories = ['overall','3pt','2pt','lt6','lt10','gt15']
if shot_category not in acceptable_categories:
print("Please choose from one of these options for shot category:")
return acceptable_categories
df_all = pd.DataFrame(columns=['Player','Team','Age','Position','GP','G','DFGM',
'DFGA','EFG%','DIFF%'])
#Date Range Starting Loop
if self.date_list == None:
dates = [d for d in self.daterange(self.start_date, self.end_date)]
else:
dates = self.date_list
for single_date in dates:
url = 'https://stats.nba.com/players/defense-dash-'
# incognito window
chrome_options = Options()
chrome_options.add_argument("--incognito")
# open driver
driver = webdriver.Chrome(r"C:/Users/Spelk/Documents/chromedriver_win32/chromedriver.exe")
# date parameters
year = single_date.strftime("%Y")
month = single_date.strftime("%m")
day = single_date.strftime("%d")
full_date = single_date.strftime("%Y-%m-%d")
# dynamic url one date at a time
url += shot_category + "/"
url += "?Season=" + str(self.season)
url += "-" + str(self.season+1)[2:]
url += "&SeasonType=" + self.season_type
url += "%20Season&DateFrom=" + month
url += "%2F" + day + "%2F" + year
url += "&DateTo=" + month + "%2F" + day + "%2F" + year
# go to url
driver.get(url)
time.sleep(15)
print("Reached url for:",full_date,"... \n")
#data with error handling
try:
stats_table = driver.find_element_by_class_name('nba-stat-table__overflow')
stats_text = stats_table.text
try:
driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[1]/div/div/select/option['All']").click()
except:
pass
except: # No such element exception
print("No games on this date - pass \n")
driver.close()
continue
#Create df from text
print("Now scraping the table \n")
player = []
team = []
age = []
pos = []
gp = []
g = []
dfgm = []
dfga = []
efg = []
dif = []
for index,line in enumerate(stats_text.split('\n')[1:]): #first for is the header
#get column names
if index % 2 == 0:
[player.append(p) for p in [line]]
else:
stats = line.split(' ')
team.append(stats[0])
age.append(stats[1])
pos.append(stats[2])
gp.append(stats[3])
g.append(stats[4])
dfgm.append(stats[6])
dfga.append(stats[7])
efg.append(stats[9])
dif.append(stats[10])
#Create new_df and append to df_all
new_df = pd.DataFrame({'Player':player,
'Team':team,
'Age':age,
'Position':pos,
'GP':gp,
'G':g,
'DFGM':dfgm,
'DFGA':dfga,
'EFG%':efg,
'DIFF%':dif})
new_df['Game_Date'] = full_date
df_all = df_all.append(new_df,ignore_index = True)
print("Done scraping",full_date,"\n")
driver.close()
time.sleep(3)
df_all['Shot_Category'] = shot_category
return df_all
def opponent_shooting(self):
df_all = pd.DataFrame(columns=['Player','Team','Age','Distance','FGM','FGA','Game_Date'])
#Date Range Starting Loop
if self.date_list == None:
dates = [d for d in self.daterange(self.start_date, self.end_date)]
else:
dates = self.date_list
for single_date in dates:
url = 'https://stats.nba.com/players/opponent-shooting/?Season='
# incognito window
chrome_options = Options()
chrome_options.add_argument("--incognito")
# open driver
driver = webdriver.Chrome(r"C:/Users/Spelk/Documents/chromedriver_win32/chromedriver.exe")
# date parameters
year = single_date.strftime("%Y")
month = single_date.strftime("%m")
day = single_date.strftime("%d")
full_date = single_date.strftime("%Y-%m-%d")
# dynamic url one date at a time
url += str(self.season)
url += "-" + str(self.season+1)[2:]
url += "&SeasonType=" + self.season_type
url += "%20Season&DateFrom=" + month
url += "%2F" + day + "%2F" + year
url += "&DateTo=" + month + "%2F" + day + "%2F" + year
# go to url
driver.get(url)
time.sleep(18) # ! Load time - come up with a better way for this and the following line (clicking all)
#data with error handling
try:
stats_table = driver.find_element_by_class_name('nba-stat-table__overflow')
stats_text = stats_table.text
try:
driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[1]/div/div/select/option['All']").click()
except:
pass
except: # No such element exception
print("This date failed or there are no games on this date - pass \n")
print(single_date)
driver.close()
continue
print("Reached url for:",full_date,"... \n")
print("Now scraping the table \n")
#data
stats_table = driver.find_element_by_class_name('nba-stat-table__overflow')
stats_text = stats_table.text
#Create df from text
distances = ['0_5_ft','5_9_ft','10_14_ft','15_19_ft',
'20_24_ft','25_29_ft']
player = []
team = []
age = []
distance = []
fgm = []
fga = []
for index,line in enumerate(stats_text.split('\n')[2:]): #first two rows are headers
#get column names
if index % 2 == 0:
[player.append(p) for p in [line]*len(distances)]
else:
stats = line.split(' ')
[team.append(i) for i in [stats[0]]*len(distances)]
[age.append(i) for i in [stats[1]]*len(distances)]
[distance.append(i) for i in distances]
fgm_list = [stats[2],stats[5],stats[8],stats[11],stats[14],stats[17]]
fga_list = [stats[3],stats[6],stats[9],stats[12],stats[15],stats[18]]
[fgm.append(i) for i in fgm_list]
[fga.append(i) for i in fga_list]
#Create new_df and append to df_all
new_df = pd.DataFrame({'Player':player,
'Team':team,
'Age':age,
'Distance':distance,
'FGM':fgm,
'FGA':fga})
new_df['Game_Date'] = full_date
df_all = df_all.append(new_df,ignore_index = True)
print("Done scraping",full_date,"\n")
driver.close()
time.sleep(3)
return df_all
# endregion
# region Using the Scraper
# Configure a scrape of the 2018-19 regular season.
start = date(2018,10,16)
end = date(2019,4,11)
szn = "Regular"
season = 2018
stats_range = nba_stats(start,end,season,szn)
#df = stats_range.opponent_shooting()
#df3 = stats_range.defensive_dashboard(shot_category = '3pt') #test dataset
#endregion
#region Date Cleanup
# Some dates are missed because the browser doesn't load in time
# This won't really be an issue going forward if we're scraping one day at a time
all_dates = [pd.to_datetime(i) for i in stats_range.daterange(stats_range.start_date, stats_range.end_date)]
# NOTE(review): `df` is only defined by the commented-out
# `stats_range.opponent_shooting()` call above - running this script
# top-to-bottom raises NameError here; it appears to be meant for an
# interactive session where df already exists. Confirm intent.
dates_scraped = list(df['Game_Date'].unique())
datetime_list = list(pd.to_datetime(dates_scraped))
# Dates in the requested range that the first scrape missed.
not_scraped = list(set(all_dates) - set(datetime_list))
date_cleanup = nba_stats(start,end,season,szn,date_list=not_scraped)
df_cleanup = date_cleanup.opponent_shooting()
#endregion
# region Joining to GameLogs
# Opponent Shooting Prep
opponent_shooting = pd.read_csv('opponent_shooting.csv')
opponent_shooting.info()
category_cols = ['Player','Team','Distance','Season']
opponent_shooting[category_cols] = opponent_shooting[category_cols].astype('category')
opponent_shooting['Game_Date'] = pd.to_datetime(opponent_shooting['Game_Date'])
opponent_shooting.info()
#Game log Prep
game_logs = pd.read_csv('game_logs.csv')
subset_cols = [
    "dateGame","idGame","numberGameTeamSeason","nameTeam",
    "idTeam","isB2B","isB2BFirst","isB2BSecond","locationGame",
    "slugMatchup","slugTeam","countDaysRestTeam","countDaysNextGameTeam",
    "slugOpponent","slugTeamWinner","slugTeamLoser","outcomeGame","namePlayer",
    "numberGamePlayerSeason","countDaysRestPlayer","countDaysNextGamePlayer","idPlayer",
    "isWin","fgm","fga","fg3m","fg3a","fg2m","fg2a","minutes","ftm","fta","oreb","dreb",
    "treb","ast","stl","blk","tov","pf","pts","plusminus","urlTeamSeasonLogo","urlPlayerStats",
    "urlPlayerThumbnail","urlPlayerHeadshot","urlPlayerActionPhoto","urlPlayerPhoto"
]
game_logs = game_logs[subset_cols]
category_cols = ["namePlayer","nameTeam"]
game_logs[category_cols] = game_logs[category_cols].astype('category')
game_logs['dateGame'] = pd.to_datetime(game_logs['dateGame'])
#Join scraped shooting data to the game logs on player name + date;
# rename FGM/FGA to DFGM/DFGA to mark them as defensive (opponent) stats.
all_data = opponent_shooting.merge(game_logs, how = 'left',
                                   left_on = ['Player', 'Game_Date'],
                                   right_on = ['namePlayer', 'dateGame']).rename(columns = {'FGM':'DFGM','FGA':'DFGA'})
# Drop the duplicate join keys from the right-hand side.
all_data.drop(labels = ['dateGame','slugTeam','namePlayer'], axis = 1, inplace = True)
all_data.to_csv('FinalGameLogs.csv', index = False)
{
"api_name": "datetime.timedelta",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 80,
"usage_type": "call"
},
{
"api... |
18732404186 | from motor.motor_asyncio import AsyncIOMotorCollection, AsyncIOMotorClientSession
from pydantic import Field
import infrastructure
from models.base import EntityModel
class Organization(EntityModel):
    """Organization entity model (pydantic) stored via the repository below."""
    # Organization name; required (Field() declares no default).
    name: str = Field()
class OrgCollectionRepository(infrastructure.LoggedCollectionRepository[Organization]):
    """Repository binding the Organization model to a Motor collection."""
    def __init__(
        self,
        collection: AsyncIOMotorCollection,
        session: AsyncIOMotorClientSession,
        query_base: dict,
    ):
        """
        :param collection: Motor collection holding organization documents
        :param session: Motor client session used for the operations
        :param query_base: base query dict - presumably merged into every
            lookup by LoggedCollectionRepository; confirm in infrastructure.
        """
        # Fix Organization as the model type the base repository works with.
        super().__init__(collection, session, Organization, query_base)
| paukstelom/sponsorbook | contexts/organization.py | organization.py | py | 542 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.base.EntityModel",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "infrastructure.LoggedCollectionRepository",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
43495796464 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 7 17:53:40 2023
@author: mingjunsun
This code is an example of solving LASSO-like optimization problems.
"""
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from sklearn import linear_model
#from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.linear_model import LassoCV, RidgeCV, ElasticNetCV
#SET WORKING DIRECTORY
os.chdir('/Users/mingjunsun/Library/CloudStorage/Dropbox/23 Summer/Data/')
data = pd.read_csv("Characteristics/usa.csv")
#define start date and end date of training set (dates encoded as YYYYMMDD ints)
train_start = 19900000
train_end = 20000000
#define start date and end date of test set
test_start = 20000000
test_end = 20220000
#exclude data that's not during the period
data = data[(data["date"] > train_start) & (data["date"] < test_end)]
#exclude observations with missing market equity in month t and missing return in month t+1
data = data.dropna(subset=['me', 'ret_exc_lead1m'])
#exclude nano caps
data = data.loc[data['size_grp'] != 'nano']
#delete observation with more than 5 out of the 21 characteristics missing
cols = ["be_me", "ret_12_1", "market_equity", "ret_1_0", "rvol_252d", "beta_252d", "qmj_safety", "rmax1_21d", "chcsho_12m",
        "ni_me", "eq_dur", "ret_60_12", "ope_be", "gp_at", "ebit_sale", "at_gr1", "sale_gr1", "at_be","cash_at", "age", "z_score"]
data["missing_num"] = data[cols].isna().sum(1)
data = data.loc[data['missing_num'] <= 5]
#impute the missing characteristics by replacing them with the cross-sectional median
for i in cols:
    data[i] = data[i].astype(float)
    data[i] = data[i].fillna(data.groupby('date')[i].transform('median'))
cols1 = ["permno","date","ret_exc_lead1m", "be_me", "ret_12_1", "market_equity", "ret_1_0", "rvol_252d", "beta_252d",
         "qmj_safety", "rmax1_21d", "chcsho_12m","ni_me", "eq_dur", "ret_60_12", "ope_be", "gp_at", "ebit_sale", "at_gr1", "sale_gr1",
         "at_be","cash_at", "age", "z_score"]
data1 = data[cols1]
data1 = data1.dropna()
#rank transformation
#each characteristics is transformed into the cross-sectional rank
# NOTE(review): data1 is a slice of data, so these assignments can emit
# SettingWithCopyWarning; consider data1 = data[cols1].copy().
for i in cols:
    data1[i] = data1.groupby("date")[i].rank(pct=True)
#use df, store data1
df = data1
df.sort_values(by=['date', 'permno'], inplace=True)
# lists of months present in the full sample and in the test window
train_month_list = df.date.unique()
df_test = df[(df["date"] > test_start) & (df["date"] < test_end)]
obj_month_list = df_test.date.unique()
#optimization part
def regularizer(beta):
    """L1-norm penalty on the coefficient vector (LASSO sparsity term)."""
    return cp.norm1(beta)
def loss_fn(X, Y, beta, intercept):
    """Mean squared residual of the linear model X @ beta + intercept against Y."""
    return (1.0 / X.shape[0]) *(cp.norm2(X @ beta + intercept - Y)**2)
#objective function 1 - L1 penalty on the coefficient
def objective_fn_initial(X, Y, beta, intercept, lambda1):
    """
    LASSO objective: mean squared residual plus a weighted L1 penalty.

    :param X: feature matrix (observations x characteristics)
    :param Y: target vector
    :param beta: cvxpy coefficient variable
    :param intercept: cvxpy intercept variable
    :param lambda1: weight on the L1 penalty
    :return: cvxpy expression for the penalized objective
    """
    # Reuse the shared regularizer() helper defined above instead of
    # duplicating the cp.norm1 penalty inline, keeping the penalty
    # definition in one place.
    return loss_fn(X, Y, beta, intercept) + lambda1 * regularizer(beta)
#coefficients, intercepts and objective values (one column/slot per model)
beta_list = np.zeros((len(cols), 3))
intercept_list = np.zeros(3)
objective_list = np.zeros(3)
#set your own penalty parameters
lambda1_value = 0.0005
# Training window: the 5 months preceding the first test month.
index = list(train_month_list).index(obj_month_list[0])
data_train = df[(df["date"] < train_month_list[index]) & (df["date"] >=train_month_list[index-5])]
X_train = data_train[cols].to_numpy()
Y_train = data_train["ret_exc_lead1m"].to_numpy()
#sklearn
# alpha = lambda1/2 to match the cvxpy objective's scaling convention -
# presumably aligning sklearn's (1/2n)||..||^2 loss; confirm.
model_lasso = linear_model.Lasso(alpha=lambda1_value/2)
model_lasso.fit(X_train, Y_train)
beta_list[:,0] = model_lasso.coef_
intercept_list[0] = model_lasso.intercept_
objective_list[0] = np.linalg.norm(model_lasso.coef_, ord=1)
#cvx with objective function 1
beta = cp.Variable(len(cols))
intercept = cp.Variable(1)
problem = cp.Problem(cp.Minimize(objective_fn_initial(X_train, Y_train, beta, intercept, lambda1_value)))
problem.solve(solver = cp.SCS)
#beta_list[:,2] = beta.value
# NOTE(review): column 2 of beta_list / slot 2 of the other arrays is
# never populated (see commented line above) - confirm whether a third
# model was intended.
#Choose your own threshold here. I chose 0.0001.
beta_list[:,1] = np.where(abs(beta.value) < 0.0001, 0, beta.value)
intercept_list[1] = intercept.value
# NOTE(review): the next assignment is immediately overwritten by the
# one after it, so problem.value is discarded - likely leftover code.
objective_list[1] = problem.value
objective_list[1] = np.linalg.norm(beta.value, ord=1)
#The estimates in each column should be very close. Because there are the same model when lambda2_value = 0
#Feel free to try different penalty parameters and write your own optimization problems.
| Sho-Shoo/36490-F23-Group1 | example_code/code_example.py | code_example.py | py | 4,282 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.chdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cvxpy.norm1",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cvxpy.norm2",
"line_number":... |
38827167045 | """
The RAD record format defines what a flat record should look like. It's
an unifying format that is used through out the application.
This record is passed around the different components of the application
to keep a consistent data format. Also it is a good model for other people
in the team looking for data. It gives our data collection some backbone.
For example a scraper is ran, it returns a list of "RadRecord"s, some
middleware takes these records and calls a function that accepts "RadRecord"s
and creates and saves database records from them. That way the scraper doesn't
know about the database and the database models aren't structured around the
scrapers.
Use the rad_record function to create new records. It allows you to create
objects without specifying all of the fields.
>>> r = rad_record('Some Hospital')
>>> r.city is None # True
>>> r2 = rad_record('Other Hospital', city='Chicago', state='Illinois')
>>> r2.city is None # False
The rad_record function will return an instance of RadRecord.
>>> r3 = rad_record('Vida Sida')
>>> r3.is_valid() # True
>>> r4 = rad_record(None)
>>> r4.is_valid() # False
To be valid, all records must have a name. In addition, if a
date_verified value is provided, it must be successfully
parsed into a date using 'YYYY-MM-DD' format.
"""
from datetime import datetime
from collections import namedtuple
"""
A record contains the following fields.
The procedure type and category name are
optional. They are transformed by middleware
into relationships at the database.
The one field that is completely necessary is
`name`. This might be an institutions name, or
a doctor to name some examples.
Underneath the hood a RadRecord is a Python named
tuple, read it's documentation if you aren't familiar
with it. It is a very nice and slick data structure.
"""
RadRecord = namedtuple('RadRecord', [
    # The only strictly required field (see is_valid below).
    'name',
    'organization',
    'description',
    # Location fields.
    'address',
    'street',
    'city',
    'state',
    'country',
    'zipcode',
    # Contact fields.
    'email',
    'phone',
    'fax',
    'url',
    'source',
    # Single delimited string vs. parsed list forms (see the
    # convert_* functions below).
    'category_name',
    'category_names',
    'population_names',
    'population_tags',
    'procedure_type',
    # Flag-style fields, coerced to booleans by normalize_record.
    'is_icath',
    'is_wpath',
    'wheelchair_accessible',
    'sliding_scale',
    'hospital_affiliation',
    'hours',
    'npi',
    'visible',
    'notes',
    # Must parse as 'YYYY-MM-DD' when provided (see is_valid).
    'date_verified'])
def is_valid(record):
    """
    Validates a RadRecord.

    A record is valid when its name contains at least one
    non-whitespace character, and - if date_verified is supplied -
    that value parses as a date in 'YYYY-MM-DD' format.

    Args:
        record: The record to validate.

    Returns:
        A boolean indicating whether the provided record is valid.
    """
    # A usable name must be present and non-blank.
    has_name = record.name is not None and \
               len(record.name) > 0 and \
               not record.name.isspace()
    if not has_name:
        return False
    # A missing or blank date_verified is acceptable; only a value
    # with actual content needs to be parsed.
    verified = record.date_verified
    if verified is None or \
       len(verified) == 0 or \
       verified.isspace():
        return True
    # The provided date must match 'YYYY-MM-DD' exactly.
    try:
        datetime.strptime(verified, '%Y-%m-%d')
    except ValueError:
        return False
    return True
def parse_delimited_list(liststr):
    """
    Parses the provided string, assumed to be a semicolon-delimited
    list of items, into the corresponding unique list of strings.

    Args:
        liststr: The delimited string to parse.

    Returns:
        The resulting unique list of strings (order not guaranteed).
    """
    # Null/empty/whitespace input produces an empty list.
    if liststr is None or \
       len(liststr) == 0 or \
       liststr.isspace():
        return list()
    # Collect the trimmed, non-blank entries into a set so that
    # duplicates are dropped, then hand back a list.
    unique_entries = set()
    for entry in liststr.split(';'):
        if entry is not None and \
           len(entry) > 0 and \
           not entry.isspace():
            unique_entries.add(entry.strip())
    return list(unique_entries)
# Store some lowercase sets for true/false strings
true_values = set([u'true', u't', u'yes', u'y', u'1'])
false_values = set([u'false', u'f', u'no', u'n', u'0'])
# The accepted string types: on Python 2 both str and unicode, on
# Python 3 just str. The original code referenced the bare name
# `unicode`, which raises NameError on Python 3.
try:
    _string_types = (str, unicode)  # noqa: F821 - Python 2 only
except NameError:
    _string_types = (str,)
def convert_boolean(boolVal):
    """
    Coerces the provided value to a Boolean value, falling back to
    None if the value could not be converted.

    Args:
        boolVal: The value to convert.

    Returns:
        True, False, or None depending on the result of the conversion.
    """
    # Handle None types
    if boolVal is None:
        return None
    # Handle Boolean types. This must come before the numeric branch,
    # since bool is a subclass of int.
    if isinstance(boolVal, bool):
        return boolVal
    # Handle strings
    if isinstance(boolVal, _string_types):
        # Handle empty values
        if len(boolVal) == 0 or boolVal.isspace():
            return None
        # Trim the string and lowercase it
        normalized = boolVal.strip().lower()
        # Now look for it in our sets, falling back to None if
        # we don't find anything
        if normalized in true_values:
            return True
        elif normalized in false_values:
            return False
        else:
            return None
    # Handle integer/float types
    if isinstance(boolVal, int) or isinstance(boolVal, float):
        if boolVal == 1:
            return True
        elif boolVal == 0:
            return False
        else:
            return None
    # Fall-through case
    return None
def convert_category_name(record):
    """
    Converts a RadRecord's semicolon-delimited category_name field
    into the category_names list and returns the updated RadRecord.

    Processing is skipped when category_names already has data.

    Args:
        record: The record to convert.

    Returns:
        An updated version of the RadRecord with category_names
        set appropriately.
    """
    # Nothing to do for a missing record.
    if record is None:
        return record
    # Leave an already-populated category_names untouched.
    existing = record.category_names
    if existing is not None and len(existing) > 0:
        return record
    # Parse the single delimited field into the list form.
    return record._replace(
        category_names=parse_delimited_list(record.category_name))
def convert_population_names(record):
    """
    Converts a RadRecord's semicolon-delimited population_names field
    into the population_tags list and returns the updated RadRecord.

    Processing is skipped when population_tags already has data.

    Args:
        record: The record to convert.

    Returns:
        An updated version of the RadRecord with population_tags
        set appropriately.
    """
    # Nothing to do for a missing record.
    if record is None:
        return record
    # Leave an already-populated population_tags untouched.
    existing = record.population_tags
    if existing is not None and len(existing) > 0:
        return record
    # Parse the single delimited field into the tag list.
    return record._replace(
        population_tags=parse_delimited_list(record.population_names))
def normalize_record(record):
    """
    Normalizes all fields on the provided RadRecord: delimited name
    fields become lists and flag-style fields become real booleans.

    Args:
        record: The RadRecord to normalize.

    Returns:
        A normalized version of the RadRecord, or None if no record
        was provided.
    """
    if record is None:
        return None
    # visible should never be null - default it to False.
    if record.visible is None:
        record = record._replace(visible=False)
    # First expand the delimited category/population fields.
    converted = record.convert_category_name().convert_population_names()
    # Then coerce every flag field to True/False/None.
    return converted._replace(
        is_wpath=convert_boolean(record.is_wpath),
        is_icath=convert_boolean(record.is_icath),
        wheelchair_accessible=convert_boolean(record.wheelchair_accessible),
        sliding_scale=convert_boolean(record.sliding_scale),
        visible=convert_boolean(record.visible))
# Give every RadRecord a method to help with validation.
# (Assigning plain functions onto the namedtuple class makes them
# callable as methods, e.g. record.is_valid().)
RadRecord.is_valid = is_valid
# Also attach the conversion functions.
RadRecord.convert_category_name = convert_category_name
RadRecord.convert_population_names = convert_population_names
RadRecord.normalize_record = normalize_record
def rad_record(name, organization=None, description=None,
    address=None, street=None, city=None, state=None, zipcode=None, country=None,
    email=None, phone=None, fax=None, url=None,
    source=None, category_name=None, category_names=None,
    population_names=None, population_tags=None, procedure_type=None,
    is_icath=None, is_wpath=None, wheelchair_accessible=None, sliding_scale=None,
    hospital_affiliation=None, hours=None, npi=None, visible=True,
    notes=None, date_verified=None):
    """
    Convenience factory for RadRecords with optional fields.

    Use this instead of the class constructor so you don't have to
    specify all of the fields.
    """
    # Construct the namedtuple by keyword so each value is bound to
    # the field it names regardless of declaration order.
    return RadRecord(
        name=name,
        organization=organization,
        description=description,
        address=address,
        street=street,
        city=city,
        state=state,
        country=country,
        zipcode=zipcode,
        email=email,
        phone=phone,
        fax=fax,
        url=url,
        source=source,
        category_name=category_name,
        category_names=category_names,
        population_names=population_names,
        population_tags=population_tags,
        procedure_type=procedure_type,
        is_icath=is_icath,
        is_wpath=is_wpath,
        wheelchair_accessible=wheelchair_accessible,
        sliding_scale=sliding_scale,
        hospital_affiliation=hospital_affiliation,
        hours=hours,
        npi=npi,
        visible=visible,
        notes=notes,
        date_verified=date_verified)
| radremedy/radrecord | radrecord/rad_record.py | rad_record.py | py | 9,585 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 112,
"usage_type": "name"
}
] |
24363636738 | import unittest
import uuid
from collections import namedtuple
from mock import Mock, patch
from spoppy import menus, responses
from . import utils
# Minimal stand-in for spoppy's loader: only exposes a .results attribute.
MockLoader = namedtuple('Loader', ('results', ))
class TestOptions(unittest.TestCase):
    """Unit tests for menus.Options key/name filtering and matching."""
    def setUp(self):
        # Fixture: keys and display names chosen so some filters match
        # keys, some match names, and some match both ('q'/'o').
        self.dct = {
            '1': menus.MenuValue('A', Mock()),
            '2': menus.MenuValue('B', Mock()),
            '3': menus.MenuValue('C', Mock()),
            's': menus.MenuValue('Search', Mock()),
            'kk': menus.MenuValue('pp', Mock()),
            'ko': menus.MenuValue('p', Mock()),
            'q': menus.MenuValue('o', Mock()),
            'o': menus.MenuValue('q', Mock()),
        }
        self.op = menus.Options(self.dct)
    def test_options_filter_empty(self):
        """An empty filter returns the options unchanged."""
        self.assertEqual(self.op, self.op.filter(''))
    def test_filter_by_key(self):
        """Filtering matches on option keys, including key prefixes."""
        tc = self.op.filter('1')
        self.assertEqual(len(tc), 1)
        self.assertIn('1', tc)
        tc = self.op.filter('k')
        self.assertEqual(len(tc), 2)
        self.assertIn('kk', tc)
        self.assertIn('ko', tc)
        tc = self.op.filter('s')
        self.assertEqual(len(tc), 1)
        self.assertIn('s', tc)
    def test_filter_by_name(self):
        """Filtering also matches on the displayed option names."""
        tc = self.op.filter('p')
        self.assertEqual(len(tc), 2)
        self.assertIn('kk', tc)
        self.assertIn('ko', tc)
        tc = self.op.filter('pp')
        self.assertEqual(len(tc), 1)
        self.assertIn('kk', tc)
        tc = self.op.filter('Sea')
        self.assertEqual(len(tc), 1)
        self.assertIn('s', tc)
    def test_fuzzy_filter(self):
        """Non-contiguous (fuzzy) filters still match a name."""
        dct = {
            '1': menus.MenuValue('This is a playlist', Mock())
        }
        op = menus.Options(dct)
        should_match = (
            'ThisIsAPlaylist',
            'ThisPlaylist',
            'tiaplay',
        )
        for _filter in should_match:
            self.assertEqual(len(op.filter(_filter)), 1)
    def test_filter_is_case_insensitive(self):
        """Upper- and lowercase filters give identical results."""
        self.assertEqual(self.op.filter('Search'), self.op.filter('search'))
    def test_filter_returns_empty_if_no_match(self):
        """A filter matching nothing returns an empty result."""
        self.assertEqual(len(self.op.filter('asdf')), 0)
    def test_get_possibilities_from_name_and_key(self):
        """Possibilities include matches by key and by name ('q' is both)."""
        tc = self.op.get_possibilities('q')
        self.assertEqual(len(tc), 2)
        self.assertEqual(sorted(tc), sorted(['q', 'o']))
    def test_possibility_not_duplicated(self):
        """An option matching by both key and name is listed only once."""
        tc = self.op.get_possibilities('s')
        self.assertEqual(len(tc), 1)
        self.assertIn('s', tc)
    def test_possiblities_with_spaces(self):
        """Keys padded with whitespace still match their trimmed form."""
        op = menus.Options({
            ' a': menus.MenuValue('p', Mock()),
            'b ': menus.MenuValue('p', Mock()),
            ' c ': menus.MenuValue('p', Mock()),
        })
        for key in 'a', 'b', 'c':
            tc = op.get_possibilities(key)
            self.assertEqual(len(tc), 1)
            # The stored (padded) key is returned, not the trimmed input.
            self.assertNotEqual(tc, [key])
            self.assertIn(key, tc[0])
    def test_matches_by_correct_key(self):
        """match_best_or_none prefers the exact key over longer keys."""
        op = menus.Options({
            'k': menus.MenuValue('1', Mock()),
            'kk': menus.MenuValue('2', Mock()),
            'kkk': menus.MenuValue('3', Mock()),
        })
        best = op.match_best_or_none('k')
        self.assertEqual(best.name, '1')
        best = op.match_best_or_none('kk')
        self.assertEqual(best.name, '2')
        best = op.match_best_or_none('kkk')
        self.assertEqual(best.name, '3')
    def test_matches_by_correct_padded_key(self):
        """Exact matching ignores whitespace padding in stored keys."""
        op = menus.Options({
            ' a': menus.MenuValue('1', Mock()),
            'b ': menus.MenuValue('2', Mock()),
            ' c ': menus.MenuValue('3', Mock()),
            ' s i ': menus.MenuValue('4', Mock()),
        })
        best = op.match_best_or_none('a')
        self.assertEqual(best.name, '1')
        best = op.match_best_or_none('b')
        self.assertEqual(best.name, '2')
        best = op.match_best_or_none('c')
        self.assertEqual(best.name, '3')
        best = op.match_best_or_none('si')
        self.assertEqual(best.name, '4')
    def test_check_unique_keys(self):
        """Keys that collide after trimming are rejected with TypeError."""
        with self.assertRaises(TypeError):
            menus.Options({
                'a': menus.MenuValue('p', Mock()),
                ' a': menus.MenuValue('k', Mock())
            })
        # The same check applies to item assignment after construction.
        with self.assertRaises(TypeError):
            self.op[' 1'] = menus.MenuValue('1', Mock())
class MenuTests(unittest.TestCase):
def setUp(self):
self.navigator = Mock()
self.navigator.get_ui_height.return_value = 100
class SubMenu(menus.Menu):
def get_options(self):
return {}
self.submenu = SubMenu(self.navigator)
def test_must_be_subclassed(self):
m = menus.Menu(self.navigator)
with self.assertRaises(NotImplementedError):
m.get_options()
def test_global_options_correct(self):
self.submenu.INCLUDE_UP_ITEM = False
self.navigator.player.has_been_loaded.return_value = False
self.submenu.initialize()
included_items = [
value.destination for value in self.submenu._options.values()
]
self.assertEqual(len(included_items), 1)
self.assertIn(responses.QUIT, included_items)
self.assertNotIn(responses.UP, included_items)
self.assertNotIn(responses.PLAYER, included_items)
self.submenu.INCLUDE_UP_ITEM = True
self.navigator.player.has_been_loaded.return_value = False
self.submenu.initialize()
included_items = [
value.destination for value in self.submenu._options.values()
]
self.assertEqual(len(included_items), 2)
self.assertIn(responses.QUIT, included_items)
self.assertIn(responses.UP, included_items)
self.assertNotIn(responses.PLAYER, included_items)
self.submenu.INCLUDE_UP_ITEM = False
self.navigator.player.has_been_loaded.return_value = True
self.submenu.initialize()
included_items = [
value.destination for value in self.submenu._options.values()
]
self.assertEqual(len(included_items), 2)
self.assertIn(responses.QUIT, included_items)
self.assertNotIn(responses.UP, included_items)
self.assertIn(responses.PLAYER, included_items)
self.submenu.INCLUDE_UP_ITEM = True
self.navigator.player.has_been_loaded.return_value = True
self.submenu.initialize()
included_items = [
value.destination for value in self.submenu._options.values()
]
self.assertEqual(len(included_items), 3)
self.assertIn(responses.QUIT, included_items)
self.assertIn(responses.UP, included_items)
self.assertIn(responses.PLAYER, included_items)
def test_filter_initialized_correctly(self):
self.assertFalse(hasattr(self.submenu, 'filter'))
self.submenu.initialize()
self.assertTrue(hasattr(self.submenu, 'filter'))
self.assertEqual(self.submenu.filter, '')
@patch('spoppy.menus.single_char_with_timeout')
def test_pagination_keys(self, patched_chargetter):
self.assertEqual(self.submenu.PAGE, 0)
patched_chargetter.return_value = menus.Menu.DOWN_ARROW
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.PAGE, 1)
patched_chargetter.return_value = menus.Menu.UP_ARROW
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.PAGE, 0)
patched_chargetter.return_value = menus.Menu.UP_ARROW
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.PAGE, 0)
@patch('spoppy.menus.single_char_with_timeout')
def test_backspace(self, patched_chargetter):
self.submenu.initialize()
patched_chargetter.return_value = b'a'
self.assertEqual(self.submenu.filter, '')
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, 'a')
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, 'aa')
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, 'aaa')
patched_chargetter.return_value = menus.Menu.BACKSPACE
self.assertEqual(self.submenu.filter, 'aaa')
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, 'aa')
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, 'a')
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, '')
@patch('spoppy.menus.Menu.is_valid_response')
@patch('spoppy.menus.single_char_with_timeout')
def test_return(self, patched_chargetter, patched_is_valid):
destination = 'DESTINATION'
patched_is_valid.return_value = menus.MenuValue('TEST', destination)
self.submenu.initialize()
patched_chargetter.return_value = b'\n'
self.assertEqual(self.submenu.get_response(), destination)
patched_is_valid.assert_called_once_with()
@patch('spoppy.menus.single_char_with_timeout')
def test_checks_for_end_of_track(self, patched_chargetter):
patched_chargetter.side_effect = [None, None, b'a']
self.submenu.initialize()
self.assertEqual(self.submenu.get_response(), responses.NOOP)
self.assertEqual(self.submenu.filter, 'a')
self.assertEqual(
self.navigator.player.check_end_of_track.call_count, 3
)
@patch('spoppy.menus.Options.match_best_or_none')
def test_is_valid_uses_options(self, patched_match_best_or_none):
patched_match_best_or_none.return_value = 'RETVAL'
self.submenu.initialize()
self.submenu.filter = 'ASDF'
self.assertEqual(self.submenu.is_valid_response(), 'RETVAL')
patched_match_best_or_none.assert_called_once_with('ASDF')
@patch('spoppy.menus.Options.filter')
def test_ui_filters_items(self, patched_filter):
self.submenu.initialize()
patched_filter.return_value = self.submenu._options
self.submenu.get_ui()
patched_filter.assert_not_called()
self.submenu.filter = 'a'
self.submenu.get_ui()
patched_filter.assert_called_once_with('a')
@patch('spoppy.menus.sorted_menu_items')
def test_no_matches_warning_shown(self, patched_sorter):
self.submenu.initialize()
self.submenu.filter = ''
patched_sorter.return_value = []
ui = self.submenu.get_ui()
has_filter_in_line = [line for line in ui if 'No matches' in line]
self.assertEqual(len(has_filter_in_line), 1)
@patch('spoppy.menus.Menu.get_menu_item')
def test_uses_get_menu_item(self, patched_get_menu_item):
self.submenu.initialize()
self.submenu.filter = ''
patched_get_menu_item.return_value = 'OHAI'
ui = self.submenu.get_ui()
self.assertEqual(
patched_get_menu_item.call_count,
len([line for line in ui if line == 'OHAI'])
)
def test_shows_indicator_if_one_match(self):
self.submenu.filter = 'a'
self.submenu.get_options = Mock()
self.submenu.get_options.return_value = menus.Options({
'the_key': menus.MenuValue('sindri', Mock()),
'foo': menus.MenuValue('foo', Mock()),
'bar': menus.MenuValue('bar', Mock()),
})
self.submenu.initialize()
ui = self.submenu.get_ui()
self.assertEqual(len([line for line in ui if 'sindri' in line]), 1)
self.submenu.filter = 'the_key'
ui = self.submenu.get_ui()
self.assertEqual(len([line for line in ui if 'sindri' in line]), 2)
    def test_pagination_ui(self):
        # Paging through 1000 options must eventually display every option
        # exactly once across all pages.
        option_indicator = 'THIS IS AN OPTION'
        random_options = {
            str(uuid.uuid4()): menus.MenuValue(option_indicator, Mock())
            for i in range(1000)
        }
        get_options = Mock()
        get_options.return_value = random_options
        self.submenu.get_options = get_options
        self.submenu.initialize()
        seen_options = 0
        last_page = -1
        # Advance PAGE manually until the menu stops moving forward.
        while last_page != self.submenu.PAGE:
            ui = self.submenu.get_ui()
            if self.submenu.PAGE == last_page:
                break
            seen_options += len([
                line for line in ui
                if option_indicator in line
            ])
            last_page = self.submenu.PAGE
            self.submenu.PAGE += 1
        self.assertEqual(seen_options, len(random_options))
class TestSubMenus(unittest.TestCase):
    """Tests for the playlist overview / playlist-selected menus."""
    def setUp(self):
        self.navigator = Mock()
        self.navigator.session.playlist_container = []
    def get_playlist_selected(self):
        """Helper: PlayListSelected menu whose second track is unavailable."""
        ps = menus.PlayListSelected(self.navigator)
        tracks = [
            utils.Track('Lazarus', ['David Bowie']),
            utils.Track('Best song ever', ['Sindri'], False),
            utils.Track('Blackstar', ['David Bowie']),
            utils.Track('Ziggy Stardust', ['David Bowie']),
        ]
        ps.playlist = utils.Playlist('Playlist', tracks)
        ps.disable_loader()
        return ps
    def test_playlist_overview_shows_all_playlists(self):
        """Every session playlist gets a PlayListSelected destination."""
        self.playlists = [
            utils.Playlist('A', [utils.Track('foo', ['bar'])]),
            utils.Playlist('B', [utils.Track('foo', ['bar'])]),
            utils.Playlist('C', [utils.Track('foo', ['bar'])]),
        ]
        class Session(object):
            playlist_container = self.playlists
        self.navigator.session = Session()
        pov = menus.PlayListOverview(self.navigator)
        pov.disable_loader()
        pov.loader = MockLoader([item, {}] for item in self.playlists)
        options = menus.Options(pov.get_options())
        self.assertTrue(
            all(
                isinstance(value.destination, menus.PlayListSelected)
                for value in options.values()
            )
        )
        for playlist in self.playlists:
            self.assertIsNotNone(options.match_best_or_none(playlist.name))
    def test_playlist_overview_shows_invalid_playlists_as_well(self):
        """Empty, unlinked and unavailable playlists are still listed."""
        self.playlists = [
            utils.Playlist('', []),
            utils.Playlist('A', [utils.Track('foo', ['bar'])]),
            utils.Playlist('B', []),
            utils.Playlist(
                'C', [utils.Track('foo', ['bar'], available=False)]
            ),
            utils.Playlist('D', []),
        ]
        # Simulate a playlist whose link attribute is missing entirely.
        del self.playlists[1].link
        class Session(object):
            playlist_container = self.playlists
        self.navigator.session = Session()
        pov = menus.PlayListOverview(self.navigator)
        pov.disable_loader()
        pov.loader = MockLoader([[item, {}] for item in self.playlists])
        options = menus.Options(pov.get_options())
        self.assertEqual(len(options), 5)
        all_playlist_options = [
            t.destination.playlist
            for t in list(options.values())
        ]
        for playlist in self.playlists:
            self.assertIn(playlist, all_playlist_options)
    def test_playlist_selected_does_not_fail_on_empty_playlist(self):
        """An empty playlist still offers delete (+ radio when spotipy set)."""
        ps = menus.PlayListSelected(self.navigator)
        ps.playlist = utils.Playlist('asdf', [])
        ps.disable_loader()
        self.navigator.session.playlist_container = [ps.playlist]
        # Only delete and radio available
        self.assertEqual(len(ps.get_options()), 2)
        self.navigator.spotipy_client = None
        # Only delete available
        self.assertEqual(len(ps.get_options()), 1)
    def test_playlist_selected_contains_only_valid_tracks(self):
        """Only available tracks are numbered (three of the four here)."""
        ps = self.get_playlist_selected()
        options = menus.Options(ps.get_options())
        self.assertIsNotNone(options.match_best_or_none('1'))
        self.assertIsNotNone(options.match_best_or_none('2'))
        self.assertIsNotNone(options.match_best_or_none('3'))
        self.assertIsNone(options.match_best_or_none('4'))
    def test_shows_shuffle_play(self):
        """The shuffle-play action is always offered."""
        ps = self.get_playlist_selected()
        options = menus.Options(ps.get_options())
        destinations = [value.destination for value in options.values()]
        self.assertIn(ps.shuffle_play, destinations)
    def test_shows_add_to_queue_if_playing(self):
        """add_to_queue is only offered while the player is playing."""
        ps = self.get_playlist_selected()
        self.navigator.player.is_playing.return_value = False
        options = menus.Options(ps.get_options())
        destinations = [value.destination for value in options.values()]
        self.assertNotIn(ps.add_to_queue, destinations)
        self.navigator.player.is_playing.return_value = True
        options = menus.Options(ps.get_options())
        destinations = [value.destination for value in options.values()]
        self.assertIn(ps.add_to_queue, destinations)
    def test_select_song(self):
        """Selecting a song yields a SongSelectedWhilePlaying menu and
        never starts playback directly."""
        ps = self.get_playlist_selected()
        song_selected = ps.select_song(0)
        self.navigator.player.is_playing.return_value = False
        self.assertIsInstance(song_selected(), menus.SongSelectedWhilePlaying)
        self.navigator.player.play_track.assert_not_called()
        self.navigator.player.is_playing.return_value = True
        song_selected_result = song_selected()
        self.assertIsInstance(
            song_selected_result, menus.SongSelectedWhilePlaying
        )
        self.assertEqual(song_selected_result.playlist, ps.playlist)
        self.assertEqual(song_selected_result.track, ps.playlist.tracks[0])
        self.navigator.player.play_track.assert_not_called()
class TestSearch(unittest.TestCase):
    """Tests for track/album search menus and their results pagination."""
    def setUp(self):
        self.navigator = Mock()
    @patch('spoppy.menus.Menu.get_response')
    def test_uses_parent_get_response(self, patched_get_response):
        """All search menus delegate get_response() to the Menu base class."""
        for cls in (
            menus.TrackSearchResults,
            menus.AlbumSearchResults,
            menus.TrackSearch,
            menus.AlbumSearch
        ):
            patched_get_response.reset_mock()
            patched_get_response.return_value = 'foobar'
            menu = cls(self.navigator)
            self.assertEqual(menu.get_response(), 'foobar')
            patched_get_response.assert_called_once_with()
    @patch('spoppy.menus.TrackSearchResults.update_cache')
    def test_updates_cache_on_init(self, patched_update):
        """set_initial_results() stores the search and updates the cache."""
        search = 'foobar'
        menu = menus.TrackSearchResults(self.navigator)
        menu.set_initial_results(search)
        patched_update.assert_called_once_with()
    def test_get_update_cache(self):
        """update_cache() adds the current search to the (empty) cache."""
        search = 'foobar'
        menu = menus.TrackSearchResults(self.navigator)
        self.assertEqual(len(menu.get_cache()), 0)
        menu.search = search
        menu.update_cache()
        self.assertIn(search, menu.get_cache())
    @patch('spoppy.menus.TrackSearchResults.search')
    def test_resets_paginating(self, patched_search):
        """get_response() waits for results, then clears the paginating flag."""
        patched_search.loaded_event.wait.return_value = True
        menu = menus.TrackSearchResults(self.navigator)
        menu.paginating = True
        self.assertEqual(menu.get_response(), menu)
        self.assertFalse(menu.paginating)
        patched_search.loaded_event.wait.assert_called_once_with()
    @patch('spoppy.menus.TrackSearchResults.update_cache')
    @patch('spoppy.menus.search')
    @patch('spoppy.menus.TrackSearchResults.get_cache')
    def test_go_to_from_cache(
        self, patched_cache,
        patched_search, patched_update
    ):
        """go_to() serves next/previous pages from cache without re-searching."""
        patched_cache.return_value = [Mock(), Mock()]
        menu = menus.TrackSearchResults(self.navigator)
        menu.search = patched_cache.return_value[0]
        # next_page
        callback = menu.go_to(1)
        self.assertEqual(callback(), menu)
        self.assertEqual(menu.search, patched_cache.return_value[1])
        self.assertTrue(menu.paginating)
        patched_update.assert_not_called()
        patched_search.assert_not_called()
        # previous_page
        callback = menu.go_to(-1)
        self.assertTrue(callable(callback))
        self.assertEqual(callback(), menu)
        self.assertEqual(menu.search, patched_cache.return_value[0])
        self.assertTrue(menu.paginating)
        patched_update.assert_not_called()
        patched_search.assert_not_called()
    @patch('spoppy.menus.TrackSearchResults.update_cache')
    @patch('spoppy.menus.search')
    @patch('spoppy.menus.TrackSearchResults.get_cache')
    def test_go_to_from_search(
        self, patched_cache,
        patched_search, patched_update
    ):
        """go_to() falls back to a fresh search when the page is not cached."""
        patched_cache.return_value = [Mock()]
        patched_search.return_value = Mock()
        menu = menus.TrackSearchResults(self.navigator)
        menu.search = patched_cache.return_value[0]
        callback = menu.go_to(1)
        self.assertTrue(callable(callback))
        self.assertEqual(callback(), menu)
        self.assertEqual(menu.search, patched_search.return_value)
        self.assertTrue(menu.paginating)
        patched_update.assert_called_once_with()
        # Don't check for how it was called, at least not at the moment
        self.assertEqual(patched_search.call_count, 1)
    def test_mock_playlist_contains_term_in_search(self):
        """The generated mock playlist name embeds the search term."""
        menu = menus.TrackSearchResults(self.navigator)
        menu.search = Mock()
        menu.search.results.term = 'foobar'
        self.assertIn('foobar', menu.get_mock_playlist_name())
    @patch('spoppy.menus.TrackSearchResults.search')
    def test_select_song_while_playing(self, patched_self_search):
        """Selecting while playing returns a SongSelectedWhilePlaying menu."""
        patched_self_search.results.results = ['foo']
        self.navigator.player.is_playing.return_value = True
        menu = menus.TrackSearchResults(self.navigator)
        callback = menu.select_song(0)
        self.assertTrue(callable(callback))
        res = callback()
        self.assertIsInstance(res, menus.SongSelectedWhilePlaying)
        self.assertEqual(res.track, 'foo')
    @patch('spoppy.menus.TrackSearchResults.search')
    def test_select_song_while_paused(self, patched_self_search):
        """Selecting while paused behaves the same as while playing."""
        patched_self_search.results.results = ['foo']
        self.navigator.player.is_playing.return_value = False
        menu = menus.TrackSearchResults(self.navigator)
        callback = menu.select_song(0)
        self.assertTrue(callable(callback))
        res = callback()
        # self.assertEqual(res, self.navigator.player)
        self.assertIsInstance(res, menus.SongSelectedWhilePlaying)
        self.assertEqual(res.track, 'foo')
        # self.navigator.player.clear.assert_called_once_with()
        # self.navigator.player.add_to_queue.assert_called_once_with('foo')
        # self.navigator.player.play_track.assert_called_once_with(0)
    @patch('spoppy.menus.TrackSearchResults.search')
    def test_get_res_idx(self, patched_self_search):
        """get_res_idx() maps a page-relative index to a 1-based absolute one."""
        menu = menus.TrackSearchResults(self.navigator)
        # NOTE(review): range(0, 5, 20) only yields 0, so just one offset is
        # exercised — probably meant range(0, 100, 20); confirm intent.
        for i in range(0, 5, 20):
            patched_self_search.results.offset = i
            self.assertEqual(menu.get_res_idx(0), i+1)
    @patch('spoppy.menus.Menu.get_ui')
    def test_returns_different_ui_while_paginating(self, patched_get_ui):
        """While paginating, a different (loading) UI is rendered."""
        menu = menus.TrackSearchResults(self.navigator)
        first_one = menu.get_ui()
        menu.paginating = True
        second_one = menu.get_ui()
        self.assertNotEqual(first_one, second_one)
        patched_get_ui.assert_called_once_with()
    def test_get_options(self):
        """get_options() exposes pagination and per-track entries as needed."""
        menu = menus.TrackSearchResults(self.navigator)
        menu.search = Mock()
        menu.search.results.previous_page = True
        menu.search.results.next_page = True
        menu.search.results.offset = 1
        menu.search.results.results = []
        menu.paginating = True
        self.assertEqual(len(menu.get_options()), 0)
        menu.paginating = False
        # Last page, next page
        self.assertEqual(len(menu.get_options()), 2)
        menu.search.results.previous_page = False
        # Only next page
        self.assertEqual(len(menu.get_options()), 1)
        menu.search.results.next_page = False
        self.assertEqual(len(menu.get_options()), 0)
        menu.search.results.results = [utils.Track('foo', 'bar')]
        # Shuffle and the song itself
        self.assertEqual(len(menu.get_options()), 2)
class TestPlaylistSaver(unittest.TestCase):
    """Tests for SavePlaylist and the Spotify (spotipy) OAuth login menu."""
    def setUp(self):
        self.navigator = Mock()
    def test_returns_different_ui_while_paginating(self):
        """While saving, the menu renders a different UI."""
        menu = menus.SavePlaylist(self.navigator)
        menu.filter = ''
        menu.song_list = []
        first_one = menu.get_ui()
        menu.is_saving = True
        menu.new_playlist_name = ''
        second_one = menu.get_ui()
        self.assertNotEqual(first_one, second_one)
    @patch('spoppy.menus.Menu.get_response')
    def test_uses_parent_get_response(self, patched_get_response):
        """When not saving, get_response() delegates to the Menu base class."""
        patched_get_response.reset_mock()
        patched_get_response.return_value = 'foobar'
        menu = menus.SavePlaylist(self.navigator)
        self.assertEqual(menu.get_response(), 'foobar')
        patched_get_response.assert_called_once_with()
    @patch('spoppy.menus.Playlist')
    def test_saves_playlist(self, patched_playlist):
        """Saving a new name creates a playlist, adds tracks and reloads it."""
        class MockSong(object):
            # Minimal stand-in for a track with a .link.uri attribute.
            class Link(object):
                pass
            def __init__(self, id):
                self.link = MockSong.Link()
                self.link.uri = id
        patched_playlist.return_value = Mock()
        spotipy = self.navigator.spotipy_client
        spotipy.current_user_playlists = Mock()
        spotipy.current_user_playlists.return_value = {
            'items': [],
        }
        spotipy.user_playlist_create = Mock()
        spotipy.user_playlist_create.return_value = {
            'id': 'some-id',
            'uri': 'some-uri',
        }
        spotipy.user_playlist_add_tracks = Mock()
        self.navigator.spotipy_me = {
            'id': 'sindrig',
        }
        menu = menus.SavePlaylist(self.navigator)
        menu.is_saving = True
        menu.new_playlist_name = 'foobar'
        menu.song_list = [MockSong(1), MockSong(2), MockSong(3)]
        menu.callback = Mock()
        self.assertEqual(menu.get_response(), responses.UP)
        spotipy.user_playlist_create.assert_called_once_with(
            user='sindrig',
            name='foobar',
        )
        spotipy.user_playlist_add_tracks.assert_called_once_with(
            user='sindrig',
            playlist_id='some-id',
            tracks=[1, 2, 3]
        )
        menu.callback.assert_called_once_with(patched_playlist.return_value)
        patched_playlist.assert_called_once_with(
            self.navigator.session,
            'some-uri',
        )
        patched_playlist.return_value.load.assert_called_once_with()
    @patch('spoppy.menus.Playlist')
    def test_edits_playlist(self, patched_playlist):
        """Saving under an existing name replaces that playlist's tracks."""
        class MockSong(object):
            class Link(object):
                pass
            def __init__(self, id):
                self.link = MockSong.Link()
                self.link.uri = id
        patched_playlist.return_value = Mock()
        spotipy = self.navigator.spotipy_client
        spotipy.current_user_playlists = Mock()
        spotipy.current_user_playlists.return_value = {
            'items': [{
                'id': 'some-id',
                'name': 'foobar',
                'uri': 'some-uri',
            }],
        }
        spotipy.user_playlist_create = Mock()
        spotipy.user_playlist_add_tracks = Mock()
        spotipy.user_playlist_replace_tracks = Mock()
        self.navigator.spotipy_me = {
            'id': 'sindrig',
        }
        menu = menus.SavePlaylist(self.navigator)
        menu.is_saving = True
        menu.new_playlist_name = 'foobar'
        menu.song_list = [MockSong(1), MockSong(2), MockSong(3)]
        menu.callback = Mock()
        self.assertEqual(menu.get_response(), responses.UP)
        spotipy.user_playlist_create.assert_not_called()
        spotipy.user_playlist_add_tracks.assert_not_called()
        spotipy.user_playlist_replace_tracks.assert_called_once_with(
            user='sindrig',
            playlist_id='some-id',
            tracks=[1, 2, 3]
        )
        menu.callback.assert_called_once_with(patched_playlist.return_value)
        patched_playlist.assert_called_once_with(
            self.navigator.session,
            'some-uri',
        )
        patched_playlist.return_value.load.assert_called_once_with()
    @patch('spoppy.menus.threading')
    @patch('spoppy.menus.webbrowser')
    @patch('spoppy.menus.oAuthServerThread')
    def test_spotipy_initialization(
        self, patched_server, patched_browser, patched_threading
    ):
        """initialize() starts the oauth server and opens the authorize URL."""
        sp_oauth = Mock()
        self.navigator.lifecycle.get_spotipy_oauth.return_value = sp_oauth
        sp_oauth.get_authorize_url.return_value = 'http://irdn.is/'
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.initialize()
        sp_oauth.get_authorize_url.assert_called_once_with()
        patched_server().start.assert_called_once_with()
        patched_browser.open.assert_called_once_with(
            sp_oauth.get_authorize_url.return_value
        )
        self.assertIsNone(menu.message_from_spotipy)
        patched_server.reset_mock()
        patched_browser.reset_mock()
        sp_oauth.reset_mock()
        # If the oauth server failed to bind, no browser is opened and an
        # error message is stored for display instead.
        patched_server().server = None
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.initialize()
        sp_oauth.get_authorize_url.assert_called_once_with()
        patched_server().start.assert_called_once_with()
        patched_browser.open.assert_not_called()
        self.assertIsNotNone(menu.message_from_spotipy)
    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_up(self, patched_chargetter):
        """Pressing 'q' or 'u' shuts the oauth server down and goes UP."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        for quitchar in b'q', b'u':
            patched_chargetter.return_value = quitchar
            self.assertEqual(menu.get_response(), responses.UP)
            menu.oauth_server.shutdown.assert_called_once_with()
            menu.oauth_server.reset_mock()
    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_response_parts_code(
        self, patched_chargetter
    ):
        """A 'code' response exchanges the token and refreshes the client."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        menu.sp_oauth = Mock()
        patched_chargetter.return_value = None
        menu._spotipy_response_parts = {
            'code': [
                'foobar'
            ]
        }
        self.assertEqual(menu.get_response(), responses.UP)
        menu.oauth_server.shutdown.assert_called_once_with()
        self.navigator.lifecycle.set_spotipy_token.assert_called_once_with(
            menu.sp_oauth.get_access_token('foobar')
        )
        self.navigator.refresh_spotipy_client.assert_called_once_with()
    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_response_parts_error(
        self, patched_chargetter
    ):
        """An 'error' response keeps the menu open and surfaces the error."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        menu.sp_oauth = Mock()
        patched_chargetter.return_value = None
        menu._spotipy_response_parts = {
            'error': [
                'foobar'
            ]
        }
        self.assertEqual(menu.get_response(), responses.NOOP)
        menu.oauth_server.shutdown.assert_called_once_with()
        self.navigator.lifecycle.set_spotipy_token.assert_not_called()
        self.assertIn('foobar', menu.message_from_spotipy)
    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_response_parts_invalid(
        self, patched_chargetter
    ):
        """Unknown response keys are surfaced verbatim to the user."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        menu.sp_oauth = Mock()
        patched_chargetter.return_value = None
        menu._spotipy_response_parts = {
            'foobar': [
                'hallo', 'madur'
            ]
        }
        self.assertEqual(menu.get_response(), responses.NOOP)
        menu.oauth_server.shutdown.assert_called_once_with()
        self.navigator.lifecycle.set_spotipy_token.assert_not_called()
        self.assertIn('hallo', menu.message_from_spotipy)
        self.assertIn('madur', menu.message_from_spotipy)
| sindrig/spoppy | tests/test_menus.py | test_menus.py | py | 32,502 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "spoppy.menus.MenuValue",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "... |
70285110114 | from math import pi
import gym
import numpy as np
from math import pi, floor
from cmath import rect, phase
from gym import spaces
def wrap_to_pi(a):
    """Normalize an angle (radians) into the interval [-pi, pi].

    :param a: angle in radians
    :return: equivalent angle wrapped into [-pi, pi]
    """
    full_turn = 2 * pi
    turns = floor((a + pi) / full_turn)
    return a - full_turn * turns
def get_polar(x0, y0, xt, yt):
    """Polar coordinates of the segment from (x0, y0) to (xt, yt).

    :param x0: origin x
    :param y0: origin y
    :param xt: target x
    :param yt: target y
    :return: tuple (bearing in radians within [-pi, pi], distance)
    """
    offset = complex(yt - y0, xt - x0)
    bearing = -phase(offset)
    distance = abs(offset)
    return bearing, distance
def preprocess_state(state):
    """
    Normalize the raw environment state into an 11-element observation.

    Angles are scaled into [-1, 1] by dividing by pi; distances and the
    attacker speed are centred/scaled by hand-tuned constants
    (50000 m and 600, respectively).

    :param state: raw state vector; indices read here:
        0-1 attacker x/y, 2 attacker speed, 3 attacker heading,
        4-5 target x/y, 7 target heading,
        9-10 win-zone x/y, 11-12 loose-zone x/y
        (index 6, the target speed, is intentionally not used)
    :return: numpy array of 11 normalized features
    """
    obs = np.zeros(11)
    theta_a_wz, ro_a_wz = get_polar(state[0], state[1], state[9], state[10])
    theta_t_wz, ro_t_wz = get_polar(state[4], state[5], state[9], state[10])
    theta_t_lz, ro_t_lz = get_polar(state[4], state[5], state[11], state[12])
    theta_a_t, ro_a_t = get_polar(state[0], state[1], state[4], state[5])
    obs[0] = (theta_a_wz) / pi            # attacker bearing to win zone
    obs[1] = (ro_a_wz - 50000) / 50000    # attacker distance to win zone
    obs[2] = (theta_t_wz) / pi            # target bearing to win zone
    obs[3] = (ro_t_wz - 50000) / 50000    # target distance to win zone
    obs[4] = (theta_a_t) / pi             # attacker bearing to target
    obs[5] = (ro_a_t - 50000) / 50000     # attacker distance to target
    obs[6] = (state[2] - 600) / 600       # attacker speed
    obs[7] = state[3] / pi                # attacker heading
    obs[8] = state[7] / pi                # target heading
    obs[9] = (ro_t_lz - 50000) / 50000    # target distance to loose zone
    obs[10] = theta_t_lz / pi             # target bearing to loose zone
    return obs
class AcasWrapper(gym.Wrapper):
    """Gym wrapper that normalizes observations and rescales actions.

    Observations are mapped through preprocess_state() into an 11-dim Box
    in [-1, 1]. Actions in [-1, 1]^2 are rescaled to
    [action[0] * 200, action[1] * pi / 2] before being forwarded
    (presumably speed and heading commands — confirm against the env).
    """
    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        # 11 normalized features produced by preprocess_state()
        self.observation_space = gym.spaces.Box(
            low=-1, high=1, shape=(11,), dtype=np.float32
        )
        self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)
        # Steps taken since the randomness level was last increased
        self.nb_step_in_rand = 0
    def reset(self):
        """Reset the wrapped env and return the normalized observation."""
        state = self.env.reset()
        return preprocess_state(state)
    def step(self, action):
        """Rescale the action, step the env, and normalize the new state."""
        state, reward, done, info = self.env.step([action[0] * 200, action[1] * pi / 2])
        self.nb_step_in_rand += 1
        return preprocess_state(state), reward, done, info
    def increase_random(self):
        """Forward increase_random() to the env at most every 50000 steps."""
        if self.nb_step_in_rand > 50000:
            self.env.increase_random()
            self.nb_step_in_rand = 0
    def render(self, mode):
        # Pure pass-through to the wrapped environment.
        return self.env.render(mode)
| dam-grassman/Drone-Interception-Env | acas_wrappers.py | acas_wrappers.py | py | 4,342 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "math.pi",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "math.floor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cmath.phase",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 69,
... |
43565072112 | # encoding: utf-8
from collections import OrderedDict
import smtplib
import logging
from django import http
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied, ValidationError
from django.db import IntegrityError
from django.http import Http404
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.shortcuts import render, redirect
from django.views.generic import DetailView, UpdateView, View
from django.core.validators import validate_email
from colab.plugins.utils.collaborations import get_collaboration_data
from colab.accounts.models import (User, EmailAddress, EmailAddressValidation)
from colab.accounts.forms import (ColabSetUsernameForm, UserUpdateForm)
from colab.accounts.utils.email import send_verification_email
class UserProfileBaseMixin(object):
    """Common configuration for views that look a user up by username."""
    model = get_user_model()
    slug_field = 'username'        # model field used for the lookup
    slug_url_kwarg = 'username'    # URLconf kwarg carrying the username
    context_object_name = 'user_'  # template variable name for the user
class UserProfileUpdateView(UserProfileBaseMixin, UpdateView):
    """Edit a user's profile; restricted to the owner or a superuser."""
    template_name = 'accounts/user_update_form.html'
    form_class = UserUpdateForm
    def post(self, request, *args, **kwargs):
        # POSTs lacking the 'colab_form' marker are treated as plain page
        # loads (the method is rewritten to GET); only real form
        # submissions reach UpdateView.post().
        if not request.POST.get('colab_form'):
            request.method = 'GET'
            result = super(UserProfileUpdateView, self).get(request, *args,
                                                            **kwargs)
        else:
            result = super(UserProfileUpdateView, self).post(request, *args,
                                                             **kwargs)
        return result
    def get_success_url(self):
        # Back to the public profile page after a successful update.
        return reverse('user_profile', kwargs={'username':
                                               self.object.username})
    def get_object(self, *args, **kwargs):
        # Only the profile owner or a superuser may edit this profile.
        obj = super(UserProfileUpdateView, self).get_object(*args, **kwargs)
        if self.request.user != obj and not self.request.user.is_superuser:
            raise PermissionDenied
        return obj
class UserProfileDetailView(UserProfileBaseMixin, DetailView):
    """Show a user's profile with their ten most recent collaborations."""
    template_name = 'accounts/user_detail.html'
    def get_context_data(self, **kwargs):
        profile_user = self.object
        context = {}
        # Preserve insertion order of the per-type counters for display.
        count_types = OrderedDict()
        logged_user = None
        if self.request.user.is_authenticated():
            logged_user = User.objects.get(username=self.request.user)
        collaborations, count_types_extras = get_collaboration_data(
            logged_user, profile_user)
        # Newest first; only the ten most recent are rendered.
        collaborations.sort(key=lambda elem: elem.modified, reverse=True)
        count_types.update(count_types_extras)
        context['type_count'] = count_types
        context['results'] = collaborations[:10]
        context.update(kwargs)
        return super(UserProfileDetailView, self).get_context_data(**context)
class EmailView(View):
    """Endpoints for managing a user's email addresses.

    GET confirms a pending address via its validation key; POST registers
    a new address awaiting validation; DELETE removes/detaches an address;
    UPDATE (custom verb) makes an address the user's primary one.
    """
    http_method_names = [u'head', u'get', u'post', u'delete', u'update']
    def get(self, request, key):
        """Validate an email with the given key"""
        try:
            email_val = EmailAddressValidation.objects.get(validation_key=key)
        except EmailAddressValidation.DoesNotExist:
            messages.error(request, _('The email address you are trying to '
                                      'verify either has already been verified'
                                      ' or does not exist.'))
            return redirect('/')
        try:
            email = EmailAddress.objects.get(address=email_val.address)
        except EmailAddress.DoesNotExist:
            # First time this address is seen: create the EmailAddress row.
            email = EmailAddress(address=email_val.address)
        if email.user and email.user.is_active:
            messages.error(request, _('The email address you are trying to '
                                      'verify is already an active email '
                                      'address.'))
            email_val.delete()
            return redirect('/')
        # Attach the verified address to the user and activate the account.
        email.user = email_val.user
        email.save()
        email_val.delete()
        user = User.objects.get(username=email.user.username)
        user.is_active = True
        user.save()
        messages.success(request, _('Email address verified!'))
        return redirect('user_profile', username=email_val.user.username)
    @method_decorator(login_required)
    def post(self, request, key):
        """Create new email address that will wait for validation"""
        email = request.POST.get('email')
        user_id = request.POST.get('user')
        if not email:
            return http.HttpResponseBadRequest()
        try:
            validate_email(email)
        except ValidationError:
            return http.HttpResponseBadRequest()
        try:
            EmailAddressValidation.objects.create(address=email,
                                                  user_id=user_id)
        except IntegrityError:
            # 409 Conflict
            # duplicated entries
            # email exist and it's waiting for validation
            return http.HttpResponse(status=409)
        return http.HttpResponse(status=201)
    @method_decorator(login_required)
    def delete(self, request, key):
        """Remove an email address, validated or not."""
        # Django does not parse DELETE bodies; decode them explicitly.
        request.DELETE = http.QueryDict(request.body)
        email_addr = request.DELETE.get('email')
        user_id = request.DELETE.get('user')
        if not email_addr:
            return http.HttpResponseBadRequest()
        try:
            email = EmailAddressValidation.objects.get(address=email_addr,
                                                       user_id=user_id)
        except EmailAddressValidation.DoesNotExist:
            pass
        else:
            email.delete()
            return http.HttpResponse(status=204)
        try:
            email = EmailAddress.objects.get(address=email_addr,
                                             user_id=user_id)
        except EmailAddress.DoesNotExist:
            raise http.Http404
        # Keep the EmailAddress row but detach it from the user.
        email.user = None
        email.save()
        return http.HttpResponse(status=204)
    @method_decorator(login_required)
    def update(self, request, key):
        """Set an email address as primary address."""
        request.UPDATE = http.QueryDict(request.body)
        email_addr = request.UPDATE.get('email')
        user_id = request.UPDATE.get('user')
        if not email_addr:
            return http.HttpResponseBadRequest()
        try:
            email = EmailAddress.objects.get(address=email_addr,
                                             user_id=user_id)
        except EmailAddress.DoesNotExist:
            raise http.Http404
        email.user.email = email_addr
        email.user.save()
        return http.HttpResponse(status=204)
class EmailValidationView(View):
    """Resend the verification email for a pending address (POST only)."""
    http_method_names = [u'post']
    def post(self, request):
        email_addr = request.POST.get('email')
        user_id = request.POST.get('user')
        try:
            email = EmailAddressValidation.objects.get(address=email_addr,
                                                       user_id=user_id)
        except EmailAddressValidation.DoesNotExist:
            raise http.Http404
        try:
            send_verification_email(email_addr, email.user,
                                    email.validation_key)
        except smtplib.SMTPException:
            # Mail delivery failed; log the traceback and report a 500.
            logging.exception('Error sending validation email')
            return http.HttpResponseServerError()
        return http.HttpResponse(status=204)
class SignupView(View):
    """Registration page: renders the form, creates the inactive user,
    and kicks off email verification."""
    def is_logged(self, user):
        """Check if a logged user is trying to access the register page.
        If so, redirect him/her to his/her profile"""
        response = None
        if user.is_authenticated():
            if not user.needs_update:
                response = redirect('user_profile', username=user.username)
        return response
    def get(self, request):
        # Show the registration form (unless already logged in).
        response = self.is_logged(request.user)
        if not response:
            user_form = ColabSetUsernameForm()
            response = render(request, 'accounts/user_create_form.html',
                              {'user_form': user_form, })
        return response
    def post(self, request):
        # Create the user inactive; activation happens on email verification.
        response = self.is_logged(request.user)
        if not response:
            user_form = ColabSetUsernameForm(request.POST)
            if user_form.is_valid():
                user = user_form.save(commit=False)
                user.needs_update = False
                user.is_active = False
                user.save()
                self.verify_email(request, user)
                messages.success(request, _('Your profile has been created!'))
                response = redirect('user_profile', username=user.username)
            else:
                response = render(request, 'accounts/user_create_form.html',
                                  {'user_form': user_form, })
        return response
    def verify_email(self, request, user):
        """Send the verification link and link pre-existing list addresses."""
        email = EmailAddressValidation.create(user.email, user)
        location = reverse('email_view',
                           kwargs={'key': email.validation_key})
        verification_url = request.build_absolute_uri(location)
        EmailAddressValidation.verify_email(email, verification_url)
        # Check if the user's email have been used previously
        # in the mainling lists to link the user to old messages
        email_addr, created = EmailAddress.objects.get_or_create(
            address=user.email)
        if created:
            email_addr.real_name = user.get_full_name()
            email_addr.user = user
            email_addr.save()
def password_changed(request):
    """Flash a confirmation and return to the user's profile editor."""
    messages.success(request, _('Your password was changed.'))
    return redirect('user_profile_update',
                    username=request.user.username)
def password_reset_done_custom(request):
    """Flash the 'reset instructions sent' notice and go to the home page."""
    messages.success(request, _("We've emailed you instructions for setting "
                                "your password. You should be receiving them "
                                "shortly."))
    return redirect('home')
def password_reset_complete_custom(request):
    """Flash the 'password set' notice and go to the home page."""
    messages.success(
        request,
        _('Your password has been set. You may go ahead and log in now.'))
    return redirect('home')
def myaccount_redirect(request, route):
    """Send an authenticated user to *route* under their own account page."""
    if not request.user.is_authenticated():
        raise Http404()
    target = '/account/{0}/{1}'.format(request.user.username, route)
    return redirect(target)
def resend_email_verification(request):
    """
    Form (GET) and handler (POST) for re-sending a verification email.

    On POST: look the user up by address, reuse or create the pending
    validation entry, and email the verification link.
    """
    if request.method == 'GET':
        return render(request, 'registration/resend_email_verification.html')
    email = request.POST.get('email', '')
    user = User.objects.filter(email=email).first()
    if not user:
        # NOTE(review): user-facing message has a grammar slip
        # ('This emails is...') — consider fixing with a translation update.
        msg = _('This emails is not registered yet.')
        messages.error(request, msg)
        return render(request, 'registration/resend_email_verification.html')
    # Reuse an existing pending validation entry if one exists.
    email = EmailAddressValidation.objects.get_or_create(address=email,
                                                         user_id=user.id)[0]
    location = reverse('archive_email_view',
                       kwargs={'key': email.validation_key})
    verification_url = request.build_absolute_uri(location)
    if EmailAddressValidation.verify_email(email, verification_url):
        msg = _('An email was sent to you. Verify your inbox or spam.')
        messages.success(request, msg)
    else:
        msg = _('An error occurred while sending mail.')
        messages.error(request, msg)
    return redirect('login')
| colab/colab | colab/accounts/views.py | views.py | py | 11,704 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.views.generic.UpdateView",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "colab.accounts.forms.UserUpdateForm",
"line_number": 36,
"usage_type":... |
29055069542 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.forms.formsets import formset_factory
from django.forms import modelformset_factory
from django.core import serializers
from django.http import HttpResponse
from .models import Portfolio, PortfolioProduct, CustomerPortfolio
from .forms import ProductForm, PortfolioForm
from users.models import Profile, Customer
from cart.cart import Cart
from shop.models import UnitMeasure, Product, Supplier
from portfolios.models import CustomerPortfolioProduct, CustomerPortfolio
from portfolios.forms import CustomerPortfolioForm
from itertools import product
def portfolio_list(request):
    """
    List the logged-in user's portfolios together with an empty
    creation form.

    NOTE(review): anonymous requests fall through and the view returns
    None, which makes Django raise; consider @login_required so they are
    redirected to the login page instead.
    """
    if request.user.is_authenticated:
        # The ProductFormSet/product_formset pair was dead code (its only
        # use was commented out of the context), so it has been removed.
        portfolios = Portfolio.objects.filter(user=request.user.profile)
        portfolio_form = PortfolioForm()
        context = {
            'portfolios': portfolios,
            'portfolio_form': portfolio_form,
        }
        return render(request, 'portfolios/product/list.html', context)
def portfolio_detail(request, id):
    """Render the detail page for one portfolio; 404 when it is missing."""
    portfolio = get_object_or_404(Portfolio, id=id)
    context = {'portfolio': portfolio}
    return render(request, 'portfolios/product/detail.html', context)
def portfolio_add(request):
    """Create a portfolio for the current user from the posted form.

    The posted 'selected_products' field is a comma-separated list of
    product ids; each id is linked to the new portfolio with quantity 1
    and the default unit of measure. Always redirects to the list view.

    Fix: anonymous users previously got a None response (500); they are
    now redirected to login. An unused (and lazily never-evaluated)
    Portfolio queryset was also removed.
    """
    if not request.user.is_authenticated:
        return redirect('users:login')
    if request.method == 'POST':
        portfolio_form = PortfolioForm(request.POST)
        if portfolio_form.is_valid():
            portfolio = portfolio_form.save(commit=False)
            portfolio.user = Profile.objects.get(user=request.user)
            portfolio_form.save()
            # NOTE(review): map(int, ...) raises ValueError on empty or
            # malformed input; the field is assumed well-formed — confirm.
            values = map(int, request.POST['selected_products'].split(','))
            products = []
            for product in values:
                if product:
                    products.append(PortfolioProduct(
                        portfolio=portfolio,
                        product=Product.objects.get(id=product),
                        quantity=1,
                        um=UnitMeasure.objects.get(id=1)))
            try:
                PortfolioProduct.objects.bulk_create(products)
                # And notify our users that it worked
                messages.success(request, 'Portfolio added succesfully!', 'alert-success')
            except Exception as e:  # If the transaction failed
                print(e)
                messages.error(request, 'Error saving portfolio!', 'alert-danger')
        else:
            messages.error(request, 'Error saving portfolio!', 'alert-danger')
    return redirect('portfolios:portfolio_list')
def portfolio_delete(request, portfolio_id):
    """Delete one of the current user's portfolios, then bounce to the referrer."""
    # Filtering by user means users can only delete their own portfolios
    # (a foreign id raises Portfolio.DoesNotExist -> 500).
    portfolio = Portfolio.objects.get(id=portfolio_id, user=request.user.profile)
    portfolio.delete()
    # NOTE(review): messages.success() is called here purely for its side
    # effect; its return value is passed to redirect() as an extra positional
    # argument, which appears to be ignored for URL targets — confirm, and
    # consider moving the call to its own line.
    return redirect(request.META.get('HTTP_REFERER'), messages.success(request, "Portfolio was successfully deleted!", 'alert-success'))
def portfolio_order(request):
    """Replace the session cart with the items of the POSTed portfolio.

    NOTE(review): implicitly returns None (-> 500) for anonymous users or
    non-POST requests.
    """
    if request.user.is_authenticated:
        if request.method == 'POST':
            portfolio = get_object_or_404(Portfolio, id=request.POST['portfolio_id'])
            cart = Cart(request)
            # Start from an empty cart so the order mirrors the portfolio exactly.
            cart.clear()
            for product in portfolio.products.all():
                prod = product.product
                cart.add(prod, um=product.um.id, quantity=product.quantity)
            return redirect('dashboard')
def get_portfolio_products(self, customer_id, supplier_id):
    """Return the products of one customer portfolio as serialized JSON.

    NOTE(review): the first parameter is named ``self`` but this is a
    module-level function — it presumably receives the request (or is a
    method pasted out of a class); consider renaming to ``request``.
    """
    filterargs = { 'customer_id': customer_id, 'supplier_id': supplier_id }
    # NOTE(review): filter() returns a QuerySet, which is then passed to an
    # exact ``portfolio=`` lookup below; this relies on Django coercing it
    # to a subquery — confirm it behaves as intended for multiple matches.
    portfolio = CustomerPortfolio.objects.filter(**filterargs)
    products = CustomerPortfolioProduct.objects.filter(portfolio=portfolio)
    return HttpResponse(serializers.serialize('json', products), content_type="application/json")
def customer_portfolio_list(request):
    """Show every CustomerPortfolio owned by the logged-in user."""
    if not request.user.is_authenticated:
        return redirect('users:login')
    portfolios = CustomerPortfolio.objects.filter(user=request.user)
    context = {
        'title': 'Customer Portfolios',
        'link_new': 'portfolios:customer_portfolio_new',
        'link_edit': 'portfolios:customer_portfolio_edit',
        'link_delete': 'portfolios:customer_portfolio_delete',
        'fields': ['name', 'supplier', 'customer'],
        'objs': portfolios,
        'no_objects_msg': 'You have no Customer Portfolios',
    }
    return render(request, 'shop/sheet.html', context)
def customer_portfolio_new(request):
    """Create a CustomerPortfolio plus its product rows in one form/formset.

    GET renders an empty form; POST validates and saves the portfolio and
    every formset row, stamping both with the current user.
    """
    if not request.user.is_authenticated:
        return redirect('users:login')
    # Formset for the portfolio's product rows (one blank extra row).
    CustPortfolioProdFormSet = modelformset_factory(
        CustomerPortfolioProduct,
        fields=('product', 'price', 'quantity', 'um'),
        can_delete=True,
        extra=1)
    if request.method == "POST":
        form = CustomerPortfolioForm(request.POST)
        formset = CustPortfolioProdFormSet(request.POST, request.FILES)
        if form.is_valid() and formset.is_valid() :
            obj = form.save(commit=False)
            obj.user = request.user
            obj.save()
            # Attach each product row to the freshly saved portfolio.
            instances = formset.save(commit=False)
            for instance in instances:
                instance.user = request.user
                instance.portfolio = obj
                instance.save()
            formset.save(commit=True)
            return redirect('portfolios:customer_portfolio_list')
        # On validation failure, fall through and re-render the bound forms.
    else:
        form = CustomerPortfolioForm()
        formset = CustPortfolioProdFormSet(queryset=CustomerPortfolioProduct.objects.none())
    # Restrict dropdown choices to records owned by this user.
    form.fields['supplier'].queryset = Supplier.objects.filter(user=request.user)
    form.fields['customer'].queryset = Customer.objects.filter(user=request.user)
    form.fields['supplier'].empty_label = 'Select a supplier'
    form.fields['customer'].empty_label = 'Select a customer'
    args = {
        'form_title': 'Customer Portfolio',
        'formset_title': 'Portfolio items',
        'link_new': 'portfolios:customer_portfolio_new',
        'link_edit': 'portfolios:customer_portfolio_edit',
        'link_delete': 'portfolios:customer_portfolio_delete',
        'form': form,
        'formset': formset,
        'filter_items': 1,
    }
    return render(request, 'shop/form_formset_edit.html', args)
def customer_portfolio_edit(request, pk):
    """Edit an existing CustomerPortfolio and its product formset.

    NOTE(review): unlike the list view, ownership is not checked here —
    any logged-in user who knows the pk can edit the portfolio.
    """
    if not request.user.is_authenticated:
        return redirect('users:login')
    obj = get_object_or_404(CustomerPortfolio, pk=pk)
    CustPortfolioProdFormSet = modelformset_factory(
        CustomerPortfolioProduct,
        fields=('product', 'price', 'quantity', 'um'),
        can_delete=True,
        extra=1)
    if request.method == "POST":
        form = CustomerPortfolioForm(request.POST, instance=obj)
        formset = CustPortfolioProdFormSet(request.POST, request.FILES)
        if form.is_valid() and formset.is_valid() :
            obj = form.save(commit=False)
            obj.user = request.user
            obj.save()
            # Re-link each row to the portfolio before saving.
            instances = formset.save(commit=False)
            for instance in instances:
                instance.portfolio = obj
                instance.save()
            formset.save(commit=True)
            return redirect('portfolios:customer_portfolio_list')
    else:
        form = CustomerPortfolioForm(instance=obj)
        formset = CustPortfolioProdFormSet(queryset=CustomerPortfolioProduct.objects.filter(portfolio=obj))
    # Restrict dropdowns to the current user's suppliers/customers.
    form.fields['supplier'].queryset = Supplier.objects.filter(user=request.user)
    form.fields['customer'].queryset = Customer.objects.filter(user=request.user)
    form.fields['supplier'].empty_label = 'Select a supplier'
    form.fields['customer'].empty_label = 'Select a customer'
    # form.fields['supplier'].disabled = True
    # form.fields['customer'].disabled = True
    # Only offer products that belong to this portfolio's supplier.
    for choice_form in formset:
        choice_form.fields['product'].queryset = Product.objects.filter(supplier=obj.supplier)
    args = {
        'form_title': 'Customer Portfolio',
        'formset_title': 'Portfolio items',
        'link_new': 'portfolios:customer_portfolio_new',
        'link_edit': 'portfolios:customer_portfolio_edit',
        'link_delete': 'portfolios:customer_portfolio_delete',
        'form': form,
        'formset': formset,
        'filter_items': 2,
    }
    return render(request, 'shop/form_formset_edit.html', args)
def customer_portfolio_delete(request, pk):
    # NOTE(review): no authentication or ownership check — anyone who knows
    # the pk can delete this portfolio; consider filtering by user the way
    # portfolio_delete does.
    obj = get_object_or_404(CustomerPortfolio, pk=pk)
    obj.delete()
return redirect('portfolios:customer_portfolio_list') | cristianalecu/Django | monolith_alt/portfolios/views.py | views.py | py | 7,829 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.formsets.formset_factory",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "forms.ProductForm",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "portfolios.models",
"line_number": 20,
"usage_type": "name"
},
{
"api... |
35072126027 | import argparse
from datetime import datetime
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.remote.webdriver import WebDriver
import issuehandler.config as config
from issuehandler.pages import (HomePage, LoginPage, NoIssueWithIDException,
RepoPage)
from issuehandler.screenshots import SeleniumScreenshotter
def get_driver() -> WebDriver:
    """Build a Chrome WebDriver using the configured driver path and profile."""
    chrome_options = webdriver.ChromeOptions()
    # Reuse the persistent browser profile so GitHub sessions survive runs.
    chrome_options.add_argument(f"--user-data-dir={config.BROWSER_USER_PATH}")
    chrome_service = Service(config.DRIVER_PATH)
    driver = webdriver.Chrome(options=chrome_options, service=chrome_service)
    driver.implicitly_wait(0.5)
    return driver
def login_manually(driver: WebDriver) -> bool:
    """Open the login page and wait for the user to log in by hand.

    Returns True on success, False if the wait times out.
    """
    page = LoginPage(driver).go()
    try:
        page.wait_for_manual_login()
    except TimeoutException:
        return False
    return True
def create_parser():
    """Build the CLI parser with 'close', 'comment' and 'open' subcommands.

    Each subparser stores its handler in ``function`` so run() can
    dispatch via ``args.function(**vars(args))``.

    Fix: help-text typo "with acomment" -> "with a comment".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--user', help='Name of the owner of the repo',
        default=config.DEFAULT_USER)
    parser.add_argument(
        '--repo', help='Name of the GitHub repository',
        default=config.DEFAULT_REPO)
    parser.add_argument(
        '--pdf', help='Whether to generate a PDF file with screenshots',
        action='store_true', dest='take_screenshots')
    parser.add_argument('-v', '--verbosity',
                        help='Set screenshot verbosity level',
                        type=int, choices=[1, 2, 3], default=3)
    subparsers = parser.add_subparsers(required=True, title='Operation')
    close_parser = subparsers.add_parser('close', help='Closes an issue')
    close_parser.add_argument(
        'id', type=int, help='GitHub ID of the issue to be closed')
    close_parser.set_defaults(function=close_issue)
    comment_parser = subparsers.add_parser(
        'comment', help='Adds comment to issue')
    comment_parser.add_argument(
        'id', type=int, help='GitHub ID of the issue')
    comment_parser.add_argument(
        '-m', '--message',
        type=str, help='Quote-delimited comment to add',
        required=True)
    # --close and --reopen are mutually exclusive toggles on the comment.
    group = comment_parser.add_mutually_exclusive_group()
    group.add_argument(
        '-c', '--close', help='Set this to close the issue with a comment',
        action='store_true')
    group.add_argument(
        '-o', '--reopen',
        help='Set this to reopen the closed issue with a comment',
        action='store_true')
    comment_parser.set_defaults(function=comment_issue)
    open_parser = subparsers.add_parser('open', help='Opens a new issue')
    open_parser.add_argument('--title', type=str, required=True)
    open_parser.add_argument('--body', type=str, default='')
    open_parser.set_defaults(function=open_issue)
    return parser
def open_issue(repo_url: str, title: str,
               body: str, driver: WebDriver,
               **kwargs):
    """Open a new issue with *title*/*body* in the given repository."""
    repo_page = RepoPage(driver, repo_url).go()
    issues_page = repo_page.go_to_issues().go()
    new_issue_page = issues_page.get_new_issue_page().go()
    new_issue_page.open_new_issue(title, body)
def close_issue(repo_url: str, id: int, driver: WebDriver, **kwargs):
    """Close the issue with GitHub number *id* in the given repository.

    Prints a message and does nothing when the issue is already closed
    or does not exist.
    """
    repo_page = RepoPage(driver, repo_url).go()
    issues_page = repo_page.go_to_issues().go()
    try:
        issue = issues_page.get_issue_by_id(id).go()
        if not issue.is_open:
            print('Issue is already closed')
            return
        issue.close()
    except NoIssueWithIDException:
        print('No issue found with specified ID')
def comment_issue(repo_url: str, id: int,
                  message: str, close: bool,
                  reopen: bool, driver: WebDriver,
                  **kwargs):
    """Add *message* to issue *id*, optionally closing or reopening it."""
    repo_page = RepoPage(driver, repo_url).go()
    issues_page = repo_page.go_to_issues().go()
    try:
        issue = issues_page.get_issue_by_id(id).go()
        issue.comment(message, close, reopen)
    except NoIssueWithIDException:
        print('No issue found with specified ID')
def run(driver: WebDriver, args: argparse.Namespace):
    """Configure screenshots, ensure login, and dispatch the chosen subcommand."""
    SeleniumScreenshotter.take_screenshots = args.take_screenshots
    SeleniumScreenshotter.verbosity_level = args.verbosity
    SITE_URL = 'https://www.github.com'
    # The subcommand handlers read repo_url/driver out of the args namespace.
    args.repo_url = f'{SITE_URL}/{args.user}/{args.repo}'
    args.driver = driver
    home_page = HomePage(driver).go()
    if not home_page.is_logged_in() and not login_manually(driver):
        print('User has not logged in')
        return
    # args.function was set by the matching subparser (open/close/comment).
    args.function(**vars(args))
    if args.take_screenshots:
        pdf_name = datetime.now().strftime('%Y-%m-%dT%Hh%Mm%Ss')
        SeleniumScreenshotter.save_pdf(f'{config.PDF_DIR}/{pdf_name}.pdf')
def main():
    """CLI entry point: parse arguments and run the selected operation."""
    parsed_args = create_parser().parse_args()
    driver: WebDriver
    with get_driver() as driver:
        run(driver, parsed_args)


if __name__ == '__main__':
    main()
| Vernalhav/github-issue-tracker | issuehandler/issuehandler.py | issuehandler.py | py | 4,968 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "issuehandler.config.DRIVER_PATH",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "issuehandler.config",
"line_number": 16,
"usage_type": "na... |
41897699827 | import smtplib
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
import traceback
import os
def build_content(sender, receiver, subject, body):
    """Build an HTML email body with subject/from/to headers set.

    :param sender: value for the 'from' header
    :param receiver: value for the 'to' header
    :param subject: mail subject line
    :param body: HTML content, encoded as UTF-8
    :return: the assembled MIMEText message
    """
    message = MIMEText(_text=body, _subtype='html', _charset='utf-8')
    message['subject'] = subject
    message['from'] = sender
    message['to'] = receiver
    return message
def build_attach_file(text_content, sender, receiver, subject, files_tuple):
    """Wrap *text_content* and the given files into a multipart message.

    :param text_content: an already-built MIME part (e.g. the HTML body)
    :param sender: value for the 'from' header
    :param receiver: value for the 'to' header
    :param subject: value for the 'Subject' header
    :param files_tuple: iterable of file paths to attach
    :return: the assembled MIMEMultipart message
    """
    m = MIMEMultipart()
    m.attach(text_content)
    for file in files_tuple:
        file_name = os.path.basename(file)
        print(file)
        print(file_name)
        # Fix: use a context manager so each attachment's file handle is
        # closed (the original leaked every open() handle).
        with open(file, "rb") as fh:
            file_apart = MIMEApplication(fh.read())
        file_apart.add_header('Content-Disposition', 'attachment', filename=file_name)
        m.attach(file_apart)
    m['Subject'] = subject
    m['from'] = sender
    m['to'] = receiver
    return m
def test_attach_email():
    """Build a throwaway multipart message with a hard-coded attachment.

    NOTE(review): reads an absolute Windows path
    (D:/code/python-project/demo/tmp/demo1.txt) and leaks the file handle,
    so this only works on the author's machine. Kept for manual testing.
    """
    file = 'demo1.txt'
    file_apart = MIMEApplication(open("D:/code/python-project/demo/tmp/demo1.txt", 'rb').read())
    file_apart.add_header('Content-Disposition', 'attachment', filename=file)
    content = "<h1>You've already sent eamil successfully!</h1>" \
              "<p>Chris</p>"
    text_apart = MIMEText(content)
    m = MIMEMultipart()
    m.attach(text_apart)
    m.attach(file_apart)
    m['subject'] = 'title'
    m['from'] = 'lilunlogic@163.com'
    m['to'] = '872343840@qq.com'
    return m
def sent_email(receiver, subject, body, file_tuple):
    """Send an HTML email with attachments via the 163.com SMTP relay.

    :param receiver: destination address
    :param subject: mail subject line
    :param body: HTML body content
    :param file_tuple: tuple of file paths to attach
    """
    # Outgoing SMTP server address.
    host = 'smtp.163.com'
    # Server port: there are SSL and non-SSL variants; SSL is the norm today.
    non_ssl_port = 25
    # ssl_port = 465
    # Sender mailbox — must be a registered account.
    sender = 'lilunlogic@163.com'
    # Mailbox authorization code: 163 requires an app authorization code
    # (not the login password) for third-party clients.
    # SECURITY(review): credential is hard-coded in source — move it to an
    # environment variable or config file.
    pwd = 'Lilun32768+'
    m = build_content(sender, receiver, subject, body)
    m = build_attach_file(m, sender, receiver, subject, file_tuple)
    # m = test_attach_email()
    try:
        s = smtplib.SMTP(host, non_ssl_port)
        # NOTE: when using the SSL port, switch to SMTP_SSL instead:
        # s = smtplib.SMTP_SSL(host, ssl_port)
        # Log in to the mailbox.
        s.login(sender, pwd)
        s.sendmail(sender, receiver, m.as_string())
        # Send the mail!
        print('Done.sent email success')
        s.quit()
    except smtplib.SMTPException:
        # NOTE(review): traceback.print_exc() prints and returns None, so
        # the second argument printed here is always None.
        print('Error.sent email fail', traceback.print_exc())
if __name__ == '__main__':
    # Recipient address — can be any provider (e.g. a QQ mailbox).
    receiver = 'chris.li@allsale.site'
    subject = "send email with attachments"
    body = "<h1>You've already sent eamil successfully!</h1>" \
           "<p>Chris</p>"
    full_file_name = []
    dir_path = os.getcwd() + os.sep + "tmp"
    # Attach every file found under ./tmp.
    for file in os.listdir(dir_path):
        full_file_name.append(os.path.join(dir_path, file))
    sent_email(receiver, subject, body, tuple(full_file_name))
| ChrisLi716/pythondemo | demo/email_01.py | email_01.py | py | 3,203 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "email.mime.text.MIMEText",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 27,
"usage_type": "call"
},
{
"api_... |
25894141826 | import datetime
import pytz
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from secrets import CALENDARS_TO_CHECK
# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
def get_calendar_events():
    """Return a spoken-style summary string of events in the next 12 hours.

    Authenticates with the stored OAuth token (launching the browser flow
    when the token is missing or invalid), collects events from the
    calendars named in CALENDARS_TO_CHECK, and announces all-day events
    before timed ones.
    """
    store = file.Storage('token.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
        creds = tools.run_flow(flow, store)
    service = build('calendar', 'v3', http=creds.authorize(Http()))
    # Call the Calendar API
    calendars_result = service.calendarList().list().execute()
    # Keep only the ids of the calendars the user asked to monitor.
    cal_ids = [cal.get('id') for cal in calendars_result.get('items') if cal.get('summary') in CALENDARS_TO_CHECK]
    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    end_of_day = datetime.datetime.utcnow() + datetime.timedelta(0, 60 * 60 * 12)  # get events for the next 12 hours
    end_of_day = end_of_day.isoformat() + 'Z'  # 'Z' indicates UTC time
    all_events = []
    for cal_id in cal_ids:
        events_result = service.events().list(calendarId=cal_id, timeMin=now,
                                              timeMax=end_of_day, maxResults=10,
                                              singleEvents=True, orderBy='startTime').execute()
        all_events.extend(events_result.get('items', []))
    cal_string = 'Here are the upcoming calendar events for today: '
    if not all_events or len(all_events) == 0:
        cal_string = 'There are no calendar events today.'
    # Sort chronologically; all-day events (no 'dateTime') sort first via
    # the epoch sentinel returned by get_start_time.
    all_events.sort(key=get_start_time)
    all_day_events = [event for event in all_events if 'dateTime' not in event['start']]
    time_events = [event for event in all_events if 'dateTime' in event['start']]
    for event in all_day_events:
        cal_string += event['summary'] + '. '
    for event in time_events:
        start = datetime.datetime.fromisoformat(event['start']['dateTime'])
        cal_string += event['summary'] + ' at ' + start.strftime('%H:%M') + '. '
    return cal_string
def get_start_time(event):
    """Return a timezone-aware start time for a calendar event, for sorting.

    Timed events carry a 'dateTime'; all-day events only have 'date' and
    sort first via the Unix-epoch sentinel.

    Improvement: uses the stdlib ``datetime.timezone.utc`` instead of
    ``pytz.UTC`` — both are fixed-offset UTC, so comparisons are unchanged.
    """
    if 'dateTime' in event['start']:
        # NOTE: replace() stamps UTC without converting any original offset,
        # preserving the original code's behavior.
        return datetime.datetime.fromisoformat(event['start']['dateTime']).replace(tzinfo=datetime.timezone.utc)
    return datetime.datetime.fromtimestamp(0).replace(tzinfo=datetime.timezone.utc)
# there is a flag conflict with flask, so running this file standalone will just auth with google
# so the --noauth_local_webserver flag can be passed
if __name__ == '__main__':
get_calendar_events()
| jchorl/waker | server/gcalendar.py | gcalendar.py | py | 2,565 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "oauth2client.file.Storage",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "oauth2client.file",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "oauth2client.client.flow_from_clientsecrets",
"line_number": 17,
"usage_type": "call"
},
... |
33195845551 | # # # # # # # # # # # # # #
# CAPTAINHOOK IDENTIFIER #
# # # # # # # # # # # # # #
import os
import sys
from contextlib import contextmanager
from os.path import join
from .utils import filter_python_files, get_config_file
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
DEFAULT = 'on'
CHECK_NAME = 'flake8'
NO_FLAKE_MSG = ("flake8 is required for the flake8 plugin.\n"
"`pip install flake8` or turn it off in your {} "
"file.".format(get_config_file()))
REQUIRED_FILES = [get_config_file()]
@contextmanager
def redirected(out=sys.stdout, err=sys.stderr):
    """Temporarily route sys.stdout/sys.stderr to *out*/*err*."""
    previous = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = out, err
    try:
        yield
    finally:
        # Restore the original streams even if the body raised.
        sys.stdout, sys.stderr = previous
@contextmanager
def change_folder(folder):
    """Temporarily chdir into *folder*, restoring the previous cwd on exit.

    Bug fix: the original saved ``os.curdir`` — which is the literal
    string '.' — so chdir-ing back went to '.' relative to *folder* and
    the working directory was never actually restored. Save the real
    current directory with ``os.getcwd()`` instead.
    """
    old_dir = os.getcwd()
    os.chdir(folder)
    try:
        yield
    finally:
        os.chdir(old_dir)
def run(files, temp_folder):
    "Check flake8 errors in the code base."
    # Import lazily so the hook degrades gracefully when flake8 is absent.
    try:
        import flake8  # NOQA
    except ImportError:
        return NO_FLAKE_MSG
    try:
        from flake8.engine import get_style_guide
    except ImportError:
        # We're on a new version of flake8
        from flake8.api.legacy import get_style_guide
    py_files = filter_python_files(files)
    if not py_files:
        # Nothing to lint (implicitly returns None, treated as "no output").
        return
    DEFAULT_CONFIG = join(temp_folder, get_config_file())
    # Run from the temp checkout so relative paths in the config resolve,
    # capturing flake8's stdout/stderr instead of leaking to the console.
    with change_folder(temp_folder):
        flake8_style = get_style_guide(config_file=DEFAULT_CONFIG)
        out, err = StringIO(), StringIO()
        with redirected(out, err):
            flake8_style.check_files(py_files)
    return out.getvalue().strip() + err.getvalue().strip()
| alexcouper/captainhook | captainhook/checkers/flake8_checker.py | flake8_checker.py | py | 1,705 | python | en | code | 54 | github-code | 1 | [
{
"api_name": "utils.get_config_file",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "utils.get_config_file",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.stder... |
31369997290 | import sys
import unittest
sys.path.append("pig")
from unittest.mock import patch
from pig.computer_player import ComputerPlayer
from io import StringIO
class TestComputerPlayer(unittest.TestCase):
    """Unit tests for ComputerPlayer at both difficulty levels.

    builtins.input is patched so any interactive prompt answers 'h' (hold).
    """

    def test_take_turn_easy(self):
        # Test that an easy computer player correctly holds their turn
        player = ComputerPlayer("Bob", "easy")
        player.total_score = 700
        with patch("builtins.input", side_effect=["h"]):
            score = player.take_turn()
        self.assertLessEqual(score, 20)
        self.assertGreaterEqual(player.total_score, 700)

    def test_take_turn_hard(self):
        # Test that a hard computer player correctly holds their turn
        player = ComputerPlayer("Bob", "hard")
        player.total_score = 700
        with patch("builtins.input", side_effect=["h"]):
            score = player.take_turn()
        self.assertLessEqual(score, 20)
        self.assertGreaterEqual(player.total_score, 700)

    def test_take_turn_cheat(self):
        # Test that a hard computer player correctly holds their turn
        player = ComputerPlayer("Bob", "hard")
        player.total_score = 700
        with patch("builtins.input", side_effect=["h"]):
            score = player.take_turn()
        # NOTE(review): the second argument to assertIsNotNone is the
        # failure *message*, not a value — 0 is effectively ignored here.
        self.assertIsNotNone(score, 0)
        self.assertLessEqual(score, 700)
        self.assertGreaterEqual(player.total_score, 700)

    def test_player_has_name(self):
        # Test that the computer player has a name attribute
        player = ComputerPlayer("Bob", "easy")
        self.assertEqual(player.name, "Bob")

    def test_player_total_score_starts_at_zero(self):
        # Test that the computer player's total score starts at zero
        player = ComputerPlayer("Bob", "easy")
        self.assertEqual(player.total_score, 0)

    def test_player_can_roll_dice(self):
        # Test that the computer player can roll the dice
        player = ComputerPlayer("Bob", "easy")
        with patch("builtins.input", side_effect=["h"]):
            score = player.take_turn()
        self.assertLessEqual(score, 20)

    def test_player_can_take_multiple_turns(self):
        # Test that the computer player can take multiple turns
        player = ComputerPlayer("Bob", "easy")
        with patch("builtins.input", side_effect=["h"]):
            score = player.take_turn()
        self.assertLessEqual(score, 20)
        self.assertGreaterEqual(player.total_score, 0)

    def test_player_can_win(self):
        # Test that the computer player can win
        player = ComputerPlayer("Bob", "easy")
        player.total_score = 100
        with patch("builtins.input", side_effect=["h"]):
            score = player.take_turn()
        self.assertGreaterEqual(player.total_score, 100)

    # Decorators apply bottom-up, so the mocks arrive in reverse order:
    # stdout first, then input, then randint.
    @patch('random.randint', return_value=1)
    @patch('builtins.input', side_effect=["yes"])
    @patch('sys.stdout', new_callable=StringIO)
    def test_take_turn_loses_turn(self, mock_stdout, mock_input, mock_randint):
        # Rolling a 1 forfeits the turn: score 0 and a loss message printed.
        player = ComputerPlayer("Test", "easy")
        expected_output = "Test rolled a [1]\nTest lost their turn!\n"
        self.assertEqual(player.take_turn(), 0)
        self.assertEqual(mock_stdout.getvalue(), expected_output)
if __name__ == "__main__":
unittest.main()
| sams258/Dice-game-Pig- | test/test_computer_player.py | test_computer_player.py | py | 3,248 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pig.computer_player... |
42321692074 | import warnings
from typing import Tuple
import geopandas as gpd
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
# Use codes as defined by the [codebook](https://www.mass.gov/files/documents/2016/08/wr/classificationcodebook.pdf)
USE_CODES = {
"101": "Single Family",
"102": "Condominium",
"103": "Mobile Home",
"104": "Two-Family",
"105": "Three-Family",
"106": "Accessory Land with Improvement - garage,etc.",
"109": "Multiple Houses on one parcel (for example, a single and a two-family on one parcel)",
"132": "Undevelopable Land",
"930": "Municipal, Vacant, Selectmen or City Council",
"130": "Vacant Land in a Residential Zone or Accessory to Residential Parcel, Developable Land",
"343": "Office Building", # There is no code 343 in the cited reference, but prefix 34 indicates office buildings.
"131": "Vacant Land in a Residential Zone or Accessory to Residential Parcel, Potentially Developable Land",
"945": "Educational Private, Affilliated Housing",
"942": "Educational Private, College or University",
"920": "Department of Conservation and Recreation, Division of Urban Parks and Recreation",
"340": "General Office Buildings",
"931": "Municipal, Improved, Selectmen or City Council",
"960": "Church, Mosque, Synagogue, Temple, etc",
"325": "Small Retail and Services stores (under 10,000 sq. ft.)",
"337": "Parking Lots - a commercial open parking lot for motor vehicles",
"932": "Municipal, Vacant, Conservation",
"013": "Multiple-Use, primarily Residential",
"031": "Multiple-Use, primarily Commercial",
"950": "Charitable, Vacant, Conservation Organizations",
}
# Currently only need mappings for Milton and Quincy. Expand to add more municipalities.
TOWN_IDS = {
189: "Milton",
243: "Quincy",
}
RESIDENTIAL_USE_CODES = ["101", "102", "103", "104", "105", "109", "013"]
PUBLIC_ACCESS_CODES = {
"Y": "Yes (open to public)",
"N": "No (not open to public)",
"L": "Limited (membership only)",
"X": "Unknown",
}
LEVEL_OF_PROTECTION_CODES = {
"P": "In Perpetuity",
"T": "Temporary",
"L": "Limited",
"N": "None",
}
PRIMARY_PURPOSE_CODES = {
"R": "Recreation (activities are facility based)",
"C": "Conservation (activities are non-facility based)",
"B": "Recreation and Conservation",
"H": "Historical/Cultural",
"A": "Agricultural",
"W": "Water Supply Protection",
"S": "Scenic (official designation only)",
"F": "Flood Control",
"U": "Site is underwater",
"O": "Other (explain)",
"X": "Unknown",
}
def transform_use_codes(use_codes: pd.Series) -> pd.Series:
    """Map raw assessor use codes to their standard descriptions.

    Only the first three digits of each code are significant; any code not
    present in USE_CODES falls back to "Other".

    USE_CODE – state three-digit use code with optional extension digit to
    accommodate the four-digit codes commonly used by assessors. Because the
    meaning of the fourth digit varies from community to community, only the
    three-digit prefix is looked up here.
    """
    return use_codes.str[:3].map(lambda code: USE_CODES.get(code, "Other"))
def plot_map(
    gdf: gpd.GeoDataFrame,
    column: str,
    categorical: bool = True,
    axis_scale: float = 1,
    legend_shift: float = 1,
    figsize: Tuple[int, int] = (20, 20),
    markersize: float = 0.01,
    legend: bool = True,
    title: str = None,
    cmap: str = "gist_earth",
    fig=None,
    ax=None,
    **style_kwds,
):
    """Generic function to plot maps from GeoDataFrame.

    Args:
        gdf (geopandas.GeoDataFrame): the GeoDataFrame we want to plot map from.
        column (str): column name that we want to use in the plot.
        categorical (bool): ``True`` if the column should be treated as a categorical variable, ``False`` if not. Defaults to ``True``.
        axis_scale (float): the scale to enlarge or shrink the axis. Defaults to ``1`` (no size adjustments).
        legend_shift (float): how much to shift the legend box to the right. Defaults to ``1``. Larger number will shift the legend box further to the right. This parameter is used to prevent overlap of the legend box and the map.
        figsize (tuple): the size of the figure. Defaults to ``(20, 20)``.
        markersize (float): the size of the marker, only useful for GeoDataFrame that contains point geometries. Defaults to ``0.01``.
        legend (bool): whether to draw a legend for the plotted column. Defaults to ``True``.
        title (str): the title of the figure. Defaults to ``None``, in which case no title will be set.
        cmap (str): the color map to use in the map plot. Defaults to ``'gist_earth'``. The color maps available in matplotlib can be found here: https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
        ax: (matplotlib.axes._subplots.AxesSubplot) matplotlib axis object to add plot to

    Returns:
        matplotlib.axes._subplots.AxesSubplot: matplotlib plot.
    """
    # Reading the CRS as a dict can emit deprecation warnings in newer
    # pyproj versions; suppress them for this lookup only.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        units = gdf.crs.to_dict()["units"]
    if units != "m":
        raise ValueError(
            f"coordinate units can only be meters (m). Instead, the crs was: {gdf.crs.to_dict()}"
        )
    if not ax:
        fig, ax = plt.subplots(1, figsize=figsize)
        ax.grid()
    gdf.plot(
        ax=ax,
        column=column,
        categorical=categorical,
        legend=legend,
        markersize=markersize,
        cmap=cmap,
        **style_kwds,
    )
    # Shrink current axis by `axis_scale`
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * axis_scale, box.height * axis_scale])
    if legend:
        leg = ax.get_legend()
        if leg is not None:
            # Move the legend box to the right of the axes so it doesn't
            # cover the map itself.
            leg.set_bbox_to_anchor((legend_shift, 1))
    if units == "m":
        ax.set(xlabel="distance_x (meters)", ylabel="distance_y (meters)")
    # elif coord == 'latlong':
    #     ax.set(xlabel='longitude (deg)', ylabel='latitude (deg)')
    if isinstance(title, str):
        ax.set_title(title)
    return ax
def make_choropleth_style_function(
    df: pd.DataFrame, attr: str, colormap: str, navalue="None"
):
    """Build a per-feature style function plus its value -> hex-color map.

    Each unique value of ``df[attr]`` (with NaN replaced by *navalue*) is
    assigned a color from *colormap*; the returned function styles a
    GeoJSON-like feature by looking up ``feature['properties'][attr]``.
    """
    cmap = matplotlib.cm.get_cmap(colormap)
    unique_values = list(df[attr].fillna(navalue).unique())
    color_for = {
        value: matplotlib.colors.rgb2hex(cmap(index))
        for index, value in enumerate(unique_values)
    }

    def stylefunc(feature):
        value = feature["properties"][attr]
        if value is None:
            value = navalue
        return {
            "fillColor": color_for[value],
            "color": color_for[value],
        }

    return stylefunc, color_for
def html_legend(cmap_dict):
    """Render a value -> color mapping as an HTML table with color swatches."""
    header = """<table>
    <tr>
        <th>Value</th>
        <th>Color</th>
    </tr>
    """
    rows = "\n".join(
        f'<tr><td><span style="font-family: monospace">{code}</span></td> <td><span style="color: {color}">████████</span></td></tr>'
        for code, color in cmap_dict.items()
    )
    return header + rows + "</table>"
# def add_basemap(ax, zoom, url=ctx.sources.ST_TONER_LITE):
# xmin, xmax, ymin, ymax = ax.axis()
# basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, source=url)
# ax.imshow(basemap, extent=extent, interpolation='bilinear')
# # restore original x/y limits
# ax.axis((xmin, xmax, ymin, ymax))
| ahasha/milton_maps | milton_maps/milton_maps.py | milton_maps.py | py | 7,614 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.Series",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "warnings... |
28784194437 | '''
Author: Ding Pang
'''
import os
import io, csv
from re import S
from sqlalchemy import *
from sqlalchemy.pool import NullPool
from flask import Flask, request, render_template, g, redirect, make_response, flash, session, Response, url_for
from DBHelpers import *
from datetime import date
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
engine = create_engine(DATABASEURI)
@app.before_request
def before_request():
    """
    This function is run at the beginning of every web request
    I use it to setup a database connection that can be used throughout the request.
    The variable g is globally accessible.
    """
    try:
        g.conn = engine.connect()
    except:
        # Bare except: on any failure g.conn is left as None, so every
        # handler that touches g.conn will then raise on its own.
        print("uh oh, problem connecting to database")
        import traceback; traceback.print_exc()
        g.conn = None
@app.teardown_request
def teardown_request(exception):
    """
    At the end of the web request, this makes sure to close the database connection.
    """
    try:
        g.conn.close()
    except Exception:
        # The connection may already be closed or never opened; ignore.
        pass
# Edit
@app.route('/editonestorage', methods=["POST"])
def editonestorage():
    """Update one storage's name/address from the posted form, then go home.

    The form's 'Storage' field is "<id> <name...>"; only the id is used.
    """
    try:
        s = request.form["Storage"].split(" ")
        S_Id = s[0]
        name = request.form["Name"]
        Street1 = request.form["Street1"]
        Street2 = request.form["Street2"]
        City = request.form["City"]
        State = request.form["State"]
        ZIP = request.form["ZIP"]
        # Street2 is the only optional field.
        if not name or not Street1 or not City or not State or not ZIP:
            flash("Dont't enter something empty")
            resp = make_response(redirect("/"))
            return resp
        g.conn.execute(EDIT_STORAGE, (name, Street1, Street2, City, State, ZIP, S_Id,))
        resp = make_response(redirect("/"))
        return resp
    except:
        # Bare except: any DB/form error degrades to a flash + redirect.
        flash("An error has occured, Please try again")
        resp = make_response(redirect("/"))
        return resp
@app.route('/editoneitem', methods=["POST"])
def editoneitem():
    """Update an item's name/stock for the storage stored in the S_Id cookie."""
    try:
        # The 'Item' field is "<id> <name...>"; keep only the id.
        id = request.form["Item"].split(" ")[0]
        S_Id = request.cookies.get('S_Id')
        name = request.form["Name"]
        Stock = request.form["Stock"]
        g.conn.execute(EDIT_ITEM, (name, int(Stock), int(S_Id), int(id),))
        resp = make_response(redirect("/viewonestorage"))
        return resp
    except:
        # Also triggered when int() fails on non-numeric form values.
        flash("An error has occured or you entered something 'stupid', Please try again")
        resp = make_response(redirect("/viewonestorage"))
        return resp
# ADD
@app.route('/addonestorage', methods=["POST"])
def addonestorage():
    """Insert a new storage with id = max(existing id) + 1, then go home.

    NOTE(review): the max-id+1 scheme is racy under concurrent requests;
    an auto-increment/serial column would be safer.
    """
    try:
        # get first maybe unnecessary
        # There could be duplicated address in the database because of this implementation
        # But I think this provides flexibility
        temp = g.conn.execute(GET_STORAGES_LAST_ID)
        id = 1
        x = get_first(temp)[0]
        if x:
            id = int(x) + 1
        name = request.form["Name"]
        Street1 = request.form["Street1"]
        Street2 = request.form["Street2"]
        City = request.form["City"]
        State = request.form["State"]
        ZIP = request.form["ZIP"]
        # Street2 is the only optional field.
        if not name or not Street1 or not City or not State or not ZIP:
            flash("Don't enter something empty")
            resp = make_response(redirect("/"))
            return resp
        g.conn.execute(INSERT_ADDRESS, (id, name, Street1, Street2, City, State, ZIP,))
        resp = make_response(redirect("/"))
        return resp
    except:
        flash("An error has occured, Please try again")
        resp = make_response(redirect("/"))
        return resp
@app.route('/addoneitem', methods=["POST"])
def addoneitem():
    """Add a new item to the storage identified by the S_Id cookie."""
    try:
        storage_id = request.cookies.get('S_Id')
        # Next per-storage item id = (largest existing id) + 1, or 1.
        last = g.conn.execute(GET_ITEM_LAST_ID, (int(storage_id)))
        item_id = 1
        current_max = get_first(last)[0]
        if current_max:
            item_id = int(current_max) + 1
        item_name = request.form["Name"]
        stock = request.form["Stock"]
        g.conn.execute(INSERT_ITEM, (int(storage_id), int(item_id), item_name, int(stock),))
        return make_response(redirect("/viewonestorage"))
    except:
        flash("An error has occured or you entered something 'stupid', Please try again")
        return make_response(redirect("/viewonestorage"))
# Remove
@app.route('/removeonestorage', methods=["POST"])
def removeonestorage():
    """Delete the storage selected in the form, then redirect home."""
    try:
        storage_id = request.form["Storage"].split(" ")[0]
        g.conn.execute(REMOVE_ONE_STORAGE, (int(storage_id)))
        return make_response(redirect("/"))
    except:
        flash("An error has occured, Please try again")
        return make_response(redirect("/"))
@app.route('/removeoneitem', methods=["POST"])
def removeoneitem():
    """Delete one item (by part id) from the storage in the S_Id cookie."""
    try:
        part_id = request.form["Item"].split(" ")[0]
        storage_id = request.cookies.get('S_Id')
        g.conn.execute(REMOVE_ONE_ITEM, (int(part_id), int(storage_id),))
        return make_response(redirect("/viewonestorage"))
    except:
        flash("An error has occured, Please try again")
        return make_response(redirect("/viewonestorage"))
# View
@app.route('/viewonestorage', methods=["POST", "GET"])
def viewonestorage():
    """Show the inventory of a single storage.

    POST: the storage was just picked from the index form; its id and name
    are parsed from the "Storage" field and remembered in cookies so later
    GET requests (and the CSV download) can re-render the same page.
    GET: re-render from the cookies; bail out to "/" if they are missing.
    """
    try:
        if request.method == "POST":
            # "Storage" packs "<S_Id> <Name possibly containing spaces>".
            s = request.form["Storage"].split(" ")
            S_Id = s[0]
            Name = " ".join(s[1:])
            cursor = g.conn.execute(GET_ONE_STORAGE, (int(S_Id)))
            inventory = [s for s in cursor]
            context = dict(inventory = inventory, S_Id = S_Id, Name = Name)
            resp = make_response(render_template("viewonestorage.html", **context))
            # Remember the selection for subsequent GETs / downloads.
            resp.set_cookie("S_Id", S_Id)
            resp.set_cookie("Name", Name)
            return resp
        else:
            S_Id = request.cookies.get('S_Id')
            Name = request.cookies.get("Name")
            if not Name or not S_Id:
                flash("An error has occured, Please try again")
                resp = make_response(redirect("/"))
                return resp
            cursor = g.conn.execute(GET_ONE_STORAGE, (int(S_Id)))
            inventory = [s for s in cursor]
            context = dict(inventory = inventory, S_Id = S_Id, Name = Name)
            resp = make_response(render_template("viewonestorage.html", **context))
            return resp
    except:
        flash("An error has occured, Please try again")
        resp = make_response(redirect("/"))
        return resp
@app.route('/viewall', methods=["GET"])
def viewall():
    """Render every storage with its address and full inventory."""
    try:
        storages = {}
        rows = list(g.conn.execute(GET_ALL))
        for row in rows:
            sname = row[1]
            entry = storages.get(sname)
            if entry is None:
                # Row layout: id, name, five address columns, then item columns.
                entry = {
                    "address": ", ".join([part for part in row[2:7] if part]),
                    "inventory": [],
                }
                storages[sname] = entry
            entry["inventory"].append(row[7:])
        return make_response(render_template("viewall.html", Storages=storages))
    except:
        flash("An error has occured, Please try again")
        return make_response(redirect("/"))
@app.route('/', defaults={'path': ''}, methods=["GET"])
@app.route('/<path:path>')
def index(path):
    """Render the index page listing all storages.

    Any unknown sub-path redirects to "/".  A response is now returned on
    error too: the original ``except: pass`` made the view return None,
    which Flask turns into a 500 TypeError with no useful message.
    """
    try:
        if path:
            return make_response(redirect("/"))
        cursor = g.conn.execute(GET_ALL_STORAGES)
        storages = [s for s in cursor]
        context = dict(storages=storages)
        return make_response(render_template("index.html", **context))
    except Exception:
        # Redirecting to "/" here could loop, so return an explicit 500.
        return make_response("An error has occured, Please try again", 500)
# downloads
@app.route('/download/storagescsv')
def downloadstoragescsv():
    """Send every storage row as a downloadable CSV attachment."""
    try:
        rows = list(g.conn.execute(GET_ALL_STORAGES))
        buffer = io.StringIO()
        writer = csv.writer(buffer)
        writer.writerow('S_Id, Name, Street1, Street2, City, State, ZIP'.split(", "))
        for row in rows:
            writer.writerow([str(col) for col in row])
        resp = make_response(buffer.getvalue())
        resp.headers["Content-Disposition"] = "attachment; filename=storages.csv"
        resp.headers["Content-type"] = "text/csv"
        return resp
    except:
        flash("An error has occured, Please try again")
        return make_response(redirect("/"))
@app.route('/download/storagecsv')
def downloadstoragecsv():
    """Send the cookie-selected storage's inventory as a CSV download."""
    try:
        storage_id = request.cookies.get("S_Id")
        storage_name = request.cookies.get("Name")
        rows = list(g.conn.execute(GET_ONE_STORAGE, (int(storage_id))))
        buffer = io.StringIO()
        writer = csv.writer(buffer)
        writer.writerow('Part_Id, Name, Stock'.split(", "))
        for row in rows:
            writer.writerow([str(col) for col in row])
        resp = make_response(buffer.getvalue())
        resp.headers["Content-Disposition"] = "attachment; filename="+storage_name+".csv"
        resp.headers["Content-type"] = "text/csv"
        return resp
    except:
        flash("An error has occured, Please try again")
        return make_response(redirect("/"))
@app.route('/download/allcsv')
def downloadallcsv():
    """Download every storage plus its inventory as a single CSV.

    The storage columns (id, name, address) are only written on the first
    row of each storage; subsequent inventory rows leave them blank so the
    sheet reads like a grouped report.
    """
    try:
        Storages = {}
        # Tracks which storage names already emitted their header columns.
        store = set()
        cursor = g.conn.execute(GET_ALL)
        cursor = [s for s in cursor]
        output = io.StringIO()
        writer = csv.writer(output)
        line = 'S_Id, Name, Address, Part_Id, Name, Stock'.split(", ")
        writer.writerow(line)
        # First pass: group rows by storage name.
        for c in cursor:
            sname = c[1]
            if sname not in Storages:
                Storages[sname] = {}
                Storages[sname]["id"] = c[0]
                # Columns 2..6 are the address parts; skip empty ones.
                Storages[sname]["address"] = ", ".join([i for i in c[2:7] if i])
                Storages[sname]["inventory"] = []
            Storages[sname]["inventory"].append(c[7:])
        # Second pass: emit one row per inventory item.
        for sname in Storages:
            add = Storages[sname]["address"]
            for j in Storages[sname]["inventory"]:
                l = None
                if sname not in store:
                    store.add(sname)
                    l = [str(Storages[sname]["id"]), sname, add] + [str(i) for i in j]
                else:
                    # Repeat rows for the same storage get blank id/name/address.
                    l = [""]*3+[str(i) for i in j]
                writer.writerow(l)
        resp = make_response(output.getvalue())
        resp.headers["Content-Disposition"] = "attachment; filename=all.csv"
        resp.headers["Content-type"] = "text/csv"
        return resp
    except:
        flash("An error has occured, Please try again")
        resp = make_response(redirect("/"))
        return resp
if __name__ == "__main__":
    import click

    # BUG FIX: --debug was declared with is_flag=False, which makes click
    # expect a value ("--debug something") instead of acting as a switch.
    @click.command()
    @click.option('--debug', is_flag=True)
    @click.option('--threaded', is_flag=True)
    @click.argument('HOST', default='0.0.0.0')
    @click.argument('PORT', default=8111, type=int)
    def run(debug, threaded, host, port):
        """
        This function handles command line parameters.
        Run the server using:
            python server.py
        Show the help text using:
            python server.py --help
        """
        print("RUNNING!!!!!")
        print("running on " + str(host) + " : " + str(port))
        # NOTE(review): hard-coded secret key; move it to an environment
        # variable before a real deployment.
        app.secret_key = 'super secret key'
        app.run(host=host, port=port, debug=debug, threaded=threaded)

    run()
| DingPang/Shopify-Summer-2022-Challenge | server.py | server.py | py | 11,474 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line... |
7296684256 | import cv2
from cv2 import UMat
def crop_and_resize(frame: UMat, width: int, height: int) -> UMat:
    """Center-crop *frame* to the target aspect ratio, then resize it.

    The crop keeps the middle of the image and discards the excess height
    (or width) so the final resize does not distort the picture.
    """
    aspect = float(height) / float(width)
    [original_height, original_width] = frame.shape[0:2]
    original_aspect = float(original_height) / float(original_width)
    if aspect < original_aspect:
        # Source is too tall: drop excess height, centered vertically.
        temporary_height = int(original_width * aspect)
        y = int((original_height - temporary_height) / 2)
        # BUG FIX: the slice end must be y + temporary_height (the original
        # used y:temporary_height, producing an offset/short crop).
        cropped_frame = frame[y:y + temporary_height, 0:original_width]
    else:
        # Source is too wide: drop excess width, centered horizontally.
        temporary_width = int(original_height / aspect)
        x = int((original_width - temporary_width) / 2)
        # BUG FIX: likewise x:x + temporary_width instead of x:temporary_width.
        cropped_frame = frame[0:original_height, x:x + temporary_width]
    resized_frame = cv2.resize(cropped_frame, (width, height), interpolation=cv2.INTER_NEAREST)
    return resized_frame
| novogrammer/echo-of-art | echo_of_art/image_utils.py | image_utils.py | py | 758 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.UMat",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "cv2.resize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_NEAREST",
"line_number": 18,
"usage_type": "attribute"
}
] |
11012367902 | from django.urls import path
from app1.views import *
# URL routes for app1: list views, creation forms and search endpoints.
urlpatterns = [
    # Landing page.
    path('', inicio, name="Inicio"),
    # Plain list views.
    path('vuelo/', vuelo),
    path('personal/', personal),
    path('pasajero/', pasajero),
    # Creation forms.
    path('formulario1/', formulariovuelo, name="Crear Vuelos"),
    path('formulario2/', formulariopersonal, name="Crear Personal"),
    path('formulario3/', formulariopasajero, name="Crear Pasajeros"),
    # Search pages.
    path('buscarVuelos/', busquedaVuelos, name="Buscar Vuelos"),
    path('buscarPersonal/', busquedaPersonal, name="Buscar Personal"),
    path('buscarPasajero/', busquedaPasajero, name="Buscar Pasajeros"),
    # Search result handlers.
    path('buscar_pasa/',buscar_pasa),
    path('buscar_per/',buscar_per),
    path('buscar/',buscar),
]
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
28211746299 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 27 21:40:34 2018
@author: deanng
"""
import pandas as pd
from security_3rd_property import DATA_PATH, DATA_TYPE, ROWS
import time
from contextlib import contextmanager
from security_3rd_model import tfidfModelTrain, nblrTrain
import scipy
# FEATURE ENGINEERING V1
def makeFeature(data, is_train=True):
    """Build per-file aggregate features from the raw API-call log.

    Emits, per file_id:
      file_cnt                            -- number of log rows
      tid_distinct_cnt                    -- distinct threads started
      api_distinct_cnt                    -- distinct APIs called
      value_distinct_cnt                  -- distinct return values
      tid_api_cnt_{max,min,mean}          -- API calls per thread
      tid_api_distinct_cnt_{max,min,mean} -- distinct APIs per thread
      value_equals0_cnt                   -- rows whose return value is 0
      value_equals0_rate                  -- (+1 smoothed) share of zero returns

    When is_train is True the label column is carried along.
    """
    cols = ['file_id', 'label'] if is_train else ['file_id']
    out = data[cols].drop_duplicates()

    # Total rows per file.
    row_counts = data.groupby(['file_id']).tid.count().reset_index(name='file_cnt')
    out = out.merge(row_counts, on='file_id', how='left')

    # Distinct threads / APIs / return values per file.
    nunique = data.groupby(['file_id']).agg(
        {'tid': pd.Series.nunique, 'api': pd.Series.nunique, 'return_value': pd.Series.nunique}
    ).reset_index()
    nunique.columns = ['file_id', 'tid_distinct_cnt', 'api_distinct_cnt', 'value_distinct_cnt']
    out = out.merge(nunique, on='file_id', how='left')

    # Per-thread call counts and distinct-API counts, then file-level stats.
    per_thread = data.groupby(['file_id', 'tid']).agg(
        {'index': pd.Series.count, 'api': pd.Series.nunique}
    ).reset_index()
    calls = per_thread.groupby(['file_id'])['index'].agg(['max', 'min', 'mean']).reset_index()
    calls.columns = ['file_id', 'tid_api_cnt_max', 'tid_api_cnt_min', 'tid_api_cnt_mean']
    out = out.merge(calls, on='file_id', how='left')

    distinct = per_thread.groupby(['file_id'])['api'].agg(['max', 'min', 'mean']).reset_index()
    distinct.columns = ['file_id', 'tid_api_distinct_cnt_max', 'tid_api_distinct_cnt_min', 'tid_api_distinct_cnt_mean']
    out = out.merge(distinct, on='file_id', how='left')

    # Rows whose return value is exactly 0.
    zeros = data[data.return_value == 0].groupby(['file_id']).return_value.count().reset_index(name='value_equals0_cnt')
    out = out.merge(zeros, on='file_id', how='left')

    # +1 smoothing keeps the rate defined for very small files.
    out.loc[:, 'value_equals0_rate'] = (out.value_equals0_cnt + 1) / (out.file_cnt + 1)
    return out
# FEATURE ENGINEERING V2
def makeFeature_v2(data):
    """Per-file, per-API pivot features.

    For every API name X observed in the data, emits:
      X_index_min           -- first call index of X within the file
      X_cnt                 -- number of times the file calls X
      X_cnt_rate            -- share of the file's calls that are X
      X_value_equals_0_cnt  -- calls of X that returned 0
    """
    out = data[['file_id']].drop_duplicates()

    # Total calls per file; used as the rate denominator below.
    total_calls = data.groupby(['file_id']).api.count()

    # First index at which each API appears.
    first_idx = data.groupby(['file_id', 'api'])['index'].min().reset_index(name='val')
    first_idx = first_idx.pivot(index='file_id', columns='api', values='val')
    first_idx.columns = [c + '_index_min' for c in first_idx.columns]
    out = out.merge(first_idx.reset_index(), on='file_id', how='left')

    # Call count per API.
    counts = data.groupby(['file_id', 'api'])['index'].count().reset_index(name='val')
    counts = counts.pivot(index='file_id', columns='api', values='val')
    counts.columns = [c + '_cnt' for c in counts.columns]
    out = out.merge(counts.reset_index(), on='file_id', how='left')

    # Call share per API (columns end up named "<api>_cnt_rate").
    rates = pd.concat([counts, total_calls], axis=1)
    rates = rates.apply(lambda col: col / rates.api)
    rates.columns = [c + '_rate' for c in rates.columns]
    out = out.merge(rates.reset_index().drop(['api_rate'], axis=1), on='file_id', how='left')

    # Zero-return count per API.
    zero_counts = data[data.return_value == 0].groupby(['file_id', 'api'])['index'].count().reset_index(name='val')
    zero_counts = zero_counts.pivot(index='file_id', columns='api', values='val')
    zero_counts.columns = [c + '_value_equals_0_cnt' for c in zero_counts.columns]
    out = out.merge(zero_counts.reset_index(), on='file_id', how='left')

    return out
# FEATURE ENGINEERING V3
def makeFeature_v3(data):
    """Per-file, per-API features around non-zero return values.

    For every API name X, emits:
      X_not0_index_min       -- first index where X returned non-zero
      X_not0_index_min_diff  -- that index minus X's first index overall
      X_equals0_rate         -- share of X's calls that returned 0
    """
    out = data[['file_id']].drop_duplicates()

    first_any = data.groupby(['file_id', 'api'])['index'].min().reset_index(name='min_index')
    first_not0 = data[data.return_value != 0].groupby(['file_id', 'api'])['index'].min().reset_index(name='value_not0_min_index')

    # First non-zero-return index per API.
    pivoted = first_not0.pivot(index='file_id', columns='api', values='value_not0_min_index')
    pivoted.columns = [c + '_not0_index_min' for c in pivoted.columns]
    out = out.merge(pivoted.reset_index(), on='file_id', how='left')

    # Gap between the first non-zero return and the API's first call.
    merged = first_any.merge(first_not0, on=['file_id', 'api'], how='left')
    merged.loc[:, 'api_index_not0_min_diff'] = merged['value_not0_min_index'] - merged['min_index']
    pivoted = merged.pivot(index='file_id', columns='api', values='api_index_not0_min_diff')
    pivoted.columns = [c + '_not0_index_min_diff' for c in pivoted.columns]
    out = out.merge(pivoted.reset_index(), on='file_id', how='left')

    # Fraction of each API's calls whose return value is 0.
    zeros = data[data.return_value == 0].groupby(['file_id', 'api'])['index'].count().reset_index(name='value_equals0_cnt')
    totals = data.groupby(['file_id', 'api']).return_value.count().reset_index(name='file_api_cnt')
    zeros = zeros.merge(totals, on=['file_id', 'api'], how='left')
    zeros.loc[:, 'value_equals0_rate'] = zeros['value_equals0_cnt'] / (zeros['file_api_cnt'] * 1.0)
    pivoted = zeros.pivot(index='file_id', columns='api', values='value_equals0_rate')
    pivoted.columns = [c + '_equals0_rate' for c in pivoted.columns]
    out = out.merge(pivoted.reset_index(), on='file_id', how='left')

    return out
def makeProbFeature(traindata, testdata):
    """Fit the TF-IDF model on the API sequences and return the vectors.

    Returns a ``(train_vectors, test_vectors)`` tuple.  The original
    version computed the vectors and silently discarded them (no return),
    making the call a no-op for its callers.
    """
    tr_api_vec, val_api_vec = tfidfModelTrain(traindata, testdata)
    return tr_api_vec, val_api_vec
# TIME-COST FUNCTION
@contextmanager
def timer(title):
    """Context manager that prints how long the wrapped block took."""
    start = time.time()
    yield
    print("{} - done in {:.2f}s".format(title, time.time() - start))
if __name__ == '__main__':
    # Pipeline: load the raw API-call logs, build the three hand-crafted
    # feature sets (v1-v3) for train and test, then the TF-IDF vectors and
    # out-of-fold NB-LR probabilities.  Everything is persisted under
    # DATA_PATH/data/ for the downstream model-training script.
    with timer('Load Data'):
        traindata = pd.read_csv(DATA_PATH+'/input/train.csv', dtype=DATA_TYPE, nrows=ROWS)
        testdata = pd.read_csv(DATA_PATH+'/input/test.csv', dtype=DATA_TYPE, nrows=ROWS)
        print('Train Dataset Length: ', traindata.shape[0])
        print('Test Dataset Length: ', testdata.shape[0])
    with timer('GBT Feature Engineering'):
        # MAKE TRAIN DATA FEATURES
        train_base_feature_v1 = makeFeature(traindata, True)
        print('Base Train Data: ', train_base_feature_v1.shape)
        train_base_feature_v1.to_csv(DATA_PATH+'/data/train_base_features_v1.csv', index=None)
        train_base_feature_v2 = makeFeature_v2(traindata)
        print('Base Train Data: ', train_base_feature_v2.shape)
        train_base_feature_v2.to_csv(DATA_PATH+'/data/train_base_features_v2.csv', index=None)
        train_base_feature_v3 = makeFeature_v3(traindata)
        print('Base Train Data: ', train_base_feature_v3.shape)
        train_base_feature_v3.to_csv(DATA_PATH+'/data/train_base_features_v3.csv', index=None)
        # MAKE TEST DATA FEATURES
        test_base_feature_v1 = makeFeature(testdata, False)
        print('Base Test Data: ', test_base_feature_v1.shape)
        test_base_feature_v1.to_csv(DATA_PATH+'/data/test_base_features_v1.csv', index=None)
        test_base_feature_v2 = makeFeature_v2(testdata)
        print('Base Test Data: ', test_base_feature_v2.shape)
        test_base_feature_v2.to_csv(DATA_PATH+'/data/test_base_features_v2.csv', index=None)
        test_base_feature_v3 = makeFeature_v3(testdata)
        print('Base Test Data: ', test_base_feature_v3.shape)
        test_base_feature_v3.to_csv(DATA_PATH+'/data/test_base_features_v3.csv', index=None)
    # Provided by 3sigma
    with timer('TFIDF and OVR-PROB Feature Engineering'):
        # TF-IDF vectors are sparse, so they are stored as .npz.
        tr_api_vec, val_api_vec = tfidfModelTrain(traindata, testdata)
        scipy.sparse.save_npz(DATA_PATH+'/data/tr_tfidf_rlt.npz', tr_api_vec)
        scipy.sparse.save_npz(DATA_PATH+'/data/te_tfidf_rlt.npz', val_api_vec)
        # Out-of-fold NB-LR probabilities built on top of the vectors.
        tr_prob, te_prob = nblrTrain(tr_api_vec, val_api_vec, train_base_feature_v1)
        tr_prob.to_csv(DATA_PATH+'/data/tr_lr_oof_prob.csv',index=False)
        te_prob.to_csv(DATA_PATH+'/data/te_lr_oof_prob.csv',index=False)
| DeanNg/3rd_security_competition | final_code/security_3rd_feature.py | security_3rd_feature.py | py | 9,978 | python | en | code | 53 | github-code | 1 | [
{
"api_name": "pandas.Series",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "security_3rd_mode... |
12771861930 | #!/usr/bin/python3
# runs VK modules in correct way
""" Developer and Author: Thomas Fire https://github.com/thomasfire (Telegram: @Thomas_Fire)
### Main manager: Uliy Bee
"""
from logging import exception
from os import getpid
import getmsg
import makeseq
import sendtovk
import updatemedia
def get_last_msg():
    """Return the numeric id of the last message in the history DB.

    History entries look like ``@ <id> :: <text> ;`` joined by newlines;
    the id of the final entry is parsed and returned as an int.
    """
    # "with" guarantees the handle is closed even if parsing raises;
    # the original left the file open on error.
    with open('files/msgshistory.db', 'r') as f:
        raw = f.read()
    num = int(raw.strip('@ ').strip(' ;').split(' ;\n@ ')[-1].split(' :: ')[0])
    print(num)
    return num
def run_vk_bot(vk, chatid, albumid, userid, msgshistory, tl_msgs, list_of_alles, list_of_imnts, list_of_cmds,
               iterations_vk, curr_stat):
    """Main VK relay loop: poll messages, relay to TL, process commands.

    Runs forever; connection resets are retried silently, everything else
    is logged and the loop continues.  ``iterations_vk`` and ``curr_stat``
    look like shared state read by a supervising process -- TODO confirm.
    """
    userdic = getmsg.get_user_dict()
    keywords = makeseq.load_keywords()
    # Resume from the last message id recorded in the history file.
    lastid = get_last_msg()
    updatemedia.main(vk, albumid, userid)
    curr_stat['PID_VK'] = str(getpid())
    print('VK is ready to start...PID_VK: {0}'.format(curr_stat['PID_VK']))
    cycles = 0
    while True:
        try:
            # getting messages
            lastid, userdic, messages = getmsg.getmain(vk, chatid, msgshistory, userdic, lastid)
            # update list of available media every 1000th iterarion. It is about every 8-20th minute if you have non-server connection
            if cycles >= 1000:
                # clearsent(sent_msgs)
                updatemedia.main(vk, albumid, userid)
                cycles = 0
            # running retranslation to TL only if there are new messages from VK
            for x in messages:
                makeseq.mkmain(x, keywords, list_of_alles, list_of_imnts, list_of_cmds)
            # processing commands and retranslation_from_TL in VK
            sendtovk.stvmain(vk, chatid, list_of_cmds, tl_msgs)
            cycles += 1
            iterations_vk.value += 1
        except ConnectionResetError: # there are often this type of errors, but it is not my fault
            continue
        except Exception as exp:
            exception("Something gone wrong in vk_bot:\n")
| thomasfire/agent_smith | vk_run.py | vk_run.py | py | 1,972 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "getmsg.get_user_dict",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "makeseq.load_keywords",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "updatemedia.main",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.getpid... |
23481077423 | # Recode By @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import importlib
import logging
import os
import sys
from pathlib import Path
from userbot import CMD_HELP, DEVS, LOGS, TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import register
from userbot.utils import edit_or_reply
DELETE_TIMEOUT = 5
thumb_image_path = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "thumb_image.jpg")
async def reply_id(event):
    """Pick the message id to reply to: the replied-to message when there
    is one, otherwise the triggering message itself if sent by a DEV."""
    if event.reply_to_msg_id:
        return event.reply_to_msg_id
    if event.sender_id in DEVS:
        return event.id
    return None
def load_module(shortname):
    """Import userbot/modules/<shortname>.py at runtime.

    Naming convention (from the branches below):
      __name  -- skipped entirely,
      name_   -- loaded bare, without injecting any userbot globals,
      name    -- loaded with bot/LOGS/CMD_HELP/logger attached first.
    """
    if shortname.startswith("__"):
        pass
    elif shortname.endswith("_"):
        path = Path(f"userbot/modules/{shortname}.py")
        name = "userbot.modules.{}".format(shortname)
        spec = importlib.util.spec_from_file_location(name, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        LOGS.info("Successfully imported " + shortname)
    else:
        path = Path(f"userbot/modules/{shortname}.py")
        name = "userbot.modules.{}".format(shortname)
        spec = importlib.util.spec_from_file_location(name, path)
        mod = importlib.util.module_from_spec(spec)
        # Inject the userbot globals before executing the module so the
        # plugin can use them at import time.
        mod.bot = bot
        mod.LOGS = LOGS
        mod.CMD_HELP = CMD_HELP
        mod.logger = logging.getLogger(shortname)
        spec.loader.exec_module(mod)
        # for imports
        sys.modules["userbot.modules." + shortname] = mod
        LOGS.info("Successfully imported " + shortname)
@register(outgoing=True, pattern=r"^\.install$")
async def _(event):
    """.install -- download the replied-to .py file into userbot/modules
    and import it as a plugin."""
    if event.fwd_from:
        return
    if event.reply_to_msg_id:
        # BUG FIX: initialise before the try, otherwise the except block's
        # os.remove raised NameError (masking the real error) whenever the
        # download itself failed.
        downloaded_file_name = None
        try:
            await event.edit("`Installing Modules...`")
            downloaded_file_name = (
                await event.client.download_media(  # pylint:disable=E0602
                    await event.get_reply_message(),
                    "userbot/modules/",  # pylint:disable=E0602
                )
            )
            # Telegram appends "(1)" etc. to duplicate filenames, so a
            # parenthesis means the plugin file already exists on disk.
            if "(" not in downloaded_file_name:
                shortname = Path(downloaded_file_name).stem
                load_module(shortname.replace(".py", ""))
                await event.edit(
                    "**Plugin** `{}` **Berhasil di install**".format(
                        os.path.basename(downloaded_file_name)
                    )
                )
            else:
                os.remove(downloaded_file_name)
                await event.edit("**Error!** Plugin ini sudah terinstall di userbot.")
        except Exception as e:
            await event.edit(str(e))
            # Only clean up a file that was actually downloaded.
            if downloaded_file_name and os.path.exists(downloaded_file_name):
                os.remove(downloaded_file_name)
@register(outgoing=True, pattern=r"^\.psend ([\s\S]*)")
async def send(event):
    """.psend <name> -- upload the named module file as a document."""
    reply_to = await reply_id(event)
    thumbnail = thumb_image_path if os.path.exists(thumb_image_path) else None
    module_name = event.pattern_match.group(1)
    module_path = f"./userbot/modules/{module_name}.py"
    if not os.path.exists(module_path):
        await edit_or_reply(event, "**ERROR: Modules Tidak ditemukan**")
        return
    await event.client.send_file(
        event.chat_id,
        module_path,
        force_document=True,
        allow_cache=False,
        reply_to=reply_to,
        thumb=thumbnail,
        caption=f"➠ **Nama Plugin:** `{module_name}`",
    )
    await event.delete()
# Register the .help entry describing this plugin's commands.
CMD_HELP.update(
    {
        "core": "**Plugin : **`core`\
\n\n • **Syntax :** `.install` <reply ke file module>\
\n • **Function : **Untuk Menginstall plugins userbot secara instan.\
\n\n • **Syntax :** `.psend` <nama module>\
\n • **Function : **Untuk Mengirim module userbot secara instan.\
"
    }
)
| rainbowgirlidx/Man_Userbot | userbot/modules/core.py | core.py | py | 3,876 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "userbot.TEMP_DOWNLOAD_DIRECTORY",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "userbot... |
38904399549 | import pandas as pd
from silx.gui.qt import QMessageBox
from datetime import datetime
def save_csv(self):
    """Convert every selected integrated curve from .dat to .csv.

    Shows an error dialog when nothing is selected in the loaded list.
    """
    filepath = self.imagepath
    # Retained for parity with the original (value unused here).
    q_choice = self.q_combo.currentText()
    selected = [item.text() for item in self.loadedlistwidget.selectedItems()]
    # Keep .nxs names verbatim; strip the extension from everything else.
    curvenames = [c if '.nxs' in c else c.split('.')[0] for c in selected]
    if selected == []:
        msg = QMessageBox()
        msg.setWindowTitle("Error")
        msg.setText("Please Select Integrated Curve to Save")
        x = msg.exec_()
    else:
        for curve in curvenames:
            frame = pd.read_csv(r'{}/{}.dat'.format(filepath, curve), header=None, sep=",", skiprows=2)
            frame.to_csv(filepath + '/{}_csv.csv'.format(curve), index=False, header=False)
def save_dat(filename, filepath, res, q_choice):
    """Write *res* to "<filepath>/<filename>.dat" with a timestamp header.

    The 'radial' column is renamed to 'radial-<q_choice>' in the output;
    *res* itself is not modified (a copy is written).
    """
    # dd/mm/YY H:M:S
    dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    date_time = 'DAT file by SAXSII - Timestamp: {}'.format(dt_string)
    res_to_save = res.copy()
    res_to_save.columns = [f'radial-{q_choice}' if x == 'radial' else x for x in res_to_save.columns]
    save_path = '{}/{}.dat'.format(filepath, filename)
    # BUG FIX: the original wrote the header through one (buffered) handle
    # while to_csv(mode='a') appended through a second handle to the same
    # file; flushing the first handle on close then overwrote the start of
    # the CSV data.  Writing everything through a single handle avoids it.
    with open(save_path, 'w') as f:
        f.write('#{}\n'.format(date_time))
        res_to_save.to_csv(f, index=False)
{
"api_name": "silx.gui.qt.QMessageBox",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetim... |
40785984696 | import pytest
from werkzeug.exceptions import NotFound, BadRequest
from app import api
from app.models import Camera
def test_get_query_string_params(application):
    """Query-string parameters are exposed as a plain dict of strings."""
    with application.test_request_context('/?foo=bar&life=42'):
        params = api.get_query_string_params()
        assert params == {'foo': 'bar', 'life': '42'}
def test_get_resource_empty(session, application):
    """With no rows in the table, the collection endpoint yields an empty list."""
    with application.test_request_context('/api/cameras'):
        result = api.get_resource(Camera, None)
        assert isinstance(result, list)
        assert not result
def test_get_resource_single_item(session):
    """A single stored camera comes back as a dict with its fields."""
    session.add(Camera(Name='foo'))
    session.commit()
    resource = api.get_resource(Camera, 1)
    assert isinstance(resource, dict)
    assert (resource['Name'], resource['ID']) == ('foo', 1)
def test_get_resource_not_found(session):
    """Requesting an id with no matching row raises NotFound (404)."""
    with pytest.raises(NotFound):
        api.get_resource(Camera, 2)
def test_get_resource_multiple_items(session, application):
    """Single-item lookup and the full collection agree on stored rows."""
    session.add(Camera(Name='foo'))
    session.commit()
    session.add(Camera(Name='bar'))
    session.commit()
    # Fetch by id returns one dict.
    with application.test_request_context('/api/cameras/1'):
        resource = api.get_resource(Camera, 1)
        assert isinstance(resource, dict)
        assert resource['Name'] == 'foo'
        assert resource['ID'] == 1
    # Fetch without an id returns the whole collection, insertion order.
    with application.test_request_context('/api/cameras'):
        resources = api.get_resource(Camera, None)
        assert isinstance(resources, list)
        assert resources[0]['Name'] == 'foo'
        assert resources[0]['ID'] == 1
        assert resources[1]['Name'] == 'bar'
        assert resources[1]['ID'] == 2
def test_get_data_from_json(application):
    """A missing JSON body raises BadRequest; a JSON body is returned verbatim."""
    with pytest.raises(BadRequest):
        with application.test_request_context(json=None):
            api.get_data_from_json()
    with application.test_request_context(json={'foo': 'bar'}):
        payload = api.get_data_from_json()
        assert payload == {'foo': 'bar'}
def test_create_resource(session, application):
    """create_resource inserts from the JSON body; duplicate names and a
    missing body both raise BadRequest."""
    # We need the session object so the item can be added to the db
    with application.test_request_context(json={'Name': 'foo'}):
        cam = api.create_resource(Camera)
        assert isinstance(cam, dict)
        assert cam['Name'] == 'foo'
        assert cam['ID'] == 1
    # Same name again -> uniqueness violation surfaces as BadRequest.
    with pytest.raises(BadRequest):
        with application.test_request_context(json={'Name': 'foo'}):
            cam = api.create_resource(Camera)
    # No JSON body at all -> BadRequest.
    with pytest.raises(BadRequest):
        with application.test_request_context(json=None):
            cam = api.create_resource(Camera)
def test_update_resource(session, application):
    """update_resource accepts JSON or query-string data, bumps Updated,
    preserves Created, and raises NotFound/BadRequest on bad input."""
    session.add(Camera(Name='foo'))
    session.commit()
    og_cam = Camera.query.first()
    # Query-string form of the update payload.
    url = "/api?Name=bar"
    with pytest.raises(NotFound):
        api.update_resource(Camera, 2)
    with pytest.raises(NotFound):
        with application.test_request_context(url):
            api.update_resource(Camera)
    with pytest.raises(BadRequest):
        with application.test_request_context(json=None):
            api.update_resource(Camera, 1)
    # Update via JSON body.
    with application.test_request_context(json={'Name': 'bar'}):
        cam = api.update_resource(Camera, 1)
    assert cam['Name'] == 'bar'
    assert cam['Created'] == og_cam.Created.isoformat()
    assert cam['Updated'] != og_cam.Updated.isoformat()
    up1_cam = Camera.query.first()
    assert up1_cam.Name == 'bar'
    assert up1_cam.Updated.isoformat() == cam['Updated']
    # Update via query string (json body present but url params win here).
    with application.test_request_context(url, json={'Name': 'foo'}):
        cam = api.update_resource(Camera)
    assert cam['Name'] == 'foo'
    assert cam['Created'] == og_cam.Created.isoformat()
    assert cam['Updated'] != og_cam.Updated.isoformat()
    assert cam['Updated'] != up1_cam.Updated.isoformat()
    up2_cam = Camera.query.first()
    assert up2_cam.Name == 'foo'
    assert up2_cam.Updated.isoformat() == cam['Updated']
def test_delete_resource(session):
    """Deleting soft-deactivates the camera; bad ids raise 400/404."""
    session.add(Camera(Name='foo'))
    session.commit()
    assert Camera.query.first().Active
    deleted = api.delete_resource(Camera, 1)
    assert not deleted['Active']
    assert not Camera.query.first().Active
    with pytest.raises(BadRequest):
        api.delete_resource(Camera, None)
    with pytest.raises(NotFound):
        api.delete_resource(Camera, 2)
def test_get_create_update_or_delete(session, application):
    """End-to-end CRUD exercise through the HTTP test client."""
    client = application.test_client()
    # Test create
    r = client.post('/api/cameras', json={'Name': 'foo'})
    assert r.status_code == 201
    cam1 = r.json
    r = client.post('/api/cameras', json={'Name': 'bar'})
    assert r.status_code == 201
    cam2 = r.json
    assert cam1['Name'] == 'foo'
    assert cam1['ID'] == 1
    assert cam2['Name'] == 'bar'
    assert cam2['ID'] == 2
    # Related resources needed before an image can be created.
    r = client.post('/api/product_types', json={'Name': 'EDR'})
    assert r.status_code == 201
    pt = r.json
    pt['Name'] == 'EDR'
    r = client.post(
        '/api/images',
        json={
            'Name': 'im1',
            'URL': 'url',
            'DetatchedLabel': False,
            'Sol': 42,
            'CameraID': 1,
            'ProductTypeID': 1,
        }
    )
    assert r.status_code == 201
    im = r.json
    im['Name'] == 'im1'
    # Test Update
    r = client.put('/api/cameras/2', json={'Name': 'baz'})
    assert r.status_code == 200
    assert r.json['Name'] == 'baz'
    # Test Get
    r = client.get('/api/cameras/1')
    assert r.status_code == 200
    assert r.json['Name'] == 'foo'
    r = client.get('/api/cameras')
    assert r.status_code == 200
    assert isinstance(r.json, list)
    assert len(r.json) == 2
    # Filtering by query string narrows the collection.
    r = client.get('/api/cameras?Name=foo')
    assert isinstance(r.json, list)
    assert len(r.json) == 1
    # Test Delete
    r = client.delete('/api/cameras/2')
    assert r.status_code == 200
    assert not r.json['Active']
| percurnicus/opportunity | app/tests/test_api.py | test_api.py | py | 6,004 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.api.get_query_string_params",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.api",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.api.get_resource",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.mode... |
39273846847 | import os
import sys
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
import glob
import random
from torchvision import transforms
import matplotlib.pyplot as plt
import scipy.misc
class SALICON(data.Dataset):
    """SALICON-style dataset of stimulus images and ground-truth maps.

    Each sample is returned as (stimulus_h, stimulus_l, ground_truth):
    the stimulus transformed by transform_h and by transform_l, plus the
    (optionally transformed) ground-truth map.
    """

    def __init__(self, stimuli_train_path, stimuli_val_path, gt_train_path, gt_val_path, augment=None, transform_h=None, transform_l=None, target=None, mode='train'):
        self.gt_train_path = gt_train_path
        self.gt_val_path = gt_val_path
        self.mode = mode
        self.augment = augment
        self.transform_h = transform_h
        self.transform_l = transform_l
        self.target = target
        # Sorted file lists keep stimuli and ground truth aligned by name.
        if mode == 'train':
            self.data_list = sorted(glob.glob(stimuli_train_path + '*'))
            self.gt_list = sorted(glob.glob(gt_train_path + '*'))
            print('Total training samples are {}'.format(len(self.data_list)))
        else:
            self.data_list = sorted(glob.glob(stimuli_val_path + '*'))
            self.gt_list = sorted(glob.glob(gt_val_path + '*'))
            print('Total validating samples are {}'.format(len(self.data_list)))

    def __getitem__(self, idx):
        stimulus = Image.open(self.data_list[idx])
        ground_truth = Image.open(self.gt_list[idx])
        # Joint augmentation keeps the stimulus and map spatially in sync.
        if self.augment is not None:
            stimulus, ground_truth = self.augment(stimulus, ground_truth)
        if self.transform_h is not None:
            stimulus_h = self.transform_h(stimulus)
        if self.transform_l is not None:
            stimulus_l = self.transform_l(stimulus)
        if self.target is not None:
            ground_truth = self.target(ground_truth)
        return stimulus_h, stimulus_l, ground_truth

    def __len__(self):
        return len(self.data_list)
| pengqianli/ACNet | src/dataloader.py | dataloader.py | py | 1,863 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "glob.glob",
... |
74718431073 | import requests
from PIL import Image
from bs4 import BeautifulSoup
import os
import xlwt
import re
def get_captcha(si_code):
    """Download the captcha image for *si_code*, show it, and return the code
    typed by the user.

    Relies on the module-level ``session`` and ``header`` globals.
    """
    captcha_url = "http://my.ujs.edu.cn/" + si_code
    print(captcha_url)
    r = session.get(captcha_url, headers=header)
    with open("captcha.jpg","wb") as f:
        f.write(r.content)
    try:
        im = Image.open("captcha.jpg")
        im.show()
        im.close()
    # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt;
    # display can legitimately fail on headless systems, so fall back to a hint.
    except Exception:
        print(f"请到 {os.path.abspath('captcha.jpg')} 目录下 找到captcha.jpg文件手动输入")
    captcha = input("please input the captcha\n")
    return captcha
def get_si_code(url):
    """Fetch the login page and scrape the dynamic captcha image path.

    The ``src`` of the ``captchaImg`` element changes per session, so it must
    be re-read every time before requesting the captcha picture.
    """
    html = session.get(url, headers=header).text
    soup = BeautifulSoup(html, "lxml")
    si_code = soup.find("img", {"id":"captchaImg"})["src"]
    print(si_code)
    return si_code
def login(user, pwd, si_code, url):
    """Solve the captcha interactively and POST the login form to the portal."""
    form = {
        "username": user,
        "pwd": pwd,
        "captcha": get_captcha(si_code),
    }
    login_page = session.post(url, data=form, headers=header)
    print(login_page.status_code)
if __name__ == '__main__':
    url = "http://my.ujs.edu.cn/index.portal?.pn=p2365_p2536"
    # Browser-like headers so the portal serves the normal login page.
    header = {
        "Host": "my.ujs.edu.cn",
        "Referer": "http://my.ujs.edu.cn/index.portal?.pn=p2365_p2536",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
    }
    session = requests.session()
    session.get(url, headers=header)
    # Grab the per-session captcha path, then log in with user input.
    si_code = get_si_code(url)
    id = input("enter your id:")
    password = input("enter your password:")
    login(id, password, si_code, url)
| beenlyons/Allspder | PySpider/ujsScore/get_score.py | get_score.py | py | 1,871 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
15127656412 | from collections import Counter
# O(n*log(n)) time | O(n) space
# O(n*log(n)) time | O(n) space
class Solution:
    """LeetCode 2007: recover the original array from a 'doubled' array."""

    def findOriginalArray(self, changed: list[int]) -> list[int]:
        """Return an array `original` such that original + [2*x for x in original]
        is a permutation of *changed*, or [] if none exists.

        Note: the annotation previously used `List[int]` although `typing.List`
        was never imported, which raised NameError when the class body was
        executed; the builtin generic (Python 3.9+) fixes that.
        """
        # An original cannot exist for an odd-length input.
        if len(changed) % 2 != 0:
            return []
        # Sort so every element is seen before its double.
        changed.sort()
        count = Counter(changed)
        output = []
        for num in changed:
            if count[num] >= 1:
                count[num] -= 1
                if count[num * 2] >= 1:
                    output.append(num)
                    count[num * 2] -= 1
                    # Found n/2 pairs -> every element was consumed by a pair.
                    if len(output) == len(changed) // 2:
                        return output
        return []
| mmichalak-swe/Algo_Expert_Python | LeetCode/2007_Find_Original_Array_From_Doubled/attempt_1.py | attempt_1.py | py | 682 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
}
] |
1586045599 | # -*- coding: utf-8 -*-
"""
*******************
tests.conftest
*******************
Utility functions that are used to configure Py.Test context.
"""
import os
import pytest
def pytest_addoption(parser):
    """Register the suite's custom command-line options on the Py.Test parser.

    All options use ``action="store"``; only flag, default, and help differ.
    """
    option_specs = [
        ("--inputs",
         "/home/travis/build/highcharts-for-python/highcharts-core/tests/input_files",
         ("inputs: the absolute path to the directory where input"
          " files can be found")),
        ("--downloads",
         "true",
         ("downloads: set to 'false' to disable tests of chart export "
          "via the Highsoft-provided Export Server.")),
        ("--create-output-directory",
         "true",
         ("create-output-directory: set to 'false' to error if the output "
          "directory does not exist, otherwise creates it.")),
        ("--pyspark",
         "false",
         ("pyspark: set to 'false' to disable tests of pyspark-related"
          " functionality, or 'true' to enable those tests. Defaults to"
          " 'false'")),
    ]
    for flag, default, help_text in option_specs:
        parser.addoption(flag, action="store", default=default, help=help_text)
def pytest_runtest_makereport(item, call):
    """Record a failing 'incremental' test on its parent for later lookup."""
    # pylint: disable=W0212
    if "incremental" not in item.keywords:
        return
    if call.excinfo is None:
        return
    # Stash the failed item so pytest_runtest_setup can xfail its successors.
    item.parent._previousfailed = item
def pytest_runtest_setup(item):
    """xfail an 'incremental' test whose preceding sibling already failed."""
    if "incremental" not in item.keywords:
        return
    previousfailed = getattr(item.parent, "_previousfailed", None)
    if previousfailed is None:
        return
    pytest.xfail(
        "previous test failed (%s) for reason: %s" % (previousfailed.name,
                                                      previousfailed))
| highcharts-for-python/highcharts-core | tests/conftest.py | conftest.py | py | 2,420 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "pytest.xfail",
"line_number": 64,
"usage_type": "call"
}
] |
6649268514 | import copy
import logging
from typing import Optional
import requests
from ocean_provider.utils.basics import get_web3
from ocean_provider.utils.consumable import ConsumableCodes
from ocean_provider.utils.credentials import AddressCredential
from ocean_provider.utils.data_nft import get_data_nft_contract
from ocean_provider.utils.services import Service
logger = logging.getLogger(__name__)
class Asset:
    """In-memory view of a DDO document fetched from the metadata store."""

    def __init__(self, asset_dict: dict) -> None:
        # Work on a deep copy so popping keys never mutates the caller's dict.
        payload = copy.deepcopy(asset_dict)
        self.did = payload.pop("id", None)
        self.version = payload.pop("version", None)
        self.nftAddress = payload.pop("nftAddress", None)
        self.chain_id = payload.pop("chainId", None)
        self.metadata = payload.pop("metadata", None)
        self.services = [
            Service.from_json(position, service_dict)
            for position, service_dict in enumerate(payload.pop("services", []))
        ]
        self.credentials = payload.pop("credentials", None)
        self.nft = payload.pop("nft", None)
        self.datatokens = payload.pop("datatokens", None)
        self.event = payload.pop("event", None)
        self.stats = payload.pop("stats", None)

    def get_service_by_index(self, index: int) -> Service:
        """Return the first Service with the given index (raises StopIteration
        when no service matches)."""
        matches = (service for service in self.services if service.index == index)
        return next(matches)

    def get_service_by_id(self, service_id: str) -> Service:
        """Return the Service with the matching id, or None when absent."""
        for service in self.services:
            if service.id == service_id:
                return service
        return None

    @property
    def requires_address_credential(self) -> bool:
        """Whether this asset's credentials gate access by address."""
        return AddressCredential(self).requires_credential()

    @property
    def allowed_addresses(self) -> list:
        """Addresses explicitly allowed in the credentials."""
        return AddressCredential(self).get_addresses_of_class("allow")

    @property
    def denied_addresses(self) -> list:
        """Addresses explicitly denied in the credentials."""
        return AddressCredential(self).get_addresses_of_class("deny")

    @property
    def is_disabled(self) -> bool:
        # Disabled when metadata is missing, or the NFT state is neither
        # 0 (active) nor 5 -- kept as the original short-circuit expression.
        return not self.metadata or (self.nft and self.nft["state"] not in [0, 5])

    def is_consumable(
        self,
        credential: Optional[dict] = None,
        with_connectivity_check: bool = True,
        provider_uri: Optional[str] = None,
    ) -> ConsumableCodes:
        """Return a ConsumableCode describing whether the asset can be consumed."""
        if self.is_disabled:
            return ConsumableCodes.ASSET_DISABLED
        gate = AddressCredential(self)
        if gate.requires_credential():
            return gate.validate_access(credential)
        return ConsumableCodes.OK
def get_asset_from_metadatastore(metadata_url, document_id) -> Optional[Asset]:
    """Fetch a DDO from Aquarius and wrap it in an Asset.

    :return: `Asset` instance, or None on any non-200 response
    """
    url = f"{metadata_url}/api/aquarius/assets/ddo/{document_id}"
    response = requests.get(url)
    if response.status_code != 200:
        return None
    return Asset(response.json())
def check_asset_consumable(asset, consumer_address, logger, custom_url=None):
    """Return ``(consumable, message)`` for *asset* as seen by *consumer_address*.

    Checks, in order: basic asset shape, the on-chain NFT metadata state,
    then the asset's own credential rules.
    """
    if not asset.nft or "address" not in asset.nft or not asset.chain_id:
        return False, "Asset malformed or disabled."
    web3 = get_web3(asset.chain_id)
    nft_contract = get_data_nft_contract(web3, asset.nft["address"])
    # Index 2 of getMetaData() is the state; only 0 and 5 are consumable.
    metadata_state = nft_contract.functions.getMetaData().call()[2]
    if metadata_state not in [0, 5]:
        return False, "Asset is not consumable."
    code = asset.is_consumable({"type": "address", "value": consumer_address})
    if code == ConsumableCodes.OK:  # is consumable
        return True, ""
    message = f"Error: Access to asset {asset.did} was denied with code: {code}."
    logger.error(message, exc_info=1)
    return False, message
| oceanprotocol/provider | ocean_provider/utils/asset.py | asset.py | py | 4,033 | python | en | code | 25 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ocean_provider.utils.services.Service.from_json",
"line_number": 24,
"usage_type": "call"
},
{
"a... |
11038920804 | import os
import boto3
import time
import copy
import subprocess
import threading
import multiprocessing
MAX_WORKERS = 1
SRC_ACCESS_KEY = os.environ['SRC_ACCESS_KEY']
SRC_SECRET_KEY = os.environ['SRC_SECRET_KEY']
SRC_REGION = os.environ['SRC_REGION']
DST_ACCESS_KEY = os.environ['DST_ACCESS_KEY']
DST_SECRET_KEY = os.environ['DST_SECRET_KEY']
DST_REGION = os.environ['DST_REGION']
DST_REPO_URI = os.environ['DST_REPO_URI']
class Repository:
    """Value object describing one ECR repository.

    name: repository name, e.g. "my-service"
    uri: full repository URI used to build docker pull/push tags
    images: list of image-id dicts (each carrying an 'imageTag' key)
    """
    def __init__(self, name, uri, images):
        self.name = name
        self.uri = uri
        self.images = images
def create_dst_tag(repoName, imageTag):
    """Build the destination image reference '<DST_REPO_URI>/<repo>:<tag>'."""
    return '%s/%s:%s' % (DST_REPO_URI, repoName, imageTag)
def get_catalog(client):
    """Return {repository name: Repository} for every repo visible to *client*,
    collecting all TAGGED images via the list_images paginator."""
    catalog = {}
    for repo_info in client.describe_repositories()['repositories']:
        repo_name = repo_info['repositoryName']
        repo_uri = repo_info['repositoryUri']
        tagged_images = []
        paginator = client.get_paginator('list_images')
        for page in paginator.paginate(repositoryName=repo_name, filter={'tagStatus': 'TAGGED'}):
            tagged_images.extend(page['imageIds'])
        catalog[repo_name] = Repository(repo_name, repo_uri, tagged_images)
    return catalog
def calculate_catalog_diff(src, dst):
    """Return {name: Repository} of repos/images present in *src* but not *dst*.

    A repository absent from *dst* is carried over whole; otherwise only the
    image entries whose tag is missing from *dst* are kept.
    """
    diff = {}
    # BUG FIX: .iteritems() is Python-2-only; .items() works on 2 and 3.
    for name, repo in src.items():
        if not name in dst:
            diff[name] = repo
            continue
        # Start from a deep copy and remove every image whose tag already
        # exists in the destination repository.
        image_diff = copy.deepcopy(repo.images)
        for src_image in repo.images:
            src_tag = src_image['imageTag']
            for dst_image in dst[name].images:
                dst_tag = dst_image['imageTag']
                if src_tag == dst_tag:
                    image_diff.remove(src_image)
                    break
        if len(image_diff) > 0:
            diff[name] = Repository(repo.name, repo.uri, image_diff)
    return diff
def migrate(repos, src, dst):
    """Copy every image of every repo in *repos* to the destination registry
    by shelling out to `docker pull` / `tag` / `push`.

    :param repos: iterable of Repository objects to migrate
    :param src: source ECR client (unused here; kept for call symmetry)
    :param dst: destination ECR client, used to ensure the repo exists
    """
    for repo in repos:
        print('Migrating {0} images(s) for repo {1}'.format(len(repo.images), repo.name))
        try:
            dst.create_repository(repositoryName=repo.name)
        # Narrowed from a bare `except:` (which also ate KeyboardInterrupt);
        # creation failing usually just means the repository already exists.
        except Exception:
            pass
        for image in repo.images:
            src_tag = '{0}:{1}'.format(repo.uri, image['imageTag'])
            dst_tag = create_dst_tag(repo.name, image['imageTag'])
            subprocess.call(['docker', 'pull', src_tag])
            subprocess.call(['docker', 'tag', src_tag, dst_tag])
            subprocess.call(['docker', 'push', dst_tag])
def split_work(repos, size):
    """Deal *repos* round-robin into *size* buckets (bucket i gets repos[i::size])."""
    # BUG FIX: xrange is Python-2-only and raises NameError on Python 3.
    return [repos[i::size] for i in range(size)]
def main():
    """Mirror all ECR repositories/images from the source to the destination
    account, parallelising the docker work across a small thread pool."""
    # Separate boto3 clients: credentials/region differ per account.
    src_client = boto3.client('ecr',
        region_name=SRC_REGION,
        aws_access_key_id=SRC_ACCESS_KEY,
        aws_secret_access_key=SRC_SECRET_KEY)
    dst_client = boto3.client('ecr',
        region_name=DST_REGION,
        aws_access_key_id=DST_ACCESS_KEY,
        aws_secret_access_key=DST_SECRET_KEY)
    print('Fetching source catalog')
    src_catalog = get_catalog(src_client)
    print('Fetching destination catalog')
    dst_catalog = get_catalog(dst_client)
    # Migrate repos with least amount of images first
    print('Calculating difference')
    diff = calculate_catalog_diff(src_catalog, dst_catalog).values()
    diff = sorted(diff, key=lambda r: len(r.images))
    print('Migrating the following repositories:')
    for repo in diff:
        print('{0} ({1} images)'.format(repo.name, len(repo.images)))
    threads = []
    # Round-robin the repos over at most MAX_WORKERS threads (capped by CPUs).
    for repos in split_work(diff, min(multiprocessing.cpu_count(), MAX_WORKERS)):
        thread = threading.Thread(target=migrate, args=(repos, src_client, dst_client))
        thread.daemon = True
        threads.append(thread)
    print('Migrating {0} repos between {1} thread(s)'.format(len(diff), len(threads)))
    time.sleep(2)
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
# Entry point: run the full source -> destination ECR migration.
if __name__ == '__main__':
    main()
| quintilesims/d.ims.io | tools/migrate.py | migrate.py | py | 3,876 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"lin... |
37670160302 | ## Top 5 restaurant types
from pymongo import MongoClient
def get_node(db):
    """Return the top-5 restaurant cuisines (with counts) from OSMLagny.

    BUG FIX: the original $match literal was
    ``{"amenity": {"$exists": 1}, "amenity": "restaurant"}`` — a duplicate
    Python dict key, so the $exists clause was silently overwritten. The
    equality test alone (which implies existence) is kept.
    """
    pipeline = [
        {"$match": {"amenity": "restaurant"}},
        {"$group": {"_id": "$cuisine", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 5},
    ]
    return list(db.OSMLagny.aggregate(pipeline))
def get_db():
    """Connect to the local MongoDB instance and return the mongData database."""
    connection = MongoClient('localhost:27017')
    return connection.mongData
if __name__ == "__main__":
db = get_db()
pprint.pprint(get_node(db)) | narquie/Data-Wrangling-Nanodegree | Python Scripts Queries/Top 5 Restaurants.py | Top 5 Restaurants.py | py | 461 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
}
] |
42568375826 | import os
import shutil
from typing import List
def main(source_dir: str, des_dir: str, ext: str):
    """Copy every file under *source_dir* whose extension is in *ext* into *des_dir*."""
    for path in list_file_with_ext(source_dir, ext):
        shutil.copy(path, des_dir)
# List all music files with path. Get file type by extension name.
# List all music files with path. Get file type by extension name.
def list_file_with_ext(file_path: str, ext_list: List[str]) -> List[str]:
    """Recursively collect paths under *file_path* whose extension is in *ext_list*.

    BUG FIX: the annotation previously said ``List[int]``, but callers pass
    extension strings such as '.mp3'. Also stops re-binding the *file_path*
    parameter inside the loop (it shadowed the argument).
    """
    result = []
    for home, _dirs, files in os.walk(file_path):
        for name in files:
            # splitext keeps the leading dot, matching entries like '.wav'.
            if os.path.splitext(name)[1] in ext_list:
                result.append(os.path.join(home, name))
    return result
# Script configuration: collect all audio files found under the current
# directory tree into a fresh folder.
source_dir = './'
des_dir = './0_new_folder'
ext_list = ['.wav', '.mp3', '.flac', '.ape']
# Intentionally not exist_ok: raises FileExistsError if the folder is already there.
os.makedirs(des_dir)
main(source_dir, des_dir, ext_list)
| zhiwenliang/scripts | file/colect_files_from_folder.py | colect_files_from_folder.py | py | 747 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "shutil.copy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": ... |
73967433954 | import discogs_client
import config
import pandas as pd
#Connect using auth token
# Authenticate against the Discogs API with the personal token from config.
d = discogs_client.Client('discogsAnalytics', user_token=config.discogs_token)
me=d.identity()
collection_data = []
# Walk the releases of collection folder 1 and flatten each into a row.
# NOTE(review): only the first format/artist/genre entry is kept per release.
for album in me.collection_folders[1].releases:
    formatlist = album.release.formats[0]
    format_desc = formatlist['descriptions']
    media_type = format_desc[0]
    data = [
        album.release.artists[0].name,
        album.release.title,
        album.release.genres[0],
        media_type,
        album.release.year,
        # album.release.community.have
    ]
    collection_data.append(data)
# Tabulate the collection and print it for inspection.
df = pd.DataFrame(collection_data, columns=['Artists','Title','Genre','Format','Year'])
print(df)
| scottpmchugh/DiscogsAnalytics | extraction/discogsExtraction.py | discogsExtraction.py | py | 684 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "discogs_client.Client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "config.discogs_token",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
}
] |
3176992392 | import tweepy
from app.models.twitter import auth
from app.models.twitter import utils
def get_friends(username):
    """Return the friend (following) list for *username* via the shared API client."""
    client = auth.get_api()
    return client.friends(screen_name=username,
                          include_user_entities='false',
                          skip_status='true')
def screen_names_of_friends(username):
    """Return just the screen names of *username*'s friends."""
    return [friend.screen_name for friend in get_friends(username)]
def search(query, count, sinceId=None):
    """Search recent tweets matching *query* and return up to *count* results
    as dicts with screen_name, created_at, text, id, and video URL.

    NOTE(review): *sinceId* is accepted for API compatibility but is not yet
    forwarded to the Twitter search call.
    """
    api = auth.get_api()
    results = []
    print(query)
    # BUG FIX: honor the caller's *count* instead of the hard-coded 40.
    for tweet in tweepy.Cursor(api.search, q=query).items(count):
        result = {}
        result['screen_name'] = tweet.user.screen_name
        result['created_at'] = tweet.created_at
        result['text'] = tweet.text
        result['id'] = tweet.id_str
        # First expanded URL (if any) is treated as the video link.
        result['video'] = (tweet.entities.get('urls') or [{}])[0].get('expanded_url')
        results.append(result)
    return results
| amtsh/vdeos.me | app/models/twitter/Twitter.py | Twitter.py | py | 883 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.models.twitter.auth.get_api",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app.models.twitter.auth",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.models.twitter.auth.get_api",
"line_number": 19,
"usage_type": "call"
},
{
... |
37022607442 | import matplotlib
matplotlib.use('Agg')
import time
import argparse
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
import nltk
# nltk.download('stopwords')
from lib.print_info import print_debug, print_doing, print_memory, print_doing_in_task, print_header
from lib.read_write_file import save_csv, save_feather, save_file, save_pickle
from lib.read_write_file import load_csv, load_feather, load_pickle, read_train_test
from lib.prep_hdf5 import get_datatype, get_info_key_hdf5, add_dataset_to_hdf5
from lib.prepare_training import get_text_matrix, read_processed_h5, read_dataset, drop_col, load_train_test, add_feature, get_string_time
import lib.configs as configs
import features_list
from features_list import PREDICTORS_BASED, PREDICTORS_OVERFIT, PREDICTORS_GOOD, PREDICTORS_NOTCHECKED, PREDICTORS_TRY
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
# Global experiment configuration: fixed seed for reproducible splits/sampling.
SEED = 1988
yearmonthdate_string = get_string_time()
np.random.seed(SEED)
cwd = os.getcwd()
print ('working dir', cwd)
# The scripts assume they run from the codes/ directory; hop into it if needed.
if 'codes' not in cwd:
    default_path = cwd + '/codes/'
    os.chdir(default_path)
parser = argparse.ArgumentParser(
    description='translate',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', '--debug', default=2, type=int, choices=[0,1,2])
parser.add_argument('-f', '--frac', default=1, type=float)
parser.add_argument('-tm', '--trainmode', default='gbdt', type=str, choices=['gbdt', 'dart'])
# parser.add_argument('-o', '--option', default=0, type=int)
# Features treated as categorical by LightGBM.
CATEGORICAL = [
    'item_id', 'user_id', 'region', 'city', 'parent_category_name',
    'category_name', 'user_type', 'image_top_1', 'day', 'week', 'weekday',
    'cn_encoded', 'cty_encoded', 'img1_encoded', 'pcn_encoded',
    'reg_encoded', 'uid_encoded', 'uty_encoded',
]
# Raw columns dropped from the dataframe before training.
REMOVED_LIST = [
    'user_id', 'region', 'city', 'parent_category_name',
    'category_name', 'user_type', 'image_top_1',
    'param_1', 'param_2', 'param_3', 'title', 'description',
    'activation_date', 'image'
]
TARGET = ['deal_probability']
def main():
    """Parse CLI flags, then sweep (num_leave, max_depth) LightGBM configs and
    collect their train/val RMSE into the global LOCAL_TUNE_RESULT table."""
    global args, DEBUG, FRAC, PREDICTORS, TRAINMODE, PREDICTORS, LOCAL_TUNE_RESULT
    args = parser.parse_args()
    DEBUG = args.debug
    FRAC = args.frac
    TRAINMODE = args.trainmode
    # OPTION=args.option
    print_debug(DEBUG)
    # Debug runs read a much smaller pre-built feature set.
    if DEBUG:
        dir_feature = '../processed_features_debug2/'
    else:
        dir_feature = '../processed_features/'
    # boosting_list = ['gbdt', 'dart']
    boosting_list = ['gbdt']
    # Paired grids: num_leave_list[i] is tried with max_depth_list[i].
    num_leave_list = [7,9,15,31,63,128]
    max_depth_list = [3,4,7,15,31,64]
    model_list = []
    for i in range(len(num_leave_list)):
        num_leave = num_leave_list[i]
        max_depth = max_depth_list[i]
        for boosting_type in boosting_list:
            model_list = model_list + ['{}_{}_{}'.format(boosting_type,num_leave,max_depth)]
    # One results row per model id ("<boosting>_<leaves>_<depth>").
    LOCAL_TUNE_RESULT = pd.DataFrame(index=model_list,
        columns=['running_time','num_round','train','val'])
    if DEBUG: print(LOCAL_TUNE_RESULT)
    option = 1
    is_textadded = True
    PREDICTORS = PREDICTORS_BASED
    mat_filename = dir_feature + 'text_feature_kernel.pickle'
    print_header('Option {}'.format(option))
    print('is_textadded {} \n predictors {} \n mat filename {}'.format(is_textadded, PREDICTORS, mat_filename))
    # Iterate the grid in reverse so the largest configurations run first.
    for k in range(len(num_leave_list)):
        i = len(num_leave_list) - k - 1
        num_leave = num_leave_list[i]
        max_depth = max_depth_list[i]
        for boosting_type in boosting_list:
            DO(option, is_textadded, mat_filename, dir_feature, num_leave, max_depth, boosting_type)
    print_header('FINAL SUMMARY')
    print(LOCAL_TUNE_RESULT)
    LOCAL_TUNE_RESULT.to_csv('csv/tune_params.csv', index=True)
def DO(option, is_textadded, mat_filename, dir_feature, num_leave, max_depth, boosting_type):
    """Build the training matrices for one configuration and launch training."""
    tabular_predictors = get_tabular_predictors(PREDICTORS)
    X, y, test, full_predictors, predictors, testdex = prepare_training(
        mat_filename, dir_feature, tabular_predictors, is_textadded=is_textadded)
    categorical = get_categorical(predictors)
    predictors = get_predictors(predictors)
    train(X, y, num_leave, max_depth, full_predictors,
          categorical, predictors, boosting_type, option=option)
    # Free the large sparse matrices before the next configuration.
    gc.collect()
def train(X, y, num_leave, max_depth, full_predictors, categorical, predictors, boosting_type, option):
    """Train one LightGBM configuration and record its scores in LOCAL_TUNE_RESULT.

    :param X: sparse training matrix (tabular + optional text features)
    :param y: target vector
    :param num_leave: num_leaves value for this configuration
    :param max_depth: max_depth value for this configuration
    :param full_predictors: feature names including the text vocabulary
    :param categorical: names of categorical features
    :param predictors: tabular feature names (unused here; kept for call symmetry)
    :param boosting_type: 'gbdt' or 'dart'
    :param option: experiment id (bookkeeping only)
    """
    print_header("Training")
    start_time = time.time()
    print_doing_in_task('prepare dataset...')
    # Hold out 10% of the rows for early stopping / validation.
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, test_size=0.10, random_state=SEED)
    print('training shape: {} \n'.format(X.shape))
    print("Light Gradient Boosting Regressor")
    lgbm_params = {
        'task': 'train',
        'boosting_type': boosting_type,
        'objective': 'regression',
        'metric': 'rmse',
        'max_depth': max_depth,
        # BUG FIX: LightGBM's parameter is 'num_leaves'; the previous key
        # 'num_leave' was silently ignored, so the sweep never varied it.
        'num_leaves': num_leave,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'learning_rate': 0.1,
        'lambda_l1': 10,
        'max_bin': 512,
        'verbose': -1
    }
    print('params:', lgbm_params)
    lgtrain = lgb.Dataset(X_train, y_train,
                          feature_name=full_predictors,
                          categorical_feature=categorical)
    lgvalid = lgb.Dataset(X_valid, y_valid,
                          feature_name=full_predictors,
                          categorical_feature=categorical)
    # Debug runs use a short budget so the sweep finishes quickly.
    if DEBUG:
        num_boost_round = 300
        early_stopping_rounds = 10
    else:
        num_boost_round = 20000
        early_stopping_rounds = 100
    lgb_clf = lgb.train(
        lgbm_params,
        lgtrain,
        num_boost_round=num_boost_round,
        valid_sets=[lgtrain, lgvalid],
        valid_names=['train', 'valid'],
        early_stopping_rounds=early_stopping_rounds,
        verbose_eval=10
    )
    print_memory()
    print_header("Model Report")
    runnning_time = '{0:.2f}'.format((time.time() - start_time) / 60)
    num_boost_rounds_lgb = lgb_clf.best_iteration
    print_doing_in_task('fit val')
    val_rmse = '{0:.4f}'.format(np.sqrt(metrics.mean_squared_error(y_valid, lgb_clf.predict(X_valid))))
    print_doing_in_task('fit train')
    train_rmse = '{0:.4f}'.format(np.sqrt(metrics.mean_squared_error(y_train, lgb_clf.predict(X_train))))
    print_header("Model Report")
    print('boosting_type {}, num_leave {}, max_depth {}'.format(boosting_type, num_leave, max_depth))
    print('model training time: {0:.2f} mins'.format((time.time() - start_time) / 60))
    print('num_boost_rounds_lgb: {}'.format(lgb_clf.best_iteration))
    print('best rmse: {0:.4f}'.format(np.sqrt(metrics.mean_squared_error(y_valid, lgb_clf.predict(X_valid)))))
    # Record this configuration's row in the global results table.
    model = '{}_{}_{}'.format(boosting_type, num_leave, max_depth)
    LOCAL_TUNE_RESULT['running_time'][model] = runnning_time
    LOCAL_TUNE_RESULT['num_round'][model] = num_boost_rounds_lgb
    LOCAL_TUNE_RESULT['train'][model] = train_rmse
    LOCAL_TUNE_RESULT['val'][model] = val_rmse
def prepare_training(mat_filename, dir_feature, predictors, is_textadded):
    """Load tabular (and optionally text) features and return the sparse
    train/test matrices plus vocabulary and the test index.

    Returns (X, y, testing, tfvocab, tabular_column_names, testdex).
    """
    print_header('Load features')
    df, y, len_train, traindex, testdex = load_train_test(['item_id'], TARGET, DEBUG)
    del len_train; gc.collect()
    df = drop_col(df,REMOVED_LIST)
    # add features
    print_doing('add tabular features')
    # Each predictor lives in its own pickle; skip missing or duplicate ones.
    for feature in predictors:
        dir_feature_file = dir_feature + feature + '.pickle'
        if not os.path.exists(dir_feature_file):
            print('can not find {}. Please check'.format(dir_feature_file))
        else:
            if feature in df:
                print('{} already added'.format(feature))
            else:
                print_doing_in_task('adding {}'.format(feature))
                df = add_feature(df, dir_feature_file)
    print_memory()
    if is_textadded:
        # add text_feature
        print_doing_in_task('add text features')
        ready_df, tfvocab = get_text_matrix(mat_filename, 'all', 2, 0)
        # stack: tabular columns first, then the tf-idf text block; the text
        # matrix holds train rows first, then test rows.
        print_doing_in_task('stack')
        X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
        testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
        print_memory()
        print_doing_in_task('prepare vocab')
        # Feature names must follow the same order: tabular first, then text.
        tfvocab = df.columns.tolist() + tfvocab
        for shape in [X,testing]:
            print("{} Rows and {} Cols".format(*shape.shape))
        print("Feature Names Length: ",len(tfvocab))
    else:
        tfvocab = df.columns.tolist()
        testing = hstack([csr_matrix(df.loc[testdex,:].values)])
        X = hstack([csr_matrix(df.loc[traindex,:].values)]) # Sparse Matrix
    return X, y, testing, tfvocab, df.columns.tolist(), testdex
def get_tabular_predictors(predictors):
    """Echo and return the list of tabular feature names to load.

    Removed the no-op ``predictors = predictors`` self-assignment.
    """
    print('------------------------------------------------')
    print('load list:')
    for feature in predictors:
        print (feature)
    print('-- number of predictors:', len(predictors))
    return predictors
def get_predictors(predictors):
    """Echo and return the predictor list used for training."""
    print('------------------------------------------------')
    print('features:')
    for name in predictors:
        print(name)
    print('-- number of predictors:', len(predictors))
    return predictors
def get_categorical(predictors):
    """Return (and echo) the subset of *predictors* that appear in CATEGORICAL."""
    categorical = [name for name in predictors if name in CATEGORICAL]
    print('------------------------------------------------')
    print('categorical:')
    for name in categorical:
        print(name)
    print('-- number of categorical features:', len(categorical))
    return categorical
if __name__ == '__main__':
main() | vuhoangminh/Kaggle-Avito | codes/train_tune_params.py | train_tune_params.py | py | 10,360 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "lib.prepare_training.get_string_time",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "... |
74262849954 | from app.models import Booking,db,SCHEMA,environment
from sqlalchemy.sql import text
from datetime import datetime
from faker import Faker
fake = Faker()
def seed_bookings():
    """Insert six demo bookings tied to the seeded classes and users."""
    # (class_id, user_id, start, end) for each demo booking.
    slots = [
        (1, 3, datetime(2023, 7, 6, hour=9, minute=30),
               datetime(2023, 7, 6, hour=10, minute=30)),
        (2, 1, datetime(2023, 7, 6, hour=10, minute=30),
               datetime(2023, 7, 6, hour=11, minute=30)),
        (3, 2, datetime(2023, 7, 7, hour=9, minute=30),
               datetime(2023, 7, 7, hour=10, minute=30)),
        (4, 3, datetime(2023, 7, 7, hour=8, minute=30),
               datetime(2023, 7, 7, hour=9, minute=30)),
        (5, 1, datetime(2023, 7, 8, hour=12, minute=30),
               datetime(2023, 7, 8, hour=13, minute=30)),
        (6, 2, datetime(2023, 7, 8, hour=15, minute=30),
               datetime(2023, 7, 8, hour=16, minute=30)),
    ]
    bookings = [
        Booking(class_id=class_id, user_id=user_id,
                time_start=start, time_end=end)
        for class_id, user_id, start, end in slots
    ]
    for booking in bookings:
        db.session.add(booking)
    db.session.commit()
def undo_bookings():
    """Remove all seeded bookings (TRUNCATE in production, DELETE locally)."""
    if environment == "production":
        # CONSISTENCY/BUG FIX: wrap the raw SQL in text() like the else-branch;
        # SQLAlchemy 2.x rejects plain strings passed to Session.execute().
        db.session.execute(
            text(f"TRUNCATE table {SCHEMA}.bookings RESTART IDENTITY CASCADE;"))
    else:
        db.session.execute(text("DELETE FROM bookings"))
    db.session.commit()
| xuantien93/IronReligion | app/seeds/bookings.py | bookings.py | py | 1,731 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "faker.Faker",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "app.models.Booking",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
34007380981 | from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='home'),
path('submit',views.submit,name='submit'),
path('retrieve/',views.retrieve,name='retrieve'),
path('register',views.register,name="register"),
path('login',views.login,name='login'),
path('update/<int:id>/',views.update,name='update'),
path('delete/<int:id>',views.delete,name='delete'),
path('particular/<int:rollno>',views.particular,name='particular'),
path('show',views.show,name='show'),
path('dashboard',views.dashboard,name='dashboard'),
path('forgot',views.forgot,name='forgot'),
path('logout',views.logout,name='logout'),
] | satyampathakk/SIHPROJECT | SIHP/project/urls.py | urls.py | py | 682 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
32232085798 | import sys
sys.path.append("..")
import util.image_processing as impro
from util import mosaic
from util import data
import torch
def run_unet(img, net, size=128, use_gpu=True):
    """Run the segmentation net on one size x size image and return a uint8 mask."""
    arr = impro.image2folat(img, 3).reshape(1, 3, size, size)
    tensor = torch.from_numpy(arr)
    if use_gpu:
        tensor = tensor.cuda()
    pred = net(tensor)
    # Scale the [0, 1] prediction back to 8-bit and drop the batch/channel dims.
    mask = (pred.cpu().detach().numpy() * 255)
    return mask.reshape(size, size).astype('uint8')
def run_unet_rectim(img, net, size=128, use_gpu=True):
    """Split a (possibly non-square) frame in two, run the net on each half,
    and merge the two predicted masks back into one."""
    resized = impro.resize(img, size)
    half_a, half_b = impro.spiltimage(resized)
    mask_a = run_unet(half_a, net, size=128, use_gpu=use_gpu)
    mask_b = run_unet(half_b, net, size=128, use_gpu=use_gpu)
    return impro.mergeimage(mask_a, mask_b, resized)
def run_pix2pix(img, net, opt):
    """Run the pix2pix generator on *img* and return the reconstructed image.

    The 'HD' generator works at 512px, the standard one at 128px.
    """
    target_size = 512 if opt.netG == 'HD' else 128
    resized = impro.resize(img, target_size)
    tensor = data.im2tensor(resized, use_gpu=opt.use_gpu)
    return data.tensor2im(net(tensor))
def get_ROI_position(img, net, opt):
    """Locate the region of interest in *img*; returns (mask, x, y, area)."""
    raw_mask = run_unet_rectim(img, net, use_gpu=opt.use_gpu)
    mask = impro.mask_threshold(raw_mask, opt.mask_extend, opt.mask_threshold)
    x, y, halfsize, area = impro.boundingSquare(mask, 1)
    return mask, x, y, area
def get_mosaic_position(img_origin,net_mosaic_pos,opt):
mask = run_unet_rectim(img_origin,net_mosaic_pos,use_gpu = opt.use_gpu)
mask = impro.mask_threshold(mask,10,128)
x,y,size,area = impro.boundingSquare(mask,Ex_mul=1.5)
rat = min(img_origin.shape[:2])/128.0
x,y,size = int(rat*x),int(rat*y),int(rat*size)
return x,y,size | Synchronized2/DeepMosaics | models/runmodel.py | runmodel.py | py | 1,626 | python | en | code | 83 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "util.image_processing.image2folat",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "util.ima... |
43128390785 | """
__author__: Jiaming Shen
__description__: extract entity pair document level co-occurrence features
Input: 1) the sentence.json
Output: 1) eidDocPairCounts.txt, 2) eidDocPairPPMI.txt
"""
import sys
import json
import itertools
import math
from collections import defaultdict
import mmap
from tqdm import tqdm
def get_num_lines(file_path):
fp = open(file_path, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
if __name__ == "__main__":
if len(sys.argv) != 2:
print('Usage: python3 extractEidDocPairFeature.py -data')
exit(1)
corpusName = sys.argv[1]
outfilename= '../../data/' + corpusName + '/intermediate/eidDocPairCount.txt'
outfilename2 = '../../data/' + corpusName + '/intermediate/eidDocPairPPMI.txt'
infilename = '../../data/' + corpusName + '/intermediate/sentences.json'
articeID2eidlist = defaultdict(list)
eid2freq = defaultdict(int)
with open(infilename, 'r') as fin:
for line in tqdm(fin, total=get_num_lines(infilename), desc="Generate document-level cooccurrence features (pass 1)"):
sentInfo = json.loads(line)
articleId = sentInfo['articleId']
eidlist = [em['entityId'] for em in sentInfo['entityMentions']]
articeID2eidlist[articleId].extend(eidlist)
for eid in eidlist:
eid2freq[eid] += 1
eidPair2count = defaultdict(int)
eidTotalCount = 0
eidPairTotalCount = 0
for articleId in tqdm(articeID2eidlist, desc="Generate document-level coocurrence features (pass 2)"):
eidlist = articeID2eidlist[articleId]
eidTotalCount += len(eidlist)
for pair in itertools.combinations(eidlist,2):
eidPairTotalCount += 1
if pair[0] == pair[1]:
continue
eidPair2count[frozenset(pair)] += 1
with open(outfilename, 'w') as fout:
for ele in eidPair2count:
count = eidPair2count[ele]
ele = list(ele)
fout.write(str(ele[0]) + "\t" + str(ele[1]) + "\t" + str(count) + "\n")
with open(outfilename2, 'w') as fout:
for ele in eidPair2count:
p_x_y = eidPair2count[ele] / eidPairTotalCount
ele = list(ele)
p_x = 1.0 * eid2freq[ele[0]] / eidTotalCount
p_y = 1.0 * eid2freq[ele[1]] / eidTotalCount
raw_pmi = math.log(p_x_y / (p_x * p_y) )
if raw_pmi >= 0:
ppmi = raw_pmi
else:
ppmi = 0.0
fout.write(str(ele[0]) + "\t" + str(ele[1]) + "\t" + str(ppmi) + "\n") | mickeysjm/HiExpan | src/featureExtraction/extractEidDocPairFeature.py | extractEidDocPairFeature.py | py | 2,434 | python | en | code | 71 | github-code | 1 | [
{
"api_name": "mmap.mmap",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"... |
18075673144 | import numpy as np
import pandas as pd
from scipy.interpolate import make_interp_spline, BSpline
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.switch_backend('agg')
mpl.rcParams['svg.hashsalt'] = 42
np.random.seed(42)
#print (plt.style.available)
#https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html
styles = ['seaborn-notebook', '_classic_test', 'seaborn-pastel', 'seaborn-talk', 'seaborn', 'seaborn-poster', \
'seaborn-deep', 'seaborn-ticks', 'seaborn-paper', 'grayscale', 'seaborn-dark-palette',\
'seaborn-whitegrid', 'classic', 'ggplot', 'seaborn-colorblind', 'seaborn-bright', 'bmh',\
'Solarize_Light2', 'seaborn-white', 'fast', 'dark_background', 'fivethirtyeight', 'seaborn-dark', \
'seaborn-muted', 'seaborn-darkgrid', 'tableau-colorblind10']
plt.style.use('tableau-colorblind10')
data = pd.read_csv("./nvsk_std_minmax_.csv")
n = [50, 100, 200, 500, 1000, 2000, 5000, 10000]
ks = [16, 32, 64, 128, 256, 512]
task_grps = [('age',), ('gen',), ('gen2',), ('ext',), ('ope',), ('bsag',), ('sui',)]
task_grps_name = ["Age", "Gender", "Gender2", "Ext", "Ope", "BSAG", "Sui"]
task_grps_file = ["Age", "Gender", "Gender2", "Ext", "Ope", "BSAG", "Sui"]
task_grps_label = ["Age", "Gender", "Gender2", "Ext", "Ope", "BSAG", "Sui"]
task_grps_score = ["Pearson-r", "macro-f1", "macro-f1", "Pearson-r*", "Pearson-r*", "Pearson-r*", "macro-f1"]
task_color_dict = {
"age": ["#ff3333", "#33beff", "#33ff42", 1],
"gen": ["#00a630", "#4e00a6", "#a60000", 1],
"gen2": ["#ffd800","#00fffb", "#ff00a6", 1],
"ext": ["", "", "", np.sqrt(0.70*0.77)],
"ope": ["", "", "", np.sqrt(0.70*0.77)],
"bsag": ["", "", "", np.sqrt(0.70*0.77)],
"sui": ["", "", "", 1]
}
for grp_no in range(len(task_grps)):
for i in n[4:]:
temp_data = data[(data.Task.isin(task_grps[grp_no])) & (data.N >=1000 )]
y_max, y_min = [], []
for task in temp_data.Task.unique():
temp = temp_data[temp_data.Task == task]
y_max_nodr = np.max(temp_data.values[:, 14:15] + (temp_data.values[:, 15:16]/np.sqrt(10)))
y_min_nodr = np.min(temp_data.values[:, 14:15] - (temp_data.values[:, 15:16]/np.sqrt(10)))
if (task.lower() == "sui"):
y_max_temp = np.max(temp_data.values[:, 2:11:2] + (temp_data.values[:, 3:12:2]/np.sqrt(10)))
y_min_temp = np.min(temp_data.values[:, 2:11:2] - (temp_data.values[:, 3:12:2]/np.sqrt(10)))
else:
y_max_temp = np.max(temp_data.values[:, 2:13:2] + (temp_data.values[:, 3:14:2]/np.sqrt(10)))
y_min_temp = np.min(temp_data.values[:, 2:13:2] - (temp_data.values[:, 3:14:2]/np.sqrt(10)))
y_max_temp = np.max([y_max_temp, y_max_nodr])
y_min_temp = np.min([y_min_temp, y_min_nodr])
y_max.append(y_max_temp)
y_min.append(y_min_temp)
y_max = np.mean(y_max)
y_min = np.mean(y_min)
temp_data = data[(data.N == i) & (data.Task.isin(task_grps[grp_no]))]
fig, ax = plt.subplots(figsize=(15, 15))
#for j in temp_data.values:
j = temp_data.values
#print ((np.mean(j[:, 2:13:2], axis=0).reshape(-1,1)/1).flatten().tolist())
y = (np.mean(j[:, 2:13:2], axis=0)/task_color_dict[j[0,0]][-1]).tolist()
y_std = ((np.mean(j[:, 3:14:2], axis=0)/task_color_dict[j[0,0]][-1])/np.sqrt(10)).tolist()
y_nodr = (np.mean(j[:, 14:15], axis=0)/task_color_dict[j[0,0]][-1]).tolist()
y_nodr_std = (np.mean(j[:, 15:16], axis=0)/task_color_dict[j[0,0]][-1]/np.sqrt(10)).tolist()
y_ci = (np.mean(j[:, 16:17], axis=0)/task_color_dict[j[0,0]][-1]).tolist()
y_max = y_max/task_color_dict[j[0,0]][-1]
y_min = y_min/task_color_dict[j[0,0]][-1]
y_max = np.max([y_max, y_nodr[0]])
y_min = np.min([y_min, y_nodr[0]])
y = np.around(y, decimals=3).tolist()
y_std = np.around(y_std, decimals=3).tolist()
y_up = np.around(np.array(y) + np.array(y_std), 3).tolist()
y_down = np.around(np.array(y) - np.array(y_std), 3).tolist()
y_nodr = np.around(y_nodr, decimals=3).tolist()
y_nodr_std = np.around(y_nodr_std, decimals=3).tolist()
y_ci = np.around(y_ci, decimals=3).tolist()
y_max = np.around(y_max, decimals=3)
y_min = np.around(y_min, decimals=3)
print (y, y_up, y_down)
print (y_max, y_min)
'''
xnew = np.linspace(ks[0], ks[-1], 50)
spl = make_interp_spline(ks, y, k=3)
ynew = spl(xnew)
y_intersect = np.interp(ks, xnew, ynew)
print (f"ynew: {ynew}, {len(ynew)}")
print (f"yintersect: {y_intersect}")
'''
label = task_grps_label[grp_no]
#ax.errorbar(ks, y, y_std)
#ax.fill_between(ks, [y_nodr[0] - y_nodr_std[0]]*len(ks), [y_nodr[0] + y_nodr_std[0]]*len(ks), alpha=0.2, interpolate=False, color="yellow")
ax.fill_between(ks, y_down, y_up, alpha=0.3, interpolate=False, color="gray")
#ax.plot(ks, y_ci*len(ks), label=f"{label} 95% CI L", \
#color=task_color_dict[j[0]][2],\
# marker="", markersize=14, linestyle="-.", linewidth=6)
ax.plot(ks, y_nodr*len(ks), label=f"{label} no dr", \
#color=task_color_dict[j[0]][1], \
marker="", markersize=14, linestyle="dotted", linewidth=10)
ax.plot(ks, y, label=f"{label}", \
#color=task_color_dict[j[0]][0],\
marker="^", markersize=30, linestyle="-", linewidth=4)
ax.set_xscale('log')
ax.set_xticks(ks)
ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#ax.legend(fontsize=34, frameon=False, loc=0, markerfirst=False)
ax.set_title(f"{task_grps_name[grp_no]}; N = {i}", fontsize=60)
ax.set_xlabel("K", fontsize=60)
ax.set_ylabel(f"{task_grps_score[grp_no]}", fontsize=60)
if not (np.isnan(y_min) or np.isnan(y_max)): ax.set_ylim(y_min, y_max)
ax.tick_params(axis='both', which='major', labelsize=60)
ax.grid(axis='x')
fig.savefig(f"./formatted_results/new_graphs_svg/N_{i}_{task_grps_file[grp_no]}.svg", \
bbox_inches='tight', pad_inches=0.5, format='svg', dpi=1200)
fig.savefig(f"./formatted_results/new_graphs/N_{i}_{task_grps_file[grp_no]}.png", \
bbox_inches="tight", pad_inches=0.5)
fig.clf()
print (f"Saved: N_{i}_{task_grps_file[grp_no]}.png")
#break
#break
| adithya8/ContextualEmbeddingDR | nvskplotter.py | nvskplotter.py | py | 7,467 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name... |
8272993033 | # MAGIC CODEFORCES PYTHON FAST IO
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
# END OF MAGIC CODEFORCES PYTHON FAST IO
def solve():
INF = 10**20
x,n = map(int,input().split())
d = list(map(int,input().split()))
d.sort()
d.append(INF)
q = []
a = b = 0
ret = 0
for steps in range(n-1):
newElement = 0
for items in range(2):
if b == len(q) or d[a] < q[b]:
newElement += d[a]
a += 1
else:
newElement += q[b]
b += 1
ret += newElement
q.append(newElement)
print(ret)
solve()
| elsantodel90/cses-problemset | stick_divisions.py | stick_divisions.py | py | 852 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin.read",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number"... |
14467272098 | """
Dimensionality Reduction using Dense_AutoEncoder
input:data in shape (784,n), Dimension
output:data in shape (Dimension,n)
"""
from keras.layers import *
from keras.models import Model
def dense_AE(data,dim):
x_train = data / 255
x_train = x_train.reshape((x_train.shape[0], -1))
encoding_dim = dim
# input 28*28
input_img = Input(shape=(784,))
# Encoder
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(32, activation='relu')(encoded)
encoded = Dense(10, activation='relu')(encoded)
encoder_output = Dense(encoding_dim, activation='tanh')(encoded)
# Decoder
decoded = Dense(10, activation='relu')(encoder_output)
decoded = Dense(32, activation='relu')(decoded)
decoded = Dense(64, activation='relu')(decoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(784, activation='tanh')(decoded)
autoencoder = Model(inputs=input_img, outputs=decoded)
encoder = Model(inputs=input_img, outputs=encoder_output)
autoencoder.compile(optimizer='adam', loss='mse')
# train the model
autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True)
encoded_imgs = encoder.predict(x_train)
return encoded_imgs | Mateguo1/KMNIST | cluster/Dense_AutoEncoder.py | Dense_AutoEncoder.py | py | 1,300 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.Model",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 34,
"usage_type": "call"
}
] |
16736609895 | import random as r
import math
import numpy as np
import tkinter as tk
import heapq
import itertools
import random
class Player:
polygons = []
n = None
def __init__(self, n, score=0):
self.n = n
self.polygons = []
self.score = 0
def add_pol(self, pol):
self.polygons.append(pol)
class Point:
x = 0.0
y = 0.0
def __init__(self, x, y, player=None):
self.x = x
self.y = y
self.player = player
def distance(self, p):
return math.sqrt((self.x-p.x)**2 + (self.y-p.y)**2)
class Event:
x = 0.0
p = None
a = None
valid = True
def __init__(self, x, p, a, p0, p1):
self.x = x
self.p = p
self.a = a
self.p0 = p0
self.p1 = p1
self.valid = True
class Arc:
p = None
pprev = None
pnext = None
e = None
s0 = None
s1 = None
def __init__(self, p, player=None, a=None, b=None):
self.player = player
self.p = p
self.pprev = a
self.pnext = b
self.e = None
self.s0 = None
self.s1 = None
# Arc(p) définit la parabole associé au point p
class Segment:
start = None
end = None
done = False
p1 = None
p2 = None
score1 = False
score2 = False
def __init__(self, p, p1=None, p2=None):
self.start = p
self.end = None
self.done = False
self.p1 = p1
self.p2 = p2
def finish(self, p, edge=False):
if not edge:
if self.done:
return
self.end = p
self.done = True
else:
self.end = p
self.done = True
def point(self, p1, p2):
self.p1 = p1
self.p2 = p2
def hauteur(self, p):
if self.end.x - self.start.x == 0:
p_inter = Point(self.start.x, p.y)
if self.end.y - self.start.y == 0:
p_inter = Point(p.x, self.start.y)
elif self.end.x - self.start.x != 0:
a1 = (self.end.y - self.start.y) / (self.end.x - self.start.x)
# pente d'une droite perpendiculaire à self
a2 = -1/a1
# abscisse du point d'intersection des deux droites
x0 = (-a1*(self.start.x) + a2*(p.x) - (p.y) + (
self.start.y))/(a2 - a1)
y0 = a2*(x0 - p.x) + p.y
p_inter = Point(x0, y0)
return p.distance(p_inter)
def actu_score(self):
if self.p1 is not None and not self.score1:
self.p1.player.score += self.hauteur(self.p1)*(
self.start.distance(self.end))/2
self.score1 = True
if self.p2 is not None and not self.score1:
self.p2.player.score += self.hauteur(self.p2)*(
self.start.distance(self.end))/2
self.score2 = True
def p_edge(self, p):
# renvoie l'intersection du Segment self avec le bord qui est dépassé par le segment
if self.start.x - self.end.x != 0:
a = (self.end.y - self.start.y)/(self.end.x - self.start.x)
x2 = 500
y2 = a*(500 - self.start.x) + self.start.y
if 0 < y2 < 500 and p.x > self.p1.x:
return Point(x2, y2)
x2 = 0
y2 = a*(0 - self.start.x) + self.start.y
if 0 < y2 < 500 and p.x < self.p1.x:
return Point(x2, y2)
y2 = 500
x2 = (500)/a - self.start.y/a + self.start.x
if 0 < x2 < 500 and p.y > self.p1.y:
return Point(x2, y2)
y2 = 0
x2 = (-self.start.y)/a + self.start.x
if 0 < x2 < 500 and p.y < self.p1.y:
return Point(x2, y2)
elif p.y > 500:
return Point(p.x, 500)
else:
return Point(p.x, 0)
def inter_edge(self):
# regarde si un Segment dépasse un bord, si oui remet son extrémité sur le bord du canvas
if self.start.x < 0 or self.start.x > 500 or self.start.y < 0 or self.start.y > 500:
self.start = self.p_edge(self.start)
if self.end.x < 0 or self.end.x > 500 or self.end.y < 0 or self.end.y > 500:
self.end = self.p_edge(self.end)
class PriorityQueue:
def __init__(self):
self.pq = []
self.entry_finder = {}
self.counter = itertools.count()
def push(self, item):
# check for duplicate
if item in self.entry_finder:
return
count = next(self.counter)
# use x-coordinate as a primary key (heapq in python is min-heap)
entry = [item.x, count, item]
self.entry_finder[item] = entry
heapq.heappush(self.pq, entry)
def remove_entry(self, item):
entry = self.entry_finder.pop(item)
entry[-1] = 'Removed'
def pop(self):
while self.pq:
priority, count, item = heapq.heappop(self.pq)
if item != 'Removed':
del self.entry_finder[item]
return item
raise KeyError('pop from an empty priority queue')
def top(self):
while self.pq:
priority, count, item = heapq.heappop(self.pq)
if item != 'Removed':
del self.entry_finder[item]
self.push(item)
return item
raise KeyError('top from an empty priority queue')
def empty(self):
return not self.pq
| PaulWtlr/projet_info_2022 | DataType.py | DataType.py | py | 5,451 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "math.sqrt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_... |
5719940935 | # correlation between googleTrendsCovidValue and cdcValue in path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
path = "./data/cdcAnxiety_googleCovid_COMBINED.csv"
missingPath = "./data/cdcAnxiety_googleCovid_COMBINED_withMissing.csv"
cdcOGPath = "./data/cdc/cdc_US_anxiety_clean.csv"
# import csv
data = pd.read_csv(path)
missingData = pd.read_csv(missingPath)
cdcData = pd.read_csv(cdcOGPath)
# turn data from string to float
data["cdcValue"] = data["cdcValue"].astype(float)
data["googleTrendsCovidValue"] = data["googleTrendsCovidValue"].astype(float)
# normalize cdcValue and googleTrendsCovidValue
data["cdcValue"] = (data["cdcValue"] - data["cdcValue"].min()) / \
(data["cdcValue"].max() - data["cdcValue"].min())
data["googleTrendsCovidValue"] = (data["googleTrendsCovidValue"] - data["googleTrendsCovidValue"].min()) / \
(data["googleTrendsCovidValue"].max() -
data["googleTrendsCovidValue"].min())
# get correlation between googleTrendsCovidValue and cdcValue
corr = data.corr(method="pearson")
print(corr["cdcValue"]["googleTrendsCovidValue"])
# t-test on googleTrendsCovidValue and cdcValue
t, p = ttest_ind(data["cdcValue"], data["googleTrendsCovidValue"])
print(t, p)
# plot line graph of googleTrendsCovidValue over time
plt.figure(figsize=(20, 10))
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title("Google Trends Value Over Time (After Covid US Start)", fontsize=20)
plt.xlabel("Time", fontsize=16)
plt.ylabel("Google Trends", fontsize=16)
plt.plot(data["week"], data["googleTrendsCovidValue"])
plt.xticks(rotation=90)
plt.show()
# plot line graph of googleTrendsCovidValue over time from OG google data
plt.figure(figsize=(20, 10))
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title("Google Trends Value Over Time (Before Covid US Start)", fontsize=20)
plt.xlabel("Time", fontsize=16)
plt.ylabel("Google Trends", fontsize=16)
plt.plot(missingData["week"], missingData["googleTrendsCovidValue"])
plt.xticks(rotation=90)
plt.show()
# plot line graph of cdcData over time from OG cdc data
plt.figure(figsize=(20, 10))
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title("CDC Value Over Time (Missing Lockdown)", fontsize=20)
plt.xlabel("Time", fontsize=16)
plt.ylabel("CDC Value", fontsize=16)
plt.plot(cdcData["Time Period"], cdcData["Value"])
plt.show()
# plot the correlation on a scatter graph
# cdcValue on x-axis and googleTrendsCovidValue on y-axis
plt.figure(figsize=(20, 10))
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel("CDC", fontsize=16)
plt.ylabel("Google Trends", fontsize=16)
plt.scatter(data["cdcValue"], data["googleTrendsCovidValue"])
z = np.polyfit(data["cdcValue"], data["googleTrendsCovidValue"], 1)
p = np.poly1d(z)
plt.plot(data["cdcValue"], p(data["cdcValue"]), "r-o", markersize=0)
plt.show()
# plot the correlation on a line graph over time
# time on x-axis
# cdcValue on y-axis on the left and googleTrendsCovidValue on y-axis on the right
plt.figure(figsize=(20, 10))
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.plot(data["week"], data["cdcValue"], label="CDC")
plt.plot(data["week"], data["googleTrendsCovidValue"], label="Google Trends")
plt.xticks(rotation=90)
plt.legend()
plt.show()
| Xyloidzzz/covid-depression-social | Correlations/cdc_google_correlation.py | cdc_google_correlation.py | py | 3,257 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.stats.ttest_ind... |
71222805155 | # coding=utf-8
from functools import reduce
num = [1, 2, 3]
nums = [num for i in range(3)]
def all_aa(nums):
mfunc = lambda x, y: ["%s%s" % (i, j) for i in x for j in y]
# reduce(mfunc, nums)
# result1 = mfunc(nums[0], nums[1])
# result2 = mfunc(result1, nums[2])
dataresult = reduce(mfunc, nums)
print(dataresult)
print(len(dataresult))
def all_a(nums):
mfunc = lambda x, y: ["%s%s" % (i, j) for i in x for j in y if i != j if not str(j) in str(i)]
dataresult = reduce(mfunc, nums)
print(dataresult)
print(len(dataresult))
def creverse(nums, i, j):
while i < j:
# print(i, j)
temp = nums[i]
nums[i] = nums[j]
nums[j] = temp
i += 1
j -= 1
def csort(nums, n):
if n == 1: return
maxC = 0
maxI = 0
for i in range(n):
if nums[i] > maxC:
maxC = nums[i]
maxI = i
print(n-1, maxI, nums)
creverse(nums, 0, maxI)
creverse(nums, 0, n-1)
csort(nums, n - 1)
if __name__ == '__main__':
# all_a(nums)
num = [1, 6, 7, 4, 5, 2, 3]
# creverse(num, 2, 5)
csort(num, 7)
print(num)
| FYPYTHON/PathOfStudy | Algorithm/python_reduce.py | python_reduce.py | py | 1,157 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "functools.reduce",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 23,
"usage_type": "call"
}
] |
25730146075 | import shutil
import gradio as gr
import time
import yaml
import openai
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from prepare_vectordb import PrepareVectorDB
from typing import List, Tuple
import re
import ast
import html
from cfg import load_cfg
load_cfg()
with open("configs/app_config.yml") as cfg:
app_config = yaml.load(cfg, Loader=yaml.FullLoader)
# LLM configs
llm_engine = app_config["llm_config"]["engine"]
llm_system_role = app_config["llm_config"]["llm_system_role"]
persist_directory = app_config["directories"]["persist_directory"]
custom_persist_directory = app_config["directories"]["custom_persist_directory"]
embedding_model = OpenAIEmbeddings()
# Retrieval configs
k = app_config["retrieval_config"]["k"]
embedding_model_engine = app_config["embedding_model_config"]["engine"]
chunk_size = app_config["splitter_config"]["chunk_size"]
chunk_overlap = app_config["splitter_config"]["chunk_overlap"]
# # For documentation
# llm_engine = "Test"
# llm_system_role = "Test"
# embedding_model = "Test"
# persist_directory = "Test"
# custom_persist_directory = "Test"
# embedding_model_engine = "Test"
# chunk_size=1
# chunk_overlap=1
# k = 1
class ChatBot:
"""
Class representing a chatbot with document retrieval and response generation capabilities.
This class provides static methods for responding to user queries, handling feedback, and
cleaning references from retrieved documents.
"""
@staticmethod
def respond(chatbot: List, message: str, data_type: str = "Preprocessed", temperature: float = 0.0) -> Tuple:
"""
Generate a response to a user query using document retrieval and language model completion.
Parameters:
- chatbot (List): List representing the chatbot's conversation history.
- message (str): The user's query.
- data_type (str): Type of data used for document retrieval ("Preprocessed" or "Uploaded").
- temperature (float): Temperature parameter for language model completion.
Returns:
Tuple: A tuple containing an empty string, the updated chat history, and references from retrieved documents.
"""
if data_type == "Preprocessed" or data_type == [] or data_type == None:
# directories
vectordb = Chroma(persist_directory=persist_directory,
embedding_function=embedding_model)
elif data_type == "Uploaded":
vectordb = Chroma(persist_directory=custom_persist_directory,
embedding_function=embedding_model)
docs = vectordb.similarity_search(message, k=k)
question = "# User new question:\n" + message
references = ChatBot.clean_references(docs)
retrieved_docs_page_content = [
str(x.page_content)+"\n\n" for x in docs]
retrieved_docs_page_content = "# Retrieved content:\n" + \
str(retrieved_docs_page_content)
prompt = retrieved_docs_page_content + "\n\n" + question
response = openai.ChatCompletion.create(
engine=llm_engine,
messages=[
{"role": "system", "content": llm_system_role},
{"role": "user", "content": prompt}
],
temperature=temperature,
stream=False
)
chatbot.append(
(message, response["choices"][0]["message"]["content"]))
time.sleep(2)
return "", chatbot, references
@staticmethod
def feedback(data: gr.LikeData):
"""
Process user feedback on the generated response.
Parameters:
data (gr.LikeData): Gradio LikeData object containing user feedback.
"""
if data.liked:
print("You upvoted this response: " + data.value)
else:
print("You downvoted this response: " + data.value)
@staticmethod
def clean_references(documents: List) -> str:
"""
Clean and format references from retrieved documents.
Parameters:
documents (List): List of retrieved documents.
Returns:
str: A string containing cleaned and formatted references.
Example:
```python
references = ChatBot.clean_references(retrieved_documents)
```
"""
server_url = "http://localhost:8000"
documents = [str(x)+"\n\n" for x in documents]
markdown_documents = ""
counter = 1
for doc in documents:
# Extract content and metadata
content, metadata = re.match(
r"page_content=(.*?)( metadata=\{.*\})", doc).groups()
metadata = metadata.split('=', 1)[1]
metadata_dict = ast.literal_eval(metadata)
# Decode newlines and other escape sequences
content = bytes(content, "utf-8").decode("unicode_escape")
# Replace escaped newlines with actual newlines
content = re.sub(r'\\n', '\n', content)
# Remove special tokens
content = re.sub(r'\s*<EOS>\s*<pad>\s*', ' ', content)
# Remove any remaining multiple spaces
content = re.sub(r'\s+', ' ', content).strip()
# Decode HTML entities
content = html.unescape(content)
# Replace incorrect unicode characters with correct ones
content = content.encode('latin1').decode('utf-8', 'ignore')
# Remove or replace special characters and mathematical symbols
# This step may need to be customized based on the specific symbols in your documents
content = re.sub(r'â', '-', content)
content = re.sub(r'â', '∈', content)
content = re.sub(r'Ã', '×', content)
content = re.sub(r'ï¬', 'fi', content)
content = re.sub(r'â', '∈', content)
content = re.sub(r'·', '·', content)
content = re.sub(r'ï¬', 'fl', content)
pdf_url = f"{server_url}/{os.path.basename(metadata_dict['source'])}"
# Append cleaned content to the markdown string with two newlines between documents
markdown_documents += f"Reference {counter}:\n" + content + "\n\n" + \
f"Filename: {os.path.basename(metadata_dict['source'])}" + " | " +\
f"Page number: {str(metadata_dict['page'])}" + " | " +\
f"[View PDF]({pdf_url})" "\n\n"
counter += 1
return markdown_documents
class UISettings:
"""
Utility class for managing UI settings.
This class provides static methods for toggling UI components, such as a sidebar.
Example:
```python
ui_state = True
updated_ui, new_state = UISettings.toggle_sidebar(ui_state)
```
"""
@staticmethod
def toggle_sidebar(state):
"""
Toggle the visibility state of a UI component.
Parameters:
state: The current state of the UI component.
Returns:
Tuple: A tuple containing the updated UI component state and the new state.
Example:
```python
ui_state = True
updated_ui, new_state = UISettings.toggle_sidebar(ui_state)
```
"""
state = not state
return gr.update(visible=state), state
class GradioUploadFile:
"""
Utility class for handling file uploads and processing.
This class provides static methods for checking directories and processing uploaded files
to prepare a VectorDB.
Example:
```python
files_dir = ['/path/to/uploaded/files']
chatbot_instance = Chatbot() # Assuming Chatbot is an existing class
GradioUploadFile.process_uploaded_files(files_dir, chatbot_instance)
```
"""
@staticmethod
def check_directory(directory_path):
"""
Check if a directory exists, and if it does, remove it and create a new one.
Parameters:
directory_path (str): The path of the directory to be checked and recreated.
Example:
```python
GradioUploadFile.check_directory("/path/to/directory")
```
"""
if os.path.exists(directory_path):
shutil.rmtree(directory_path)
os.makedirs(directory_path)
@staticmethod
def process_uploaded_files(files_dir: List, chatbot: List) -> Tuple:
"""
Process uploaded files to prepare a VectorDB.
Parameters:
- files_dir (List): List of paths to the uploaded files.
- chatbot: An instance of the chatbot for communication.
Returns:
Tuple: A tuple containing an empty string and the updated chatbot instance.
Example:
```python
files_dir = ['/path/to/uploaded/files']
chatbot_instance = Chatbot() # Assuming Chatbot is an existing class
GradioUploadFile.process_uploaded_files(files_dir, chatbot_instance)
```
"""
GradioUploadFile.check_directory(custom_persist_directory)
prepare_vectordb_instance = PrepareVectorDB(data_directory=files_dir,
persist_directory=custom_persist_directory,
embedding_model_engine=embedding_model_engine,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
prepare_vectordb_instance.prepare_and_save_vectordb()
chatbot.append(
("I just uploaded some documents.", "Uploaded files are ready. Please ask your question"))
return "", chatbot
| Farzad-R/LLM-playground | RAG-GPT/gradio_app_utils.py | gradio_app_utils.py | py | 10,047 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cfg.load_cfg",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "langchain.embeddings.open... |
29860315580 | import collections
def get_matrix(path_to_file="input.txt"):
matrix_str = ""
with open(path_to_file,"r") as file:
count = file.readline()
matrix_str = file.read()
matrix = []
lines = matrix_str.split("\n")
for line in lines:
elements = line.split(" ")
matrix.append(elements)
return matrix
def is_acyclic(matrix, root=0):
nodes = range(0, len(matrix))
# посещенные вершины
visited = collections.deque()
visited.append(root)
# массив, где лежит предыдущая вершина для каждой вершины(из которой в нее пришли)
previous_nodes = [None for i in range(0, len(matrix))]
while len(visited) != 0:
current_node = visited.popleft()
# проверяем, есть ли смежная нашей и не посещенная вершина
for next_node in nodes:
# индекс следующей вершины должен быть больше предыдущей
if current_node <= next_node:
if previous_nodes[next_node] is None:
if matrix[current_node][next_node] == "1":
visited.append(next_node)
previous_nodes[next_node] = current_node
#print(str(current_node+1)+" "+str(next_node+1))
#print(previous_nodes)
else:
if matrix[current_node][next_node] == "1":
#print(str(current_node+1)+" "+str(next_node+1))
#print(previous_nodes)
# если вершина была посещена и мы можем в нее пойти, то мы встретили цикл
cycle = get_cycle(matrix, previous_nodes, current_node, next_node, root)
return False, cycle
return True, None
def get_cycle(matrix, previous_nodes, current_node, next_node, root):
cycle = []
# возвращаемся по списку посещенных вершин и собираем все "предыдущие" вершины
previous_nodes[next_node] = current_node
node = next_node
cycle.append(node)
while node != root:
node = previous_nodes[node]
cycle.append(node)
cycle.sort()
# дабы избавиться от хвоста в цикле, удаляю все вершины до той,
# из которой впервые можно перейти в "последнюю" вершину цикла
i = 0
for i in range(0, len(cycle)):
if matrix[cycle[i]][next_node] == "1":
break
cycle = cycle[i:]
return cycle
matrix = get_matrix()
graph_is_acyclic, cycle = is_acyclic(matrix)
if graph_is_acyclic:
print("A")
else:
print("N " + str(cycle))
| KeiserKholod/Cycles_BFS_Python | KA1.py | KA1.py | py | 3,093 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 19,
"usage_type": "call"
}
] |
11286756056 | import argparse
import cv2
import minerl
import numpy as np
import plotly.express as px
import random
import streamlit as st
from pathlib import Path
import time
@st.cache(suppress_st_warning=True, allow_output_mutation=True, max_entries=1)
def get_timeseries_actions_fig(actions_wide, action_labels, rewards):
st.warning("Cache miss: `get_timeseries_actions_fig` ran")
fig = px.imshow(
actions_wide,
x = list(range(actions_wide.shape[1])),
y = action_labels,
height=300,
)
fig.update_traces(dict(
showscale=False,
coloraxis=None,
colorscale=[(0, "#FFF"), (1, "#3f51b5")],
), selector={'type':'heatmap'})
fig.update_layout(
margin=dict(l=0, r=20, t=20, b=0),
)
return fig
class App:
    """Streamlit app for browsing MineRL trajectories frame by frame."""

    def __init__(self):
        # self.data_frames = []
        pass

    # NOTE(review): st.cache on an instance method hashes `self` as part of
    # the cache key — confirm this does not defeat caching across reruns.
    @st.cache(suppress_st_warning=True, allow_output_mutation=True, max_entries=1)
    def get_trajectory_names(self, data_dir: Path):
        """Return sorted '<env>/<stream>' names for every trajectory dir."""
        st.warning("Cache miss: `get_trajectory_names` ran")
        traj_dirs = sorted([x for x in data_dir.glob("*/*") if x.is_dir()])
        traj_names = [str(Path(x.parent.stem) / x.stem) for x in traj_dirs]
        return traj_names

    @st.cache(suppress_st_warning=True, allow_output_mutation=True, max_entries=1)
    def load_data(self, data_dir, env_name, stream_name):
        """Load all frames of one trajectory (with metadata) into a list."""
        st.warning("Cache miss: `load_data` ran")
        minerl_data = minerl.data.make(env_name, data_dir=data_dir)
        data_frames = list(minerl_data.load_data(stream_name, include_metadata=True))
        return data_frames

    def get_timeseries_reward(self, data_frames):
        # frame[2] is the reward element of each (state, action, reward, ...) tuple
        rewards = [float(frame[2]) for frame in data_frames]
        return rewards

    def get_timeseries_actions(self, data_frames):
        """Flatten per-frame action dicts into a (n_actions, n_frames) array.

        Array-valued actions become 0/1 "any active" flags, string actions
        become 0 for "none" / 1 otherwise, integer actions keep their value.
        """
        action_labels = sorted([key for key in data_frames[0][1].keys()])
        actions_timeseries_wide = []
        for key in action_labels:
            action_sample = data_frames[0][1][key]
            if type(action_sample) == np.ndarray:
                actions_timeseries_wide.append([(1 if np.any(frame[1][key]) else 0) for frame in data_frames])
            elif type(action_sample) == np.str_:
                actions_timeseries_wide.append([(0 if frame[1][key] == "none" else 1) for frame in data_frames])
            elif type(action_sample) == np.int64:
                actions_timeseries_wide.append([float(frame[1][key]) for frame in data_frames])
            else:
                raise Exception(f"Action type not supported! `{action_sample}` of type `{type(action_sample)}`")
        return np.array(actions_timeseries_wide), action_labels

    def run(self, data_dir):
        """Render the full page: selectors, charts, and per-frame details."""
        st.set_page_config(page_title="MineRL Trajectory Viewer", page_icon=None, layout='wide')
        st.title('MineRL Trajectory Viewer')
        col1, col2, col3, col4 = st.columns([6,2,2,2])

        with col1:
            data_dir = Path(data_dir)
            st.write(f"Data dir: `{data_dir}`")

            # Select trajectory
            traj_names = self.get_trajectory_names(data_dir)
            option = st.selectbox(
                'Select a trajectory:',
                traj_names)
            chosen_path = data_dir / option

            env_name = str(Path(chosen_path).parent.stem)
            stream_name = Path(chosen_path).stem
            data_frames = self.load_data(data_dir, env_name, stream_name)

            # TODO: Display the video!
            # video_frames = np.stack([frame[0]["pov"].transpose(2,0,1) for frame in self.data_frames])
            # video_frames = np.stack([frame[0]["pov"] for frame in self.data_frames])
            # st.write(video_frames.shape)
            # tmp_vid_path = "vid.avi"
            # fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            # video = cv2.VideoWriter(tmp_vid_path, fourcc, 20.0, video_frames[0].shape[:2])
            # for image in video_frames:
            #     video.write(image)
            # video.release()
            # print("Video saved to", tmp_vid_path)
            # st.video(tmp_vid_path)
            # # Display GIF / video
            # st.write("## Playback")
            # video_path = str(Path(chosen_path) / "recording_.mp4")
            # st.video(video_frames)
            # See Streamlit issue:
            # https://github.com/streamlit/streamlit/pull/1583

            # Select current frame
            max_frame = len(data_frames) - 1
            frame_idx = st.slider("Select frame:", 0, max_frame, 0)
            current_frame = data_frames[frame_idx]
            state, action, reward, next_state, done, meta = current_frame

            # Aggregate plots
            actions_wide, action_labels = self.get_timeseries_actions(data_frames)
            rewards = self.get_timeseries_reward(data_frames)
            fig = get_timeseries_actions_fig(actions_wide, action_labels, rewards)
            st.write("### Actions over time")
            st.plotly_chart(fig, use_container_width=True)
            st.write("### Rewards over time")
            st.area_chart(rewards, height=100)

        with col2:
            st.write("### Actions")
            st.write(action)
            st.write("### Reward")
            st.write(f"`{reward}`")
            st.write("### Done")
            st.write(done)
            st.write("### Metadata")
            st.write(meta)

        with col3:
            st.write("### State")
            st.image(state["pov"], use_column_width=True, caption="Current State POV")
            # Show all non-image state entries below the POV image.
            for key, val in state.items():
                if key == "pov":
                    continue
                st.write(f"#### {key}")
                st.write(val)

        with col4:
            st.write("### Next State")
            st.image(next_state["pov"], use_column_width=True, caption="Next State POV")
            for key, val in next_state.items():
                if key == "pov":
                    continue
                st.write(f"#### {key}")
                st.write(val)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Visualize MineRL trajectories')
    # NOTE(review): the help text mentions %(default)s but no default is set
    # and the argument is required — consider dropping that phrase.
    parser.add_argument("-d", "--data-dir", required=True,
                        help="Root directory containing trajectory data. Default: %(default)s")
    options = parser.parse_args()

    app = App()
app.run(options.data_dir) | JunShern/minerl-trajectory-viewer | main.py | main.py | py | 6,347 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.warning",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "plotly.express.imshow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "streamlit.cach... |
75143000674 | import os
import imageio.v3 as iio
import imageio
import torch
import argparse
import torchvision
import matplotlib.pyplot as plt
import numpy as np
from torchvision import datasets, transforms
from torchvision.transforms.functional import hflip, vflip
from utils import initialize_equi_model, set_seed
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cifar10_classes = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
def get_trainsets(args, input_size):
    """Download (if needed) and return the CIFAR10 training set.

    :param args: parsed CLI arguments (unused here; kept for signature
        compatibility with callers)
    :param input_size: side length images are resized to
    :return: torchvision CIFAR10 training dataset
    """
    # Transformations (crop/normalize intentionally left disabled, as before)
    data_transforms = {
        'train': transforms.Compose([
            transforms.Resize(input_size),
            # transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    # Datasets and Dataloaders
    print("Initializing CIFAR10 Datasets and Dataloaders...")
    # Bug fix: the test set used to be downloaded here as well (to an
    # inconsistent './data' root) and then silently discarded — only the
    # training set was ever returned.
    trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=data_transforms['train'])
    return trainset
def save_features(args):
    """Save one input image and its feature maps, under 4 rotations, as PNGs.

    Builds two feature extractors — one wrapped with the symmetry group
    (`args.model_symmetry`), one plain — runs both on 90°-rotated copies of
    a fixed CIFAR10 sample, and writes input / equivariant / non-equivariant
    images into `args.saved_features_dir` for `create_gifs` to consume.
    """
    set_seed(args.seed)
    # Two identical backbones: one equivariant, one without a symmetry group.
    eq_model, input_size = initialize_equi_model(args.model, device, 2, feature_extract=True, use_pretrained=True,
                                                 symmetry_group=args.model_symmetry, visualize_features=True)
    noneq_model, input_size = initialize_equi_model(args.model, device, 2, feature_extract=True, use_pretrained=True,
                                                    symmetry_group=None, visualize_features=True)
    trainset = get_trainsets(args, input_size=224)

    # get a sample from trainset (fixed index so the GIF is reproducible)
    input_image, label = trainset[77]
    input_image = input_image.unsqueeze(dim=0)
    print(f"label: {cifar10_classes[label]}")

    # get rotated images (k=3-i gives decreasing counter-clockwise rotations)
    rotated_input_images = []
    for i in range(4):
        rotated_input_images.append(torch.rot90(input_image, k=3-i, dims=(2, 3)))

    # get equivariant features
    eq_output_features = []
    for i in range(4):
        eq_output_features.append(eq_model(rotated_input_images[i]))

    # get nonequivariant features
    noneq_output_features = []
    for i in range(4):
        noneq_output_features.append(noneq_model(rotated_input_images[i]))

    # plot equivariant and nonequivariant features
    # (only the first 3 feature channels are shown, rendered as RGB)
    for i in range(4):
        plt.axis('off')
        plt.imshow(torchvision.utils.make_grid(rotated_input_images[i][0]).permute(1, 2, 0))
        # plt.show()
        plt.savefig(args.saved_features_dir + "input_" + str(i) + ".png", bbox_inches='tight', pad_inches=0)
        plt.imshow(torchvision.utils.make_grid(eq_output_features[i][0, :3]).permute(1, 2, 0))
        plt.savefig(args.saved_features_dir + "eq_features_" + str(i) + ".png", bbox_inches='tight', pad_inches=0)
        plt.imshow(torchvision.utils.make_grid(noneq_output_features[i][0, :3]).permute(1, 2, 0))
        plt.savefig(args.saved_features_dir + "noneq_features_" + str(i) + ".png", bbox_inches='tight', pad_inches=0)
def create_gifs(args):
    """Assemble the PNGs written by `save_features` into one animated GIF.

    Each GIF frame is [equivariant features | input | non-equivariant
    features] concatenated side by side, one frame per rotation.
    """
    frames = []
    for i in range(4):
        input_img = iio.imread(args.saved_features_dir + "input_" + str(i) + ".png")
        eq_features_img = iio.imread(args.saved_features_dir + "eq_features_" + str(i) + ".png")
        noneq_features_img = iio.imread(args.saved_features_dir + "noneq_features_" + str(i) + ".png")
        frames.append(np.concatenate([eq_features_img, input_img, noneq_features_img], axis=1))
    # Bug fixes: the output directory was hard-coded (ignoring
    # args.saved_gifs_dir), and the filename reused the stale loop variable
    # `i` after the loop (always producing 'features_3.gif').
    imageio.mimsave(args.saved_gifs_dir + 'features.gif', frames, duration=1)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Parser for feature visualizations")
    parser.add_argument("--seed", default=0, type=int)
    parser.add_argument("--model", default="alexnet", type=str)
    parser.add_argument("--dataset", default="CIFAR10", type=str)
    parser.add_argument("--model_symmetry", default="rot90", type=str)
    parser.add_argument('--saved_features_dir', default="./feature_visualizations/saved_features/", type=str)
    parser.add_argument('--saved_gifs_dir', default="./feature_visualizations/saved_gifs/", type=str)
    args = parser.parse_args()

    # save equivariant and non-equivariant features
    save_features(args)
    # create gifs
    create_gifs(args)
create_gifs(args)
| basusourya/lambda_equitune | EquiClassification/feature_visualization.py | feature_visualization.py | py | 5,072 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torchvision.tran... |
25626976499 | from django import forms
from .models import ContinuingEducationLog, UserProfile
from django.contrib.auth.models import User
class CELogForm(forms.ModelForm):
    """ModelForm for entering a continuing-education log record."""
    class Meta:
        model = ContinuingEducationLog
        fields = ('required_CE', 'oversight_entity', 'repeat', 'hours', 'date_completed', 'top_items_learned',)
class UserForm(forms.ModelForm):
    """Edit basic account fields on the built-in User model.

    Bug fix: this was declared as a plain ``forms.Form``, whose inner
    ``Meta`` class is silently ignored, so no fields were ever generated.
    ``ModelForm`` honours ``Meta.model``/``Meta.fields``.
    """
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email',)

    def __init__(self, *args, **kwargs):
        # Callers pass a `user` kwarg; pop it so the base __init__ does not
        # receive an unexpected keyword (the value itself is unused here).
        user = kwargs.pop('user')
        super(UserForm, self).__init__(*args, **kwargs)
class UserProfileForm(forms.ModelForm):
    """ModelForm for contact, emergency-contact, and personal profile fields."""
    class Meta:
        model = UserProfile
        fields = ('extension', 'title', 'address', 'city', 'state', 'zip', 'home_phone', 'mobile_phone',
                  'birth_date', 'emergency_contact_first_name', 'emergency_contact_last_name', 'emergency_contact_phone',
                  'emergency_contact_doctor', 'emergency_contact_doctor_phone',)

    def __init__(self, *args, **kwargs):
        # `user` is popped to keep the base __init__ happy; it is not used.
        user = kwargs.pop('user')
        super(UserProfileForm, self).__init__(*args, **kwargs)
| lydia-rodriguez/compliance | compliance/forms.py | forms.py | py | 1,024 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.ContinuingEducationLog",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "d... |
6912557640 | #!/usr/bin/env python3
"""
Benchmark on calling C methods for FASTCALL.
http://bugs.python.org/issue29263
Created at 2017-01-14 by INADA Naoki.
"""
import pyperf
runner = pyperf.Runner()

# Each benchmark times a single C-method call; `duplicate=100` unrolls the
# statement 100x per loop iteration to amortize loop overhead.
runner.timeit('b"".decode()',
              "empty_bytes.decode()",
              setup="empty_bytes = b''",
              duplicate=100)

runner.timeit('b"".decode("ascii")',
              "empty_bytes.decode('ascii')",
              setup="empty_bytes = b''",
              duplicate=100)

runner.timeit("[0].count(0)",
              "my_list.count(0)",
              setup="my_list = [0]",
              duplicate=100)
| vstinner/pymicrobench | bench_fastcall_c_method.py | bench_fastcall_c_method.py | py | 611 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "pyperf.Runner",
"line_number": 12,
"usage_type": "call"
}
] |
40823649994 | import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
import torch.utils.model_zoo as model_zoo
import numpy as np
import torch.utils.data as data_utils
import math
from collections import OrderedDict
def train(trainloader, net, criterion, optimizer, device):
    """Train `net` for 10 epochs over `trainloader`.

    :param trainloader: iterable of (images, labels) mini-batches
    :param net: model producing one logit per sample
    :param criterion: loss; labels are reshaped to (N, 1) floats as
        expected by BCEWithLogitsLoss
    :param optimizer: optimizer updating net's parameters
    :param device: torch device to move batches to
    """
    for epoch in range(10):  # loop over the dataset multiple times
        start = time.time()
        running_loss = 0.0
        for i, (images, labels) in enumerate(trainloader):
            images = images.to(device)
            labels = labels.view(-1, 1).to(device)

            optimizer.zero_grad()
            outputs = net(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 5 == 4:  # print every 5 mini-batches
                end = time.time()
                # Bug fix: the loss is accumulated over 5 batches, so average
                # over 5 (it was erroneously divided by 100, under-reporting
                # the loss 20x).
                print('[epoch %d, iter %5d] loss: %.3f eplased time %.3f' %
                      (epoch + 1, i + 1, running_loss / 5, end - start))
                start = time.time()
                running_loss = 0.0

    print('Finished Training')
def test(testloader, net, device):
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
images = images.to(device)
labels = labels.view(-1, 1).to(device)
outputs = net(images)
predicted = (outputs.data > 0).float()
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (
100 * correct / total))
def main():
    """Load control/PD volumes, build 80/20 splits, and train AlexNet.

    Expects 'data.npy' (controls, label 0) and 'PD_data.npy' (patients,
    label 1) in the working directory; only channels 4:6 are kept.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # device = torch.device('cpu')
    model_urls = {
        'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    }
    # Control group: keep 3 channels so the data matches AlexNet's RGB input.
    data_control = np.load('data.npy')
    data_control = data_control[:, 4:7, :, :]
    data_control = torch.from_numpy(data_control.astype(float)).float()
    y_control = torch.zeros(data_control.size(0), dtype=torch.float)
    # 80/20 split by position — NOTE(review): no shuffling before the split.
    data_control_train = data_control[0:int(data_control.size(0)*0.8)]
    data_control_test = data_control[int(data_control.size(0)*0.8):]
    y_control_train = y_control[0:int(data_control.size(0)*0.8)]
    y_control_test = y_control[int(data_control.size(0)*0.8):]

    # PD group: same channel selection, label 1.
    data_pd = np.load('PD_data.npy')
    data_pd = data_pd[:, 4:7, :, :]
    data_pd = torch.from_numpy(data_pd.astype(float)).float()
    y_pd = torch.ones(data_pd.size(0), dtype=torch.float)
    data_pd_train = data_pd[0:int(data_pd.size(0)*0.8)]
    data_pd_test = data_pd[int(data_pd.size(0)*0.8):]
    y_pd_train = y_pd[0:int(data_pd.size(0)*0.8)]
    y_pd_test = y_pd[int(data_pd.size(0)*0.8):]

    # Concatenate the two groups into combined train/test tensors.
    data_train = torch.cat((data_control_train, data_pd_train), 0)
    y_train = torch.cat((y_control_train, y_pd_train), 0)
    data_test = torch.cat((data_control_test, data_pd_test), 0)
    y_test = torch.cat((y_control_test, y_pd_test), 0)

    trainset = data_utils.TensorDataset(data_train, y_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=10,
                                              shuffle=True)

    testset = data_utils.TensorDataset(data_test, y_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)

    # Single-logit AlexNet head for binary classification; trained from scratch.
    net = torchvision.models.alexnet(pretrained=False, num_classes=1)
    net = net.to(device)
    # net.load_state_dict(model_zoo.load_url(model_urls['alexnet']), strict=False)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    train(trainloader, net, criterion, optimizer, device)
    test(testloader, net, device)
| htcao/AI_project | train.py | train.py | py | 3,891 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 42,
... |
9925796207 | import json
import logging
from typing import Optional
from ph4_walkingpad.profile import Profile, calories_walk2_minute, calories_rmrcb_minute
from ph4_walkingpad.reader import reverse_file
logger = logging.getLogger(__name__)
class StatsAnalysis:
    """Analyse WalkingPad stats logs: segment walks by speed and compute calories."""

    def __init__(self, profile=None, profile_file=None, stats_file=None):
        self.profile_file = profile_file   # optional JSON file with a user Profile
        self.stats_file = stats_file       # newline-delimited JSON stats log
        self.profile = profile             # Profile object (or None until loaded)
        self.last_record = None            # newest record seen (file is read in reverse)
        self.loaded_margins = []           # list of margin lists from load_stats()

    def load_profile(self):
        """Load the user profile, falling back to an average-person default."""
        self.profile = Profile(age=30, male=True, weight=80, height=180)  # some random average person
        if self.profile_file:
            with open(self.profile_file, 'r') as fh:
                dt = json.load(fh)
                self.profile = Profile.from_data(dt)

    def load_stats(self, limit=None, collect_details=False):
        """Parse up to `limit` walks from the stats file into loaded_margins."""
        for margins in self.parse_stats(limit, collect_details=collect_details):
            self.loaded_margins.append(margins)

    def feed_records(self):
        """Feed records from stats file in reversed order, one record per entry"""
        if not self.stats_file:
            return
        with open(self.stats_file) as fh:
            reader = reverse_file(fh)
            for line in reader:
                if line is None:
                    return
                if not line:
                    continue
                try:
                    js = json.loads(line)
                except Exception as e:
                    # skip unparseable lines (e.g. a partially written record)
                    continue
                yield js

    def analyze_records_margins(self, records, limit=None, collect_details=False):
        """Yield lists of 'margin' records — speed-change boundaries — per walk.

        Records arrive newest-first. A walk ends ("breaking") when any
        cumulative counter decreases or wall time and recorded time drift
        apart by more than 5 minutes. Segment aggregates (_segment_*) are
        attached to the previous margin; with collect_details=True the raw
        records of each segment are stored under '_records'.
        """
        # Load margins - boundary speed changes. In order to determine segments of the same speed.
        last_rec = None
        last_rec_diff = None
        in_record = False
        num_done = 0
        margins = []
        sub_records = []

        for js in records:
            if not self.last_record:
                self.last_record = js
            if js['speed'] != 0:
                in_record = True

            # Before a walk starts, keep resetting the reference records.
            if not last_rec_diff or not in_record:
                last_rec_diff = js
                last_rec = js
                sub_records = []
                if not in_record:
                    margins = [js]
                else:
                    margins.append(js)
                continue

            # Deltas vs the previous (newer) record; negative deltas mean a
            # counter reset, i.e. a boundary between two walks.
            time_diff = last_rec['time'] - js['time']
            steps_diff = last_rec['steps'] - js['steps']
            dist_diff = last_rec['dist'] - js['dist']
            rtime_diff = last_rec['rec_time'] - js['rec_time']
            time_to_rtime = abs(time_diff - rtime_diff)
            js['_ldiff'] = [time_diff, steps_diff, dist_diff, rtime_diff, time_to_rtime]
            breaking = time_diff < 0 or steps_diff < 0 or dist_diff < 0 or rtime_diff < 0 or time_to_rtime > 5*60
            stats_changed = False

            if in_record and collect_details:
                sub_records.append(dict(js))

            if breaking:
                if margins:
                    mm = margins[-1]
                    mm['_breaking'] = breaking

            # A new margin opens on a speed change, on a break while moving,
            # or on the all-zero record that terminates a walk.
            if (in_record or not breaking) \
                    and (last_rec_diff['speed'] != js['speed']
                         or (breaking and last_rec_diff['speed'] != 0)
                         or (js['speed'] == 0 and js['time'] == 0)):
                js['_breaking'] = breaking
                js_src = js if not breaking else last_rec

                if margins:
                    mm = margins[-1]
                    mm['_segment_time'] = last_rec_diff['time'] - js_src['time']
                    mm['_segment_rtime'] = last_rec_diff['rec_time'] - js_src['rec_time']
                    mm['_segment_dist'] = last_rec_diff['dist'] - js_src['dist']
                    mm['_segment_steps'] = last_rec_diff['steps'] - js_src['steps']
                    if collect_details:
                        mm['_records'] = sub_records[:-1]
                        sub_records = [dict(js)]

                margins.append(js)
                stats_changed = True
                last_rec_diff = js

            # End of a walk: emit its margins and reset the accumulators.
            if (stats_changed and js['speed'] == 0 and js['time'] == 0) or breaking:
                # print("done", breaking, time_to_rtime, time_diff, steps_diff, dist_diff, rtime_diff, js)
                # logger.info(json.dumps(margins, indent=2))
                if margins:
                    yield margins
                    num_done += 1
                    if limit and num_done >= limit:
                        return
                margins = [js]
                in_record = False
                last_rec_diff = js

            # last inst.
            last_rec = js

    def parse_stats(self, limit=None, collect_details=False):
        """Convenience wrapper: analyse the records fed from the stats file."""
        gen = self.feed_records()
        return self.analyze_records_margins(gen, limit, collect_details=collect_details)

    def comp_calories(self, margins):
        """Compute per-segment calories for one walk; returns (gross, net) lists."""
        # logger.debug(json.dumps(margins, indent=2))
        # Calories segment computation
        if not self.profile:
            logger.debug('No profile loaded')
            return

        calorie_acc = []
        calorie_acc_net = []
        for exp in margins:
            if '_segment_time' not in exp:
                continue

            el_time = exp['_segment_time']
            speed = exp['speed'] / 10.   # device reports speed in tenths of km/h
            ccal = (el_time / 60) * calories_walk2_minute(speed, self.profile.weight, 0.00)
            # Net calories: subtract the resting metabolic rate for the same time.
            ccal_net = ccal - (el_time / 60) * calories_rmrcb_minute(self.profile.weight, self.profile.height,
                                                                     self.profile.age, self.profile.male)
            logger.info('Calories for time %5s, speed %4.1f, seg time: %4s, dist: %5.2f, steps: %5d, '
                        'cal: %7.2f, ncal: %7.2f'
                        % (exp['time'], speed, el_time, exp['_segment_dist'] / 100., exp['_segment_steps'],
                           ccal, ccal_net))

            calorie_acc.append(ccal)
            calorie_acc_net.append(ccal_net)

        logger.info('Calories burned so far this walk: %7.2f kcal, %7.2f kcal net'
                    % (sum(calorie_acc), sum(calorie_acc_net)))
        return calorie_acc, calorie_acc_net

    def load_last_stats(self, count=1):
        """Load the most recent `count` walks and return calories for the newest."""
        self.load_stats(count)
        if self.loaded_margins:
            logger.debug('Loaded margins: %s' % (json.dumps(self.loaded_margins[0], indent=2),))
            return self.comp_calories(self.loaded_margins[0])

    def remove_records(self, margins):
        """Return a deep-ish copy of `margins` with '_records' blanked out."""
        ret = []
        for recs in margins:
            nrecs = [dict(x) for x in recs]
            for rec in nrecs:
                rec['_records'] = None
            ret.append(nrecs)
        return ret
| ph4r05/ph4-walkingpad | ph4_walkingpad/analysis.py | analysis.py | py | 6,736 | python | en | code | 47 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ph4_walkingpad.profile.Profile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "ph4_walking... |
12943532357 | import timeit
import matplotlib.pyplot as plt
def algoritm1(item):
    """Return True when every element of `item` is unique (dict lookup)."""
    seen = {}
    for element in item:
        if element in seen:
            return False
        seen[element] = True
    return True
def algoritm2(item):
    """Return True when every element of `item` is unique (list lookup).

    Deliberately uses a list, making each membership test O(n); this is the
    slow variant being benchmarked against the dict-based `algoritm1`.
    """
    seen = []
    for element in item:
        if element in seen:
            return False
        seen.append(element)
    return True
# Benchmark both uniqueness checks on growing inputs and plot the timings.
plt_x = []
time1 = []
time2 = []

for i in range(1000, 10001, 1000):
    plt_x.append(i)
    lst = list(range(i))
    # The list literal is interpolated into the timed statement's source;
    # globals() exposes algoritm1/algoritm2 to the timeit namespace.
    time1.append(timeit.timeit(
        f"algoritm1({lst})", number=5, globals=globals()))
    time2.append(timeit.timeit(
        f"algoritm2({lst})", number=5, globals=globals()))

plt.xlabel('Numbers')
plt.ylabel('Time')
plt.plot(plt_x, time1, label='Algoritm1 - {}')
plt.plot(plt_x, time2, label='Algoritm2 - []')
plt.legend()
plt.show()
| Polinaaa567/university_ipynb | 5_term/Alg/lab1_begin/2.2.py | 2.2.py | py | 817 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "timeit.timeit",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "timeit.timeit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplo... |
3281837914 | # -*- coding: utf-8 -*-
# import accounts
from raven.contrib.flask import Sentry
from flask import Flask, request, redirect, abort
import ses
import urlparse
# from util import jsonify
# Flask app with Sentry error reporting attached.
app = Flask(__name__)
sentry = Sentry()
sentry.init_app(app)
@app.route('/')
def hello():
    # Simple health-check / landing endpoint.
    return 'Hello World!\n'
@app.route('/submit', methods=["GET", "POST"])
def submit():
    """Handle a contact-form submission: build the message, send via SES.

    NOTE(review): Python 2 code (`iteritems`, the `urlparse` module). The
    form values are assumed to be lists (``{field: [value, ...]}``), as the
    indexing in UnholsterForm implies — confirm with the werkzeug version
    deployed.
    """
    data = dict(request.form)
    try:
        referer = request.referrer
        # form_name = data.pop("forms:form")[0]
        # account_name = data.pop("forms:account")[0]
        # redirect_path = data.pop("forms:redirect")[0]
    except KeyError:
        abort(403)

    # Keep only non-empty uploaded files as attachments.
    attachments = [file for name, file in request.files.iteritems() if file]

    # account = accounts.find_one({"name": account_name})
    # if account is None:
    #     print("Account not found")
    #     abort(404)
    # form = account.form(form_name)
    # if form is None:
    #     print("Form not found %s" % form_name)
    #     abort(404)
    # if not form.referer_allowed(referer):
    #     print("Referer not allowed %s" % referer)
    #     abort(404)

    form = UnholsterForm(data)
    # result = ses.send_with_template(form, data, attachments)
    ses.send(form.recipients, form.subject, form.content, attachments)
    # print(result)
    # account.submits().insert({'form': form['_id'], 'data': data, 'mandrill_result': esult})

    if form.redirect_url:
        # redirect_url is a list of values; resolve relative to the referer
        redirect_url = urlparse.urljoin(referer, form.redirect_url[0])
        return redirect(redirect_url)
    else:
        return 'Sent'
# @app.route('/account/<account_name>')
# def account(account_name):
# account = accounts.find_one({"name": account_name})
# return jsonify(
# account=account,
# submits=list(account.submits().find())
# )
class UnholsterForm:
    """Contact-form submission bound for contacto@unholster.com.

    `data` maps field names to lists of values (as produced by
    ``dict(request.form)``); only the first value of each field is used.
    """

    def __init__(self, data):
        self.recipients = ['contacto@unholster.com']
        self.subject = u'Contacto: ' + data['subject'][0]
        self.content = self._content(data)
        self.redirect_url = data.get('redirect_url')

    def _content(self, data):
        # Assemble the HTML body line by line; the output is identical to
        # the previous single-template implementation.
        lines = [
            u"Nombre: {name[0]} <br/>",
            u"E-mail: <a href=mailto:{email[0]}>{email[0]}</a> </br>",
            u"<pre>{message[0]}</pre>",
        ]
        return u"".join(lines).format(**data)
if __name__ == "__main__":
    # Development server only; run under a WSGI server in production.
    app.run(port=8000, debug=True)
| Unholster/unholster-forms | web.py | web.py | py | 2,366 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "raven.contrib.flask.Sentry",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
2033204475 | import gzip
from keybert import KeyBERT
from yake import KeywordExtractor
import re
import setup_dir
NWORDS=100                         # keywords to keep per method per year
FMT="{}\t{}\t{}\t{}\t{}\n"         # YEAR, METHOD, RANK, KEYWORD, METRIC

# Boilerplate phrases stripped from the corpus before keyword extraction;
# each entry is (regex pattern -> replacement).
cleans={ #important give all in lowercase!!!
    r"international\s+water\s+management\s+institute": "iwmi",
    r"sunil\s+mawatha": "",
    r"the\s+consultative\s+group\s+on\s+international\s+agricultural\s+research": "cgiar",
    r"fulltext": "",
    r"keywords": "",
    r"email" : "",
    r"e-mail" : "",
    r"\btel\b" : "",
    r"telephone": "",
}

yake_kw_extractor = KeywordExtractor(lan="en", n=2, top=NWORDS)
kw_extractor = KeyBERT('distilbert-base-nli-mean-tokens')

with open(setup_dir.DIR_KW+"results.csv", "wt", encoding='utf-8') as kwf:
    kwf.write(FMT.format("YEAR", "METHOD", "RANK", "KEYWORD", "METRIC"))
    for year in range(2002,2021+1):
        with gzip.open(setup_dir.DIR_PROC+"{}.gz".format(year), "rt", encoding='utf-8') as inf:
            txt=inf.read()
        txt=txt.lower()
        for item in cleans.items():
            l1=len(txt)
            # item unpacks to (pattern, replacement)
            txt=re.sub(*item,txt)
            l2=len(txt)
            print ('{} recuded due to {}'.format(l1-l2,item))
        # (a long commented-out sample string used for debugging was removed)
        keywords_yake = yake_kw_extractor.extract_keywords(text=txt)
        for ii,kw in enumerate(keywords_yake):
            # kw is a (keyword, score) pair
            kwf.write(FMT.format(year, "Yake", ii, *kw))
        print("Yake: Year {}: {}".format(year, keywords_yake))
        keywords_bert = kw_extractor.extract_keywords(txt, stop_words='english', top_n=NWORDS)
        for ii,kw in enumerate(keywords_bert):
            kwf.write(FMT.format(year, "Bert", ii, *kw))
        print("Bert: Year {}: {}".format(year, keywords_bert))
| asselapathirana/waybackcrawler | src/process_keywords.py | process_keywords.py | py | 2,190 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yake.KeywordExtractor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "keybert.KeyBERT",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "setup_dir.DIR_KW",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "gzip.open... |
30211562412 | from concurrent.futures.thread import ThreadPoolExecutor
from contextlib import asynccontextmanager
import asyncio
class AsyncFile:
    """Non-blocking facade over a regular file object.

    Every write/read call is submitted to an executor through the event
    loop and its future is recorded in ``pending``; the caller awaits them
    later (see ``async_open``) and finds the outcomes in ``result``.
    """

    def __init__(self, file, loop=None, executor=None):
        self.file = file
        self.loop = loop if loop else asyncio.get_running_loop()
        self.executor = executor if executor else ThreadPoolExecutor()
        self.pending = []   # futures not yet awaited
        self.result = []    # filled in by whoever gathers `pending`

    def _submit(self, fn, *args):
        # Schedule the blocking call on the executor; remember its future.
        self.pending.append(self.loop.run_in_executor(self.executor, fn, *args))

    def write(self, string):
        self._submit(self.file.write, string)

    def read(self, size=-1):
        self._submit(self.file.read, size)

    def readlines(self):
        self._submit(self.file.readlines)
@asynccontextmanager
async def async_open(path, mode='w'):
    """Async context manager providing non-blocking file I/O.

    Wraps the opened file in an AsyncFile; on exit every queued operation
    is awaited and the outcomes are stored on its ``result`` attribute.
    """
    with open(path, mode) as handle:
        wrapper = AsyncFile(handle, loop=asyncio.get_running_loop())
        try:
            # Corresponds to __aenter__: hand the wrapper to the caller.
            yield wrapper
        finally:
            # Corresponds to __aexit__: drain all pending operations.
            wrapper.result = await asyncio.gather(*wrapper.pending)
import tempfile
import os


async def main():
    """Demo: write a few lines "concurrently" to a temp file.

    Ordering is not guaranteed — each write runs as its own executor task,
    which is exactly what the (Chinese) lines written below say.
    """
    tempdir = tempfile.gettempdir()
    path = os.path.join(tempdir, 'run.txt')
    print(f'writing asynchronously(异步) to {path}')
    async with async_open(path, mode='w') as f:
        f.write('顺序\n')
        f.write('可能\n')
        f.write('就是\n')
        f.write('乱的\n')
        f.write('!\n')
    # A fresh AsyncFile has an empty `result`, so this just prints [];
    # the string '/' stands in for a file object but is never used.
    print(AsyncFile('/').result)
asyncio.run(main()) | Smile-Cats/asyncio-demo | my_asyncio_demo/编写异步上下文管理器.py | 编写异步上下文管理器.py | py | 1,765 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "asyncio.get_running_loop",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.thread.ThreadPoolExecutor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "asyncio.get_running_loop",
"line_number": 32,
"usage_type": "call"
... |
2931427678 | import email
from typing import Any, List
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic.networks import EmailStr
from app import crud, models
from app.models.user import *
from app.api import deps
from app.core.config import settings
from app.utils import send_new_account_email
router = APIRouter()

# TODO: all user ids were changed to email — be careful when coding the frontend
@router.get("/", response_model=List[User])
def read_users(
    skip: int = 0,
    limit: int = 100,
    # current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Retrieve users (paginated via skip/limit).

    NOTE(review): the superuser dependency above is commented out, so this
    endpoint is currently unauthenticated — confirm that is intentional.
    """
    users = crud.user.get_multi(skip=skip, limit=limit)
    return users
@router.post("/", response_model=User)
def create_user(
    *,
    user_in: UserCreate,
    current_user: User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Create new user (superuser only). 400 if the email is already taken.
    """
    user = crud.user.get_by_email(email=user_in.email)
    if user:
        raise HTTPException(
            status_code=400,
            detail="The user with this username already exists in the system.",
        )
    user = crud.user.create(obj_in=user_in)
    # New-account notification email is currently disabled.
    # if settings.EMAILS_ENABLED and user_in.email:
    #     send_new_account_email(
    #         email_to=user_in.email, username=user_in.email, password=user_in.password
    #     )
    return user
@router.put("/me", response_model=User)
def update_user_me(
    *,
    password: str = Body(None),
    full_name: str = Body(None),
    email: EmailStr = Body(None),
    current_user: User = Depends(deps.get_current_active_user),
) -> Any:
    """
    Update own user. Only fields supplied in the body are changed.
    """
    current_user_data = jsonable_encoder(current_user)
    user_in = UserUpdate(**current_user_data)
    if password is not None:
        user_in.password = password
    if full_name is not None:
        user_in.full_name = full_name
    if email is not None:
        user_in.email = email
    # Bug fix: look the record up by the user's *current* email. Passing
    # user_in.email meant that when the email was being changed, the update
    # searched for a record under the new, not-yet-stored address.
    user = crud.user.update(email=current_user.email, obj_in=user_in)
    return user
@router.get("/me", response_model=User,response_model_by_alias=True)
def read_user_me(
    current_user: User = Depends(deps.get_current_active_user),
) -> Any:
    """
    Get current user. Serialized with field aliases to match the response model.
    """
    return current_user.dict(by_alias=True)
@router.post("/open", response_model=User)
def create_user_open(
    *,
    password: str = Body(...),
    email: EmailStr = Body(...),
    full_name: str = Body(None),
) -> Any:
    """
    Create new user without the need to be logged in.

    403 when open registration is disabled; 400 when the email is taken.
    """
    if not settings.USERS_OPEN_REGISTRATION:
        raise HTTPException(
            status_code=403,
            detail="Open user registration is forbidden on this server",
        )
    user = crud.user.get_by_email(email=email)
    if user:
        raise HTTPException(
            status_code=400,
            detail="The user with this username already exists in the system",
        )
    user_in = UserCreate(password=password, email=email, full_name=full_name)
    user = crud.user.create(obj_in=user_in)
    return user
@router.get("/{email}", response_model=User)
def read_user_by_id(
    email: str,
    current_user: User = Depends(deps.get_current_active_user),
) -> Any:
    """
    Get a specific user by email.

    Users may fetch themselves; anyone else requires superuser privileges.
    """
    user = crud.user.get_by_email(email=email)
    if user == current_user:
        return user
    if not crud.user.is_superuser(current_user):
        raise HTTPException(
            status_code=400, detail="The user doesn't have enough privileges"
        )
    return user
@router.put("/{email}", response_model=User)
def update_user(
    *,
    email: str,
    user_in: UserUpdate,
    current_user: User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Update a user (superuser only). 404 when the email is unknown.
    """
    user = crud.user.get_by_email(email=email)
    if not user:
        raise HTTPException(
            status_code=404,
            detail="The user with this username does not exist in the system",
        )
    # Consistency fix: every other call site in this module passes the update
    # payload as `obj_in` (see create_user / update_user_me); the previous
    # `user_in=` keyword was inconsistent with the crud API used elsewhere.
    user = crud.user.update(email=email, obj_in=user_in)
    return user
| VittorioYan/full-stack-fastapi-mongodb | {{cookiecutter.project_slug}}/backend/app/app/api/api_v1/endpoints/users.py | users.py | py | 4,083 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.crud.user.get_multi",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "app.crud.user",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "app.crud... |
2962763568 | import torch
def eval_model(model, loader, device):
    """Compute the mean top-1 accuracy of ``model`` over ``loader``.

    :param model: callable mapping a batch of inputs to class scores/logits
    :param loader: iterable yielding ``(inputs, targets)`` batches
    :param device: device the batches are moved to before the forward pass
    :return: overall accuracy in ``[0, 1]`` as a Python float

    NOTE(review): the model is not switched with ``model.eval()`` here;
    the caller is expected to have set the desired mode.
    """
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        per_sample_hits = []
        # Renamed from ``input`` to avoid shadowing the builtin.
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            per_sample_hits.append((outputs.argmax(dim=1) == targets).float())
        # Concatenating per-sample hits weights every sample equally even
        # when batches have different sizes.
        return torch.cat(per_sample_hits, 0).mean().item()
| kefirski/pruning | utils.py | utils.py | py | 421 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.no_grad",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 15,
"usage_type": "call"
}
] |
41880736603 | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from django.contrib.auth.views import LoginView, LogoutView
from users.views import home, register, profileDetials, question1, question2, question3, compatibility, editProfile
# Route table for the users app: auth pages, profile, the three
# questionnaire steps and the compatibility view.
urlpatterns = [
    path('', home, name='home'),
    path('signup/', register, name='register'),
    path('login/', LoginView.as_view(template_name='users/login.html'), name='login'),
    path('logout/', LogoutView.as_view(template_name='users/logout.html'), name='logout'),
    path('profile/', profileDetials, name='profile-details'),
    path('questions/1', question1, name='question1'),
    path('questions/2', question2, name='question2'),
    path('questions/3', question3, name='question3'),
    path('compatibility/', compatibility, name='compatibility'),
    path('edit-profile/', editProfile, name='edit-profile'),
]

# Serve uploaded media through Django only in development; in production
# the web server is expected to handle MEDIA_URL itself.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "users.views.home",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "users.views.regi... |
29882430075 | from rest_framework.response import Response
from rest_framework import status, mixins, generics, viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.exceptions import NotFound
from .models import Drop
from .renderers import DropJSONRenderer
from .serializers import DropSerializer
# class DropListCreateAPIView(generics.ListCreateAPIView):
#
# renderer_classes = (DropJSONRenderer,)
# queryset = Drop.objects.all()
# serializer_class = DropSerializer
#
# def list(self, request, *args, **kwargs):
# queryset = self.get_queryset()
# serializer = DropSerializer(queryset, many=True)
# return Response(serializer.data)
#
# def post(self, request, *args, **kwargs):
#
# serializer = DropSerializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
class DropViewSet(mixins.CreateModelMixin,
                  mixins.ListModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.DestroyModelMixin,
                  viewsets.GenericViewSet):
    """CRUD-style viewset for Drop objects, addressed by their ``id`` field.

    NOTE(review): UpdateModelMixin is not mixed in, so the ``update``
    method below is only reachable if the router maps it explicitly --
    confirm the URL configuration.
    """

    # select_related avoids per-drop queries for the owning agent/user.
    queryset = Drop.objects.select_related('agent', 'agent__user')
    permission_classes = (IsAuthenticatedOrReadOnly,)
    renderer_classes = (DropJSONRenderer,)
    serializer_class = DropSerializer
    lookup_field = 'id'

    def _check_exists(self, id):
        """Return the drop with *id* or raise NotFound (rendered as 404)."""
        try:
            serializer_instance = self.queryset.get(id=id)
            return serializer_instance
        except Drop.DoesNotExist:
            raise NotFound('A drop with this id does not exist.')

    def create(self, request, *args, **kwargs):
        """Create a drop owned by the requesting user's profile."""
        # The serializer reads the owning agent from its context.
        serializer_context = {'agent': request.user.profile}
        # Payload is nested under a 'drop' key.
        serializer_data = request.data.get('drop', {})
        serializer = self.serializer_class(
            data=serializer_data,
            context=serializer_context
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def update(self, request, id):
        """Partially update an existing drop from the 'drop' payload key."""
        serializer_instance = self._check_exists(id)
        serializer_data = request.data.get('drop', {})
        # Second positional argument is the serializer's ``data``.
        serializer = self.serializer_class(serializer_instance, serializer_data, partial=True)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)

    def retrieve(self, request, id):
        """Return a single drop by id."""
        serializer_instance = self._check_exists(id)
        serializer = self.serializer_class(serializer_instance)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def destroy(self, request, id, *args, **kwargs):
        """Delete the drop with *id*; missing ids raise NotFound."""
        drop = self._check_exists(id)
        drop.delete()
        return Response(None, status=status.HTTP_204_NO_CONTENT)
| rmbrntt/deaddrop | deaddrop-api/deaddrop/apps/drops/views.py | views.py | py | 2,893 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.mixins.CreateModelMixin",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.mixins",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rest_framework.mixins.ListModelMixin",
"line_number": 32,
"usage_type":... |
4213543541 | """berrybeef.beef: provides entry point main()."""
import sys
from PyQt5.QtWidgets import QApplication
from .Info import Info
from BERRYBEEF.Main import MainWindow
from BERRYBEEF.constants import *
def main():
    """Report the primary screen's geometry and launch the main window."""
    try:
        app = QApplication(sys.argv)
        primary = app.primaryScreen()
        print('Screen: %s' % primary.name())
        full_size = primary.size()
        print('Size: %d x %d' % (full_size.width(), full_size.height()))
        usable = primary.availableGeometry()
        print('Available: %d x %d' % (usable.width(), usable.height()))
        author_info = Info('Valter Ferlete', 'ferlete@gmail.com')
        print(SOFTWARE_NAME)
        print("Author %s" % author_info.author)
        # The window is sized to the available (taskbar-free) area.
        main_window = MainWindow(SOFTWARE_NAME, usable.width(), usable.height())
        main_window.show()
        sys.exit(app.exec_())
    except Exception as ex:
        # Broad catch keeps a startup failure from dumping a traceback.
        print(ex)
| ferlete/BerryBeef | BERRYBEEF/berrybeef.py | berrybeef.py | py | 849 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "Info.Info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "BERRYBEEF.Main.M... |
15725734700 | from os import listdir, path
from time import clock
from pickle import dumps, loads
from gzip import compress, decompress
from random import shuffle
from sklearn.feature_selection import SelectKBest
import logging
class Vectors(object):
    """Builds character n-gram (1/2/3-gram) count vectors for text corpora.

    Corpora are read line by line, shuffled, clumped into chunks of roughly
    ``chunk_size`` characters, and each chunk becomes one labelled feature
    vector.  ``part_b_run`` / ``part_c_run`` / ``part_d_run`` drive the
    three experiment variants.

    NOTE(review): this module imports ``clock`` from ``time``;
    ``time.clock()`` was removed in Python 3.8 and needs to be replaced
    with ``time.perf_counter()`` on modern interpreters.
    """

    def __init__(self, corpos_folder_path, corpora_label_dict, debugging_output_folder, meta_data_folder_path,
                 chunk_size=5000, debugging_mode=False):
        # Every file in the corpus folder is treated as a corpus.
        self.__corpora_file_path_list = \
            [path.join(corpos_folder_path, filename) for filename in listdir(corpos_folder_path)]
        self.__chunk_size = chunk_size
        # Maps an n-gram string to its index in the feature vector.
        self.__features_vector = {}
        self.__corpora_label_dict = corpora_label_dict
        self.__debugging_output_folder = debugging_output_folder
        # When True, previously pickled feature dictionaries are reused.
        self.__debugging_mode = debugging_mode
        self.__meta_data_folder_path = meta_data_folder_path

    def __generate_vectors(self):
        """Scan every corpus and build the n-gram -> index dictionary."""
        corpora_sentences, features = [], set()
        logging.info("\nCreates the dictionary of vector features")
        for corpos_text_file_path in self.__corpora_file_path_list:
            corpora_sentences += self.__read_lines_from_file(corpos_text_file_path)
        for sentence in corpora_sentences:
            for index, char in enumerate(sentence):
                # Collect character unigrams, bigrams and trigrams.
                features.add(char)
                if index < len(sentence) - 1:
                    features.add(char + sentence[index + 1])
                if index < len(sentence) - 2:
                    features.add(char + sentence[index + 1] + sentence[index + 2])
        self.__features_vector = {feature: index for index, feature in enumerate(features)}
        logging.info("\nFinished creating the dictionary of vector features")

    def __get_chunks(self, corpora_lines):
        """Group *corpora_lines* into chunks of roughly ``chunk_size`` chars.

        A line is never split; a chunk is closed once adding the next line
        would cross the threshold.
        """
        chunks = []
        last_chunk = []
        last_chunk_size = 0
        for line in corpora_lines:
            if last_chunk_size <= self.__chunk_size < last_chunk_size + len(line):
                chunks.append(last_chunk)
                last_chunk = []
                last_chunk_size = 0
            last_chunk.append(line)
            last_chunk_size += len(line)
        chunks.append(last_chunk)
        return chunks

    def __get_feature_vectors(self, chunks, corpora_label):
        """Count n-gram occurrences per chunk; return (vector, label) pairs."""
        feature_vectors, init_vector = [], [0] * len(self.__features_vector)
        for chunk in chunks:
            # initialize feature vector
            feature_vector = init_vector.copy()
            for sentence in chunk:
                for index, char in enumerate(sentence):
                    feature_vector[self.__features_vector[sentence[index]]] += 1
                    if index < len(sentence) - 1:
                        feature_vector[self.__features_vector[''.join(sentence[index: index + 2])]] += 1
                    if index < len(sentence) - 2:
                        feature_vector[self.__features_vector[''.join(sentence[index: index + 3])]] += 1
            feature_vectors.append((feature_vector, corpora_label))
        return feature_vectors

    @staticmethod
    def __save_pickle_file(pickle_file_path, pickle_data):
        """Pickle *pickle_data* and write it gzip-compressed."""
        with open(pickle_file_path, 'wb') as fp:
            fp.write(compress(dumps(pickle_data)))

    @staticmethod
    def __read_pickle_file(pickle_file_path):
        """Inverse of ``__save_pickle_file``: decompress and unpickle."""
        with open(pickle_file_path, 'rb') as fp:
            return loads(decompress(fp.read()))

    @staticmethod
    def __read_lines_from_file(file_path):
        """Return all lines of a UTF-8 text file (newlines kept)."""
        with open(file_path, encoding='utf-8') as f:
            return f.readlines()

    def __best_words_features(self, feature_vectors_list, best_words_features_size=100):
        """Select the K most informative n-grams and dump them to a text file."""
        vectors_list, vectors_label_list = [], []
        file_path = path.join(self.__meta_data_folder_path, "best_features.txt")
        # Reverse mapping: vector index -> n-gram string.
        idx_to_features_dict = \
            dict([(vectors_idx, chars_features) for chars_features, vectors_idx in self.__features_vector.items()])
        sel = SelectKBest(k=best_words_features_size)
        for vectors_details in feature_vectors_list:
            vectors_list.append(vectors_details[0])
            vectors_label_list.append(vectors_details[1])
        sel.fit_transform(vectors_list, vectors_label_list)
        # NOTE(review): ``indices`` expects a boolean; the string "True"
        # only works because any non-empty string is truthy.
        k_best_index_list = sel.get_support(indices="True")
        with open(file_path, mode='w', encoding='utf-8') as f:
            for feature_idx in sorted(k_best_index_list):
                f.write("'" + idx_to_features_dict[feature_idx] + "'\n")
        logging.info("\nThe '{}' file created successfully".format(file_path))

    def part_b_run(self, hebrew_corpora_path_file, save_vectors_file=True):
        """Build labelled vectors for Hebrew-original vs. translated corpora.

        Every corpus except *hebrew_corpora_path_file* is labelled as a
        translation.  Returns the combined (vector, label) list.
        """
        hebrew_corpora_lines, translation_corpora_lines, translation_corpora_file_path_list, feature_vectors \
            = [], [], self.__corpora_file_path_list.copy(), []
        translation_corpora_file_path_list.remove(hebrew_corpora_path_file)
        feature_vector_file_path = path.join(self.__debugging_output_folder, 'feature_vector.pickle.gz')
        # Reuse the cached n-gram dictionary only in debugging mode.
        if not self.__debugging_mode or not path.isfile(feature_vector_file_path):
            self.__generate_vectors()
            self.__save_pickle_file(feature_vector_file_path, self.__features_vector)
        else:
            self.__features_vector = self.__read_pickle_file(feature_vector_file_path)
        hebrew_corpora_lines = self.__read_lines_from_file(hebrew_corpora_path_file)
        translation_corpora_lines = []
        for corpora_file_path in translation_corpora_file_path_list:
            translation_corpora_lines += self.__read_lines_from_file(corpora_file_path)
        logging.info("\nShuffle the lines")
        shuffle(hebrew_corpora_lines)
        shuffle(translation_corpora_lines)
        logging.info("\nClump the lines into chunks smaller or equal to {} chars".format(self.__chunk_size))
        hebrew_corpora_chunks = self.__get_chunks(hebrew_corpora_lines)
        translation_corpora_chunks = self.__get_chunks(translation_corpora_lines)
        # NOTE(review): clock() removed in Python 3.8 -- see class docstring.
        chunks_time = clock()
        logging.info("\nCalculate feature vector for each chunk")
        hebrew_label, translation_label = self.__corpora_label_dict['hebrew'], self.__corpora_label_dict['translation']
        hebrew_corpora_feature_vectors = self.__get_feature_vectors(hebrew_corpora_chunks, hebrew_label)
        translation_corpora_feature_vectors = self.__get_feature_vectors(translation_corpora_chunks, translation_label)
        logging.info("\nTotal time taken created chunk are {:0.4f} minutes".format((clock() - chunks_time) / 60))
        if save_vectors_file is True:
            logging.info("\nStore the feature vectors")
            self.__save_pickle_file(path.join(self.__debugging_output_folder, 'source_feature_vectors.pickle.gz'),
                                    hebrew_corpora_feature_vectors)
            self.__save_pickle_file(path.join(self.__debugging_output_folder, 'translation_feature_vectors.pickle.gz'),
                                    translation_corpora_feature_vectors)
            logging.info("\nFinished store the feature vectors")
        feature_vectors += hebrew_corpora_feature_vectors
        feature_vectors += translation_corpora_feature_vectors
        self.__best_words_features(feature_vectors)
        return feature_vectors

    def part_c_run(self, corpora_path_file_list, save_vectors_file=True):
        """Build labelled vectors for an explicit list of corpus files.

        The corpus name (file name without extension) must be a key of the
        label dictionary.  Exits the process if a file is missing.
        """
        corpora_data_dict, feature_vectors_list = {}, []
        self.__corpora_file_path_list = corpora_path_file_list
        for corpora_file_path in corpora_path_file_list:
            if not path.isfile(corpora_file_path):
                logging.info("\nThe '{}' file not exist".format(corpora_file_path))
                exit()
            corpora_name = path.split(corpora_file_path)[-1].split('.')[0]
            corpora_data_dict[corpora_name] = self.__read_lines_from_file(corpora_file_path)
        feature_vector_file_path = path.join(self.__debugging_output_folder, 'feature_vector.pickle.gz')
        if not self.__debugging_mode or not path.isfile(feature_vector_file_path):
            self.__generate_vectors()
            self.__save_pickle_file(feature_vector_file_path, self.__features_vector)
        else:
            self.__features_vector = self.__read_pickle_file(feature_vector_file_path)
        logging.info("\nShuffle the lines")
        for corpora_lines in corpora_data_dict.values():
            shuffle(corpora_lines)
        logging.info("\nClump the lines into chunks smaller or equal to {} chars".format(self.__chunk_size))
        for corpora_name, corpora_lines in corpora_data_dict.items():
            corpora_data_dict[corpora_name] = self.__get_chunks(corpora_lines)
        chunks_time = clock()
        logging.info("\nCalculate feature vector for each chunk")
        for corpora_name, corpora_chunks in corpora_data_dict.items():
            temp_vector = self.__get_feature_vectors(corpora_chunks, self.__corpora_label_dict[corpora_name])
            corpora_data_dict[corpora_name] = temp_vector
            feature_vectors_list += temp_vector
        logging.info("\nTotal time taken created chunk are {:0.4f} minutes".format((clock() - chunks_time) / 60))
        if save_vectors_file is True:
            logging.info("\nStore the feature vectors")
            self.__save_pickle_file(path.join(self.__debugging_output_folder, 'corporas_feature_vectors.pickle.gz'),
                                    corpora_data_dict)
            logging.info("\nFinished store the feature vectors")
        self.__best_words_features(feature_vectors_list)
        return feature_vectors_list

    def part_d_run(self, corpora_path_files):
        """Return one mean feature vector per language plus the language list.

        *corpora_path_files* maps a language name to a corpus file name;
        the full path is looked up by suffix in the instance's file list.
        """
        languages_lines = dict()
        languages_chunks = dict()
        languages_feature_vectors = dict()
        # NOTE(review): unlike the other runs, the cache here is read from
        # the working directory, not the debugging output folder -- confirm.
        if not path.isfile('feature_vector.pickle.gz'):
            self.__generate_vectors()
        else:
            with open('feature_vector.pickle.gz', 'rb') as fp:
                self.__features_vector = loads(decompress(fp.read()))
        for language, file in corpora_path_files.items():
            language_file_path = ''
            for file_path in self.__corpora_file_path_list:
                if file_path.endswith(file):
                    language_file_path = file_path
                    break
            with open(language_file_path, encoding='utf-8') as f:
                languages_lines[language] = f.readlines()
        logging.info("\nShuffle the lines")
        for language in languages_lines:
            shuffle(languages_lines[language])
        logging.info("\nClump the lines into chunks smaller or equal to {} chars".format(self.__chunk_size))
        for language, lines in languages_lines.items():
            languages_chunks[language] = self.__get_chunks(lines)
        chunks_time = clock()
        logging.info("\nCalculate feature vector for each chunk")
        for language, chunks in languages_chunks.items():
            languages_feature_vectors[language] = self.__get_feature_vectors(chunks,
                                                                             self.__corpora_label_dict[language])
        logging.info("\nTotal time taken created chunk are {:0.4f} minutes".format((clock() - chunks_time) / 60))
        feature_vectors_union = []
        languages = []
        for language, feature_vectors in languages_feature_vectors.items():
            # feature_vectors_union += [feature_vector[0] for feature_vector in feature_vectors]
            languages.append(language)
            # Average the chunk vectors to get one centroid per language.
            mean = [0] * len(feature_vectors[0][0])
            for feature_vector, corpora_label in feature_vectors:
                for index, value in enumerate(feature_vector):
                    mean[index] += value
            for index in range(len(mean)):
                mean[index] /= len(feature_vectors)
            feature_vectors_union.append(mean)
        return feature_vectors_union, languages
| Orenef11/NLP-Lab | Vectors.py | Vectors.py | py | 11,821 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 25,... |
40239161316 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# external
import numpy as np
import matplotlib.pyplot as plt
import pandas
# local
from dtk import walk
# Load the raw walking trial; '0.000000' entries are treated as missing data.
obinna = pandas.read_csv('../data/obinna-walking.txt', delimiter='\t',
                         index_col="TimeStamp", na_values='0.000000')
# change the degrees to radians
for col in obinna.columns:
    if col.endswith('.Ang'):
        obinna[col] = np.deg2rad(obinna[col])
# Trim to a sample-index window of the trial.
start = 500
stop = 3000
data = walk.WalkingData(obinna.iloc[start:stop].copy())
angles = ['RKneeFlexion.Ang',
          'LKneeFlexion.Ang']
rates = ['RKneeFlexion.Rate',
         'LKneeFlexion.Rate']
# Numerically differentiate the angle columns into the rate columns.
data.time_derivative(angles, rates)
# Detect gait events from the vertical ground reaction forces.
data.grf_landmarks('FP2.ForY', 'FP1.ForY', threshold=28.0)
right_steps = data.split_at('right', num_samples=15)
data.plot_steps('FP2.ForY', 'RKneeFlexion.Ang', 'RKneeFlexion.Rate',
                'RKneeFlexion.Mom', linestyle='-', marker='o')
data.plot_steps('FP2.ForY', 'RKneeFlexion.Ang', 'RKneeFlexion.Rate',
                'RKneeFlexion.Mom', mean=True)
controls = ['RKneeFlexion.Mom',
            'LKneeFlexion.Mom']
sensors = ['RKneeFlexion.Ang',
           'RKneeFlexion.Rate',
           'LKneeFlexion.Ang',
           'LKneeFlexion.Rate']
solver = walk.SimpleControlSolver(right_steps, sensors, controls)
# Gain omission: each knee moment may only depend on its own side's sensors.
gain_omission_matrix = np.ones((len(controls), len(sensors))).astype(bool)
gain_omission_matrix[0, 2:] = False
gain_omission_matrix[1, :2] = False
#gain_omission_matrix = None
# NOTE(review): 'controls' is rebound here from the label list to the
# solver's identified control output.
gains, controls, variance, gain_var, control_var, estimated_controls = \
    solver.solve(gain_omission_matrix=gain_omission_matrix)
solver.plot_gains(gains, gain_var)
solver.plot_estimated_vs_measure_controls(estimated_controls, variance)
solver.plot_control_contributions(estimated_controls)
plt.show()
| csu-hmc/gait-control-direct-id-paper | src/control_solver_example.py | control_solver_example.py | py | 1,779 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dtk.walk.WalkingData",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dtk.walk",
"li... |
45645200524 | from django.http import JsonResponse
from django.shortcuts import redirect, get_object_or_404
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from vigil.models import AlertChannel, Alert
from celery import uuid, signature
from vigil import tasks
from vigil.models import VigilTaskResult
class AlertsAcknowledge(View):
    """Marks the alerts selected in the dashboard form as acknowledged."""

    @staticmethod
    def post(request):
        """Deactivate every alert whose pk was posted as 'acknowledge_alerts'.

        Unknown pks are skipped silently so one stale form entry cannot
        abort the whole acknowledgement.
        """
        for alert_pk in request.POST.getlist('acknowledge_alerts'):
            try:
                alert = Alert.objects.get(pk=alert_pk)
                alert.active = False
                alert.save()
            except Alert.DoesNotExist:
                continue
        return redirect('active_alert_channel_list')
class AlertVigil(View):
    """API endpoint that activates or updates an alert on an alert channel."""

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled because external systems POST here directly.
        return super(AlertVigil, self).dispatch(request, *args, **kwargs)

    @staticmethod
    def post(request, alert_channel_uuid):
        """
        Main endpoint for activating or updating an alert on an existing alert channel
        """
        alert_channel = get_object_or_404(
            AlertChannel,
            alert_id=alert_channel_uuid
        )
        # First we run the preprocessor task
        preprocessor_task = getattr(tasks, alert_channel.preprocessor_action.task.name)
        task_id = uuid()
        # Record the result row up front so signal handlers can associate
        # the Celery result with this channel and action.
        VigilTaskResult.objects.create(
            alert_channel=alert_channel,
            alert_task_object=alert_channel.preprocessor_action,
            task_id=task_id
        )
        # build the preprocessor signature (immutable: ignores parent results)
        preprocessor_sig = signature(
            preprocessor_task,
            kwargs={
                'data': request.POST
            },
            task_id=task_id,
            immutable=True
        )
        # run the preprocessor async.
        # when this has finished, the logic tasks will run.
        # This is handled by the signals.
        # Notifications will be sent by the periodic tasks.
        preprocessor_sig.apply_async()
        return JsonResponse({'success': True})
| inuitwallet/vigil | vigil/views/api_views.py | api_views.py | py | 2,058 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.views.View",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "vigil.models.Alert.objects.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "vigil.models.Alert.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"... |
10990266944 | from torch import nn
from towhee.models.vis4mer.transposelinear import TransposedLinear
from towhee.models.vis4mer.activation import Activation
from towhee.models.vis4mer.get_initializer import get_initializer
def LinearActivation(
        d_input,
        d_output,
        bias=True,
        zero_bias_init=False,
        transposed=False,
        initializer=None,
        activation=None,
        activate=False,
        weight_norm=False,
        **kwargs):
    """
    Build a linear layer with optional initialization, weight norm and
    a fused activation.

    Args:
        d_input (int): input feature dimension
        d_output (int): output feature dimension
        bias (bool): include a bias term
        zero_bias_init (bool): zero-initialize the bias
        transposed (bool): use TransposedLinear (feature axis at -2)
        initializer (str): name of the weight initializer to apply
        activation (str): activation name; 'glu' doubles the linear width
        activate (bool): append the activation module after the linear
        weight_norm (bool): wrap the layer in weight normalization
    return: an nn.Module -- the linear layer alone, or
        Sequential(linear, activation) when ``activate`` is set
    """
    # GLU consumes two feature blocks, so the linear must emit twice as many.
    if activation == 'glu':
        d_output *= 2
    # Pick the linear flavour lazily so the unused class is never touched.
    if transposed:
        linear = TransposedLinear(d_input, d_output, bias=bias, **kwargs)
    else:
        linear = nn.Linear(d_input, d_output, bias=bias, **kwargs)
    # Optional named weight initialization.
    if initializer is not None:
        get_initializer(initializer, activation)(linear.weight)
    # Optional zero bias (only meaningful when a bias exists).
    if bias and zero_bias_init:
        nn.init.zeros_(linear.bias)
    if weight_norm:
        linear = nn.utils.weight_norm(linear)
    # Optionally fuse the activation into the returned module.
    if activate and activation is not None:
        act = Activation(activation, dim=-2 if transposed else -1)
        linear = nn.Sequential(linear, act)
    return linear
| towhee-io/towhee | towhee/models/vis4mer/linearactivation.py | linearactivation.py | py | 1,645 | python | en | code | 2,843 | github-code | 1 | [
{
"api_name": "towhee.models.vis4mer.transposelinear.TransposedLinear",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
... |
19547549511 | import datetime
import subprocess
log_file = open("logs.log", "a")
def log(message: str) -> None:
text = str(datetime.datetime.now()) + ": " + message + "\n"
print(text, end="")
log_file.write(text)
log("Checking to see if package 'MariaDB' is installed...")
check_stat_proc = subprocess.run(["dpkg", "-s", "mariadb"], capture_output=True, text=True)
if "Status: install ok installed" in check_stat_proc.stdout:
log("Package 'MariaDB' is already installed")
else:
log("Package 'MariaDB' is not installed")
log("Installing...")
install_proc = subprocess.run(["pkg", "in", "mariadb", "-y"])
code = install_proc.returncode
if code == 0:
log("Installation complete")
else:
if code == 100:
log("Could not locate package 'MariaDB'")
else:
log(f"Some unkown error occured during installation (code:{code})")
log("Failed to install package 'MariaDB'")
for fname in ["setpass", "start-client", "start-server"]:
mk_exec_proc = subprocess.run(["chmod", "u+x", f"{fname}.sh"])
code = mk_exec_proc.returncode
if code == 0:
log(f"Made '{fname}.sh' executable successfully")
else:
log(f"Failed to make '{fname}.sh' executable (code:{code})")
with open("/data/data/com.termux/files/usr/etc/bash.bashrc", "r+") as file:
log(f"Checking to see if alias {fname} exists...")
contents = file.read()
if fname in contents:
log(f"Alias '{fname}' already exists")
else:
log(f"Alias '{fname}' does not exist")
log(f"Creating alias '{fname}'...")
try:
file.write(f"alias {fname}=\"~/mysql-for-termux/{fname}.sh\"\n")
except Exception as e:
log(f"Failed to create alias '${fname}' (message: {e.args[0]})")
else:
log(f"Alias '{fname}' created successfully")
start_server_proc = subprocess.run(["./start-server.sh"])
code = start_server_proc.returncode
if code == 0:
log("MySQL server started successfully")
else:
log(f"MySQL server failed to start (code:{code})")
acquire_wakelock_proc = subprocess.run(["termux-wake-lock"])
code = acquire_wakelock_proc.returncode
if code == 0:
log("Wakelock acquired successfully")
else:
log(f"Failed to acquire wakelock (code:{code})")
print("MySQL installed successfully")
log_file.close()
| anay-p/mysql-for-termux | installer.py | installer.py | py | 2,407 | python | en | code | 36 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "subprocess.... |
10603942515 | from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
def signup(request):
    """Display and process the user registration form."""
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('accounts:signin')
    else:
        form = UserCreationForm()
    # Re-render with errors on invalid POST, or blank on GET.
    return render(request, 'accounts/signup.html', {'form': form})
def signin(request):
    """Display and process the login form; redirect to the feed on success."""
    if request.method == 'POST':
        form = AuthenticationForm(request, request.POST)
        if form.is_valid():
            login(request, form.get_user())
            return redirect('/reviews/feeds/')
    else:
        form = AuthenticationForm(request)
    # Re-render with errors on invalid POST, or blank on GET.
    return render(request, 'accounts/signin.html', {'form': form})
@login_required
def signout(request):
    '''signout function: log the current user out and return to sign-in'''
    logout(request)
    return redirect('/accounts/signin/')
| nopalpite/OCP9 | LITReview/accounts/views.py | views.py | py | 1,175 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 16,
"usa... |
40793798969 | import uuid
import speech_recognition as sr
r = sr.Recognizer()
language='ru_RU'
def recognise(filename):
    """Transcribe the WAV file at *filename* with Google's recognizer.

    Uses the module-level recognizer ``r`` and ``language`` setting.
    Returns the recognized text, or a fixed apology string when
    recognition fails for any reason.
    """
    with sr.AudioFile(filename) as source:
        audio_text = r.listen(source)
        try:
            text = r.recognize_google(audio_text, language=language)
            print(text)
            return text
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the "never crash" contract
        # without hiding interpreter-level signals.
        except Exception:
            return "Sorry.. run again..."
@bot.message_handler(content_types=['voice'])
def voice_processing(message):
    """Download a Telegram voice note, convert it to WAV, reply with transcript.

    NOTE(review): ``bot``, ``os`` and ``USER_DEFAULT`` are not defined or
    imported in this module as shown -- confirm they come from elsewhere.
    """
    user_id = message.from_user.username
    # Only the configured user may use the transcription feature.
    if user_id == USER_DEFAULT:
        # Unique name so concurrent messages never clash on disk.
        filename = str(uuid.uuid4())
        file_name_full = "./voice/" + filename + ".ogg"
        file_name_full_converted = "./ready/" + filename + ".wav"
        file_info = bot.get_file(message.voice.file_id)
        downloaded_file = bot.download_file(file_info.file_path)
        with open(file_name_full, 'wb') as new_file:
            new_file.write(downloaded_file)
        # ffmpeg converts the Telegram OGG/Opus note to WAV for recognition.
        os.system("ffmpeg -i " + file_name_full + " " + file_name_full_converted)
        text = recognise(file_name_full_converted)
        bot.reply_to(message, text)
        # Clean up both temporary files.
        os.remove(file_name_full)
        os.remove(file_name_full_converted)
    else:
        bot.send_message(message.chat.id, "Вы не тот пользователь")
| setta1a/bot_god | first/botTelegram/for stt/stt.py | stt.py | py | 1,251 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "speech_recognition.Recognizer",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "speech_recognition.AudioFile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 23,
"usage_type": "call"
}
] |
5796985227 | import torch
import torchvision
from torch import nn
import os
class autoencoder(nn.Module):
    """Convolutional autoencoder with an auxiliary classification head.

    ``forward`` returns ``(reconstruction, class_logits)``; the latent code
    produced by ``encoder`` feeds both ``decoder`` and ``classfier``.
    Sized for 28x28 single-channel inputs (latent shape 256 x 3 x 3).
    """

    def __init__(self, n_channel=1, n_class=36):
        super(autoencoder, self).__init__()
        # Downsampling path: conv -> relu -> pool, twice.
        self.encoder = nn.Sequential(
            nn.Conv2d(n_channel, 32, 5, stride=3, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 256, 5, stride=2, padding=5),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
        )
        # Mirror path built from transposed convolutions back to the input size.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256, 32, 5, stride=2, padding=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 8, 5, stride=3, padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(8, n_channel, 2, stride=2, padding=1),
            nn.Tanh(),
        )
        # Classification head on the latent code.  The attribute name keeps
        # the original spelling so existing checkpoints still load.
        self.classfier = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(128, 64, 3, padding=1),
            nn.Flatten(),
            nn.Linear(576, 200),
            nn.Linear(200, n_class),
        )

    def forward(self, x):
        """Encode ``x`` once; decode and classify the shared latent code."""
        latent = self.encoder(x)
        return self.decoder(latent), self.classfier(latent)

    def get_latent(self, x):
        """Return the encoder output for ``x`` without decoding it."""
        return self.encoder(x)
| purelyvivid/captcha5 | model.py | model.py | py | 1,442 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
import collections
import heapq
from typing import List
class Solution:
    def leastInterval(self, tasks: List[str], n: int) -> int:
        """Minimum number of CPU intervals to finish *tasks* when identical
        tasks must be separated by at least *n* intervals (idles included).

        Greedy: always run the most frequent runnable task (max-heap via
        negated counts); tasks on cooldown wait in a FIFO queue until
        their ready time.  O(m log k) time, O(k) space for m tasks of k
        kinds.  Fixes the previously unimported ``List`` annotation
        (NameError at class-definition time) via the typing import.
        """
        counts = collections.Counter(tasks)  # replaces the manual dict tally
        heap = [-freq for freq in counts.values()]
        heapq.heapify(heap)
        time = 0
        cooling = collections.deque()  # entries: [negated remaining count, ready time]
        while heap or cooling:
            time += 1
            if heap:
                remaining = 1 + heapq.heappop(heap)  # one execution consumed
                if remaining != 0:
                    cooling.append([remaining, time + n])
            # Move a task back onto the heap the moment its cooldown ends.
            if cooling and time == cooling[0][1]:
                heapq.heappush(heap, cooling.popleft()[0])
        return time
{
"api_name": "heapq.heappush",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"... |
31509600396 | from django import forms
from .models import General, HotWorks, ElectricalWorks
# Shared widget overrides: every text/date/time input in the permit forms
# gets the same CSS class.
form_styles = {'contractor': forms.TextInput(attrs={'class': 'textinputfield'}),
               'contractor_name': forms.TextInput(attrs={'class': 'textinputfield'}),
               'facility': forms.TextInput(attrs={'class': 'textinputfield'}),
               'date_of_arrival': forms.DateInput(attrs={'class': 'textinputfield'}),
               'time_of_arrival': forms.TimeInput(attrs={'class': 'textinputfield'}),
               'date_of_finish': forms.DateInput(attrs={'class': 'textinputfield'}),
               'time_of_finish': forms.TimeInput(attrs={'class': 'textinputfield'}),
               'job_location': forms.TextInput(attrs={'class': 'textinputfield'}),
               'job_spec': forms.TextInput(attrs={'class': 'textinputfield'}),
               'equipment': forms.TextInput(attrs={'class': 'textinputfield'}),
               'safety_precautions': forms.TextInput(attrs={'class': 'textinputfield'}),
               'location1': forms.TextInput(attrs={'class': 'textinputfield'}),
               'location2': forms.TextInput(attrs={'class': 'textinputfield'}),
               'location3': forms.TextInput(attrs={'class': 'textinputfield'}),
               }

# Field ordering shared by every permit form type.
field_order_list = ('contractor', 'contractor_name', 'facility', 'date_of_arrival',
                    'time_of_arrival', 'date_of_finish', 'time_of_finish', 'job_location', 'job_spec', 'equipment',)
# Extra fields appended per permit type.
general_field_list = ('safety_precautions', 'ra_ready', 'ms_ready', 'confined_space_entry')
hotworks_field_list = ('ppe', 'welding_screen', 'smoke_heat_isolated')
electrical_field_list = ('location1', 'location2', 'location3')
class GeneralForm(forms.ModelForm):
    """Permit form for general works; relabels completion and safety fields."""

    class Meta:
        model = General
        exclude = ('profile', )
        widgets = form_styles

    # Flatten the shared tuples: Django matches field_order entries by
    # field *name*, so the previous nested tuples were silently skipped
    # and the intended ordering was lost.
    field_order = ['status_closed', 'works_completed', *field_order_list, *general_field_list]

    def __init__(self, *args, **kwargs):
        super(GeneralForm, self).__init__(*args, **kwargs)
        self.fields['date_of_finish'].label = 'Date of Completion'
        self.fields['time_of_finish'].label = 'Time of Completion'
        self.fields['ra_ready'].label = 'Risk Assessment Ready'
        self.fields['ms_ready'].label = 'Method Statement Ready'
class HotWorksForm(forms.ModelForm):
    """Permit form for hot works; relabels completion and safety fields."""

    class Meta:
        model = HotWorks
        exclude = ('profile', )
        widgets = form_styles

    # Flatten the shared tuples: Django matches field_order entries by
    # field *name*, so the previous nested tuples were silently skipped.
    field_order = ['status_closed', 'works_completed', *field_order_list, *hotworks_field_list]

    def __init__(self, *args, **kwargs):
        super(HotWorksForm, self).__init__(*args, **kwargs)
        self.fields['date_of_finish'].label = 'Date of Completion'
        self.fields['time_of_finish'].label = 'Time of Completion'
        self.fields['ppe'].label = 'PPE ready'
        self.fields['welding_screen'].label = 'Welding Screen Required'
        self.fields['smoke_heat_isolated'].label = 'Smoke/Heat Detectors Isolated'
class ElectricalWorksForm(forms.ModelForm):
class Meta:
model = ElectricalWorks
exclude = ('profile', )
widgets = form_styles
field_order = ['status_closed', 'works_completed', field_order_list, electrical_field_list]
def __init__(self, *args, **kwargs):
super(ElectricalWorksForm, self).__init__(*args, **kwargs)
self.fields['date_of_finish'].label = 'Date of Completion'
self.fields['time_of_finish'].label = 'Time of Completion'
self.fields['location1'].label = 'Location 1'
self.fields['location2'].label = 'Location 2'
self.fields['location3'].label = 'Location 3' | Megaprotas/work_project | permits/forms.py | forms.py | py | 3,621 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.TextInput",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms"... |
73696765472 | import os, sys
import gym
import torch
import cv2
import numpy as np
import random
import time
import schedule
sys.path.append("./")
from network.backbone import BasicNet
from policy_optimizer import Optimizer
from network.dqn import DQN
from utils.dataloader import tnx2batch, batch4net
from utils.atari_wrapper import wrap_rainbow
from utils.trace import PriorTrace
from policy_optimizer import Optimizer
import time
torch.backends.cudnn.benchmark = True
"""
Transition = {"state": np.array, "action": int, "next_state": np.array, "reward": float, "done": logical}
"""
class BasicWorker():
    """Environment rollout worker.

    In "train" phase, repeatedly plays episodes, caches transitions
    ({"state", "action", "next_state", "reward", "done"}) and periodically
    flushes them to a replay database via the injected ``db_write`` callback.
    In "valid"/"test" phase, plays evaluation episodes and records a video.
    Iterating the worker (``__next__``) runs one unit of work per step.
    """
    def __init__(self, env_name="PongNoFrameskip-v4", save_interval=1000, max_steps=100000,
                 phase="train", db=None, db_write=None, suffix="default"):
        assert phase in ["train", "valid", "test"], "phase can only be train/test/valid"
        self.phase = phase
        # Train vs. eval use different wrapper settings inside wrap_rainbow.
        if self.phase == "train":
            self.env = wrap_rainbow(gym.make(env_name), swap=True, phase="train")
        else:
            self.env = wrap_rainbow(gym.make(env_name), swap=True, phase="test")
        self.env_name = env_name
        self.save_interval = save_interval  # flush cache to db every N env steps
        self.max_steps = max_steps  # hard cap on a single episode's length
        self.db = db
        if db_write:
            # db_write must accept keyword arguments named `db` and `data`.
            assert "db" in db_write.__code__.co_varnames and "data" in db_write.__code__.co_varnames
            self.db_write = db_write
        self.ob = self.reset()
        self.info = {}  # rolling episode statistics, returned by __next__
        self.cache = []  # transitions not yet flushed to the database
        self.save_count = 0
        self.video_path = "./video/{}/{}".format(env_name, suffix)
        self.sche = schedule.Sched()  # background scheduler for async db writes
        print("{}\tOb Space: {}\tActions: {}".format(self.env_name, self._shape(), self._na()))
    def reset(self):
        """Reset the environment and return the initial observation."""
        return self.env.reset()
    def step(self, a):
        """Advance the environment one step with action `a`."""
        next_ob, rw, done, info = self.env.step(a)
        return next_ob, rw, done, info
    def _simulate_with_train(self):
        """Play one training episode, caching transitions and flushing them
        asynchronously every `save_interval` steps. Returns the stats dict."""
        self.ob = self.reset()
        done, episod_len, episod_rw, episod_real_rw = False, 0, 0, 0
        while not done and episod_len < self.max_steps:
            a = self._action()
            next_ob, rw, done, info = self.step(a)
            self.cache.append({"state": self.ob, "action": a, "next_state": next_ob, "reward": rw, "done": done})
            self.ob = next_ob
            episod_len += 1
            self.save_count += 1
            episod_rw += rw
            # info["reward"] is the unclipped reward provided by the wrapper.
            episod_real_rw += info["reward"]
            if self.save_count % self.save_interval == 0:
                # Wait for any in-flight write before scheduling the next one.
                if len(self.sche):
                    self.sche.wait()
                self.sche.add(None, self.db_write, db=self.db, data=self.cache)
                self.cache.clear()
                self.save_count = 0
        self.info["episod_rw"] = episod_rw
        self.info["episod_real_rw"] = episod_real_rw
        self.info["episod_len"] = episod_len
        if "total_env_steps" in self.info:
            self.info["total_env_steps"] += episod_len
        else:
            self.info["total_env_steps"] = episod_len
        return self.info
    def _simulate_with_test(self, episod=30):
        """Play `episod` evaluation episodes (eps=0.05), write a video of the
        run, and return the mean accumulated reward per episode."""
        self.ob = self.reset()
        true_ob = self.env.render(mode="rgb_array")
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if not os.path.exists(self.video_path):
            os.makedirs(self.video_path)
        out = cv2.VideoWriter(os.path.join(self.video_path, "video-{}.avi".format(time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()))),
                              fourcc, 25.0, (true_ob.shape[1], true_ob.shape[0]))
        acc_rw, done = 0, False
        count, acc_len = 0, 0
        while count < episod and acc_len < episod*self.max_steps:
            episod_len, done = 0, False
            self.ob = self.reset()
            while not done and episod_len < self.max_steps:
                a = self._action(eps=0.05)
                self.ob, rw, done, _ = self.step(a)
                acc_rw += rw
                episod_len += 1
                acc_len += 1
                true_ob = self.env.render(mode="rgb_array")
                out.write(true_ob)
            count += 1
        out.release()
        self.ob = self.reset()
        return acc_rw / count
    def __iter__(self):
        return self
    def __next__(self):
        # One "iteration" is one training episode, or a full evaluation run.
        if self.phase == "train":
            return self._simulate_with_train()
        elif self.phase == "valid":
            return self._simulate_with_test(episod=30)
        else:
            return self._simulate_with_test(episod=200)
    def _action(self, eps=None):
        """Base policy: uniformly random action (subclasses override)."""
        return self.env.action_space.sample()
    def _shape(self):
        """Shape of the current observation."""
        return self.ob.shape
    def _na(self):
        """Number of discrete actions."""
        return self.env.action_space.n
    def update(self):
        raise NotImplementedError
    def load(self):
        raise NotImplementedError
class DQN_Worker(BasicWorker):
    """Rollout worker whose policy is an epsilon-greedy DQN.

    The Q-network is inference-only here (eval mode); weights arrive via
    `update()` from the learner or via `load()` from a checkpoint file.
    """
    def __init__(self, env_name="PongNoFrameskip-v4", arch=DQN, backbone=BasicNet, cuda=True,
                 save_interval=1000, max_steps=100000, phase="train", db=None, db_write=None,
                 suffix="default", **kwargs):
        super(DQN_Worker, self).__init__(env_name, save_interval, max_steps, phase, db, db_write, suffix)
        self.shape = self._shape()
        self.na = self._na()
        # Inference-only network; the worker never backpropagates through it.
        self.alg = arch(self.shape, self.na, backbone).eval()
        # Plain `if` instead of the original side-effecting conditional
        # expression (`self.alg.cuda() if cuda == True else None`).
        if cuda:
            self.alg.cuda()
        self.cuda = cuda
        self.eps = 0  # exploration rate; updated by the learner via update()
    def _action(self, eps=None):
        """Epsilon-greedy action: random with probability eps, else the
        Q-network's greedy action for the current observation."""
        eps = self.eps if eps is None else eps
        if random.random() < eps:
            return self.env.action_space.sample()
        with torch.no_grad():
            ob = torch.from_numpy(self.ob).float()
            if self.cuda:
                ob = ob.cuda()
            return self.alg.action(ob).item()
    def update(self, state_dict=None, eps=None):
        """Refresh policy weights and/or exploration rate from the learner."""
        if state_dict is not None:
            self.alg.load_state_dict(state_dict["policy"])
        if eps is not None:
            self.eps = eps
    def load(self, path):
        """Load policy weights from a checkpoint file on disk."""
        self.alg.load_state_dict(torch.load(path))
class PriorDQN_Worker(DQN_Worker):
    """DQN worker that additionally tracks values/actions/rewards in a
    PriorTrace so that per-transition priorities can be computed and written
    to the replay database alongside the transitions.
    """
    def __init__(self, env_name="PongNoFrameskip-v4", arch=DQN, backbone=BasicNet, cuda=True,
        save_interval=1000, max_steps=100000, phase="train", db=None, db_write=None,
        suffix="default", **kwargs):
        super(PriorDQN_Worker, self).__init__(env_name, arch, backbone, cuda, save_interval, max_steps,
            phase, db,db_write,suffix)
        self.trace = PriorTrace(discount=0.99)  # accumulates data for priority computation
    def _action(self, eps=None):
        """Epsilon-greedy action that also records the network's value estimate
        and the chosen action into the priority trace."""
        eps = self.eps if eps is None else eps
        with torch.no_grad():
            ob = torch.from_numpy(self.ob).cuda().float() if self.cuda else torch.from_numpy(self.ob).float()
            v, a = self.alg.value_action(ob)
        if random.random() < eps:
            a = self.env.action_space.sample()
        else:
            a = a.item()
        self.trace.add_value(v)
        self.trace.add_action(a)
        return a
    def _simulate_with_train(self):
        """Like BasicWorker._simulate_with_train, but also feeds rewards/dones
        into the trace and flushes transitions together with their priorities."""
        self.ob = self.reset()
        done, episod_len, episod_rw, episod_real_rw = False, 0, 0, 0
        while not done and episod_len < self.max_steps:
            a = self._action()
            next_ob, rw, done, info = self.step(a)
            self.trace.add_reward(rw)
            self.trace.add_done(done)
            self.cache.append({"state": self.ob, "action": a, "next_state": next_ob, "reward": rw, "done": done})
            self.ob = next_ob
            episod_len += 1
            self.save_count += 1
            episod_rw += rw
            episod_real_rw += info["reward"]
            if self.save_count % self.save_interval == 0:
                # NOTE(review): this extra _action() call appears to append one
                # more value to the trace (a bootstrap value for the last
                # transition's priority) - confirm against PriorTrace.prior().
                self._action()
                if len(self.sche):
                    self.sche.wait()
                prior = self.trace.prior()
                assert len(prior) == len(self.cache), "prior len {} not match cache len {}".format(len(prior), len(self.cache))
                self.sche.add(None, self.db_write, db=self.db, data=self.cache, prior=prior)
                self.cache.clear()
                self.save_count = 0
                self.trace.reinit()
        self.info["episod_rw"] = episod_rw
        self.info["episod_real_rw"] = episod_real_rw
        self.info["episod_len"] = episod_len
        if "total_env_steps" in self.info:
            self.info["total_env_steps"] += episod_len
        else:
            self.info["total_env_steps"] = episod_len
        return self.info
| fangyuedong/rainbow-with-ray | agent.py | agent.py | py | 8,661 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "utils.atari_wrapper.w... |
6255000766 | from python_graphql_client import GraphqlClient
import feedparser
import httpx
import json
import pathlib
import re
import os
import datetime
root = pathlib.Path(__file__).parent.resolve()
client = GraphqlClient(endpoint="https://api.github.com/graphql")
TOKEN = os.environ.get("GH_TOKEN", "")
def replace_chunk(content, marker, chunk, inline=False):
    """Replace the region between '<!-- marker starts -->' and '<!-- marker ends -->'.

    :param content: full document text
    :param marker: marker name (regex metacharacters are escaped)
    :param chunk: replacement text; wrapped in newlines unless inline=True
    :return: content with the marked region rewritten (markers preserved)
    """
    # re.escape keeps markers containing regex metacharacters from corrupting
    # the pattern (the original interpolated `marker` unescaped).
    r = re.compile(
        r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(re.escape(marker), re.escape(marker)),
        re.DOTALL,
    )
    if not inline:
        chunk = "\n{}\n".format(chunk)
    chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
    # Callable replacement so backslashes in `chunk` are taken literally
    # (a plain string replacement would interpret \g<...>, \1, etc.).
    return r.sub(lambda m: chunk, content)
def formatGMTime(timestamp):
    """Parse an RFC-822 style GMT timestamp and return its date in UTC+8."""
    parsed = datetime.datetime.strptime(timestamp, '%a, %d %b %Y %H:%M:%S GMT')
    shifted = parsed + datetime.timedelta(hours=8)
    return shifted.date()
def make_query(after_cursor=None):
    """Return the GraphQL query for the viewer's public repos (with latest
    release), paginated after `after_cursor` (or from the start if None)."""
    return """
    query {
      viewer {
        repositories(first: 100, privacy: PUBLIC, after:AFTER) {
          pageInfo {
            hasNextPage
            endCursor
          }
          nodes {
            name
            description
            url
            releases(last:1) {
              totalCount
              nodes {
                name
                publishedAt
                url
              }
            }
          }
        }
      }
    }
    """.replace(
        "AFTER", '"{}"'.format(after_cursor) if after_cursor else "null"
    )
def fetch_blog_entries():
    """Fetch the cnblogs RSS feed and return a list of
    {'title', 'url', 'published'} dicts, one per entry."""
    feed = feedparser.parse("http://feed.cnblogs.com/blog/u/678190/rss/")
    results = []
    for entry in feed["entries"]:
        results.append({
            "title": entry["title"],
            # Drop any fragment from the link.
            "url": entry["link"].split("#")[0],
            # Keep only the date portion of the ISO timestamp.
            "published": entry["published"].split("T")[0],
        })
    return results
if __name__ == "__main__":
    # Rewrite the "blog" section of README.md with the 5 most recent posts.
    readme = root / "README.md"
    readme_contents = readme.open().read()
    entries = fetch_blog_entries()[:5]
    entries_md = "\n".join(
        ["* <a href='{url}' target='_blank'>{title}</a> - {published}".format(**entry) for entry in entries]
    )
    rewritten = replace_chunk(readme_contents, "blog", entries_md)
    readme.open("w").write(rewritten)
| wintermorn1ng/wintermorn1ng | build_readme.py | build_readme.py | py | 2,078 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "python_graphql_client.GraphqlClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.en... |
42631122942 | from tkinter.filedialog import askopenfilename, asksaveasfile
import numpy as np
import matplotlib.pyplot as plt
class Calibration:
    """Loads a pixel->nanometer calibration table and fits a polynomial model.

    The calibration file contains one "pixel nm" pair per whitespace-separated
    line.  After fitting, the model is pushed to the attached plot object.
    """
    def __init__(self, defaultPath="calibration_files/kalib.txt", plot=None):
        self.filepath = defaultPath
        self.nm = []       # wavelengths (nm), one per calibration point
        self.pixels = []   # pixel positions, one per calibration point
        self.model = None  # numpy poly1d mapping pixel -> nm
        self.plot = None
        self.loadFile()
        self.initPlot(plot)
        # NOTE(review): calculateModel() calls self.plot.initModel(); if plot
        # is None this raises AttributeError - confirm callers pass a plot.
        self.calculateModel()
    def initPlot(self, plot):
        """initializes plot for calib class to access"""
        self.plot = plot
    def __str__(self):
        """Returns the table as text, one "pixel nm" pair per line."""
        return "".join([str(self.pixels[i]) + " " + str(self.nm[i]) + '\n' for i in range(len(self.pixels))])
    def chooseFile(self):
        """Opens an OS dialog to choose the calibration file; a cancelled
        dialog leaves filepath as None."""
        self.filepath = askopenfilename(filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")])
        if self.filepath == "":
            self.filepath = None
    def saveFile(self):
        """Saves the calibration table via an OS save dialog."""
        file = asksaveasfile(mode='w', defaultextension=".txt")
        if file is None:
            return
        file.write(self.__str__())
        file.close()
    def pushArrays(self, file):
        """Parses "pixel nm" lines from an iterable of lines into the arrays."""
        self.pixels = [float(line.split()[0]) for line in file]
        self.nm = [float(line.split()[1]) for line in file]
    def loadFile(self):
        """Reads the calibration table from self.filepath."""
        if self.filepath is None:
            raise Exception("Please choose file")
        with open(self.filepath, mode='r') as file:
            # self.calibrationChart = [[float(column) for column in line.split()] for line in file.readlines()]
            fileCopy = file.readlines()
            self.pushArrays(fileCopy)
    def loadFileFromApp(self, file):
        """Parses a calibration table pasted as text in the app; returns False
        (leaving arrays partially filled) on any non-numeric token.
        NOTE(review): isnumeric() rejects negative numbers - confirm intended."""
        self.pixels = []
        self.nm = []
        for line in file.split('\n'):
            elem = line.split()
            if len(elem) != 2:
                continue
            if not elem[0].replace('.', '').isnumeric() or not elem[1].replace('.', '').isnumeric():
                return False
            self.pixels.append(float(elem[0]))
            self.nm.append(float(elem[1]))
        return True
    def calculateModel(self, polynomDegree=3):
        """Fits a degree-`polynomDegree` polynomial pixels->nm and hands it to the plot."""
        self.model = np.poly1d(np.polyfit(self.pixels, self.nm, polynomDegree))
        self.plot.initModel(self.model)
    def getModel(self):
        """Returns the fitted poly1d model (None before fitting)."""
        return self.model
class CalibrationRender:
    """Renders a calibration's points, fitted curve, and residuals with matplotlib."""
    def __init__(self):
        self.maxx = None    # right x-limit (pixels) incl. margin
        self.minx = None    # left x-limit (pixels) incl. margin
        self.offset = None  # x margin around the data
    def render(self, calibration):
        """Opens a figure with the fit (top) and residuals nm - model(px) (bottom)."""
        fig = plt.figure(" Spectrometer calibration chart ",
                         figsize=(10, 6),
                         facecolor='xkcd:mint green',
                         edgecolor='r',
                         linewidth=4)
        if calibration.model is None:
            raise Exception("No Calibration!")
        # Residuals of the fit at each calibration point.
        legendPos = calibration.nm - calibration.model(calibration.pixels)
        ax1 = plt.subplot(211)
        ax3 = plt.subplot(212)
        self.offset = np.max(calibration.pixels) / len(calibration.pixels)
        self.minx = np.min(calibration.pixels) - self.offset
        self.maxx = np.max(calibration.pixels) + self.offset
        polyline = self._createScatterplot(ax1, ax3, legendPos, calibration)
        ax1.plot(polyline, calibration.model(polyline), color='green')
        plt.xlim([self.minx, self.maxx])
        self._setax1(ax1)
        self._setax3(ax3)
        fig.show()
    def _createScatterplot(self, ax1, ax3, legendPos, calibration):
        # Scatter the calibration points on both axes, zero line on residuals.
        ax1.scatter(calibration.pixels, calibration.nm, label="Kalibračný bod", color="red", marker="o", s=150)
        ax3.plot([0, 1300], [0, 0], color="green")
        ax3.plot(calibration.pixels, legendPos, label="Kalibračná krivka", color="blue")
        ax3.scatter(calibration.pixels, legendPos, label="Kalibračný bod", color="red", marker="o", s=150)
        # Dense x grid for drawing the smooth fitted curve.
        return np.linspace(self.minx, self.maxx, 800)
    @staticmethod
    def _setax1(ax1):
        # Labels/styling for the fit axis.
        ax1.set_xlabel('x - px')
        ax1.set_ylabel('y - nm')
        ax1.set_title('Calibration chart')
        ax1.grid(color='w')
        ax1.set_facecolor((0.9, 0.9, 0.9))
        ax1.legend()
    @staticmethod
    def _setax3(ax3):
        # Labels/styling for the residual axis.
        ax3.set_xlabel('x - px')
        ax3.set_ylabel('y - nm')
        ax3.grid(color='w')
        ax3.set_facecolor((0.9, 0.9, 0.9))
        ax3.legend()
class CalibrationHandler:
    """Thin facade wiring UI file choices into a Calibration object."""
    def __init__(self):
        pass

    @staticmethod
    def calibrateFromFile(calibration, polynomeDegree=3):
        """Let the user pick a calibration file via the OS dialog, then fit.

        Silently returns when the dialog is cancelled.
        """
        calibration.chooseFile()
        # chooseFile() normalizes a cancelled dialog to None (never ""), so
        # check for None - the original `== ""` guard could never fire and
        # loadFile() would raise on a cancelled dialog.
        if calibration.filepath is None:
            return
        calibration.loadFile()
        calibration.calculateModel(polynomeDegree)

    @staticmethod
    def calibrateFromApp(calibration, file, polynomeDegree=3):
        """Fit the model from text pasted in the app; no-op when parsing fails."""
        if calibration.loadFileFromApp(file):
            calibration.calculateModel(polynomeDegree)
| TIS2022-FMFI/spektroskop-mikroskop | gui_widgets/Calibration.py | Calibration.py | py | 5,407 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.asksaveasfile",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.poly1d",
"line_number": 69,
"usage_type": "call"
},
{
"... |
5299224856 | # -*- coding: utf-8 -*-
import torch as t
import sys
sys.path.append("../")
from models import HRcanNet
import cv2
import numpy as np
import h_psnr
import os
import time
class CIR:
    """Compression-artifact removal wrapper around a serialized torch model."""
    def __init__(self, weights_file="../weights/dejpeg_HRcanNet2_1_33_best.pth"):
        # Loads a whole pickled model (not a state_dict); requires CUDA.
        self._device = 'cuda'
        # self._net = HSISRNet().to(self._device)
        self._net = t.load(weights_file).to(self._device)
        self._net.eval()
    def query(self, img):
        """Run the network on an HxWxC uint8 image and return uint8 HxWxC.
        Assumes channel-last input in [0, 255] - confirm caller's color order."""
        image = img.astype(np.float32)
        image = t.from_numpy(image).to(self._device) / 255
        image = image.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW batch of 1
        with t.no_grad():
            preds = self._net(image).clamp(0.0, 1.0)
        preds = preds.mul(255.0).cpu().numpy().squeeze(0).transpose(1, 2, 0)
        del image
        return preds.astype(np.uint8)
    def query_file(self, imagename):
        """Read an image from disk (BGR via cv2) and restore it."""
        image = cv2.imread(imagename)
        return self.query(image)
def main(s_mp4=None):
    """Restore an mp4 video frame-by-frame with CIR and write an MJPG .avi.

    Only roughly the first 30 seconds are processed (see the `ts > 30` break).
    """
    scale = 1
    if s_mp4 is None:
        s_mp4 = "d:/workroom/testroom/156_45.mp4"
    if '.mp4' not in s_mp4:
        raise Exception("input video must be mp4 format")
    cap = cv2.VideoCapture(s_mp4)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)*scale)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)*scale)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    # Output written next to the input, with an _IR.avi suffix.
    out = cv2.VideoWriter(s_mp4.replace('.mp4', '_IR.avi'), fourcc, fps, (width, height))
    net = CIR()
    count = 0
    starttime = time.time()
    while True:
        if count % 25 == 0:
            print('frame id ', count)
        ret, frame = cap.read()
        if ret is not True:
            break
        # Timestamp of the current frame, in seconds.
        ts = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
        print('ts :', ts)
        print("cost time : ", time.time() - starttime)
        pred_img = net.query(frame)
        # cv2.imshow("1", pred_img)
        # cv2.waitKey()
        out.write(pred_img)
        count += 1
        if ts > 30:
            break
    out.release()
    cap.release()
def testjpg():
    """Visual sanity check: JPEG-compress a test image at quality 15 and show it."""
    src = cv2.imread("d:/workroom/testroom/old.png")
    degraded = np.copy(src)
    # randint(15, 16) always yields 15; kept for parity with the training code.
    quality = np.random.randint(15, 16)
    ret, lr_buf = cv2.imencode(".jpg", degraded, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    degraded = cv2.imdecode(lr_buf, 1)
    cv2.imshow("1", degraded)
    cv2.waitKey()
if __name__ == "__main__":
    # testjpg()
    # exit(0)
    print("Hi, this is video IR program!")
    # Candidate low-quality inputs; currently unused (default clip is processed).
    mp4s = ['d:/workroom/testroom/lowquality_video/dm_3_1920x1080.mp4',
            'd:/workroom/testroom/lowquality_video/dy_1_1920x1080.mp4',
            'd:/workroom/testroom/lowquality_video/lq_3_1920x1080.mp4']
    # for mp4 in mp4s:
    #     main(s_mp4=mp4)
    main()
    print('done')
| riverlight/HSR | scripts/ir_video.py | ir_video.py | py | 2,697 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_numb... |
29558607777 |
from pathlib import Path
from . import data
from . import plot
from . import excel
from . import config
from . import latex
import subprocess
class Document:
    """Builds a LaTeX report (figures + tables) and compiles it to PDF.

    Figures are written to tmp_dir as fig-NNNNNN.pdf where NNNNNN is the
    caption index; tables are kept in memory and emitted at generate() time.
    """
    def __init__(self, prefix, title, subtitle, author, date):
        self.prefix = prefix          # output file basename (prefix.tex / prefix.pdf)
        self.title = title
        self.subtitle = subtitle
        self.author = author
        self.date = date
        self.captions = []            # one caption per figure/table, in order
        self.tmp_dir = Path('/tmp')   # where .tex and figures are staged
        self.output_dir = Path('.')   # where the final PDF is copied
        self.tables = {}              # caption index -> (col_format, print_lines, rows)
    def make_table(self, col_format, print_lines, rows):
        """Register a table at the next caption slot (caption settable via caption())."""
        ident = len(self.captions)
        self.tables[ident] = (col_format, print_lines, rows)
        caption = 'To be specified...'
        self.captions.append(caption)
    def make_pie(self, rows, attr, legend = True):
        """Render a pie chart of `rows` grouped by `attr`; returns element count."""
        # Filename index must match the caption index, so compute it before appending.
        filename = 'fig-{:06d}.pdf'.format(len(self.captions))
        plot_data, total = data.pie_data(rows, attr)
        plot_data['legend'] = legend
        caption = '{} elements, gathered by {}'.format(total, attr)
        self.captions.append(caption)
        plot.pie(self.tmp_dir / filename, plot_data)
        return total
    def make_histo(self, rows, category_attr, legend = True):
        """Render a histogram of `rows` categorized by `category_attr`; returns count."""
        filename = 'fig-{:06d}.pdf'.format(len(self.captions))
        plot_data, total = data.histo_data(rows, category_attr)
        plot_data['legend'] = legend
        caption = '{} elements, categorized by {}.'.format(total, category_attr)
        self.captions.append(caption)
        plot.histo(self.tmp_dir / filename, plot_data)
        return total
    def make_bars(self, rows, category_attr, sort_attr, legend = True, display_values = False):
        """Render a bar chart categorized by `category_attr`, colored by `sort_attr`."""
        filename = 'fig-{:06d}.pdf'.format(len(self.captions))
        plot_data, total = data.bar_data(rows, category_attr, sort_attr)
        plot_data['legend'] = legend
        caption = '{} elements, categorized by {}, colored by {}.'.format(total, category_attr, sort_attr)
        self.captions.append(caption)
        plot.bar(self.tmp_dir / filename, plot_data, display_values)
        return total
    def caption(self, caption):
        """Replace the caption of the most recently added figure/table."""
        self.captions[-1] = caption
    def generate(self):
        """Write the .tex file, run pdflatex in tmp_dir, and copy the PDF out.
        NOTE(review): uses the external `cp` command - not portable to Windows."""
        texname = self.prefix + '.tex'
        pdfname = self.prefix + '.pdf'
        path = self.tmp_dir / texname
        f = path.open('w')
        latex.begin_document(f)
        latex.title(f, self.title, self.subtitle, self.author, self.date)
        for idx, caption in enumerate(self.captions):
            if idx in self.tables:
                col_format, print_lines, rows = self.tables[idx]
                latex.table(f, col_format, print_lines, rows, caption, idx)
            else:
                latex.fig(f, self.tmp_dir / 'fig-{:06d}.pdf'.format(idx), caption, idx)
        latex.end_document(f)
        f.close()
        print('File "{}" generated.'.format(str(path.absolute())))
        cmd = 'pdflatex {}'.format(texname)
        print(cmd)
        subprocess.run(cmd.split(), cwd = self.tmp_dir)
        cmd = 'cp {} {}'.format(pdfname, self.output_dir.absolute())
        subprocess.run(cmd.split(), cwd = self.tmp_dir)
        print()
        print()
        print()
        print('File "{}" generated.'.format((self.output_dir / pdfname).absolute()))
| HerveFrezza-Buet/bilan | pybilan/pybilan/report.py | report.py | py | 3,261 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_n... |
22525702483 | import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
logger = logging.getLogger()
# Snapshot of a training run: model/optimizer/scheduler state plus the
# dataset offset, epoch, and the encoder hyper-parameters it was built with.
CheckpointState = collections.namedtuple(
    "CheckpointState",
    [
        "model_dict",
        "optimizer_dict",
        "scheduler_dict",
        "offset",
        "epoch",
        "encoder_params",
    ],
)
def setup_for_distributed_mode(
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    device: object,
    n_gpu: int = 1,
    local_rank: int = -1,
    fp16: bool = False,
    fp16_opt_level: str = "O1",
) -> (nn.Module, torch.optim.Optimizer):
    """Move the model to `device` and wrap it for multi-GPU / distributed /
    mixed-precision training as requested. Returns (model, optimizer),
    possibly wrapped/re-created by apex amp.
    """
    model.to(device)
    if fp16:
        try:
            import apex
            from apex import amp

            # einsum is not amp-safe by default; register it for half precision.
            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )

        model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)

    if n_gpu > 1:
        # Single-process multi-GPU data parallelism.
        model = torch.nn.DataParallel(model)

    if local_rank != -1:
        # Multi-process distributed training (one process per GPU).
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[device if device else local_rank],
            output_device=local_rank,
            find_unused_parameters=True,
        )
    return model, optimizer
def move_to_cuda(sample):
    """Recursively move every tensor in a nested structure to the GPU.

    Dicts, lists and tuples are rebuilt with their tensor leaves moved to
    CUDA; non-tensor leaves are returned unchanged. An empty structure
    yields {}.
    """
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {key: _move_to_cuda(value) for key, value in maybe_tensor.items()}
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        elif isinstance(maybe_tensor, tuple):
            # Preserve the tuple type (the original returned a list here).
            return tuple(_move_to_cuda(x) for x in maybe_tensor)
        else:
            return maybe_tensor

    return _move_to_cuda(sample)
def move_to_device(sample, device):
    """Recursively move every tensor in a nested structure to `device`.

    Mirrors move_to_cuda but targets an arbitrary device. Dicts, lists and
    tuples are rebuilt; non-tensor leaves pass through. Empty input yields {}.
    """
    if len(sample) == 0:
        return {}

    def _move_to_device(maybe_tensor, device):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.to(device)
        elif isinstance(maybe_tensor, dict):
            return {
                key: _move_to_device(value, device)
                for key, value in maybe_tensor.items()
            }
        elif isinstance(maybe_tensor, list):
            return [_move_to_device(x, device) for x in maybe_tensor]
        elif isinstance(maybe_tensor, tuple):
            # Preserve the tuple type (the original returned a list here).
            return tuple(_move_to_device(x, device) for x in maybe_tensor)
        else:
            return maybe_tensor

    return _move_to_device(sample, device)
def get_schedule_linear(
    optimizer,
    warmup_steps,
    total_training_steps,
    steps_shift=0,
    last_epoch=-1,
):
    """Create a schedule with a learning rate that decreases linearly after
    linearly increasing during a warmup period.
    """

    def lr_lambda(current_step):
        step = current_step + steps_shift
        if step < warmup_steps:
            # Linear ramp from 0 to 1 over the warmup period.
            return float(step) / float(max(1, warmup_steps))
        remaining = float(total_training_steps - step)
        span = float(max(1, total_training_steps - warmup_steps))
        # Linear decay, floored at 1e-7 so the LR never hits exactly zero.
        return max(1e-7, remaining / span)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def init_weights(modules: List):
    """BERT-style in-place init: N(0, 0.02) weights for Linear/Embedding,
    ones/zeros for LayerNorm, and zeroed bias for Linear layers."""
    for m in modules:
        if isinstance(m, nn.LayerNorm):
            m.bias.data.zero_()
            m.weight.data.fill_(1.0)
        elif isinstance(m, (nn.Linear, nn.Embedding)):
            m.weight.data.normal_(mean=0.0, std=0.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            m.bias.data.zero_()
def get_model_obj(model: nn.Module):
    """Unwrap a (Distributed)DataParallel wrapper, returning the real module."""
    return getattr(model, "module", model)
def get_model_file(args, file_prefix) -> str:
    """Resolve which checkpoint file to load.

    Prefers an explicitly given, existing args.model_file; otherwise returns
    the newest file in args.output_dir matching `file_prefix*`, or None.
    """
    if args.model_file and os.path.exists(args.model_file):
        return args.model_file
    out_cp_files = (
        glob.glob(os.path.join(args.output_dir, file_prefix + "*"))
        if args.output_dir
        else []
    )
    logger.info("Checkpoint files %s", out_cp_files)
    model_file = None
    if len(out_cp_files) > 0:
        # Most recently created checkpoint wins.
        model_file = max(out_cp_files, key=os.path.getctime)
    return model_file
def load_states_from_checkpoint(model_file: str) -> CheckpointState:
    """Deserialize a checkpoint file (forced onto CPU) into a CheckpointState.

    The saved dict's keys must match the CheckpointState fields exactly.
    """
    logger.info("Reading saved model from %s", model_file)
    state_dict = torch.load(
        model_file, map_location=lambda s, l: default_restore_location(s, "cpu")
    )
    logger.info("model_state_dict keys %s", state_dict.keys())
    return CheckpointState(**state_dict)
| microsoft/LMOps | uprise/DPR/dpr/utils/model_utils.py | model_utils.py | py | 4,819 | python | en | code | 2,623 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.n... |
41330510557 | from databases import Database
from sqlalchemy import select, func
from typing import Dict
from db import models
from schemas import companies_members as schema_cm
from utils.exceptions import MyExceptions
class GenericDatabase:
    """Base repository: holds the async database handle and table references
    shared by the concrete repository/service classes."""
    def __init__(self, db: Database):
        self.db = db
        self.company_members_table = models.company_members
        self.company_table = models.companies
        self.users_table = models.users
        self.quiz_table = models.quiz
        self.question_table = models.question
        self.avarage_mark_table=models.avarage_mark
        # NOTE(review): `uiz_result_table` looks like a typo and is bound to
        # the companies table, not quiz results - confirm whether it is used.
        self.uiz_result_table = models.companies
        self.quiz_result_table=models.quiz_result
        self.exception = MyExceptions
    async def insert_values(self, table, values: Dict):
        """Insert a row of `values` into `table`; returns the execute() result."""
        return await self.db.execute(table.insert().values(values))
    async def company_members_join_company(self):
        """Build (not execute) a select of company id/name plus membership
        admin flag and active-from date, joining members to companies."""
        query_joins = self.company_members_table.join(self.company_table)
        query = select(
            self.company_table.c.id.label('company_id'),
            self.company_table.c.name.label('company_name'),
            self.company_members_table.c.is_company_admin,
            self.company_members_table.c.active_member.label('active_from'),
        ).select_from(query_joins)
        return query
{
"api_name": "databases.Database",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "db.models.company_members",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "db.models... |
71015536355 | # Title: 감소하는 수
# Link: https://www.acmicpc.net/problem/1038
import itertools
import sys
sys.setrecursionlimit(10 ** 6)
def read_list_int():
    """Read one stdin line of single-space-separated integers into a list."""
    tokens = sys.stdin.readline().strip().split(' ')
    return [int(tok) for tok in tokens]
def read_single_int():
    """Read one stdin line as a single integer."""
    line = sys.stdin.readline()
    return int(line.strip())
def add_to_one(d_number):
    """Step from one 'decreasing number' to a candidate next one.

    Works digit-by-digit (least significant first) with a carry that forces
    each result digit to stay below the next more-significant digit.
    NOTE(review): this routine is superseded by get_nth_decreasing_numbers
    (its call site in __main__ is commented out) - correctness unverified.
    Returns -1 once the largest decreasing number 9876543210 is reached.
    """
    if d_number == 9876543210:
        return -1
    r_str = []
    # Digits of the current number, least significant first.
    d_str = list(reversed(str(d_number)))
    carry = 0
    # The increment: 1 followed by zeros, aligned to the digit count.
    num = '1' + '0'*(len(d_str)-1)
    for i, d in enumerate(d_str):
        if i+1 == len(d_str):
            # Most significant digit: on overflow, grow the number by seeding
            # two low digits from the current result length.
            r = int(d) + int(num[i]) + carry
            if r >= 10:
                r_str.append(str(len(r_str)))
                r_str.append(str(len(r_str)))
            else:
                r_str.append(str(r))
        else:
            r = int(d) + int(num[i]) + carry
            if r < int(d_str[i+1]):
                # Still strictly below the next digit: keep it, clear carry.
                r_str.append(str(r))
                carry = 0
            else:
                # Would violate the decreasing property: carry and pick the
                # smallest digit larger than the previous result digit.
                carry = 1
                if len(r_str) == 0:
                    r_str.append('0')
                else:
                    r_str.append(str(int(r_str[-1])+1))
    return int(''.join(reversed(r_str)))
def get_nth_number(N):
    """Apply add_to_one N times starting from 0; -1 once the sequence ends.

    NOTE(review): unused (its call in __main__ is commented out in favor of
    get_nth_decreasing_numbers).
    """
    d_number = 0
    for _ in range(N):
        d_number = add_to_one(d_number)
        if d_number == -1:
            return -1
    return d_number
def get_nth_decreasing_numbers(N):
    """Return the N-th (0-indexed) 'decreasing number' - a number whose digits
    strictly decrease left to right - or -1 when N is out of range (>= 1023)."""
    found = []
    for length in range(1, 11):
        # Every length-k combination of '9876543210' is a decreasing number.
        batch = [int(''.join(digits)) for digits in itertools.combinations('9876543210', length)]
        batch.sort()
        found.extend(batch)
        if len(found) > N:
            return found[N]
    return -1
if __name__ == '__main__':
    # Baekjoon 1038: print the N-th decreasing number (0-indexed), or -1.
    N = read_single_int()
    # print(get_nth_number(N))
    print(get_nth_decreasing_numbers(N))
{
"api_name": "sys.setrecursionlimit",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.read... |
32105962555 | #!/usr/bin/env python3
"""Contains function sum_list"""
from typing import Union, List
def sum_mixed_list(mxd_list: List[Union[int, float]]) -> float:
    """Return the sum of a list of ints and floats, always as a float.

    :param mxd_list: list of ints and/or floats (may be empty)
    :return: the total as a float (0.0 for an empty list)
    """
    # Built-in sum replaces the manual loop; the original also shadowed the
    # builtin name `sum` with its accumulator variable.
    return float(sum(mxd_list))
| Caesar-12/alx-backend-python | 0x00-python_variable_annotations/6-sum_mixed_list.py | 6-sum_mixed_list.py | py | 292 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 6,
"usage_type": "name"
}
] |
36250456309 | #bfs1 단지번호 붙이기
from collections import deque
# Read an n x n grid of 0/1 characters; 1 marks a house.
n = int(input())
maps = []
for _ in range(n):
    tmp = [int(i) for i in input()]
    maps.append(tmp)
def bfs(coord,maps,visited):
    """BFS flood-fill from `coord`; marks visited cells and returns the size
    of the connected component of 1-cells (one housing complex).
    Relies on the module-level grid size `n`."""
    dirs = [(1,0),(-1,0),(0,1),(0,-1)]
    queue = deque()
    queue.append((coord))
    count = 0
    while queue:
        y,x = queue.popleft()
        # If this cell has not been visited yet
        if visited[y][x] == 0:
            visited[y][x] = 1
            count += 1
            # Explore up/down/left/right neighbors belonging to the same complex
            for _dir in dirs:
                new_y,new_x = y + _dir[0], x + _dir[1]
                if new_x >= 0 and new_x < n and \
                    new_y >= 0 and new_y < n and visited[new_y][new_x] == 0 and maps[new_y][new_x] ==1:
                    new_coord = (new_y, new_x)
                    queue.append(new_coord)
    return count
# Scan every cell; each unvisited 1-cell starts a new complex.
visited = [[0 for _ in range(n)] for _ in range(n)]
count = 0
answer = []
for y in range(n):
    for x in range(n):
        if visited[y][x] == 0 and maps[y][x] == 1:
            # Total number of complexes
            count +=1
            coord = (y,x)
            answer.append(bfs(coord,maps,visited))
# Print complex count, then each complex size in ascending order.
answer.sort()
print(count)
print("\n".join([str(i) for i in answer]))
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
24913233421 | import requests
from get_access_token_by_refresh_token import get_access_token
def get_modules():
    """Fetch CRM module metadata from the Zoho v2 settings API and print it."""
    url = 'https://www.zohoapis.in/crm/v2/settings/modules'
    token = get_access_token()
    headers = {'Authorization': 'Zoho-oauthtoken {}'.format(token)}
    response = requests.get(url=url, headers=headers)
    if response is not None:
        print("HTTP Status Code : " + str(response.status_code))
        print(response.json())


get_modules()
{
"api_name": "get_access_token_by_refresh_token.get_access_token",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
21150388793 | import flet as ft
import views as views_handle
def main(page: ft.Page):
    """Flet entry point: configures the window/theme and wires route changes
    to the views table in views_handle."""
    # Custom font registration
    page.fonts = {
        "SF Pro": "https://raw.githubusercontent.com/google/fonts/master/ofl/sfprodisplay/SFProDisplay-Bold.ttf",
    }
    # Fixed phone-like window size
    page.window_min_width = 425
    page.window_width = 425
    page.window_min_height = 820
    page.window_height = 820
    page.title = "Project 334 Appication"
    page.horizontal_alignment = ft.CrossAxisAlignment.CENTER
    #page.scroll = ft.ScrollMode.HIDDEN
    page.padding = 0
    page.bgcolor = "#ddf7f1"
    page.theme = ft.Theme(font_family="SF Pro")
    page.theme_mode = ft.ThemeMode.LIGHT
    def route_change(route):
        # Rebuild the view stack with the view registered for the new route.
        print(page.route)
        # CLEAR ALL PAGE
        page.views.clear()
        page.views.append(
            views_handle.views(page)[page.route],
        )
        page.update()
    page.on_route_change = route_change
    page.go('/')
ft.app(target = main)
{
"api_name": "flet.Page",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "flet.CrossAxisAlignment",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flet.Theme",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flet.ThemeMode"... |
15718283398 | # import eventlet
# eventlet.monkey_patch()
from flask import Flask, request, jsonify, render_template
from flask_socketio import SocketIO, emit
from flask_cors import CORS
import time
import pandas as pd
import numpy as np
from math import sqrt
import heapq
from copy import deepcopy
import json
# Flask app that also serves the prebuilt React front-end as static files.
app = Flask(__name__, static_url_path='', static_folder='front-end/build')
app.config['SECRET_KEY'] = 'secret!'
# Allow cross-origin requests from any origin (front-end may be hosted elsewhere).
CORS(app, resources={r"/*": {"origins": "*"}})
socketio = SocketIO(app, cors_allowed_origins="*")
@app.route("/http-call")
def http_call():
"""return JSON with string data as the value"""
data = {'data': 'This text was fetched using an HTTP call to server on render'}
return jsonify(data)
@app.route("/")
def index():
return app.send_static_file('index.html')
@socketio.on("connect")
def connected():
"""event listener when client connects to the server"""
print(request.sid)
print("client has connected")
emit("connect", {"data": f"id: {request.sid} is connected"})
# @socketio.on('dimension')
# def get_dimensions(data):
# print("data for dimensions from the front end: ",str(data))
@socketio.on('data')
def handle_message(data):
    """event listener when client types a message"""
    # `data` is a JSON string from the front end; it is expected to carry the
    # keys "dim" (grid size) and "density" (blocked-cell probability) -- see
    # their usage further below in this handler. TODO confirm with the client.
    print("data from the front end: ", (data))
    wjdata = json.loads(data)
    print("data from the front end wjdata: ", (wjdata))
    # emit("data",{'data':data,'id':request.sid},broadcast=True)
    # recursively call and emit data
    # for i in range(20):
    #     socketio.sleep(0.5)
    #     emit("data",{'data':i,'id':request.sid},broadcast=False)
    #     print(i)
def create_grid(p, dim):
    '''
    Build a random dim x dim grid.

    :param p: probability with which a node is blocked
    :param dim: dimension of the matrix
    :return: a 2d list denoting grid where 1 = traversable node, 0 = non traversable
    '''
    # One RNG draw per cell, in row-major order; a cell is open exactly when
    # its draw exceeds the blocking probability.
    grid = [[1 if np.random.random_sample() > p else 0 for _ in range(dim)]
            for _ in range(dim)]
    # Start and goal corners are always traversable.
    grid[0][0] = 1
    grid[dim - 1][dim - 1] = 1
    return grid
class Node:
    '''
    A node class that stores 5 things for a node - position, parent, g(n), h(n), f(n)
    '''

    def __init__(self, parent=None, position=None):
        '''Initialise with the given parent/position and zeroed path costs.'''
        self.parent = parent
        self.position = position
        self.g = 0
        self.f = 0
        self.h = 0

    def __eq__(self, node):
        '''Two nodes are equal when their (row, col) positions match.'''
        return (self.position[0] == node.position[0]
                and self.position[1] == node.position[1])

    def __lt__(self, other):
        '''Order nodes by total cost f(n); used to break ties in the heap.'''
        return self.f < other.f
def generate_children(grid, knowledge_grid, fringe, visited_list, current, all_moves, end_node, is_gridknown):
    '''
    Generate all valid, unvisited, unblocked neighbours of `current`.

    :param grid: the original actual grid
    :param knowledge_grid: the agent's knowledge grid
    :param fringe: priority-queue contents (unused here; kept for interface)
    :param visited_list: dict of already-expanded positions (value "Added")
    :param current: node being expanded
    :param all_moves: list of (drow, dcol) moves
    :param end_node: goal node, used for the heuristic
    :param is_gridknown: "No" -> plan on knowledge_grid, otherwise on grid
    :return: list of child Nodes with g/h/f filled in
    '''
    row, col = current.position
    dim = len(grid)
    # Plan on the knowledge grid when the real grid is unknown to the agent.
    world = knowledge_grid if is_gridknown == "No" else grid
    goal_row, goal_col = end_node.position
    children = []
    for d_row, d_col in all_moves:
        nr = row + d_row
        nc = col + d_col
        # Discard moves that leave the grid boundaries.
        if not (0 <= nr < dim and 0 <= nc < dim):
            continue
        child = Node(current, (nr, nc))
        # Keep only unblocked cells that have not been expanded yet.
        if world[nr][nc] != 0 and visited_list.get(child.position) != "Added":
            child.g = current.g + 1
            # Manhattan distance heuristic to the goal.
            child.h = abs(nr - goal_row) + abs(nc - goal_col)
            child.f = child.g + child.h
            children.append(child)
    return children
def search(grid, fringe, knowledge_grid, start_position, end_position, is_gridknown):
    '''
    A* search from start_position to end_position.

    :param grid: the original actual grid
    :param fringe: list of all processed nodes (reset inside; kept for interface)
    :param knowledge_grid: the knowledge grid
    :param start_position: start position in grid
    :param end_position: end position in grid
    :param is_gridknown: parameter to switch between grid and knowledge grid
    :return: ("Solvable", path) with path in goal-to-start order, or
             ("Unsolvable", []) when the fringe empties first
    '''
    startNode = Node(None, start_position)
    endNode = Node(None, end_position)
    fringe = []
    visited_nodes = {}
    already_fringed = {}  # a hashmap to keep track of fringed nodes and its lowest cost
    already_fringed[startNode.position] = startNode.f
    # pushing start node in fringe; ties on f are broken by Node.__lt__
    heapq.heappush(fringe, (startNode.f, startNode))
    all_moves = [[1, 0],   # move down a row
                 [0, 1],   # move right
                 [-1, 0],  # move up a row
                 [0, -1]]  # move left
    path = []
    while fringe:  # while fringe is not empty
        current = heapq.heappop(fringe)  # popping node from fringe
        current = current[1]
        # mark current node as expanded
        visited_nodes[current.position] = "Added"
        if current.position == endNode.position:
            i = current
            while (i is not None):  # walk parent links to recover the path (goal -> start)
                path.append(i.position)
                i = i.parent
            return "Solvable", path
        children = generate_children(  # otherwise generate children
            grid, knowledge_grid, fringe, visited_nodes, current, all_moves, endNode, is_gridknown)
        if children:
            for node in children:
                if node.position in already_fringed:  # child was fringed before:
                    # re-push only if this route is strictly cheaper
                    if already_fringed[node.position] > node.f:
                        already_fringed[node.position] = node.f
                        heapq.heappush(fringe, (node.f, node))
                else:
                    # first time we see this position: push it and record its cost
                    heapq.heappush(fringe, (node.f, node))
                    already_fringed[node.position] = node.f
    return "Unsolvable", path
# ---- main driver: repeated forward A* with incremental discovery ----
fringe = []
dim = int(wjdata["dim"])
is_gridknown = "No"  # the agent plans on its knowledge grid, not the real one
density = float(wjdata["density"])
# create a grid with entered density and dim values.
grid = create_grid(density, dim)
# assuming unblocked for all cells
# intialise knowledge grid to all 1's
knowledge_grid = [[1 for _ in range(dim)] for _ in range(dim)]
im = None  # NOTE(review): never used after this assignment
# copy of the real grid used purely for visualisation on the front end;
# cell codes: 0 blocked, 1 free, 2 visited, 3 agent, 4 discovered block
pltGrid = deepcopy(grid)
pltGrid[0][0] = 3
prevPos = [0, 0]
start = (0, 0)
end = (dim-1, dim-1)
all_moves = [[1, 0],
             [0, 1],
             [-1, 0],
             [0, -1]]
# initial field of view: reveal blocked neighbours of the start cell
for a_move in all_moves:
    child_x = start[0] + a_move[0]
    child_y = start[1] + a_move[1]
    if (child_x > dim-1 or child_x < 0 or child_y > dim-1 or child_y < 0):
        continue
    else:
        if (grid[child_x][child_y] == 0):
            # update the knowledge grid with field of view
            knowledge_grid[child_x][child_y] = 0
ll, path = search(grid, fringe, knowledge_grid, start, end, is_gridknown)
final_path = []
if (ll != "Unsolvable" and is_gridknown == "No"):
    while (len(path) > 1 and ll != "Unsolvable"):
        count = 0
        flag = 0
        # traverse the path obtained from search function to see if blocked cell exists or not.
        # If blocked cell exists, run search function again to calculate the path
        # Continue in this while loop -1) either path returned is 0 that means nothing left in fringe and no path to reach goal 2) or path exists to reach goal
        for i in path[::-1]:  # path is goal->start, so walk it reversed (start->goal)
            count += 1
            # reveal blocked neighbours of the cell we are standing on
            for a_move in all_moves:
                child_x = i[0] + a_move[0]
                child_y = i[1] + a_move[1]
                if (child_x > dim-1 or child_x < 0 or child_y > dim-1 or child_y < 0):
                    continue
                else:
                    if (grid[child_x][child_y] == 0):
                        knowledge_grid[child_x][child_y] = 0
            final_path.append((i[0], i[1]))
            if (grid[i[0]][i[1]] == 0):
                pltGrid[i[0]][i[1]] = 4  # blocked in grid
                final_path.pop()  # the blocked cell was never actually entered
                knowledge_grid[i[0]][i[1]] = 0  # updating knowledge_grid
                # replan from the cell preceding the blocked one on the old path
                new_start_position = path[path.index(
                    i)+1][0], path[path.index(i)+1][1]
                ll, path = search(grid, fringe, knowledge_grid,
                                  new_start_position, end, is_gridknown)
                finalresult = ll
                break
            # animate: previous cell becomes "visited", current becomes "agent"
            pltGrid[prevPos[0]][prevPos[1]] = 2
            pltGrid[i[0]][i[1]] = 3
            prevPos = [i[0], i[1]]
            socketio.sleep(0.5)  # pace the animation for the front end
            # print("pltGrid", pltGrid)
            emit("data", {'data': pltGrid,
                          'id': request.sid, 'status': "Solving..."}, broadcast=False)
            if (count == len(path)):
                # walked the whole current path without hitting a block -> done
                emit("data", {'data': pltGrid,
                              'id': request.sid, 'status': "Reached the destination!"}, broadcast=False)
                print("Solved")
                flag = 1
                break
        if (flag == 1):
            return final_path, knowledge_grid
            break  # NOTE(review): unreachable -- follows a return
        if (ll == "Unsolvable"):
            emit("data", {'data': pltGrid, 'status': "Unsolvable"
                          }, broadcast=False)
            print("Unsolvable")
            # return [], knowledge_grid
        if (flag != 1):
            # NOTE(review): `finalresult` is only bound after a replan; if this
            # branch is reached without one it raises NameError -- confirm.
            print("finalresult", finalresult)
            # return [], knowledge_grid
elif (is_gridknown == "Yes"):
    print(ll)
    print("path", path)
else:
    emit("data", {'data': pltGrid, 'status': "Unsolvable"
                  }, broadcast=False)
    print("Unsolvable")
# mark the traversed cells on the real grid
for (i, j) in final_path:
    grid[i][j] = 2
# return [], knowledge_grid
@socketio.on("disconnect")
def disconnected():
"""event listener when client disconnects to the server"""
print("user disconnected")
emit("disconnect", f"user {request.sid} disconnected", broadcast=True)
if __name__ == '__main__':
    # Run the SocketIO development server (debug mode) on port 5001.
    socketio.run(app, debug=True, port=5001)
| GunjanAS/RepeatedAStar-algo-visualizer | app.py | app.py | py | 13,501 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask_socketio.SocketIO",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
... |
42956395054 | __all__ = ["load", "save", "load_single_particle_model", "load_multi_particle_model"]
import os
import numpy as np
import tensorflow as tf
import deeptrack as dt
from tqdm import tqdm
import glob
import pandas as pd
def load(filename: str):
    """Dispatch loading based on what *filename* points at.

    Directories -> load_dir, ``.py`` files -> load_python,
    anything else -> load_video.
    """
    # Directories take priority regardless of any extension-like suffix.
    if os.path.isdir(filename):
        return load_dir(filename)
    # os.path.splitext is robust for names without an extension and for dots
    # in parent directories; the old `filename.split(os.path.extsep)` was not
    # (e.g. "./file" yielded ext == "/file").
    ext = os.path.splitext(filename)[1].lstrip(os.path.extsep)
    if ext == "py":
        return load_python(filename)
    return load_video(filename)
def load_python(filename):
    """Placeholder for loading from a Python description file.

    Not implemented yet; returns None.
    """
    pass
def load_video(filename):
    """Read an entire video file into a float ndarray of frames.

    :param filename: path to a video readable by OpenCV
    :return: np.ndarray of shape (n_frames, height, width, 3)
    """
    import cv2

    cap = cv2.VideoCapture(filename)
    try:
        n_frames, width, height = (
            int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
            int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )
        # OpenCV frames are (height, width, 3); the original allocated
        # (width, height, 3), which fails on assignment for non-square videos.
        frames = np.empty((n_frames, height, width, 3))
        print("Reading video...")
        for t in tqdm(range(n_frames)):
            # NOTE(review): the success flag from cap.read() is ignored; a
            # short/corrupt file would leave trailing frames uninitialised.
            _, frames[t] = cap.read()
    finally:
        # Always release the capture handle (the original leaked it).
        cap.release()
    return frames
def load_dir(filename):
    """Load every ``.npy`` array and ``.csv`` table found in a directory.

    :param filename: directory path
    :return: (numpy array reshaped to (-1, *last_three_axes), concatenated DataFrame)
    """
    npy_paths = glob.glob(os.path.join(filename, "*.npy"))
    csv_paths = glob.glob(os.path.join(filename, "*.csv"))

    arrays = np.stack([np.load(path) for path in npy_paths], axis=0)
    tables = pd.concat([pd.read_csv(path) for path in csv_paths])

    # Flatten any leading batch dimensions, keeping the last three axes intact.
    arrays = np.reshape(arrays, (-1, *arrays.shape[-3:]))
    return arrays, tables
def load_image(filename):
    """Placeholder for loading a single image file.

    Not implemented yet; returns None.
    """
    pass
def save(model, args):
    """Save *model* under checkpoints/<prefix><basename of args.filename>.

    Never overwrites: appends ``_1``, ``_2``, ... until an unused directory
    name is found, then creates it and saves the model there.
    """
    base = f"checkpoints/{args.prefix}{os.path.split(args.filename)[-1]}"
    candidate = base
    suffix = 0
    while os.path.exists(candidate):
        suffix += 1
        candidate = f"{base}_{suffix}"
    candidate = os.path.normcase(candidate)
    os.makedirs(candidate, exist_ok=True)
    model.save(candidate)
def load_single_particle_model(path):
    """Load a saved single-particle AutoTracker Keras model from *path*.

    NOTE(review): body is identical to load_multi_particle_model; the two are
    presumably kept separate as API placeholders that may diverge -- confirm.
    """
    return tf.keras.models.load_model(
        path,
        custom_objects={
            "AutoTrackerModel": dt.models.autotrack.AutoTracker.AutoTrackerModel
        },
    )
def load_multi_particle_model(path):
    """Load a saved multi-particle AutoTracker Keras model from *path*.

    NOTE(review): body is identical to load_single_particle_model; the two are
    presumably kept separate as API placeholders that may diverge -- confirm.
    """
    return tf.keras.models.load_model(
        path,
        custom_objects={
            "AutoTrackerModel": dt.models.autotrack.AutoTracker.AutoTrackerModel
        },
    )
| BenjaminMidtvedt/AutoTracking | autotracker/fs.py | fs.py | py | 2,200 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_... |
29928571724 |
import networkx as nx
import numpy as np
import torch
from datanetAPI import DatanetAPI
POLICIES = np.array(['WFQ', 'SP', 'DRR'])
def sample_to_dependency_graph(sample):
    """Convert a DatanetAPI sample into a path/link/queue dependency graph.

    Builds a directed graph with three node families:
      ``p_*`` -- one per (src, dst) traffic flow ("path"),
      ``l_*`` -- one per physical link on a route,
      ``q_*`` -- one per QoS queue attached to a link,
    then relabels each family with dense indices and returns
    (graph, n_queues, n_paths, n_links).
    """
    G = nx.DiGraph(sample.get_topology_object())
    R = sample.get_routing_matrix()  # R[src, dst] is the hop sequence of the route
    T = sample.get_traffic_matrix()
    P = sample.get_performance_matrix()
    D_G = nx.DiGraph()
    for src in range(G.number_of_nodes()):
        for dst in range(G.number_of_nodes()):
            if src != dst:
                # path node: traffic statistics plus measured performance
                D_G.add_node('p_{}_{}'.format(src, dst),
                             traffic=T[src, dst]['Flows'][0]['AvgBw'],
                             packets=T[src, dst]['Flows'][0]['PktsGen'],
                             tos=int(T[src, dst]['Flows'][0]['ToS']),
                             source=src,
                             destination=dst,
                             drops=float(P[src, dst]['AggInfo']['PktsDrop']) / float(T[src, dst]['Flows'][0]['PktsGen']),
                             delay=float(P[src, dst]['AggInfo']['AvgDelay']),
                             jitter=float(P[src, dst]['AggInfo']['Jitter']))
                if G.has_edge(src, dst):
                    # link node: capacity plus scheduling policy index into POLICIES
                    D_G.add_node('l_{}_{}'.format(src, dst),
                                 capacity=G.edges[src, dst]['bandwidth'],
                                 policy=np.where(G.nodes[src]['schedulingPolicy'] == POLICIES)[0][0])
                # walk consecutive hop pairs (h_1 -> h_2) along this flow's route
                for h_1, h_2 in [R[src, dst][i:i + 2] for i in range(0, len(R[src, dst]) - 1)]:
                    D_G.add_edge('p_{}_{}'.format(src, dst), 'l_{}_{}'.format(h_1, h_2))
                    if 'schedulingWeights' in G.nodes[h_1]:
                        q_w = str(G.nodes[h_1]['schedulingWeights']).split(',')
                    else:
                        q_w = ['-']  # '-' marks "no weights configured"
                    if 'tosToQoSqueue' in G.nodes[h_1]:
                        map = [m.split(',') for m in str(G.nodes[h_1]['tosToQoSqueue']).split(';')]
                    else:
                        map = [['0'], ['1'], ['2']]  # default ToS -> queue mapping
                    q_n = 0
                    for q in range(G.nodes[h_1]['levelsQoS']):
                        # queue node per QoS level of the outgoing link
                        D_G.add_node('q_{}_{}_{}'.format(h_1, h_2, q),
                                     priority=q_n,
                                     weight=float(q_w[q]) if q_w[0] != '-' else 0)
                        D_G.add_edge('l_{}_{}'.format(h_1, h_2), 'q_{}_{}_{}'.format(h_1, h_2, q))
                        # flow <-> queue edges when the flow's ToS maps to this queue
                        if str(int(T[src, dst]['Flows'][0]['ToS'])) in map[q]:
                            D_G.add_edge('p_{}_{}'.format(src, dst), 'q_{}_{}_{}'.format(h_1, h_2, q))
                            D_G.add_edge('q_{}_{}_{}'.format(h_1, h_2, q), 'p_{}_{}'.format(src, dst))
                        q_n += 1
    # drop dangling nodes (no outgoing edges), e.g. links unused by every flow
    D_G.remove_nodes_from([node for node, out_degree in D_G.out_degree() if out_degree == 0])
    # relabel q_*/p_*/l_* nodes with dense per-family indices
    n_q = 0
    n_p = 0
    n_l = 0
    mapping = {}
    for entity in list(D_G.nodes()):
        if entity.startswith('q'):
            mapping[entity] = ('q_{}'.format(n_q))
            n_q += 1
        elif entity.startswith('p'):
            mapping[entity] = ('p_{}'.format(n_p))
            n_p += 1
        elif entity.startswith('l'):
            mapping[entity] = ('l_{}'.format(n_l))
            n_l += 1
    D_G = nx.relabel_nodes(D_G, mapping)
    return D_G, n_q, n_p, n_l
def generator(data_dir, label, shuffle=False):
    """Yield (features_dict, targets) pairs built from a DatanetAPI dataset.

    :param data_dir: dataset directory consumed by DatanetAPI
    :param label: node attribute used as the target (e.g. 'delay' or 'jitter')
    :param shuffle: whether DatanetAPI should shuffle samples
    """
    tool = DatanetAPI(data_dir, [], shuffle)
    it = iter(tool)
    for sample in it:
        D_G, n_q, n_p, n_l = sample_to_dependency_graph(sample)
        # NOTE(review): np.append copies the whole array on every call, making
        # these accumulations quadratic; plain lists would be linear.
        link_to_path = np.array([], dtype='int32')
        queue_to_path = np.array([], dtype='int32')
        l_p_s = np.array([], dtype='int32')
        l_q_p = np.array([], dtype='int32')
        path_ids = np.array([], dtype='int32')
        # per-path adjacency: which links/queues each path traverses, in order
        for i in range(n_p):
            l_s_l = 0
            q_s_l = 0
            for elem in D_G['p_{}'.format(i)]:
                if elem.startswith('l_'):
                    link_to_path = np.append(link_to_path, int(elem.replace('l_', '')))
                    l_s_l += 1
                elif elem.startswith('q_'):
                    queue_to_path = np.append(queue_to_path, int(elem.replace('q_', '')))
                    q_s_l += 1
            path_ids = np.append(path_ids, [i] * q_s_l)
            l_p_s = np.append(l_p_s, range(l_s_l))
            l_q_p = np.append(l_q_p, range(q_s_l))
        # per-queue adjacency: the paths feeding each queue
        path_to_queue = np.array([], dtype='int32')
        sequence_queues = np.array([], dtype='int32')
        for i in range(n_q):
            seq_len = 0
            for elem in D_G['q_{}'.format(i)]:
                path_to_queue = np.append(path_to_queue, int(elem.replace('p_', '')))
                seq_len += 1
            sequence_queues = np.append(sequence_queues, [i] * seq_len)
        # per-link adjacency: the queues attached to each link
        queue_to_link = np.array([], dtype='int32')
        sequence_links = np.array([], dtype='int32')
        l_q_l = np.array([], dtype='int32')
        for i in range(n_l):
            seq_len = 0
            for elem in D_G['l_{}'.format(i)]:
                queue_to_link = np.append(queue_to_link, int(elem.replace('q_', '')))
                seq_len += 1
            sequence_links = np.append(sequence_links, [i] * seq_len)
            l_q_l = np.append(l_q_l, range(seq_len))
        # skip degenerate samples containing a zero delay/jitter measurement
        if 0 in list(nx.get_node_attributes(D_G, 'jitter').values()) or 0 in list(nx.get_node_attributes(D_G, 'delay').values()):
            continue
        yield {"traffic": list(nx.get_node_attributes(D_G, 'traffic').values()),
               "packets": list(nx.get_node_attributes(D_G, 'packets').values()),
               "capacity": list(nx.get_node_attributes(D_G, 'capacity').values()),
               "policy": list(nx.get_node_attributes(D_G, 'policy').values()),
               "priority": list(nx.get_node_attributes(D_G, 'priority').values()),
               "weight": [w / 100 for w in list(nx.get_node_attributes(D_G, 'weight').values())],
               "link_to_path": link_to_path,
               "queue_to_path": queue_to_path,
               "path_to_queue": path_to_queue,
               "queue_to_link": queue_to_link,
               "sequence_queues": sequence_queues,
               "sequence_links": sequence_links,
               "path_ids": path_ids,
               "l_p_s": l_p_s,
               "l_q_p": l_q_p,
               "l_q_l": l_q_l,
               "n_queues": n_q,
               "n_links": n_l,
               "n_paths": n_p,
               }, list(nx.get_node_attributes(D_G, label).values())
# torch dataset
class GenDataset(torch.utils.data.Dataset):
    """Streaming torch Dataset that wraps `generator`.

    Each __getitem__ pulls the *next* sample from the underlying generator
    (the index argument is ignored) and converts every feature to a tensor.
    """

    def __init__(self, data_dir, label, shuffle, sample=4000, transform=None):
        """
        :param data_dir: dataset directory passed through to `generator`
        :param label: target attribute name ('delay', 'jitter', ...)
        :param shuffle: whether to shuffle the underlying dataset
        :param sample: nominal dataset length reported by __len__
        :param transform: optional callable applied to each (x, y) pair
        """
        # Fix: the original called `super(GenDataset).__init__()`, which builds
        # an *unbound* super object and never initialises the base Dataset.
        super().__init__()
        self.data_dir = data_dir
        self.label = label
        self.shuffle = shuffle
        self.transform = transform
        self.len = sample
        self.data_gen = generator(data_dir, label, shuffle)

    def __getitem__(self, idx):
        # `idx` is ignored: samples are consumed sequentially from the generator.
        tempx, tempy = next(self.data_gen)
        # Convert each feature to the dtype the downstream model expects.
        tempx['traffic'] = torch.tensor(tempx['traffic'], dtype=torch.float32)  # list of float
        tempx['packets'] = torch.tensor(tempx['packets'], dtype=torch.float32)
        tempx['capacity'] = torch.tensor(tempx['capacity'], dtype=torch.float32)
        tempx['policy'] = torch.tensor(tempx['policy'], dtype=torch.int64)  # index
        tempx['priority'] = torch.tensor(tempx['priority'], dtype=torch.int64)
        tempx['weight'] = torch.tensor(tempx['weight'], dtype=torch.float32)
        tempx['link_to_path'] = torch.tensor(tempx['link_to_path'], dtype=torch.int64)
        tempx['queue_to_path'] = torch.tensor(tempx['queue_to_path'], dtype=torch.int64)
        tempx['path_to_queue'] = torch.tensor(tempx['path_to_queue'], dtype=torch.int64)
        tempx['queue_to_link'] = torch.tensor(tempx['queue_to_link'], dtype=torch.int64)
        tempx['sequence_links'] = torch.tensor(tempx['sequence_links'], dtype=torch.int32)
        tempx['sequence_queues'] = torch.tensor(tempx['sequence_queues'], dtype=torch.int64)
        tempx['path_ids'] = torch.tensor(tempx['path_ids'], dtype=torch.int32)
        tempx['l_p_s'] = torch.tensor(tempx['l_p_s'], dtype=torch.int64)
        tempx['l_q_p'] = torch.tensor(tempx['l_q_p'], dtype=torch.int64)
        tempx['l_q_l'] = torch.tensor(tempx['l_q_l'], dtype=torch.int64)
        tempx['n_queues'] = torch.tensor(tempx['n_queues'], dtype=torch.int32)
        tempx['n_links'] = torch.tensor(tempx['n_links'], dtype=torch.int32)
        tempx['n_paths'] = torch.tensor(tempx['n_paths'], dtype=torch.int32)
        tempy = torch.tensor(tempy)
        if self.transform:
            tempx, tempy = self.transform(tempx, tempy)
        return tempx, tempy

    def map(self, map_func):
        """Consume one sample from the generator and apply *map_func* to it."""
        x, y = next(self.data_gen)
        return map_func(x, y)

    def __len__(self):
        # Nominal length only; the generator may actually yield fewer samples.
        return self.len
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_... |
18720834678 | #길이제한 해주는 프로그램 GPT3 최대 2048개의 토큰 처리 가능
import pandas as pd
from transformers import GPT2Tokenizer
def trim_to_combined_token_limit(csv_file, max_tokens=2048):
    """Trim Title/Content/Comments in *csv_file* so their combined GPT-2 token
    count fits within *max_tokens*, writing the result to 'trimmed_dataset.csv'.

    Content is trimmed from its end and Comments from their beginning,
    always shortening whichever of the two is currently longer.
    """
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    df = pd.read_csv(csv_file)
    for idx, row in df.iterrows():
        # convert 'nan' or empty inputs to ""
        row['Title'] = row['Title'] if row['Title'] and str(row['Title']).lower() != 'nan' else ""
        row['Content'] = row['Content'] if row['Content'] and str(row['Content']).lower() != 'nan' else ""
        row['Comments'] = row['Comments'] if row['Comments'] and str(row['Comments']).lower() != 'nan' else ""
        tokenized_title = tokenizer.encode(row['Title'], truncation=False)
        tokenized_content = tokenizer.encode(row['Content'], truncation=False)
        tokenized_comments = tokenizer.encode(row['Comments'], truncation=False)
        # If the combined tokens exceed the limit, trim the 'content' from the end
        # and the 'comments' from the beginning until they fit
        # NOTE(review): removing one token per iteration via slicing is O(n^2)
        # in the overflow size; fine for short posts, slow for very long ones.
        while len(tokenized_title) + len(tokenized_content) + len(tokenized_comments) > max_tokens:
            if len(tokenized_content) > len(tokenized_comments):
                tokenized_content = tokenized_content[:-1]
            else:
                tokenized_comments = tokenized_comments[1:]
        # Write the (possibly shortened) text back into the frame.
        df.loc[idx, 'Title'] = tokenizer.decode(tokenized_title)
        df.loc[idx, 'Content'] = tokenizer.decode(tokenized_content)
        df.loc[idx, 'Comments'] = tokenizer.decode(tokenized_comments)
    df.to_csv('trimmed_dataset.csv', index=False)


# Run on the merged "hot article" export when executed as a script.
trim_to_combined_token_limit('everytime_hotarticle_merged.csv')
| Dumul-KJH/everytime_ai | everytime_length.py | everytime_length.py | py | 1,693 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "transformers.GPT2Tokenizer.from_pretrained",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Tokenizer",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
}
] |
73782221473 | # coding: utf-8
from pycparser import c_parser, c_ast
from cflags_loader import read_compiler_dflags_lib, get_cpp_dflags
from c_preprocessing import preprocess
def parse_text(text):
    """Parse preprocessed C source text into a pycparser AST."""
    return c_parser.CParser().parse(text, filename="<none>")
def get_ast(grouprep, filename, include_paths, cpp_flags_file):
    """Preprocess *filename* with the group's -D flags, then parse it."""
    dflags = read_compiler_dflags_lib(cpp_flags_file)
    group_flags = get_cpp_dflags(grouprep, dflags)
    preprocessed = preprocess(filename, group_flags, include_paths)
    return parse_text(preprocessed)
def get_asts_for_all_files(grouprep, include_paths, source_files, cpp_flags_file):
    """Parse every file in *source_files*, returning {filename: ast}.

    The original abused ``print(...) or (filename, ast)`` inside the dict()
    generator to smuggle a progress side effect in; an explicit loop is clearer.
    """
    asts = {}
    for filename in source_files:
        # NOTE(review): the message contains no placeholder, so it literally
        # prints "Processing (unknown)" each time; kept byte-identical, but it
        # probably was meant to include `filename` -- confirm with the authors.
        print(f"Processing (unknown)")
        asts[filename] = get_ast(grouprep, filename, include_paths, cpp_flags_file)
    return asts
def ast_rec_iterator(node, tag="", level=0):
    """Depth-first generator over (tag, node) pairs of a pycparser AST."""
    yield tag, node
    # Leaf values (e.g. strings) have no children() method: yield nothing more.
    for child_tag, child in getattr(node, "children", lambda: [])():
        yield from ast_rec_iterator(child, child_tag, level + 1)
def find_function_definitions(ast):
    """Return (name, storage, node) for every function definition in *ast*.

    :raises AssertionError: if any function is defined more than once.
    """
    from collections import Counter

    decl_list = [(node.decl.name, node.decl.storage, node)
                 for _, node in ast_rec_iterator(ast)
                 if isinstance(node, c_ast.FuncDef)]
    # Counter gives O(n) duplicate detection; the old per-name list.count()
    # loop was O(n^2).
    counts = Counter(name for name, _storage, _node in decl_list)
    for name, count in counts.items():
        if count != 1:
            # Explicit raise (same exception type as the old bare assert) so
            # the check survives `python -O`, which strips assert statements.
            raise AssertionError(f"Function {name} is declared {count} times!")
    return decl_list
def find_function_call_names(ast):
    """Collect the names of all function calls and identifiers in *ast*."""
    call_names = {
        node.name.name
        for _, node in ast_rec_iterator(ast)
        if isinstance(node, c_ast.FuncCall)
    }
    identifier_names = {
        node.name
        for _, node in ast_rec_iterator(ast)
        if isinstance(node, c_ast.ID)
    }
    return identifier_names | call_names
| sa2c/shoplifter | files_and_functions/ast_parsing.py | ast_parsing.py | py | 1,801 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pycparser.c_parser.CParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pycparser.c_parser",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "cflags_loader.read_compiler_dflags_lib",
"line_number": 15,
"usage_type": "call"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.