code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import sys
class CommandError(Exception):
    """Raised by commands to signal a user-facing failure.

    The text is also kept on ``message`` (in addition to the standard
    exception args) so callers can print it directly.
    """

    def __init__(self, message):
        self.message = message
        super().__init__(message)
class BaseCommand:
    """Skeleton for an optparse-based command-line command.

    Subclasses supply the option parser via ``get_optparser`` and the
    actual work via ``handle``; ``run`` wires them together.
    """

    def run(self):
        """Parse argv and dispatch to handle(); exit with status 1 on CommandError."""
        parser = self.get_optparser()
        options, names = parser.parse_args()
        try:
            self.handle(names, options)
        except CommandError as err:
            # Show the failure, remind the user of the usage line, and bail out.
            print(err.message)
            parser.print_usage()
            sys.exit(1)

    def handle(self, args, options):
        """Execute the command. Subclasses must override."""
        raise NotImplementedError

    def get_optparser(self):
        """Return a configured optparse.OptionParser. Subclasses must override."""
        raise NotImplementedError
| [
"sys.exit"
] | [((428, 439), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (436, 439), False, 'import sys\n')] |
import unittest.mock
import pytest
import requests
from lektor_twitter_embed import _init_params, _tweet
def _mock_request_valid(url, params):
return {"html": "<blockquote..."}
def _mock_request_exception(url, params):
    """Stand-in for _send_request that always fails with an HTTP error."""
    raise requests.exceptions.HTTPError()
# A real tweet URL used as the fixture throughout these tests.
_tweet_url = "https://twitter.com/thisstuartlaws/status/1353838316198756352"


def test_init_params_none():
    """With no user params, only the url and the dnt default are set."""
    expected = {"url": _tweet_url, "dnt": "true"}
    assert _init_params(_tweet_url, None) == expected
def test_init_params_dict():
    """User-supplied params are merged in, overriding the dnt default."""
    supplied = {"dnt": "false", "align": "center"}
    expected = {"url": _tweet_url, "dnt": "false", "align": "center"}
    assert _init_params(_tweet_url, supplied) == expected
@unittest.mock.patch("lektor_twitter_embed._send_request", _mock_request_valid)
def test_tweet_valid():
    """A successful oEmbed lookup returns the embed HTML verbatim."""
    result = _tweet(_tweet_url)
    assert result == "<blockquote..."
@unittest.mock.patch("lektor_twitter_embed._send_request", _mock_request_exception)
def test_tweet_exception_no_fallback():
    """Without a fallback, the HTTP error propagates to the caller."""
    with pytest.raises(requests.exceptions.HTTPError):
        _tweet(_tweet_url)
@unittest.mock.patch("lektor_twitter_embed._send_request", _mock_request_exception)
def test_tweet_exception_with_fallback():
    """On error with fallback=True, a plain anchor to the tweet is returned."""
    expected = f'<a href="{_tweet_url}">{_tweet_url}</a>'
    assert _tweet(_tweet_url, fallback=True) == expected
| [
"lektor_twitter_embed._tweet",
"requests.exceptions.HTTPError",
"lektor_twitter_embed._init_params",
"pytest.raises"
] | [((239, 270), 'requests.exceptions.HTTPError', 'requests.exceptions.HTTPError', ([], {}), '()\n', (268, 270), False, 'import requests\n'), ((392, 422), 'lektor_twitter_embed._init_params', '_init_params', (['_tweet_url', 'None'], {}), '(_tweet_url, None)\n', (404, 422), False, 'from lektor_twitter_embed import _init_params, _tweet\n'), ((526, 587), 'lektor_twitter_embed._init_params', '_init_params', (['_tweet_url', "{'dnt': 'false', 'align': 'center'}"], {}), "(_tweet_url, {'dnt': 'false', 'align': 'center'})\n", (538, 587), False, 'from lektor_twitter_embed import _init_params, _tweet\n'), ((795, 813), 'lektor_twitter_embed._tweet', '_tweet', (['_tweet_url'], {}), '(_tweet_url)\n', (801, 813), False, 'from lektor_twitter_embed import _init_params, _tweet\n'), ((969, 1013), 'pytest.raises', 'pytest.raises', (['requests.exceptions.HTTPError'], {}), '(requests.exceptions.HTTPError)\n', (982, 1013), False, 'import pytest\n'), ((1023, 1041), 'lektor_twitter_embed._tweet', '_tweet', (['_tweet_url'], {}), '(_tweet_url)\n', (1029, 1041), False, 'from lektor_twitter_embed import _init_params, _tweet\n'), ((1191, 1224), 'lektor_twitter_embed._tweet', '_tweet', (['_tweet_url'], {'fallback': '(True)'}), '(_tweet_url, fallback=True)\n', (1197, 1224), False, 'from lektor_twitter_embed import _init_params, _tweet\n')] |
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
For Citibike rebancing simulation
"""
import simpy
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
# Simulation parameters. The station follows an (s, S) reorder policy:
# refill to order_up_to whenever the level drops to order_threshold or below.
order_threshold = 2.0  # reorder point s (bikes)
order_up_to = 5.0  # order-up-to level S (bikes)
delivery_delay = 20 # in minutes
SIM_RUN = 1000 # number of simulation runs
initial_bikes = 15  # bikes docked at t = 0
operation_cost = 2 # USD per bike for operation
oil_gas = 3 # USD per refill trip
service_fee = 3 # USD per bike per ride
PENALTY = 3 # USD for cost of loss of business opportunity
loss_profit = 0.1 # USD per bike per minute for loss of business opportunity
def arrival(env, _lambda ,requirement):
    """SimPy process: wait one exponential inter-arrival, then serve one customer
    who either rents or returns a bike.

    Mutates module-level station state (num_bikes, revenue, penalty,
    Loss_profit). `_lambda` is the arrival rate in customers per minute.
    """
    global num_bikes, revenue, penalty, Loss_profit
    interarrival = np.random.exponential(1./_lambda)
    yield env.timeout(interarrival)
    # Opportunity cost accrues on bikes sitting idle during the wait just elapsed.
    Loss_profit += loss_profit * num_bikes * interarrival
    # print 'Arrival @ t={}, require# {}'.format(env.now, requirement)
    if requirement == 'Rent a bike':
        if num_bikes > 0:
            num_bikes -= 1
            revenue += service_fee
        else:
            # No bike available: the customer walks away, business is lost.
            penalty += PENALTY
    else:
        # Any other requirement is treated as a bike return.
        num_bikes += 1
    # print ('current num of bikes = {}'.format(num_bikes))
def rebalancing(env, quantity):
    """SimPy process: order `quantity` bikes and dock them after delivery_delay.

    Charges the per-bike operation cost plus one refill trip. The module-level
    num_ordered doubles as an 'order in flight' flag (non-zero while pending).
    """
    global num_bikes, num_ordered, revenue , cost, Loss_profit
    num_ordered = quantity
    cost += (num_ordered * operation_cost) + oil_gas
    yield env.timeout(delivery_delay)
    num_bikes += num_ordered
    # print (" Fill bikes up to ={}".format(num_bikes))
    num_ordered = 0
def citibike_run(env, _lambda ,requirement, order_up_to, order_threshold):
    """SimPy process: generate an endless stream of one kind of arrival
    (rentals or returns) and trigger a rebalancing order whenever the level
    falls to the reorder point while no order is already in flight.
    """
    global num_bikes, quantity, num_ordered, revenue , cost, penalty, Loss_profit
    num_ordered = 0.0
    quantity = 0.0
    while True:
        yield env.process(arrival(env, _lambda ,requirement))
        get_bikes.append(num_bikes)
        if num_bikes <= order_threshold and num_ordered == 0:
            # Order exactly enough to restore the order-up-to level S.
            quantity = order_up_to - num_bikes
            env.process(rebalancing(env, quantity))
def observe(env):
    """SimPy process: sample the bike level and running balance every 0.2 min."""
    while True:
        obs_time.append(env.now)
        obs_bikes.append(num_bikes)
        # Balance = revenue minus all costs accrued so far.
        obs_balance.append(revenue - cost - penalty - Loss_profit)
        yield env.timeout(0.2)
# Monte-Carlo replication loop: each run reseeds NumPy, resets the station
# state, and simulates 180 minutes of arrivals plus rebalancing.
avg_bikes = []
avg_balance = []
get_balance = []  # NOTE(review): never appended to below — appears unused
for i in range(SIM_RUN):
    np.random.seed(i)
    num_bikes = initial_bikes
    revenue = 0
    cost = 0
    penalty = 0
    Loss_profit = 0
    obs_time = []
    obs_bikes = []
    obs_balance = []
    get_bikes = []
    env = simpy.Environment()
    # Two independent customer streams: renters (rate 1.572/min) and
    # returners (rate 1.183/min), plus the 0.2-min sampler.
    env.process(citibike_run(env, 1.572 ,'Rent a bike', order_up_to, order_threshold))
    env.process(citibike_run(env, 1.183 ,'Return a bike', order_up_to, order_threshold))
    env.process(observe(env))
    env.run(until=180.0) # 180 simulated minutes (original note said 5pm to 8am; 3h suggests 5pm-8pm — confirm)
    avg_bikes.append(np.mean(get_bikes))
    avg_balance.append(revenue - cost - penalty - Loss_profit)
# Multi-run mode: summarize bike levels across all replications.
if SIM_RUN > 1:
    print ("The average number of available bikes during the interval = ", np.mean(avg_bikes))
    plt.figure()
    plt.scatter(range(len(avg_bikes)), avg_bikes, c='b', alpha=0.4)
    plt.xlabel('Simulation runs')
    plt.ylabel('Bike Level')
    plt.title('Average Bike levels at each runs (Threshold= {:.0f}, order-up-to= {:.0f})'.format(order_threshold, order_up_to))
    plt.savefig('Average bike level.png')
    plt.figure()
    plt.hist( avg_bikes, color='g')
    plt.xlabel('X Bin')
    plt.ylabel('Count')
    plt.title(' Histogram (average number of bike)(Threshold= {:.0f}, order-up-to= {:.0f})'.format(order_threshold, order_up_to))
    plt.legend(loc='best')
    plt.savefig('Histogram Average bike level.png')
# Single-run mode: plot the trajectory of the one replication.
if SIM_RUN <= 1:
    plt.figure()
    plt.step(obs_time, obs_bikes, where='post' , color = 'g')
    plt.xlabel('Time (Minutes)')
    plt.ylabel('Bike Level')
    plt.title(' Simulation (Initial bikes = {:.0f}, Threshold = {:.0f}, order-up-to = {:.0f})'.format(initial_bikes, order_threshold, order_up_to))
    plt.savefig('Bikes level (Tshold = {:.0f}, orderut = {:.0f}).png'.format(order_threshold, order_up_to))
    plt.show()
    plt.figure()
    plt.step(obs_time, obs_balance, where='post', color = 'r')
    plt.xlabel('Time (Minutes)')
    plt.ylabel('Balance ($)')
    plt.title('Balance (Threshold = {:.0f}, order-up-to = {:.0f})'.format(order_threshold, order_up_to))
    plt.savefig('balance level (Tshold = {:.0f}, orderut = {:.0f}).png'.format(order_threshold, order_up_to))
    plt.show()
# Two-sided normal confidence interval on the mean balance.
# NOTE(review): the printed label says '200 simulation runs' but SIM_RUN = 1000.
confidence_level = 0.05
z_crit = stats.norm.ppf(1-confidence_level/2)
print ('200 simulation runs = {:.3f} +/- {:.3f} (95% CI)'.format(np.mean(avg_balance), z_crit*stats.sem(avg_balance))) | [
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.stats.norm.ppf",
"numpy.random.exponential",
"simpy.Environment",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"scipy.stats.sem",
"matplotlib.pyplot.step",
... | [((4533, 4573), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['(1 - confidence_level / 2)'], {}), '(1 - confidence_level / 2)\n', (4547, 4573), True, 'import scipy.stats as stats\n'), ((684, 720), 'numpy.random.exponential', 'np.random.exponential', (['(1.0 / _lambda)'], {}), '(1.0 / _lambda)\n', (705, 720), True, 'import numpy as np\n'), ((2310, 2327), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (2324, 2327), True, 'import numpy as np\n'), ((2527, 2546), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (2544, 2546), False, 'import simpy\n'), ((3043, 3055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3053, 3055), True, 'import matplotlib.pyplot as plt\n'), ((3132, 3161), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Simulation runs"""'], {}), "('Simulation runs')\n", (3142, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3190), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bike Level"""'], {}), "('Bike Level')\n", (3176, 3190), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3360), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Average bike level.png"""'], {}), "('Average bike level.png')\n", (3334, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3370, 3382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3380, 3382), True, 'import matplotlib.pyplot as plt\n'), ((3387, 3417), 'matplotlib.pyplot.hist', 'plt.hist', (['avg_bikes'], {'color': '"""g"""'}), "(avg_bikes, color='g')\n", (3395, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3423, 3442), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Bin"""'], {}), "('X Bin')\n", (3433, 3442), True, 'import matplotlib.pyplot as plt\n'), ((3447, 3466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (3457, 3466), True, 'import matplotlib.pyplot as plt\n'), ((3601, 3623), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3611, 3623), True, 'import 
matplotlib.pyplot as plt\n'), ((3628, 3675), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Histogram Average bike level.png"""'], {}), "('Histogram Average bike level.png')\n", (3639, 3675), True, 'import matplotlib.pyplot as plt\n'), ((3709, 3721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3719, 3721), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3780), 'matplotlib.pyplot.step', 'plt.step', (['obs_time', 'obs_bikes'], {'where': '"""post"""', 'color': '"""g"""'}), "(obs_time, obs_bikes, where='post', color='g')\n", (3734, 3780), True, 'import matplotlib.pyplot as plt\n'), ((3788, 3816), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Minutes)"""'], {}), "('Time (Minutes)')\n", (3798, 3816), True, 'import matplotlib.pyplot as plt\n'), ((3821, 3845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bike Level"""'], {}), "('Bike Level')\n", (3831, 3845), True, 'import matplotlib.pyplot as plt\n'), ((4106, 4116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4114, 4116), True, 'import matplotlib.pyplot as plt\n'), ((4126, 4138), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4136, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4199), 'matplotlib.pyplot.step', 'plt.step', (['obs_time', 'obs_balance'], {'where': '"""post"""', 'color': '"""r"""'}), "(obs_time, obs_balance, where='post', color='r')\n", (4151, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Minutes)"""'], {}), "('Time (Minutes)')\n", (4216, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4239, 4264), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Balance ($)"""'], {}), "('Balance ($)')\n", (4249, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4484, 4494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4492, 4494), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2845), 'numpy.mean', 'np.mean', (['get_bikes'], {}), '(get_bikes)\n', (2834, 
2845), True, 'import numpy as np\n'), ((3019, 3037), 'numpy.mean', 'np.mean', (['avg_bikes'], {}), '(avg_bikes)\n', (3026, 3037), True, 'import numpy as np\n'), ((4637, 4657), 'numpy.mean', 'np.mean', (['avg_balance'], {}), '(avg_balance)\n', (4644, 4657), True, 'import numpy as np\n'), ((4666, 4688), 'scipy.stats.sem', 'stats.sem', (['avg_balance'], {}), '(avg_balance)\n', (4675, 4688), True, 'import scipy.stats as stats\n')] |
# Generated by Django 3.0.4 on 2020-04-01 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ADStest model (auto-generated by Django 3.0.4)."""
    dependencies = [
        ('search', '0002_mymodel'),
    ]
    operations = [
        migrations.CreateModel(
            name='ADStest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total', models.IntegerField(default=0)),
                # auto_now_add: stamped once on insert, never auto-updated.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] | [((319, 412), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (335, 412), False, 'from django.db import migrations, models\n'), ((437, 467), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (456, 467), False, 'from django.db import migrations, models\n'), ((498, 537), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (518, 537), False, 'from django.db import migrations, models\n')] |
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV, train_test_split, cross_val_predict, cross_val_score
from sklearn.metrics import f1_score
from sklearn.neural_network import MLPClassifier
np.random.seed(42)  # deterministic results across reruns

def display_scores(scores):
    """Print a cross-validation score array with its mean and spread."""
    print("Scores:", scores)
    print("Mean:", scores.mean())
    print("Standard deviation:", scores.std())
# Columns treated as numeric vs. categorical by the preprocessing pipelines below.
num_attribs = ["Pclass", "Age", "SibSp", "Parch", "Fare"]
cat_attribs = ["Sex", "Embarked"]
# Column-selection helper: Scikit-Learn at this version cannot consume
# DataFrames directly, so this pulls the named columns out as a NumPy array.
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Transformer that selects ``attribute_names`` columns and returns ``.values``."""

    def __init__(self, attribute_names):
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless: there is nothing to learn from the data.
        return self

    def transform(self, X):
        return X[self.attribute_names].values
# Numeric branch: select numeric columns, impute missing values with the
# median, then standardize.
# NOTE(review): Imputer is deprecated in later sklearn (SimpleImputer), and
# LabelBinarizer inside a Pipeline breaks on sklearn >= 0.19 fit signatures.
num_pipeline = Pipeline([
        ('selector', DataFrameSelector(num_attribs)),
        ('imputer', Imputer(strategy="median")),
        ('std_scaler', StandardScaler()),
    ])
# Categorical branch: binary indicator for the Sex column.
cat_pipeline = Pipeline([
        ('selector', DataFrameSelector(["Sex"])),
        ('label_binarizer', LabelBinarizer()),
    ])
# Alternative categorical branch for Embarked (not wired into the union below).
cat_pipeline_emb = Pipeline([
        ('selector', DataFrameSelector(["Embarked"])),
        ('label_binarizer', LabelBinarizer()),
    ])
# Full preprocessing: numeric features plus the Sex indicator.
full_pipeline = FeatureUnion(transformer_list=[
        ("num_pipeline", num_pipeline),
        #("cat_pipeline_emb", cat_pipeline_emb),
        ("cat_pipeline", cat_pipeline),
    ])
def predict_labels(clf, features, target):
    """Return (F1 score, accuracy) of clf's predictions on features vs target."""
    predictions = clf.predict(features)
    accuracy = sum(target == predictions) / float(len(predictions))
    return f1_score(target, predictions), accuracy
#train = pd.read_csv("train.csv")
import itertools  # bug fix: used for hidden_layer_sizes grids below but was never imported

# Load the Titanic training data, keyed by passenger id.
train = pd.read_csv("train.csv", index_col="PassengerId")
train_set, test_set, train_label, test_label = train_test_split(
    train, train["Survived"], random_state=42, train_size=0.8
)
# Fit the preprocessing on the training split only, then reuse the fitted
# transformers on the held-out split: fit_transform on the test split would
# leak its statistics into the imputer/scaler (and could change column order).
train_prepared = full_pipeline.fit_transform(train_set)
test_prepared = full_pipeline.transform(test_set)

from sklearn.linear_model import LogisticRegression

# Candidate hyper-parameters for logistic regression (defined for reference;
# never passed to a grid search below).
param_grid = {
    'max_iter': range(5, 100, 10),
    'C': [0.2, 0.4, 0.6, 0.8, 1.0],
    'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag'],
}

# Baseline: logistic regression with default hyper-parameters.
log_clf = LogisticRegression(random_state=42)
log_clf.fit(train_prepared, train_label)
f1_score(log_clf.predict(train_prepared), train_label)
f1_score(log_clf.predict(test_prepared), test_label)

# Baseline: multi-layer perceptron with default hyper-parameters.
neu_clf = MLPClassifier(random_state=42)
neu_clf.fit(train_prepared, train_label)
f1_score(neu_clf.predict(train_prepared), train_label)
f1_score(neu_clf.predict(test_prepared), test_label)

# NOTE(review): this first grid is immediately overwritten by the next
# assignment; kept for reference only.
parameters = {
    'learning_rate': ["constant", "invscaling", "adaptive"],
    'hidden_layer_sizes': [x for x in itertools.product((10, 20, 30, 40, 50, 100), repeat=3)],
    'activation': ["logistic", "relu", "Tanh"],
}
# Effective grid: regularization strength and iteration budget.
parameters = {
    'alpha': 10.0 ** -np.arange(1, 7),
    'max_iter': range(100, 5000, 100),
}
grid_search = GridSearchCV(MLPClassifier(random_state=42), parameters, cv=5, scoring='f1')
grid_search.fit(train_prepared, train_label)
neu_clf = grid_search.best_estimator_
f1_score(neu_clf.predict(train_prepared), train_label)
f1_score(neu_clf.predict(test_prepared), test_label)

# Re-train the chosen configuration on the full training data before
# predicting the Kaggle test set.
neu_clf = MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
       beta_2=0.999, early_stopping=False, epsilon=1e-08,
       hidden_layer_sizes=(100,), learning_rate='constant',
       learning_rate_init=0.001, max_iter=300, momentum=0.9,
       nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
       solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
       warm_start=False)
neu_clf.fit(full_pipeline.fit_transform(train), train["Survived"])

# Score the Kaggle test set with the pipeline fitted on the full training
# data (transform, not fit_transform — see the leakage note above).
test = pd.read_csv("test.csv", index_col="PassengerId")
test["Survived"] = neu_clf.predict(full_pipeline.transform(test))
test['Survived'].to_csv("result_2.csv")

# A wider (and very expensive) grid over layer sizes.
parameters = {
    'alpha': [0.00008, 0.0001, 0.00012],
    'max_iter': range(100, 1000, 100),
    'hidden_layer_sizes': [x for x in itertools.product((10, 20, 30, 40, 50, 100), repeat=3)],
}
grid_search = GridSearchCV(MLPClassifier(random_state=42), parameters, cv=5, scoring='f1')
grid_search.fit(train_prepared, train_label)
neu_clf = grid_search.best_estimator_
f1_score(neu_clf.predict(train_prepared), train_label)
f1_score(neu_clf.predict(test_prepared), test_label)
f1_score(neu_clf.predict(test_prepared), test_label) | [
"sklearn.pipeline.FeatureUnion",
"sklearn.preprocessing.LabelBinarizer",
"sklearn.metrics.f1_score",
"sklearn.neural_network.MLPClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.Imputer",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessi... | [((445, 463), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (459, 463), True, 'import numpy as np\n'), ((1583, 1683), 'sklearn.pipeline.FeatureUnion', 'FeatureUnion', ([], {'transformer_list': "[('num_pipeline', num_pipeline), ('cat_pipeline', cat_pipeline)]"}), "(transformer_list=[('num_pipeline', num_pipeline), (\n 'cat_pipeline', cat_pipeline)])\n", (1595, 1683), False, 'from sklearn.pipeline import Pipeline, FeatureUnion\n'), ((1966, 2015), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {'index_col': '"""PassengerId"""'}), "('train.csv', index_col='PassengerId')\n", (1977, 2015), True, 'import pandas as pd\n'), ((2068, 2143), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train', "train['Survived']"], {'random_state': '(42)', 'train_size': '(0.8)'}), "(train, train['Survived'], random_state=42, train_size=0.8)\n", (2084, 2143), False, 'from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV, train_test_split, cross_val_predict, cross_val_score\n'), ((2532, 2567), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2550, 2567), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2733, 2763), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2746, 2763), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3515, 3927), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': '"""relu"""', 'alpha': '(0.0001)', 'batch_size': '"""auto"""', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'early_stopping': '(False)', 'epsilon': '(1e-08)', 'hidden_layer_sizes': '(100,)', 'learning_rate': '"""constant"""', 'learning_rate_init': '(0.001)', 'max_iter': '(300)', 'momentum': '(0.9)', 'nesterovs_momentum': '(True)', 'power_t': '(0.5)', 'random_state': '(42)', 'shuffle': '(True)', 'solver': '"""adam"""', 'tol': 
'(0.0001)', 'validation_fraction': '(0.1)', 'verbose': '(False)', 'warm_start': '(False)'}), "(activation='relu', alpha=0.0001, batch_size='auto', beta_1=\n 0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant', learning_rate_init\n =0.001, max_iter=300, momentum=0.9, nesterovs_momentum=True, power_t=\n 0.5, random_state=42, shuffle=True, solver='adam', tol=0.0001,\n validation_fraction=0.1, verbose=False, warm_start=False)\n", (3528, 3927), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3966, 4378), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': '"""relu"""', 'alpha': '(0.0001)', 'batch_size': '"""auto"""', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'early_stopping': '(False)', 'epsilon': '(1e-08)', 'hidden_layer_sizes': '(100,)', 'learning_rate': '"""constant"""', 'learning_rate_init': '(0.001)', 'max_iter': '(300)', 'momentum': '(0.9)', 'nesterovs_momentum': '(True)', 'power_t': '(0.5)', 'random_state': '(42)', 'shuffle': '(True)', 'solver': '"""adam"""', 'tol': '(0.0001)', 'validation_fraction': '(0.1)', 'verbose': '(False)', 'warm_start': '(False)'}), "(activation='relu', alpha=0.0001, batch_size='auto', beta_1=\n 0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant', learning_rate_init\n =0.001, max_iter=300, momentum=0.9, nesterovs_momentum=True, power_t=\n 0.5, random_state=42, shuffle=True, solver='adam', tol=0.0001,\n validation_fraction=0.1, verbose=False, warm_start=False)\n", (3979, 4378), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4491, 4539), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {'index_col': '"""PassengerId"""'}), "('test.csv', index_col='PassengerId')\n", (4502, 4539), True, 'import pandas as pd\n'), ((3244, 3274), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (3257, 3274), False, 'from 
sklearn.neural_network import MLPClassifier\n'), ((4863, 4893), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (4876, 4893), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1851, 1875), 'sklearn.metrics.f1_score', 'f1_score', (['target', 'y_pred'], {}), '(target, y_pred)\n', (1859, 1875), False, 'from sklearn.metrics import f1_score\n'), ((1202, 1228), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (1209, 1228), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((1255, 1271), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1269, 1271), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((1391, 1407), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1405, 1407), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((1536, 1552), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1550, 1552), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((3158, 3173), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (3167, 3173), True, 'import numpy as np\n')] |
import tarfile
import os
# Files to package: path relative to the cwd -> name inside the archive.
tar_content_files = [ {"name": "config", "arc_name": "config"},
                      {"name": "out/chart-verifier", "arc_name": "chart-verifier"} ]

def create(release):
    """Build chart-verifier-<release>.tgz in the cwd and return its full path.

    Any existing archive with the same name is removed first, because
    tarfile mode "x:gz" refuses to overwrite an existing file.
    """
    archive_name = f"chart-verifier-{release}.tgz"
    if os.path.exists(archive_name):
        os.remove(archive_name)
    cwd = os.getcwd()
    with tarfile.open(archive_name, "x:gz") as tar:
        for entry in tar_content_files:
            tar.add(os.path.join(cwd, entry["name"]), arcname=entry["arc_name"])
    return os.path.join(cwd, archive_name)
| [
"os.path.exists",
"os.getcwd",
"tarfile.open",
"os.remove"
] | [((245, 269), 'os.path.exists', 'os.path.exists', (['tgz_name'], {}), '(tgz_name)\n', (259, 269), False, 'import os\n'), ((279, 298), 'os.remove', 'os.remove', (['tgz_name'], {}), '(tgz_name)\n', (288, 298), False, 'import os\n'), ((309, 339), 'tarfile.open', 'tarfile.open', (['tgz_name', '"""x:gz"""'], {}), "(tgz_name, 'x:gz')\n", (321, 339), False, 'import tarfile\n'), ((534, 545), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (543, 545), False, 'import os\n'), ((432, 443), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (441, 443), False, 'import os\n')] |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from unittest.mock import Mock, patch
from opentelemetry.sdk._metrics import MeterProvider
from opentelemetry.sdk._metrics.measurement_consumer import (
MeasurementConsumer,
SynchronousMeasurementConsumer,
)
class TestSynchronousMeasurementConsumer(TestCase):
    """Verify MeterProvider's wiring to SynchronousMeasurementConsumer.

    Most tests patch the consumer class inside the SDK so that constructing a
    MeterProvider installs a mock consumer whose calls can be inspected.
    """

    def test_parent(self):
        # The synchronous consumer must satisfy the MeasurementConsumer interface.
        self.assertIsInstance(
            SynchronousMeasurementConsumer(), MeasurementConsumer
        )

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_measurement_consumer_class(
        self, mock_serial_measurement_consumer
    ):
        # Constructing a MeterProvider must instantiate the consumer class.
        MeterProvider()
        mock_serial_measurement_consumer.assert_called()

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_register_asynchronous_instrument(
        self, mock_serial_measurement_consumer
    ):
        # Every observable (asynchronous) instrument created through a meter
        # must be registered with the provider's measurement consumer.
        meter_provider = MeterProvider()

        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_counter(
                "name", Mock()
            )
        )
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_up_down_counter(
                "name", Mock()
            )
        )
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_gauge(
                "name", Mock()
            )
        )

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_counter(
        self, mock_serial_measurement_consumer
    ):
        # Recording on a counter must forward the measurement to the consumer.
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_counter("name")
        counter.add(1)
        meter_provider._measurement_consumer.consume_measurement.assert_called()

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_up_down_counter(
        self, mock_serial_measurement_consumer
    ):
        # Same forwarding contract for up/down counters.
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_up_down_counter(
            "name"
        )
        counter.add(1)
        meter_provider._measurement_consumer.consume_measurement.assert_called()

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_histogram(
        self, mock_serial_measurement_consumer
    ):
        # Same forwarding contract for histogram recordings.
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_histogram("name")
        counter.record(1)
        meter_provider._measurement_consumer.consume_measurement.assert_called()
| [
"opentelemetry.sdk._metrics.measurement_consumer.SynchronousMeasurementConsumer",
"unittest.mock.patch",
"opentelemetry.sdk._metrics.MeterProvider",
"unittest.mock.Mock"
] | [((1027, 1093), 'unittest.mock.patch', 'patch', (['"""opentelemetry.sdk._metrics.SynchronousMeasurementConsumer"""'], {}), "('opentelemetry.sdk._metrics.SynchronousMeasurementConsumer')\n", (1032, 1093), False, 'from unittest.mock import Mock, patch\n'), ((1277, 1343), 'unittest.mock.patch', 'patch', (['"""opentelemetry.sdk._metrics.SynchronousMeasurementConsumer"""'], {}), "('opentelemetry.sdk._metrics.SynchronousMeasurementConsumer')\n", (1282, 1343), False, 'from unittest.mock import Mock, patch\n'), ((2175, 2241), 'unittest.mock.patch', 'patch', (['"""opentelemetry.sdk._metrics.SynchronousMeasurementConsumer"""'], {}), "('opentelemetry.sdk._metrics.SynchronousMeasurementConsumer')\n", (2180, 2241), False, 'from unittest.mock import Mock, patch\n'), ((2566, 2632), 'unittest.mock.patch', 'patch', (['"""opentelemetry.sdk._metrics.SynchronousMeasurementConsumer"""'], {}), "('opentelemetry.sdk._metrics.SynchronousMeasurementConsumer')\n", (2571, 2632), False, 'from unittest.mock import Mock, patch\n'), ((2995, 3061), 'unittest.mock.patch', 'patch', (['"""opentelemetry.sdk._metrics.SynchronousMeasurementConsumer"""'], {}), "('opentelemetry.sdk._metrics.SynchronousMeasurementConsumer')\n", (3000, 3061), False, 'from unittest.mock import Mock, patch\n'), ((1197, 1212), 'opentelemetry.sdk._metrics.MeterProvider', 'MeterProvider', ([], {}), '()\n', (1210, 1212), False, 'from opentelemetry.sdk._metrics import MeterProvider\n'), ((1471, 1486), 'opentelemetry.sdk._metrics.MeterProvider', 'MeterProvider', ([], {}), '()\n', (1484, 1486), False, 'from opentelemetry.sdk._metrics import MeterProvider\n'), ((2364, 2379), 'opentelemetry.sdk._metrics.MeterProvider', 'MeterProvider', ([], {}), '()\n', (2377, 2379), False, 'from opentelemetry.sdk._metrics import MeterProvider\n'), ((2763, 2778), 'opentelemetry.sdk._metrics.MeterProvider', 'MeterProvider', ([], {}), '()\n', (2776, 2778), False, 'from opentelemetry.sdk._metrics import MeterProvider\n'), ((3186, 3201), 
'opentelemetry.sdk._metrics.MeterProvider', 'MeterProvider', ([], {}), '()\n', (3199, 3201), False, 'from opentelemetry.sdk._metrics import MeterProvider\n'), ((957, 989), 'opentelemetry.sdk._metrics.measurement_consumer.SynchronousMeasurementConsumer', 'SynchronousMeasurementConsumer', ([], {}), '()\n', (987, 989), False, 'from opentelemetry.sdk._metrics.measurement_consumer import MeasurementConsumer, SynchronousMeasurementConsumer\n'), ((1682, 1688), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1686, 1688), False, 'from unittest.mock import Mock, patch\n'), ((1915, 1921), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1919, 1921), False, 'from unittest.mock import Mock, patch\n'), ((2138, 2144), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2142, 2144), False, 'from unittest.mock import Mock, patch\n')] |
import numpy as np
import pandas as pd
import pytest
from sklearn.base import is_classifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearndf.classification import RandomForestClassifierDF
from sklearndf.pipeline import ClassifierPipelineDF
from test.sklearndf.pipeline import make_simple_transformer
def test_classification_pipeline_df(
    iris_features: pd.DataFrame, iris_target_sr: pd.DataFrame
) -> None:
    """ClassifierPipelineDF fits and predicts on DataFrames, and its
    constructor rejects plain (non-DF) scikit-learn components."""
    # Numeric columns get median imputation; object columns get one-hot encoding.
    cls_p_df = ClassifierPipelineDF(
        classifier=RandomForestClassifierDF(),
        preprocessing=make_simple_transformer(
            impute_median_columns=iris_features.select_dtypes(
                include=np.number
            ).columns,
            one_hot_encode_columns=iris_features.select_dtypes(include=object).columns,
        ),
    )
    assert is_classifier(cls_p_df)
    # Smoke-test the fitted pipeline end to end.
    cls_p_df.fit(X=iris_features, y=iris_target_sr)
    cls_p_df.predict(X=iris_features)
    # test-type check within constructor:
    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        ClassifierPipelineDF(
            classifier=RandomForestClassifier(), preprocessing=OneHotEncoder()
        )
| [
"sklearn.base.is_classifier",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.ensemble.RandomForestClassifier",
"pytest.raises",
"sklearndf.classification.RandomForestClassifierDF"
] | [((848, 871), 'sklearn.base.is_classifier', 'is_classifier', (['cls_p_df'], {}), '(cls_p_df)\n', (861, 871), False, 'from sklearn.base import is_classifier\n'), ((1015, 1039), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1028, 1039), False, 'import pytest\n'), ((536, 562), 'sklearndf.classification.RandomForestClassifierDF', 'RandomForestClassifierDF', ([], {}), '()\n', (560, 562), False, 'from sklearndf.classification import RandomForestClassifierDF\n'), ((1131, 1155), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1153, 1155), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1171, 1186), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1184, 1186), False, 'from sklearn.preprocessing import OneHotEncoder\n')] |
from uuid import uuid4
class Products:
def __init__(self, name, price, description, quantity):
self.__products_id = str(uuid4())
self.__image = None
self.__name = name
self.__price = price
self.__description = description
self.__quantity = quantity
def get_products_id(self):
return self.__products_id
def get_image(self):
return self.__image
def get_name(self):
return self.__name
def get_price(self):
return self.__price
def get_description(self):
return self.__description
def get_quantity(self):
return self.__quantity
def set_products_id(self, products_id):
self.__products_id = products_id
def set_image(self, image):
self.__image = image
def set_name(self, name):
self.__name = name
def set_price(self, price):
self.__price = price
def set_description(self, description):
self.__description = description
def set_quantity(self, quantity):
self.__quantity = quantity
| [
"uuid.uuid4"
] | [((132, 139), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (137, 139), False, 'from uuid import uuid4\n')] |
from yaml.serializer import Serializer as YamlSerializer
from yaml.events import DocumentStartEvent, DocumentEndEvent
# override serialzier class to store data needed
# for extra data on anchor lines
class Serializer(YamlSerializer):
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
super().__init__(encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end, version=version,
tags=tags)
self.extra_anchor_data = {}
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.extra_anchor_data = {}
self.last_anchor_id = 0
| [
"yaml.events.DocumentEndEvent",
"yaml.events.DocumentStartEvent"
] | [((770, 873), 'yaml.events.DocumentStartEvent', 'DocumentStartEvent', ([], {'explicit': 'self.use_explicit_start', 'version': 'self.use_version', 'tags': 'self.use_tags'}), '(explicit=self.use_explicit_start, version=self.\n use_version, tags=self.use_tags)\n', (788, 873), False, 'from yaml.events import DocumentStartEvent, DocumentEndEvent\n'), ((977, 1025), 'yaml.events.DocumentEndEvent', 'DocumentEndEvent', ([], {'explicit': 'self.use_explicit_end'}), '(explicit=self.use_explicit_end)\n', (993, 1025), False, 'from yaml.events import DocumentStartEvent, DocumentEndEvent\n')] |
import os.path
import shutil
from django.conf import settings
from django.core import management
from django.core.files import File
from django.template import TemplateSyntaxError
from django.template.loader import render_to_string
from django.test import TestCase
from testapp.models import Item
DATA_DIR = os.path.join(settings.MEDIA_ROOT, 'data')
class BaseTestCase(TestCase):
def setUp(self):
if not os.path.exists(settings.MEDIA_ROOT):
os.makedirs(settings.MEDIA_ROOT)
shutil.copytree(settings.DATA_ROOT, DATA_DIR)
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT)
class ModelTestCase(BaseTestCase):
def test_auto_generating_thumbs(self):
self.item = Item()
self.item.image.save(
'bamboo.png',
File(open(os.path.join(settings.DATA_ROOT, 'bamboo.png'), 'rb')),
save=False
)
# small: 80x80
self.assertTrue(
os.path.exists(
os.path.join(
settings.MEDIA_ROOT,
#'cache/65/04/650496dff97f883e3df125025a2dcd65.jpg'
'cache/3a/83/3a8334660aa38c27220ef11e8681ea06.jpg'
)
)
)
# big: 500x400
self.assertTrue(
os.path.exists(
os.path.join(
settings.MEDIA_ROOT,
#'cache/72/58/7258f6b747cba3161d7866fbb66ccd87.jpg'
'cache/4a/20/4a2010bd55d5605a75cad0338f38f72e.jpg'
)
)
)
class TemplateTestCase(BaseTestCase):
def setUp(self):
super(TemplateTestCase, self).setUp()
self.item, created = Item.objects.get_or_create(
image='data/bamboo.png'
)
def test_templatetag(self):
val = render_to_string('testapp/usethumbnail_big.html', {
'item': self.item,
}).strip()
self.assertEqual(val, '<img width="500" height="400">')
val = render_to_string('testapp/usethumbnail_small.html', {
'item': self.item,
}).strip()
self.assertEqual(val, '<img width="80" height="80">')
with self.assertRaises(TemplateSyntaxError):
val = render_to_string('testapp/usethumbnail_error.html', {
'item': self.item,
}).strip()
class CommandTestCase(BaseTestCase):
def setUp(self):
super(CommandTestCase, self).setUp()
Item.objects.get_or_create(image='data/bamboo.png')
Item.objects.get_or_create(image='data/flower.jpg')
def test_make_thumbnail(self):
management.call_command('make_thumbnails', 'testapp.Item', 'image', verbosity=1)
for f in ('cache/65/04/650496dff97f883e3df125025a2dcd65.jpg',
'cache/72/58/7258f6b747cba3161d7866fbb66ccd87.jpg',
'cache/84/8f/848f13e3183c0ed9bb5d96eb95de70f0.jpg',
'cache/ed/87/ed875125009a8755bc64944268e81557.jpg'):
self.assertTrue(
os.path.exists(os.path.join(settings.MEDIA_ROOT, f))
)
def test_make_thumbnail_force(self):
management.call_command('make_thumbnails', 'testapp.Item', 'image', verbosity=1, force=True)
for f in ('cache/65/04/650496dff97f883e3df125025a2dcd65.jpg',
'cache/72/58/7258f6b747cba3161d7866fbb66ccd87.jpg',
'cache/84/8f/848f13e3183c0ed9bb5d96eb95de70f0.jpg',
'cache/ed/87/ed875125009a8755bc64944268e81557.jpg'):
self.assertTrue(
os.path.exists(os.path.join(settings.MEDIA_ROOT, f))
)
def test_make_thumbnail_error(self):
with self.assertRaises(management.CommandError):
management.call_command('make_thumbnails', 'testapp.Item', verbosity=1)
with self.assertRaises(management.CommandError):
management.call_command('make_thumbnails', 'testapp.Item', 'image', 'arg', verbosity=1)
with self.assertRaises(management.CommandError):
management.call_command('make_thumbnails', 'Item', 'image', verbosity=1)
with self.assertRaises(management.CommandError):
management.call_command('make_thumbnails', 'noapp.Noitem', 'image', verbosity=1)
| [
"testapp.models.Item.objects.get_or_create",
"django.core.management.call_command",
"shutil.copytree",
"testapp.models.Item",
"shutil.rmtree",
"django.template.loader.render_to_string"
] | [((596, 630), 'shutil.rmtree', 'shutil.rmtree', (['settings.MEDIA_ROOT'], {}), '(settings.MEDIA_ROOT)\n', (609, 630), False, 'import shutil\n'), ((731, 737), 'testapp.models.Item', 'Item', ([], {}), '()\n', (735, 737), False, 'from testapp.models import Item\n'), ((1708, 1759), 'testapp.models.Item.objects.get_or_create', 'Item.objects.get_or_create', ([], {'image': '"""data/bamboo.png"""'}), "(image='data/bamboo.png')\n", (1734, 1759), False, 'from testapp.models import Item\n'), ((2474, 2525), 'testapp.models.Item.objects.get_or_create', 'Item.objects.get_or_create', ([], {'image': '"""data/bamboo.png"""'}), "(image='data/bamboo.png')\n", (2500, 2525), False, 'from testapp.models import Item\n'), ((2534, 2585), 'testapp.models.Item.objects.get_or_create', 'Item.objects.get_or_create', ([], {'image': '"""data/flower.jpg"""'}), "(image='data/flower.jpg')\n", (2560, 2585), False, 'from testapp.models import Item\n'), ((2630, 2715), 'django.core.management.call_command', 'management.call_command', (['"""make_thumbnails"""', '"""testapp.Item"""', '"""image"""'], {'verbosity': '(1)'}), "('make_thumbnails', 'testapp.Item', 'image', verbosity=1\n )\n", (2653, 2715), False, 'from django.core import management\n'), ((3155, 3251), 'django.core.management.call_command', 'management.call_command', (['"""make_thumbnails"""', '"""testapp.Item"""', '"""image"""'], {'verbosity': '(1)', 'force': '(True)'}), "('make_thumbnails', 'testapp.Item', 'image',\n verbosity=1, force=True)\n", (3178, 3251), False, 'from django.core import management\n'), ((517, 562), 'shutil.copytree', 'shutil.copytree', (['settings.DATA_ROOT', 'DATA_DIR'], {}), '(settings.DATA_ROOT, DATA_DIR)\n', (532, 562), False, 'import shutil\n'), ((3753, 3824), 'django.core.management.call_command', 'management.call_command', (['"""make_thumbnails"""', '"""testapp.Item"""'], {'verbosity': '(1)'}), "('make_thumbnails', 'testapp.Item', verbosity=1)\n", (3776, 3824), False, 'from django.core import management\n'), 
((3895, 3986), 'django.core.management.call_command', 'management.call_command', (['"""make_thumbnails"""', '"""testapp.Item"""', '"""image"""', '"""arg"""'], {'verbosity': '(1)'}), "('make_thumbnails', 'testapp.Item', 'image', 'arg',\n verbosity=1)\n", (3918, 3986), False, 'from django.core import management\n'), ((4053, 4125), 'django.core.management.call_command', 'management.call_command', (['"""make_thumbnails"""', '"""Item"""', '"""image"""'], {'verbosity': '(1)'}), "('make_thumbnails', 'Item', 'image', verbosity=1)\n", (4076, 4125), False, 'from django.core import management\n'), ((4196, 4281), 'django.core.management.call_command', 'management.call_command', (['"""make_thumbnails"""', '"""noapp.Noitem"""', '"""image"""'], {'verbosity': '(1)'}), "('make_thumbnails', 'noapp.Noitem', 'image', verbosity=1\n )\n", (4219, 4281), False, 'from django.core import management\n'), ((1830, 1900), 'django.template.loader.render_to_string', 'render_to_string', (['"""testapp/usethumbnail_big.html"""', "{'item': self.item}"], {}), "('testapp/usethumbnail_big.html', {'item': self.item})\n", (1846, 1900), False, 'from django.template.loader import render_to_string\n'), ((2011, 2083), 'django.template.loader.render_to_string', 'render_to_string', (['"""testapp/usethumbnail_small.html"""', "{'item': self.item}"], {}), "('testapp/usethumbnail_small.html', {'item': self.item})\n", (2027, 2083), False, 'from django.template.loader import render_to_string\n'), ((2249, 2321), 'django.template.loader.render_to_string', 'render_to_string', (['"""testapp/usethumbnail_error.html"""', "{'item': self.item}"], {}), "('testapp/usethumbnail_error.html', {'item': self.item})\n", (2265, 2321), False, 'from django.template.loader import render_to_string\n')] |
#!/usr/bin/python
## File : strip_comments.py
## Created : <2017-08-03>
## Updated: Time-stamp: <2017-08-03 18:12:22>
## Description :
## For a block of string, remove useless stuff
## 1. Remove leading whitespace
## 2. Remove tailing whitespace
## 3. Remove any lines start with #
##
## Sample:
## export server_list="# server ip
##
## ## APP
## 172.16.31.10:22
## ## loadbalancer
## #172.16.17.32:2711
## #172.16.31.10:2712"
## server_list=$(echo "$server_list" | python ./strip_comments.py)
## server_list: "172.16.31.10:22"
##-------------------------------------------------------------------
import os, sys
def strip_comment(string):
string_list = []
for line in string.split("\n"):
line = line.strip()
if line.startswith("#") or line == "":
continue
string_list.append(line)
return "\n".join(string_list)
if __name__ == '__main__':
string = sys.stdin.read()
print(strip_comment(string))
## File : strip_comments.py ends
| [
"sys.stdin.read"
] | [((1005, 1021), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (1019, 1021), False, 'import os, sys\n')] |
#!/usr/bin/env python
'''
'''
__author__ = 'M@Campbell'
import os
import requests
import json
from cmislib.model import CmisClient
import re
from flask import current_app
class AlfrescoCMIS(object):
def __init__(self):
object.__init__(self)
'''
Set up the config variables.
'''
self.ALFRESCO_URL = current_app.config['ALFRESCO_CMIS_URL'] or \
os.environ.get('ALFRSCO_CMIS_URL')
self.ALFRESCO_UN = current_app.config['ALFRESCO_USERNAME'] or \
os.environ.get('ALFRESCO_USERNAME')
self.ALFRESCO_PW = current_app.config['ALFRESCO_PASSWORD'] or \
os.environ.get('ALFRESCO_PW')
self.ALFRESCO_ID = current_app.config['ALFRESCO_REPO_ID'] or \
os.environ.get('ALFRESCO_REPO_ID')
self.ALFRESCO_TICKET_URL = current_app.config['ALFRESCO_TICKET_URL'] or \
os.environ.get('ALFRSCO_TICKET_URL')
self.ALFRESCO_DL_URL = current_app.config['ALFRESCO_DL_URL'] or \
os.environ.get('ALFRESCO_DL_URL')
self.ALFRESCO_LINK_URL = current_app.config['ALFRESCO_LINK_URL'] or \
os.environ.get('ALFRESCO_LINK_URL')
pass
def make_alfresco_conn(self):
# create the cmis client
client = CmisClient(
self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)
# connect to the alfresco server and return the repo object
repo = client.getRepository(self.ALFRESCO_ID)
# this will be what we work with primarily
# returns a repo object
return repo
def make_alfresco_ticket(self):
# make a post request, which is the preferred method to get
# a alfresco ticket. ticket will last 1 hour.
response = requests.post(self.ALFRESCO_TICKET_URL,
headers={'Content-Type': 'application/json'},
data=json.dumps({'username': self.ALFRESCO_UN,
'password': self.ALFRESCO_PW}))
return response
def make_alfresco_query(self, query):
'''
query the alfresco server for all documents matching the search param.
**CAUTION: THIS MAY TAKE A WHILE TO RESPOND**
'''
# create the cmis client
client = CmisClient(
self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)
# connect to the alfresco server and return the repo object
repo = client.getRepository(self.ALFRESCO_ID)
# use this files connection method
# issue the query
results = repo.query("select * from cmis:document where contains('\"%s\"')" % query)
return results
def get_cruise_documents(self, cruise):
"""
Get_cruise_documents for KN-222
=====Pioneer-3_Leg-1_KN-222_2014-10-03=====
-- 3204-00302_Quick_Look_Cruise_Report_Coastal_Pioneer_3_Leg_1_2015-01-29_Ver_1-00.pdf workspace://SpacesStore/5b22d992-7165-4e5b-80aa-5e6cca46fdd5
-- 3204-00301_Cruise_Plan_Coastal_Pioneer_3_Leg_1_2014-10-02_Ver_1-00.pdf workspace://SpacesStore/afac7be0-8b79-421e-86a9-70fd65f24270
-- 3204-00303_Cruise_Report_Coastal_Pioneer_3_Leg_1_2015-04-28_Ver_1-00.pdf workspace://SpacesStore/30d41068-2714-443a-bd92-660319cf9f68
document.name: 3204-00303_Cruise_Report_Coastal_Pioneer_3_Leg_1_2015-04-28_Ver_1-00.pdf
document.id: workspace://SpacesStore/30d41068-2714-443a-bd92-660319cf9f68
"""
# Create the cmis client
client = CmisClient(self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)
# Connect to the alfresco server and return the repo object
repo = client.getRepository(self.ALFRESCO_ID)
cruise_param = '%'+cruise+'%'
query_folder_id = "select cmis:objectId, cmis:name from cmis:folder where cmis:name like '" +cruise_param+ "'"
folders = repo.query(query_folder_id)
cruise_id = None
documents = []
# Get items in folder
for folder in folders:
cruise_id = folder
cruise_id.type = 'cruise'
query_get_files = "select cmis:objectId, cmis:name from cmis:document where in_folder('%s') order by cmis:lastModificationDate desc" % folder.id
documents = repo.query(query_get_files)
for document in documents:
#print " --", document.name, document.id
document.type = 'cruise'
# UI can only process one set at a time with current response structure.
break
return documents, cruise_id
def make_alfresco_cruise_query(self, array, cruise):
""" Query the alfresco server for all documents relating to a cruise
"""
# create the cmis client
client = CmisClient(self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)
# connect to the alfresco server and return the repo object
repo = client.getRepository(self.ALFRESCO_ID)
# use this files connection method
doc = repo.getObjectByPath("/OOI/"+array+" Array/Cruise Data")
folder_query = "IN_FOLDER('"+doc.id+"')"
array_cruises = repo.query("select * FROM cmis:folder WHERE "+folder_query)
#setup the cruise information
results = []
cruise_id = None
if len(cruise) > 0:
cruise_split = re.split('\s|-|;|,|\*|\n',cruise)
cruise = "-".join(cruise_split)
#unique case...
if cruise == "Scarlett-Isabella":
cruise = "SI"
for r in array_cruises:
cruise_str = r.getName().split("_")[1]
if cruise in cruise_str:
cruise_id = r
break
# Only if the cruise information if its available.
if cruise_id is not None:
cruise_results = repo.query("select * FROM cmis:document where IN_FOLDER('"+cruise_id.id+"')")
for c in cruise_results:
c.type = "cruise"
#add the cruise link
cruise_id.type = "link"
return cruise_results,cruise_id
#return the defaults if not available
return results, cruise_id
def make_alfresco_page_link(self,id,ticket):
'''
creates an alfresco url page link
'''
arrID = id.split('/')
hex_id = arrID[3]
url = self.ALFRESCO_LINK_URL+hex_id
return url
def make_alfresco_download_link(self, id, ticket):
'''
In order to download a document, a specific URL needs to be used.
Each link needs to be passed in the new ticket in order to download.
param: ticket ; this is the ticket that will be used to authenticate
the request
'''
# then lets split out the object id and use use the hex portion . . .
arrID = id.split('/')
hex_id = arrID[3]
# and before we return, we just need to combine all three parts
# to make a nice url for a authenticated user to download.
download_url = ''.join((self.ALFRESCO_DL_URL, '/', hex_id, '?alf_ticket=', ticket))
return download_url
| [
"cmislib.model.CmisClient",
"re.split",
"json.dumps",
"os.environ.get"
] | [((1265, 1330), 'cmislib.model.CmisClient', 'CmisClient', (['self.ALFRESCO_URL', 'self.ALFRESCO_UN', 'self.ALFRESCO_PW'], {}), '(self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)\n', (1275, 1330), False, 'from cmislib.model import CmisClient\n'), ((2228, 2293), 'cmislib.model.CmisClient', 'CmisClient', (['self.ALFRESCO_URL', 'self.ALFRESCO_UN', 'self.ALFRESCO_PW'], {}), '(self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)\n', (2238, 2293), False, 'from cmislib.model import CmisClient\n'), ((3463, 3528), 'cmislib.model.CmisClient', 'CmisClient', (['self.ALFRESCO_URL', 'self.ALFRESCO_UN', 'self.ALFRESCO_PW'], {}), '(self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)\n', (3473, 3528), False, 'from cmislib.model import CmisClient\n'), ((4718, 4783), 'cmislib.model.CmisClient', 'CmisClient', (['self.ALFRESCO_URL', 'self.ALFRESCO_UN', 'self.ALFRESCO_PW'], {}), '(self.ALFRESCO_URL, self.ALFRESCO_UN, self.ALFRESCO_PW)\n', (4728, 4783), False, 'from cmislib.model import CmisClient\n'), ((403, 437), 'os.environ.get', 'os.environ.get', (['"""ALFRSCO_CMIS_URL"""'], {}), "('ALFRSCO_CMIS_URL')\n", (417, 437), False, 'import os\n'), ((522, 557), 'os.environ.get', 'os.environ.get', (['"""ALFRESCO_USERNAME"""'], {}), "('ALFRESCO_USERNAME')\n", (536, 557), False, 'import os\n'), ((642, 671), 'os.environ.get', 'os.environ.get', (['"""ALFRESCO_PW"""'], {}), "('ALFRESCO_PW')\n", (656, 671), False, 'import os\n'), ((755, 789), 'os.environ.get', 'os.environ.get', (['"""ALFRESCO_REPO_ID"""'], {}), "('ALFRESCO_REPO_ID')\n", (769, 789), False, 'import os\n'), ((884, 920), 'os.environ.get', 'os.environ.get', (['"""ALFRSCO_TICKET_URL"""'], {}), "('ALFRSCO_TICKET_URL')\n", (898, 920), False, 'import os\n'), ((1007, 1040), 'os.environ.get', 'os.environ.get', (['"""ALFRESCO_DL_URL"""'], {}), "('ALFRESCO_DL_URL')\n", (1021, 1040), False, 'import os\n'), ((1131, 1166), 'os.environ.get', 'os.environ.get', (['"""ALFRESCO_LINK_URL"""'], {}), "('ALFRESCO_LINK_URL')\n", (1145, 1166), 
False, 'import os\n'), ((5297, 5333), 're.split', 're.split', (['"""\\\\s|-|;|,|\\\\*|\n"""', 'cruise'], {}), "('\\\\s|-|;|,|\\\\*|\\n', cruise)\n", (5305, 5333), False, 'import re\n'), ((1864, 1936), 'json.dumps', 'json.dumps', (["{'username': self.ALFRESCO_UN, 'password': self.ALFRESCO_PW}"], {}), "({'username': self.ALFRESCO_UN, 'password': self.ALFRESCO_PW})\n", (1874, 1936), False, 'import json\n')] |
import os
import sys
import string
import pyfiglet
import random
def save(code):
file = open("combo.txt", 'a')
write_code = code + "\n"
file.write(write_code)
file.close()
os.system("cls")
result = pyfiglet.figlet_format("Discord Combo Maker", font = "slant")
print(result)
print("\n github.com/krytyYT/NitroCodesGen\n\n")
option = str(input(" [?] Generate codes with links? (Y/n): "))
option2 = int(input(" [?] How many?: "))
for x in range(option2):
if option == "y":
code = ('').join(random.choices(string.ascii_letters + string.digits, k=16))
code = "https://discord.gift/" + code
save(code)
print("Generated: " + code)
else:
code = ('').join(random.choices(string.ascii_letters + string.digits, k=16))
save(code)
print("Generated: " + code)
option3 = str(option2)
print("Saved " + option3 + " codes to combo.txt file!")
os.system("pause")
| [
"os.system",
"pyfiglet.figlet_format",
"random.choices"
] | [((202, 218), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (211, 218), False, 'import os\n'), ((229, 288), 'pyfiglet.figlet_format', 'pyfiglet.figlet_format', (['"""Discord Combo Maker"""'], {'font': '"""slant"""'}), "('Discord Combo Maker', font='slant')\n", (251, 288), False, 'import pyfiglet\n'), ((952, 970), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (961, 970), False, 'import os\n'), ((552, 610), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': '(16)'}), '(string.ascii_letters + string.digits, k=16)\n', (566, 610), False, 'import random\n'), ((753, 811), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': '(16)'}), '(string.ascii_letters + string.digits, k=16)\n', (767, 811), False, 'import random\n')] |
from rest_framework.test import APITestCase
from camphoric import models
class RegisterTests(APITestCase):
def setUp(self):
self.organization = models.Organization.objects.create(name="Test Organization")
def test_dataSchema(self):
event = models.Event.objects.create(
organization=self.organization,
name="Test Data Event",
registration_schema={
'type': 'object',
'properties': {
'billing_name': {'type': 'string'},
'billing_address': {'type': 'string'},
},
},
camper_schema={
'type': 'object',
'properties': {
'name': {'type': 'string'},
},
},
)
response = self.client.get(f'/api/events/{event.id}/register')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['dataSchema'], {
'type': 'object',
'definitions': {
'camper': {
'type': 'object',
'properties': {
'name': {'type': 'string'},
},
},
},
'properties': {
'campers': {
'type': 'array',
'items': {
'$ref': '#/definitions/camper',
},
},
'billing_name': {'type': 'string'},
'billing_address': {'type': 'string'},
},
})
def test_uiSchema(self):
event = models.Event.objects.create(
organization=self.organization,
name="Test uiSchema Event",
registration_ui_schema={
'ui:title': "Test UI Schema",
'ui:description': "Test Description",
}
)
response = self.client.get(f'/api/events/{event.id}/register')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['uiSchema'], {
'ui:title': "Test UI Schema",
'ui:description': "Test Description",
})
def test_pricing_fields(self):
event = models.Event.objects.create(
organization=self.organization,
name="Test Price Fields",
pricing={
'adult': 790,
'teen': 680,
},
camper_pricing_logic={
'tuition': {'+': [1, 2]},
'meals': {'*': [2, 3]}
},
registration_pricing_logic={
'donation': {'var': 'registration.donation'}
},
)
response = self.client.get(f'/api/events/{event.id}/register')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['pricing'], event.pricing)
self.assertEqual(response.data['pricingLogic'], {
'camper': event.camper_pricing_logic,
'registration': event.registration_pricing_logic,
})
| [
"camphoric.models.Event.objects.create",
"camphoric.models.Organization.objects.create"
] | [((159, 219), 'camphoric.models.Organization.objects.create', 'models.Organization.objects.create', ([], {'name': '"""Test Organization"""'}), "(name='Test Organization')\n", (193, 219), False, 'from camphoric import models\n'), ((268, 578), 'camphoric.models.Event.objects.create', 'models.Event.objects.create', ([], {'organization': 'self.organization', 'name': '"""Test Data Event"""', 'registration_schema': "{'type': 'object', 'properties': {'billing_name': {'type': 'string'},\n 'billing_address': {'type': 'string'}}}", 'camper_schema': "{'type': 'object', 'properties': {'name': {'type': 'string'}}}"}), "(organization=self.organization, name=\n 'Test Data Event', registration_schema={'type': 'object', 'properties':\n {'billing_name': {'type': 'string'}, 'billing_address': {'type':\n 'string'}}}, camper_schema={'type': 'object', 'properties': {'name': {\n 'type': 'string'}}})\n", (295, 578), False, 'from camphoric import models\n'), ((1666, 1855), 'camphoric.models.Event.objects.create', 'models.Event.objects.create', ([], {'organization': 'self.organization', 'name': '"""Test uiSchema Event"""', 'registration_ui_schema': "{'ui:title': 'Test UI Schema', 'ui:description': 'Test Description'}"}), "(organization=self.organization, name=\n 'Test uiSchema Event', registration_ui_schema={'ui:title':\n 'Test UI Schema', 'ui:description': 'Test Description'})\n", (1693, 1855), False, 'from camphoric import models\n'), ((2272, 2555), 'camphoric.models.Event.objects.create', 'models.Event.objects.create', ([], {'organization': 'self.organization', 'name': '"""Test Price Fields"""', 'pricing': "{'adult': 790, 'teen': 680}", 'camper_pricing_logic': "{'tuition': {'+': [1, 2]}, 'meals': {'*': [2, 3]}}", 'registration_pricing_logic': "{'donation': {'var': 'registration.donation'}}"}), "(organization=self.organization, name=\n 'Test Price Fields', pricing={'adult': 790, 'teen': 680},\n camper_pricing_logic={'tuition': {'+': [1, 2]}, 'meals': {'*': [2, 3]}},\n 
registration_pricing_logic={'donation': {'var': 'registration.donation'}})\n", (2299, 2555), False, 'from camphoric import models\n')] |
import logging
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s',
level=logging.INFO) | [
"logging.basicConfig"
] | [((17, 119), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(name)s %(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s %(name)s %(levelname)s: %(message)s', level=logging.INFO)\n", (36, 119), False, 'import logging\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
import re
import json
import argparse
import json
import random
from os import listdir
from os.path import isfile, join
from pprint import pprint as pp
from collections import deque
# import project libs
import create_annotated_corpus
# defining globals & constants
GOLD_ANNOTATED_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/Implisense/json gold/'
SUBJECTS_TABEL_JSON = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Vorverarbeitung/subjects_tabel.json'
TARGET_DISTRIBUTION = [
[
10, # correct annotation
16.5141, # manipulated span
6.606, # manipulated cat
0, # manipulated cat & span
3.3021, # unnecessary annotation
63.5778 # no annotation
],
[
50, # correct annotation
9.1745, # manipulated span
3.67, # manipulated cat
0, # manipulated cat & span
1.8345, # unnecessary annotation
35.321 # no annotation
],
[
90, # correct annotation
1.8349, # manipulated span
0.734, # manipulated cat
0, # manipulated cat & span
0.3669, # unnecessary annotation
7.0642 # no annotation
]
]
TOTAL_ANNOTATIONS = 310
MIN_NUMBER_OF_ANNOTATIONS_PER_BLOCK = 75
NUMBER_OF_SUBJECTS = 66
# methods
def read_corpus_files(path):
corpus = []
for file_name in sorted(listdir(path)):
if not (isfile(join(path, file_name)) and file_name.endswith('.json')): continue
file_handler = open(path + file_name, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
deconded_content = json.JSONDecoder().decode(raw_content)
corpus.append(deconded_content)
return corpus
def define_blocks(annotated_corpus):
annotations_per_paragraph = []
# create list with annotations per sentence / paragraph
for document in annotated_corpus:
for paragraph in document['data']:
annotations_per_sentences = []
for sentence in paragraph:
annotations_per_sentence = 0
for token in sentence:
if 'annotation' in token:
annotations_per_sentence += 1
annotations_per_sentences.append(annotations_per_sentence)
annotations_per_paragraph.append(annotations_per_sentences)
# define blocks
blocks = []
annotations_per_block = 0
last_index = 0
for index, annotations_per_sentence_in_paragraph in enumerate(annotations_per_paragraph):
annotations_per_block += sum(annotations_per_sentence_in_paragraph)
if annotations_per_block >= MIN_NUMBER_OF_ANNOTATIONS_PER_BLOCK or index == (len(annotations_per_paragraph) - 1):
print('add', annotations_per_block, 'annotations to block')
index_for_partitioning = index + 1
blocks.append(annotations_per_paragraph[last_index:index_for_partitioning])
last_index = index_for_partitioning
annotations_per_block = 0
return blocks
def create_reference_distributions(blocks):
def distribution_per_block(annotations, level):
factor = annotations / 100.0
absolute_distribution = list(map(lambda x: int(x * factor), TARGET_DISTRIBUTION[level]))
difference = annotations - sum(absolute_distribution)
if difference > 0:
for i in range(0, difference):
index = random.choice([0, 1, 2, 4, 5]) # 3 is missing, because it is not occuring
absolute_distribution[index] += 1
annotation_class_list = []
for class_id, count in enumerate(absolute_distribution):
for i in range(0, count):
annotation_class_list.append(class_id)
random.shuffle(annotation_class_list)
return annotation_class_list
def collate_distribution_to_block_structure(block, distribution):
block_with_annotation_classes = []
for document in block:
annotation_classes_per_document = []
for number_of_annotations_per_sentence in document:
annotation_classes_per_sentence = []
for i in range(0, number_of_annotations_per_sentence):
annotation_classes_per_sentence.append(distribution.pop(0))
annotation_classes_per_document.append(annotation_classes_per_sentence)
block_with_annotation_classes.append(annotation_classes_per_document)
return block_with_annotation_classes
annotation_classes_in_blocks_per_level = []
for level in range(0, 3):
annotation_classes_in_blocks = []
for block in blocks:
annotations = sum(map(lambda sentence: sum(sentence), block))
distribution = distribution_per_block(annotations, level)
collated_block = collate_distribution_to_block_structure(block, distribution)
annotation_classes_in_blocks.append(collated_block)
annotation_classes_in_blocks_per_level.append(annotation_classes_in_blocks)
return annotation_classes_in_blocks_per_level
def create_distribution_per_subject(reference_annotation_classes, subject_id):
def clear_block(block):
clean_block = []
for document in block:
cleaned_document = []
for sentence in document:
cleaned_sentence = []
for annotation in sentence:
cleaned_sentence.append(5)
cleaned_document.append(cleaned_sentence)
clean_block.append(cleaned_document)
return clean_block
def collate_distribution_to_block_structure(block, distribution):
block_with_annotation_classes = []
for document in block:
annotation_classes_per_document = []
for sentence in document:
annotation_classes_per_sentence = []
for annotation_class in sentence:
annotation_classes_per_sentence.append(distribution.pop(0))
annotation_classes_per_document.append(annotation_classes_per_sentence)
block_with_annotation_classes.append(annotation_classes_per_document)
return block_with_annotation_classes
def shift_annotation_classes(block, offset):
distribution = flat_block_structure(block)
items = deque(distribution)
items.rotate(offset)
rotated_distribution = list(items)
return collate_distribution_to_block_structure(block, rotated_distribution)
subject_blocks = []
for i in range(subject_id, (subject_id + 4)):
reference_block = reference_annotation_classes[i - subject_id]
if i % 2 == 0:
block = clear_block(reference_block)
else:
block = shift_annotation_classes(reference_block, subject_id - 1)
subject_blocks.append(block)
return subject_blocks
def add_no_annotations_to_unnecessary_annotations(blocks, gold_annotated_corpus):
    """Insert the 'no annotation' class (5) beside every class-4 slot.

    Walks the gold corpus in lockstep with the per-subject annotation-class
    lists in `blocks` and, wherever a class 4 is assigned, inserts an extra
    class 5 either before or after it depending on the surrounding space in
    the sentence. Mutates the inner lists of `blocks` in place and returns
    `blocks` with the updated paragraphs collated back in.
    """
    def flatten_blocks(blocks_per_subject):
        # return a list of paragraphs for each subject
        paragraphs = []
        for block in blocks_per_subject:
            for paragraph in block:
                paragraphs.append(paragraph)
        return paragraphs
    def insert_index_addition(token_index, sentence):
        # Returns 1 when the extra class should go after the class-4 slot
        # (more room to the left than to the right), otherwise 0.
        current_annotation_length = sentence[token_index]['annotation']['length']
        space_to_the_left = token_index - create_annotated_corpus.earliest_chunk_start_index(sentence, token_index)
        space_to_the_right = create_annotated_corpus.maximum_chunk_length(sentence, token_index) - current_annotation_length
        # print('sentence')
        # pp(sentence)
        # print('index', token_index, 'left', space_to_the_left, 'right', space_to_the_right)
        if space_to_the_left > space_to_the_right: return 1
        return 0
    def collate_paragraphs_to_blocks_structure(blocks, paragraphs):
        # Write the (mutated) flat paragraph list back into the block nesting.
        total_paragraph_index = -1
        for block in blocks:
            for i in range(0, len(block)):
                total_paragraph_index += 1
                block[i] = paragraphs[total_paragraph_index]
        return blocks
    total_paragraph_index = -1
    annotations_per_paragraph = flatten_blocks(blocks)
    for document_index, document in enumerate(gold_annotated_corpus):
        for paragraph_index, paragraph in enumerate(document['data']):
            total_paragraph_index += 1
            for sentence_index, sentence in enumerate(paragraph):
                annotations_per_sentence = annotations_per_paragraph[total_paragraph_index][sentence_index]
                annotation_index = -1
                for token_index, token in enumerate(sentence):
                    if 'annotation' in token:
                        annotation_index += 1
                        if annotations_per_sentence[annotation_index] == 4:
                            # print('annotations list before', annotations_per_sentence)
                            insert_index = annotation_index + insert_index_addition(token_index, sentence)
                            # print('insert_index', insert_index)
                            annotations_per_sentence.insert(insert_index, 5)
                            # print('annotations list after', annotations_per_sentence)
                            # Skip past the freshly inserted class 5.
                            annotation_index += 1
    return collate_paragraphs_to_blocks_structure(blocks, annotations_per_paragraph)
# helpers
def validate_target_distribution():
    """Abort the program if any TARGET_DISTRIBUTION row does not sum to 100."""
    for position, target in enumerate(TARGET_DISTRIBUTION):
        if sum(target) == 100:
            continue
        print('TARGET_DISTRIBUTION is not valid!', position)
        exit()
def flat_block_structure(block):
    """Flatten a block (documents -> sentences -> classes) into one list."""
    return [annotation_class
            for document in block
            for sentence in document
            for annotation_class in sentence]
def save_document_to_file(document):
    """Serialize `document` as JSON and write it to SUBJECTS_TABEL_JSON.

    Uses a context manager so the file handle is always closed, even when
    the write raises (the previous version leaked the handle on error).
    """
    json_encoded_document = json.dumps(document)
    with open(SUBJECTS_TABEL_JSON, 'w') as file_handler:
        file_handler.write(json_encoded_document)
def find_four(blocks, prefix = False):
    """Shorthand for locating sentences containing annotation class 4."""
    return find_occurences(4, blocks, prefix)

def find_occurences(number, blocks, prefix = False):
    """Collect every sentence (innermost list) in `blocks` containing `number`.

    When `prefix` is truthy, each hit is also printed with that prefix.
    """
    matches = []
    for block in blocks:
        for paragraph in block:
            for sentence in paragraph:
                if number not in sentence:
                    continue
                matches.append(sentence)
                if prefix:
                    print(prefix, sentence)
    return matches
# entry point as a stand alone script
if __name__ == '__main__':
    # Sanity-check the configured distribution before doing any work.
    validate_target_distribution()
    gold_annotated_corpus = read_corpus_files(GOLD_ANNOTATED_CORPUS_FILES)
    blocks = define_blocks(gold_annotated_corpus)
    reference_annotation_classes_in_blocks = create_reference_distributions(blocks)
    # Class 3 must never appear in a reference distribution; report any hit.
    for level, reference_annotation_classes_per_level in enumerate(reference_annotation_classes_in_blocks):
        find_occurences(3, reference_annotation_classes_per_level, 'found forbidden number in level' + str(level))
    # Build one annotation-class table per subject and persist the result.
    subject_table = []
    for subject_id in range(NUMBER_OF_SUBJECTS):
        reference_annotation_classes = reference_annotation_classes_in_blocks[subject_id % 3]
        subject_annotation_classes = create_distribution_per_subject(reference_annotation_classes, subject_id)
        subject_table.append(
            add_no_annotations_to_unnecessary_annotations(subject_annotation_classes, gold_annotated_corpus))
    save_document_to_file(subject_table)
| [
"os.listdir",
"collections.deque",
"random.shuffle",
"create_annotated_corpus.maximum_chunk_length",
"random.choice",
"json.dumps",
"os.path.join",
"create_annotated_corpus.earliest_chunk_start_index",
"json.JSONDecoder"
] | [((10011, 10031), 'json.dumps', 'json.dumps', (['document'], {}), '(document)\n', (10021, 10031), False, 'import json\n'), ((1416, 1429), 'os.listdir', 'listdir', (['path'], {}), '(path)\n', (1423, 1429), False, 'from os import listdir\n'), ((3816, 3853), 'random.shuffle', 'random.shuffle', (['annotation_class_list'], {}), '(annotation_class_list)\n', (3830, 3853), False, 'import random\n'), ((6396, 6415), 'collections.deque', 'deque', (['distribution'], {}), '(distribution)\n', (6401, 6415), False, 'from collections import deque\n'), ((7479, 7552), 'create_annotated_corpus.earliest_chunk_start_index', 'create_annotated_corpus.earliest_chunk_start_index', (['sentence', 'token_index'], {}), '(sentence, token_index)\n', (7529, 7552), False, 'import create_annotated_corpus\n'), ((7582, 7649), 'create_annotated_corpus.maximum_chunk_length', 'create_annotated_corpus.maximum_chunk_length', (['sentence', 'token_index'], {}), '(sentence, token_index)\n', (7626, 7649), False, 'import create_annotated_corpus\n'), ((1689, 1707), 'json.JSONDecoder', 'json.JSONDecoder', ([], {}), '()\n', (1705, 1707), False, 'import json\n'), ((3490, 3520), 'random.choice', 'random.choice', (['[0, 1, 2, 4, 5]'], {}), '([0, 1, 2, 4, 5])\n', (3503, 3520), False, 'import random\n'), ((1455, 1476), 'os.path.join', 'join', (['path', 'file_name'], {}), '(path, file_name)\n', (1459, 1476), False, 'from os.path import isfile, join\n')] |
from viewstate import ViewState
from src import config
import requests_html
from datetime import datetime, timedelta
import re
base_url = "https://appiris.infofer.ro/MyTrainRO.aspx?tren={}"
def get_station_id_by_name(name):
    """Look up a station's id by its display name.

    Returns None for unknown names, which callers (e.g. the state decoder)
    rely on for unrecognised stations.
    """
    # dict.get replaces the membership test + index (two lookups) idiom.
    return config.global_station_list.get(name)
def extract_viewstate(reply):
    """Return the value of the page's hidden ASP.NET __VIEWSTATE field."""
    element = reply.html.find('#__VIEWSTATE', first=True)
    if not element:
        raise Exception("__VIEWSTATE element not present on webpage")
    return element.attrs['value']
def state_decoder(state):
    """Decode a parsed ASP.NET ViewState tree into a train-status dict.

    The numeric indices below walk fixed positions of the InfoFer "MyTrain"
    page's control tree; they are layout-dependent and will break if the
    upstream page structure changes.

    Returns a dict with keys 'departure_date', 'info_box' and 'route_data'.
    """
    # Fixed path to the page's main content container inside the ViewState.
    main_page = state[0][1][1][1][1][3][1][1][1]
    departure_date_raw = main_page[13][0][0][0][7]
    departure_date = re.findall(r"(\d+.\d+.\d+)", departure_date_raw)[0]
    # Collect the info box details
    info_box = main_page[13][1][1][1]
    # Process the latest info field, e.g. "Some Station [arrived]"
    latest_info_raw = info_box[13][1][1][0][0][1]
    info_station, info_status = re.findall(r"(.*?) \[(.*?)\]", latest_info_raw)[0]
    info_station = info_station.strip()
    # Process the last update time (' ' is the page's "no value" marker)
    info_time_raw = info_box[15][1][1][0][0][1]
    info_time = None
    if info_time_raw and info_time_raw != ' ':
        info_time = datetime.timestamp(datetime.strptime(info_time_raw, '%d.%m.%Y %H:%M'))
    # Process the delay field (minutes; empty string means no delay reported)
    delay_raw = info_box[17][1][1][0][0][1]
    delay = None
    if delay_raw != '':
        delay = int(delay_raw)
    # Process the next stop information
    next_station_raw = info_box[23][1][1][0][0][1]
    next_station = next_station_raw.strip()
    # Process the next stop time
    next_stop_time_raw = info_box[25][1][1][0][0][1]
    next_stop_time = None
    if next_stop_time_raw and next_stop_time_raw != ' ':
        next_stop_time = datetime.timestamp(datetime.strptime(next_stop_time_raw, '%d.%m.%Y %H:%M'))
    # Other information
    destination_station = info_box[19][1][1][0][0][1]
    destination_arrival_time_raw = info_box[21][1][1][0][0][1]
    destination_arrival_time = None
    if destination_arrival_time_raw and destination_arrival_time_raw != ' ':
        destination_arrival_time = datetime.timestamp(datetime.strptime(destination_arrival_time_raw, '%d.%m.%Y %H:%M'))
    # Build the data dict
    info_box_data = {
        'rank': info_box[3][1][1][0][0][1],
        'train_id': info_box[5][1][1][0][0][1],
        'operator': info_box[7][1][1][0][0][1],
        'route': info_box[9][1][1][0][0][1],
        'status': info_box[11][1][1][0][0][1],
        'latest_information': {
            'station': {
                'name': info_station,
                'id': get_station_id_by_name(info_station),
            },
            'status': info_status,
            'time': int(info_time) if info_time else None,
        },
        'delay': delay,
        'destination': {
            'station': {
                'name': destination_station,
                'id': get_station_id_by_name(destination_station)
            },
            'arrival_time': int(destination_arrival_time) if destination_arrival_time else None
        },
        'next_stop': {
            'station': {
                'name': next_station,
                'id': get_station_id_by_name(next_station),
            },
            'time': int(next_stop_time) if next_stop_time else None,
        },
        # [:-1] strips the trailing unit character shown on the page
        # (presumably 'km' / 'h' suffixes -- TODO confirm against live page).
        'distance': info_box[27][1][1][0][0][1][:-1],
        'trip_duration': info_box[29][1][1][0][0][1][:-1],
        'average_speed': info_box[31][1][1][0][0][1][:-1],
    }
    # Collect the route info box data, if available
    # Note: The route info is not displayed for canceled trains,
    # yet it is available in the state information, albeit at a different place
    # in the structure
    route_data = []
    route_info_box = None
    # Find the route info box
    try:
        route_info_box = main_page[17][1][1][1]
    except TypeError:
        try:
            # The route info box is usually found here on cancelled trains
            route_info_box = main_page[15][1][1][1]
        except TypeError:
            pass
    if route_info_box:
        last_arrival_timestamp = 0
        last_departure_timestamp = 0
        # Every second child of the route box is a station row.
        for entry_number in range(1, int(len(route_info_box) / 2)):
            entry = route_info_box[2 * entry_number - 1][1]
            # Compute the arrival timestamp for this station
            arrival_time_raw = entry[5][0][0][1]
            arrival_timestamp = None
            if arrival_time_raw and arrival_time_raw != ' ':
                # We assume the train arrives at this station on the same day it left
                arrival_time_assumption = datetime.strptime(departure_date + ' ' + arrival_time_raw, '%d.%m.%Y %H:%M')
                if last_arrival_timestamp and last_arrival_timestamp > datetime.timestamp(arrival_time_assumption):
                    # We were wrong in our assumption and the train actually arrives
                    # on the next day at this station, hence we must add one day to the arrival time
                    arrival_time_assumption = arrival_time_assumption + timedelta(days=1)
                last_arrival_timestamp = datetime.timestamp(arrival_time_assumption)
                arrival_timestamp = int(last_arrival_timestamp)
            # Compute the departure timestamp for this station
            departure_time_raw = entry[9][0][0][1]
            departure_timestamp = None
            if departure_time_raw and departure_time_raw != ' ':
                # We assume the train departs from this station on the same day it left
                departure_time_assumption = datetime.strptime(departure_date + ' ' + departure_time_raw, '%d.%m.%Y %H:%M')
                if last_departure_timestamp and last_departure_timestamp > datetime.timestamp(departure_time_assumption):
                    # We were wrong in our assumption and the train actually departs
                    # on the next day from this station, hence we must add one day to the departure time
                    departure_time_assumption = departure_time_assumption + timedelta(days=1)
                last_departure_timestamp = datetime.timestamp(departure_time_assumption)
                departure_timestamp = int(last_departure_timestamp)
            # Decode other raw data
            milepost_raw = entry[1][0][0][1]
            milepost = None
            if milepost_raw:
                milepost = int(milepost_raw)
            station = entry[3][0][0][1].strip()
            stop_duration_raw = entry[7][0][0][1]
            stop_duration = None
            if stop_duration_raw and stop_duration_raw != " ":
                stop_duration = int(stop_duration_raw)
            delay_raw = entry[13][0][0][1]
            delay = 0
            if delay_raw and delay_raw != " ":
                delay = int(delay_raw)
            mentions_raw = entry[15][0][0][1]
            mentions = None
            if mentions_raw and mentions_raw != " ":
                mentions = mentions_raw
            entry_data = {
                'milepost': milepost,
                'station': {
                    'name': station,
                    'id': get_station_id_by_name(station),
                },
                'arrival_time': arrival_timestamp,
                'stop_duration': stop_duration,
                'departure_time': departure_timestamp,
                # 'Real' marks live data as opposed to scheduled times.
                'is_real_time': entry[11][0][0][1] == 'Real',
                'delay': delay,
                'mentions': mentions,
            }
            # Some rows carry a second mentions payload; absent rows raise
            # TypeError when indexed.
            try:
                entry_data['mentions_extra'] = entry[15][0][1][1]
            except TypeError:
                entry_data['mentions_extra'] = None
            route_data.append(entry_data)
    return {
        'departure_date': departure_date,
        'info_box': info_box_data,
        'route_data': route_data,
    }
def get_train(train_id):
    """Fetch and decode the status page(s) for `train_id`.

    Returns a list with one decoded trip dict per result page (the site
    paginates via an ASP.NET DetailsView postback). Raises Exception when
    the train does not exist.
    """
    session = requests_html.HTMLSession()
    # Get the initial page and retrieve its __VIEWSTATE
    reply = session.get(base_url.format(train_id))
    state_value = extract_viewstate(reply)
    vs = ViewState(state_value)
    state = vs.decode()
    # Check whether the train actually exists
    # (a non-empty value at this fixed position is the site's error marker)
    if state[0][1][1][1][1][3][1][1][1][11][0][0][1] != '':
        raise Exception("Train not found")
    trips = []
    # Decode the current page and append it to the trips array
    current_trip = state_decoder(state)
    trips.append(current_trip)
    # Check whether the train is single-page or multi-page
    if state[1]['DetailsView1'][0][6] == 1:
        print("Single-page train")
    else:
        print("Multi-page train!")
        # Switch the page by replaying the DetailsView pager postback.
        reply = session.post(base_url.format(train_id), data={
            '__EVENTTARGET': 'DetailsView1',
            '__EVENTARGUMENT': 'Page$2',
            '__VIEWSTATE': state_value,
            '__VIEWSTATEGENERATOR': '86BE64DB',
            'TextTrnNo': str(train_id),
        })
        state_value = extract_viewstate(reply)
        vs = ViewState(state_value)
        state = vs.decode()
        # Decode the current page and append it to the trips array
        current_trip = state_decoder(state)
        trips.append(current_trip)
    return trips
| [
"viewstate.ViewState",
"datetime.datetime.strptime",
"datetime.timedelta",
"requests_html.HTMLSession",
"datetime.datetime.timestamp",
"re.findall"
] | [((7951, 7978), 'requests_html.HTMLSession', 'requests_html.HTMLSession', ([], {}), '()\n', (7976, 7978), False, 'import requests_html\n'), ((8139, 8161), 'viewstate.ViewState', 'ViewState', (['state_value'], {}), '(state_value)\n', (8148, 8161), False, 'from viewstate import ViewState\n'), ((726, 776), 're.findall', 're.findall', (['"""(\\\\d+.\\\\d+.\\\\d+)"""', 'departure_date_raw'], {}), "('(\\\\d+.\\\\d+.\\\\d+)', departure_date_raw)\n", (736, 776), False, 'import re\n'), ((971, 1019), 're.findall', 're.findall', (['"""(.*?) \\\\[(.*?)\\\\]"""', 'latest_info_raw'], {}), "('(.*?) \\\\[(.*?)\\\\]', latest_info_raw)\n", (981, 1019), False, 'import re\n'), ((9047, 9069), 'viewstate.ViewState', 'ViewState', (['state_value'], {}), '(state_value)\n', (9056, 9069), False, 'from viewstate import ViewState\n'), ((1258, 1308), 'datetime.datetime.strptime', 'datetime.strptime', (['info_time_raw', '"""%d.%m.%Y %H:%M"""'], {}), "(info_time_raw, '%d.%m.%Y %H:%M')\n", (1275, 1308), False, 'from datetime import datetime, timedelta\n'), ((1812, 1867), 'datetime.datetime.strptime', 'datetime.strptime', (['next_stop_time_raw', '"""%d.%m.%Y %H:%M"""'], {}), "(next_stop_time_raw, '%d.%m.%Y %H:%M')\n", (1829, 1867), False, 'from datetime import datetime, timedelta\n'), ((2184, 2249), 'datetime.datetime.strptime', 'datetime.strptime', (['destination_arrival_time_raw', '"""%d.%m.%Y %H:%M"""'], {}), "(destination_arrival_time_raw, '%d.%m.%Y %H:%M')\n", (2201, 2249), False, 'from datetime import datetime, timedelta\n'), ((4669, 4745), 'datetime.datetime.strptime', 'datetime.strptime', (["(departure_date + ' ' + arrival_time_raw)", '"""%d.%m.%Y %H:%M"""'], {}), "(departure_date + ' ' + arrival_time_raw, '%d.%m.%Y %H:%M')\n", (4686, 4745), False, 'from datetime import datetime, timedelta\n'), ((5181, 5224), 'datetime.datetime.timestamp', 'datetime.timestamp', (['arrival_time_assumption'], {}), '(arrival_time_assumption)\n', (5199, 5224), False, 'from datetime import datetime, 
timedelta\n'), ((5646, 5724), 'datetime.datetime.strptime', 'datetime.strptime', (["(departure_date + ' ' + departure_time_raw)", '"""%d.%m.%Y %H:%M"""'], {}), "(departure_date + ' ' + departure_time_raw, '%d.%m.%Y %H:%M')\n", (5663, 5724), False, 'from datetime import datetime, timedelta\n'), ((6176, 6221), 'datetime.datetime.timestamp', 'datetime.timestamp', (['departure_time_assumption'], {}), '(departure_time_assumption)\n', (6194, 6221), False, 'from datetime import datetime, timedelta\n'), ((4818, 4861), 'datetime.datetime.timestamp', 'datetime.timestamp', (['arrival_time_assumption'], {}), '(arrival_time_assumption)\n', (4836, 4861), False, 'from datetime import datetime, timedelta\n'), ((5121, 5138), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5130, 5138), False, 'from datetime import datetime, timedelta\n'), ((5801, 5846), 'datetime.datetime.timestamp', 'datetime.timestamp', (['departure_time_assumption'], {}), '(departure_time_assumption)\n', (5819, 5846), False, 'from datetime import datetime, timedelta\n'), ((6114, 6131), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6123, 6131), False, 'from datetime import datetime, timedelta\n')] |
import turtle

# Playground window for the turtles.
wn = turtle.Screen()
print("#Creates a playground for turtle. Not must?")

tess = turtle.Turtle()
alex = turtle.Turtle()

# --- alex: first figure, drawn slowly with the default (hidden) pen -------
alex.speed(1)
alex.pen()
alex.pendown()
alex.hideturtle()
alex.left(60)
for turn in (150, 150, 140, 140, 140):
    alex.forward(100)
    alex.right(turn)

# --- alex: same outline again, thicker and blue ---------------------------
alex.speed(3)
alex.pensize(7)
alex.shape("arrow")
alex.color("blue")
alex.left(60)
for _ in range(5):
    alex.forward(100)
    alex.right(145)

# --- tess: stamp an expanding spiral --------------------------------------
size = 5
for _ in range(30):
    tess.stamp()
    size += 2
    tess.forward(size)
    tess.right(24)

# --- tess: thick black pentagon -------------------------------------------
tess.pendown()
tess.pensize(25)
tess.color("black")
for _ in range(5):
    tess.forward(55)
    tess.right(72)

# --- alex: hidden star, drawn slowly --------------------------------------
alex.speed(1)
alex.pen()
alex.pendown()
alex.hideturtle()
alex.forward(100)
alex.left(60)
for _ in range(5):
    alex.forward(100)
    alex.right(150)

# --- alex: blue arrow star ------------------------------------------------
alex.speed(3)
alex.pensize(7)
alex.shape("arrow")
alex.color("blue")
alex.forward(100)
alex.left(60)
for _ in range(5):
    alex.forward(100)
    alex.right(145)
alex.forward(100)

# --- red turtle stamps radiating from a centre (pen down) -----------------
alex.speed(2)
alex.pensize(17)
alex.shape("turtle")
alex.color("red")
for _ in range(12):
    alex.stamp()
    alex.forward(77)
    alex.stamp()
    alex.backward(77)
    alex.right(30)
alex.stamp()
alex.forward(-200)

# --- green turtle stamps (pen lifted inside the loop, so no trails) -------
alex.speed(2)
alex.pensize(37)
alex.shape("turtle")
alex.color("green")
for _ in range(12):
    alex.penup()
    alex.stamp()
    alex.forward(77)
    alex.stamp()
    alex.backward(77)
    alex.right(30)
alex.stamp()
alex.right(145)
alex.forward(50)

# --- second red stamp pattern (one stamp per spoke) -----------------------
alex.speed(2)
alex.pensize(17)
alex.shape("turtle")
alex.color("red")
alex.penup()
alex.stamp()
for _ in range(12):
    alex.forward(77)
    alex.stamp()
    alex.backward(77)
    alex.right(30)
alex.forward(-300)

# Keep the window open until the user closes it.
wn.mainloop()
| [
"turtle.Screen",
"turtle.Turtle"
] | [((22, 37), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (35, 37), False, 'import turtle\n'), ((116, 131), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (129, 131), False, 'import turtle\n'), ((169, 184), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (182, 184), False, 'import turtle\n')] |
from librosa import cqt, icqt
import numpy as np
def gl_cqt(S, n_iter=32, sr=22050, hop_length=512, bins_per_octave=12, fmin=None, window='hann',
           dtype=np.float32, length=None, momentum=0.99, random_state=None, res_type='kaiser_fast'):
    """Griffin-Lim phase reconstruction from a constant-Q magnitude spectrogram.

    Iteratively inverts/re-analyses with icqt/cqt while keeping the given
    magnitudes `S`, using momentum-accelerated updates (fast Griffin-Lim).

    Parameters mirror librosa's griffinlim_cqt; `dtype` is currently unused.
    Returns the reconstructed time-domain signal from the final phase estimate.
    """
    if fmin is None:
        # Local import: the module-level import only binds cqt/icqt.
        from librosa import note_to_hz
        fmin = note_to_hz('C1')
    if random_state is None:
        rng = np.random
    elif isinstance(random_state, int):
        rng = np.random.RandomState(seed=random_state)
    elif isinstance(random_state, np.random.RandomState):
        rng = random_state
    else:
        # Previously an unsupported random_state left `rng` unbound (NameError).
        raise TypeError('random_state must be None, an int, or np.random.RandomState')
    if momentum > 1:
        import warnings  # local import: `warnings` was referenced but never imported
        warnings.warn('Griffin-Lim with momentum={} > 1 can be unstable. Proceed with caution!'.format(momentum))
    elif momentum < 0:
        # Previously raised an undefined name (ParameterError); ValueError is explicit.
        raise ValueError('griffinlim() called with momentum={} < 0'.format(momentum))
    # randomly initialize the phase
    angles = np.exp(2j * np.pi * rng.rand(*S.shape))
    # And initialize the previous iterate to 0
    rebuilt = 0.
    for _ in range(n_iter):
        # Store the previous iterate
        tprev = rebuilt
        # Invert with our current estimate of the phases
        # (length/res_type are only applied on the final inversion below)
        inverse = icqt(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=bins_per_octave, fmin=fmin,
                       window=window)
        # Rebuild the spectrogram
        rebuilt = cqt(inverse, sr=sr, bins_per_octave=bins_per_octave, n_bins=S.shape[0],
                      hop_length=hop_length, fmin=fmin,
                      window=window, res_type=res_type)
        # Update our phase estimates with momentum
        angles[:] = rebuilt - (momentum / (1 + momentum)) * tprev
        angles[:] /= np.abs(angles) + 1e-16
    # Return the signal synthesized from the final phase estimates
    return icqt(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=bins_per_octave, fmin=fmin,
                window=window, length=length, res_type=res_type)
| [
"numpy.abs",
"librosa.cqt",
"numpy.random.RandomState",
"librosa.icqt"
] | [((1807, 1956), 'librosa.icqt', 'icqt', (['(S * angles)'], {'sr': 'sr', 'hop_length': 'hop_length', 'bins_per_octave': 'bins_per_octave', 'fmin': 'fmin', 'window': 'window', 'length': 'length', 'res_type': 'res_type'}), '(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=\n bins_per_octave, fmin=fmin, window=window, length=length, res_type=res_type\n )\n', (1811, 1956), False, 'from librosa import cqt, icqt\n'), ((1165, 1275), 'librosa.icqt', 'icqt', (['(S * angles)'], {'sr': 'sr', 'hop_length': 'hop_length', 'bins_per_octave': 'bins_per_octave', 'fmin': 'fmin', 'window': 'window'}), '(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=\n bins_per_octave, fmin=fmin, window=window)\n', (1169, 1275), False, 'from librosa import cqt, icqt\n'), ((1422, 1565), 'librosa.cqt', 'cqt', (['inverse'], {'sr': 'sr', 'bins_per_octave': 'bins_per_octave', 'n_bins': 'S.shape[0]', 'hop_length': 'hop_length', 'fmin': 'fmin', 'window': 'window', 'res_type': 'res_type'}), '(inverse, sr=sr, bins_per_octave=bins_per_octave, n_bins=S.shape[0],\n hop_length=hop_length, fmin=fmin, window=window, res_type=res_type)\n', (1425, 1565), False, 'from librosa import cqt, icqt\n'), ((430, 470), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (451, 470), True, 'import numpy as np\n'), ((1733, 1747), 'numpy.abs', 'np.abs', (['angles'], {}), '(angles)\n', (1739, 1747), True, 'import numpy as np\n')] |
from django.conf.urls import url
from . import views
urlpatterns = [
    # Landing page
    url(r'^$', views.index, name='index'),
    # Circuit list and per-circuit detail pages
    url(r'^circuitos/$', views.circuitos, name='circuitos'),
    url(r'^circuitos/(?P<circuito_id>[0-9]+)/$', views.circuito_detail,
        name='circuito_detail'),
    # Grand Prix list and per-Grand-Prix detail pages
    url(r'^grandes_premios/$', views.grandes_premios, name='grandes_premios'),
    url(r'^grandes_premios/(?P<gran_premio_id>[0-9]+)/$', views.gran_premio,
        name='gran_premio'),
]
| [
"django.conf.urls.url"
] | [((75, 111), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (78, 111), False, 'from django.conf.urls import url\n'), ((118, 172), 'django.conf.urls.url', 'url', (['"""^circuitos/$"""', 'views.circuitos'], {'name': '"""circuitos"""'}), "('^circuitos/$', views.circuitos, name='circuitos')\n", (121, 172), False, 'from django.conf.urls import url\n'), ((179, 274), 'django.conf.urls.url', 'url', (['"""^circuitos/(?P<circuito_id>[0-9]+)/$"""', 'views.circuito_detail'], {'name': '"""circuito_detail"""'}), "('^circuitos/(?P<circuito_id>[0-9]+)/$', views.circuito_detail, name=\n 'circuito_detail')\n", (182, 274), False, 'from django.conf.urls import url\n'), ((284, 356), 'django.conf.urls.url', 'url', (['"""^grandes_premios/$"""', 'views.grandes_premios'], {'name': '"""grandes_premios"""'}), "('^grandes_premios/$', views.grandes_premios, name='grandes_premios')\n", (287, 356), False, 'from django.conf.urls import url\n'), ((363, 458), 'django.conf.urls.url', 'url', (['"""^grandes_premios/(?P<gran_premio_id>[0-9]+)/$"""', 'views.gran_premio'], {'name': '"""gran_premio"""'}), "('^grandes_premios/(?P<gran_premio_id>[0-9]+)/$', views.gran_premio,\n name='gran_premio')\n", (366, 458), False, 'from django.conf.urls import url\n')] |
import logging
import os

import pandas as pd
logger = logging.getLogger('nodes.data_viz')
def update(client, params):
    """Extract aggregated bus-ridership data from the database into a CSV file.

    Parameters
    ----------
    client : class
        Pipeline connection holder; `client.conn` must be an open DB connection.
    params : class
        Pipeline parameters; `params.processed_data` is the output directory.

    Returns
    -------
    None.
    """
    # Weekly ridership aggregates per bus line, joined with line descriptions.
    query = '''
            SELECT MIN(data) AS data, ano, mes, semana,
            CASE WHEN pass_bus.fim_de_semana = 0 THEN 'dia de semana' ELSE 'fim de semana' END AS fim_de_semana,
            tipo, area, pass_bus.n_linha, desc_linha,
            SUM(pagantes_dinheiro) AS pagantes_dinheiro, SUM(pagantes_bu_e_vt) AS pagantes_bu_e_vt,
            SUM(pagantes_estudantes) AS pagantes_estudantes, SUM(total_estudantes) AS total_estudantes,
            SUM(passageiros_pagantes) AS passageiros_pagantes, SUM(gratuidades_outras) AS gratuidades_outras,
            SUM(integracao_onibus) AS integracao_onibus, SUM(total_passageiros) AS total_passageiros,
            AVG(extensao_ida) AS extensao_ida, AVG(extensao_volta) AS extensao_volta,
            SUM(partidas_ida) AS partidas_ida, SUM(partidas_volta) AS partidas_volta
            FROM passageiros_onibus AS pass_bus
            LEFT JOIN linha ON linha.n_linha = pass_bus.n_linha
            AND linha.fim_de_semana = pass_bus.fim_de_semana
            GROUP BY ano, mes, semana, pass_bus.fim_de_semana, tipo, area, pass_bus.n_linha, desc_linha'''
    # Extract the aggregated table from SQL
    onibus = pd.read_sql(query, con=client.conn)
    # os.path.join produces the platform separator instead of the previous
    # hard-coded doubled backslash (params.processed_data + r'\\onibus.csv').
    onibus.to_csv(os.path.join(params.processed_data, 'onibus.csv'), index=False)
def done(client, params):
    """Report whether this node is finished: True unless `params.viz` is set."""
    # Comparison (rather than plain truthiness) deliberately mirrors the
    # original `params.viz == True` truth table.
    return params.viz != True
"logging.getLogger",
"pandas.read_sql"
] | [((45, 80), 'logging.getLogger', 'logging.getLogger', (['"""nodes.data_viz"""'], {}), "('nodes.data_viz')\n", (62, 80), False, 'import logging\n'), ((1522, 1557), 'pandas.read_sql', 'pd.read_sql', (['query'], {'con': 'client.conn'}), '(query, con=client.conn)\n', (1533, 1557), True, 'import pandas as pd\n')] |
from civicboom.lib.base import *
from civicboom.model import User, Group, PaymentAccount
from civicboom.model.payment import country_codes
from civicboom.lib.form_validators.validator_factory import build_schema
from civicboom.lib.form_validators.dict_overlay import validate_dict
from civicboom.controllers.payment_actions import PaymentActionsController
payment_actions_controller = PaymentActionsController()
import formencode
import civicboom.lib.form_validators
import civicboom.lib.form_validators.base
import civicboom.lib.form_validators.registration
import time, re
log = logging.getLogger(__name__)
# ISO 3166 codes of the EC member states whose VAT numbers we validate.
country_ec_vat = ['AT', 'BE', 'BG', 'CY', 'CZ', 'DE', 'DK', 'EE', 'EL', 'ES', 'FI', 'FR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'MT', 'NL', 'PL', 'PT', 'RO', 'SE', 'SI', 'SK', 'GB']

# Compiled regex for every EC state's vat registration number format
tax_code_regex = re.compile(
    '^('
    '((EE|EL|DE|PT)([0-9]{9}))|' \
    '((FI|HU|LU|MT|SI)([0-9]{8}))|' \
    '((PL|SK)([0-9]{10}))|' \
    '((IT|LV)([0-9]{11}))|' \
    '((SE)([0-9]{12}))|' \
    '((BE)(0?[0-9]{9}))|' \
    '((CY)([0-9]{8}[A-Z]))|' \
    '((CZ)([0-9]{8,10}))|' \
    '((DK)(([0-9]{2}\ ?){3}[0-9]{2}))|' \
    '((ES)(([0-9A-Z][0-9]{7}[A-Z])|([A-Z][0-9]{7}[0-9A-Z])))|' \
    '((FR)([0-9A-Z]{2}\ ?[0-9]{9}))|' \
    '((GB)(([1-9][0-9]{2}\ ?[0-9]{4}\ ?[0-9]{2})|([1-9][0-9]{2}\ ?[0-9]{4}\ ?[0-9]{2}\ ?[0-9]{3})|((GD|HA)[0-9]{3})))|' \
    '((IE)([0-9][0-9A-Z\+\*][0-9]{5}[A-Z]))|' \
    '((LT)(([0-9]{9}|[0-9]{12})))|' \
    '((NL)([0-9]{9}B[0-9]{2}))|' \
    '((AT)(U[0-9]{8}))|' \
    '((BG)([0-9]{9,10}))|' \
    '((RO)([0-9]{2,10}))' \
    ')$'
    )
# FormEncode validator wrapping the pattern; empty values are accepted
# (not_empty=False) so the field can be optional on forms.
tax_code_validator = formencode.validators.Regex(tax_code_regex, strip=True, not_empty=False)
class PaymentsController(BaseController):
"""
@title Payments
@doc payment
@desc controller for administering payment accounts
"""
# # Only allow these actions if in development mode
# def __before__(self, action, **params):
# if not config['development_mode']==True:
# return abort(404)
# BaseController.__before__(self)
@web
@authorize
@role_required('admin')
def index(self, **kwargs):
"""
GET /groups: All groups the current user is a member of
@api groups 1.0 (WIP)
@param * (see common list return controls)
@return 200 - data.list = array of group objects that logged in user is a member including the additional field 'members "role" in the group'
"""
# url('payments')
if not c.logged_in_persona.payment_account:
if c.format == 'html':
return redirect(url('new_payment'))
raise action_error(_('There is no payment account associated with this user, please create a new payment account'), code=404)
account = c.logged_in_persona.payment_account
return self.show(account.id)
@web
@authorize
@role_required('admin')
def new(self, **kwargs):
"""
"""
account = c.logged_in_persona.payment_account
if account:
return redirect(url('payment', action='show', id=account.id))
#url_for('new_payment')
return action_ok()
@web
@auth
@role_required('admin')
def create(self, **kwargs):
"""
"""
payment_account = PaymentAccount()
payment_account.members.append(c.logged_in_persona)
c.template_error = "payments/new"
self.update(payment_account, **kwargs)
payment_account.frequency = 'month'
Session.commit()
plans = ['free', 'plus', 'corp']
plan = [key[5:] for key in kwargs.keys() if key[0:5] == 'plan_' and key[5:] in plans]
if plan:
plan = plan[0]
else:
plan = 'free'
# Only regrade if plan != free (regrade errors on same plan)
if plan != 'free':
payment_actions_controller.regrade(id=payment_account.id, new_type=plan)
if c.format in ('html', 'redirect'):
return redirect(url('payment', action='show', id=payment_account.id))
return action_ok('Account created')
@web
@authorize
@role_required('admin')
def edit(self, id, **kwargs):
"""
"""
account = c.logged_in_persona.payment_account
if not account:
raise action_error(_('Payment account does not exist'), code=404)
if c.logged_in_persona not in account.members:
raise action_error(_('You do not have permission to view this account'), code=404)
account_dict = account.to_dict('full')
data = {'id':account.id}
data.update(dict([(key, account.config[key]) for key in account.config if key in account._user_edit_config]))
data = {'payment':data}
return action_ok(data=data)
@web
@auth
@role_required('admin')
def update(self, id, **kwargs):
"""
"""
if isinstance(id, PaymentAccount):
account = id
else:
account = Session.query(PaymentAccount).filter(PaymentAccount.id==id).first()
if not account:
raise action_error(_('Payment account does not exist'), code=404)
if c.logged_in_persona not in account.members:
raise action_error(_('You do not have permission to view this account'), code=404)
address_fields = PaymentAccount._address_config_order
# Build validation schema
schema = build_schema(
name_type = formencode.validators.OneOf(['org','ind'], messages={'missing': 'Please select a type'}, not_empty=True),
org_name = formencode.validators.UnicodeString(),
ind_name = formencode.validators.UnicodeString(),
)
if kwargs.get('name_type') == 'org':
schema.fields['org_name'] = formencode.validators.UnicodeString(not_empty=True)
else:
schema.fields['ind_name'] = formencode.validators.UnicodeString(not_empty=True)
for address_field in address_fields:
schema.fields[address_field] = formencode.validators.UnicodeString(not_empty=(address_field in PaymentAccount._address_required))
schema.fields['address_country'] = formencode.validators.OneOf(country_codes.keys(), messages={'missing': 'Please select a country'}, not_empty=True)
# if kwargs.get('address_country') in country_ec_vat:
# if kwargs.get('vat_no'):
# kwargs['vat_no'] = kwargs.get('address_country','') + kwargs['vat_no']
# schema.fields['vat_no'] = tax_code_validator
# else:
# schema.fields['vat_no'] = formencode.validators.Empty()
data = {'payment':kwargs}
data = validate_dict(data, schema, dict_to_validate_key='payment', template_error=c.template_error if hasattr(c, 'template_error') else 'payments/edit')
form = data['payment']
# if form.get('vat_no'):
# form['vat_no'] = form['vat_no'][2:]
for field in account._user_edit_config:
if form.get(field):
account.config[field] = form[field]
elif account.config.get(field):
del account.config[field]
if account.config.get('address_country') in country_ec_vat:
if account.config['address_country'] == 'GB':
account.taxable = True
elif account.config.get('vat_no'):
account.taxable = False
else:
account.taxable = True
else:
account.taxable = False
# if form.get('ind_name'):
# account.config['ind_name'] = form['ind_name']
# elif account.config.get('ind_name'):
# del account.config['ind_name']
# if form.get('org_name'):
# account.config['org_name'] = form['org_name']
# elif account.config.get('org_name'):
# del account.config['org_name']
#
# for field_name in address_fields:
# if form.get(field_name):
# account.config[field_name] = form[field_name]
# account.frequency = 'month' This can be changed in the future
#return redirect(url('payment', action='show', id=payment_account.id))
Session.commit()
# url('payment', id=ID)
if c.format == 'redirect':
redirect(url('payment', id=account.id))
return action_ok()
    @web
    @auth
    @role_required('admin')
    def delete(self, id, **kwargs):
        """Delete a payment account (admin only).

        Currently a stub: performs no deletion and always reports success.
        NOTE(review): this endpoint uses @auth while show() uses @authorize —
        confirm which decorator is intended.
        """
        # url('payment', id=ID)
        return action_ok()
@web
@authorize
@role_required('admin')
def show(self, id, **kwargs):
"""
"""
if isinstance(id, PaymentAccount):
account = id
else:
account = Session.query(PaymentAccount).get(id)
if not account:
raise action_error(_('Payment account does not exist'), code=404)
if not c.logged_in_persona in account.members:
raise action_error(_('You do not have permission to view this account'), code=404)
data = account.to_dict('full')
return action_ok(code=200, data=data)
| [
"formencode.validators.Regex",
"formencode.validators.UnicodeString",
"re.compile",
"civicboom.controllers.payment_actions.PaymentActionsController",
"civicboom.model.PaymentAccount",
"civicboom.model.payment.country_codes.keys",
"formencode.validators.OneOf"
] | [((392, 418), 'civicboom.controllers.payment_actions.PaymentActionsController', 'PaymentActionsController', ([], {}), '()\n', (416, 418), False, 'from civicboom.controllers.payment_actions import PaymentActionsController\n'), ((892, 1489), 're.compile', 're.compile', (['"""^(((EE|EL|DE|PT)([0-9]{9}))|((FI|HU|LU|MT|SI)([0-9]{8}))|((PL|SK)([0-9]{10}))|((IT|LV)([0-9]{11}))|((SE)([0-9]{12}))|((BE)(0?[0-9]{9}))|((CY)([0-9]{8}[A-Z]))|((CZ)([0-9]{8,10}))|((DK)(([0-9]{2}\\\\ ?){3}[0-9]{2}))|((ES)(([0-9A-Z][0-9]{7}[A-Z])|([A-Z][0-9]{7}[0-9A-Z])))|((FR)([0-9A-Z]{2}\\\\ ?[0-9]{9}))|((GB)(([1-9][0-9]{2}\\\\ ?[0-9]{4}\\\\ ?[0-9]{2})|([1-9][0-9]{2}\\\\ ?[0-9]{4}\\\\ ?[0-9]{2}\\\\ ?[0-9]{3})|((GD|HA)[0-9]{3})))|((IE)([0-9][0-9A-Z\\\\+\\\\*][0-9]{5}[A-Z]))|((LT)(([0-9]{9}|[0-9]{12})))|((NL)([0-9]{9}B[0-9]{2}))|((AT)(U[0-9]{8}))|((BG)([0-9]{9,10}))|((RO)([0-9]{2,10})))$"""'], {}), "(\n '^(((EE|EL|DE|PT)([0-9]{9}))|((FI|HU|LU|MT|SI)([0-9]{8}))|((PL|SK)([0-9]{10}))|((IT|LV)([0-9]{11}))|((SE)([0-9]{12}))|((BE)(0?[0-9]{9}))|((CY)([0-9]{8}[A-Z]))|((CZ)([0-9]{8,10}))|((DK)(([0-9]{2}\\\\ ?){3}[0-9]{2}))|((ES)(([0-9A-Z][0-9]{7}[A-Z])|([A-Z][0-9]{7}[0-9A-Z])))|((FR)([0-9A-Z]{2}\\\\ ?[0-9]{9}))|((GB)(([1-9][0-9]{2}\\\\ ?[0-9]{4}\\\\ ?[0-9]{2})|([1-9][0-9]{2}\\\\ ?[0-9]{4}\\\\ ?[0-9]{2}\\\\ ?[0-9]{3})|((GD|HA)[0-9]{3})))|((IE)([0-9][0-9A-Z\\\\+\\\\*][0-9]{5}[A-Z]))|((LT)(([0-9]{9}|[0-9]{12})))|((NL)([0-9]{9}B[0-9]{2}))|((AT)(U[0-9]{8}))|((BG)([0-9]{9,10}))|((RO)([0-9]{2,10})))$'\n )\n", (902, 1489), False, 'import time, re\n'), ((1668, 1740), 'formencode.validators.Regex', 'formencode.validators.Regex', (['tax_code_regex'], {'strip': '(True)', 'not_empty': '(False)'}), '(tax_code_regex, strip=True, not_empty=False)\n', (1695, 1740), False, 'import formencode\n'), ((3439, 3455), 'civicboom.model.PaymentAccount', 'PaymentAccount', ([], {}), '()\n', (3453, 3455), False, 'from civicboom.model import User, Group, PaymentAccount\n'), ((6042, 6093), 'formencode.validators.UnicodeString', 
'formencode.validators.UnicodeString', ([], {'not_empty': '(True)'}), '(not_empty=True)\n', (6077, 6093), False, 'import formencode\n'), ((6149, 6200), 'formencode.validators.UnicodeString', 'formencode.validators.UnicodeString', ([], {'not_empty': '(True)'}), '(not_empty=True)\n', (6184, 6200), False, 'import formencode\n'), ((6302, 6402), 'formencode.validators.UnicodeString', 'formencode.validators.UnicodeString', ([], {'not_empty': '(address_field in PaymentAccount._address_required)'}), '(not_empty=address_field in\n PaymentAccount._address_required)\n', (6337, 6402), False, 'import formencode\n'), ((6485, 6505), 'civicboom.model.payment.country_codes.keys', 'country_codes.keys', ([], {}), '()\n', (6503, 6505), False, 'from civicboom.model.payment import country_codes\n'), ((5702, 5811), 'formencode.validators.OneOf', 'formencode.validators.OneOf', (["['org', 'ind']"], {'messages': "{'missing': 'Please select a type'}", 'not_empty': '(True)'}), "(['org', 'ind'], messages={'missing':\n 'Please select a type'}, not_empty=True)\n", (5729, 5811), False, 'import formencode\n'), ((5838, 5875), 'formencode.validators.UnicodeString', 'formencode.validators.UnicodeString', ([], {}), '()\n', (5873, 5875), False, 'import formencode\n'), ((5907, 5944), 'formencode.validators.UnicodeString', 'formencode.validators.UnicodeString', ([], {}), '()\n', (5942, 5944), False, 'import formencode\n')] |
from PyQt5.QtWidgets import (QPushButton, QLabel, QFileDialog,
QComboBox, QWizard, QWizardPage, QLineEdit,
QVBoxLayout, QApplication, QHBoxLayout)
from ..services.actions import Call
from ..func import get_pattern
from ..statics import NODECNAMES, EDGECNAMES
# TODO: Call new graph after import wizard
class ImportWizard(QWizard):
    """Wizard that collects node/edge file paths and column patterns.

    With ``isSingleFile`` False the wizard shows a node page (Page1) and an
    edge page (Page2); otherwise it shows only the edge page (Page2b).  On
    Finish the collected columns/delimiters are turned into regexes and
    sent to the backend via :class:`Call`.
    """

    def __init__(self, parent=None, isSingleFile=False):
        super(ImportWizard, self).__init__(parent)
        if not isSingleFile:
            self.addPage(Page1(self))
            self.addPage(Page2(self))
        else:
            self.addPage(Page2b(self))
        self.setWindowTitle("Import Wizard")
        # Trigger close event when pressing Finish button to redirect variables to backend
        self.button(QWizard.FinishButton).clicked.connect(self.onFinished)
        # Next is only ever shown when two pages exist; page(0) is then Page1.
        self.button(QWizard.NextButton).clicked.connect(self.page(0).receiveInputs)
        # Initialize variables to send to backend
        self.filepath = [None, None]
        self.nodeColumns = []
        self.nodeDelimiters = []
        self.edgeColumns = []
        self.edgeDelimiters = []

    def onFinished(self):
        """Collect the edge page's inputs and push everything to the backend."""
        # Ask input from the edge page.  In single-file mode only one page
        # exists (id 0), so fall back to it instead of crashing on page(1).
        edge_page = self.page(1) if self.page(1) is not None else self.page(0)
        edge_page.receiveInputs()
        # In single-file mode nodeColumns/nodeDelimiters stay empty;
        # get_pattern is assumed to tolerate that — TODO confirm.
        regexN = get_pattern(self.nodeColumns, self.nodeDelimiters)
        regexE = get_pattern(self.edgeColumns, self.edgeDelimiters)
        # Communicate and transmit to backend
        Call.connect()
        result = Call.send_paths(self.filepath, regexN, regexE, self.nodeColumns, self.edgeColumns)
        # TODO: Make use of return state to enable graph controls
        if result == 'paths imported':
            return True
class Page1(QWizardPage):
    """Wizard page for choosing the node-list file and its column pattern."""

    def __init__(self, parent=None):
        super(Page1, self).__init__(parent)
        n_cols = len(NODECNAMES)
        # One combo box per node column, one delimiter field around each.
        self.columnSelectors = [QComboBox() for _ in range(n_cols)]
        self.delimiterFields = [QLineEdit() for _ in range(n_cols + 1)]
        self.openFileBtn = QPushButton("Import Node List")
        self.stepLabel = QLabel()
        self.formatLabel = QLabel()
        layout = QVBoxLayout()
        layout.addWidget(self.stepLabel)
        layout.addWidget(self.openFileBtn)
        layout.addWidget(self.formatLabel)
        patternLayout = QHBoxLayout()
        # Interleave widgets: delimiter, combo, delimiter, ..., delimiter.
        for idx, field in enumerate(self.delimiterFields):
            patternLayout.addWidget(field)
            if idx < n_cols:
                patternLayout.addWidget(self.columnSelectors[idx])
        self.setLayout(layout)
        # Insert the layout of the regexp elements
        layout.addLayout(patternLayout)
        # Bind actions
        self.openFileBtn.clicked.connect(self.openFileNameDialog)

    def openFileNameDialog(self):
        """Prompt for the node file and remember its path on the wizard."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        chosen, _ = QFileDialog.getOpenFileName(
            self, "QFileDialog.getOpenFileName()", "",
            "All Files (*);;Python Files (*.py)", options=options)
        # if user selected a file store its path to a variable
        if chosen:
            self.wizard().filepath[0] = chosen

    def initializePage(self):
        """Populate the combo boxes and default delimiter texts."""
        self.stepLabel.setText("Nodes information")
        self.formatLabel.setText("Nodes file format")
        for pos, box in enumerate(self.columnSelectors):
            box.addItems(NODECNAMES)
            box.addItem('-')
            # Pre-select a distinct column per box to avoid an empty choice.
            box.setCurrentIndex(pos)
            box.activated.connect(self.handleActivated)
            box.selection = box.currentText()
        # Default separator between columns: one or more whitespace chars.
        for field in self.delimiterFields:
            field.setText('\s+')
        self.delimiterFields[0].setText('^')
        self.delimiterFields[-1].setText('')

    def handleActivated(self, index):
        """Record the chosen column name on the combo box that fired."""
        box = self.sender()
        box.selection = box.itemText(index)

    def receiveInputs(self):
        """Push this page's delimiter texts and column choices to the wizard."""
        wizard = self.wizard()
        wizard.nodeDelimiters = [field.text() for field in self.delimiterFields]
        wizard.nodeColumns = [box.selection for box in self.columnSelectors]
class Page2(QWizardPage):
    """Wizard page for choosing the edge-list file and its column pattern."""

    def __init__(self, parent=None):
        super(Page2, self).__init__(parent)
        n_cols = len(EDGECNAMES)
        self.setWindowTitle("Edge phase")
        self.stepLabel = QLabel()
        self.openFileBtn = QPushButton("Import Edge List")
        # One combo box per edge column, one delimiter field around each.
        self.columnSelectors = [QComboBox() for _ in range(n_cols)]
        self.delimiterFields = [QLineEdit() for _ in range(n_cols + 1)]
        layout = QVBoxLayout()
        layout.addWidget(self.stepLabel)
        layout.addWidget(self.openFileBtn)
        patternLayout = QHBoxLayout()
        # Interleave widgets: delimiter, combo, delimiter, ..., delimiter.
        for idx, field in enumerate(self.delimiterFields):
            patternLayout.addWidget(field)
            if idx < n_cols:
                patternLayout.addWidget(self.columnSelectors[idx])
        self.setLayout(layout)
        # Insert the layout of the regexp elements
        layout.addLayout(patternLayout)
        # Bind actions
        self.openFileBtn.clicked.connect(self.openFileNameDialog)

    def openFileNameDialog(self):
        """Prompt for the edge file and remember its path on the wizard."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        chosen, _ = QFileDialog.getOpenFileName(
            self, "QFileDialog.getOpenFileName()", "",
            "All Files (*);;Python Files (*.py)", options=options)
        # if user selected a file store its path to a variable
        if chosen:
            self.wizard().filepath[1] = chosen

    def initializePage(self):
        """Populate the combo boxes and default delimiter texts."""
        self.stepLabel.setText("Edges information")
        for pos, box in enumerate(self.columnSelectors):
            box.addItems(EDGECNAMES)
            box.addItem('-')
            # Pre-select a distinct column per box to avoid an empty choice.
            box.setCurrentIndex(pos)
            box.activated.connect(self.handleActivated)
            box.selection = box.currentText()
        # Default separator between columns: one or more whitespace chars.
        for field in self.delimiterFields:
            field.setText('\s+')
        self.delimiterFields[0].setText('^')
        self.delimiterFields[-1].setText('')

    def handleActivated(self, index):
        """Record the chosen column name on the combo box that fired."""
        box = self.sender()
        box.selection = box.itemText(index)

    def receiveInputs(self):
        """Push this page's delimiter texts and column choices to the wizard."""
        wizard = self.wizard()
        wizard.edgeDelimiters = [field.text() for field in self.delimiterFields]
        wizard.edgeColumns = [box.selection for box in self.columnSelectors]
# To be called only on single file Import
class Page2b(Page2):
    """Edge-import page shown when importing from a single file.

    The original class body was a line-for-line copy of Page2 (same
    widgets, same strings, same filepath[1] slot); inheriting from Page2
    preserves the behaviour exactly while removing the duplicated code.
    """
    pass
| [
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QFileDialog.Options"
] | [((2189, 2220), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Import Node List"""'], {}), "('Import Node List')\n", (2200, 2220), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2246, 2254), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (2252, 2254), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2282, 2290), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (2288, 2290), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2309, 2322), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (2320, 2322), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2474, 2487), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (2485, 2487), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2941, 2962), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (2960, 2962), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((3036, 3165), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""QFileDialog.getOpenFileName()"""', '""""""', '"""All Files (*);;Python Files (*.py)"""'], {'options': 'options'}), "(self, 'QFileDialog.getOpenFileName()', '',\n 'All Files (*);;Python Files (*.py)', options=options)\n", (3063, 3165), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, 
QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((4665, 4673), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (4671, 4673), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((4701, 4732), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Import Edge List"""'], {}), "('Import Edge List')\n", (4712, 4732), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((5040, 5053), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (5051, 5053), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((5162, 5175), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (5173, 5175), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((5629, 5650), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (5648, 5650), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((5724, 5853), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""QFileDialog.getOpenFileName()"""', '""""""', '"""All Files (*);;Python Files (*.py)"""'], {'options': 'options'}), "(self, 'QFileDialog.getOpenFileName()', '',\n 'All Files (*);;Python Files (*.py)', options=options)\n", (5751, 5853), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((7343, 7351), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (7349, 7351), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, 
QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((7379, 7410), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Import Edge List"""'], {}), "('Import Edge List')\n", (7390, 7410), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((7718, 7731), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (7729, 7731), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((7840, 7853), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (7851, 7853), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((8307, 8328), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (8326, 8328), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((8402, 8531), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""QFileDialog.getOpenFileName()"""', '""""""', '"""All Files (*);;Python Files (*.py)"""'], {'options': 'options'}), "(self, 'QFileDialog.getOpenFileName()', '',\n 'All Files (*);;Python Files (*.py)', options=options)\n", (8429, 8531), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2060, 2071), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (2069, 2071), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((2148, 2159), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (2157, 2159), 
False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((4921, 4932), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (4930, 4932), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((5009, 5020), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (5018, 5020), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((7599, 7610), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (7608, 7610), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n'), ((7687, 7698), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (7696, 7698), False, 'from PyQt5.QtWidgets import QPushButton, QLabel, QFileDialog, QComboBox, QWizard, QWizardPage, QLineEdit, QVBoxLayout, QApplication, QHBoxLayout\n')] |
import sys
import os
from quart import Quart, Response, jsonify, request, exceptions
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from common import MemoryRepo # noqa
repo = MemoryRepo()  # in-memory article store shared by all handlers
repo.add_fixtures()  # seed with sample data so the API is usable immediately
app = Quart(__name__)
# returns json on errors rather than html
class JsonException(exceptions.HTTPException):
    """HTTP error rendered as an empty JSON response instead of Quart's HTML page."""

    def __init__(self, status_code: int) -> None:
        # NOTE(review): intentionally skips HTTPException.__init__; only the
        # status code is carried — confirm nothing relies on description/body.
        self.status_code = status_code

    def get_response(self) -> Response:
        # Empty JSON body carrying the stored status code.
        return JsonResponse('', status=self.status_code)
class JsonResponse(Response):
    """Response subclass whose Content-Type defaults to application/json."""
    default_mimetype = 'application/json'
@app.route('/articles')
async def list_articles() -> Response:
    """GET /articles — return all articles as a JSON array."""
    return jsonify(repo.get_articles())
@app.route('/articles/<int:article_id>')
async def get_article(article_id) -> Response:
    """GET one article as JSON; responds 404 when the id is unknown."""
    try:
        found = repo.get_article(article_id)
    except LookupError:
        raise JsonException(404)
    return jsonify(found)
@app.route('/articles', methods=['POST'])
async def create_article() -> Response:
    """Create an article from a JSON body ``{"title": ...}``.

    Responds 201 with a Location header on success, 400 when the body is
    missing, is not JSON, or has no title.
    """
    data = await request.get_json()
    # get_json() can yield None for a missing/non-JSON body; without this
    # guard data.get would raise AttributeError (a 500 instead of a 400).
    if not data or not data.get('title'):
        raise JsonException(400)
    article_id = repo.add_article({'title': data['title']})
    headers = {'Location': '/articles/%s' % article_id}
    return JsonResponse('', headers=headers, status=201)
@app.route('/articles/<int:article_id>', methods=['POST'])
async def update_article(article_id) -> Response:
    """Update an article's title from a JSON body ``{"title": ...}``.

    Responds 200 on success, 400 for a missing/empty title (or non-JSON
    body), 404 for an unknown article id.
    """
    data = await request.get_json()
    # get_json() can yield None for a missing/non-JSON body; without this
    # guard data.get would raise AttributeError (a 500 instead of a 400).
    if not data or not data.get('title'):
        raise JsonException(400)
    try:
        repo.update_article(article_id, data['title'])
    except LookupError:
        raise JsonException(404)
    return JsonResponse('')
@app.route('/articles/<int:article_id>', methods=['DELETE'])
async def delete_article(article_id) -> Response:
    """DELETE one article; responds 204 on success, 404 for unknown ids."""
    try:
        repo.delete_article(article_id)
    except LookupError:
        raise JsonException(404)
    else:
        return Response('', status=204)
if __name__ == '__main__':
    # Development entry point: debug server on localhost:8000.
    app.run(port=8000, debug=True)
| [
"quart.jsonify",
"os.path.join",
"quart.Response",
"quart.request.get_json",
"common.MemoryRepo",
"quart.Quart"
] | [((185, 197), 'common.MemoryRepo', 'MemoryRepo', ([], {}), '()\n', (195, 197), False, 'from common import MemoryRepo\n'), ((225, 240), 'quart.Quart', 'Quart', (['__name__'], {}), '(__name__)\n', (230, 240), False, 'from quart import Quart, Response, jsonify, request, exceptions\n'), ((106, 137), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (118, 137), False, 'import os\n'), ((912, 928), 'quart.jsonify', 'jsonify', (['article'], {}), '(article)\n', (919, 928), False, 'from quart import Quart, Response, jsonify, request, exceptions\n'), ((1891, 1915), 'quart.Response', 'Response', (['""""""'], {'status': '(204)'}), "('', status=204)\n", (1899, 1915), False, 'from quart import Quart, Response, jsonify, request, exceptions\n'), ((1030, 1048), 'quart.request.get_json', 'request.get_json', ([], {}), '()\n', (1046, 1048), False, 'from quart import Quart, Response, jsonify, request, exceptions\n'), ((1430, 1448), 'quart.request.get_json', 'request.get_json', ([], {}), '()\n', (1446, 1448), False, 'from quart import Quart, Response, jsonify, request, exceptions\n')] |
# Generated by Django 3.2.2 on 2021-05-09 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Member.is_warning with three nullable timestamps; relabel is_ok.

    Auto-generated by Django 3.2.2 (makemigrations); edit with care.
    """

    dependencies = [
        ('application', '0019_alter_member_telegram_username'),
    ]

    operations = [
        # The boolean is_warning flag is superseded by the *_at timestamps below.
        migrations.RemoveField(
            model_name='member',
            name='is_warning',
        ),
        migrations.AddField(
            model_name='member',
            name='overdue_message_sent_at',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='member',
            name='sos_alert_received_at',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='member',
            name='warning_message_sent_at',
            field=models.DateTimeField(null=True),
        ),
        # Only the human-readable label changes; no schema change for is_ok.
        migrations.AlterField(
            model_name='member',
            name='is_ok',
            field=models.BooleanField(default=True, verbose_name='Is safe'),
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.migrations.RemoveField",
"django.db.models.BooleanField"
] | [((251, 313), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""member"""', 'name': '"""is_warning"""'}), "(model_name='member', name='is_warning')\n", (273, 313), False, 'from django.db import migrations, models\n'), ((474, 505), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (494, 505), False, 'from django.db import migrations, models\n'), ((640, 671), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (660, 671), False, 'from django.db import migrations, models\n'), ((808, 839), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (828, 839), False, 'from django.db import migrations, models\n'), ((960, 1017), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Is safe"""'}), "(default=True, verbose_name='Is safe')\n", (979, 1017), False, 'from django.db import migrations, models\n')] |
import sys
sys.path.append('C:/Python37/Lib/site-packages')
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import random
from pyOpenBCI import OpenBCICyton
import threading
import time
import numpy as np
from scipy import signal
import random
import numpy as np
from PIL import Image
# Two heart frames: resting image and "beat" image, swapped by updater().
img = Image.open('heart_1.png').convert('RGBA')
arr = np.array(img)
img2 = Image.open('heart_2.png').convert('RGBA')
arr2 = np.array(img2)
# Counts -> microvolts: 4.5 V reference (in uV) / gain 24 / 23-bit full scale.
SCALE_FACTOR = (4500000)/24/(2**23-1) #From the pyOpenBCI repo
colors = 'rgbycmwr'
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title='Python OpenBCI GUI')
# title_graph = win.addPlot(row=0, col=0, colspan=4,title='Python OpenBCI GUI')
ts_plots = win.addPlot(row=0, col=0, colspan=4, title='Channel %d' % 1, labels={'left': 'uV'})
fft_plot = win.addPlot(row=2, col=0, rowspan=2, colspan=2, title='Filtered Plot', labels={'left': 'uV', 'bottom': 'Hz'})
fft_plot.setLimits(xMin=1,xMax=125, yMin=0, yMax=1e7)
ss_plot = win.addPlot(row=4, col=0, rowspan=2, colspan=2, title='signal',labels={'left':'Is beat'})
# View box holding the heart image (aspect locked so it does not stretch).
heart_im = win.addViewBox(lockAspect=True)
imv = pg.ImageItem()
heart_im.addItem(imv)
imv.setImage(arr)
# Rolling sample buffer filled by save_data(); seeded so slicing never fails.
data= [0]
def save_data(sample):
    """Stream callback: append the scaled first-channel value to ``data``.

    The buffer is trimmed so it cannot grow without bound during long
    recordings; updater() only ever reads the last fs*disp_sec + 100
    (= 850) samples, so keeping a few thousand is a comfortable margin.
    """
    global data
    data.append(sample.channels_data[0]*SCALE_FACTOR)
    if len(data) > 5000:
        del data[:-2500]
def updater():
    """Redraw all plots from the most recent samples.

    Takes the last ``disp_sec`` seconds (plus 100 samples of filter
    warm-up), notch-filters mains hum at 60 and 50 Hz, band-passes
    2-50 Hz, then updates the time-series, FFT and beat-detection plots
    and swaps the heart image while a beat is detected.
    """
    # (The original declared `global data, plots, colors`; `data` is only
    # read here and `plots`/`colors` were never assigned, so no global
    # statement is needed.)
    fs = 250  # Hz
    disp_sec = 3  # Seconds to display
    t_data = np.array(data[-(fs*disp_sec + 100):]).T  # transpose data

    def notch_filter(val, samples, fs=250, order=5):
        """Band-stop filter +/-3 Hz around `val` Hz (Butterworth of `order`).

        Fixes the original's reuse of parameter `b` as both the filter
        order and the coefficient array, which would break on a second
        loop iteration (harmless here since only one frequency is passed).
        """
        filtered = samples
        for freq_Hz in np.nditer(np.array([float(val)])):
            bp_stop_Hz = freq_Hz + 3.0 * np.array([-1, 1])
            b, a = signal.butter(order, bp_stop_Hz / (fs / 2.0), 'bandstop')
            filtered = signal.lfilter(b, a, filtered)
        return filtered

    def bandpass(start, stop, samples, fs=250):
        """First-order Butterworth band-pass between `start` and `stop` Hz."""
        bp_Hz = np.array([start, stop])
        b, a = signal.butter(1, bp_Hz / (fs / 2.0), btype='bandpass')
        return signal.lfilter(b, a, samples, axis=0)

    nf_data = np.array(notch_filter(60, t_data, order=10))
    nf_data = np.array(notch_filter(50, nf_data, order=10))
    bp_nf_data = np.array(bandpass(2, 50, nf_data))

    # Time series (skip the 100-sample filter warm-up region).
    ts_plots.clear()
    ts_plots.plot(pen='r').setData(bp_nf_data[100:])

    # FFT of the filtered signal.
    fft_plot.clear()
    sp = np.absolute(np.fft.fft(bp_nf_data))
    freq = np.fft.fftfreq(bp_nf_data.shape[-1], 1.0/fs)
    fft_plot.plot(pen='y').setData(freq, sp)

    # Crude beat detection: samples more than one std above the mean.
    # (Removed the original's unused `one_beat`/`filt` dead code.)
    ss_plot.clear()
    new_arr = bp_nf_data > np.average(bp_nf_data) + np.std(bp_nf_data)
    ss_plot.plot(pen='g').setData(new_arr[100:]*1)

    # Swap the heart image while a beat appears in the last 100 samples.
    if sum(new_arr[-100:]*1):
        imv.setImage(arr2)
    else:
        imv.setImage(arr)
def start_board():
    """Open the Cyton board on COM5 and stream samples into save_data (blocks)."""
    board = OpenBCICyton(port='COM5', daisy=False)
    board.start_stream(save_data)
if __name__ == '__main__':
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        # Acquire board data on a daemon thread so the Qt loop stays responsive.
        x = threading.Thread(target=start_board)
        x.daemon = True
        x.start()
        # Repaint as fast as the event loop allows (0 ms interval).
        timer = QtCore.QTimer()
        timer.timeout.connect(updater)
        timer.start(0)
        QtGui.QApplication.instance().exec_()
| [
"pyOpenBCI.OpenBCICyton",
"PIL.Image.open",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.average",
"numpy.fft.fftfreq",
"numpy.nditer",
"pyqtgraph.ImageItem",
"scipy.signal.butter",
"numpy.fft.fft",
"numpy.array",
"pyqtgraph.Qt.QtGui.QApplication",
"scipy.signal.lfilter",
"numpy.std",
... | [((12, 60), 'sys.path.append', 'sys.path.append', (['"""C:/Python37/Lib/site-packages"""'], {}), "('C:/Python37/Lib/site-packages')\n", (27, 60), False, 'import sys\n'), ((373, 386), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (381, 386), True, 'import numpy as np\n'), ((447, 461), 'numpy.array', 'np.array', (['img2'], {}), '(img2)\n', (455, 461), True, 'import numpy as np\n'), ((560, 582), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (578, 582), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((590, 635), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {'title': '"""Python OpenBCI GUI"""'}), "(title='Python OpenBCI GUI')\n", (607, 635), True, 'import pyqtgraph as pg\n'), ((1146, 1160), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (1158, 1160), True, 'import pyqtgraph as pg\n'), ((2443, 2489), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['bp_nf_data.shape[-1]', '(1.0 / fs)'], {}), '(bp_nf_data.shape[-1], 1.0 / fs)\n', (2457, 2489), True, 'import numpy as np\n'), ((2886, 2924), 'pyOpenBCI.OpenBCICyton', 'OpenBCICyton', ([], {'port': '"""COM5"""', 'daisy': '(False)'}), "(port='COM5', daisy=False)\n", (2898, 2924), False, 'from pyOpenBCI import OpenBCICyton\n'), ((324, 349), 'PIL.Image.open', 'Image.open', (['"""heart_1.png"""'], {}), "('heart_1.png')\n", (334, 349), False, 'from PIL import Image\n'), ((397, 422), 'PIL.Image.open', 'Image.open', (['"""heart_2.png"""'], {}), "('heart_2.png')\n", (407, 422), False, 'from PIL import Image\n'), ((1438, 1477), 'numpy.array', 'np.array', (['data[-(fs * disp_sec + 100):]'], {}), '(data[-(fs * disp_sec + 100):])\n', (1446, 1477), True, 'import numpy as np\n'), ((1643, 1667), 'numpy.nditer', 'np.nditer', (['notch_freq_Hz'], {}), '(notch_freq_Hz)\n', (1652, 1667), True, 'import numpy as np\n'), ((1945, 1968), 'numpy.array', 'np.array', (['[start, stop]'], {}), '([start, stop])\n', (1953, 1968), True, 'import numpy as np\n'), ((1985, 2039), 
'scipy.signal.butter', 'signal.butter', (['(1)', '(bp_Hz / (fs / 2.0))'], {'btype': '"""bandpass"""'}), "(1, bp_Hz / (fs / 2.0), btype='bandpass')\n", (1998, 2039), False, 'from scipy import signal\n'), ((2056, 2090), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'data'], {'axis': '(0)'}), '(b, a, data, axis=0)\n', (2070, 2090), False, 'from scipy import signal\n'), ((2407, 2429), 'numpy.fft.fft', 'np.fft.fft', (['bp_nf_data'], {}), '(bp_nf_data)\n', (2417, 2429), True, 'import numpy as np\n'), ((3080, 3116), 'threading.Thread', 'threading.Thread', ([], {'target': 'start_board'}), '(target=start_board)\n', (3096, 3116), False, 'import threading\n'), ((3180, 3195), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (3193, 3195), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1749, 1802), 'scipy.signal.butter', 'signal.butter', (['b', '(bp_stop_Hz / (fs / 2.0))', '"""bandstop"""'], {}), "(b, bp_stop_Hz / (fs / 2.0), 'bandstop')\n", (1762, 1802), False, 'from scipy import signal\n'), ((1829, 1855), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (1843, 1855), False, 'from scipy import signal\n'), ((2648, 2670), 'numpy.average', 'np.average', (['bp_nf_data'], {}), '(bp_nf_data)\n', (2658, 2670), True, 'import numpy as np\n'), ((2673, 2691), 'numpy.std', 'np.std', (['bp_nf_data'], {}), '(bp_nf_data)\n', (2679, 2691), True, 'import numpy as np\n'), ((3273, 3302), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (3300, 3302), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1711, 1728), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1719, 1728), True, 'import numpy as np\n')] |
"""integration_code_unique_false
Revision ID: 3b88f8ca0cb7
Revises: <KEY>
Create Date: 2021-02-24 22:56:55.068279
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Recreate the DataIntegration.Code index as non-unique and add a
    non-unique index on DataOperation.Name."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_Integration_DataIntegration_Code', table_name='DataIntegration', schema='Integration')
    op.create_index(op.f('ix_Integration_DataIntegration_Code'), 'DataIntegration', ['Code'], unique=False, schema='Integration')
    op.create_index(op.f('ix_Operation_DataOperation_Name'), 'DataOperation', ['Name'], unique=False, schema='Operation')
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the DataOperation.Name index and restore
    the unique index on DataIntegration.Code."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_Operation_DataOperation_Name'), table_name='DataOperation', schema='Operation')
    op.drop_index(op.f('ix_Integration_DataIntegration_Code'), table_name='DataIntegration', schema='Integration')
    op.create_index('ix_Integration_DataIntegration_Code', 'DataIntegration', ['Code'], unique=True, schema='Integration')
    # ### end Alembic commands ###
| [
"alembic.op.f",
"alembic.op.create_index",
"alembic.op.drop_index"
] | [((379, 488), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Integration_DataIntegration_Code"""'], {'table_name': '"""DataIntegration"""', 'schema': '"""Integration"""'}), "('ix_Integration_DataIntegration_Code', table_name=\n 'DataIntegration', schema='Integration')\n", (392, 488), False, 'from alembic import op\n'), ((1082, 1205), 'alembic.op.create_index', 'op.create_index', (['"""ix_Integration_DataIntegration_Code"""', '"""DataIntegration"""', "['Code']"], {'unique': '(True)', 'schema': '"""Integration"""'}), "('ix_Integration_DataIntegration_Code', 'DataIntegration', [\n 'Code'], unique=True, schema='Integration')\n", (1097, 1205), False, 'from alembic import op\n'), ((504, 547), 'alembic.op.f', 'op.f', (['"""ix_Integration_DataIntegration_Code"""'], {}), "('ix_Integration_DataIntegration_Code')\n", (508, 547), False, 'from alembic import op\n'), ((634, 673), 'alembic.op.f', 'op.f', (['"""ix_Operation_DataOperation_Name"""'], {}), "('ix_Operation_DataOperation_Name')\n", (638, 673), False, 'from alembic import op\n'), ((874, 913), 'alembic.op.f', 'op.f', (['"""ix_Operation_DataOperation_Name"""'], {}), "('ix_Operation_DataOperation_Name')\n", (878, 913), False, 'from alembic import op\n'), ((981, 1024), 'alembic.op.f', 'op.f', (['"""ix_Integration_DataIntegration_Code"""'], {}), "('ix_Integration_DataIntegration_Code')\n", (985, 1024), False, 'from alembic import op\n')] |
"""Code specifically used by Jinja for rendering HTML
from Jinja templates.
"""
# TODO: move all the model stuff that's templating into here
import re
import os
import copy
import datetime
import base64
from urllib.parse import urlparse
from typing import Tuple
from flask import request
from bs4 import BeautifulSoup
import scrypt
import Identicon
import bleach
import markdown
from mdx_bleach.extension import BleachExtension
from mdx_unimoji import UnimojiExtension
from markdown.extensions.footnotes import FootnoteExtension
from markdown.extensions.smarty import SmartyExtension
from markdown.extensions.wikilinks import WikiLinkExtension
from . import models
from . import config
TRUNCATE_LENGTH = 140
def reference_links(post_model, message: str, reply_to: int = None) -> str:
    """Parse @id links.

    Rewrites each ``@<number>`` found in *message*'s text nodes into an
    anchor pointing at the referenced thread (or reply within its
    thread), or into an "invalid" span when no post with that id
    exists.  Explicitly avoids generating links within links: text that
    is already the child of an ``<a>`` tag is left untouched.

    Arguments:
        post_model: model class used to look up posts by id.
        message: HTML fragment to scan and rewrite.
        reply_to: NOTE(review): never used in this body -- presumably
            kept for signature parity with callers; confirm.

    Returns:
        The rewritten HTML (prettified by BeautifulSoup).
    """
    def replace(match):
        at_number = int(match.group(1))
        # Construct the link based on determining if this is
        # a reference to a thread or if it's a reference to a reply
        post_referenced = post_model.query.get(at_number)
        if not post_referenced:
            valid = False
        elif post_referenced.reply_to:
            # A reply links to its parent thread plus a fragment anchor.
            link = '%d#%d' % (post_referenced.reply_to, post_referenced.id)
            valid = True
        else:
            link = str(post_referenced.id)
            valid = True
        if valid:
            return '<a href="/threads/%s" class="reflink">@%d</a>' % (link, at_number)
        else:
            return '<span class="reflink reflink-invalid">@%d</span>' % at_number
    soup = BeautifulSoup(message, 'html.parser')
    at_link_pattern = re.compile(r'@(\d+)')
    for text_match in soup.find_all(text=True):
        # Skip text that is already inside an anchor to avoid nesting links.
        if re.search(at_link_pattern, text_match) and text_match.parent.name != 'a':
            new_text = re.sub(at_link_pattern, replace, text_match)
            text_match.replace_with(new_text)
    return soup.prettify(formatter=None)
# FIXME: what if passed a name which contains no tripcode?
def make_tripcode(form_name: str) -> Tuple[str, str]:
    """Create a tripcode from the name field of a post.

    Returns:
        tuple: A two-element tuple containing (in the order of):
            name without tripcode, tripcode.  The tripcode element is
            None when *form_name* contains no ``#`` separator (or the
            ``#`` is the final character).
    """
    # A valid tripcode is a name field containing an octothorpe
    # that isn't the last character.
    if not (form_name and '#' in form_name[:-1]):
        return form_name, None
    name, unhashed_tripcode = form_name.split('#', 1)
    # Create the salt.  The ordering depends on name length parity --
    # presumably to vary the salt layout between names; confirm intent.
    if len(name) % 2 == 0:
        salt = name + config.SECRET_SALT
    else:
        salt = config.SECRET_SALT + name
    # str(b64encode(...)) yields the bytes repr "b'...'", so [2:-1]
    # strips the b'' wrapper; '/', '+', '=' are then remapped to
    # '.', '_', '-' (keeps the code safe for use as e.g. a filename --
    # see the identicon path built from it).
    tripcode = str(
        base64.b64encode(
            scrypt.hash(
                name + config.SECRET_KEY + unhashed_tripcode,
                salt,
                buflen=16,
            ),
        ),
    )[2:-1].replace('/', '.').replace('+', '_').replace('=', '-')
    return name, tripcode
def youtube_link_to_embed(markdown_message):
    """Swap the first YouTube URL found in *markdown_message* for an
    embedded-player iframe.

    At most one link is replaced; text without a YouTube URL comes
    back unchanged.
    """
    pattern = re.compile(r"(?:https:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)\/(?:watch\?v=)?(.+)")
    embed_markup = r'<iframe allow="autoplay; encrypted-media" allowfullscreen frameborder="0" height="270" src="https://www.youtube.com/embed/\1" width="480"></iframe>'
    return pattern.sub(embed_markup, markdown_message, count=1)
def parse_markdown(message: str, allow_all=False, unique_slug=None) -> str:
    """Parse a markdown document to HTML with python-markdown.
    Configures/uses various python-markdown extensions.
    Arguments:
        message: The markdown message to parse into html.
        allow_all: Don't use bleach, don't sanitize.
        unique_slug: When specified overrides the timestamp slug
            which is prepended to all HTML element id attribute values.
    Returns:
        The HTML resulted from parsing the markdown with
        python-markdown + various extensions for it.
    """
    # Generate a url-friendly timestamp to avoid creating
    # the same id twice across two or more posts.
    # FIXME: Implement for TOC
    if unique_slug is None:
        timestamp = datetime.datetime.utcnow()
        # FIXME: surely there's a better way to url-ify this...
        unique_slug = str(timestamp).replace(' ', '').replace(':', '').replace('.', '')
    # NOTE(review): this monkey-patches the extension class globally,
    # so the slug would leak between concurrent parses -- confirm that
    # is acceptable for this app.
    FootnoteExtension.get_separator = lambda x: unique_slug + '-'
    # Configure the rest of the extensions!
    extensions = [
        SmartyExtension(
            smart_dashes=True,
            smart_quotes=True,
            smart_ellipses=True,
            substitutions={},
        ),
        UnimojiExtension(), # FIXME: add in configurable emojis, etc.
        'mdx_linkify',
        'markdown.extensions.nl2br',
        'markdown.extensions.footnotes',
        'markdown.extensions.toc',
        'markdown.extensions.def_list',
        'markdown.extensions.abbr',
        'markdown.extensions.fenced_code',
    ]
    if not allow_all:
        # NOTE(review): this local deliberately shadows the module-level
        # ``bleach`` import inside this function.
        bleach = BleachExtension(
            strip=True,
            tags=[
                'h2',
                'h3',
                'h4',
                'h5',
                'h6',
                'blockquote',
                'ul',
                'ol',
                'dl',
                'dt',
                'dd',
                'li',
                'code',
                'sup',
                'pre',
                'br',
                'a',
                'p',
                'em',
                'strong',
                'iframe',
            ],
            attributes={
                '*': [],
                'h2': ['id'],
                'h3': ['id'],
                'h4': ['id'],
                'h5': ['id'],
                'h6': ['id'],
                'li': ['id'],
                'sup': ['id'],
                'a': ['href', 'class'],
                'iframe': ['allow', 'width', 'height', 'src', 'frameborder', 'allowfullscreen'],
            },
            styles={},
            protocols=['http', 'https'],
        )
        extensions.append(bleach)
    md = markdown.Markdown(extensions=extensions)
    return md.convert(message)
def add_domains_to_link_texts(html_message: str) -> str:
    """Append domain in parentheses to all link texts.
    Changes links like this:
        <a href="http://example.org/picture.jpg">Pic</a>
        <a href="/contact-us">Contact</a>
    ... to links like this:
        <a href="http://example.org/picture.jpg">Pic (example.org)</a>
        <a href="/contact-us">Contact (internal link)</a>
    Arguments:
        html_message: The HTML which to replace link text.
    Return:
        The HTML message with the links replaced as described above.
    """
    soup = BeautifulSoup(html_message, 'html.parser')
    # find every link in the message which isn't a "reflink"
    # and append `(thedomain)` to the end of each's text
    for anchor in soup.find_all('a'):
        # Skip anchors without an href and the @id reference links
        # produced by reference_links().
        if (not anchor.has_attr('href')) or ('reflink' in anchor.attrs.get('class', [])):
            continue
        # Copy the tag, change its properties, and replace the original
        new_tag = copy.copy(anchor)
        href_parts = urlparse(anchor['href'])
        # A hostname in the href marks the link as external.
        link_class = 'external-link' if href_parts.hostname else 'internal-link'
        new_tag['class'] = new_tag.get('class', []) + [link_class]
        domain = href_parts.hostname if href_parts.hostname else 'internal link'
        new_tag.string = '%s (%s)' % (anchor.string, domain)
        anchor.replace_with(new_tag)
    return soup.prettify(formatter=None)
def ensure_identicon(tripcode: str) -> str:
    """Make sure tripcode has an associated identicon.

    The identicon is a file saved in static/identicons/ with the
    filename matching the tripcode.  If no such file exists it will be
    created.

    Args:
        tripcode: tripcode string used as the identicon filename stem.

    Returns:
        str: the path to the identicon.
    """
    from . import app  # FIXME: this is hacky (late import, avoids import cycle)
    directory_where_identicons_go = os.path.join(
        app.blueprint.static_folder,
        'identicons',
    )
    # exist_ok avoids the check-then-create race the old
    # exists()/makedirs() pair had when two requests arrived at once.
    os.makedirs(directory_where_identicons_go, exist_ok=True)
    path_where_identicon_should_be = os.path.join(
        directory_where_identicons_go,
        tripcode + '.png',
    )
    if not os.path.isfile(path_where_identicon_should_be):
        identicon = Identicon.render(tripcode)
        with open(path_where_identicon_should_be, 'wb') as f:
            f.write(identicon)
    return path_where_identicon_should_be
def truncate(some_string: str, length: int = None):
    """Clip *some_string* to at most *length* characters, appending an
    ellipsis when anything was cut off.

    A falsy *length* falls back to the module-wide TRUNCATE_LENGTH.
    """
    limit = length or TRUNCATE_LENGTH
    if len(some_string) <= limit:
        return some_string
    return some_string[:limit] + '…'
# TODO: no need for headline field...
# Strip HTML...
def post_summary(post, length=None):
    """Return a short plain-text summary of *post*: its message is
    rendered to HTML, the first element's text is extracted with
    BeautifulSoup, stripped, reduced to its first line, and truncated
    to *length* characters (module default when None)."""
    return truncate(BeautifulSoup(message_to_html(post.message), 'html.parser').find().get_text().strip().split('\n')[0], length=length)
def get_stylesheet():
    """Return the visitor's stylesheet URL cookie (None when unset)."""
    return request.cookies.get('stylesheet_url')
def get_pages():
    """Fetch every Page record from the database."""
    return models.db.session.query(models.Page).all()
def get_blotter_entries():
    """Fetch all blotter entries, newest (highest id) first."""
    return models.BlotterEntry.query.order_by(models.BlotterEntry.id.desc()).all()
# FIXME: rename to "contrast text" or something
# TODO: the brightness factor... (the raw hex integer is a crude
# brightness proxy -- e.g. pure green reads as "dark"; consider a
# proper luminance formula)
def complementary_color(my_hex):
    """Return a maximal-contrast text color for the given hex color:
    black ('000000') for values at or above mid-grey, white ('ffffff')
    below it.

    A leading '#' is accepted and stripped.

    Example:
        >>> complementary_color('FFFFFF')
        '000000'
    """
    if my_hex[0] == '#':
        my_hex = my_hex[1:]
    my_hex_number = int(my_hex, 16)
    absolute_grey = int('ffffff', 16) / 2
    # The old '>' and '==' branches both returned '000000' (and the
    # '==' case could never fire for an int vs 8388607.5); merged.
    return '000000' if my_hex_number >= absolute_grey else 'ffffff'
def since_bumptime(bumptime, thread=None, reply=None):
    """Render a ``<time>`` element saying how long ago *bumptime* was.

    Only the largest non-zero unit is shown ('2 Days', '5 Minutes',
    ...), or 'now' when under a second.  When *thread* is given the
    text becomes a permalink to that thread (with a ``#reply``
    fragment when *reply* is also given); *reply* without *thread* is
    rejected.
    """
    elapsed = int((bumptime.now() - bumptime).total_seconds())
    days, remainder = divmod(elapsed, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    labelled_units = (
        (days, 'Day'),
        (hours, 'Hour'),
        (minutes, 'Minute'),
        (seconds, 'Second'),
    )
    readable_parts = []
    for amount, label in labelled_units:
        if not amount:
            continue
        chunk = '%d %s' % (amount, label)
        if amount > 1:
            chunk += 's'
        readable_parts.append(chunk)
    very_readable = readable_parts[0] if readable_parts else 'now'
    # Trim microseconds down to milliseconds for the datetime attribute.
    datetime_w3c_spec = str(bumptime)[:-3]
    if thread:
        permalink = '<a href="/threads/{permalink}">{parts} ago</a>'.format(
            permalink='%d#%d' % (thread, reply) if reply else thread,
            parts=very_readable,
        )
    elif reply:
        raise Exception('heck no!')
    else:
        permalink = very_readable + ' ago'
    return '''
    <time datetime="{bumptime}" title="{bumptime}">
        {permalink}
    </time>
    '''.format(bumptime=datetime_w3c_spec, permalink=permalink)
def message_to_html(message: str, reply_to: int = None):
    """Render a raw/markdown post message (as stored in the database)
    into its final HTML: embed the first YouTube link, parse markdown,
    resolve @id reference links, annotate link texts with their
    domains, then apply the word filter.
    """
    message = youtube_link_to_embed(message)
    message = parse_markdown(message)
    message = reference_links(models.Post, message, reply_to)
    message = add_domains_to_link_texts(message)
    # If message gets filtered flag poster's IP
    message, was_filtered = models.WordFilter.replace_all(message)
    if was_filtered:
        models.FlaggedIps.new(request.remote_addr, 'word filter')
    return message
# FIXME: move to raw_message_to..
def mutate_message(form):
    """Change the message in various ways before saving to DB.

    Reads ``message`` and ``reply_to`` from the submitted *form* and
    runs the same rendering pipeline as message_to_html().
    """
    message = form.message.data
    # BUG FIX: the old body called ``templating.*`` and referenced
    # ``cls`` -- both undefined in this module, so any call raised
    # NameError.  Use the module-local pipeline instead.
    message = youtube_link_to_embed(message)
    message = parse_markdown(message)
    message = reference_links(models.Post, message, int(form.reply_to.data) if form.reply_to.data else None)
    message = add_domains_to_link_texts(message)
    # If message gets filtered flag poster's IP
    message, was_filtered = models.WordFilter.replace_all(message)
    if was_filtered:
        models.FlaggedIps.new(request.remote_addr, 'word filter')
    return message
| [
"re.search",
"os.path.exists",
"markdown.Markdown",
"urllib.parse.urlparse",
"os.makedirs",
"re.compile",
"datetime.datetime.utcnow",
"scrypt.hash",
"markdown.extensions.smarty.SmartyExtension",
"os.path.join",
"mdx_unimoji.UnimojiExtension",
"bs4.BeautifulSoup",
"flask.request.cookies.get",... | [((1626, 1663), 'bs4.BeautifulSoup', 'BeautifulSoup', (['message', '"""html.parser"""'], {}), "(message, 'html.parser')\n", (1639, 1663), False, 'from bs4 import BeautifulSoup\n'), ((1686, 1707), 're.compile', 're.compile', (['"""@(\\\\d+)"""'], {}), "('@(\\\\d+)')\n", (1696, 1707), False, 'import re\n'), ((3454, 3501), 're.sub', 're.sub', (['regex', 'replacement', 'markdown_message', '(1)'], {}), '(regex, replacement, markdown_message, 1)\n', (3460, 3501), False, 'import re\n'), ((6184, 6224), 'markdown.Markdown', 'markdown.Markdown', ([], {'extensions': 'extensions'}), '(extensions=extensions)\n', (6201, 6224), False, 'import markdown\n'), ((6836, 6878), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_message', '"""html.parser"""'], {}), "(html_message, 'html.parser')\n", (6849, 6878), False, 'from bs4 import BeautifulSoup\n'), ((8068, 8123), 'os.path.join', 'os.path.join', (['app.blueprint.static_folder', '"""identicons"""'], {}), "(app.blueprint.static_folder, 'identicons')\n", (8080, 8123), False, 'import os\n'), ((8294, 8356), 'os.path.join', 'os.path.join', (['directory_where_identicons_go', "(tripcode + '.png')"], {}), "(directory_where_identicons_go, tripcode + '.png')\n", (8306, 8356), False, 'import os\n'), ((9106, 9143), 'flask.request.cookies.get', 'request.cookies.get', (['"""stylesheet_url"""'], {}), "('stylesheet_url')\n", (9125, 9143), False, 'from flask import request\n'), ((4285, 4311), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4309, 4311), False, 'import datetime\n'), ((4602, 4698), 'markdown.extensions.smarty.SmartyExtension', 'SmartyExtension', ([], {'smart_dashes': '(True)', 'smart_quotes': '(True)', 'smart_ellipses': '(True)', 'substitutions': '{}'}), '(smart_dashes=True, smart_quotes=True, smart_ellipses=True,\n substitutions={})\n', (4617, 4698), False, 'from markdown.extensions.smarty import SmartyExtension\n'), ((4763, 4781), 'mdx_unimoji.UnimojiExtension', 
'UnimojiExtension', ([], {}), '()\n', (4779, 4781), False, 'from mdx_unimoji import UnimojiExtension\n'), ((5126, 5596), 'mdx_bleach.extension.BleachExtension', 'BleachExtension', ([], {'strip': '(True)', 'tags': "['h2', 'h3', 'h4', 'h5', 'h6', 'blockquote', 'ul', 'ol', 'dl', 'dt', 'dd',\n 'li', 'code', 'sup', 'pre', 'br', 'a', 'p', 'em', 'strong', 'iframe']", 'attributes': "{'*': [], 'h2': ['id'], 'h3': ['id'], 'h4': ['id'], 'h5': ['id'], 'h6': [\n 'id'], 'li': ['id'], 'sup': ['id'], 'a': ['href', 'class'], 'iframe': [\n 'allow', 'width', 'height', 'src', 'frameborder', 'allowfullscreen']}", 'styles': '{}', 'protocols': "['http', 'https']"}), "(strip=True, tags=['h2', 'h3', 'h4', 'h5', 'h6',\n 'blockquote', 'ul', 'ol', 'dl', 'dt', 'dd', 'li', 'code', 'sup', 'pre',\n 'br', 'a', 'p', 'em', 'strong', 'iframe'], attributes={'*': [], 'h2': [\n 'id'], 'h3': ['id'], 'h4': ['id'], 'h5': ['id'], 'h6': ['id'], 'li': [\n 'id'], 'sup': ['id'], 'a': ['href', 'class'], 'iframe': ['allow',\n 'width', 'height', 'src', 'frameborder', 'allowfullscreen']}, styles={},\n protocols=['http', 'https'])\n", (5141, 5596), False, 'from mdx_bleach.extension import BleachExtension\n'), ((7238, 7255), 'copy.copy', 'copy.copy', (['anchor'], {}), '(anchor)\n', (7247, 7255), False, 'import copy\n'), ((7277, 7301), 'urllib.parse.urlparse', 'urlparse', (["anchor['href']"], {}), "(anchor['href'])\n", (7285, 7301), False, 'from urllib.parse import urlparse\n'), ((8158, 8203), 'os.path.exists', 'os.path.exists', (['directory_where_identicons_go'], {}), '(directory_where_identicons_go)\n', (8172, 8203), False, 'import os\n'), ((8213, 8255), 'os.makedirs', 'os.makedirs', (['directory_where_identicons_go'], {}), '(directory_where_identicons_go)\n', (8224, 8255), False, 'import os\n'), ((8392, 8438), 'os.path.isfile', 'os.path.isfile', (['path_where_identicon_should_be'], {}), '(path_where_identicon_should_be)\n', (8406, 8438), False, 'import os\n'), ((8460, 8486), 'Identicon.render', 'Identicon.render', 
(['tripcode'], {}), '(tripcode)\n', (8476, 8486), False, 'import Identicon\n'), ((1767, 1805), 're.search', 're.search', (['at_link_pattern', 'text_match'], {}), '(at_link_pattern, text_match)\n', (1776, 1805), False, 'import re\n'), ((1864, 1908), 're.sub', 're.sub', (['at_link_pattern', 'replace', 'text_match'], {}), '(at_link_pattern, replace, text_match)\n', (1870, 1908), False, 'import re\n'), ((2846, 2920), 'scrypt.hash', 'scrypt.hash', (['(name + config.SECRET_KEY + unhashed_tripcode)', 'salt'], {'buflen': '(16)'}), '(name + config.SECRET_KEY + unhashed_tripcode, salt, buflen=16)\n', (2857, 2920), False, 'import scrypt\n')] |
# [1차]프렌츠4블록
import numpy as np
def new_borad(m, n, board):
    """One elimination pass for the Friends 4-Block board.

    Scans *board* (an n x m numpy array of one-character strings) for
    2x2 squares of identical tiles, drops every cell that belongs to
    at least one such square, left-compacts each row and pads it with
    "-1" markers.

    Returns (new board as list of lists, number of 2x2 matches,
    number of cells removed this pass).
    """
    keep = np.full((n, m), True)
    match_count = 0
    for row in range(n - 1):
        for col in range(m - 1):
            tile = board[row, col]
            if tile == "-1":
                # Padding only ever appears at the tail of a row (rows
                # are compacted left), so nothing further can match.
                break
            neighbours = (board[row, col + 1], board[row + 1, col], board[row + 1, col + 1])
            if all(tile == other for other in neighbours):
                keep[row, col] = False
                keep[row, col + 1] = False
                keep[row + 1, col] = False
                keep[row + 1, col + 1] = False
                match_count += 1
    compacted = []
    removed = 0
    for row in range(n):
        survivors = board[row][keep[row]].tolist()
        padding = m - len(survivors)
        removed += padding
        compacted.append(survivors + ["-1"] * padding)
    return compacted, match_count, removed
def solution(m, n, board):
    """Friends 4-Block: repeatedly clear 2x2 groups of identical tiles
    and return the total number of tiles removed.

    The grid is transposed and mirrored up-front so each board column
    becomes a row with its bottom at index 0; new_borad()'s row
    compaction then plays the role of gravity.
    """
    grid = np.array([list(row) for row in board])
    rotated = np.transpose(grid)[..., ::-1]
    removed_total = 0
    matches = -1
    while matches != 0:
        new_rows, matches, removed = new_borad(m, n, rotated)
        removed_total += removed
        rotated = np.array(new_rows)
    return removed_total
'''
채점을 시작합니다.
정확성 테스트
테스트 1 〉 통과 (0.13ms, 27.9MB)
테스트 2 〉 통과 (0.19ms, 27.8MB)
테스트 3 〉 통과 (0.26ms, 27.7MB)
테스트 4 〉 통과 (1.92ms, 28MB)
테스트 5 〉 통과 (103.88ms, 28MB)
테스트 6 〉 통과 (8.26ms, 28MB)
테스트 7 〉 통과 (1.22ms, 27.6MB)
테스트 8 〉 통과 (2.04ms, 27.7MB)
테스트 9 〉 통과 (0.15ms, 27.6MB)
테스트 10 〉 통과 (0.99ms, 27.6MB)
테스트 11 〉 통과 (2.55ms, 28MB)
채점 결과
정확성: 100.0
합계: 100.0 / 100.0
'''
| [
"numpy.array",
"numpy.transpose"
] | [((915, 930), 'numpy.transpose', 'np.transpose', (['b'], {}), '(b)\n', (927, 930), True, 'import numpy as np\n'), ((1101, 1116), 'numpy.array', 'np.array', (['new_b'], {}), '(new_b)\n', (1109, 1116), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# coding:utf-8
from helper import ListNode
class Solution:
    """A FIFO queue built from two LIFO stacks.

    push() is O(1); pop() amortises the reversal cost by draining the
    accept stack into the output stack only when the latter runs dry.
    """

    def __init__(self):
        self.acceptStack = []
        self.outputStack = []

    def push(self, node):
        """Enqueue *node*."""
        self.acceptStack.append(node)

    def pop(self):
        """Dequeue and return the oldest element, or None when empty."""
        if not self.outputStack:
            while self.acceptStack:
                self.outputStack.append(self.acceptStack.pop())
        return self.outputStack.pop() if self.outputStack else None
if __name__ == "__main__":
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
s = Solution()
s.push(n1)
s.push(n2)
print(s.pop().val)
s.push(n3)
s.push(n4)
print(s.pop().val)
| [
"helper.ListNode"
] | [((543, 554), 'helper.ListNode', 'ListNode', (['(1)'], {}), '(1)\n', (551, 554), False, 'from helper import ListNode\n'), ((564, 575), 'helper.ListNode', 'ListNode', (['(2)'], {}), '(2)\n', (572, 575), False, 'from helper import ListNode\n'), ((585, 596), 'helper.ListNode', 'ListNode', (['(3)'], {}), '(3)\n', (593, 596), False, 'from helper import ListNode\n'), ((606, 617), 'helper.ListNode', 'ListNode', (['(4)'], {}), '(4)\n', (614, 617), False, 'from helper import ListNode\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
from Algorithms.sway_sampler import sway, cont_dominate
from gmpy2 import popcount, mpz
import random
def count1(decint):
    """Return the popcount (number of set bits) of non-negative int
    *decint*.

    Uses the standard library instead of gmpy2: ``bin(n).count('1')``
    matches ``popcount(mpz(n))`` for the non-negative values this
    module produces (ints parsed with ``int(x, 2)`` and XOR results).
    """
    return bin(decint).count('1')
def split_products(pop, groupC=5):
    """Split a population of bit-string products for the SWAY sampler.

    A random member is picked as the centre; each candidate is scored
    by its popcount ("workload") and by the popcount of workload XOR
    centre (a distance proxy).  Candidates are bucketed into *groupC*
    workload-sorted groups; within each group the distance-sorted
    extremes become the west/east representatives and the two halves
    fill westItems/eastItems.
    """
    centre = count1(int(random.choice(pop), 2))
    scored = []
    for candidate in pop:
        workload = count1(int(candidate, 2))
        distance = count1(workload ^ centre)
        scored.append((candidate, workload, distance))
    # sort by the workloads
    scored.sort(key=lambda item: item[1])
    size = len(scored) // groupC
    west, east, westItems, eastItems = [], [], [], []
    for index in range(groupC):
        group = sorted(scored[index * size:(index + 1) * size], key=lambda item: item[2])
        # filling the answers
        west.append(group[0][0])
        east.append(group[-1][0])
        half = len(group) // 2
        westItems.extend(item[0] for item in group[:half])
        eastItems.extend(item[0] for item in group[half:])
    return west, east, westItems, eastItems
def comparing(part1, part2):
    """Return True when *part1* wins at least as many pairwise
    continuous-domination contests against *part2* as it loses."""
    outcomes = [cont_dominate(left, right) > 0 for left, right in zip(part1, part2)]
    wins = sum(outcomes)
    return wins >= len(outcomes) - wins
def optimize(init_pop, eval_func):
    """Run the SWAY sampling optimiser over *init_pop* using this
    module's split and comparison strategies."""
    import warnings
    # Silence noisy warnings emitted during the sampling run.
    warnings.filterwarnings('ignore')
    return sway(init_pop, eval_func, split_products, comparing)
"random.choice",
"Algorithms.sway_sampler.cont_dominate",
"gmpy2.mpz",
"Algorithms.sway_sampler.sway",
"warnings.filterwarnings"
] | [((1443, 1461), 'random.choice', 'random.choice', (['pop'], {}), '(pop)\n', (1456, 1461), False, 'import random\n'), ((2598, 2631), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2621, 2631), False, 'import warnings\n'), ((2643, 2695), 'Algorithms.sway_sampler.sway', 'sway', (['init_pop', 'eval_func', 'split_products', 'comparing'], {}), '(init_pop, eval_func, split_products, comparing)\n', (2647, 2695), False, 'from Algorithms.sway_sampler import sway, cont_dominate\n'), ((1382, 1393), 'gmpy2.mpz', 'mpz', (['decint'], {}), '(decint)\n', (1385, 1393), False, 'from gmpy2 import popcount, mpz\n'), ((2421, 2440), 'Algorithms.sway_sampler.cont_dominate', 'cont_dominate', (['i', 'j'], {}), '(i, j)\n', (2434, 2440), False, 'from Algorithms.sway_sampler import sway, cont_dominate\n')] |
__author__ = 'Veronica'
from random import randint
# Shared 9-byte frame prefix prepended to every generated packet.
header = [0xeb, 0x95, 0x4, 0xfa, 0x0, 0x0, 0x0, 0x0, 0x0]
def checksum(frame):
    """Frame checksum: 0xFF minus the low byte of the sum of every
    payload byte after the 4-byte prefix."""
    payload_sum = sum(frame[4:]) & 0xFF
    return 0xFF - payload_sum
def acknowledge():
    """Build the fixed 6-byte acknowledgement frame (a fresh list on
    every call)."""
    #ack.append(checksum(ack))  -- checksum byte deliberately omitted
    return [0xeb, 0x95, 0x3, 0x0, 0x55, 0xAA]
def housekeeping(n, t):
    """Build a housekeeping telemetry frame.

    Args:
        n: unused here -- kept for signature parity with modoNormal.
        t: byte inserted right after the frame header.

    Returns:
        list: header + t + 0x7f + a plate-voltage block, two counter
        blocks, two more voltage blocks, terminated by the checksum.
    """
    n_Vp = [0x7f, 0x65, 0x51, 0x40, 0x33, 0x29, 0x21, 0x1a, 0x15, 0x10, 0xd, 0xa, 0x8, 0x7, 0x5, 0x4]  # plate voltages
    n_Cont = [0x64] * 16  # counter placeholder values
    # BUG FIX: ``hkp = header`` aliased the shared module-level header
    # list, so every call appended the whole payload onto it and
    # corrupted all later frames.  Copy it instead.
    hkp = list(header)
    hkp.append(t)
    hkp.append(0x7f)
    hkp.extend(n_Vp)
    hkp.extend(n_Cont)
    hkp.extend(n_Cont)
    hkp.extend(n_Vp)
    hkp.extend(n_Vp)
    hkp.append(checksum(hkp))
    return hkp
def modoNormal(n, t):
    """Build a "normal mode" telemetry frame: four randomly chosen
    16-value counter blocks, with the (t, n) byte pair inserted before
    the first and third blocks, terminated by the checksum byte.

    Args:
        n: payload byte written alongside *t*.
        t: byte written right after the frame header (and mid-frame).

    Returns:
        list: the assembled frame.
    """
    counter_tables = (
        [0xad, 0x8a, 0x6e, 0x57, 0x46, 0x38, 0x2c, 0x23, 0x1c, 0x16, 0x12, 0xe, 0xb, 0x9, 0x7, 0x6],
        [0x109, 0xd3, 0xa8, 0x86, 0x6b, 0x55, 0x44, 0x36, 0x2b, 0x22, 0x1b, 0x16, 0x11, 0xe, 0xb, 0x9],
        [0x144, 0x102, 0xce, 0xa4, 0x83, 0x68, 0x53, 0x42, 0x35, 0x2a, 0x21, 0x1b, 0x15, 0x11, 0xd, 0xb],
    )

    def pick_counters():
        # Same draw and mapping as the original if/elif chain:
        # randint 3 -> table 1, 2 -> table 2, 1 -> table 3.
        return counter_tables[3 - randint(1, 3)]

    # BUG FIX: ``modoN = header`` aliased the shared module-level
    # header list, so repeated calls appended onto each other's frames.
    modoN = list(header)
    modoN.append(t)
    modoN.append(n)
    modoN.extend(pick_counters())
    modoN.extend(pick_counters())
    modoN.append(t)
    modoN.append(n)
    modoN.extend(pick_counters())
    modoN.extend(pick_counters())
    modoN.append(checksum(modoN))
    return modoN
| [
"random.randint"
] | [((1338, 1351), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (1345, 1351), False, 'from random import randint\n'), ((1558, 1571), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (1565, 1571), False, 'from random import randint\n'), ((1823, 1836), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (1830, 1836), False, 'from random import randint\n'), ((2043, 2056), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2050, 2056), False, 'from random import randint\n')] |
import math
import numpy
def hill_chart_parametrisation(h, turbine_specs):
    """Power (MW) and flow rate through a bulb turbine from the
    empirical hill chart of Aggidis and Feather (2012).

    *h* is the head difference.  turbine_specs keys used: f_g grid
    frequency, g_p generator poles, t_d runner diameter, t_cap
    capacity, dens water density, g gravity.
    """
    diameter = turbine_specs["t_d"]
    capacity = turbine_specs["t_cap"]
    rho_g = turbine_specs["dens"] * turbine_specs["g"]
    sqrt_head = math.sqrt(h)
    # Step 1: empirical hill chart -> unit speed/flow and efficiency
    speed = 2 * 60 * turbine_specs["f_g"] / turbine_specs["g_p"]
    n_11 = speed * diameter / sqrt_head
    q_11 = 0.0166 * n_11 + 0.4861 if n_11 < 255 else 4.75
    flow = q_11 * diameter ** 2 * sqrt_head
    efficiency = -0.0019 * n_11 + 1.2461
    raw_power = rho_g * flow * h / (10 ** 6)
    # Step 2: cap at installed capacity (97.25% gearbox efficiency)
    # and back-calculate the flow matching the capped output.
    if raw_power * efficiency < capacity:
        power = raw_power * 0.9725 * efficiency
    else:
        power = capacity * 0.9725
        raw_power = power / (efficiency * 0.9725)
        flow = raw_power * (10 ** 6) / (rho_g * h)
    return power, flow
def ideal_turbine_parametrisation(h, turbine_specs):
    """Loss-free bulb turbine: orifice-style flow through the runner
    disc at velocity sqrt(2*g*h), capped at the installed capacity
    (with the flow reduced accordingly when capped).
    """
    area = math.pi * ((turbine_specs["t_d"] / 2) ** 2)
    flow = area * math.sqrt(2 * turbine_specs["g"] * h)
    power = turbine_specs["dens"] * turbine_specs["g"] * flow * h / (10 ** 6)
    if power >= turbine_specs["t_cap"]:
        power = turbine_specs["t_cap"]
        flow = power * (10 ** 6) / (turbine_specs["dens"] * turbine_specs["g"] * h)
    return power, flow
def turbine_parametrisation(h, turbine_specs):
    """Dispatch to the hill-chart model (options == 0) or the
    idealised model and return its (power, flow) pair."""
    model = (
        hill_chart_parametrisation
        if turbine_specs["options"] == 0
        else ideal_turbine_parametrisation
    )
    return model(h, turbine_specs)
def gate_sluicing(h, ramp_f, N_s, q_s0, sluice_specs, flux_limiter=0.2):
"""
Calculates overall flow through power plant sluice gates given the status of the operation
"""
temp = ramp_f ** 2 * N_s * sluice_specs["c_d"] * sluice_specs["a_s"] * math.sqrt(2 * sluice_specs["g"] * abs(h))
if ramp_f >= 0.5 and abs(temp) >= abs(q_s0) > 0.:
q_s = -numpy.sign(h) * min(abs((1 + flux_limiter) * q_s0), abs(temp))
elif ramp_f >= 0.5 and abs(q_s0) >= abs(temp):
q_s = -numpy.sign(h) * max(abs((1 - flux_limiter) * q_s0), abs(temp))
else:
q_s = -numpy.sign(h) * temp
return q_s
def turbine_sluicing(h, ramp_f, N_t, q_t0, sluice_specs, turbine_specs, flux_limiter=0.2):
"""
Calculates flow through turbines operating in sluicing mode
"""
temp = ramp_f ** 2 * N_t * sluice_specs["c_t"] * (math.pi * (turbine_specs["t_d"] / 2) ** 2) *\
math.sqrt(2 * sluice_specs["g"] * abs(h))
if ramp_f >= 0.5 and abs(temp) >= abs(q_t0):
q_t = -numpy.sign(h) * min(abs((1 + flux_limiter) * q_t0), abs(temp))
elif ramp_f >= 0.5 and abs(q_t0) >= abs(temp):
q_t = -numpy.sign(h) * max(abs((1 - flux_limiter) * q_t0), abs(temp))
else:
q_t = -numpy.sign(h) * temp
if abs(h) != 0.0 and ramp_f >= 0.95 and q_t == 0.:
q_t = -numpy.sign(h) * temp
return q_t
| [
"math.sqrt",
"numpy.sign"
] | [((484, 496), 'math.sqrt', 'math.sqrt', (['h'], {}), '(h)\n', (493, 496), False, 'import math\n'), ((631, 643), 'math.sqrt', 'math.sqrt', (['h'], {}), '(h)\n', (640, 643), False, 'import math\n'), ((1346, 1383), 'math.sqrt', 'math.sqrt', (["(2 * turbine_specs['g'] * h)"], {}), "(2 * turbine_specs['g'] * h)\n", (1355, 1383), False, 'import math\n'), ((2344, 2357), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2354, 2357), False, 'import numpy\n'), ((2984, 2997), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2994, 2997), False, 'import numpy\n'), ((3293, 3306), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (3303, 3306), False, 'import numpy\n'), ((2473, 2486), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2483, 2486), False, 'import numpy\n'), ((2561, 2574), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2571, 2574), False, 'import numpy\n'), ((3113, 3126), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (3123, 3126), False, 'import numpy\n'), ((3201, 3214), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (3211, 3214), False, 'import numpy\n')] |
# Copyright (c) 2011, <NAME> [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
import unittest
import warnings
import os
import math
from random import shuffle, random
from collections import Counter,OrderedDict
from dictset import DictSet,_rep_generator
from math import isnan, isinf, floor
import numpy as np
from pprint import pprint as pp
from pyvttbl import PyvtTbl
from pyvttbl import DataFrame
from pyvttbl.plotting import *
from pyvttbl.stats import *
from pyvttbl.misc.support import *
class Test_ttest_paired(unittest.TestCase):
    """Regression tests for pyvttbl's Ttest against precomputed
    expected statistics and expected __str__ output."""
    def test0(self):
        """paired ttest"""
        # Expected statistics for the paired comparison of A and B.
        R=Ttest([('t', -1.4106912317171967),
                 ('p2tail', 0.19601578492449323),
                 ('p1tail', 0.09800789246224662),
                 ('n1', 9),
                 ('n2', 9),
                 ('r', 0.10182008678393427),
                 ('df', 8),
                 ('mu1', 4.555555555555555),
                 ('mu2', 7.888888888888889),
                 ('var1', 6.777777777777778),
                 ('var2', 47.111111111111114),
                 ('tc2tail', 1.8595480375228424),
                 ('tc1tail', 2.3060041350333704),
                 ('cohen_d', 0.47023041057239895),
                 ('delta', 1.410691231717197),
                 ('power1tail', 0.36186192660269623),
                 ('power2tail', 0.23741605057147952)],
                paired=True,
                aname='A', bname='B',
                type='t-Test: Paired Two Sample for means')
        A=[3,4, 5,8,9, 1,2,4, 5]
        B=[6,19,3,2,14,4,5,17,1]
        D=Ttest()
        D.run(A,B,paired=True)
##        print(D)
        # NOTE(review): assertTrue(D[k], R[k]) never compares against
        # R[k] (the second argument is only the failure message) --
        # probably meant assertAlmostEqual(D[k], R[k]).
        for k in list(R.keys()):
            self.assertTrue(D[k],R[k])
    def test01(self):
        """paired ttest"""
        # Expected formatted report text for the same A/B comparison.
        R="""t-Test: Paired Two Sample for means
                          A          B   
=========================================
Mean                     4.556      7.889 
Variance                 6.778     47.111 
Observations             9          9     
Pearson Correlation      0.102 
df                       8 
t Stat                  -1.411 
alpha                    0.050 
P(T<=t) one-tail         0.098 
t Critical one-tail      2.306 
P(T<=t) two-tail         0.196 
t Critical two-tail      1.860 
P(T<=t) two-tail         0.196 
Effect size dz           0.470 
delta                    1.411 
Observed power one-tail  0.362 
Observed power two-tail  0.237 """
        A=[3,4, 5,8,9, 1,2,4, 5]
        B=[6,19,3,2,14,4,5,17,1]
        D=Ttest()
        D.run(A,B,paired=True)
        self.assertEqual(str(D),R)
    def test4(self):
        # Paired ttest driven through DataFrame.ttest on a CSV fixture
        # (requires data/example2_prepost.csv relative to the cwd).
        R="""t-Test: Paired Two Sample for means
                            PRE        POST   
=============================================
Mean                       87.250     87.083 
Variance                 1207.659   1166.629 
Observations               12         12     
Pearson Correlation         0.995 
df                         11 
t Stat                      0.163 
alpha                       0.050 
P(T<=t) one-tail            0.437 
t Critical one-tail         2.201 
P(T<=t) two-tail            0.873 
t Critical two-tail         1.796 
P(T<=t) two-tail            0.873 
Effect size dz              0.047 
delta                       0.163 
Observed power one-tail     0.068 
Observed power two-tail     0.035 """
        df = DataFrame()
        df.read_tbl('data/example2_prepost.csv')
        D = df.ttest('PRE','POST',paired=True)
        self.assertEqual(str(D),R)
    def test__repr__(self):
        # NOTE(review): despite the name this never exercises repr();
        # it compares the unequal-variance statistics value-by-value.
        R=Ttest([('t', 2.310889197854228), ('p2tail', 0.026382412254338405), ('p1tail', 0.013191206127169203), ('n1', 21), ('n2', 23), ('df', 37.855400659439084), ('mu1', 51.476190476190474), ('mu2', 41.52173913043478), ('var1', 121.16190476190475), ('var2', 294.0790513833993), ('tc2tail', 1.6861153650443554), ('tc1tail', 2.0246481352107009), ('cohen_d', 0.6908475708680588), ('delta', 2.1846518399376538), ('power1tail', 0.6916337616595899), ('power2tail', 0.56712772561445368)], equal_variance=False, aname='A', bname='B', type='t-Test: Two-Sample Assuming Unequal Variances')
        A=[24,61,59,46,43,44,52,43,58,67,62,57,71,49,54,43,53,57,49,56,33]
        B=[42,33,46,37,43,41,10,42,55,19,17,55,26,54,60,28,62,20,53,48,37,85,42]
        D=Ttest()
        D.run(A,B,equal_variance=False)
        for key in list(R.keys()):
            self.assertAlmostEqual(D[key],R[key])
def suite():
    """Aggregate this module's test cases into a single TestSuite."""
    # unittest.makeSuite() is deprecated (and removed in Python 3.13); the
    # TestLoader API is the supported equivalent and yields the same tests.
    # The trailing comma makes the argument an explicit 1-tuple -- the
    # original's doubled parentheses were only a grouped expression.
    return unittest.TestSuite((
        unittest.defaultTestLoader.loadTestsFromTestCase(Test_ttest_paired),
    ))
if __name__ == "__main__":
    # Run this module's full test suite with the default text runner.
    runner = unittest.TextTestRunner()
    runner.run(suite())
| [
"unittest.makeSuite",
"unittest.TextTestRunner",
"pyvttbl.DataFrame"
] | [((5018, 5043), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (5041, 5043), False, 'import unittest\n'), ((3730, 3741), 'pyvttbl.DataFrame', 'DataFrame', ([], {}), '()\n', (3739, 3741), False, 'from pyvttbl import DataFrame\n'), ((4890, 4927), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_ttest_paired'], {}), '(Test_ttest_paired)\n', (4908, 4927), False, 'import unittest\n')] |
""" Contains TaskState class """
from tasksupervisor.endpoint.fiware_orion.entities.entity import FiwareEntity
from tasksupervisor.endpoint.fiware_orion.entities.task import Task
class TaskState(FiwareEntity):
    """Represents the current state of a Task as a Fiware entity.

    Mirrors the task's identity into this entity and starts it in
    ``State.Idle`` with an empty error message.
    """

    def __init__(self, _task):
        """Build the state entity from *_task*.

        Raises
        ------
        TypeError
            If *_task* is not a ``Task`` instance.  (TypeError subclasses
            Exception, so pre-existing broad handlers still catch it.)
        """
        if not isinstance(_task, Task):
            # Formerly a bare Exception("TypeMissmatch") -- typo'd and
            # untyped; a TypeError with context is far easier to debug.
            raise TypeError(
                f"expected a Task instance, got {type(_task).__name__}"
            )
        FiwareEntity.__init__(self, id=_task.id)
        self.name = _task.taskName
        self.state = State.Idle
        self.task_id = _task.id
        self.task_manager_id = _task.taskManagerId
        self.error_message = ""
class State:
    """Integer constants describing the lifecycle phase of a task."""

    Idle = 0      # nothing to do yet
    Running = 1   # actively executing
    Waiting = 2   # blocked on an external event
    Active = 3    # engaged but not running
    Finished = 4  # completed successfully
    Aborted = 5   # cancelled before completion
    Error = 6     # terminated with a failure
class UserAction:
    """Integer constants for the user interaction currently requested."""

    Idle = 0              # no user action pending
    WaitForLoading = 1    # waiting for the user to load
    WaitForUnloading = 2  # waiting for the user to unload
| [
"tasksupervisor.endpoint.fiware_orion.entities.entity.FiwareEntity.__init__"
] | [((389, 429), 'tasksupervisor.endpoint.fiware_orion.entities.entity.FiwareEntity.__init__', 'FiwareEntity.__init__', (['self'], {'id': '_task.id'}), '(self, id=_task.id)\n', (410, 429), False, 'from tasksupervisor.endpoint.fiware_orion.entities.entity import FiwareEntity\n')] |
# standard library imports
import io
import logging
import os
import struct
import warnings
# 3rd party library imports
import numpy as np
from uuid import UUID
# local imports
from glymur import Jp2k
from .lib import tiff as libtiff
from .jp2box import UUIDBox
# Map the numeric TIFF datatypes to the format string used by the struct module
# and keep track of how wide they are.
tag_dtype = {
1: {'format': 'B', 'nbytes': 1},
2: {'format': 'B', 'nbytes': 1},
3: {'format': 'H', 'nbytes': 2},
4: {'format': 'I', 'nbytes': 4},
5: {'format': 'II', 'nbytes': 8},
7: {'format': 'B', 'nbytes': 1},
9: {'format': 'i', 'nbytes': 4},
10: {'format': 'ii', 'nbytes': 8},
11: {'format': 'f', 'nbytes': 4},
12: {'format': 'd', 'nbytes': 8},
13: {'format': 'I', 'nbytes': 4},
16: {'format': 'Q', 'nbytes': 8},
17: {'format': 'q', 'nbytes': 8},
18: {'format': 'Q', 'nbytes': 8}
}
# Mnemonics for the two TIFF format version numbers.
_TIFF = 42
_BIGTIFF = 43
class Tiff2Jp2k(object):
    """
    Convert a TIFF image (classic or BigTIFF) into a JPEG 2000 (JP2) file.

    Attributes
    ----------
    found_geotiff_tags : bool
        If true, then this TIFF must be a GEOTIFF
    tiff_filename : path or str
        Path to TIFF file.
    jp2_filename : path or str
        Path to JPEG 2000 file to be written.
    tilesize : tuple
        The dimensions of a tile in the JP2K file.
    create_uuid : bool
        Create a UUIDBox for the TIFF IFD metadata.
    version : int
        Identifies the TIFF as 32-bit TIFF or 64-bit TIFF.
    """
def __init__(
self, tiff_filename, jp2_filename, tilesize=None,
verbosity=logging.CRITICAL, create_uuid=True, **kwargs
):
self.tiff_filename = tiff_filename
if not self.tiff_filename.exists():
raise FileNotFoundError(f'{tiff_filename} does not exist')
self.jp2_filename = jp2_filename
self.tilesize = tilesize
self.create_uuid = create_uuid
self.kwargs = kwargs
self.logger = logging.getLogger('tiff2jp2')
self.logger.setLevel(verbosity)
ch = logging.StreamHandler()
ch.setLevel(verbosity)
self.logger.addHandler(ch)
    def __enter__(self):
        # Open the TIFF via the libtiff wrapper and keep the handle for the
        # duration of the with-block.
        self.tiff_fp = libtiff.open(self.tiff_filename)
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always close the TIFF handle, even when the with-body raised.
        libtiff.close(self.tiff_fp)
    def run(self):
        """Convert: write the image first, then optionally append metadata."""
        self.copy_image()
        if self.create_uuid:
            self.copy_metadata()
    def copy_metadata(self):
        """
        Copy over the TIFF IFD. Place it in a UUID box. Append to the JPEG
        2000 file.
        """
        # create a bytesio object for the IFD
        b = io.BytesIO()

        with open(self.tiff_filename, 'rb') as tfp:
            endian = self._process_header(b, tfp)
            self._process_tags(b, tfp, endian)

        if self.found_geotiff_tags:
            # geotiff UUID (the GeoJP2 convention)
            uuid = UUID('b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03')
            payload = b.getvalue()
        else:
            # Make it an exif UUID.
            uuid = UUID(bytes=b'JpgTiffExif->JP2')
            payload = b'EXIF\0\0' + b.getvalue()

        # the length of the box is the length of the payload plus 8 bytes
        # to store the length of the box and the box ID
        box_length = len(payload) + 8

        uuid_box = UUIDBox(uuid, payload, box_length)
        # Append mode is correct: run() finishes copy_image() -- and thus the
        # JP2 codestream -- before this method is invoked.
        with open(self.jp2_filename, mode='ab') as f:
            uuid_box.write(f)
    def _process_tags(self, b, tfp, endian):
        """
        Re-serialize the source TIFF's IFD tags into the classic 32-bit,
        little-endian IFD that will live inside the UUID box.

        Parameters
        ----------
        b : io.BytesIO
            Destination stream for the rewritten IFD.
        tfp : file object
            Source TIFF, positioned at the start of the IFD tag count.
        endian : str
            struct byte-order prefix of the source TIFF ('<' or '>').
        """
        self.found_geotiff_tags = False

        tag_length = 20 if self.version == _BIGTIFF else 12

        # keep this for writing to the UUID, which will always be 32-bit
        little_tiff_tag_length = 12

        # how many tags?
        if self.version == _BIGTIFF:
            buffer = tfp.read(8)
            num_tags, = struct.unpack(endian + 'Q', buffer)
        else:
            buffer = tfp.read(2)
            num_tags, = struct.unpack(endian + 'H', buffer)

        write_buffer = struct.pack('<H', num_tags)
        b.write(write_buffer)

        # Read the IFD main body from the TIFF; the tag payloads that cannot
        # fit into 4 bytes follow it.  As the source might be big endian, we
        # cannot just copy it as one opaque chunk -- each tag is re-packed.
        buffer = tfp.read(num_tags * tag_length)

        start_of_tags_position = b.tell()
        after_ifd_position = start_of_tags_position + len(buffer)

        if self.version == _BIGTIFF:
            tag_format_str = endian + 'HHQQ'
            tag_payload_offset = 12
            max_tag_payload_length = 8
        else:
            tag_format_str = endian + 'HHII'
            tag_payload_offset = 8
            max_tag_payload_length = 4

        for idx in range(num_tags):

            self.logger.debug(f'tag #: {idx}')

            b.seek(start_of_tags_position + idx * little_tiff_tag_length)

            tag_data = buffer[idx * tag_length:(idx + 1) * tag_length]

            tag, dtype, nvalues, offset = struct.unpack(tag_format_str, tag_data)  # noqa : E501

            # 34735 is the GeoKeyDirectoryTag; its presence marks a GeoTIFF.
            if tag == 34735:
                self.found_geotiff_tags = True

            payload_length = tag_dtype[dtype]['nbytes'] * nvalues

            if payload_length > max_tag_payload_length:
                # the payload does not fit into the tag entry, so use the
                # offset to seek to that position
                current_position = tfp.tell()
                tfp.seek(offset)
                payload_buffer = tfp.read(payload_length)
                tfp.seek(current_position)

                # read the payload from the TIFF
                payload_format = tag_dtype[dtype]['format'] * nvalues
                payload = struct.unpack(
                    endian + payload_format, payload_buffer
                )

                # write the tag entry to the UUID
                new_offset = after_ifd_position
                outbuffer = struct.pack(
                    '<HHII', tag, dtype, nvalues, new_offset
                )
                b.write(outbuffer)

                # now write the payload at the outlying position and then come
                # back to the same position in the file stream
                cpos = b.tell()
                b.seek(new_offset)

                out_format = '<' + tag_dtype[dtype]['format'] * nvalues
                outbuffer = struct.pack(out_format, *payload)
                b.write(outbuffer)

                # keep track of the next position to write out-of-IFD data
                after_ifd_position = b.tell()
                b.seek(cpos)

            else:
                # the payload DOES fit into the TIFF tag entry
                payload_buffer = tag_data[tag_payload_offset:]

                # read ALL of the payload buffer
                payload_format = (
                    tag_dtype[dtype]['format']
                    * int(max_tag_payload_length / tag_dtype[dtype]['nbytes'])
                )

                payload = struct.unpack(
                    endian + payload_format, payload_buffer
                )

                # Extract the actual payload.  Two things going on here.  First
                # of all, not all of the items may be used.  For example, if
                # the payload length is 4 bytes but the format string was HHH,
                # the that last 16 bit value is not wanted, so we should
                # discard it.  Second thing is that the signed and unsigned
                # rational datatypes effectively have twice the number of
                # values so we need to account for that.
                if dtype in [5, 10]:
                    payload = payload[:2 * nvalues]
                else:
                    payload = payload[:nvalues]

                # Does it fit into the UUID tag entry (4 bytes)?
                if payload_length <= 4:
                    # so write it back into the tag entry in the UUID
                    outbuffer = struct.pack('<HHI', tag, dtype, nvalues)
                    b.write(outbuffer)

                    payload_format = tag_dtype[dtype]['format'] * nvalues

                    # we may need to alter the output format
                    if payload_format in ['H', 'B', 'I']:
                        # just write it as an integer
                        payload_format = 'I'

                    outbuffer = struct.pack('<' + payload_format, *payload)
                    b.write(outbuffer)

                else:
                    # UUID:  write the tag entry after the IFD
                    new_offset = after_ifd_position
                    outbuffer = struct.pack(
                        '<HHII', tag, dtype, nvalues, new_offset
                    )
                    b.write(outbuffer)

                    # now write the payload at the outlying position and then
                    # come back to the same position in the file stream
                    cpos = b.tell()
                    b.seek(new_offset)

                    out_format = '<' + tag_dtype[dtype]['format'] * nvalues
                    outbuffer = struct.pack(out_format, *payload)
                    b.write(outbuffer)

                    # keep track of the next position to write out-of-IFD data
                    after_ifd_position = b.tell()
                    b.seek(cpos)
    def _process_header(self, b, tfp):
        """
        Validate the source TIFF header, record whether it is classic TIFF or
        BigTIFF, and emit the equivalent classic little-endian header into
        the UUID stream.

        Parameters
        ----------
        b : io.BytesIO
            Destination stream for the rewritten IFD.
        tfp : file object
            Source TIFF, positioned at the start of the file.  On return it
            is positioned at the first IFD.

        Returns
        -------
        str
            struct byte-order prefix of the source TIFF ('<' or '>').
        """
        buffer = tfp.read(4)
        data = struct.unpack('BB', buffer[:2])

        # big endian or little endian?
        if data[0] == 73 and data[1] == 73:
            # little endian
            endian = '<'
        elif data[0] == 77 and data[1] == 77:
            # big endian
            endian = '>'
        else:
            msg = (
                f"The byte order indication in the TIFF header "
                f"({data}) is invalid. It should be either "
                f"{bytes([73, 73])} or {bytes([77, 77])}."
            )
            raise RuntimeError(msg)

        # version number and offset to the first IFD
        version, = struct.unpack(endian + 'H', buffer[2:4])
        self.version = _TIFF if version == 42 else _BIGTIFF

        if self.version == _BIGTIFF:
            # BigTIFF: 2-byte offset size, 2 reserved bytes, 8-byte offset.
            buffer = tfp.read(12)
            _, _, offset = struct.unpack(endian + 'HHQ', buffer)
        else:
            buffer = tfp.read(4)
            offset, = struct.unpack(endian + 'I', buffer)
        tfp.seek(offset)

        # write this 32-bit header into the UUID, no matter if we had bigtiff
        # or regular tiff or big endian
        data = struct.pack('<BBHI', 73, 73, 42, 8)
        b.write(data)

        return endian
def copy_image(self):
"""
Transfer the image data from the TIFF to the JPEG 2000 file. If the
TIFF has a stripped configuration, this may be somewhat inefficient.
"""
if libtiff.isTiled(self.tiff_fp):
isTiled = True
else:
isTiled = False
photo = libtiff.getFieldDefaulted(self.tiff_fp, 'Photometric')
imagewidth = libtiff.getFieldDefaulted(self.tiff_fp, 'ImageWidth')
imageheight = libtiff.getFieldDefaulted(self.tiff_fp, 'ImageLength')
spp = libtiff.getFieldDefaulted(self.tiff_fp, 'SamplesPerPixel')
sf = libtiff.getFieldDefaulted(self.tiff_fp, 'SampleFormat')
bps = libtiff.getFieldDefaulted(self.tiff_fp, 'BitsPerSample')
planar = libtiff.getFieldDefaulted(self.tiff_fp, 'PlanarConfig')
if sf not in [libtiff.SampleFormat.UINT, libtiff.SampleFormat.VOID]:
sampleformat_str = self.tagvalue2str(libtiff.SampleFormat, sf)
msg = (
f"The TIFF SampleFormat is {sampleformat_str}. Only UINT "
"and VOID are supported."
)
raise RuntimeError(msg)
if bps not in [8, 16]:
msg = (
f"The TIFF BitsPerSample is {bps}. Only 8 and 16 bits per "
"sample are supported."
)
raise RuntimeError(msg)
if bps == 8 and sf == libtiff.SampleFormat.UINT:
dtype = np.uint8
if bps == 16 and sf == libtiff.SampleFormat.UINT:
dtype = np.uint16
if (
planar == libtiff.PlanarConfig.SEPARATE
and self.tilesize is not None
):
msg = (
"A separated planar configuration is not supported when a "
"tile size is specified."
)
raise RuntimeError(msg)
if libtiff.isTiled(self.tiff_fp):
tw = libtiff.getFieldDefaulted(self.tiff_fp, 'TileWidth')
th = libtiff.getFieldDefaulted(self.tiff_fp, 'TileLength')
else:
tw = imagewidth
rps = libtiff.getFieldDefaulted(self.tiff_fp, 'RowsPerStrip')
num_strips = libtiff.numberOfStrips(self.tiff_fp)
if self.tilesize is not None:
jth, jtw = self.tilesize
num_jp2k_tile_rows = int(np.ceil(imagewidth / jtw))
num_jp2k_tile_cols = int(np.ceil(imagewidth / jtw))
if photo == libtiff.Photometric.YCBCR:
# Using the RGBA interface is the only reasonable way to deal with
# this.
use_rgba_interface = True
elif photo == libtiff.Photometric.PALETTE:
# Using the RGBA interface is the only reasonable way to deal with
# this. The single plane gets turned into RGB.
use_rgba_interface = True
spp = 3
else:
use_rgba_interface = False
jp2 = Jp2k(
self.jp2_filename,
shape=(imageheight, imagewidth, spp),
tilesize=self.tilesize,
**self.kwargs
)
if not libtiff.RGBAImageOK(self.tiff_fp):
photometric_string = self.tagvalue2str(libtiff.Photometric, photo)
msg = (
f"The TIFF Photometric tag is {photometric_string} and is "
"not supported."
)
raise RuntimeError(msg)
elif self.tilesize is None and libtiff.RGBAImageOK(self.tiff_fp):
# if no jp2k tiling was specified and if the image is ok to read
# via the RGBA interface, then just do that.
msg = (
"Reading using the RGBA interface, writing as a single tile "
"image."
)
self.logger.info(msg)
if photo not in [
libtiff.Photometric.MINISWHITE,
libtiff.Photometric.MINISBLACK,
libtiff.Photometric.PALETTE,
libtiff.Photometric.YCBCR,
libtiff.Photometric.RGB
]:
photostr = self.tagvalue2str(libtiff.Photometric, photo)
msg = (
"Beware, the RGBA interface to attempt to read this TIFF "
f"when it has a PhotometricInterpretation of {photostr}."
)
warnings.warn(msg)
image = libtiff.readRGBAImageOriented(self.tiff_fp)
if spp < 4:
image = image[:, :, :3]
jp2[:] = image
elif isTiled and self.tilesize is not None:
num_tiff_tile_cols = int(np.ceil(imagewidth / tw))
partial_jp2_tile_rows = (imageheight / jth) != (imageheight // jth)
partial_jp2_tile_cols = (imagewidth / jtw) != (imagewidth // jtw)
rgba_tile = np.zeros((th, tw, 4), dtype=np.uint8)
self.logger.debug(f'image: {imageheight} x {imagewidth}')
self.logger.debug(f'jptile: {jth} x {jtw}')
self.logger.debug(f'ttile: {th} x {tw}')
for idx, tilewriter in enumerate(jp2.get_tilewriters()):
# populate the jp2k tile with tiff tiles
self.logger.info(f'Tile: #{idx}')
self.logger.debug(f'J tile row: #{idx // num_jp2k_tile_cols}')
self.logger.debug(f'J tile col: #{idx % num_jp2k_tile_cols}')
jp2k_tile = np.zeros((jth, jtw, spp), dtype=dtype)
tiff_tile = np.zeros((th, tw, spp), dtype=dtype)
jp2k_tile_row = int(np.ceil(idx // num_jp2k_tile_cols))
jp2k_tile_col = int(np.ceil(idx % num_jp2k_tile_cols))
# the coordinates of the upper left pixel of the jp2k tile
julr, julc = jp2k_tile_row * jth, jp2k_tile_col * jtw
# loop while the upper left corner of the current tiff file is
# less than the lower left corner of the jp2k tile
r = julr
while (r // th) * th < min(julr + jth, imageheight):
c = julc
tilenum = libtiff.computeTile(self.tiff_fp, c, r, 0, 0)
self.logger.debug(f'TIFF tile # {tilenum}')
tiff_tile_row = int(np.ceil(tilenum // num_tiff_tile_cols))
tiff_tile_col = int(np.ceil(tilenum % num_tiff_tile_cols))
# the coordinates of the upper left pixel of the TIFF tile
tulr = tiff_tile_row * th
tulc = tiff_tile_col * tw
# loop while the left corner of the current tiff tile is
# less than the right hand corner of the jp2k tile
while ((c // tw) * tw) < min(julc + jtw, imagewidth):
if use_rgba_interface:
libtiff.readRGBATile(
self.tiff_fp, tulc, tulr, rgba_tile
)
# flip the tile upside down!!
tiff_tile = np.flipud(rgba_tile[:, :, :3])
else:
libtiff.readEncodedTile(
self.tiff_fp, tilenum, tiff_tile
)
# determine how to fit this tiff tile into the jp2k
# tile
#
# these are the section coordinates in image space
ulr = max(julr, tulr)
llr = min(julr + jth, tulr + th)
ulc = max(julc, tulc)
urc = min(julc + jtw, tulc + tw)
# convert to JP2K tile coordinates
jrows = slice(ulr % jth, (llr - 1) % jth + 1)
jcols = slice(ulc % jtw, (urc - 1) % jtw + 1)
# convert to TIFF tile coordinates
trows = slice(ulr % th, (llr - 1) % th + 1)
tcols = slice(ulc % tw, (urc - 1) % tw + 1)
jp2k_tile[jrows, jcols, :] = tiff_tile[trows, tcols, :]
# move exactly one tiff tile over
c += tw
tilenum = libtiff.computeTile(self.tiff_fp, c, r, 0, 0)
tiff_tile_row = int(
np.ceil(tilenum // num_tiff_tile_cols)
)
tiff_tile_col = int(
np.ceil(tilenum % num_tiff_tile_cols)
)
# the coordinates of the upper left pixel of the TIFF
# tile
tulr = tiff_tile_row * th
tulc = tiff_tile_col * tw
r += th
# last tile column? If so, we may have a partial tile.
if (
partial_jp2_tile_cols
and jp2k_tile_col == num_jp2k_tile_cols - 1
):
last_j2k_cols = slice(0, imagewidth - julc)
jp2k_tile = jp2k_tile[:, last_j2k_cols, :].copy()
if (
partial_jp2_tile_rows
and jp2k_tile_row == num_jp2k_tile_rows - 1
):
last_j2k_rows = slice(0, imageheight - julr)
jp2k_tile = jp2k_tile[last_j2k_rows, :, :].copy()
tilewriter[:] = jp2k_tile
elif not isTiled and self.tilesize is not None:
num_strips = libtiff.numberOfStrips(self.tiff_fp)
num_jp2k_tile_cols = int(np.ceil(imagewidth / jtw))
partial_jp2_tile_rows = (imageheight / jth) != (imageheight // jth)
partial_jp2_tile_cols = (imagewidth / jtw) != (imagewidth // jtw)
tiff_strip = np.zeros((rps, imagewidth, spp), dtype=dtype)
rgba_strip = np.zeros((rps, imagewidth, 4), dtype=np.uint8)
for idx, tilewriter in enumerate(jp2.get_tilewriters()):
self.logger.info(f'Tile: #{idx}')
jp2k_tile = np.zeros((jth, jtw, spp), dtype=dtype)
jp2k_tile_row = idx // num_jp2k_tile_cols
jp2k_tile_col = idx % num_jp2k_tile_cols
# the coordinates of the upper left pixel of the jp2k tile
julr, julc = jp2k_tile_row * jth, jp2k_tile_col * jtw
# Populate the jp2k tile with tiff strips.
# Move by strips from the start of the jp2k tile to the bottom
# of the jp2k tile. That last strip may be partially empty,
# worry about that later.
#
# loop while the upper left corner of the current tiff file is
# less than the lower left corner of the jp2k tile
r = julr
while (r // rps) * rps < min(julr + jth, imageheight):
stripnum = libtiff.computeStrip(self.tiff_fp, r, 0)
if stripnum >= num_strips:
# we've moved past the end of the tiff
break
if use_rgba_interface:
# must use the first row in the strip
libtiff.readRGBAStrip(
self.tiff_fp, stripnum * rps, rgba_strip
)
# must flip the rows (!!) and get rid of the alpha
# plane
tiff_strip = np.flipud(rgba_strip[:, :, :spp])
else:
libtiff.readEncodedStrip(
self.tiff_fp, stripnum, tiff_strip
)
# the coordinates of the upper left pixel of the TIFF
# strip
tulr = stripnum * rps
tulc = 0
# determine how to fit this tiff strip into the jp2k
# tile
#
# these are the section coordinates in image space
ulr = max(julr, tulr)
llr = min(julr + jth, tulr + rps)
ulc = max(julc, tulc)
urc = min(julc + jtw, tulc + tw)
# convert to JP2K tile coordinates
jrows = slice(ulr % jth, (llr - 1) % jth + 1)
jcols = slice(ulc % jtw, (urc - 1) % jtw + 1)
# convert to TIFF strip coordinates
trows = slice(ulr % rps, (llr - 1) % rps + 1)
tcols = slice(ulc % tw, (urc - 1) % tw + 1)
jp2k_tile[jrows, jcols, :] = tiff_strip[trows, tcols, :]
r += rps
# last tile column? If so, we may have a partial tile.
# j2k_cols is not sufficient here, must shorten it from 250
# to 230
if (
partial_jp2_tile_cols
and jp2k_tile_col == num_jp2k_tile_cols - 1
):
# decrease the number of columns by however many it sticks
# over the image width
last_j2k_cols = slice(0, imagewidth - julc)
jp2k_tile = jp2k_tile[:, last_j2k_cols, :].copy()
if (
partial_jp2_tile_rows
and stripnum == num_strips - 1
):
# decrease the number of rows by however many it sticks
# over the image height
last_j2k_rows = slice(0, imageheight - julr)
jp2k_tile = jp2k_tile[last_j2k_rows, :, :].copy()
tilewriter[:] = jp2k_tile
def tagvalue2str(self, cls, tag_value):
"""
Take a class that encompasses all of a tag's allowed values and find
the name of that value.
"""
tag_value_string = [
key for key in dir(cls) if getattr(cls, key) == tag_value
][0]
return tag_value_string
| [
"logging.getLogger",
"numpy.ceil",
"logging.StreamHandler",
"uuid.UUID",
"numpy.flipud",
"glymur.Jp2k",
"io.BytesIO",
"struct.pack",
"numpy.zeros",
"struct.unpack",
"warnings.warn"
] | [((1982, 2011), 'logging.getLogger', 'logging.getLogger', (['"""tiff2jp2"""'], {}), "('tiff2jp2')\n", (1999, 2011), False, 'import logging\n'), ((2065, 2088), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2086, 2088), False, 'import logging\n'), ((2672, 2684), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2682, 2684), False, 'import io\n'), ((4002, 4029), 'struct.pack', 'struct.pack', (['"""<H"""', 'num_tags'], {}), "('<H', num_tags)\n", (4013, 4029), False, 'import struct\n'), ((9460, 9491), 'struct.unpack', 'struct.unpack', (['"""BB"""', 'buffer[:2]'], {}), "('BB', buffer[:2])\n", (9473, 9491), False, 'import struct\n'), ((10068, 10108), 'struct.unpack', 'struct.unpack', (["(endian + 'H')", 'buffer[2:4]'], {}), "(endian + 'H', buffer[2:4])\n", (10081, 10108), False, 'import struct\n'), ((10570, 10605), 'struct.pack', 'struct.pack', (['"""<BBHI"""', '(73)', '(73)', '(42)', '(8)'], {}), "('<BBHI', 73, 73, 42, 8)\n", (10581, 10605), False, 'import struct\n'), ((13589, 13694), 'glymur.Jp2k', 'Jp2k', (['self.jp2_filename'], {'shape': '(imageheight, imagewidth, spp)', 'tilesize': 'self.tilesize'}), '(self.jp2_filename, shape=(imageheight, imagewidth, spp), tilesize=self\n .tilesize, **self.kwargs)\n', (13593, 13694), False, 'from glymur import Jp2k\n'), ((2919, 2963), 'uuid.UUID', 'UUID', (['"""b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03"""'], {}), "('b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03')\n", (2923, 2963), False, 'from uuid import UUID\n'), ((3068, 3099), 'uuid.UUID', 'UUID', ([], {'bytes': "b'JpgTiffExif->JP2'"}), "(bytes=b'JpgTiffExif->JP2')\n", (3072, 3099), False, 'from uuid import UUID\n'), ((3835, 3870), 'struct.unpack', 'struct.unpack', (["(endian + 'Q')", 'buffer'], {}), "(endian + 'Q', buffer)\n", (3848, 3870), False, 'import struct\n'), ((3942, 3977), 'struct.unpack', 'struct.unpack', (["(endian + 'H')", 'buffer'], {}), "(endian + 'H', buffer)\n", (3955, 3977), False, 'import struct\n'), ((5041, 5080), 'struct.unpack', 'struct.unpack', 
(['tag_format_str', 'tag_data'], {}), '(tag_format_str, tag_data)\n', (5054, 5080), False, 'import struct\n'), ((10268, 10305), 'struct.unpack', 'struct.unpack', (["(endian + 'HHQ')", 'buffer'], {}), "(endian + 'HHQ', buffer)\n", (10281, 10305), False, 'import struct\n'), ((10375, 10410), 'struct.unpack', 'struct.unpack', (["(endian + 'I')", 'buffer'], {}), "(endian + 'I', buffer)\n", (10388, 10410), False, 'import struct\n'), ((5747, 5801), 'struct.unpack', 'struct.unpack', (['(endian + payload_format)', 'payload_buffer'], {}), '(endian + payload_format, payload_buffer)\n', (5760, 5801), False, 'import struct\n'), ((5967, 6020), 'struct.pack', 'struct.pack', (['"""<HHII"""', 'tag', 'dtype', 'nvalues', 'new_offset'], {}), "('<HHII', tag, dtype, nvalues, new_offset)\n", (5978, 6020), False, 'import struct\n'), ((6405, 6438), 'struct.pack', 'struct.pack', (['out_format', '*payload'], {}), '(out_format, *payload)\n', (6416, 6438), False, 'import struct\n'), ((7026, 7080), 'struct.unpack', 'struct.unpack', (['(endian + payload_format)', 'payload_buffer'], {}), '(endian + payload_format, payload_buffer)\n', (7039, 7080), False, 'import struct\n'), ((12997, 13022), 'numpy.ceil', 'np.ceil', (['(imagewidth / jtw)'], {}), '(imagewidth / jtw)\n', (13004, 13022), True, 'import numpy as np\n'), ((13061, 13086), 'numpy.ceil', 'np.ceil', (['(imagewidth / jtw)'], {}), '(imagewidth / jtw)\n', (13068, 13086), True, 'import numpy as np\n'), ((8004, 8044), 'struct.pack', 'struct.pack', (['"""<HHI"""', 'tag', 'dtype', 'nvalues'], {}), "('<HHI', tag, dtype, nvalues)\n", (8015, 8044), False, 'import struct\n'), ((8411, 8454), 'struct.pack', 'struct.pack', (["('<' + payload_format)", '*payload'], {}), "('<' + payload_format, *payload)\n", (8422, 8454), False, 'import struct\n'), ((8665, 8718), 'struct.pack', 'struct.pack', (['"""<HHII"""', 'tag', 'dtype', 'nvalues', 'new_offset'], {}), "('<HHII', tag, dtype, nvalues, new_offset)\n", (8676, 8718), False, 'import struct\n'), ((9139, 9172), 
'struct.pack', 'struct.pack', (['out_format', '*payload'], {}), '(out_format, *payload)\n', (9150, 9172), False, 'import struct\n'), ((14996, 15014), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (15009, 15014), False, 'import warnings\n'), ((15474, 15511), 'numpy.zeros', 'np.zeros', (['(th, tw, 4)'], {'dtype': 'np.uint8'}), '((th, tw, 4), dtype=np.uint8)\n', (15482, 15511), True, 'import numpy as np\n'), ((15264, 15288), 'numpy.ceil', 'np.ceil', (['(imagewidth / tw)'], {}), '(imagewidth / tw)\n', (15271, 15288), True, 'import numpy as np\n'), ((16061, 16099), 'numpy.zeros', 'np.zeros', (['(jth, jtw, spp)'], {'dtype': 'dtype'}), '((jth, jtw, spp), dtype=dtype)\n', (16069, 16099), True, 'import numpy as np\n'), ((16128, 16164), 'numpy.zeros', 'np.zeros', (['(th, tw, spp)'], {'dtype': 'dtype'}), '((th, tw, spp), dtype=dtype)\n', (16136, 16164), True, 'import numpy as np\n'), ((20552, 20597), 'numpy.zeros', 'np.zeros', (['(rps, imagewidth, spp)'], {'dtype': 'dtype'}), '((rps, imagewidth, spp), dtype=dtype)\n', (20560, 20597), True, 'import numpy as np\n'), ((20623, 20669), 'numpy.zeros', 'np.zeros', (['(rps, imagewidth, 4)'], {'dtype': 'np.uint8'}), '((rps, imagewidth, 4), dtype=np.uint8)\n', (20631, 20669), True, 'import numpy as np\n'), ((16202, 16236), 'numpy.ceil', 'np.ceil', (['(idx // num_jp2k_tile_cols)'], {}), '(idx // num_jp2k_tile_cols)\n', (16209, 16236), True, 'import numpy as np\n'), ((16274, 16307), 'numpy.ceil', 'np.ceil', (['(idx % num_jp2k_tile_cols)'], {}), '(idx % num_jp2k_tile_cols)\n', (16281, 16307), True, 'import numpy as np\n'), ((20340, 20365), 'numpy.ceil', 'np.ceil', (['(imagewidth / jtw)'], {}), '(imagewidth / jtw)\n', (20347, 20365), True, 'import numpy as np\n'), ((20819, 20857), 'numpy.zeros', 'np.zeros', (['(jth, jtw, spp)'], {'dtype': 'dtype'}), '((jth, jtw, spp), dtype=dtype)\n', (20827, 20857), True, 'import numpy as np\n'), ((16907, 16945), 'numpy.ceil', 'np.ceil', (['(tilenum // num_tiff_tile_cols)'], {}), '(tilenum 
// num_tiff_tile_cols)\n', (16914, 16945), True, 'import numpy as np\n'), ((16987, 17024), 'numpy.ceil', 'np.ceil', (['(tilenum % num_tiff_tile_cols)'], {}), '(tilenum % num_tiff_tile_cols)\n', (16994, 17024), True, 'import numpy as np\n'), ((17716, 17746), 'numpy.flipud', 'np.flipud', (['rgba_tile[:, :, :3]'], {}), '(rgba_tile[:, :, :3])\n', (17725, 17746), True, 'import numpy as np\n'), ((19064, 19102), 'numpy.ceil', 'np.ceil', (['(tilenum // num_tiff_tile_cols)'], {}), '(tilenum // num_tiff_tile_cols)\n', (19071, 19102), True, 'import numpy as np\n'), ((19202, 19239), 'numpy.ceil', 'np.ceil', (['(tilenum % num_tiff_tile_cols)'], {}), '(tilenum % num_tiff_tile_cols)\n', (19209, 19239), True, 'import numpy as np\n'), ((22245, 22278), 'numpy.flipud', 'np.flipud', (['rgba_strip[:, :, :spp]'], {}), '(rgba_strip[:, :, :spp])\n', (22254, 22278), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from mr_database import DataTypes
from mr_database import Table
from mr_database import Column
class TableTemplate(Table):
    """Minimal example table: an integer primary key plus one varchar column."""
    id = Column(DataTypes.integer, pk=True)  # surrogate primary key
    myCol = Column(DataTypes.varchar(16), default='Hello World')  # demo text column
class BrokenTable(Table):
    """Table with an extra non-column instance attribute.

    NOTE(review): the name suggests this is a deliberately malformed fixture
    (e.g. for negative tests); ``counter`` set in __init__ has no matching
    Column declaration -- confirm intent before "fixing".
    """
    id = Column(DataTypes.integer, pk=True)  # surrogate primary key
    postalCode = Column(DataTypes.smallint)
    cityName = Column(DataTypes.varchar(40), not_null=True, default='New York')
    cityId = Column(data_type=DataTypes.integer, default=0)

    def __init__(self):
        super().__init__()
        # Plain instance attribute, not a database column.
        self.counter = 10
class City(Table):
    """City lookup table; new instances are seeded with the column defaults."""
    id = Column(DataTypes.integer, pk=True)  # surrogate primary key
    postalCode = Column(DataTypes.smallint, default=9999, display_name='Postal Code')
    cityName = Column(DataTypes.varchar(40), default='New York', display_name='City Name')

    def __init__(self):
        super().__init__()
        # Seed the instance fields from each column's declared default.
        self.id: int = City.id.default
        self.postalCode: int = City.postalCode.default
        self.cityName: str = City.cityName.default
class Person(Table):
    """Person record with a foreign key into City.id."""
    id = Column(DataTypes.integer, pk=True)  # surrogate primary key
    firstName = Column(DataTypes.varchar(40))
    lastName = Column(DataTypes.varchar(40))
    # References City.id; defaults to 0 (no city assigned).
    cityId = Column(data_type=DataTypes.integer, fk=(City, 'id'), default=0)
class Image(Table):
    """Binary image storage keyed by id."""
    id = Column(DataTypes.integer, pk=True)  # surrogate primary key
    md5 = Column(DataTypes.char(32))  # presumably the hex MD5 of imageData -- confirm
    imageName = Column(DataTypes.varchar(40))
    imageData = Column(DataTypes.blob)  # raw image bytes
| [
"mr_database.DataTypes.char",
"mr_database.DataTypes.varchar",
"mr_database.Column"
] | [((179, 213), 'mr_database.Column', 'Column', (['DataTypes.integer'], {'pk': '(True)'}), '(DataTypes.integer, pk=True)\n', (185, 213), False, 'from mr_database import Column\n'), ((317, 351), 'mr_database.Column', 'Column', (['DataTypes.integer'], {'pk': '(True)'}), '(DataTypes.integer, pk=True)\n', (323, 351), False, 'from mr_database import Column\n'), ((369, 395), 'mr_database.Column', 'Column', (['DataTypes.smallint'], {}), '(DataTypes.smallint)\n', (375, 395), False, 'from mr_database import Column\n'), ((489, 535), 'mr_database.Column', 'Column', ([], {'data_type': 'DataTypes.integer', 'default': '(0)'}), '(data_type=DataTypes.integer, default=0)\n', (495, 535), False, 'from mr_database import Column\n'), ((646, 680), 'mr_database.Column', 'Column', (['DataTypes.integer'], {'pk': '(True)'}), '(DataTypes.integer, pk=True)\n', (652, 680), False, 'from mr_database import Column\n'), ((698, 766), 'mr_database.Column', 'Column', (['DataTypes.smallint'], {'default': '(9999)', 'display_name': '"""Postal Code"""'}), "(DataTypes.smallint, default=9999, display_name='Postal Code')\n", (704, 766), False, 'from mr_database import Column\n'), ((1089, 1123), 'mr_database.Column', 'Column', (['DataTypes.integer'], {'pk': '(True)'}), '(DataTypes.integer, pk=True)\n', (1095, 1123), False, 'from mr_database import Column\n'), ((1228, 1291), 'mr_database.Column', 'Column', ([], {'data_type': 'DataTypes.integer', 'fk': "(City, 'id')", 'default': '(0)'}), "(data_type=DataTypes.integer, fk=(City, 'id'), default=0)\n", (1234, 1291), False, 'from mr_database import Column\n'), ((1324, 1358), 'mr_database.Column', 'Column', (['DataTypes.integer'], {'pk': '(True)'}), '(DataTypes.integer, pk=True)\n', (1330, 1358), False, 'from mr_database import Column\n'), ((1458, 1480), 'mr_database.Column', 'Column', (['DataTypes.blob'], {}), '(DataTypes.blob)\n', (1464, 1480), False, 'from mr_database import Column\n'), ((233, 254), 'mr_database.DataTypes.varchar', 'DataTypes.varchar', 
(['(16)'], {}), '(16)\n', (250, 254), False, 'from mr_database import DataTypes\n'), ((418, 439), 'mr_database.DataTypes.varchar', 'DataTypes.varchar', (['(40)'], {}), '(40)\n', (435, 439), False, 'from mr_database import DataTypes\n'), ((789, 810), 'mr_database.DataTypes.varchar', 'DataTypes.varchar', (['(40)'], {}), '(40)\n', (806, 810), False, 'from mr_database import DataTypes\n'), ((1147, 1168), 'mr_database.DataTypes.varchar', 'DataTypes.varchar', (['(40)'], {}), '(40)\n', (1164, 1168), False, 'from mr_database import DataTypes\n'), ((1192, 1213), 'mr_database.DataTypes.varchar', 'DataTypes.varchar', (['(40)'], {}), '(40)\n', (1209, 1213), False, 'from mr_database import DataTypes\n'), ((1376, 1394), 'mr_database.DataTypes.char', 'DataTypes.char', (['(32)'], {}), '(32)\n', (1390, 1394), False, 'from mr_database import DataTypes\n'), ((1419, 1440), 'mr_database.DataTypes.varchar', 'DataTypes.varchar', (['(40)'], {}), '(40)\n', (1436, 1440), False, 'from mr_database import DataTypes\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from config_settings import Args
# %load_ext autoreload
# %autoreload 2
# Hyper-parameter container shared by the Actor/Critic definitions below
# (layer sizes are read from args.layer_sizes).
args=Args()
# Select the GPU when available; every layer below is moved to this device.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device=",device)
def hidden_init(layer):
    """Return symmetric uniform-init bounds (-1/sqrt(fan), +1/sqrt(fan)).

    `fan` is the first dimension of *layer*'s weight tensor, as in the
    DDPG-style initialisation used by Actor/Critic below.
    """
    fan = layer.weight.data.size()[0]
    bound = 1. / np.sqrt(fan)
    return -bound, bound
class Actor(nn.Module):
    """Actor (policy) network: maps states to continuous actions in [-1, 1]."""

    def __init__(self, state_size, action_size, seed, fc1_units=args.layer_sizes[0], fc2_units=args.layer_sizes[1]):
        """Initialize parameters and build model.
        state_size (int): Dimension of each state
        action_size (int): Dimension of each action
        seed (int): Random seed
        fc1_units (int): Number of nodes in first hidden layer
        fc2_units (int): Number of nodes in second hidden layer """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units).to(device)
        self.fc2 = nn.Linear(fc1_units, fc2_units).to(device)
        self.fc3 = nn.Linear(fc2_units, action_size).to(device)
        self.reset_parameters()

    def reset_parameters(self):
        """Hidden layers use fan-based bounds; output layer small for stable tanh."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        # Bug fix: the original branch compared str(type(state)) and then
        # converted an undefined name `states`, so numpy input was never
        # actually converted. Convert the real argument with isinstance.
        if isinstance(state, np.ndarray):
            state = torch.from_numpy(state).float().to(device)
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # torch.tanh replaces the deprecated F.tanh (identical output).
        return torch.tanh(self.fc3(x))
class Critic(nn.Module):
    """Critic (value) network: maps (state, action) pairs to scalar Q-values."""

    def __init__(self, state_size, action_size, seed, fcs1_units=args.layer_sizes[0], fc2_units=args.layer_sizes[1]):
        """Initialize parameters and build model.
        Params
        state_size (int): Dimension of each state
        action_size (int): Dimension of each action
        seed (int): Random seed
        fcs1_units (int): Number of nodes in the first hidden layer
        fc2_units (int): Number of nodes in the second hidden layer """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fcs1 = nn.Linear(state_size, fcs1_units).to(device)
        # The action vector is concatenated with the first hidden layer output.
        self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units).to(device)
        self.fc3 = nn.Linear(fc2_units, 1).to(device)
        self.reset_parameters()

    def reset_parameters(self):
        """Hidden layers use fan-based bounds; output layer initialised small."""
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # isinstance is the robust way to detect numpy input
        # (was a brittle str(type(...)) string comparison).
        if isinstance(state, np.ndarray):
            state = torch.from_numpy(state).float().to(device)
        xs = F.relu(self.fcs1(state))
        # Inject the action after the first hidden layer, as in the DDPG paper.
        x = torch.cat((xs, action), dim=1)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
| [
"torch.manual_seed",
"numpy.sqrt",
"config_settings.Args",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.cat"
] | [((165, 171), 'config_settings.Args', 'Args', ([], {}), '()\n', (169, 171), False, 'from config_settings import Args\n'), ((206, 231), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (229, 231), False, 'import torch\n'), ((349, 364), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (356, 364), True, 'import numpy as np\n'), ((972, 995), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (989, 995), False, 'import torch\n'), ((2644, 2667), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2661, 2667), False, 'import torch\n'), ((3496, 3526), 'torch.cat', 'torch.cat', (['(xs, action)'], {'dim': '(1)'}), '((xs, action), dim=1)\n', (3505, 3526), False, 'import torch\n'), ((1074, 1106), 'torch.nn.Linear', 'nn.Linear', (['state_size', 'fc1_units'], {}), '(state_size, fc1_units)\n', (1083, 1106), True, 'import torch.nn as nn\n'), ((1198, 1229), 'torch.nn.Linear', 'nn.Linear', (['fc1_units', 'fc2_units'], {}), '(fc1_units, fc2_units)\n', (1207, 1229), True, 'import torch.nn as nn\n'), ((1317, 1350), 'torch.nn.Linear', 'nn.Linear', (['fc2_units', 'action_size'], {}), '(fc2_units, action_size)\n', (1326, 1350), True, 'import torch.nn as nn\n'), ((2746, 2779), 'torch.nn.Linear', 'nn.Linear', (['state_size', 'fcs1_units'], {}), '(state_size, fcs1_units)\n', (2755, 2779), True, 'import torch.nn as nn\n'), ((2810, 2856), 'torch.nn.Linear', 'nn.Linear', (['(fcs1_units + action_size)', 'fc2_units'], {}), '(fcs1_units + action_size, fc2_units)\n', (2819, 2856), True, 'import torch.nn as nn\n'), ((2885, 2908), 'torch.nn.Linear', 'nn.Linear', (['fc2_units', '(1)'], {}), '(fc2_units, 1)\n', (2894, 2908), True, 'import torch.nn as nn\n'), ((1790, 1814), 'torch.from_numpy', 'torch.from_numpy', (['states'], {}), '(states)\n', (1806, 1814), False, 'import torch\n'), ((3370, 3393), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3386, 3393), False, 'import torch\n')] |
"""Constants for the DLNA DMR component."""
import logging
from typing import Final
# Shared logger for the whole dlna_dmr package.
LOGGER = logging.getLogger(__package__)

# Integration domain and config-entry option keys.
DOMAIN: Final = "dlna_dmr"
CONF_LISTEN_PORT: Final = "listen_port"
CONF_CALLBACK_URL_OVERRIDE: Final = "callback_url_override"
CONF_POLL_AVAILABILITY: Final = "poll_availability"
DEFAULT_NAME: Final = "DLNA Digital Media Renderer"
# Seconds to wait when (re)connecting to a device.
CONNECT_TIMEOUT: Final = 10
| [
"logging.getLogger"
] | [((95, 125), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (112, 125), False, 'import logging\n')] |
#!/usr/bin/python
import unittest
import db
import system
import accessrules
import json
class TestAccessRules(unittest.TestCase):
    """Unit tests for accessrules.AccessRules backed by an in-memory DB."""

    def createMock(self):
        """Build an AccessRules instance over a fresh in-memory system."""
        mdb = db.MemoryDB()
        obj = system.System()
        obj.setDB(mdb)
        return accessrules.AccessRules(obj)

    def testAdminUser(self):
        obj = self.createMock()
        obj.system.init()
        self.assertEqual(True, obj.isAdmin("admin"))
        self.assertEqual(True, obj.canCreateUser("admin"))
        self.assertEqual(True, obj.canGrantAdmin("admin"))

    def testNonAdmin(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(False, obj.isAdmin("nonadmin"))
        self.assertEqual(False, obj.canCreateUser("nonadmin"))

    def testDisabledAdminCannotCreateUser(self):
        obj = self.createMock()
        obj.system.addUser("admin2", "admin2")
        obj.system.grantAdmin("admin2")
        self.assertEqual(True, obj.canCreateUser("admin2"))
        obj.system.disableUser("admin2")
        # Bug fix: the test previously stopped after disabling the user and
        # never verified the behaviour its name promises.
        self.assertEqual(False, obj.canCreateUser("admin2"))

    def testDisabledAdminCannotGrantAdminAccess(self):
        obj = self.createMock()
        obj.system.addUser("admin2", "admin2")
        obj.system.grantAdmin("admin2")
        self.assertEqual(True, obj.canGrantAdmin("admin2"))
        obj.system.disableUser("admin2")
        self.assertEqual(False, obj.canGrantAdmin("admin2"))

    def testDisabledUserCannotChangeOwnKeys(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.disableUser("nonadmin")
        self.assertEqual(False, obj.canChangeUserKeys("nonadmin", "nonadmin"))

    def testUserCanChangeOwnProfile(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canUpdateUserProfile("nonadmin", "nonadmin"))

    def testNonAdminUserCannotChangeSomeoneElsesProfile(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("nonadmin2", "nonadmin2")
        self.assertEqual(False, obj.canUpdateUserProfile("nonadmin", "nonadmin2"))

    def testAdminCanChangeSomeoneElsesProfile(self):
        obj = self.createMock()
        obj.system.addUser("admin2", "admin2")
        obj.system.grantAdmin("admin2")
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canUpdateUserProfile("admin2", "nonadmin"))

    def testUserCanChangeOwnKeys(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canChangeUserKeys("nonadmin", "nonadmin"))

    def testNonAdminUserCannotChangeSomeoneElsesKeys(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("nonadmin2", "nonadmin2")
        self.assertEqual(False, obj.canChangeUserKeys("nonadmin", "nonadmin2"))

    def testAdminCanChangeSomeoneElsesKeys(self):
        obj = self.createMock()
        obj.system.addUser("admin2", "admin2")
        obj.system.grantAdmin("admin2")
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canChangeUserKeys("admin2", "nonadmin"))

    def testUserCanExportTheirOwnKeys(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canUserExportPublicKey("nonadmin", "nonadmin"))
        self.assertEqual(True, obj.canUserExportPrivateKey("nonadmin", "nonadmin"))

    def testNonAdminUserCanExportSomeoneElsesPublicKey(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("nonadmin2", "nonadmin2")
        self.assertEqual(True, obj.canUserExportPublicKey("nonadmin", "nonadmin2"))

    def testNonAdminUserCannotExportSomeoneElsesPrivateKey(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("nonadmin2", "nonadmin2")
        self.assertEqual(False, obj.canUserExportPrivateKey("nonadmin", "nonadmin2"))

    def testAdminUserCanExportSomeoneElsesKeys(self):
        obj = self.createMock()
        obj.system.addUser("admin2", "admin2")
        obj.system.grantAdmin("admin2")
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canUserExportPublicKey("admin2", "nonadmin"))
        self.assertEqual(True, obj.canUserExportPrivateKey("admin2", "nonadmin"))

    def testDisabledUserCannotExportSomeoneElsesPublicKey(self):
        # NOTE(review): the test name mentions someone else's *public* key but
        # the body checks the disabled user's own *private* key — confirm the
        # intended behaviour; the assertion is kept as originally written.
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("nonadmin2", "nonadmin2")
        obj.system.disableUser("nonadmin")
        self.assertEqual(False, obj.canUserExportPrivateKey("nonadmin", "nonadmin"))

    def testUserCanSeeTheirOwnAttributes(self):
        obj = self.createMock()
        obj.system.addUser("admin", "admin")
        obj.system.grantAdmin("admin")
        obj.system.addUser("nonadmin", "nonadmin")
        self.assertEqual(True, obj.canUserSeeAttributes("nonadmin", "nonadmin"))
        self.assertEqual(True, obj.canUserSeeAttributes("admin", "admin"))

    def testUserCannotSeeSomeoneElsesAttributes(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("nonadmin2", "nonadmin2")
        self.assertEqual(False, obj.canUserSeeAttributes("nonadmin", "nonadmin2"))
        self.assertEqual(False, obj.canUserSeeAttributes("nonadmin2", "nonadmin"))

    def testAdminCanSeeSomeoneElsesAttributes(self):
        obj = self.createMock()
        obj.system.addUser("nonadmin", "nonadmin")
        obj.system.addUser("admin", "admin")
        obj.system.grantAdmin("admin")
        self.assertEqual(True, obj.canUserSeeAttributes("admin", "nonadmin"))
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"accessrules.AccessRules",
"system.System",
"db.MemoryDB"
] | [((5858, 5873), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5871, 5873), False, 'import unittest\n'), ((174, 187), 'db.MemoryDB', 'db.MemoryDB', ([], {}), '()\n', (185, 187), False, 'import db\n'), ((202, 217), 'system.System', 'system.System', ([], {}), '()\n', (215, 217), False, 'import system\n'), ((256, 284), 'accessrules.AccessRules', 'accessrules.AccessRules', (['obj'], {}), '(obj)\n', (279, 284), False, 'import accessrules\n')] |
# -*- coding: utf-8 -*-
"""
General description
-------------------
These are supplementary routines used in the power market model POMMES.
Installation requirements
-------------------------
Python version >= 3.8
@author: <NAME>, <NAME>
"""
import math
from datetime import datetime
import pandas as pd
def days_between(d1, d2):
    """Absolute difference in whole days between two date strings.

    Parameters
    ----------
    d1 : str
        First timestamp, formatted "%Y-%m-%d %H:%M:%S"
    d2 : str
        Second timestamp, formatted "%Y-%m-%d %H:%M:%S"

    Returns
    -------
    int
        The difference between the two dates in days
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    start, end = (datetime.strptime(s, fmt) for s in (d1, d2))
    return abs((end - start).days)
def time_steps_between_timestamps(ts1, ts2, freq):
    """Calculate the difference in time steps between two timestamps.

    Parameters
    ----------
    ts1 : pd.Timestamp
        The first timestamp
    ts2 : pd.Timestamp
        The second timestamp
    freq : str
        The frequency information, e.g. '60min', '15min'; any pandas
        offset string that divides a day evenly is accepted

    Returns
    -------
    time_step_diff : int
        The difference between the two dates in time steps
    """
    # Generalized: derive the step length from freq instead of the previous
    # hard-coded {'60min', '15min'} table; results for those two
    # frequencies are unchanged.
    seconds_per_step = int(pd.Timedelta(freq).total_seconds())
    steps_per_day = 86400 // seconds_per_step
    diff = ts2 - ts1
    return diff.days * steps_per_day + math.floor(
        diff.seconds / seconds_per_step
    )
def convert_annual_limit(annual_limit, start_time, end_time):
    """Convert an annual limit to a sub- or multi-annual one

    Parameters
    ----------
    annual_limit: float or pd.Series of dtype float
        An annual limit (e.g. for emissions, investment budgets); pass a
        pd.Series indexed by year when start_time and end_time are not
        within one year
    start_time: str
        The first date string; start_time of the optimization run
    end_time: str
        The second date string; end_time of the optimization run

    Returns
    -------
    new_limit: float
        A sub-annual / multi-annual limit for the optimization timeframe
    """

    def _day_diff(d1, d2):
        # Local copy of days_between() so this function is self-contained.
        fmt = "%Y-%m-%d %H:%M:%S"
        return abs((datetime.strptime(d2, fmt) - datetime.strptime(d1, fmt)).days)

    def _limit_for(year):
        # Bug fix: the original multi-year branch always added the raw
        # `annual_limit`, silently mis-handling pd.Series input although
        # the docstring requires a Series for multi-year spans.
        if isinstance(annual_limit, pd.Series):
            return annual_limit[year]
        return annual_limit

    start_year = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S").year
    end_year = datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S").year

    if start_year == end_year:
        # Note: a 365-day year is assumed throughout (as before).
        year_fraction = _day_diff(start_time, end_time) / float(365)
        return _limit_for(start_year) * year_fraction

    start_year_begin = str(start_year) + "-01-01 00:00:00"
    end_year_end = str(end_year) + "-12-31 23:59:59"
    start_year_fraction = (365 - _day_diff(start_year_begin, start_time)) / float(365)
    end_year_fraction = (365 - _day_diff(end_time, end_year_end)) / float(365)

    new_limit = 0
    # Add annual limits for full years within the optimization time frame.
    for year in range(start_year + 1, end_year):
        new_limit += _limit_for(year)
    # Add limits for the fractions of the start year and end year.
    new_limit += (
        _limit_for(start_year) * start_year_fraction
        + _limit_for(end_year) * end_year_fraction
    )
    return new_limit
def convert_annual_costs_nominal_to_real(
    nominal_costs, inflation_rate=1.02, year=2022
):
    """Convert cost values of DataFrame from nominal to real terms

    Parameters
    ----------
    nominal_costs: pd.DataFrame
        Nominal costs data in annual resolution (years = columns)
    inflation_rate: float
        Inflation rate
    year: int
        Year for which the nominal costs shall be expressed

    Returns
    -------
    real_costs: :obj:`pd.DataFrame`
        Real costs data in annual resolution
    """
    real_costs = nominal_costs.copy()
    # Every column except 'label' holds a year of cost data.
    data_columns = (col for col in real_costs.columns if col != "label")
    for col in data_columns:
        try:
            # Deflate each annual column back to the reference year.
            deflator = inflation_rate ** (int(col) - year)
        except TypeError:
            msg = (
                "DataFrame format not as expected\n"
                "Except for column 'label', "
                "all other columns must have column names of type int!"
            )
            raise TypeError(msg)
        real_costs[col] = real_costs[col].div(deflator)
    return real_costs
def convert_nominal_to_real_time_series(
    nominal_time_series, inflation_rate=1.02, year=2022
):
    """Convert time series values of DataFrame from nominal to real terms

    Parameters
    ----------
    nominal_time_series: :obj:`pd.DataFrame`
        Nominal time series data in hourly resolution

    inflation_rate: float
        Inflation rate

    year: int
        Year for which the nominal costs shall be expressed

    Returns
    -------
    real_time_series: pd.DataFrame
        Real time series data in hourly resolution

    Raises
    ------
    TypeError
        If the time series is not indexed by a pd.DatetimeIndex
    """
    # Bug fix: the original guard `not type(index.year == pd.DatetimeIndex)`
    # was always False (type(...) of any object is truthy), so the intended
    # TypeError was never raised.
    if not isinstance(nominal_time_series.index, pd.DatetimeIndex):
        raise TypeError("Given time series must have a pd.DatetimeIndex!")
    # All values are deflated relative to the first year of the series.
    time_series_year = nominal_time_series.index.year[0]
    real_time_series = nominal_time_series.div(
        inflation_rate ** (time_series_year - year)
    )
    return real_time_series
| [
"datetime.datetime.strptime",
"math.floor"
] | [((623, 665), 'datetime.datetime.strptime', 'datetime.strptime', (['d1', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(d1, '%Y-%m-%d %H:%M:%S')\n", (640, 665), False, 'from datetime import datetime\n'), ((675, 717), 'datetime.datetime.strptime', 'datetime.strptime', (['d2', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(d2, '%Y-%m-%d %H:%M:%S')\n", (692, 717), False, 'from datetime import datetime\n'), ((2213, 2263), 'datetime.datetime.strptime', 'datetime.strptime', (['start_time', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(start_time, '%Y-%m-%d %H:%M:%S')\n", (2230, 2263), False, 'from datetime import datetime\n'), ((2277, 2325), 'datetime.datetime.strptime', 'datetime.strptime', (['end_time', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(end_time, '%Y-%m-%d %H:%M:%S')\n", (2294, 2325), False, 'from datetime import datetime\n'), ((1354, 1408), 'math.floor', 'math.floor', (['(diff.seconds / time_steps_seconds[freq][1])'], {}), '(diff.seconds / time_steps_seconds[freq][1])\n', (1364, 1408), False, 'import math\n')] |
from itertools import chain
from burrito.cmdsprovider import CmdsProvider
from burrito.utils import reply_to_user
class HelpCommands(CmdsProvider):
    """Provides the 'commands' listing and per-command 'help' commands."""

    def __init__(self):
        self.cmds = {'commands': {'function': self.cmd_list,
                                  'aliases': ['comms', ],
                                  'description': None},
                     'help': {'function': self.cmd_help_cmd,
                              'description': 'get help for a cmd',
                              'usage': 'usage: %(nick)s: help: <command>',
                              'args': ['command']}}

    def _get_cmd_list(self):
        """Sorted, flat list of every command name across all plugins."""
        names = chain.from_iterable(
            plugin.list_commands() for plugin in CmdsProvider.get_plugins())
        return sorted(names)

    def _get_cmd_dict(self):
        """Merged name -> spec mapping from every registered plugin."""
        merged = {}
        for plugin in CmdsProvider.get_plugins():
            merged.update(plugin.cmds)
        return merged

    def cmd_list(self, command, data):
        """Reply with the full list of available commands."""
        return reply_to_user(data, self._get_cmd_list())

    def cmd_help_cmd(self, command, data):
        """Reply with the help text for the command named after 'help:'."""
        parts = [piece.strip() for piece in command.split(':')]
        args = parts[1:]
        known = self._get_cmd_dict()
        if len(args) == 1 and args[0] in known:
            spec = known[args[0]]
            reply = spec.get('help', spec.get('description', None))
            if not reply:
                reply = "No help found for %s" % spec
        else:
            # Wrong arity or unknown command: show the usage string.
            reply = self.cmds['help']['usage'] % {'nick':
                                                  data['conn']._nickname}
        return reply_to_user(data, reply)
| [
"burrito.cmdsprovider.CmdsProvider.get_plugins",
"burrito.utils.reply_to_user"
] | [((820, 846), 'burrito.cmdsprovider.CmdsProvider.get_plugins', 'CmdsProvider.get_plugins', ([], {}), '()\n', (844, 846), False, 'from burrito.cmdsprovider import CmdsProvider\n'), ((995, 1020), 'burrito.utils.reply_to_user', 'reply_to_user', (['data', 'cmds'], {}), '(data, cmds)\n', (1008, 1020), False, 'from burrito.utils import reply_to_user\n'), ((1589, 1615), 'burrito.utils.reply_to_user', 'reply_to_user', (['data', 'reply'], {}), '(data, reply)\n', (1602, 1615), False, 'from burrito.utils import reply_to_user\n'), ((722, 748), 'burrito.cmdsprovider.CmdsProvider.get_plugins', 'CmdsProvider.get_plugins', ([], {}), '()\n', (746, 748), False, 'from burrito.cmdsprovider import CmdsProvider\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Licensed under the terms of the MIT License
https://github.com/luchko/QCodeEditor
@author: <NAME> (<EMAIL>)
Python Highlighting added by:
https://github.com/unihernandez22/QCodeEditor
@author: unihernandez22
Adapted to Binary Ninja by:
@author: <NAME> (https://github.com/psifertex)
Integrating syntax highlighting from:
https://wiki.python.org/moin/PyQt/Python%20syntax%20highlighting
Released under the Modified BSD License: http://directory.fsf.org/wiki/License:BSD_3Clause
Note that this will not be merged back to the parent repositories as it's been
modified to be heavily dependent on the BN theme system.
'''
from PySide2.QtCore import Qt, QRect, QRegExp
from PySide2.QtWidgets import QWidget, QTextEdit, QPlainTextEdit
from PySide2.QtGui import (QPainter, QFont, QSyntaxHighlighter, QTextFormat, QTextCharFormat)
from binaryninjaui import (getMonospaceFont, getThemeColor, ThemeColor)
def format(color, style=''):
    """Return a QTextCharFormat for the named Binary Ninja theme color.

    Parameters
    ----------
    color : str
        Name of a ThemeColor enum member, e.g. 'StringColor'.
    style : str
        Optional style flags; may contain 'bold' and/or 'italic'.
    """
    # Look the ThemeColor member up by name instead of eval()-ing a
    # constructed code string: same behaviour, no dynamic code execution.
    _color = getThemeColor(getattr(ThemeColor, color))

    _format = QTextCharFormat()
    _format.setForeground(_color)
    if 'bold' in style:
        _format.setFontWeight(QFont.Bold)
    if 'italic' in style:
        _format.setFontItalic(True)

    return _format
# Map token categories to theme-aware text formats / colors.
STYLES = {
    'keyword': format('StackVariableColor'),
    'operator': format('TokenHighlightColor'),
    'brace': format('LinearDisassemblySeparatorColor'),
    'defclass': format('DataSymbolColor'),
    'string': format('StringColor'),
    'string2': format('TypeNameColor'),
    'comment': format('AnnotationColor', 'italic'),
    'self': format('KeywordColor', 'italic'),
    'numbers': format('NumberColor'),
    'numberbar': getThemeColor(ThemeColor.BackgroundHighlightDarkColor),
    'blockselected': getThemeColor(ThemeColor.TokenHighlightColor),
    'blocknormal': getThemeColor(ThemeColor.TokenSelectionColor),
    # Bug fix: QCodeEditor.__init__ reads STYLES['currentLine'] for the
    # current-line highlight; the key was missing and raised a KeyError
    # whenever HIGHLIGHT_CURRENT_LINE was enabled (the default).
    'currentLine': getThemeColor(ThemeColor.BackgroundHighlightDarkColor),
}
class PythonHighlighter (QSyntaxHighlighter):
    """Syntax highlighter for the Python language.
    """
    # Python keywords
    keywords = [
        'and', 'assert', 'break', 'class', 'continue', 'def',
        'del', 'elif', 'else', 'except', 'exec', 'finally',
        'for', 'from', 'global', 'if', 'import', 'in',
        'is', 'lambda', 'not', 'or', 'pass', 'print',
        'raise', 'return', 'try', 'while', 'yield',
        'None', 'True', 'False',
    ]

    # Python operators.
    # Fix: these are regex fragments, so they are written as raw strings —
    # sequences such as '\+' are invalid escapes in plain string literals
    # (SyntaxWarning on modern Python). The string values are unchanged.
    operators = [
        r'=',
        # Comparison
        r'==', r'!=', r'<', r'<=', r'>', r'>=',
        # Arithmetic
        r'\+', r'-', r'\*', r'/', r'//', r'\%', r'\*\*',
        # In-place
        r'\+=', r'-=', r'\*=', r'/=', r'\%=',
        # Bitwise
        r'\^', r'\|', r'\&', r'\~', r'>>', r'<<',
    ]

    # Python braces (raw strings for the same reason as `operators`)
    braces = [
        r'\{', r'\}', r'\(', r'\)', r'\[', r'\]',
    ]

    def __init__(self, document):
        QSyntaxHighlighter.__init__(self, document)

        # Multi-line strings (expression, flag, style)
        # FIXME: The triple-quotes in these two lines will mess up the
        # syntax highlighting from this point onward
        self.tri_single = (QRegExp("'''"), 1, STYLES['string2'])
        self.tri_double = (QRegExp('"""'), 2, STYLES['string2'])

        rules = []

        # Keyword, operator, and brace rules
        rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
                  for w in PythonHighlighter.keywords]
        rules += [(r'%s' % o, 0, STYLES['operator'])
                  for o in PythonHighlighter.operators]
        rules += [(r'%s' % b, 0, STYLES['brace'])
                  for b in PythonHighlighter.braces]

        # All other rules
        rules += [
            # 'self'
            (r'\bself\b', 0, STYLES['self']),

            # Double-quoted string, possibly containing escape sequences
            (r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
            # Single-quoted string, possibly containing escape sequences
            (r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),

            # 'def' followed by an identifier
            (r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
            # 'class' followed by an identifier
            (r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),

            # From '#' until a newline
            (r'#[^\n]*', 0, STYLES['comment']),

            # Numeric literals
            (r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
            (r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
            (r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
        ]

        # Build a QRegExp for each pattern
        self.rules = [(QRegExp(pat), index, fmt)
                      for (pat, index, fmt) in rules]

    def highlightBlock(self, text):
        """Apply syntax highlighting to the given block of text.
        """
        # Do other syntax formatting
        for expression, nth, format in self.rules:
            index = expression.indexIn(text, 0)

            while index >= 0:
                # We actually want the index of the nth match
                index = expression.pos(nth)
                length = len(expression.cap(nth))
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)

        self.setCurrentBlockState(0)

        # Do multi-line strings
        in_multiline = self.match_multiline(text, *self.tri_single)
        if not in_multiline:
            in_multiline = self.match_multiline(text, *self.tri_double)

    def match_multiline(self, text, delimiter, in_state, style):
        """Do highlighting of multi-line strings. ``delimiter`` should be a
        ``QRegExp`` for triple-single-quotes or triple-double-quotes, and
        ``in_state`` should be a unique integer to represent the corresponding
        state changes when inside those strings. Returns True if we're still
        inside a multi-line string when this function is finished.
        """
        # If inside triple-single quotes, start at 0
        if self.previousBlockState() == in_state:
            start = 0
            add = 0
        # Otherwise, look for the delimiter on this line
        else:
            start = delimiter.indexIn(text)
            # Move past this match
            add = delimiter.matchedLength()

        # As long as there's a delimiter match on this line...
        while start >= 0:
            # Look for the ending delimiter
            end = delimiter.indexIn(text, start + add)
            # Ending delimiter on this line?
            if end >= add:
                length = end - start + add + delimiter.matchedLength()
                self.setCurrentBlockState(0)
            # No; multi-line string
            else:
                self.setCurrentBlockState(in_state)
                length = len(text) - start + add
            # Apply formatting
            self.setFormat(start, length, style)
            # Look for the next match
            start = delimiter.indexIn(text, start + length)

        # Return True if still inside a multi-line string, False otherwise
        if self.currentBlockState() == in_state:
            return True
        else:
            return False
class QCodeEditor(QPlainTextEdit):
    '''
    QCodeEditor inherited from QPlainTextEdit providing:

        numberBar - set by DISPLAY_LINE_NUMBERS flag equals True
        current line highlighting - set by HIGHLIGHT_CURRENT_LINE flag equals True
        setting up QSyntaxHighlighter

    references:
        https://john.nachtimwald.com/2009/08/19/better-qplaintextedit-with-line-numbers/
        http://doc.qt.io/qt-5/qtwidgets-widgets-codeeditor-example.html
    '''
    class NumberBar(QWidget):
        '''Widget painted to the left of the editor showing line numbers.'''
        def __init__(self, editor):
            QWidget.__init__(self, editor)

            self.editor = editor
            # Keep the bar's width and contents in sync with the editor.
            self.editor.blockCountChanged.connect(self.updateWidth)
            self.editor.updateRequest.connect(self.updateContents)
            self.font = QFont()
            self.numberBarColor = STYLES["numberbar"]

        def paintEvent(self, event):
            '''Paint one number per visible text block.'''
            painter = QPainter(self)
            painter.fillRect(event.rect(), self.numberBarColor)

            block = self.editor.firstVisibleBlock()

            # Iterate over all visible text blocks in the document.
            while block.isValid():
                blockNumber = block.blockNumber()
                block_top = self.editor.blockBoundingGeometry(block).translated(self.editor.contentOffset()).top()

                # Check if the position of the block is out side of the visible area.
                if not block.isVisible() or block_top >= event.rect().bottom():
                    break

                # We want the line number for the selected line to be bold.
                if blockNumber == self.editor.textCursor().blockNumber():
                    self.font.setBold(True)
                    painter.setPen(STYLES["blockselected"])
                else:
                    self.font.setBold(False)
                    painter.setPen(STYLES["blocknormal"])
                painter.setFont(self.font)

                # Draw the line number right justified at the position of the line.
                paint_rect = QRect(0, block_top, self.width(), self.editor.fontMetrics().height())
                painter.drawText(paint_rect, Qt.AlignLeft, str(blockNumber+1))

                block = block.next()

            painter.end()

            QWidget.paintEvent(self, event)

        def getWidth(self):
            '''Width needed to display the highest line number (plus padding).'''
            count = self.editor.blockCount()
            width = self.fontMetrics().width(str(count)) + 10
            return width

        def updateWidth(self):
            '''Resize the bar and the editor's left margin when digits change.'''
            width = self.getWidth()
            if self.width() != width:
                self.setFixedWidth(width)
                self.editor.setViewportMargins(width, 0, 0, 0);

        def updateContents(self, rect, scroll):
            '''Repaint (or scroll) the bar when the editor viewport updates.'''
            if scroll:
                self.scroll(0, scroll)
            else:
                self.update(0, rect.y(), self.width(), rect.height())

            if rect.contains(self.editor.viewport().rect()):
                # Follow the editor's current font size.
                fontSize = self.editor.currentCharFormat().font().pointSize()
                self.font.setPointSize(fontSize)
                self.font.setStyle(QFont.StyleNormal)
                self.updateWidth()

    def __init__(self, DISPLAY_LINE_NUMBERS=True, HIGHLIGHT_CURRENT_LINE=True,
                 SyntaxHighlighter=None, *args):
        '''
        Parameters
        ----------
        DISPLAY_LINE_NUMBERS : bool
            switch on/off the presence of the lines number bar
        HIGHLIGHT_CURRENT_LINE : bool
            switch on/off the current line highlighting
        SyntaxHighlighter : QSyntaxHighlighter
            should be inherited from QSyntaxHighlighter
        '''
        super(QCodeEditor, self).__init__()

        self.setFont(QFont("Ubuntu Mono", 11))
        self.setLineWrapMode(QPlainTextEdit.NoWrap)

        self.DISPLAY_LINE_NUMBERS = DISPLAY_LINE_NUMBERS

        if DISPLAY_LINE_NUMBERS:
            self.number_bar = self.NumberBar(self)

        if HIGHLIGHT_CURRENT_LINE:
            self.currentLineNumber = None
            # NOTE(review): requires a 'currentLine' entry in STYLES.
            self.currentLineColor = STYLES['currentLine']
            self.cursorPositionChanged.connect(self.highligtCurrentLine)

        if SyntaxHighlighter is not None: # add highlighter to textdocument
            self.highlighter = SyntaxHighlighter(self.document())

    def resizeEvent(self, *e):
        '''overload resizeEvent handler'''

        if self.DISPLAY_LINE_NUMBERS:   # resize number_bar widget
            cr = self.contentsRect()
            rec = QRect(cr.left(), cr.top(), self.number_bar.getWidth(), cr.height())
            self.number_bar.setGeometry(rec)

        QPlainTextEdit.resizeEvent(self, *e)

    def highligtCurrentLine(self):
        '''Re-apply the full-width highlight when the cursor changes line.'''
        newCurrentLineNumber = self.textCursor().blockNumber()
        if newCurrentLineNumber != self.currentLineNumber:
            self.currentLineNumber = newCurrentLineNumber
            hi_selection = QTextEdit.ExtraSelection()
            hi_selection.format.setBackground(self.currentLineColor)
            hi_selection.format.setProperty(QTextFormat.FullWidthSelection, True)
            hi_selection.cursor = self.textCursor()
            hi_selection.cursor.clearSelection()
            self.setExtraSelections([hi_selection])
##############################################################################

if __name__ == '__main__':

    # TESTING

    def run_test():
        """Open a standalone editor window with Python highlighting."""
        # Bug fix: QApplication lives in PySide2.QtWidgets, not QtGui;
        # importing it from QtGui raised an ImportError at runtime.
        from PySide2.QtWidgets import QApplication
        import sys

        app = QApplication([])

        editor = QCodeEditor(DISPLAY_LINE_NUMBERS=True,
                             HIGHLIGHT_CURRENT_LINE=True,
                             SyntaxHighlighter=PythonHighlighter)

        text = """\
def hello(text):
    print(text)

hello('Hello World')
# Comment"""

        editor.setPlainText(text)
        editor.resize(400,250)
        editor.show()

        sys.exit(app.exec_())

    run_test()
run_test()
| [
"binaryninjaui.getThemeColor",
"PySide2.QtGui.QFont",
"PySide2.QtWidgets.QPlainTextEdit.resizeEvent",
"PySide2.QtGui.QPainter",
"PySide2.QtWidgets.QTextEdit.ExtraSelection",
"PySide2.QtWidgets.QWidget.paintEvent",
"PySide2.QtGui.QTextCharFormat",
"PySide2.QtWidgets.QWidget.__init__",
"PySide2.QtCore... | [((1107, 1124), 'PySide2.QtGui.QTextCharFormat', 'QTextCharFormat', ([], {}), '()\n', (1122, 1124), False, 'from PySide2.QtGui import QPainter, QFont, QSyntaxHighlighter, QTextFormat, QTextCharFormat\n'), ((1686, 1740), 'binaryninjaui.getThemeColor', 'getThemeColor', (['ThemeColor.BackgroundHighlightDarkColor'], {}), '(ThemeColor.BackgroundHighlightDarkColor)\n', (1699, 1740), False, 'from binaryninjaui import getMonospaceFont, getThemeColor, ThemeColor\n'), ((1760, 1805), 'binaryninjaui.getThemeColor', 'getThemeColor', (['ThemeColor.TokenHighlightColor'], {}), '(ThemeColor.TokenHighlightColor)\n', (1773, 1805), False, 'from binaryninjaui import getMonospaceFont, getThemeColor, ThemeColor\n'), ((1823, 1868), 'binaryninjaui.getThemeColor', 'getThemeColor', (['ThemeColor.TokenSelectionColor'], {}), '(ThemeColor.TokenSelectionColor)\n', (1836, 1868), False, 'from binaryninjaui import getMonospaceFont, getThemeColor, ThemeColor\n'), ((2645, 2688), 'PySide2.QtGui.QSyntaxHighlighter.__init__', 'QSyntaxHighlighter.__init__', (['self', 'document'], {}), '(self, document)\n', (2672, 2688), False, 'from PySide2.QtGui import QPainter, QFont, QSyntaxHighlighter, QTextFormat, QTextCharFormat\n'), ((10077, 10113), 'PySide2.QtWidgets.QPlainTextEdit.resizeEvent', 'QPlainTextEdit.resizeEvent', (['self', '*e'], {}), '(self, *e)\n', (10103, 10113), False, 'from PySide2.QtWidgets import QWidget, QTextEdit, QPlainTextEdit\n'), ((10812, 10828), 'PySide2.QtGui.QApplication', 'QApplication', (['[]'], {}), '([])\n', (10824, 10828), False, 'from PySide2.QtGui import QApplication\n'), ((2872, 2886), 'PySide2.QtCore.QRegExp', 'QRegExp', (['"""\'\'\'"""'], {}), '("\'\'\'")\n', (2879, 2886), False, 'from PySide2.QtCore import Qt, QRect, QRegExp\n'), ((2931, 2945), 'PySide2.QtCore.QRegExp', 'QRegExp', (['"""""\\""""'], {}), '(\'"""\')\n', (2938, 2945), False, 'from PySide2.QtCore import Qt, QRect, QRegExp\n'), ((6780, 6810), 'PySide2.QtWidgets.QWidget.__init__', 
'QWidget.__init__', (['self', 'editor'], {}), '(self, editor)\n', (6796, 6810), False, 'from PySide2.QtWidgets import QWidget, QTextEdit, QPlainTextEdit\n'), ((6968, 6975), 'PySide2.QtGui.QFont', 'QFont', ([], {}), '()\n', (6973, 6975), False, 'from PySide2.QtGui import QPainter, QFont, QSyntaxHighlighter, QTextFormat, QTextCharFormat\n'), ((7067, 7081), 'PySide2.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (7075, 7081), False, 'from PySide2.QtGui import QPainter, QFont, QSyntaxHighlighter, QTextFormat, QTextCharFormat\n'), ((8152, 8183), 'PySide2.QtWidgets.QWidget.paintEvent', 'QWidget.paintEvent', (['self', 'event'], {}), '(self, event)\n', (8170, 8183), False, 'from PySide2.QtWidgets import QWidget, QTextEdit, QPlainTextEdit\n'), ((9305, 9329), 'PySide2.QtGui.QFont', 'QFont', (['"""Ubuntu Mono"""', '(11)'], {}), "('Ubuntu Mono', 11)\n", (9310, 9329), False, 'from PySide2.QtGui import QPainter, QFont, QSyntaxHighlighter, QTextFormat, QTextCharFormat\n'), ((10324, 10350), 'PySide2.QtWidgets.QTextEdit.ExtraSelection', 'QTextEdit.ExtraSelection', ([], {}), '()\n', (10348, 10350), False, 'from PySide2.QtWidgets import QWidget, QTextEdit, QPlainTextEdit\n'), ((4123, 4135), 'PySide2.QtCore.QRegExp', 'QRegExp', (['pat'], {}), '(pat)\n', (4130, 4135), False, 'from PySide2.QtCore import Qt, QRect, QRegExp\n')] |
from math import sqrt
from numpy import arange
from universal_constants import MARS_RADIUS
from universal_functions import mars_density
class Simulation:
    """Explicit-Euler integrator for a point body under a force model.

    The body's position, velocity and load-factor histories are recorded at
    every step so a trajectory can be inspected or plotted after the run.
    """

    @property
    def xs(self):
        # x components of the recorded positions
        return [point.x for point in self.ps]

    @property
    def ys(self):
        # y components of the recorded positions
        return [point.y for point in self.ps]

    @property
    def zs(self):
        # z components of the recorded positions
        return [point.z for point in self.ps]

    @property
    def rs(self):
        # Distance from the planet's center at every recorded position.
        return [point.module for point in self.ps]

    @property
    def hs(self):
        # Altitude above the Martian surface at every recorded position.
        return [radius - MARS_RADIUS for radius in self.rs]

    def __init__(self, body, forces):
        self.body = body      # object being integrated (position/velocity/mass)
        self.forces = forces  # callable mapping the body to the net force on it
        self.ps = []          # position history
        self.vs = []          # velocity history
        self.gs = []          # load-factor history
        self.duration = 0     # simulated time of the last run()
        self.delta_v = 0      # speed change over the last run()

    def run(self, time, dt, condition=lambda b: False):
        """Integrate for up to `time` seconds in steps of `dt`.

        Stops early as soon as `condition(body)` becomes true.  Afterwards
        `duration` holds the simulated time and `delta_v` the speed change.
        """
        speed_before = self.body.speed
        elapsed = 0
        for _ in arange(0, time, dt):
            elapsed += dt
            self.step(dt)
            if condition(self.body):
                break
        self.duration = elapsed
        self.delta_v = self.body.speed - speed_before

    def step(self, dt):
        """Advance the body one explicit-Euler step and record the new state."""
        force = self.forces(self.body)
        self.body.velocity += dt * force / self.body.mass
        self.body.position += dt * self.body.velocity
        self.ps.append(self.body.position)
        self.vs.append(self.body.velocity)
        # NOTE(review): the extra division by dt makes the units g per second
        # unless forces() returns an impulse — preserved exactly as modeled.
        self.gs.append(force.module / self.body.mass / 9.81 / dt)
class ThrustSimulation(Simulation):
    """Simulation of a powered burn: the body sheds propellant as it thrusts."""

    @property
    def engine_mass(self):
        # Empirical engine-mass estimate from initial mass, |delta-v| and burn time.
        return 0.0014 * self.mass_0 * abs(self.delta_v) / self.duration + 49.6

    def __init__(self, body, delta_mass, *forces):
        super().__init__(body, *forces)
        self.delta_mass = delta_mass  # propellant mass-flow rate
        self.propellant_mass = 0      # propellant burned so far
        self.mass_0 = body.mass       # mass at ignition

    def step(self, dt):
        """One integration step plus the mass bookkeeping for the burn."""
        super().step(dt)
        burned = self.delta_mass * dt
        self.body.mass -= burned
        self.propellant_mass += burned

    def save_data(self, filename):
        """Append the burn summary rows to '<filename>.csv'."""
        import csv
        with open(filename + '.csv', 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows([
                ['Engine mass', str(self.engine_mass)],
                ['Propellant mass', str(self.propellant_mass)],
                ['Max gs', str(max(self.gs))],
            ])
class AerobreakingSimulation(Simulation):
    """Simulation of an atmospheric entry; tracks heat flux and dynamic pressure."""

    @property
    def shield_mass(self):
        # Ablative-shield mass estimate from the integrated heat load Q.
        return self.body.mass * 0.00091 * (self.Q * 1e-4) ** 0.51575

    @property
    def structure_mass(self):
        # Structural mass estimate from the peak dynamic pressure.
        return self.body.mass * 0.0232 * max(self.pressures) ** -0.1708

    @property
    def back_shield_mass(self):
        return 0.14 * self.body.mass

    @property
    def heat_shield_mass(self):
        # Total thermal-protection mass: ablator + structure + back shell.
        return self.shield_mass + self.structure_mass + self.back_shield_mass

    def __init__(self, body, *forces):
        super().__init__(body, *forces)
        self.qs = []         # heat-flux history
        self.pressures = []  # dynamic-pressure history
        self.Q = 0           # integrated heat load
        # [SI] heating constant — here be dragons
        self.k = 1.9027e-4

    def q(self, b):
        # Convective heat flux; looks like a Sutton-Graves-type law
        # (k * sqrt(rho / r_nose) * v^3) — TODO confirm the constant's source.
        return self.k * sqrt(mars_density(b.radius) / b.r_nose) * b.speed ** 3

    def p(self, b):
        # Dynamic pressure rho*v^2/2 (b.speed2 is presumably speed squared — verify).
        return mars_density(b.radius) * b.speed2 / 2

    def run(self, time, dt=1, condition=lambda b: False):
        """Run the entry and integrate the accumulated heat load."""
        super().run(time, dt, condition)
        self.Q = sum(self.qs) * dt

    def step(self, dt):
        """One integration step plus heat-flux and pressure sampling."""
        super().step(dt)
        self.qs.append(self.q(self.body))
        self.pressures.append(self.p(self.body))

    def save_data(self, filename):
        """Append the entry summary row to '<filename>.csv'."""
        import csv
        with open(filename + '.csv', 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Shield mass', str(self.heat_shield_mass)])
| [
"csv.writer",
"universal_functions.mars_density",
"numpy.arange"
] | [((871, 890), 'numpy.arange', 'arange', (['(0)', 'time', 'dt'], {}), '(0, time, dt)\n', (877, 890), False, 'from numpy import arange\n'), ((2073, 2092), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (2083, 2092), False, 'import csv\n'), ((3587, 3606), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3597, 3606), False, 'import csv\n'), ((3127, 3149), 'universal_functions.mars_density', 'mars_density', (['b.radius'], {}), '(b.radius)\n', (3139, 3149), False, 'from universal_functions import mars_density\n'), ((3041, 3063), 'universal_functions.mars_density', 'mars_density', (['b.radius'], {}), '(b.radius)\n', (3053, 3063), False, 'from universal_functions import mars_density\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
from collections import deque
# Import configuration parameters
import config as cfg
# Define a class to receive the characteristics of each line detection
class Line:
    """Rolling detection state for a single lane line across recent frames."""

    def __init__(self, buf_len=5):
        # x values of the last buf_len accepted fits (circular buffer)
        self.recent_xfitted = deque(maxlen=buf_len)
        # x values averaged over the buffered fits (set by the long-term filter)
        self.bestx = None
        # polynomial coefficients of the last buf_len accepted fits
        self.best_fit = deque(maxlen=buf_len)
        # most recent polynomial coefficients; the boolean-array sentinels
        # mark "no fit yet"
        self.current_fit = [np.array([False]) for _ in range(3)]
# This function is a reuse from the lecture. Finds lane pixels
def find_lane_pixels(binary_warped):
    """Locate lane-line pixels in a binarized bird's-eye image via sliding windows.

    Seeds the search at the two histogram peaks of the bottom half of the
    image, then walks cfg.nwindows windows upward, recentering each window on
    the mean x position of the pixels it captures.

    Returns (leftx, lefty, rightx, righty, out_img) where out_img is an RGB
    visualization with the search windows drawn in.
    """
    # Histogram of the bottom half: column sums peak under each lane line.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # The left/right histogram peaks are the starting points for both lines.
    # int(...) instead of np.int: the np.int alias was deprecated in NumPy
    # 1.20 and removed in 1.24.
    midpoint = int(histogram.shape[0] // 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Window height follows from the configured number of windows.
    window_height = int(binary_warped.shape[0] // cfg.nwindows)
    # Coordinates of all nonzero (activated) pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as the windows climb the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Pixel indices collected per line, one array appended per window.
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one
    for window in range(cfg.nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - cfg.margin
        win_xleft_high = leftx_current + cfg.margin
        win_xright_low = rightx_current - cfg.margin
        win_xright_high = rightx_current + cfg.margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels in x and y within each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If more than minpix pixels were found, recenter the next window on
        # their mean position.
        if len(good_left_inds) > cfg.minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > cfg.minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays into one array per line.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Nothing found in any window; fall through with the empty lists.
        pass
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
# This function is a reuse from the lecture with minor modification to pass challenge video.
# Fits a second order polynomial.
def fit_polynomial(binary_warped, left_line, right_line):
    """Fit second-order polynomials to both lane lines found by sliding windows.

    Runs find_lane_pixels(), fits x = a*y^2 + b*y + c per line and, in video
    mode, sanity-checks the new fit before accepting it.  Accepted fits are
    pushed onto each Line's rolling buffers; rejected or failed frames leave
    the buffers untouched.

    Returns (out_img, left_line, right_line).
    """
    # Find our lane pixels first
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
    try:
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)
        # Generate x and y values for plotting
        ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
        left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
        right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
        # Sanity checks: the lines must stay roughly parallel (top-vs-bottom
        # separation) and the coefficients must not jump too far from the
        # previous frame's fit.  Only enforced in video mode below.
        is_a_good_frame = ((np.abs(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0])) < cfg.th) & \
                           (np.abs(left_fit[0] - left_line.current_fit[0]) < cfg.th1) \
                           & (np.abs(left_fit[1] - left_line.current_fit[1]) < cfg.th2) & \
                           (np.abs(left_fit[2] - left_line.current_fit[2]) < cfg.th3))
        # Check against maximum lane width
        is_a_good_frame &= (np.abs(left_fitx[-1] - right_fitx[-1]) < cfg.lane_max_width) & \
                           (np.abs(left_fitx[0] - right_fitx[0]) < cfg.lane_max_width)
        if (0 == cfg.video_mode) | is_a_good_frame:
            # Color in the left (red) and right (blue) lane pixels.
            out_img[lefty, leftx] = [255, 0, 0]
            out_img[righty, rightx] = [0, 0, 255]
            # Accept the fit into the rolling buffers.
            left_line.recent_xfitted.append(left_fitx)
            left_line.best_fit.append(left_fit)
            right_line.recent_xfitted.append(right_fitx)
            right_line.best_fit.append(right_fit)
        else:
            print('bad frame')
    except Exception:
        # np.polyfit raises (e.g. TypeError on empty pixel arrays) when a
        # line was not detected; skip the frame rather than crash the
        # pipeline.  (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        print('bad frame')
    return out_img, left_line, right_line
# Sets the poly coefficients to the last coefficients computed
def long_term_filter_init(left_line, right_line):
    """Seed the smoothed estimates from the most recent fit of each line."""
    for line in (left_line, right_line):
        line.bestx = line.recent_xfitted[-1]
        line.current_fit = line.best_fit[-1]
    return left_line, right_line
# Takes a mean over accumulated over time poly coefficients
def long_term_filter(left_line, right_line):
    """Smooth each line's estimate by averaging its buffered history."""
    for line in (left_line, right_line):
        line.bestx = np.mean(line.recent_xfitted, axis=0)
        line.current_fit = np.mean(line.best_fit, axis=0)
    return left_line, right_line
# Calculate the radius of curvature in meters for both lane lines
def measure_curvature(left_fit_cr, right_fit_cr, ploty):
    """Return the radius of curvature (meters) of both lane-line polynomials.

    The radius is evaluated at the maximum y value — the bottom of the image,
    closest to the vehicle — with y converted from pixels to meters via
    cfg.ym_per_pix.
    """
    y_eval = np.max(ploty)

    def _radius(fit):
        # R = (1 + (dx/dy)^2)^(3/2) / |d2x/dy2| for x = a*y^2 + b*y + c
        slope = fit[0] * 2 * y_eval * cfg.ym_per_pix + fit[1]
        return (1 + slope ** 2) ** (3 / 2) / np.abs(2 * fit[0])

    return _radius(left_fit_cr), _radius(right_fit_cr)
# Calculate vehicle center offset in meters
def vehicle_offset_calc(undist, bottom_x_left, bottom_x_right):
    """Signed offset of the camera center from the lane center, in meters."""
    image_center = undist.shape[1] / 2
    lane_center = (bottom_x_left + bottom_x_right) / 2
    # Pixel offset converted to meters with the calibrated scale factor.
    return (image_center - lane_center) * cfg.xm_per_pix
# Fits a second order polynomial to each line. Reuse from a lecture
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit a quadratic x(y) to each lane's pixels and evaluate both fits.

    Returns (left_fitx, right_fitx, ploty, left_fit, right_fit), with each
    polynomial evaluated at every image row in ploty.
    """
    # One sample per image row.
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    left_fitx = np.polyval(left_fit, ploty)
    right_fitx = np.polyval(right_fit, ploty)
    return left_fitx, right_fitx, ploty, left_fit, right_fit
# Search for the new line within +/- some margin around the old line center.
def search_around_poly(binary_warped, left_line, right_line):
    """Find lane pixels by searching around the previous frame's polynomial fits.

    Instead of a fresh sliding-window search, activated pixels within
    +/- cfg.search_around_poly of each line's last fit are collected and a
    new polynomial is fitted.  The new fit must pass the same parallelism /
    continuity sanity checks as fit_polynomial() before it is accepted into
    the Line buffers; rejected or failed frames leave the buffers untouched.

    Returns (out_img, left_line, right_line), where out_img visualizes the
    search corridor and the detected pixels.
    """
    margin = cfg.search_around_poly
    # Grab activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    left_fit = left_line.current_fit
    right_fit = right_line.current_fit
    # Select pixels whose x lies within +/- margin of each previous polynomial.
    left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
                                   left_fit[2] - margin)) & (nonzerox < (left_fit[0] * (nonzeroy ** 2) +
                                                                         left_fit[1] * nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
                                    right_fit[2] - margin)) & (nonzerox < (right_fit[0] * (nonzeroy ** 2) +
                                                                           right_fit[1] * nonzeroy + right_fit[2] + margin)))
    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    ## Visualization ##
    # Image to draw on; scale the binary image up to 8-bit intensity.
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Fit new polynomials
    try:
        left_fitx, right_fitx, ploty, left_fit, right_fit = \
            fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
        # Sanity checks: the lines must stay roughly parallel (top-vs-bottom
        # separation) and the coefficients must not jump too far from the
        # previous frame's fit.
        is_a_good_frame = ((np.abs(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0])) < cfg.th) & \
                           (np.abs(left_fit[0] - left_line.current_fit[0]) < cfg.th1) \
                           & (np.abs(left_fit[1] - left_line.current_fit[1]) < cfg.th2) & \
                           (np.abs(left_fit[2] - left_line.current_fit[2]) < cfg.th3))
        # Check against maximum lane width
        is_a_good_frame &= (np.abs(left_fitx[-1] - right_fitx[-1]) < cfg.lane_max_width) & \
                           (np.abs(left_fitx[0] - right_fitx[0]) < cfg.lane_max_width)
        if is_a_good_frame:
            window_img = np.zeros_like(out_img)
            # Color in left (red) and right (blue) line pixels.
            out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
            out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
            # Generate polygons covering the +/- margin search corridor of
            # each line, recast into the point order cv2.fillPoly() expects.
            left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
            left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
                                                                          ploty])))])
            left_line_pts = np.hstack((left_line_window1, left_line_window2))
            right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
            right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
                                                                           ploty])))])
            right_line_pts = np.hstack((right_line_window1, right_line_window2))
            # Draw the semi-transparent search corridor onto the output.
            cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
            cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
            out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
            # Accept the fit into the rolling buffers.
            left_line.recent_xfitted.append(left_fitx)
            right_line.recent_xfitted.append(right_fitx)
            left_line.best_fit.append(left_fit)
            right_line.best_fit.append(right_fit)
        else:
            print('bad frame')
    except Exception:
        # np.polyfit raises (e.g. TypeError on empty pixel arrays) when a
        # line was lost; skip the frame rather than crash the pipeline.
        # (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        print('bad frame')
    return out_img, left_line, right_line
| [
"cv2.rectangle",
"numpy.dstack",
"numpy.mean",
"numpy.abs",
"collections.deque",
"numpy.polyfit",
"numpy.hstack",
"numpy.zeros_like",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.array",
"numpy.linspace",
"cv2.addWeighted",
"numpy.vstack",
"numpy.concatenate",
"numpy.int_",
"nu... | [((917, 979), 'numpy.sum', 'np.sum', (['binary_warped[binary_warped.shape[0] // 2:, :]'], {'axis': '(0)'}), '(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)\n', (923, 979), True, 'import numpy as np\n'), ((1059, 1115), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (1068, 1115), True, 'import numpy as np\n'), ((1265, 1296), 'numpy.int', 'np.int', (['(histogram.shape[0] // 2)'], {}), '(histogram.shape[0] // 2)\n', (1271, 1296), True, 'import numpy as np\n'), ((1314, 1345), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (1323, 1345), True, 'import numpy as np\n'), ((1498, 1544), 'numpy.int', 'np.int', (['(binary_warped.shape[0] // cfg.nwindows)'], {}), '(binary_warped.shape[0] // cfg.nwindows)\n', (1504, 1544), True, 'import numpy as np\n'), ((1670, 1690), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (1678, 1690), True, 'import numpy as np\n'), ((1706, 1726), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (1714, 1726), True, 'import numpy as np\n'), ((7067, 7108), 'numpy.mean', 'np.mean', (['left_line.recent_xfitted'], {'axis': '(0)'}), '(left_line.recent_xfitted, axis=0)\n', (7074, 7108), True, 'import numpy as np\n'), ((7132, 7174), 'numpy.mean', 'np.mean', (['right_line.recent_xfitted'], {'axis': '(0)'}), '(right_line.recent_xfitted, axis=0)\n', (7139, 7174), True, 'import numpy as np\n'), ((7203, 7238), 'numpy.mean', 'np.mean', (['left_line.best_fit'], {'axis': '(0)'}), '(left_line.best_fit, axis=0)\n', (7210, 7238), True, 'import numpy as np\n'), ((7268, 7304), 'numpy.mean', 'np.mean', (['right_line.best_fit'], {'axis': '(0)'}), '(right_line.best_fit, axis=0)\n', (7275, 7304), True, 'import numpy as np\n'), ((7611, 7624), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (7617, 7624), True, 'import numpy as np\n'), ((8600, 8627), 'numpy.polyfit', 'np.polyfit', 
(['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (8610, 8627), True, 'import numpy as np\n'), ((8644, 8673), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (8654, 8673), True, 'import numpy as np\n'), ((8729, 8775), 'numpy.linspace', 'np.linspace', (['(0)', '(img_shape[0] - 1)', 'img_shape[0]'], {}), '(0, img_shape[0] - 1, img_shape[0])\n', (8740, 8775), True, 'import numpy as np\n'), ((9320, 9340), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (9328, 9340), True, 'import numpy as np\n'), ((9356, 9376), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (9364, 9376), True, 'import numpy as np\n'), ((350, 371), 'collections.deque', 'deque', ([], {'maxlen': 'buf_len'}), '(maxlen=buf_len)\n', (355, 371), False, 'from collections import deque\n'), ((563, 584), 'collections.deque', 'deque', ([], {'maxlen': 'buf_len'}), '(maxlen=buf_len)\n', (568, 584), False, 'from collections import deque\n'), ((1364, 1395), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (1373, 1395), True, 'import numpy as np\n'), ((2550, 2650), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (2563, 2650), False, 'import cv2\n'), ((2677, 2779), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (2690, 2779), False, 'import cv2\n'), ((3799, 3829), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (3813, 3829), True, 'import numpy as np\n'), ((3856, 3887), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (3870, 3887), True, 'import numpy as 
np\n'), ((4629, 4656), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (4639, 4656), True, 'import numpy as np\n'), ((4677, 4706), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (4687, 4706), True, 'import numpy as np\n'), ((4771, 4837), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (4782, 4837), True, 'import numpy as np\n'), ((7797, 7823), 'numpy.abs', 'np.abs', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (7803, 7823), True, 'import numpy as np\n'), ((7987, 8014), 'numpy.abs', 'np.abs', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (7993, 8014), True, 'import numpy as np\n'), ((10463, 10519), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (10472, 10519), True, 'import numpy as np\n'), ((688, 705), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (696, 705), True, 'import numpy as np\n'), ((707, 724), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (715, 724), True, 'import numpy as np\n'), ((726, 743), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (734, 743), True, 'import numpy as np\n'), ((11647, 11669), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (11660, 11669), True, 'import numpy as np\n'), ((12356, 12405), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (12365, 12405), True, 'import numpy as np\n'), ((12722, 12773), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (12731, 12773), True, 'import numpy as np\n'), ((13006, 13053), 'cv2.addWeighted', 'cv2.addWeighted', (['out_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), 
'(out_img, 1, window_img, 0.3, 0)\n', (13021, 13053), False, 'import cv2\n'), ((3528, 3561), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (3535, 3561), True, 'import numpy as np\n'), ((3645, 3679), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (3652, 3679), True, 'import numpy as np\n'), ((5569, 5615), 'numpy.abs', 'np.abs', (['(left_fit[2] - left_line.current_fit[2])'], {}), '(left_fit[2] - left_line.current_fit[2])\n', (5575, 5615), True, 'import numpy as np\n'), ((5699, 5737), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1])'], {}), '(left_fitx[-1] - right_fitx[-1])\n', (5705, 5737), True, 'import numpy as np\n'), ((5792, 5828), 'numpy.abs', 'np.abs', (['(left_fitx[0] - right_fitx[0])'], {}), '(left_fitx[0] - right_fitx[0])\n', (5798, 5828), True, 'import numpy as np\n'), ((11314, 11360), 'numpy.abs', 'np.abs', (['(left_fit[2] - left_line.current_fit[2])'], {}), '(left_fit[2] - left_line.current_fit[2])\n', (11320, 11360), True, 'import numpy as np\n'), ((11444, 11482), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1])'], {}), '(left_fitx[-1] - right_fitx[-1])\n', (11450, 11482), True, 'import numpy as np\n'), ((11533, 11569), 'numpy.abs', 'np.abs', (['(left_fitx[0] - right_fitx[0])'], {}), '(left_fitx[0] - right_fitx[0])\n', (11539, 11569), True, 'import numpy as np\n'), ((12868, 12892), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (12875, 12892), True, 'import numpy as np\n'), ((12944, 12969), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (12951, 12969), True, 'import numpy as np\n'), ((5497, 5543), 'numpy.abs', 'np.abs', (['(left_fit[1] - left_line.current_fit[1])'], {}), '(left_fit[1] - left_line.current_fit[1])\n', (5503, 5543), True, 'import numpy as np\n'), ((11224, 11270), 'numpy.abs', 'np.abs', (['(left_fit[1] - left_line.current_fit[1])'], {}), '(left_fit[1] - 
left_line.current_fit[1])\n', (11230, 11270), True, 'import numpy as np\n'), ((5329, 5400), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))'], {}), '(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))\n', (5335, 5400), True, 'import numpy as np\n'), ((5425, 5471), 'numpy.abs', 'np.abs', (['(left_fit[0] - left_line.current_fit[0])'], {}), '(left_fit[0] - left_line.current_fit[0])\n', (5431, 5471), True, 'import numpy as np\n'), ((11020, 11091), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))'], {}), '(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))\n', (11026, 11091), True, 'import numpy as np\n'), ((11134, 11180), 'numpy.abs', 'np.abs', (['(left_fit[0] - left_line.current_fit[0])'], {}), '(left_fit[0] - left_line.current_fit[0])\n', (11140, 11180), True, 'import numpy as np\n'), ((12102, 12140), 'numpy.vstack', 'np.vstack', (['[left_fitx - margin, ploty]'], {}), '([left_fitx - margin, ploty])\n', (12111, 12140), True, 'import numpy as np\n'), ((12463, 12502), 'numpy.vstack', 'np.vstack', (['[right_fitx - margin, ploty]'], {}), '([right_fitx - margin, ploty])\n', (12472, 12502), True, 'import numpy as np\n'), ((12209, 12247), 'numpy.vstack', 'np.vstack', (['[left_fitx + margin, ploty]'], {}), '([left_fitx + margin, ploty])\n', (12218, 12247), True, 'import numpy as np\n'), ((12572, 12611), 'numpy.vstack', 'np.vstack', (['[right_fitx + margin, ploty]'], {}), '([right_fitx + margin, ploty])\n', (12581, 12611), True, 'import numpy as np\n')] |
"""
Utility used to extract information from the source code of `./models.py`
for the "Database" section of the Sami Paper.
The processed document is output in the local file `models.md`.
"""
from pathlib import Path
from models import all_models
from numpydoc.docscrape import ClassDoc
output_file = Path(__file__).parent / 'models.md'

# Templates used to render one section per table.
table_doc_template = """### `{table}`
{desc}
{cols}
"""
bullet_point_template = "- `{type}` `{name}` - {desc}\n"

# Render every model's numpydoc class docstring into a markdown section.
sections = ['## Tables\n\n']
for model in all_models:
    doc = ClassDoc(model)
    bullets = ''.join(
        bullet_point_template.format(
            type=attribute.type,
            name=attribute.name,
            desc=' '.join(attribute.desc),
        )
        for attribute in doc['Attributes']
    )
    sections.append(table_doc_template.format(
        table=model.__tablename__,
        desc=' '.join(doc['Summary']),
        cols=bullets,
    ))
output_file.write_text(''.join(sections))
| [
"numpydoc.docscrape.ClassDoc",
"pathlib.Path"
] | [((555, 570), 'numpydoc.docscrape.ClassDoc', 'ClassDoc', (['model'], {}), '(model)\n', (563, 570), False, 'from numpydoc.docscrape import ClassDoc\n'), ((303, 317), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'from pathlib import Path\n')] |
# Spell checker support
try:
import enchant
except ImportError:
enchant = None
pre_processing = {r"\binclude\b": (),
r"\bdefine\b": (r"\b-DFLEXIBLE\b", r"\b-DPOSRES\b")}
run_control = {r"\bintegrator\b": (r"\bmd\b", r"\bmd-vv\b",
"\bmd-vv-avek\b", r"\bsd\b",
"\bsd2\b", r"\bbd\b",
r"\bsteep\b", r"\bcg\b", "\bl-bfgs\b",
r"\bnm\b", "\btpi\b", r"\btpic\b"),
r"\btinit\b": (), r"\bdt\b": (), r"\bnsteps\b": (),
r"\binit-step\b": (), r"\bcomm-mode\b": (r"\bLinear\b",
r"\bAngular\b",
"\bNone\b"),
r"\bnstcomm\b": (), r"\bcomm-grps\b": ()}
Langevin_dynamics = {r"\bbd-fric\b": (), r"\bld-seed\b": ()}
Energy_minimization = {r"\bemtol\b": (), r"\bemstep\b": (), r"\bnstcgsteep\b": (),
r"\bnbfgscorr\b": ()}
Shell_Molecular_Dynamics = {r"\bemtol\b": (), r"\bniter\b": (),
r"\bfcstep\b": ()}
Test_particle_insertion = {r"\brtpi\b": ()}
Output_control = {r"\bnstxout\b": (), r"\bnstxtcout\b": (), r"\bnstvout\b": (), r"\bnstfout\b": (),
r"\bnstlog\b": (), r"\bnstcalcenergy\b": (), r"\bnstenergy\b": (),
r"\bnstxout-compressed\b": (), r"\bcompressed-x-precision\b": (), r"\bcompressed-x-grps\b": (),
r"\benergygrps\b": ()}
Neighbor_searching = {r"\bcutoff-scheme\b": (r"\bVerlet\b", r"\bgroup\b"),
r"\bnstlist\b": (), r"\bnstcalclr\b": (),
r"\bns-type\b": (r"\bgrid\b", r"\bsimple\b"),
r"\bns_type\b": (),
r"\bpbc\b": (r"\bxyz\b", r"\bno\b", r"\bxy\b"),
r"\bperiodic-molecules\b": (r"\bno\b", r"\byes\b"),
r"\bverlet-buffer-tolerance\b": (), r"\brlist\b": (), r"\brlistlong\b": ()}
Electrostaticselectrostatics = {r"\bcoulombtype\b": (r"\bCut-off\b", r"\bEwald\b", r"\bPME\b", r"\bP3M-AD\b",
r"\bReaction-Field electrostaticsreaction-field electrostatics\b",
r"\bGeneralized-Reaction-Field\b", r"\bReaction-Field-zero\b",
r"\bReaction-Field-nec\b", r"\bShift\b", r"\bEncad-Shift\b",
r"\bSwitch\b", r"\bUser\b", r"\bPME-Switch\b",
r"\bPME-User\b", r"\bPME-User-Switch\b"),
r"\bcoulomb-modifier\b": (
r"\bPotential-shift-Verlet\b", r"\bPotential-shift\b", r"\bNone\b"),
r"\brcoulomb-switch\b": (), r"\brcoulomb\b": (), r"\bepsilon-r\b": (),
r"\bepsilon-rf\b": ()}
VdW = {r"\bvdw-modifier\b": (r"\bPotential-shift-Verlet\b",
r"\bPotential-shift\b", r"\bNone\b", r"\bForce-switch\b",
r"\bPotential-switch\b"),
r"\brvdw-switch\b": (),
r"\brvdw\b": (),
r"\bDispCorr\b": (r"\bno\b"), r"\bEnerPres\b": (), r"\bEner\b": ()}
Tables = {r"\btable-extension\b": (),
r"\benergygrp-table\b": ()}
Ewald = {r"\bfourierspacing\b": (),
r"\bfourier-nx\b": (),
r"\bfourier_nx\b": (),
r"\bfourier-ny\b": (),
r"\bfourier_ny\b": (),
r"\bfourier-nz\b": (),
r"\bfourier_nz\b": (),
r"\bpme-order\b": (),
r"\bpme_order\b": (),
r"\bewald-rtol\b": (),
r"\bewald_rtol\b": (),
r"\bewald-rtol-lj\b": (),
r"\bewald_rtol_lj\b": (),
r"\blj-pme-comb-rule\b": (r"\bGeometric\b", r"\bLorentz-Berthelot\b"),
r"\bewald-geometry\b": (r"\b3d\b", r"\b3dc\b"),
r"\boptimize_fft\b": (r"\bno\b", r"\byes\b")}
Temperature_coupling = {r"\btcoupl\b": (r"\bno\b", r"\bBerendsen\b",
r"\bNose-Hoover\b", r"\bAndersen\b",
r"\bAndersen-Massive\b", r"\bV-rescale\b", r"\bv-rescale\b"),
r"\bTcoupl\b": (),
r"\bnsttcouple\b": (),
r"\bnh-chain-length\b": (),
r"\btc-grps\b": (),
r"\btc_grps\b": (),
r"\btau-t\b": (),
r"\btau_t\b": (),
r"\bref-t\b": (), r"\bref_t\b": (), r"\bref_t\b": ()}
Pressure_coupling = {r"\bpcoupl\b": (r"\bno\b", r"\bberendsen\b",
r"\bParrinello-Rahman\b"),
r"\bPcoupl\b": (),
r"\bMTTK\b": (),
r"\bpcoupltype\b": (r"\bisotropic\b", r"\bsemiisotropic\b",
r"\banisotropic\b", r"\bsurface-tension\b"),
r"\bnstpcouple\b": (),
r"\btau-p\b": (),
r"\btau_p\b": (),
r"\bcompressibility\b": (),
r"\bref-p\b": (),
r"\bref_p\b": (),
r"\brefcoord-scaling\b": (r"\bno\b", r"\ball\b",
r"\bcom\b"),
r"\brefcoord_scaling\b": (r"\bno\b", r"\ball\b",
r"\bcom\b")}
Simulated_annealing = {r"\bannealing\b": (r"\bno\b", r"\bsingle\b",
r"\bperiodic\b"),
r"\bannealing-npoints\b": (),
r"\bannealing-time\b": (),
r"\bannealing-temp\b": ()}
Velocity_generation = {r"\bgen_vel\b": (r"\bno\b", r"\byes\b"),
r"\bgen-temp\b": (), r"\bgen_temp\b": (),
r"\bgen-seed\b": (), r"\bgen_seed\b": ()}
Bonds = {r"\bconstraintsconstraint algorithms\b": (r"\bnone\b", r"\bh-bonds\b",
r"\ball-bonds\b", r"\bh-angles\b", r"\ball-angles\b"),
r"\bconstraint-algorithm\b": (r"\bLINCS\b", r"\blincs\b",
r"\bSHAKE\b", r"\bshake\b"),
r"\bconstraint_algorithm\b": (r"\byes\b", r"\bno\b"),
r"\bconstraints\b": (),
r"\bcontinuation\b": (r"\byes\b", r"\bno\b"),
r"\bshake-tol\b": (),
r"\bshake_tol\b": (),
r"\blincs-order\b": (),
r"\blincs_order\b": (),
r"\blincs-iter\b": (),
r"\blincs_iter\b": (),
r"\blincs-warnangle\b": (),
r"\blincs_warnangle\b": (),
r"\bmorse\b": (r"\byes\b", r"\bno\b")}
Energy_exclusions = {r"\benergygrp-excl\b": ()}
Wallswalls = {r"\bnwall\b": (),
r"\bwall-atomtype:\b": (),
r"\bwall-type\b": (r"\b9-3\b", r"\b10-4\b", r"\b12-6\b",
r"\btable\b"),
r"\bwall-r-linpot\b": (),
r"\bwall-density\b": (),
r"\bwall-ewald-zfac\b": ()}
COM_pulling = {r"\bpull\b": (r"\bno\b", r"\bumbrella\b",
r"\constraint\b", r"\bconstant-force\b"),
r"\bpull_geometry\b": (r"\bdistance\b", r"\bdirection\b",
r"\bdirection-periodic\b", r"\bcylinder\b"),
r"\bpull_dim\b": (),
r"\bpull_r1\b": (),
r"\bpull_r0\b": (),
r"\bpull_constr-tol\b": (),
r"\bpull_start\b": (r"\bno\b", r"\byes\b"),
r"\bpull_print-reference\b": (r"\bno\b", r"\byes\b"),
r"\bpull_nstxout\b": (),
r"\bpull_nstfout\b": (),
r"\bpull_ngroups\b": (),
r"\bpull_ncoords\b": (),
r"\bpull_group1-name\b": (),
r"\bpull_group1\b": (),
r"\bpull_group0\b": (),
r"\bpull_init1\b": (),
r"\bpull_rate1\b": (),
r"\bpull_k1\b": (),
r"\bpull_group1-weights\b": (),
r"\bpull_group1-pbcatom\b": (),
r"\bpull_coord1-groups\b": (),
r"\bpull_coord1-origin\b": (),
r"\bpull_coord1-vec\b": (),
r"\bpull_coord1-init\b": (),
r"\bpull_coord1-rate\b": (),
r"\bpull_coord1-k\b": (),
r"\bpull_coord1-kB\b": ()}
NMR_refinement = {r"\bdisre\b": (r"\bno\b", r"\bsimple\b",
r"\ensemble\b"),
r"\bdisre_weighting\b": (r"\bequal\b", r"\bconservative\b"),
r"\bdisre_mixed\b": (r"\bno\b", r"\byes\b"),
r"\bdisre-fc\b": (),
r"\bdisre_fc\b": (),
r"\bdisre-tau\b": (),
r"\bdisre_tau\b": (),
r"\borire\b": (r"\bno\b", r"\byes\b"),
r"\borire-fc\b": (),
r"\borire_fc\b": (),
r"\borire-tau\b": (),
r"\borire_tau\b": (),
r"\borire-fitgrp\b": (),
r"\borire_fitgrp\b": (),
r"\bnstorireout\b": ()}
Free_energy = {r"\free-energy\b": (r"\bno\b", r"\byes\b",
r"\expanded\b"),
r"\bfree_energy\b": (r"\bequal\b", r"\bconservative\b"),
r"\bdisre-mixed\b": (r"\bno\b", r"\byes\b"),
r"\bdisre-fc\b": (),
r"\bdisre_fc\b": (),
r"\bdisre-tau\b": (),
r"\bdisre_tau\b": (),
r"\borire\b": (r"\bno\b", r"\byes\b"),
r"\borire-fc\b": (),
r"\borire_fc\b": (),
r"\borire-tau\b": (),
r"\borire_tau\b": (),
r"\borire-fitgrp\b": (),
r"\borire_fitgrp\b": (),
r"\bnstorireout\b": ()}
Total = (pre_processing, run_control, Langevin_dynamics,
Energy_minimization, Shell_Molecular_Dynamics,
Test_particle_insertion, Output_control, Neighbor_searching,
Electrostaticselectrostatics, VdW, Tables, Ewald,
Temperature_coupling, Pressure_coupling, Simulated_annealing,
Velocity_generation, Bonds, Energy_exclusions, Wallswalls,
COM_pulling, NMR_refinement, Free_energy)
def create_custom_Dict():
    """Build a pyenchant personal word list (PyPWL) of all .mdp option names
    and their enumerated values collected in ``Total``.

    The regex word-boundary markers are stripped with the ``[2:-2]`` slice
    (dropping the leading ``\\b`` and trailing ``\\b``) before insertion.

    Returns:
        enchant.pypwl.PyPWL: an in-memory dictionary usable with ``check()``.
    """
    custom_dict = enchant.pypwl.PyPWL()
    for section in Total:
        # Iterate key/value pairs directly; no need to materialize the keys
        # or go through "%s" formatting / str() casts — entries are already str.
        for option_pattern, allowed_values in section.items():
            option_name = option_pattern[2:-2]
            if option_name:
                custom_dict.add(option_name)
            for value_pattern in allowed_values:
                value_name = value_pattern[2:-2]
                if value_name:
                    custom_dict.add(value_name)
    return custom_dict
# dictus = create_custom_Dict()
##dictus = enchant.pypwl.PyPWL()
##print('tada ',dictus)
##dictus.add('fuck')
# print(dictus.check('damn'))
# print(dictus.check('distance'))
| [
"enchant.pypwl.PyPWL"
] | [((10494, 10515), 'enchant.pypwl.PyPWL', 'enchant.pypwl.PyPWL', ([], {}), '()\n', (10513, 10515), False, 'import enchant\n')] |
import Image
import os, sys
from glob import glob

# Build 256x256 JPEG thumbnails for every image under <top_folder>/jpg and
# store them in <top_folder>/thumbnails with a "_thumb" suffix.
top_folder = sys.argv[1]
size = 256, 256
input_image_dir = top_folder + '/jpg'
output_image_dir = top_folder + '/thumbnails'
if not os.path.isdir(output_image_dir):
    os.mkdir(output_image_dir)
for source_path in glob(input_image_dir + '/*.jpg'):
    # Derive the bare file name (no directory, no extension).
    stem = os.path.basename(os.path.splitext(source_path)[0])
    image = Image.open(source_path)
    image.thumbnail(size)
    image.save(output_image_dir + '/' + stem + '_thumb.jpg', 'JPEG')
| [
"os.path.splitext",
"Image.open",
"os.path.isdir",
"os.mkdir",
"os.path.basename",
"glob.glob"
] | [((265, 297), 'glob.glob', 'glob', (["(input_image_dir + '/*.jpg')"], {}), "(input_image_dir + '/*.jpg')\n", (269, 297), False, 'from glob import glob\n'), ((186, 217), 'os.path.isdir', 'os.path.isdir', (['output_image_dir'], {}), '(output_image_dir)\n', (199, 217), False, 'import os, sys\n'), ((223, 249), 'os.mkdir', 'os.mkdir', (['output_image_dir'], {}), '(output_image_dir)\n', (231, 249), False, 'import os, sys\n'), ((319, 343), 'os.path.splitext', 'os.path.splitext', (['infile'], {}), '(infile)\n', (335, 343), False, 'import os, sys\n'), ((359, 385), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (375, 385), False, 'import os, sys\n'), ((395, 413), 'Image.open', 'Image.open', (['infile'], {}), '(infile)\n', (405, 413), False, 'import Image\n')] |
import os
import webapp2
import jinja2
from google.appengine.api import users
import logging
# Jinja Loader: module-wide Jinja2 environment that loads templates relative
# to the process working directory (the app root on App Engine).
template_env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.getcwd()))
class HeaderHandler(webapp2.RequestHandler):
    """Serves the rendered site header fragment."""

    def get(self):
        header_template = template_env.get_template(
            'templates/dynamic/navigation/header.html')
        self.response.write(header_template.render({}))
class SideBarHandler(webapp2.RequestHandler):
    """Serves the rendered sidebar fragment."""

    def get(self):
        sidebar_template = template_env.get_template(
            'templates/dynamic/navigation/sidebar.html')
        self.response.write(sidebar_template.render({}))
class FooterHandler(webapp2.RequestHandler):
    """Serves the rendered site footer fragment."""

    def get(self):
        footer_template = template_env.get_template(
            'templates/dynamic/navigation/footer.html')
        self.response.write(footer_template.render({}))
# WSGI application: routes the navigation-fragment URLs to their handlers.
app = webapp2.WSGIApplication([
    ('/navigation/header', HeaderHandler),
    ('/navigation/sidebar', SideBarHandler),
    ('/navigation/footer', FooterHandler),
], debug=True)
| [
"webapp2.WSGIApplication",
"os.getcwd"
] | [((885, 1050), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/navigation/header', HeaderHandler), ('/navigation/sidebar',\n SideBarHandler), ('/navigation/footer', FooterHandler)]"], {'debug': '(True)'}), "([('/navigation/header', HeaderHandler), (\n '/navigation/sidebar', SideBarHandler), ('/navigation/footer',\n FooterHandler)], debug=True)\n", (908, 1050), False, 'import webapp2\n'), ((177, 188), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (186, 188), False, 'import os\n')] |
import pdb
import os
import torch
import tqdm
import argparse
import sys
sys.path.append('../utils/')
sys.path.append('../models/')
from torch.utils.data import DataLoader
from distances.euclidean import euclidean_distance
from distances.mahalanobis import mahalanobis_distance
from distances.kullback_leibler import kl_distance
from distances.earth_mover import emd_distance
from distances.jensen_shannon import js_distance
from preprocess import preprocess
from dataloader import OmniglotLoader, MiniImageNetLoader
from prototypical import ProtoNet
# from learn2learn.data.transforms import NWays, KShots, LoadData, RemapLabels
QUERY_PER_CLASS = 1
def parse_args():
    """Parse command-line options for few-shot training/evaluation."""
    parser = argparse.ArgumentParser()
    # (flag, type, default, help) — table-driven registration.
    option_table = [
        ('--dataset', str, 'omniglot', 'set dataset'),
        ('--epochs', int, 2000, 'set epochs'),
        ('--train_bsz', int, 32, 'set train batch size'),
        ('--val_bsz', int, 1600, 'set validation batch size'),
        ('--test_bsz', int, 1600, 'set test batch size'),
        ('--device', str, 'cpu', 'set device'),
        ('--kshot', int, 5, 'set k_shot'),
        ('--nway', int, 5, 'set n_way'),
        ('--lr', float, 1e-3, 'set learning rate'),
        ('--metric', str, 'euclidean', 'set distance metric'),  # mahalanobis, kl
    ]
    for flag, flag_type, default, help_text in option_table:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)
    return parser.parse_args()
def from_torch(x):
    """Convert a torch tensor into a plain numpy array (detached, on CPU)."""
    detached = x.detach()
    return detached.cpu().numpy()
def train(net, train_metadataset, val_metadataset, args):
    """Meta-train `net` for `args.epochs` steps, validating every 100 steps.

    Returns (trained net, per-step train accuracies, periodic val accuracies).
    """
    net = net.to(args.device)
    opt = torch.optim.Adam(net.parameters(), lr=args.lr)
    train_accuracies = []
    val_accuracies = []
    for step, tasks in tqdm.tqdm(zip(range(args.epochs), train_metadataset), total=args.epochs):
        # Class labels 0..nway-1 repeated for every task in the batch,
        # final shape (batch, nway).
        labels = torch.LongTensor(
            [[i] for i in range(args.nway)]).view(-1,args.nway).repeat(tasks.shape[0],1).to(args.device)
        loss, accuracy = net.forward(tasks, labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
        train_loss, train_accuracy = map(from_torch, (loss, accuracy))
        train_accuracies.append(train_accuracy)
        if (step + 1) % 100 == 0:
            # Periodic validation on a held-out batch of tasks.
            val_loss, val_accuracy = evaluate(net, val_metadataset, args)
            val_accuracies.append(val_accuracy)
            print('step=%s train(loss=%.5g, accuracy=%.5g) val(loss=%.5g, accuracy=%.5g)' % (
                step + 1, train_loss, train_accuracy, val_loss, val_accuracy
            ))
    return net, train_accuracies, val_accuracies
def evaluate(net, metadataset, args):
    """Run one evaluation batch without gradients.

    Returns (loss, accuracy) as numpy values.
    """
    with torch.no_grad():
        batch = next(iter(metadataset))
        label_row = torch.Tensor([[i] for i in range(args.nway)]).view(-1, args.nway)
        targets = label_row.repeat(batch.shape[0], 1).to(args.device)
        loss, accuracy = net.forward(batch, targets.long())
        loss = from_torch(loss)
        accuracy = from_torch(accuracy)
    return loss, accuracy
if __name__ == '__main__':
    # Entry point: build the data loaders, pick the distance metric, then
    # train a ProtoNet and evaluate it on the test split.
    args = parse_args()
    print("Preparing data ...")
    if args.dataset == 'omniglot':
        train_alphabets, val_alphabets, test_alphabets = preprocess(args.dataset)
        train_loader = OmniglotLoader(
            train_alphabets,
            args.train_bsz,
            k_shot=args.kshot,
            augment_rotate=True,
            augment_flip=True
        )
        valid_loader = OmniglotLoader(val_alphabets, args.val_bsz, k_shot=args.kshot)
        test_loader = OmniglotLoader(test_alphabets, args.test_bsz, k_shot=args.kshot)
    elif args.dataset == 'mini-imagenet':
        # NOTE(review): `l2l` (learn2learn) and the NWays/KShots/LoadData/
        # RemapLabels transforms are referenced below, but their import is
        # commented out at the top of the file — this branch raises NameError
        # until that import is restored.
        path_data = '../data/mini-imagenet/'
        train_dataset = l2l.vision.datasets.MiniImagenet(root=path_data, mode='train', download=True)
        valid_dataset = l2l.vision.datasets.MiniImagenet(root=path_data, mode='validation', download=True)
        test_dataset = l2l.vision.datasets.MiniImagenet(root=path_data, mode='test', download=True)
        train_dataset = l2l.data.MetaDataset(train_dataset)
        train_transforms = [
            NWays(train_dataset, args.nway),
            KShots(train_dataset, args.kshot+QUERY_PER_CLASS),
            LoadData(train_dataset),
            RemapLabels(train_dataset),
        ]
        train_tasks = l2l.data.TaskDataset(train_dataset, task_transforms=train_transforms)
        valid_dataset = l2l.data.MetaDataset(valid_dataset)
        valid_transforms = [
            NWays(valid_dataset, args.nway),
            KShots(valid_dataset, args.kshot+QUERY_PER_CLASS),
            LoadData(valid_dataset),
            RemapLabels(valid_dataset),
        ]
        valid_tasks = l2l.data.TaskDataset(valid_dataset,
                                             task_transforms=valid_transforms)
        test_dataset = l2l.data.MetaDataset(test_dataset)
        test_transforms = [
            NWays(test_dataset, args.nway),
            KShots(test_dataset, args.kshot+QUERY_PER_CLASS),
            LoadData(test_dataset),
            RemapLabels(test_dataset),
        ]
        test_tasks = l2l.data.TaskDataset(test_dataset,
                                            task_transforms=test_transforms)
        train_loader = MiniImageNetLoader(train_tasks, args.train_bsz, args.kshot+QUERY_PER_CLASS, args.nway)
        # BUG FIX: this was assigned to `val_loader`, but train() below is
        # called with `valid_loader`, which raised NameError on this path.
        valid_loader = MiniImageNetLoader(valid_tasks, args.val_bsz, args.kshot+QUERY_PER_CLASS, args.nway)
        test_loader = MiniImageNetLoader(test_tasks, args.test_bsz, args.kshot+QUERY_PER_CLASS, args.nway)
        # NOTE: mini-imagenet has RGB channels (unsupported)
    else:
        raise NotImplementedError(f"Dataset {args.dataset} not compatible!")

    # Map the metric name to its distance function (replaces the if/elif chain).
    metrics = {
        'euclidean': euclidean_distance,
        'mahalanobis': mahalanobis_distance,
        'kl': kl_distance,
        'emd': emd_distance,
        'js': js_distance,
    }
    try:
        metric = metrics[args.metric]
    except KeyError:
        raise NotImplementedError(f"Distance metric {args.metric} not compatible!")

    print("Training ...")
    mn_net, mn_train_accuracies, mn_val_accuracies = \
        train(ProtoNet(distance_function=metric), train_loader, valid_loader, args)
    mn_test = evaluate(mn_net, test_loader, args)
    print(mn_test)
    print("Training Complete!")
| [
"dataloader.OmniglotLoader",
"argparse.ArgumentParser",
"dataloader.MiniImageNetLoader",
"preprocess.preprocess",
"torch.no_grad",
"sys.path.append",
"prototypical.ProtoNet"
] | [((74, 102), 'sys.path.append', 'sys.path.append', (['"""../utils/"""'], {}), "('../utils/')\n", (89, 102), False, 'import sys\n'), ((103, 132), 'sys.path.append', 'sys.path.append', (['"""../models/"""'], {}), "('../models/')\n", (118, 132), False, 'import sys\n'), ((687, 712), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (710, 712), False, 'import argparse\n'), ((2858, 2873), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2871, 2873), False, 'import torch\n'), ((3401, 3425), 'preprocess.preprocess', 'preprocess', (['args.dataset'], {}), '(args.dataset)\n', (3411, 3425), False, 'from preprocess import preprocess\n'), ((3449, 3559), 'dataloader.OmniglotLoader', 'OmniglotLoader', (['train_alphabets', 'args.train_bsz'], {'k_shot': 'args.kshot', 'augment_rotate': '(True)', 'augment_flip': '(True)'}), '(train_alphabets, args.train_bsz, k_shot=args.kshot,\n augment_rotate=True, augment_flip=True)\n', (3463, 3559), False, 'from dataloader import OmniglotLoader, MiniImageNetLoader\n'), ((3649, 3711), 'dataloader.OmniglotLoader', 'OmniglotLoader', (['val_alphabets', 'args.val_bsz'], {'k_shot': 'args.kshot'}), '(val_alphabets, args.val_bsz, k_shot=args.kshot)\n', (3663, 3711), False, 'from dataloader import OmniglotLoader, MiniImageNetLoader\n'), ((3734, 3798), 'dataloader.OmniglotLoader', 'OmniglotLoader', (['test_alphabets', 'args.test_bsz'], {'k_shot': 'args.kshot'}), '(test_alphabets, args.test_bsz, k_shot=args.kshot)\n', (3748, 3798), False, 'from dataloader import OmniglotLoader, MiniImageNetLoader\n'), ((6389, 6423), 'prototypical.ProtoNet', 'ProtoNet', ([], {'distance_function': 'metric'}), '(distance_function=metric)\n', (6397, 6423), False, 'from prototypical import ProtoNet\n'), ((5421, 5513), 'dataloader.MiniImageNetLoader', 'MiniImageNetLoader', (['train_tasks', 'args.train_bsz', '(args.kshot + QUERY_PER_CLASS)', 'args.nway'], {}), '(train_tasks, args.train_bsz, args.kshot +\n QUERY_PER_CLASS, args.nway)\n', (5439, 5513), 
False, 'from dataloader import OmniglotLoader, MiniImageNetLoader\n'), ((5529, 5619), 'dataloader.MiniImageNetLoader', 'MiniImageNetLoader', (['valid_tasks', 'args.val_bsz', '(args.kshot + QUERY_PER_CLASS)', 'args.nway'], {}), '(valid_tasks, args.val_bsz, args.kshot + QUERY_PER_CLASS,\n args.nway)\n', (5547, 5619), False, 'from dataloader import OmniglotLoader, MiniImageNetLoader\n'), ((5636, 5726), 'dataloader.MiniImageNetLoader', 'MiniImageNetLoader', (['test_tasks', 'args.test_bsz', '(args.kshot + QUERY_PER_CLASS)', 'args.nway'], {}), '(test_tasks, args.test_bsz, args.kshot + QUERY_PER_CLASS,\n args.nway)\n', (5654, 5726), False, 'from dataloader import OmniglotLoader, MiniImageNetLoader\n')] |
import base64
import flask
from marshmallow import ValidationError
import sqlalchemy as sa
from . import meta
from .exceptions import ApiError
from .utils import if_none, iter_validation_errors
# -----------------------------------------------------------------------------
class PaginationBase(object):
    """Interface for pagination strategies used by resource views."""

    def get_page(self, query, view):
        """Return the sequence of items for the current page of *query*."""
        raise NotImplementedError()

    def get_item_meta(self, item, view):
        """Return per-item pagination metadata, or None when not applicable."""
        return None
# -----------------------------------------------------------------------------
class LimitPaginationBase(PaginationBase):
    """Pagination that fetches limit+1 rows to detect whether a next page exists."""

    def get_page(self, query, view):
        limit = self.get_limit()
        if limit is None:
            items = query.all()
            has_next_page = False
        else:
            # Fetch one extra row so we can tell if more pages follow.
            items = query.limit(limit + 1).all()
            has_next_page = len(items) > limit
            items = items[:limit]
        meta.update_response_meta({'has_next_page': has_next_page})
        return items

    def get_limit(self):
        """Subclasses return the page size (or None for unlimited)."""
        raise NotImplementedError()
class MaxLimitPagination(LimitPaginationBase):
    """Pagination capped at a fixed, non-configurable limit."""

    def __init__(self, max_limit):
        self._max_limit = max_limit

    def get_limit(self):
        # The limit is always the configured maximum.
        return self._max_limit
class LimitPagination(LimitPaginationBase):
    """Pagination driven by an optional ?limit= query parameter."""

    limit_arg = 'limit'

    def __init__(self, default_limit=None, max_limit=None):
        self._default_limit = if_none(default_limit, max_limit)
        self._max_limit = max_limit
        if self._max_limit is not None:
            assert self._default_limit <= self._max_limit, (
                "default limit exceeds max limit"
            )

    def get_limit(self):
        raw_limit = flask.request.args.get(self.limit_arg)
        try:
            return self.parse_limit(raw_limit)
        except ApiError as e:
            raise e.update({'source': {'parameter': self.limit_arg}})

    def parse_limit(self, limit):
        """Validate the raw limit string; fall back to the default when absent."""
        if limit is None:
            return self._default_limit
        try:
            parsed = int(limit)
        except ValueError:
            raise ApiError(400, {'code': 'invalid_limit'})
        if parsed < 0:
            raise ApiError(400, {'code': 'invalid_limit'})
        if self._max_limit is None:
            return parsed
        return min(parsed, self._max_limit)

    def spec_declaration(self, path, spec, **kwargs):
        path['get'].add_parameter(
            name='limit',
            type='int',
            description="pagination limit",
        )
        path['get'].add_property_to_response(
            prop_name='meta',
            type='object',
            properties={
                'has_next_page': {'type': 'boolean'},
            },
        )
class LimitOffsetPagination(LimitPagination):
    """Limit pagination extended with an ?offset= query parameter."""

    offset_arg = 'offset'

    def get_page(self, query, view):
        shifted_query = query.offset(self.get_offset())
        return super(LimitOffsetPagination, self).get_page(shifted_query, view)

    def get_offset(self):
        raw_offset = flask.request.args.get(self.offset_arg)
        try:
            return self.parse_offset(raw_offset)
        except ApiError as e:
            raise e.update({'source': {'parameter': self.offset_arg}})

    def parse_offset(self, offset):
        """Validate the raw offset string; absent means start of the result set."""
        if offset is None:
            return 0
        try:
            parsed = int(offset)
        except ValueError:
            raise ApiError(400, {'code': 'invalid_offset'})
        if parsed < 0:
            raise ApiError(400, {'code': 'invalid_offset'})
        return parsed

    def spec_declaration(self, path, spec, **kwargs):
        super(LimitOffsetPagination, self).spec_declaration(path, spec)
        path['get'].add_parameter(
            name='offset',
            type='int',
            description="pagination offset",
        )
class PagePagination(LimitOffsetPagination):
    """Fixed page-size pagination driven by a ?page= query parameter."""

    page_arg = 'page'

    def __init__(self, page_size):
        super(PagePagination, self).__init__()
        self._page_size = page_size

    def get_limit(self):
        # Every page has the same configured size.
        return self._page_size

    def get_offset(self):
        return self.get_request_page() * self._page_size

    def get_request_page(self):
        raw_page = flask.request.args.get(self.page_arg)
        try:
            return self.parse_page(raw_page)
        except ApiError as e:
            raise e.update({'source': {'parameter': self.page_arg}})

    def parse_page(self, page):
        """Validate the raw page string; absent means the first page."""
        if page is None:
            return 0
        try:
            parsed = int(page)
        except ValueError:
            raise ApiError(400, {'code': 'invalid_page'})
        if parsed < 0:
            raise ApiError(400, {'code': 'invalid_page'})
        return parsed

    def spec_declaration(self, path, spec, **kwargs):
        super(PagePagination, self).spec_declaration(path, spec)
        path['get'].add_parameter(
            name='page',
            type='int',
            description="page number",
        )
# -----------------------------------------------------------------------------
class CursorPaginationBase(LimitPagination):
    """Shared machinery for cursor-based pagination.

    A cursor is the dot-joined, URL-safe-base64 encoding of the current item's
    values for every ordering field; decoding it yields the WHERE clause that
    selects the rows strictly after that item in the sort order.
    """

    cursor_arg = 'cursor'

    def ensure_query_sorting(self, query, view):
        """Apply any missing ID orderings to *query* and return it together
        with the full (explicit + missing) field orderings."""
        sorting_field_orderings, missing_field_orderings = (
            self.get_sorting_and_missing_field_orderings(view)
        )
        query = view.sorting.sort_query_by_fields(
            query,
            view,
            missing_field_orderings,
        )
        field_orderings = sorting_field_orderings + missing_field_orderings
        return query, field_orderings

    def get_field_orderings(self, view):
        """Return the complete (explicit + missing-ID) field orderings."""
        sorting_field_orderings, missing_field_orderings = (
            self.get_sorting_and_missing_field_orderings(view)
        )
        return sorting_field_orderings + missing_field_orderings

    def get_sorting_and_missing_field_orderings(self, view):
        """Split orderings into the requested ones and the ID fields that must
        be appended to make the total order deterministic."""
        sorting = view.sorting
        assert sorting is not None, (
            "sorting must be defined when using cursor pagination"
        )
        sorting_field_orderings = sorting.get_request_field_orderings(view)
        sorting_ordering_fields = frozenset(
            field_name for field_name, _ in sorting_field_orderings
        )
        # For convenience, use the ascending setting on the last explicit
        # ordering when possible, such that reversing the sort will reverse
        # the IDs as well.
        if sorting_field_orderings:
            last_field_asc = sorting_field_orderings[-1][1]
        else:
            last_field_asc = True
        missing_field_orderings = tuple(
            (id_field, last_field_asc) for id_field in view.id_fields
            if id_field not in sorting_ordering_fields
        )
        return sorting_field_orderings, missing_field_orderings

    def get_request_cursor(self, view, field_orderings):
        """Read and parse the ?cursor= parameter; None when absent."""
        cursor = flask.request.args.get(self.cursor_arg)
        if not cursor:
            return None
        try:
            return self.parse_cursor(cursor, view, field_orderings)
        except ApiError as e:
            raise e.update({'source': {'parameter': self.cursor_arg}})

    def parse_cursor(self, cursor, view, field_orderings):
        """Decode the cursor and deserialize each component with the view's
        field deserializers; raises ApiError(400) on any mismatch."""
        cursor = self.decode_cursor(cursor)
        # One cursor component is required per ordering field.
        if len(cursor) != len(field_orderings):
            raise ApiError(400, {'code': 'invalid_cursor.length'})
        deserializer = view.deserializer
        column_fields = (
            deserializer.fields[field_name]
            for field_name, _ in field_orderings
        )
        try:
            cursor = tuple(
                field.deserialize(value)
                for field, value in zip(column_fields, cursor)
            )
        except ValidationError as e:
            raise ApiError(400, *(
                self.format_validation_error(message)
                for message, path in iter_validation_errors(e.messages)
            ))
        return cursor

    def decode_cursor(self, cursor):
        """Split the dot-joined cursor and base64-decode each component."""
        try:
            cursor = cursor.split('.')
            cursor = tuple(self.decode_value(value) for value in cursor)
        except (TypeError, ValueError):
            raise ApiError(400, {'code': 'invalid_cursor.encoding'})
        return cursor

    def decode_value(self, value):
        """URL-safe base64-decode one cursor component (padding restored)."""
        value = value.encode('ascii')
        value += (3 - ((len(value) + 3) % 4)) * b'='  # Add back padding.
        value = base64.urlsafe_b64decode(value)
        return value.decode()

    def format_validation_error(self, message):
        """Shape a marshmallow message as an API error body."""
        return {
            'code': 'invalid_cursor',
            'detail': message,
        }

    def get_filter(self, view, field_orderings, cursor):
        """Build the SQL filter selecting rows strictly after *cursor* in the
        lexicographic order defined by *field_orderings*."""
        sorting = view.sorting
        column_cursors = tuple(
            (sorting.get_column(view, field_name), asc, value)
            for (field_name, asc), value in zip(field_orderings, cursor)
        )
        # OR together one clause per prefix length: equal on the prefix,
        # strictly beyond on the final column.
        return sa.or_(
            self.get_filter_clause(column_cursors[:i + 1])
            for i in range(len(column_cursors))
        )

    def get_filter_clause(self, column_cursors):
        """AND of equality on all but the last column plus a strict
        inequality (direction per `asc`) on the last one."""
        previous_clauses = sa.and_(
            column == value for column, _, value in column_cursors[:-1]
        )
        column, asc, value = column_cursors[-1]
        if asc:
            current_clause = column > value
        else:
            current_clause = column < value
        return sa.and_(previous_clauses, current_clause)

    def make_cursors(self, items, view, field_orderings):
        """Render one cursor per item (e.g. for Relay-style responses)."""
        column_fields = self.get_column_fields(view, field_orderings)
        return tuple(
            self.render_cursor(item, column_fields) for item in items
        )

    def make_cursor(self, item, view, field_orderings):
        """Render the cursor for a single item."""
        column_fields = self.get_column_fields(view, field_orderings)
        return self.render_cursor(item, column_fields)

    def get_column_fields(self, view, field_orderings):
        """Look up the serializer field for each ordering field name."""
        serializer = view.serializer
        return tuple(
            serializer.fields[field_name]
            for field_name, _ in field_orderings
        )

    def render_cursor(self, item, column_fields):
        """Serialize each ordering value of *item* and encode the tuple."""
        cursor = tuple(
            field._serialize(getattr(item, field.name), field.name, item)
            for field in column_fields
        )
        return self.encode_cursor(cursor)

    def encode_cursor(self, cursor):
        """Join the base64-encoded components with dots."""
        return '.'.join(self.encode_value(value) for value in cursor)

    def encode_value(self, value):
        """URL-safe base64-encode one cursor component (padding stripped)."""
        value = str(value)
        value = value.encode()
        value = base64.urlsafe_b64encode(value)
        value = value.rstrip(b'=')  # Strip padding.
        return value.decode('ascii')

    def spec_declaration(self, path, spec, **kwargs):
        super(CursorPaginationBase, self).spec_declaration(path, spec)
        path['get'].add_parameter(
            name='cursor',
            type='string',
            description="pagination cursor",
        )
class RelayCursorPagination(CursorPaginationBase):
    """Cursor pagination that exposes Relay-style per-item cursors."""

    def get_page(self, query, view):
        query, orderings = self.ensure_query_sorting(query, view)
        request_cursor = self.get_request_cursor(view, orderings)
        if request_cursor is not None:
            page_filter = self.get_filter(view, orderings, request_cursor)
            query = query.filter(page_filter)
        items = super(RelayCursorPagination, self).get_page(query, view)
        # Relay clients expect one cursor per returned item.
        meta.update_response_meta({
            'cursors': self.make_cursors(items, view, orderings),
        })
        return items

    def get_item_meta(self, item, view):
        orderings = self.get_field_orderings(view)
        return {'cursor': self.make_cursor(item, view, orderings)}
| [
"flask.request.args.get",
"sqlalchemy.and_",
"base64.urlsafe_b64decode",
"base64.urlsafe_b64encode"
] | [((1680, 1718), 'flask.request.args.get', 'flask.request.args.get', (['self.limit_arg'], {}), '(self.limit_arg)\n', (1702, 1718), False, 'import flask\n'), ((2999, 3038), 'flask.request.args.get', 'flask.request.args.get', (['self.offset_arg'], {}), '(self.offset_arg)\n', (3021, 3038), False, 'import flask\n'), ((4112, 4149), 'flask.request.args.get', 'flask.request.args.get', (['self.page_arg'], {}), '(self.page_arg)\n', (4134, 4149), False, 'import flask\n'), ((6800, 6839), 'flask.request.args.get', 'flask.request.args.get', (['self.cursor_arg'], {}), '(self.cursor_arg)\n', (6822, 6839), False, 'import flask\n'), ((8316, 8347), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['value'], {}), '(value)\n', (8340, 8347), False, 'import base64\n'), ((9009, 9077), 'sqlalchemy.and_', 'sa.and_', (['(column == value for column, _, value in column_cursors[:-1])'], {}), '(column == value for column, _, value in column_cursors[:-1])\n', (9016, 9077), True, 'import sqlalchemy as sa\n'), ((9283, 9324), 'sqlalchemy.and_', 'sa.and_', (['previous_clauses', 'current_clause'], {}), '(previous_clauses, current_clause)\n', (9290, 9324), True, 'import sqlalchemy as sa\n'), ((10414, 10445), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['value'], {}), '(value)\n', (10438, 10445), False, 'import base64\n')] |
import yaml
import sys
from os import path
import tempfile
from subprocess import check_output
from os import listdir
from os.path import isfile, join
from shutil import copyfile
import json
import logging
# Module-wide logger handle; actually assigned in the __main__ block below.
# NOTE(review): a `global` statement at module scope is a no-op.
global LOGGER
# Keys that must be present under the 'general' section of the config file.
MANDATORY_GENERAL_PARAMETERS = [
    'pr-name',
    'branch-name',
    'commit-message',
    'git-add',
    'playbook-dir'
]
def create_logger():
    """Configure and return the 'git-manager' logger.

    Everything (DEBUG+) goes to the 'git-manager.logs' file; only errors are
    echoed to the console.
    """
    logger = logging.getLogger('git-manager')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    file_handler = logging.FileHandler('git-manager.logs')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    logger.info("------------------ STARTING ------------------")
    logger.info("Logger initialized")
    return logger
def error_message(message):
    """Log *message* as an error, emit the ending banner and exit with status 1."""
    LOGGER.error(message)
    LOGGER.info("------------------ ENDING ------------------")
    exit(1)
def validate_config(config):
    """Ensure the parsed config has 'orgs', 'general' and all mandatory keys.

    Calls error_message() (which exits the process) on the first missing entry.
    """
    LOGGER.debug("Starting to validate config")
    if 'orgs' not in config:
        error_message("'orgs' was not provided in the config file")
    if 'general' not in config:
        error_message("'general' was not provided in the config file")
    general_section = config['general']
    for required_key in MANDATORY_GENERAL_PARAMETERS:
        if required_key not in general_section:
            error_message("'{}' was not provided in the config file".format(required_key))
    LOGGER.debug("Successfully validated config")
def get_repo_dir(tmp_folder, org, repo):
    """Return the local checkout path <tmp_folder>/<org>/<repo>."""
    return "/".join((tmp_folder, org, repo))
def get_config_file_path():
    """Resolve the config file path (first CLI argument, else the default)
    and abort via error_message() if it does not exist."""
    LOGGER.debug("Starting to get config file path")
    # The default may be overridden by a single command-line argument.
    config_file_path = sys.argv[1] if len(sys.argv) == 2 else "git-manager-config.yml"
    if not path.exists(config_file_path):
        error_message("'{}' does not exists or is not readable".format(config_file_path))
    LOGGER.debug("Using config file: '{}'".format(config_file_path))
    LOGGER.debug("Successfully retrieved config file")
    return config_file_path
def subprocess_command(command, working_dir):
    """Run *command* in *working_dir*, logging the command line and its output."""
    LOGGER.info("executing: {}".format(" ".join(command)))
    output = check_output(command, cwd=working_dir)
    LOGGER.info(output.decode('utf-8'))
def git_command(working_dir, command, org, repo):
    """Run 'git <command> <github-url> <local-dir>' (e.g. clone)."""
    git_url = "https://github.com/{}/{}.git".format(org, repo)
    repo_dir = get_repo_dir(working_dir, org, repo)
    subprocess_command(['git', command, git_url, repo_dir], None)
def git_clone(working_dir, org, repo):
    """Clone the GitHub repository <org>/<repo> under the working directory."""
    git_command(working_dir, "clone", org, repo)
def git_branch(working_dir, org, repo, branch_name):
    """Create *branch_name* in the local clone of <org>/<repo>."""
    subprocess_command(['git', 'branch', branch_name],
                       get_repo_dir(working_dir, org, repo))
def git_checkout(working_dir, org, repo, branch_name):
    """Check out *branch_name* in the local clone of <org>/<repo>."""
    subprocess_command(['git', 'checkout', branch_name],
                       get_repo_dir(working_dir, org, repo))
def run_playbook(working_dir, org, repo, extra_vars):
    """Run 'ansible-playbook run.yml' inside the repo clone, passing any
    extra variables as a JSON string."""
    command = ['ansible-playbook', 'run.yml']
    if extra_vars is not None:
        command += ['--extra-vars', json.dumps(extra_vars)]
    subprocess_command(command, get_repo_dir(working_dir, org, repo))
def get_config():
    """Load, parse and validate the YAML configuration file.

    Returns:
        dict: the parsed configuration (after validate_config() has passed).
    """
    config_file_path = get_config_file_path()
    # Read the config content; 'with' closes the handle deterministically
    # (the original left the file object to the garbage collector).
    with open(config_file_path, 'r') as config_file:
        config_content = config_file.read()
    # safe_load avoids arbitrary Python object construction from the YAML and
    # fixes the "yaml.load() without Loader" deprecation; it still raises a
    # yaml.YAMLError if the file is not valid YAML.
    config = yaml.safe_load(config_content)
    # validate mandatory config fields
    validate_config(config)
    config_pprint = json.dumps(config, sort_keys=True, indent=4, separators=(',', ': '))
    LOGGER.info("git manager configuration: " + config_pprint)
    return config
def create_working_dir():
    """Create and return a fresh temporary directory for cloning repositories."""
    tmp_dir = tempfile.mkdtemp()
    LOGGER.info("Created a tmp directory for cloning repos: " + tmp_dir)
    return tmp_dir
def clone_and_setup_repos(config, working_dir):
    """Clone every configured repo, switch it to the work branch and copy the
    playbook files from general.playbook-dir into the clone."""
    for org, value in config['orgs'].items():
        repos = value['repos']
        for repo in repos:
            LOGGER.info("Cloning repo '{}/{}' and copying over files located in playbook directory".format(org, repo))
            git_clone(working_dir, org, repo)
            branch_name = config['general']['branch-name']
            # if we are creating branch then lets do it
            if config['general'].get('create-branch') is True:
                git_branch(working_dir, org, repo, branch_name)
            # now checkout branch
            git_checkout(working_dir, org, repo, branch_name)
            playbook_dir = config['general']['playbook-dir']
            # now copy over the playbook (plain files only, no subdirectories)
            playbook_files = [f for f in listdir(playbook_dir) if isfile(join(playbook_dir, f))]
            for playbook_file in playbook_files:
                repo_dir = get_repo_dir(working_dir, org, repo)
                copyfile(playbook_dir + "/" + playbook_file, repo_dir + "/" + playbook_file)
            LOGGER.info("Finished cloning repo '{}/{}'".format(org, repo))
def update_repos(config, working_dir):
    """Run the copied 'run.yml' playbook in every configured repository,
    injecting org/repo into the extra vars."""
    extra_vars = config['general'].get('extra-vars', {})
    for org, value in config['orgs'].items():
        for repo in value['repos']:
            LOGGER.info("Running 'run.yml' in '{}/{}'".format(org, repo))
            extra_vars['org'] = org
            extra_vars['repo'] = repo
            run_playbook(working_dir, org, repo, extra_vars)
            LOGGER.info("Finished running 'run.yml' in '{}/{}'".format(org, repo))
def git_add(config, working_dir, org, repo):
    """Stage the files listed under general.git-add in the repo clone."""
    # 'git-add' is expected to be a list of paths/patterns.
    command = ['git', 'add'] + config['general']['git-add']
    subprocess_command(command, get_repo_dir(working_dir, org, repo))
def git_commit(config, working_dir, org, repo):
    """Commit staged changes using the configured commit message."""
    command = ['git', 'commit', '-m', config['general']['commit-message']]
    subprocess_command(command, get_repo_dir(working_dir, org, repo))
def git_push(config, working_dir, org, repo):
    """Push the configured branch to origin, setting it as upstream."""
    command = ['git', 'push', '--set-upstream', 'origin',
               config['general']['branch-name']]
    subprocess_command(command, get_repo_dir(working_dir, org, repo))
def git_add_commit_push_repos(config, working_dir):
    """Stage, commit and push the changes in every configured repository."""
    for org, value in config['orgs'].items():
        for repo in value['repos']:
            git_add(config, working_dir, org, repo)
            git_commit(config, working_dir, org, repo)
            git_push(config, working_dir, org, repo)
def git_delete_branch_locally(config, working_dir, org, repo):
    """Delete the configured branch from the local clone."""
    command = ['git', 'branch', '-d', config['general']['branch-name']]
    subprocess_command(command, get_repo_dir(working_dir, org, repo))
def git_delete_branch_remote(config, working_dir, org, repo):
    """Delete the configured branch from the origin remote."""
    command = ['git', 'push', 'origin', '--delete',
               config['general']['branch-name']]
    subprocess_command(command, get_repo_dir(working_dir, org, repo))
def git_delete_branches(config, working_dir):
    """Switch every repo back to master, then delete the work branch both
    locally and on the remote."""
    for org, value in config['orgs'].items():
        for repo in value['repos']:
            git_checkout(working_dir, org, repo, "master")
            git_delete_branch_locally(config, working_dir, org, repo)
            git_delete_branch_remote(config, working_dir, org, repo)
def run_git_manager(config, working_dir):
    """Either delete the configured branch everywhere, or run the playbook
    and push the resulting changes."""
    if config['general'].get('delete-branch') is True:
        LOGGER.info("Starting to delete branches from repos")
        git_delete_branches(config, working_dir)
        return
    # Not deleting a branch, so assume we are adding/updating one.
    LOGGER.info("Starting to create branches in repos")
    update_repos(config, working_dir)
    git_add_commit_push_repos(config, working_dir)
def main():
    """Entry point: load the config, clone the repos, then run the manager."""
    config = get_config()
    working_dir = create_working_dir()
    clone_and_setup_repos(config, working_dir)
    run_git_manager(config, working_dir)
if __name__ == "__main__":
    # LOGGER is the module-level logger used by every helper above.
    LOGGER = create_logger()
    main()
    LOGGER.info("------------------ ENDING ------------------")
    # shutil.rmtree(dir_path)
| [
"logging.getLogger",
"os.path.exists",
"subprocess.check_output",
"logging.StreamHandler",
"os.listdir",
"logging.Formatter",
"json.dumps",
"os.path.join",
"yaml.load",
"shutil.copyfile",
"logging.FileHandler",
"tempfile.mkdtemp"
] | [((405, 437), 'logging.getLogger', 'logging.getLogger', (['"""git-manager"""'], {}), "('git-manager')\n", (422, 437), False, 'import logging\n'), ((530, 569), 'logging.FileHandler', 'logging.FileHandler', (['"""git-manager.logs"""'], {}), "('git-manager.logs')\n", (549, 569), False, 'import logging\n'), ((654, 677), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (675, 677), False, 'import logging\n'), ((766, 839), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (783, 839), False, 'import logging\n'), ((3860, 3885), 'yaml.load', 'yaml.load', (['config_content'], {}), '(config_content)\n', (3869, 3885), False, 'import yaml\n'), ((3965, 4033), 'json.dumps', 'json.dumps', (['config'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(config, sort_keys=True, indent=4, separators=(',', ': '))\n", (3975, 4033), False, 'import json\n'), ((4148, 4166), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4164, 4166), False, 'import tempfile\n'), ((2165, 2194), 'os.path.exists', 'path.exists', (['config_file_path'], {}), '(config_file_path)\n', (2176, 2194), False, 'from os import path\n'), ((3471, 3493), 'json.dumps', 'json.dumps', (['extra_vars'], {}), '(extra_vars)\n', (3481, 3493), False, 'import json\n'), ((2539, 2577), 'subprocess.check_output', 'check_output', (['command'], {'cwd': 'working_dir'}), '(command, cwd=working_dir)\n', (2551, 2577), False, 'from subprocess import check_output\n'), ((5090, 5166), 'shutil.copyfile', 'copyfile', (["(playbook_dir + '/' + playbook_file)", "(repo_dir + '/' + playbook_file)"], {}), "(playbook_dir + '/' + playbook_file, repo_dir + '/' + playbook_file)\n", (5098, 5166), False, 'from shutil import copyfile\n'), ((4938, 4959), 'os.listdir', 'listdir', (['playbook_dir'], {}), '(playbook_dir)\n', (4945, 4959), False, 'from os import listdir\n'), ((4970, 
4991), 'os.path.join', 'join', (['playbook_dir', 'f'], {}), '(playbook_dir, f)\n', (4974, 4991), False, 'from os.path import isfile, join\n')] |
from psycopg2 import sql
from django.core.management.base import CommandError
from django.db import connections
from psycopg2_extension.base_command import PsycopgBaseCommand
from psycopg2_extension.utils import init_database, comma_separated_strings
class Command(PsycopgBaseCommand):
    """Drop and recreate the configured database, then (re)initialize it.

    Must be run with a role that has permissions to drop & create
    databases and roles.
    """

    help = 'Initialize the database. It is necessary to use role which has permissions to drop & create databases ' \
           'and roles.'

    def add_arguments(self, parser):
        """Register init-specific CLI options on top of the base command's."""
        super().add_arguments(parser)
        parser.add_argument(
            '--snapshot-file',
            dest='snapshot_file',
            help='Snapshot file path'
        )
        parser.add_argument(
            '--extensions',
            dest='extensions',
            help='Database extensions separated with ,',
            type=comma_separated_strings
        )
        parser.add_argument(
            '--noinput', '--no-input', action='store_false', dest='interactive',
            help='Tells Django to NOT prompt the user for input of any kind.',
        )
        parser.add_argument(
            '--nosnapshot', action='store_true', dest='no_snapshot',
            help='Do not load snapshot.',
        )

    def handle(self, *args, **options):
        """Drop the target database, recreate it and run initialization.

        Raises CommandError when the user aborts the interactive prompt.
        """
        connection = self._get_connection(options['database'])
        if options['interactive']:
            message = (
                'This will delete existing database!\n'
                'Are you sure you want to do this?\n\n'
                "Type 'yes' to continue, or 'no' to cancel: "
            )
            if input(message) != 'yes':
                raise CommandError('Init SQL database cancelled.')
        db_name = connection.settings_dict['NAME']
        self.stdout.write(f'Init database {db_name}')
        # _nodb_cursor connects outside the target database so it can be
        # dropped (Django-internal API — re-check on Django upgrades).
        with connection._nodb_cursor() as cursor:
            # Terminate every other backend attached to the database first;
            # DROP DATABASE fails while other sessions are connected.
            cursor.execute("SELECT pid, pg_terminate_backend(pid) "
                           "FROM pg_stat_activity "
                           "WHERE pid <> pg_backend_pid() AND datname = %s", [db_name])
            # sql.Identifier safely quotes the database name.
            cursor.execute(sql.SQL("DROP DATABASE IF EXISTS {}").format(sql.Identifier(db_name)))
            cursor.execute(sql.SQL("CREATE DATABASE {}").format(sql.Identifier(db_name)))
        connection.close()
        # Collect optional overrides for the initialization step.
        connection_settings = {}
        if options['no_snapshot'] or options['snapshot_file']:
            connection_settings['SNAPSHOT_FILE'] = None if options['no_snapshot'] else options['snapshot_file']
        if options['extensions']:
            connection_settings['EXTENSIONS'] = options['extensions']
        init_database(connection, self.stdout.write, connection_settings)
| [
"psycopg2_extension.utils.init_database",
"psycopg2.sql.Identifier",
"django.core.management.base.CommandError",
"psycopg2.sql.SQL"
] | [((2555, 2620), 'psycopg2_extension.utils.init_database', 'init_database', (['connection', 'self.stdout.write', 'connection_settings'], {}), '(connection, self.stdout.write, connection_settings)\n', (2568, 2620), False, 'from psycopg2_extension.utils import init_database, comma_separated_strings\n'), ((1609, 1653), 'django.core.management.base.CommandError', 'CommandError', (['"""Init SQL database cancelled."""'], {}), "('Init SQL database cancelled.')\n", (1621, 1653), False, 'from django.core.management.base import CommandError\n'), ((2090, 2113), 'psycopg2.sql.Identifier', 'sql.Identifier', (['db_name'], {}), '(db_name)\n', (2104, 2113), False, 'from psycopg2 import sql\n'), ((2180, 2203), 'psycopg2.sql.Identifier', 'sql.Identifier', (['db_name'], {}), '(db_name)\n', (2194, 2203), False, 'from psycopg2 import sql\n'), ((2045, 2082), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DROP DATABASE IF EXISTS {}"""'], {}), "('DROP DATABASE IF EXISTS {}')\n", (2052, 2082), False, 'from psycopg2 import sql\n'), ((2143, 2172), 'psycopg2.sql.SQL', 'sql.SQL', (['"""CREATE DATABASE {}"""'], {}), "('CREATE DATABASE {}')\n", (2150, 2172), False, 'from psycopg2 import sql\n')] |
import utils
from os import listdir
from os.path import join
import datetime as dt
import copy
def parse(historical_crypto_prices, holdings, ticker_info, dividends):
    """Parse Coinbase Pro account-statement CSV exports into ``holdings``.

    Reads every ``coinbase*`` file under ``Data/CoinbasePro``; 'match'
    rows are appended to ``holdings[ticker]`` as buy/sell transactions
    (qty sign carries the direction).  Deposits/withdrawals are treated
    as transfers and fees cannot currently be matched to their trade, so
    all three are skipped — but their tickers are still registered.

    :param historical_crypto_prices: price cache consulted for each trade
    :param holdings: dict of ticker -> list of transaction dicts (mutated)
    :param ticker_info: ticker metadata registry (mutated)
    :param dividends: unused here; kept for a uniform parser signature
    """
    files = [f for f in listdir('Data/CoinbasePro') if f.startswith("coinbase")]
    for f in files:
        # Close the file promptly instead of leaking the handle.
        with open(join('Data/CoinbasePro', f), 'r') as fh:
            lines = fh.readlines()
        for l in lines:
            line = utils.removeCommasWithinQuotes(l.strip())
            cols = line.split(',')
            if len(cols) == 9:
                # Assumed signature:
                # portfolio,type,time,amount,balance,amount/balance unit,transfer id,trade id,order id
                if cols[0] == 'portfolio':
                    # Title row, ignore
                    continue
                trade_type = cols[1]
                date = dt.datetime.strptime(cols[2].split('T')[0], '%Y-%m-%d')
                ticker = cols[5]
                if ticker == 'GBP':
                    # No need to record GBP (fiat) transactions
                    continue
                utils.addCryptoTicker(holdings, ticker_info, ticker)
                if trade_type in ('deposit', 'withdrawal', 'fee'):
                    # Transfers to/from Coinbase, or fees that cannot be
                    # matched to their trade — nothing to record.
                    continue
                if trade_type == 'match':
                    qty = float(cols[3])
                    # Price lookup only for rows actually recorded, so the
                    # cache is not hit for skipped transfer/fee rows.
                    price = utils.getCryptoAssetPrice(historical_crypto_prices, ticker, date)
                    holdings[ticker].append(
                        {'date': date, 'qty': qty, 'price': price, 'commission': 0})
                else:
                    print('WARNING: Unsupported trade type:', trade_type)
            elif len(cols) >= 6:
                # This seems like a row we should parse but dont know the format.
                # Just raise a warning
                print('WARNING: Row not parsed, might be skipping actual transaction: ' + line)
| [
"os.listdir",
"os.path.join",
"utils.removeCommasWithinQuotes",
"utils.addCryptoTicker",
"utils.getCryptoAssetPrice"
] | [((200, 227), 'os.listdir', 'listdir', (['"""Data/CoinbasePro"""'], {}), "('Data/CoinbasePro')\n", (207, 227), False, 'from os import listdir\n'), ((486, 522), 'utils.removeCommasWithinQuotes', 'utils.removeCommasWithinQuotes', (['line'], {}), '(line)\n', (516, 522), False, 'import utils\n'), ((1184, 1249), 'utils.getCryptoAssetPrice', 'utils.getCryptoAssetPrice', (['historical_crypto_prices', 'ticker', 'date'], {}), '(historical_crypto_prices, ticker, date)\n', (1209, 1249), False, 'import utils\n'), ((1267, 1319), 'utils.addCryptoTicker', 'utils.addCryptoTicker', (['holdings', 'ticker_info', 'ticker'], {}), '(holdings, ticker_info, ticker)\n', (1288, 1319), False, 'import utils\n'), ((368, 395), 'os.path.join', 'join', (['"""Data/CoinbasePro"""', 'f'], {}), "('Data/CoinbasePro', f)\n", (372, 395), False, 'from os.path import join\n')] |
# The following example creates a CodeArtifact domain named my-domain to store repositories. It also creates two CodeArtifact repositories: my-repo and my-upstream-repo within the domain. my-repo has my-upstream-repo configured as an upstream repository, and my-upstream-repo has an external connection to the public repository, npmjs.
#
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeartifact-repository.html#aws-resource-codeartifact-repository--examples
from aws_cdk import (
core as cdk,
aws_codeartifact as codeartifact,
)
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core
class ReproStack(cdk.Stack):
    """Stack reproducing the CodeArtifact domain + upstream-repo example."""

    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # CodeArtifact domain that owns the repositories.
        artifact_domain = codeartifact.CfnDomain(
            self, 'myDomain',
            domain_name='thisismydomain',
        )

        # Upstream repository wired to the public npmjs registry.
        codeartifact.CfnRepository(
            self, 'myUpstreamRepo',
            repository_name='testUpstreamRepo',
            domain_name=artifact_domain.domain_name,
            external_connections=['public:npmjs'],
        )

        # A primary repository consuming the upstream one could be added
        # here, e.g. codeartifact.CfnRepository(self, 'myRepo',
        # repository_name='testrepo', domain_name=artifact_domain.domain_name,
        # upstreams=[<upstream>.repository_name]).
| [
"aws_cdk.aws_codeartifact.CfnRepository",
"aws_cdk.aws_codeartifact.CfnDomain"
] | [((1110, 1180), 'aws_cdk.aws_codeartifact.CfnDomain', 'codeartifact.CfnDomain', (['self', '"""myDomain"""'], {'domain_name': '"""thisismydomain"""'}), "(self, 'myDomain', domain_name='thisismydomain')\n", (1132, 1180), True, 'from aws_cdk import core as cdk, aws_codeartifact as codeartifact\n'), ((1241, 1407), 'aws_cdk.aws_codeartifact.CfnRepository', 'codeartifact.CfnRepository', (['self', '"""myUpstreamRepo"""'], {'repository_name': '"""testUpstreamRepo"""', 'domain_name': 'domain.domain_name', 'external_connections': "['public:npmjs']"}), "(self, 'myUpstreamRepo', repository_name=\n 'testUpstreamRepo', domain_name=domain.domain_name,\n external_connections=['public:npmjs'])\n", (1267, 1407), True, 'from aws_cdk import core as cdk, aws_codeartifact as codeartifact\n')] |
import traceback
from flask import abort
from flask_login import current_user
from ims import db
from ims.service.mappers.comUserMapper import selectComUser as __getUser
from ims.service.mappers.travelExpensesMapper import selectTraTravelExpensesList as __getList
from ims.service.mappers.travelExpensesMapper import selectTraTravelExpensesDetails as __getDetails
from ims.service.mappers.travelExpensesMapper import insertUpdateTraTravelExpenses as __insertUpdateOne
from ims.service.mappers.travelExpensesMapper import deleteTraTravelExpenses as __deleteOne
def getTravelExpensesList(userId, year, month):
    """Call the mapper that fetches one month's travel-expense list.

    :param userId: ID of the registering user
    :param year: registration year
    :param month: registration month
    :return: list of travel-expense DTOs
    """
    return __getList(userId, year, month)
def getTravelExpensesDetails(travelExpensesId):
    """Call the mapper that fetches the selected travel-expense details.

    Only records owned by a member of the current user's group are
    exposed; any conversion or lookup failure yields ``None``.

    :param travelExpensesId: travel-expense ID (numeric string or int)
    :return: the detail DTO, or ``None`` when missing or not permitted
    """
    try:
        dto = __getDetails(int(travelExpensesId))
        if not dto:
            return None
        owner = __getUser(dto.user_id)
        # Restrict access to the requesting user's own group.
        if owner.group_id == current_user.group_id:
            return dto
        return None
    except Exception:
        # Broad by design: invalid IDs and lookup failures all map to
        # "not found".  Unlike the previous bare ``except:`` this no
        # longer swallows SystemExit/KeyboardInterrupt.
        return None
def insertUpdateTravelExpenses(dto, isUpdate):
    """Call the mapper that inserts or updates a travel-expense record.

    Service-layer exceptions are caught and handled here.

    :param dto: travel-expense detail data
    :param isUpdate: flag selecting insert vs. update
    """
    try:
        __insertUpdateOne(dto,isUpdate)
        db.session.commit()
    except Exception:
        # Roll back and answer 500; the traceback goes to the server log.
        traceback.print_exc()
        db.session.rollback()
        abort(500)
    finally:
        # Always release the session.
        db.session.close()
def deleteTravelExpenses(travelExpensesId):
    """Call the mapper that deletes a travel-expense record.

    Service-layer exceptions are caught and handled here.

    :param travelExpensesId: travel-expense ID
    """
    try:
        __deleteOne(travelExpensesId)
        db.session.commit()
    except Exception:
        # Roll back and answer 500; the traceback goes to the server log.
        traceback.print_exc()
        db.session.rollback()
        abort(500)
    finally:
        # Always release the session.
db.session.close() | [
"ims.service.mappers.travelExpensesMapper.selectTraTravelExpensesList",
"ims.service.mappers.comUserMapper.selectComUser",
"ims.db.session.commit",
"ims.service.mappers.travelExpensesMapper.selectTraTravelExpensesDetails",
"ims.db.session.close",
"ims.service.mappers.travelExpensesMapper.deleteTraTravelEx... | [((740, 770), 'ims.service.mappers.travelExpensesMapper.selectTraTravelExpensesList', '__getList', (['userId', 'year', 'month'], {}), '(userId, year, month)\n', (749, 770), True, 'from ims.service.mappers.travelExpensesMapper import selectTraTravelExpensesList as __getList\n'), ((978, 994), 'ims.service.mappers.travelExpensesMapper.selectTraTravelExpensesDetails', '__getDetails', (['Id'], {}), '(Id)\n', (990, 994), True, 'from ims.service.mappers.travelExpensesMapper import selectTraTravelExpensesDetails as __getDetails\n'), ((1406, 1438), 'ims.service.mappers.travelExpensesMapper.insertUpdateTraTravelExpenses', '__insertUpdateOne', (['dto', 'isUpdate'], {}), '(dto, isUpdate)\n', (1423, 1438), True, 'from ims.service.mappers.travelExpensesMapper import insertUpdateTraTravelExpenses as __insertUpdateOne\n'), ((1446, 1465), 'ims.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1463, 1465), False, 'from ims import db\n'), ((1589, 1607), 'ims.db.session.close', 'db.session.close', ([], {}), '()\n', (1605, 1607), False, 'from ims import db\n'), ((1777, 1806), 'ims.service.mappers.travelExpensesMapper.deleteTraTravelExpenses', '__deleteOne', (['travelExpensesId'], {}), '(travelExpensesId)\n', (1788, 1806), True, 'from ims.service.mappers.travelExpensesMapper import deleteTraTravelExpenses as __deleteOne\n'), ((1815, 1834), 'ims.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1832, 1834), False, 'from ims import db\n'), ((1958, 1976), 'ims.db.session.close', 'db.session.close', ([], {}), '()\n', (1974, 1976), False, 'from ims import db\n'), ((1030, 1052), 'ims.service.mappers.comUserMapper.selectComUser', '__getUser', (['dto.user_id'], {}), '(dto.user_id)\n', (1039, 1052), True, 'from ims.service.mappers.comUserMapper import selectComUser as __getUser\n'), ((1496, 1517), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1515, 1517), False, 'import traceback\n'), ((1526, 
1547), 'ims.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (1545, 1547), False, 'from ims import db\n'), ((1556, 1566), 'flask.abort', 'abort', (['(500)'], {}), '(500)\n', (1561, 1566), False, 'from flask import abort\n'), ((1865, 1886), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1884, 1886), False, 'import traceback\n'), ((1895, 1916), 'ims.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (1914, 1916), False, 'from ims import db\n'), ((1925, 1935), 'flask.abort', 'abort', (['(500)'], {}), '(500)\n', (1930, 1935), False, 'from flask import abort\n')] |
#!/hive/groups/recon/local/bin/python
# Requires Python 2.6, current default python on hgwdev is 2.4
"""CGI script that outputs the ENCODE status based upon a specified
field.
"""
import cgi, cgitb
import datetime
import json
import sys
# Import local modules found in "/hive/groups/encode/dcc/charts"
sys.path.append("/hive/groups/encode/dcc/charts")
import encodeReportLib
import gviz_api

__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0.0"

# Show detailed tracebacks in the browser when this CGI script fails.
cgitb.enable()
def processReportFile(reportFile, statusLabel, keyIndex, norelease, species):
  """Parse the tab-separated ENCODE report into a Google-Visualization matrix.

  Each output row is [keyLabel, count, count, ...] with one count per
  entry of statusLabel, in statusLabel order.  Rows with status
  'revoked' or 'replaced' are always dropped; 'released' rows are
  dropped when norelease is 1.  species filters on the assembly column:
  'human' keeps hg*, 'mouse' keeps mm*, 'all' keeps everything, and any
  other value must match the assembly exactly.  When keyIndex is 5 the
  key column holds freeze dates, which get special label formatting and
  chronological ordering via encodeReportLib.
  """
  counts = {}
  with open(reportFile, "r") as f:
    for line in f:
      line = line.rstrip()
      if line.startswith('Project'):
        # Header row
        continue
      splitArray = line.split('\t')
      keyLabel = splitArray[keyIndex]
      status = splitArray[8]
      assembly = splitArray[9]
      if status == 'revoked' or status == 'replaced':
        continue
      if norelease == 1 and status == 'released':
        continue
      if keyIndex == 5:
        # Column 5 holds freeze dates that need prettifying
        keyLabel = encodeReportLib.parseFreezeLabel(keyLabel)
      # Species / assembly filter
      if species == 'human':
        if not assembly.startswith('hg'):
          continue
      elif species == 'mouse':
        if not assembly.startswith('mm'):
          continue
      elif species != 'all' and species != assembly:
        continue
      if keyLabel not in counts:
        counts[keyLabel] = dict.fromkeys(statusLabel, 0)
      counts[keyLabel][status] += 1

  # Freeze labels need chronological (not lexical) ordering
  if keyIndex == 5:
    sortKey = encodeReportLib.orderFreezeDateLabels(counts.keys())
  else:
    sortKey = sorted(counts)

  # Populate dataArray with the contents of the matrix
  dataArray = []
  for labKey in sortKey:
    row = [labKey]
    for statusKey in statusLabel:
      row.append(counts[labKey][statusKey])
    dataArray.append(row)
  return dataArray
def main():
  """Emit the ENCODE status stacked-column chart page for this CGI request."""
  form = cgi.FieldStorage()

  # CGI parameters:
  #   key       = project | lab | data | freeze | status  (grouping column)
  #   norelease = 0 (all data) | 1 (unreleased submissions only)
  #   species   = human | mouse | all
  keyField = form.getvalue('key')
  if keyField is None:
    keyField = 'project'
  norelease = form.getvalue('norelease')
  norelease = 0 if norelease is None else int(norelease)
  species = form.getvalue('species')
  if species is None:
    species = 'all'

  # Map each grouping key to its column index in the report file.
  keyColumn = {'project':0, 'lab':1, 'data':2, 'freeze':5, 'status':8}
  keyTitle = {'project':"Project", 'lab':"Lab", 'data':"Data_Type",
              'freeze':"Freeze", 'status':"Status"}
  if keyField not in keyColumn:
    keyField = 'project'
  keyIndex = keyColumn[keyField]

  # One string column for the key plus one number column per status;
  # 'released' is omitted when only unreleased data is requested.
  fullLabel = ['released', 'reviewing', 'approved', 'displayed', 'downloads', 'loaded']
  statusLabel = [s for s in fullLabel if not (s == 'released' and norelease == 1)]
  description = [(keyTitle[keyField], "string")]
  description += [(s, 'number') for s in statusLabel]

  reportFile, currentDate = encodeReportLib.getRecentReport()
  matrix = processReportFile(reportFile, statusLabel, keyIndex, norelease,
                             species)

  # Build the Google Visualization data table and its JavaScript code.
  data_table = gviz_api.DataTable(description)
  data_table.LoadData(matrix)
  jscode = data_table.ToJSCode("jscode_data")

  # Variables handed to the HTML template.
  template_vars = {
    'jscode': jscode,
    'dateStamp': encodeReportLib.dateIntToDateStr(currentDate),
    'title': "ENCODE (%s) Status by %s" % (species, keyTitle[keyField]),
    'packageName': 'columnchart',
    'visClass': 'ColumnChart',
    'style': "",
    'species': species,
    'keyField': keyField,
    'norelease': norelease,
  }

  # Chart-specific configuration options.
  colors = encodeReportLib.getColorArray(len(statusLabel))
  colors.reverse()
  chart_config = {
    'isStacked': 'true',
    'legendFontSize': 16,
    'width': 854,
    'height': 480,
    'titleX': keyTitle[keyField],
    'titleY': "# of Submissions",
    'tooltipFontSize': 16,
    'enableTooltip': 'true',
    'colors': colors,
  }
  template_vars['chart_config'] = json.dumps(chart_config)

  encodeReportLib.renderHtml(template_vars, 0, 1)
  return
# CGI entry point.
if __name__ == '__main__':
  main()
  sys.exit(0)
| [
"gviz_api.DataTable",
"encodeReportLib.getRecentReport",
"encodeReportLib.parseFreezeLabel",
"cgi.FieldStorage",
"sys.exit",
"json.dumps",
"encodeReportLib.renderHtml",
"encodeReportLib.dateIntToDateStr",
"cgitb.enable",
"sys.path.append"
] | [((305, 354), 'sys.path.append', 'sys.path.append', (['"""/hive/groups/encode/dcc/charts"""'], {}), "('/hive/groups/encode/dcc/charts')\n", (320, 354), False, 'import sys\n'), ((465, 479), 'cgitb.enable', 'cgitb.enable', ([], {}), '()\n', (477, 479), False, 'import cgi, cgitb\n'), ((1958, 1976), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (1974, 1976), False, 'import cgi, cgitb\n'), ((3290, 3323), 'encodeReportLib.getRecentReport', 'encodeReportLib.getRecentReport', ([], {}), '()\n', (3321, 3323), False, 'import encodeReportLib\n'), ((3480, 3511), 'gviz_api.DataTable', 'gviz_api.DataTable', (['description'], {}), '(description)\n', (3498, 3511), False, 'import gviz_api\n'), ((3742, 3787), 'encodeReportLib.dateIntToDateStr', 'encodeReportLib.dateIntToDateStr', (['currentDate'], {}), '(currentDate)\n', (3774, 3787), False, 'import encodeReportLib\n'), ((4694, 4718), 'json.dumps', 'json.dumps', (['chart_config'], {}), '(chart_config)\n', (4704, 4718), False, 'import json\n'), ((4722, 4769), 'encodeReportLib.renderHtml', 'encodeReportLib.renderHtml', (['template_vars', '(0)', '(1)'], {}), '(template_vars, 0, 1)\n', (4748, 4769), False, 'import encodeReportLib\n'), ((4819, 4830), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4827, 4830), False, 'import sys\n'), ((1085, 1127), 'encodeReportLib.parseFreezeLabel', 'encodeReportLib.parseFreezeLabel', (['keyLabel'], {}), '(keyLabel)\n', (1117, 1127), False, 'import encodeReportLib\n')] |
# -*- coding: utf-8 -*-
"""
...
"""
import LibraryTT.txt2array as conversion
import numpy as np
from numpy import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import random
import math
from mpl_toolkits.mplot3d import Axes3D
# import open3d as o3d
# %matplotlib inline
# Load the point cloud from disk via the project helper.
D = conversion.txt2array()
DD = np.copy(D) # Work on a copy so the original data stays untouched
# Clustering parameters (neighbourhood radius / minimum points)
Epsilon = 30
MinPts = 75 #78
# result = DBSCAN(DD,Epsilon,MinPts)
chch = conversion.RObjetos(DD,Epsilon,MinPts)
TN = conversion.usar(chch)
# Plot the raw data
conversion.imprimir3D(D)
# conversion.imprimir3D(DD)
# Plot the detected objects without noise
conversion.imprimirObjetos(TN,chch,0,0)
# Plot including noise
conversion.imprimirObjetos(TN,chch,1,0)
# conversion.imprimirObjetos(TN,chch,2,1)
# (objects, object sizes, 2, which object)
# RANSAC plane fitting.
# Seed accumulators for plane coefficients / plane points / plane groups;
# the dummy first row of `abcd` is removed after fitting.
abcd = np.array([[0,0,0,0]])
ldps = np.array([])
gplns = np.array([])
abcd,ldps,gplns = conversion.rnsc(TN,chch,abcd,ldps,gplns)
# Drop the dummy seed row
abcd = np.delete(abcd,0,axis=0)
# Find plane centres (small planes might deserve extra handling).
cplns = 0 # steps through each abcd value for the test
# p1 = 0
# sc = 0
# p2 = gplns[sc]
cplanos = np.array([[0,0,0]])
Dists = np.array([])
cplanos,Dists = conversion.centros(cplanos,Dists,TN,ldps,gplns)
# Outer / inner alarm distances (same units as the cloud — TODO confirm)
dext = 100
dint = 50
tvdext = np.array([])
tvdint = np.array([])
# Record which object indices fall inside the outer and inner ranges
# np.append(datosx,[[xmin,xmax]],axis=0)
for ima in range(0,len(Dists)):
    if (Dists[ima] <= dext):
        tvdext = np.append(tvdext,ima)
        # print("hay un obstaculo en zona alejada")
    if (Dists[ima] <= dint):
        tvdint = np.append(tvdint,ima)
        # print("Hay obstaculo cercano, detener y cambiar posicion")
# Prefer inner hits over outer ones: drop from the outer list any object
# already flagged as inner.
if (len(tvdext) > 0) and (len(tvdint) > 0):
    for ixt in range(0,len(tvdint)):
        for ixtt in range(0,len(tvdext)):
            if (tvdint[ixt] == tvdext[ixtt]):
                # NOTE(review): np.delete(tvdext[ixtt]) passes a scalar, not
                # (array, index) — probably meant np.delete(tvdext, ixtt);
                # confirm before relying on this branch.
                tvdext = np.delete(tvdext[ixtt])
                if (len(tvdext) <= 0):
                    break
prac = 0
if (len(tvdext) > 0) or (len(tvdint) > 0):
    # Inner-zone obstacles take priority.
    if (len(tvdint)>0):
        for din in range(0,len(tvdint)):
            xd = cplanos[int(tvdint[din]),0]
            yd = cplanos[int(tvdint[din]),1]
            # Bearing of the plane centre, in degrees
            angulo = math.atan2(xd,yd)
            angulo = math.degrees(angulo)
            # Trigger the matching vibration motor for each obstacle
            if (angulo >= 120):
                print("rapido dar un paso a la derecha")
                prac += 1
            if (angulo <= 60):
                print("rapido dar un paso a la izquierda")
                prac += 1
            if ((angulo > 60)and(angulo < 120)):
                print("Deten tu carruaje")
                prac += 1
    # Here the vibrators would be switched off.
    # Outer-zone warnings only when no inner-zone alert fired.
    if (prac == 0) and (len(tvdext)>0):
        for din in range(0,len(tvdext)):
            xd = cplanos[int(tvdext[din]),0]
            yd = cplanos[int(tvdext[din]),1]
            angulo = math.atan2(xd,yd)
            angulo = math.degrees(angulo)
            # Trigger the matching vibration motor for each obstacle
            if (angulo >= 120):
                print("dar un paso a la derecha")
            if (angulo <= 60):
                print("dar un paso a la izquierda")
            if ((angulo > 60)and(angulo < 120)):
                print("Abra algo")
| [
"numpy.copy",
"LibraryTT.txt2array.usar",
"LibraryTT.txt2array.imprimir3D",
"LibraryTT.txt2array.centros",
"numpy.delete",
"LibraryTT.txt2array.imprimirObjetos",
"math.degrees",
"numpy.append",
"numpy.array",
"math.atan2",
"LibraryTT.txt2array.rnsc",
"LibraryTT.txt2array.RObjetos",
"LibraryT... | [((305, 327), 'LibraryTT.txt2array.txt2array', 'conversion.txt2array', ([], {}), '()\n', (325, 327), True, 'import LibraryTT.txt2array as conversion\n'), ((336, 346), 'numpy.copy', 'np.copy', (['D'], {}), '(D)\n', (343, 346), True, 'import numpy as np\n'), ((486, 526), 'LibraryTT.txt2array.RObjetos', 'conversion.RObjetos', (['DD', 'Epsilon', 'MinPts'], {}), '(DD, Epsilon, MinPts)\n', (505, 526), True, 'import LibraryTT.txt2array as conversion\n'), ((533, 554), 'LibraryTT.txt2array.usar', 'conversion.usar', (['chch'], {}), '(chch)\n', (548, 554), True, 'import LibraryTT.txt2array as conversion\n'), ((578, 602), 'LibraryTT.txt2array.imprimir3D', 'conversion.imprimir3D', (['D'], {}), '(D)\n', (599, 602), True, 'import LibraryTT.txt2array as conversion\n'), ((669, 711), 'LibraryTT.txt2array.imprimirObjetos', 'conversion.imprimirObjetos', (['TN', 'chch', '(0)', '(0)'], {}), '(TN, chch, 0, 0)\n', (695, 711), True, 'import LibraryTT.txt2array as conversion\n'), ((733, 775), 'LibraryTT.txt2array.imprimirObjetos', 'conversion.imprimirObjetos', (['TN', 'chch', '(1)', '(0)'], {}), '(TN, chch, 1, 0)\n', (759, 775), True, 'import LibraryTT.txt2array as conversion\n'), ((913, 937), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (921, 937), True, 'import numpy as np\n'), ((943, 955), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (951, 955), True, 'import numpy as np\n'), ((965, 977), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (973, 977), True, 'import numpy as np\n'), ((999, 1043), 'LibraryTT.txt2array.rnsc', 'conversion.rnsc', (['TN', 'chch', 'abcd', 'ldps', 'gplns'], {}), '(TN, chch, abcd, ldps, gplns)\n', (1014, 1043), True, 'import LibraryTT.txt2array as conversion\n'), ((1048, 1074), 'numpy.delete', 'np.delete', (['abcd', '(0)'], {'axis': '(0)'}), '(abcd, 0, axis=0)\n', (1057, 1074), True, 'import numpy as np\n'), ((1269, 1290), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (1277, 1290), True, 
'import numpy as np\n'), ((1298, 1310), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1306, 1310), True, 'import numpy as np\n'), ((1328, 1379), 'LibraryTT.txt2array.centros', 'conversion.centros', (['cplanos', 'Dists', 'TN', 'ldps', 'gplns'], {}), '(cplanos, Dists, TN, ldps, gplns)\n', (1346, 1379), True, 'import LibraryTT.txt2array as conversion\n'), ((1414, 1426), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1422, 1426), True, 'import numpy as np\n'), ((1437, 1449), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1445, 1449), True, 'import numpy as np\n'), ((1663, 1685), 'numpy.append', 'np.append', (['tvdext', 'ima'], {}), '(tvdext, ima)\n', (1672, 1685), True, 'import numpy as np\n'), ((1791, 1813), 'numpy.append', 'np.append', (['tvdint', 'ima'], {}), '(tvdint, ima)\n', (1800, 1813), True, 'import numpy as np\n'), ((2496, 2514), 'math.atan2', 'math.atan2', (['xd', 'yd'], {}), '(xd, yd)\n', (2506, 2514), False, 'import math\n'), ((2536, 2556), 'math.degrees', 'math.degrees', (['angulo'], {}), '(angulo)\n', (2548, 2556), False, 'import math\n'), ((3250, 3268), 'math.atan2', 'math.atan2', (['xd', 'yd'], {}), '(xd, yd)\n', (3260, 3268), False, 'import math\n'), ((3290, 3310), 'math.degrees', 'math.degrees', (['angulo'], {}), '(angulo)\n', (3302, 3310), False, 'import math\n'), ((2160, 2183), 'numpy.delete', 'np.delete', (['tvdext[ixtt]'], {}), '(tvdext[ixtt])\n', (2169, 2183), True, 'import numpy as np\n')] |
import sys
import os
import numpy as np
import pandas as pd
from Globals import *
#-------- Create directories ------------
# All dir_* / file_* / column settings come from Globals (star import).
os.makedirs(dir_data,exist_ok=True)
os.makedirs(dir_chain,exist_ok=True)
os.makedirs(dir_plots,exist_ok=True)
os.makedirs(dir_outs,exist_ok=True)
#------------- Load data ----------------------------------
df = pd.read_csv(file_data,usecols=columns_data,nrows=n_sources)
# Normalize the configured NaN placeholders to real NaNs.
df.replace(to_replace=nan_values,value=np.nan,inplace=True)
df.set_index(identifier,inplace=True)
n_init = len(df)
print("The data set contains {0} sources.".format(n_init))
#-----------------------------------------------------------
#+++++++++++++++++++ Filter data ++++++++++++++++++++++++++
#---- Set as NaN the BP values larger than limit_BP -------
idx = np.where(df[label_BP] > limit_BP)[0]
if len(idx) > 0:
    df.loc[df.iloc[idx].index,label_BP] = np.nan
#----------------------------------------------------------
#---- Set uncertainty as missing if band is missing ---
for ob,un in zip(phot_obs,phot_unc):
    mask = np.isnan(df.loc[:,ob])
    df.loc[mask,un] = np.nan
#----------------------------------------------------------
#- Set uncertainty to nan_unc if band is observed but its
#  uncertainty is missing ---------------------------------
for ob,un in zip(phot_obs,phot_unc):
    mask = np.isnan(df.loc[:,un]) & np.isfinite(df.loc[:,ob])
    df.loc[mask,un] = nan_unc
#----------------------------------------------------------
#--- Remove objects with less than n_obs_min bands --------
df.dropna(thresh=n_obs_min,subset=phot_obs,inplace=True)
#----------------------------------------------------------
#---- Minimum uncertainty: inflate every uncertainty by add_unc
for un in phot_unc:
    df.loc[:,un] += add_unc
#----------------------------------------------------------
print("After filtering {0} sources were removed.".format(n_init - len(df)))
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++ Split data frame +++++++++++++++++++++++++++++++++
# Split into `size` equal chunks (presumably one per worker process —
# TODO confirm where `size` is defined in Globals); the last chunk
# absorbs the remainder.
n_sources = len(df)
group_size = int(np.floor(n_sources/size))
reminder = n_sources % size
group_size = np.repeat(group_size,size)
group_size[-1] += reminder
# Label rows 1..size so groupby can split them.
groups = []
for g,gs in enumerate(group_size):
    groups.append(np.repeat(g+1,gs))
groups = np.concatenate(groups)
df.insert(loc=0,column="Groups",value=groups)
grouped_df = df.groupby("Groups")
#--- Write each chunk -----
for g in range(1,size+1):
    grouped_df.get_group(g).to_csv(dir_data + "data_{0}_of_{1}.csv".format(g,size))
| [
"numpy.repeat",
"os.makedirs",
"pandas.read_csv",
"numpy.where",
"numpy.floor",
"numpy.isnan",
"numpy.isfinite",
"numpy.concatenate"
] | [((125, 161), 'os.makedirs', 'os.makedirs', (['dir_data'], {'exist_ok': '(True)'}), '(dir_data, exist_ok=True)\n', (136, 161), False, 'import os\n'), ((161, 198), 'os.makedirs', 'os.makedirs', (['dir_chain'], {'exist_ok': '(True)'}), '(dir_chain, exist_ok=True)\n', (172, 198), False, 'import os\n'), ((198, 235), 'os.makedirs', 'os.makedirs', (['dir_plots'], {'exist_ok': '(True)'}), '(dir_plots, exist_ok=True)\n', (209, 235), False, 'import os\n'), ((235, 271), 'os.makedirs', 'os.makedirs', (['dir_outs'], {'exist_ok': '(True)'}), '(dir_outs, exist_ok=True)\n', (246, 271), False, 'import os\n'), ((337, 398), 'pandas.read_csv', 'pd.read_csv', (['file_data'], {'usecols': 'columns_data', 'nrows': 'n_sources'}), '(file_data, usecols=columns_data, nrows=n_sources)\n', (348, 398), True, 'import pandas as pd\n'), ((2019, 2046), 'numpy.repeat', 'np.repeat', (['group_size', 'size'], {}), '(group_size, size)\n', (2028, 2046), True, 'import numpy as np\n'), ((2164, 2186), 'numpy.concatenate', 'np.concatenate', (['groups'], {}), '(groups)\n', (2178, 2186), True, 'import numpy as np\n'), ((759, 792), 'numpy.where', 'np.where', (['(df[label_BP] > limit_BP)'], {}), '(df[label_BP] > limit_BP)\n', (767, 792), True, 'import numpy as np\n'), ((1021, 1044), 'numpy.isnan', 'np.isnan', (['df.loc[:, ob]'], {}), '(df.loc[:, ob])\n', (1029, 1044), True, 'import numpy as np\n'), ((1952, 1978), 'numpy.floor', 'np.floor', (['(n_sources / size)'], {}), '(n_sources / size)\n', (1960, 1978), True, 'import numpy as np\n'), ((1230, 1253), 'numpy.isnan', 'np.isnan', (['df.loc[:, un]'], {}), '(df.loc[:, un])\n', (1238, 1253), True, 'import numpy as np\n'), ((1255, 1281), 'numpy.isfinite', 'np.isfinite', (['df.loc[:, ob]'], {}), '(df.loc[:, ob])\n', (1266, 1281), True, 'import numpy as np\n'), ((2135, 2155), 'numpy.repeat', 'np.repeat', (['(g + 1)', 'gs'], {}), '(g + 1, gs)\n', (2144, 2155), True, 'import numpy as np\n')] |
from estimator_adaptative import EstimatorAdaptative
from mpl_toolkits.mplot3d import Axes3D
from grid_search import GridSearch
from sklearn import metrics
import matplotlib.pyplot as plt
import matplotlib as mpl
from utils import *
import numpy as np
import os
import sys
# Input sequences and plot output location.
data_path = '../../databases'
PlotsDirectory = '../plots/Week2/task2/'
if not os.path.exists(PlotsDirectory):
    os.makedirs(PlotsDirectory)

Pr = list()
Re = list()

# One entry per sequence: frame ranges used to fit (estimation) and to
# evaluate (prediction), the alpha-sweep definition and the rho value.
names = ['highway', 'fall', 'traffic']
estimation_range = [np.array([1050, 1200]), np.array([1460, 1510]), np.array([950, 1000])]
prediction_range = [np.array([1201, 1350]), np.array([1511, 1560]), np.array([1001, 1050])]
a = [{'min':0, 'max':40, 'step':1}, {'min':0, 'max':40, 'step':1},{'min':0, 'max':40, 'step':1}]
rho = [0.599, 0.004,0]

for i in range(len(names)):
    # Optional CLI argument restricts the run to a single named sequence.
    if len(sys.argv) > 1:
        i = names.index(str(sys.argv[1]))
    print('computing ' + names[i] +' ...')
    # load_data comes from utils (star import); y_est is unused here —
    # presumably only the frames are needed for fitting; confirm.
    [X_est, y_est] = load_data(data_path, names[i], estimation_range[i], grayscale=True)
    [X_pred, y_pred] = load_data(data_path, names[i], prediction_range[i], grayscale=True)
    alpha_range = np.arange(a[i].get('min'), a[i].get('max'), a[i].get('step'))
    for idx, alpha in enumerate(alpha_range):
        print(str(idx) + "/" + str(len(alpha_range)) + " " + str(alpha))
        # Two estimators per alpha: one scored on precision, one on recall.
        estPrecision = EstimatorAdaptative(alpha=alpha, rho=rho[i], metric="precision")
        estRecall = EstimatorAdaptative(alpha=alpha, rho=rho[i], metric="recall")
        estPrecision.fit(X_est)
        estRecall.fit(X_est)
        Pr.append(estPrecision.score(X_pred, y_pred))
        Re.append(estRecall.score(X_pred, y_pred))

    plt.figure()
    plt.plot(np.array(Re), np.array(Pr), 'b', label='Precision-Recall')
    # NOTE(review): the third positional argument to metrics.auc is the old
    # `reorder` flag, removed in newer scikit-learn — confirm the pinned
    # sklearn version still accepts it.
    plt.title("Precision vs Recall curve [AUC = " + str(round(metrics.auc(Re, Pr, True), 4)) + "] [" + names[i] + " sequence]")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.savefig(PlotsDirectory + names[i] + '_PRcurve_AUC.png', bbox_inches='tight')
    plt.close()
    if len(sys.argv) > 1:
        break
    # Reset accumulators for the next sequence.
    Pr[:] = []
    Re[:] = []
| [
"os.path.exists",
"matplotlib.pyplot.savefig",
"estimator_adaptative.EstimatorAdaptative",
"os.makedirs",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure"
] | [((353, 383), 'os.path.exists', 'os.path.exists', (['PlotsDirectory'], {}), '(PlotsDirectory)\n', (367, 383), False, 'import os\n'), ((389, 416), 'os.makedirs', 'os.makedirs', (['PlotsDirectory'], {}), '(PlotsDirectory)\n', (400, 416), False, 'import os\n'), ((501, 523), 'numpy.array', 'np.array', (['[1050, 1200]'], {}), '([1050, 1200])\n', (509, 523), True, 'import numpy as np\n'), ((525, 547), 'numpy.array', 'np.array', (['[1460, 1510]'], {}), '([1460, 1510])\n', (533, 547), True, 'import numpy as np\n'), ((549, 570), 'numpy.array', 'np.array', (['[950, 1000]'], {}), '([950, 1000])\n', (557, 570), True, 'import numpy as np\n'), ((592, 614), 'numpy.array', 'np.array', (['[1201, 1350]'], {}), '([1201, 1350])\n', (600, 614), True, 'import numpy as np\n'), ((616, 638), 'numpy.array', 'np.array', (['[1511, 1560]'], {}), '([1511, 1560])\n', (624, 638), True, 'import numpy as np\n'), ((640, 662), 'numpy.array', 'np.array', (['[1001, 1050]'], {}), '([1001, 1050])\n', (648, 662), True, 'import numpy as np\n'), ((1648, 1660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1658, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1885), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (1875, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1913), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (1900, 1913), True, 'import matplotlib.pyplot as plt\n'), ((1919, 2004), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(PlotsDirectory + names[i] + '_PRcurve_AUC.png')"], {'bbox_inches': '"""tight"""'}), "(PlotsDirectory + names[i] + '_PRcurve_AUC.png', bbox_inches='tight'\n )\n", (1930, 2004), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2015), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2013, 2015), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1394), 'estimator_adaptative.EstimatorAdaptative', 'EstimatorAdaptative', ([], {'alpha': 'alpha', 'rho': 
'rho[i]', 'metric': '"""precision"""'}), "(alpha=alpha, rho=rho[i], metric='precision')\n", (1349, 1394), False, 'from estimator_adaptative import EstimatorAdaptative\n'), ((1415, 1476), 'estimator_adaptative.EstimatorAdaptative', 'EstimatorAdaptative', ([], {'alpha': 'alpha', 'rho': 'rho[i]', 'metric': '"""recall"""'}), "(alpha=alpha, rho=rho[i], metric='recall')\n", (1434, 1476), False, 'from estimator_adaptative import EstimatorAdaptative\n'), ((1674, 1686), 'numpy.array', 'np.array', (['Re'], {}), '(Re)\n', (1682, 1686), True, 'import numpy as np\n'), ((1688, 1700), 'numpy.array', 'np.array', (['Pr'], {}), '(Pr)\n', (1696, 1700), True, 'import numpy as np\n'), ((1795, 1820), 'sklearn.metrics.auc', 'metrics.auc', (['Re', 'Pr', '(True)'], {}), '(Re, Pr, True)\n', (1806, 1820), False, 'from sklearn import metrics\n')] |
"""
Links content subjects together.
"""
import re
from django.core.management.base import BaseCommand, CommandError
from croftair.models import Subject
class Command(BaseCommand):
"""
Command class.
"""
def handle(self, *args, **kwargs):
"""
Handler method.
"""
for subject in Subject.objects.all():
body = subject.body
for inner_subject in Subject.objects.exclude(pk=subject.pk):
if inner_subject.name in body:
print('Detecting', inner_subject.name, 'in body of', subject.name)
inner_subject_key = ' %s ' % inner_subject.name
inner_subject_em_key = ' <em>%s</em> ' % inner_subject.name
inner_subject_replacement = ' <a href="/subject/%s/"><em>%s</em></a> ' % (inner_subject.slug, inner_subject.name)
body = body.replace(inner_subject_em_key, inner_subject_replacement)
body = body.replace(inner_subject_key, inner_subject_replacement)
inner_subject_period_key = ' %s.' % inner_subject.name
inner_subject_period_replacement = inner_subject_replacement[:-1] + '.'
body = body.replace(inner_subject_period_key, inner_subject_period_replacement)
inner_subject_comma_key = ' %s,' % inner_subject.name
inner_subject_comma_replacement = inner_subject_replacement[:-1] + ','
body = body.replace(inner_subject_comma_key, inner_subject_comma_replacement)
for alias in inner_subject.aliases.all():
if alias.alias in body:
print('Detecting alias', alias.alias, 'in body of', subject.name)
alias_key = ' %s ' % alias.alias
alias_em_key = ' <em>%s</em> ' % alias.alias
alias_replacement = ' <a href="/subject/%s/"><em>%s</em></a> ' % (inner_subject.slug, alias.alias)
body = body.replace(alias_em_key, alias_replacement)
body = body.replace(alias_key, alias_replacement)
alias_period_key = alias_key[:-1] + '.'
alias_period_replacement = alias_replacement[:-1] + '.'
body = body.replace(alias_period_key, alias_period_replacement)
alias_comma_key = alias_key[:-1] + ','
alias_comma_replacement = alias_replacement[:-1] + ','
body = body.replace(alias_comma_key, alias_comma_replacement)
subject.body = body
subject.save()
| [
"croftair.models.Subject.objects.exclude",
"croftair.models.Subject.objects.all"
] | [((332, 353), 'croftair.models.Subject.objects.all', 'Subject.objects.all', ([], {}), '()\n', (351, 353), False, 'from croftair.models import Subject\n'), ((433, 471), 'croftair.models.Subject.objects.exclude', 'Subject.objects.exclude', ([], {'pk': 'subject.pk'}), '(pk=subject.pk)\n', (456, 471), False, 'from croftair.models import Subject\n')] |
#!/usr/bin
# -*- coding: utf-8 -*-
"""
DelftStack Python Tkinter Tutorial
Author: <NAME>
URL: https://www.delftstack.com/tutorial/tkinter-tutorial/tkinter-button/
Website: https://www.delftstack.com
"""
from sys import version_info
if version_info.major == 2:
import Tkinter as tk
elif version_info.major == 3:
import tkinter as tk
from functools import partial
app = tk.Tk()
labelExample = tk.Button(app, text="0")
def change_label_number(num):
counter = int(str(labelExample['text']))
counter += num
labelExample.config(text=str(counter))
buttonExample = tk.Button(app, text="Increase", width=30,
command=partial(change_label_number, 2))
buttonExample.pack()
labelExample.pack()
app.mainloop() | [
"tkinter.Tk",
"functools.partial",
"tkinter.Button"
] | [((394, 401), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (399, 401), True, 'import tkinter as tk\n'), ((417, 441), 'tkinter.Button', 'tk.Button', (['app'], {'text': '"""0"""'}), "(app, text='0')\n", (426, 441), True, 'import tkinter as tk\n'), ((677, 708), 'functools.partial', 'partial', (['change_label_number', '(2)'], {}), '(change_label_number, 2)\n', (684, 708), False, 'from functools import partial\n')] |
# Generated by Django 2.0.2 on 2018-02-15 19:41
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('offers_private_lessons', models.BooleanField(default=False)),
('address', models.TextField()),
('bio', models.TextField()),
('website', models.CharField(max_length=256)),
('phone', models.CharField(max_length=20)),
('profile_img_url', models.CharField(max_length=256)),
('social', django.contrib.postgres.fields.jsonb.JSONField()),
('time_created', models.DateTimeField()),
('last_modified', models.DateTimeField()),
('category', models.ManyToManyField(to='directory.Category')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=64)),
('password', models.CharField(max_length=256)),
('name', models.CharField(max_length=256)),
('time_created', models.DateTimeField()),
('last_modified', models.DateTimeField()),
('user_type', models.IntegerField(choices=[(1, 'Admin'), (0, 'User')], default=0)),
],
),
]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((348, 441), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (364, 441), False, 'from django.db import migrations, models\n'), ((465, 497), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (481, 497), False, 'from django.db import migrations, models\n'), ((630, 723), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (646, 723), False, 'from django.db import migrations, models\n'), ((747, 779), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (763, 779), False, 'from django.db import migrations, models\n'), ((825, 859), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (844, 859), False, 'from django.db import migrations, models\n'), ((890, 908), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (906, 908), False, 'from django.db import migrations, models\n'), ((935, 953), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (951, 953), False, 'from django.db import migrations, models\n'), ((984, 1016), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1000, 1016), False, 'from django.db import migrations, models\n'), ((1045, 1076), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1061, 1076), False, 'from django.db import migrations, models\n'), ((1115, 1147), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1131, 1147), False, 
'from django.db import migrations, models\n'), ((1261, 1283), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1281, 1283), False, 'from django.db import migrations, models\n'), ((1320, 1342), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1340, 1342), False, 'from django.db import migrations, models\n'), ((1374, 1421), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""directory.Category"""'}), "(to='directory.Category')\n", (1396, 1421), False, 'from django.db import migrations, models\n'), ((1551, 1644), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1567, 1644), False, 'from django.db import migrations, models\n'), ((1672, 1703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1688, 1703), False, 'from django.db import migrations, models\n'), ((1735, 1767), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1751, 1767), False, 'from django.db import migrations, models\n'), ((1795, 1827), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1811, 1827), False, 'from django.db import migrations, models\n'), ((1863, 1885), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1883, 1885), False, 'from django.db import migrations, models\n'), ((1922, 1944), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1942, 1944), False, 'from django.db import migrations, models\n'), ((1977, 2044), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Admin'), (0, 'User')]", 'default': '(0)'}), "(choices=[(1, 'Admin'), (0, 'User')], default=0)\n", (1996, 2044), False, 
'from django.db import migrations, models\n')] |
import math
import unittest
from py_range_parse import parse_range
class ParseTest(unittest.TestCase):
def test_parse_equal_values(self):
parsed_range = parse_range("[-inf..-inf]")
self.assertIn(-math.inf, parsed_range)
def test_parse_spaces(self):
parsed_range = parse_range("[ -8.3 .. +18.3 ]")
self.assertIn(-8.3, parsed_range)
self.assertIn(18.3, parsed_range)
def test_parse_all_values(self):
parsed_range = parse_range("[-inf..∞]")
self.assertIn(-math.inf, parsed_range)
self.assertIn(math.inf, parsed_range)
def test_parse_range_negative(self):
parsed_range = parse_range("[-5..-2]")
self.assertEqual(parsed_range.start, -5)
self.assertEqual(parsed_range.end, -2)
def test_parse_range_negative_inverted(self):
parsed_range = parse_range("[5..-2]")
self.assertEqual(parsed_range.start, -2)
self.assertEqual(parsed_range.end, 5)
def test_float_range_contains(self):
parsed_range = parse_range("[1.0..4.3]")
self.assertIn(1, parsed_range)
self.assertIn(1.0, parsed_range)
self.assertIn(2, parsed_range)
self.assertIn(2.0, parsed_range)
self.assertIn(2.1, parsed_range)
self.assertIn(4, parsed_range)
self.assertIn(4.3, parsed_range)
def test_int_range_contains(self):
parsed_range = parse_range("[1..4]")
self.assertIn(1, parsed_range)
self.assertIn(1.0, parsed_range)
self.assertIn(2, parsed_range)
self.assertIn(2.0, parsed_range)
self.assertNotIn(2.1, parsed_range)
self.assertIn(4, parsed_range)
self.assertIn(4.0, parsed_range)
def test_int_range_exclude(self):
parsed_range = parse_range("]1..4[")
assert parsed_range is not None
def test_int_range_inf(self):
parsed_range = parse_range("]-inf..4[")
self.assertNotIn(-math.inf, parsed_range)
self.assertIn(-10000000, parsed_range)
def test_int_range_inf_inverted(self):
parsed_range = parse_range("]inf..4[")
self.assertNotIn(-math.inf, parsed_range)
self.assertNotIn(3, parsed_range)
self.assertNotIn(4, parsed_range)
self.assertIn(4.000000001, parsed_range)
self.assertIn(10000000, parsed_range)
def test_int_inclusion_inverted(self):
parsed_range = parse_range("]2..1]")
self.assertNotIn(0, parsed_range)
self.assertIn(1, parsed_range)
self.assertNotIn(2, parsed_range)
self.assertNotIn(3, parsed_range)
| [
"py_range_parse.parse_range"
] | [((169, 196), 'py_range_parse.parse_range', 'parse_range', (['"""[-inf..-inf]"""'], {}), "('[-inf..-inf]')\n", (180, 196), False, 'from py_range_parse import parse_range\n'), ((301, 333), 'py_range_parse.parse_range', 'parse_range', (['"""[ -8.3 .. +18.3 ]"""'], {}), "('[ -8.3 .. +18.3 ]')\n", (312, 333), False, 'from py_range_parse import parse_range\n'), ((479, 503), 'py_range_parse.parse_range', 'parse_range', (['"""[-inf..∞]"""'], {}), "('[-inf..∞]')\n", (490, 503), False, 'from py_range_parse import parse_range\n'), ((662, 685), 'py_range_parse.parse_range', 'parse_range', (['"""[-5..-2]"""'], {}), "('[-5..-2]')\n", (673, 685), False, 'from py_range_parse import parse_range\n'), ((856, 878), 'py_range_parse.parse_range', 'parse_range', (['"""[5..-2]"""'], {}), "('[5..-2]')\n", (867, 878), False, 'from py_range_parse import parse_range\n'), ((1039, 1064), 'py_range_parse.parse_range', 'parse_range', (['"""[1.0..4.3]"""'], {}), "('[1.0..4.3]')\n", (1050, 1064), False, 'from py_range_parse import parse_range\n'), ((1412, 1433), 'py_range_parse.parse_range', 'parse_range', (['"""[1..4]"""'], {}), "('[1..4]')\n", (1423, 1433), False, 'from py_range_parse import parse_range\n'), ((1783, 1804), 'py_range_parse.parse_range', 'parse_range', (['"""]1..4["""'], {}), "(']1..4[')\n", (1794, 1804), False, 'from py_range_parse import parse_range\n'), ((1903, 1927), 'py_range_parse.parse_range', 'parse_range', (['"""]-inf..4["""'], {}), "(']-inf..4[')\n", (1914, 1927), False, 'from py_range_parse import parse_range\n'), ((2092, 2115), 'py_range_parse.parse_range', 'parse_range', (['"""]inf..4["""'], {}), "(']inf..4[')\n", (2103, 2115), False, 'from py_range_parse import parse_range\n'), ((2412, 2433), 'py_range_parse.parse_range', 'parse_range', (['"""]2..1]"""'], {}), "(']2..1]')\n", (2423, 2433), False, 'from py_range_parse import parse_range\n')] |
import importlib
import ipaddress
classes_cache = {}
instance_cache = {}
def get_class(type:str):
cls = classes_cache.get(type)
if cls:
return cls
raise TypeError(f'wrong type {type}. not subclass of BaseType')
def get_instance(type:str,**option):
key = ','.join('{}={}'.format(k,v) for k, v in sorted(option.items()))
key = '{}|{}'.format(type,key)
instance = instance_cache.get(key)
if instance:
return instance
obj = get_class(type)(**option)
instance_cache[key] = obj
return obj
def inject_class_cache():
mod = globals().get('__package__')
print(mod)
for k,v in globals().items():
if type(v) == type and k != 'BaseType' and issubclass(v,BaseType):
classes_cache[k] = v
classes_cache['.'.join((mod,k))] = v
print(classes_cache)
class BaseType:
def __init__(self,**option):
self.option = option
def __getattr__(self, item):
return self.option.get(item)
def stringfy(self,value):
raise NotImplementedError
def destringfy(self,value):
return NotImplementedError
class Int(BaseType):
def stringfy(self, value):
val = int(value)
min = self.min
if min and val < min:
raise ValueError('too small')
max = self.val
if max and val > max:
raise ValueError('too big')
return str(int(value))
def destringfy(self, value):
return value
class Ip(BaseType):
def stringfy(self, value):
prefix = self.prefix
if prefix and not str(value).startswith(prefix):
raise ValueError(f'{value} is not start with {prefix}')
return str(ipaddress.ip_address(value))
def destringfy(self, value):
return value
inject_class_cache() | [
"ipaddress.ip_address"
] | [((1703, 1730), 'ipaddress.ip_address', 'ipaddress.ip_address', (['value'], {}), '(value)\n', (1723, 1730), False, 'import ipaddress\n')] |
import collections
import heapq
from typing import List
class Solution:
def networkDelayTime1(self, times: List[List[int]], N: int, K: int) -> int:
"""
DFS
"""
# build adjacent list
graph = collections.defaultdict(list)
for u, v, w in times:
graph[u].append((v, w))
dist = {node: float('inf') for node in range(1, N + 1)}
def dfs(node, time):
if time >= dist[node]:
return
dist[node] = time
for neib, t in sorted(graph[node]):
dfs(neib, time + t)
dfs(K, 0)
res = max(dist.values())
return res if res < float('inf') else -1
def networkDelayTime2(self, times: List[List[int]], N: int, K: int) -> int:
"""
Dijkstra
"""
# build adjacent list
graph = collections.defaultdict(list)
for u, v, w in times:
graph[u].append((v, w))
# store all nodes and its distance to K we met
heap = [(0, K)]
# keep track of distance of every nodes to K
dist = {}
while heap:
# break if all distances has been determined (unnecessary)
if len(dist) == N:
break
d, node = heapq.heappop(heap)
# go for next if distance has been determined before (unnecessary)
if node in dist:
continue
dist[node] = d
# check neighbors of node
for neib, d2 in graph[node]:
# pass determined neighbors
if neib not in dist:
heapq.heappush(heap, (d + d2, neib))
# if not all nodes are visited return -1 else return max value
return max(dist.values()) if len(dist) == N else -1
| [
"heapq.heappop",
"heapq.heappush",
"collections.defaultdict"
] | [((240, 269), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (263, 269), False, 'import collections\n'), ((876, 905), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (899, 905), False, 'import collections\n'), ((1290, 1309), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (1303, 1309), False, 'import heapq\n'), ((1650, 1686), 'heapq.heappush', 'heapq.heappush', (['heap', '(d + d2, neib)'], {}), '(heap, (d + d2, neib))\n', (1664, 1686), False, 'import heapq\n')] |
#!/usr/bin/env python3
import argparse
import os
import re
import shlex
import subprocess
from collections import defaultdict, namedtuple
######################################################################
# Common actions that may be reused for multiple devices.
#
# Need help? Try these:
# * xinput --help
# * man xinput
# * man libinput
# * man synaptics
# * man xkeyboard-config
# * /var/log/Xorg.0.log
# * https://wiki.archlinux.org/index.php/Touchpad_Synaptics
# * https://wiki.archlinux.org/index.php/Mouse_acceleration#Disabling_mouse_acceleration
# * http://xorg.freedesktop.org/wiki/Development/Documentation/PointerAcceleration/#accelerationprofileinteger
FLAT_ACCEL_PROFILE = [
['set-prop', 'libinput Accel Profile Enabled', 0, 1],
]
MIDDLE_CLICK_EMULATION = [
['set-prop', 'libinput Middle Emulation Enabled', 1],
]
NATURAL_SCROLLING = [
['set-prop', 'libinput Natural Scrolling Enabled', 1],
# _______
# / | | \ Scroll:
# | 1 |2| 3 | 4
# | |_| | 6<@>7
# | | 5
# []9 Forward|
# | |
# []8 Back |
# | |
# \_______/
# Inverting horizontal scroll buttons: 6 and 7
# But this has no effect in Chrome. :(
# https://gitlab.freedesktop.org/libinput/libinput/issues/195
# https://crbug.com/913403
# ['set-button-map', 1, 2, 3, 4, 5, 7, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
# Side buttons are now middle-click (8,9 = 2)
# ['set-button-map', 1, 2, 3, 4, 5, 6, 7, 2, 2],
# Side buttons are now horizontal scroll (8,9 = 6,7)
# ['set-button-map', 1, 2, 3, 4, 5, 6, 7, 6, 7],
]
TOUCHPAD_COMMON_SETTINGS = [
# Vertical and horizontal scrolling.
['set-prop', 'Synaptics Two-Finger Scrolling', 1, 1],
['set-prop', 'Synaptics Circular Scrolling', 0],
['set-prop', 'Synaptics Edge Scrolling', 0, 0, 0],
# I wish I could find good palm detection settings.
['set-prop', 'Synaptics Palm Detection', 0],
# This is already the defaut.
['set-prop', 'Synaptics Locked Drags', 0],
# This is already the defaut. AKA TapAndDragGesture.
['set-prop', 'Synaptics Gestures', 1],
# min, max, accel, <deprecated>
# ['set-prop', 'Synaptics Move Speed', 1.00, 1.75, 0.053763, 0],
# ['set-prop', 'Synaptics Move Speed', 1.00, 1.50, 0.05, 0],
['set-prop', 'Synaptics Move Speed', 1.0, 1.0, 0.0, 0],
# http://xorg.freedesktop.org/wiki/Development/Documentation/PointerAcceleration/#accelerationprofileinteger
# Defaults:
# ['set-prop', 'Device Accel Profile', 1],
# ['set-prop', 'Device Accel Constant Deceleration', 2.5],
# ['set-prop', 'Device Accel Adaptive Deceleration', 1.000000],
# ['set-prop', 'Device Accel Velocity Scaling', 12.500000],
# Profiles:
# -1. none
# 1. device-dependent
# 2. polynomial
# 3. smooth linear
# 4. simple
# 5. power
# 6. linear
# 7. limited
['set-prop', 'Device Accel Profile', -1],
# Tweaking the touchpad cursor speed.
['set-prop', 'Device Accel Constant Deceleration', 1.25],
]
# x_range/y_range values are extracted from Xorg.0.log.
# It seems there is no other way to read them.
# https://wiki.archlinux.org/index.php/Touchpad_Synaptics
def TOUCHPAD_MINIMUM_EDGES(x_range, y_range):
return [
# left, right, top, bottom.
# ['set-prop', 'Synaptics Edges', 130, 3130, 96, 1697],
['set-prop', 'Synaptics Edges', 1, x_range - 1, 1, y_range - 1],
]
def TOUCHPAD_EDGES_AND_SOFT_BUTTONS(x_range, y_range, x_perc1=40, x_perc2=60, y_perc=80):
x_pos1 = round(x_range * x_perc1 / 100)
x_pos2 = round(x_range * x_perc2 / 100)
y_pos = round(y_range * y_perc / 100)
return [
# left, right, top, bottom.
['set-prop', 'Synaptics Edges', 1, x_pos2, 1, y_pos],
# Setting clickpad button regions.
# This touchpad has only one button under the entire touch surface.
# This command configures the clicking area such as:
#
# |0 pos1 pos2 (x_range) 3260|
# |0% 40% 60% 100%|
# |-------------------+---------+-------------------|1470 (y_pos)
# | | | |
# | Left | Middle | Right |
# '-----------------------------------------------' 1793 (y_range)
#
# right button (left right top bottom) left button (left right top bottom)
[
'set-prop', 'Synaptics Soft Button Areas',
x_pos2, 0, y_pos, 0,
x_pos1, x_pos2, y_pos, 0
],
]
######################################################################
# Rules, mapping device names to actions.
rules_list = [
# Format:
# (
# 'keyboard or pointer', # Device type as string
# ['Device foo', 'Device bar'], # List of device names
# [ # List of "actions"
# ['set-prop', 'foo', 1, 2, 3],
# ['setxkbmap', 'foo', 'bar'].
# ],
# )
# Mouse devices.
(
'pointer',
[
'Microsoft Microsoft Basic Optical Mouse v2.0',
'Corsair Gaming HARPOON RGB Mouse',
'Logitech USB Laser Mouse',
'Logitech M705',
'Logitech MX Master 2S',
'MX Master 2S',
'MX Master 2S Mouse',
'PixArt Dell MS116 USB Optical Mouse',
],
[
*FLAT_ACCEL_PROFILE,
# *NATURAL_SCROLLING,
]
),
(
'pointer',
[
# This is actually a touchpad, but behaves like a mouse.
# The touchpad is not configurable without the proprietary Logitech tool.
# Pinching gestures generate Ctrl+mouse-scroll input.
'Logitech K400 Plus',
],
[
*FLAT_ACCEL_PROFILE,
*NATURAL_SCROLLING,
*MIDDLE_CLICK_EMULATION,
]
),
# Keyboard devices.
(
'keyboard',
[
'AT Translated Set 2 keyboard', # Asus X450C laptop
],
[
# First to clear the previously set options
['setxkbmap', '-option'],
[
'setxkbmap', 'us', 'altgr-intl',
'caps:backspace',
'numpad:microsoft',
'compose:menu',
],
]
),
(
'keyboard',
[
'Logitech K400 Plus',
],
[
# First to clear the previously set options
['setxkbmap', '-option'],
[
'setxkbmap', 'us', 'altgr-intl',
'compose:caps',
],
]
),
(
'keyboard',
[
# idVendor=05ac, idProduct=026c
'Apple Inc. Magic Keyboard with Numeric Keypad',
],
[
# https://unix.stackexchange.com/q/86933
# First to clear the previously set options
['setxkbmap', '-option'],
# Then to set the options. (man xkeyboard-config)
# * Capslock is another backspace.
# * Bottom-left modifiers are: Ctrl, Super, Alt
# * Bottom-right modifiers are: AltGr, Compose, Ctrl
# * F13, F14, F15 are PrtScn/SysRq, Scroll Lock, Pause/Break
[
'setxkbmap', 'us', 'altgr-intl',
#'caps:backspace',
'numpad:microsoft',
'compose:rwin',
#'altwin:swap_alt_win',
'apple:alupckeys', # Emulate PC keys
'lv3:ralt_switch', # Third level, useful for deadkeys.
],
# fn is in place of the usual Insert key.
# Thus, remapping Eject to Insert
# xmodmap -e 'keysym XF86Eject = Insert NoSymbol Insert'
]
),
# Touchpad devices.
(
'pointer',
[
# Asus X450C laptop.
'ETPS/2 Elantech Touchpad',
# (II) config/udev: Adding input device ETPS/2 Elantech Touchpad (/dev/input/event9)
# (**) ETPS/2 Elantech Touchpad: Applying InputClass "evdev touchpad catchall"
# (**) ETPS/2 Elantech Touchpad: Applying InputClass "touchpad catchall"
# (**) ETPS/2 Elantech Touchpad: Applying InputClass "Default clickpad buttons"
# (II) LoadModule: "synaptics"
# (II) Loading /usr/lib/xorg/modules/input/synaptics_drv.so
# (II) Module synaptics: vendor="X.Org Foundation"
# compiled for 1.14.2, module version = 1.7.1
# Module class: X.Org XInput Driver
# ABI class: X.Org XInput driver, version 19.1
# (II) Using input driver 'synaptics' for 'ETPS/2 Elantech Touchpad'
# (**) ETPS/2 Elantech Touchpad: always reports core events
# (**) Option "Device" "/dev/input/event9"
# (II) synaptics: ETPS/2 Elantech Touchpad: found clickpad property
# (--) synaptics: ETPS/2 Elantech Touchpad: x-axis range 0 - 3260 (res 32)
# (--) synaptics: ETPS/2 Elantech Touchpad: y-axis range 0 - 1793 (res 32)
# (--) synaptics: ETPS/2 Elantech Touchpad: pressure range 0 - 255
# (--) synaptics: ETPS/2 Elantech Touchpad: finger width range 0 - 15
# (--) synaptics: ETPS/2 Elantech Touchpad: buttons: left double triple
# (--) synaptics: ETPS/2 Elantech Touchpad: Vendor 0x2 Product 0xe
# (**) Option "SoftButtonAreas" "50% 0 82% 0 0 0 0 0"
# (--) synaptics: ETPS/2 Elantech Touchpad: touchpad found
# (**) ETPS/2 Elantech Touchpad: always reports core events
# (**) Option "config_info" "udev:/sys/devices/platform/i8042/serio4/input/input9/event9"
# (II) XINPUT: Adding extended input device "ETPS/2 Elantech Touchpad" (type: TOUCHPAD, id 12)
# (**) synaptics: ETPS/2 Elantech Touchpad: (accel) MinSpeed is now constant deceleration 2.5
# (**) synaptics: ETPS/2 Elantech Touchpad: (accel) MaxSpeed is now 1.75
# (**) synaptics: ETPS/2 Elantech Touchpad: (accel) AccelFactor is now 0.054
# (**) ETPS/2 Elantech Touchpad: (accel) keeping acceleration scheme 1
# (**) ETPS/2 Elantech Touchpad: (accel) acceleration profile 1
# (**) ETPS/2 Elantech Touchpad: (accel) acceleration factor: 2.000
# (**) ETPS/2 Elantech Touchpad: (accel) acceleration threshold: 4
# (--) synaptics: ETPS/2 Elantech Touchpad: touchpad found
# (II) config/udev: Adding input device ETPS/2 Elantech Touchpad (/dev/input/mouse0)
# (**) ETPS/2 Elantech Touchpad: Ignoring device from InputClass "touchpad ignore duplicates"
],
[
*TOUCHPAD_COMMON_SETTINGS,
*TOUCHPAD_EDGES_AND_SOFT_BUTTONS(3260, 1793, 40, 60, 82),
# Natural scrolling, like Mac OS X, reverse of classical scrolling.
['set-prop', 'Synaptics Scrolling Distance', -74, -74],
# Tapping settings:
# Right top tap
# Right bottom tap
# Left top tap
# Left bottom tap
# 1-finger tap: left click
# 2-finger tap: right click
# 3-finger tap: middle click
['set-prop', 'Synaptics Tap Action', 0, 3, 0, 0, 1, 3, 2],
# Clicking settings (for a clickable touchpad, without dedicated buttons):
# 1-finger click: left click
# 2-finger click: right click
# 3-finger click: middle click
['set-prop', 'Synaptics Click Action', 1, 3, 2],
]
),
(
'pointer',
[
# Dell E7270 laptop.
'AlpsPS/2 ALPS DualPoint TouchPad',
'AlpsPS/2 ALPS GlidePoint',
# (II) config/udev: Adding input device AlpsPS/2 ALPS DualPoint TouchPad (/dev/input/event7)
# (**) AlpsPS/2 ALPS DualPoint TouchPad: Applying InputClass "evdev touchpad catchall"
# (**) AlpsPS/2 ALPS DualPoint TouchPad: Applying InputClass "libinput touchpad catchall"
# (**) AlpsPS/2 ALPS DualPoint TouchPad: Applying InputClass "touchpad catchall"
# (**) AlpsPS/2 ALPS DualPoint TouchPad: Applying InputClass "Default clickpad buttons"
# (II) LoadModule: "synaptics"
# (II) Loading /usr/lib/xorg/modules/input/synaptics_drv.so
# (II) Module synaptics: vendor="X.Org Foundation"
# compiled for 1.19.3, module version = 1.9.0
# Module class: X.Org XInput Driver
# ABI class: X.Org XInput driver, version 24.1
# (II) Using input driver 'synaptics' for 'AlpsPS/2 ALPS DualPoint TouchPad'
# (**) AlpsPS/2 ALPS DualPoint TouchPad: always reports core events
# (**) Option "Device" "/dev/input/event7"
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: x-axis range 0 - 2432 (res 34)
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: y-axis range 0 - 1280 (res 34)
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: pressure range 0 - 127
# (II) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: device does not report finger width.
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: buttons: left right middle double triple
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: Vendor 0x2 Product 0x8
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: invalid finger width range. defaulting to 0 - 15
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: touchpad found
# (**) AlpsPS/2 ALPS DualPoint TouchPad: always reports core events
# (**) Option "config_info" "udev:/sys/devices/platform/i8042/serio1/input/input6/event7"
# (II) XINPUT: Adding extended input device "AlpsPS/2 ALPS DualPoint TouchPad" (type: TOUCHPAD, id 14)
# (**) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: (accel) MinSpeed is now constant deceleration 2.5
# (**) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: (accel) MaxSpeed is now 1.75
# (**) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: (accel) AccelFactor is now 0.073
# (**) AlpsPS/2 ALPS DualPoint TouchPad: (accel) keeping acceleration scheme 1
# (**) AlpsPS/2 ALPS DualPoint TouchPad: (accel) acceleration profile 1
# (**) AlpsPS/2 ALPS DualPoint TouchPad: (accel) acceleration factor: 2.000
# (**) AlpsPS/2 ALPS DualPoint TouchPad: (accel) acceleration threshold: 4
# (--) synaptics: AlpsPS/2 ALPS DualPoint TouchPad: touchpad found
# (II) config/udev: Adding input device AlpsPS/2 ALPS DualPoint TouchPad (/dev/input/mouse1)
# (**) AlpsPS/2 ALPS DualPoint TouchPad: Ignoring device from InputClass "touchpad ignore duplicates"
# (II) config/udev: Adding input device AlpsPS/2 ALPS DualPoint Stick (/dev/input/event6)
# (**) AlpsPS/2 ALPS DualPoint Stick: Applying InputClass "evdev pointer catchall"
# (**) AlpsPS/2 ALPS DualPoint Stick: Applying InputClass "trackpoint catchall"
# (**) AlpsPS/2 ALPS DualPoint Stick: Applying InputClass "libinput pointer catchall"
# (II) Using input driver 'libinput' for 'AlpsPS/2 ALPS DualPoint Stick'
# (**) AlpsPS/2 ALPS DualPoint Stick: always reports core events
# (**) Option "Device" "/dev/input/event6"
# (**) Option "_source" "server/udev"
# (II) input device 'AlpsPS/2 ALPS DualPoint Stick', /dev/input/event6 is tagged by udev as: Mouse Pointingstick
# (II) input device 'AlpsPS/2 ALPS DualPoint Stick', /dev/input/event6 is a pointer caps
# (**) Option "config_info" "udev:/sys/devices/platform/i8042/serio1/input/input8/event6"
# (II) XINPUT: Adding extended input device "AlpsPS/2 ALPS DualPoint Stick" (type: MOUSE, id 15)
# (**) Option "AccelerationScheme" "none"
# (**) AlpsPS/2 ALPS DualPoint Stick: (accel) selected scheme none/0
# (**) AlpsPS/2 ALPS DualPoint Stick: (accel) acceleration factor: 2.000
# (**) AlpsPS/2 ALPS DualPoint Stick: (accel) acceleration threshold: 4
# (II) input device 'AlpsPS/2 ALPS DualPoint Stick', /dev/input/event6 is tagged by udev as: Mouse Pointingstick
# (II) input device 'AlpsPS/2 ALPS DualPoint Stick', /dev/input/event6 is a pointer caps
# (II) config/udev: Adding input device AlpsPS/2 ALPS DualPoint Stick (/dev/input/mouse0)
],
[
*TOUCHPAD_COMMON_SETTINGS,
*TOUCHPAD_MINIMUM_EDGES(2432, 1280),
# Natural scrolling, like Mac OS X, reverse of classical scrolling.
['set-prop', 'Synaptics Scrolling Distance', -54, -54],
# Tapping settings:
# Right top tap
# Right bottom tap
# Left top tap
# Left bottom tap
# 1-finger tap: left click
# 2-finger tap: right click
# 3-finger tap: middle click
['set-prop', 'Synaptics Tap Action', 0, 0, 0, 0, 1, 3, 2],
]
),
# Do-nothing rules.
# They exist just to avoid reporting these devices as unrecognized.
(
'pointer',
[
'Virtual core XTEST pointer',
],
[]
),
(
'keyboard',
[
'Virtual core XTEST keyboard',
'Power Button',
'Asus Wireless Radio Control',
'Video Bus',
'Sleep Button',
'USB Camera: USB Camera',
'Asus WMI hotkeys',
'Integrated_Webcam_HD: Integrate',
'Dell WMI hotkeys',
'DELL Wireless hotkeys',
'MX Master 2S',
'MX Master 2S Keyboard',
'Logitech MX Master 2S',
'Yubico Yubikey 4 OTP+U2F',
],
[]
),
]
######################################################################
# No need to modify anything below this line.
# Custom lightweight object type.
XInputDevice = namedtuple('XInputDevice', 'type id name')
# Compiling the regex during the module load.
xinput_regex = re.compile(r'^. ↳ (.*[^ ]) *\tid=([0-9]+)\t\[slave +(pointer|keyboard) +\([0-9]+\)\]')
xinput_ignored_regex = re.compile(r'^. Virtual core (pointer|keyboard) +\tid=([0-9]+)\t\[master +(pointer|keyboard) +\([0-9]+\)\]')
def rules_as_dict():
    '''Index rules_list by (device type, device name) for fast lookup.

    The result can be used like this:
        d = rules_as_dict()
        action_list = d['pointer', 'Mouse ABCXYZ']        # may raise KeyError!
        action_list = d.get(('pointer', 'Mouse ABCXYZ'), [])  # safer
    Where action_list is a list of xinput commands (each a list of strings).
    '''
    mapping = defaultdict(list)
    for dev_type, dev_names, actions in rules_list:
        # Every token is stringified up front so callers get ready-to-run argv lists.
        stringified = []
        for action in actions:
            stringified.append([str(token) for token in action])
        for dev_name in dev_names:
            mapping[dev_type, dev_name].extend(stringified)
    # Freeze the defaultdict: missing keys now raise KeyError like a plain dict.
    mapping.default_factory = None
    return mapping
def xinput_list():
    '''Generator yielding one XInputDevice per slave line of `xinput list`.
    '''
    proc = subprocess.run(
        ['xinput', 'list'],
        check=True,
        # capture_output=True would need Python 3.7; the PIPE pair is equivalent.
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    for line in proc.stdout.decode('utf8', 'replace').splitlines():
        slave = xinput_regex.match(line)
        if slave:
            yield XInputDevice(name=slave.group(1), id=slave.group(2), type=slave.group(3))
            continue
        # Master "Virtual core" lines are silently skipped; anything else is suspicious.
        if not xinput_ignored_regex.match(line):
            print('Warning! Unrecognized line from `xinput list` output: {0!r}'.format(line))
def build_cmdline_args(device, action):
    '''Build the command-line (a list of strings) to execute for one action.

    The device id is injected right after the tool selector.  Actions whose
    first element is 'setxkbmap' are routed to the setxkbmap tool; every
    other action becomes an `xinput <subcommand> <id> ...` invocation.

    Args:
        device: an object with an `id` attribute (e.g. XInputDevice).
        action: a non-empty sequence of string tokens.
    Returns:
        The full argv list, ready for subprocess.run().
    '''
    (tool, *rest) = action
    # Renamed from `id` to avoid shadowing the builtin of the same name.
    device_id = str(device.id)
    if tool == 'setxkbmap':
        return ['setxkbmap', '-device', device_id, *rest]
    else:
        return ['xinput', tool, device_id, *rest]
def parse_arguments(argv=None):
    '''Parse the command-line options of the script.

    Args:
        argv: optional explicit argument list.  Defaults to None, which makes
              argparse fall back to sys.argv[1:], so existing zero-argument
              callers behave exactly as before.
    Returns:
        The argparse.Namespace with a boolean `verbose` attribute.
    '''
    parser = argparse.ArgumentParser(
        description='Uses xinput to configure available devices to my preferences'
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='verbose',
        help='Prints each executed command for each device',
    )
    return parser.parse_args(argv)
def main():
    '''Apply the configured xinput/setxkbmap actions to every detected device.'''
    options = parse_arguments()
    rules = rules_as_dict()
    # Setting up the 'C' locale to prevent any issue related to localization
    # or translation (xinput output parsing relies on English text).
    os.environ['LC_ALL'] = 'C'
    # For each detected device...
    for device in xinput_list():
        actions = rules.get((device.type, device.name), None)
        if actions is None:
            # Device not mentioned in rules_list at all.
            print('Ignoring {0.type} device {0.id} ({0.name})'.format(device))
        else:
            is_this_the_first_action = True
            if options.verbose and len(actions) == 0:
                print('There are no actions for {0.type} device {0.id} ({0.name})'.format(device))
            for action in actions:
                if options.verbose and is_this_the_first_action:
                    # Print the device header only once, before its first action.
                    print('Setting up {0.type} device {0.id} ({0.name})'.format(device))
                    is_this_the_first_action = False
                # Preparing the command-line.
                args = build_cmdline_args(device=device, action=action)
                # The shell-friendly string version of the command-line.
                # Only used for debugging purposes.
                cmdline = ' '.join(shlex.quote(arg) for arg in args)
                if options.verbose:
                    print('Running: {0}'.format(cmdline))
                # Running! This is the main purpose of this whole script! :)
                p = subprocess.run(args)
                if p.returncode != 0:
                    # Non-zero exit is reported but does not abort the remaining actions.
                    print('Warning! Command returned {0}: {1}'.format(p.returncode, cmdline))


if __name__ == '__main__':
    main()
| [
"collections.namedtuple",
"argparse.ArgumentParser",
"re.compile",
"subprocess.run",
"collections.defaultdict",
"shlex.quote"
] | [((18276, 18318), 'collections.namedtuple', 'namedtuple', (['"""XInputDevice"""', '"""type id name"""'], {}), "('XInputDevice', 'type id name')\n", (18286, 18318), False, 'from collections import defaultdict, namedtuple\n'), ((18380, 18483), 're.compile', 're.compile', (['"""^. ↳ (.*[^ ]) *\\\\tid=([0-9]+)\\\\t\\\\[slave +(pointer|keyboard) +\\\\([0-9]+\\\\)\\\\]"""'], {}), "(\n '^. ↳ (.*[^ ]) *\\\\tid=([0-9]+)\\\\t\\\\[slave +(pointer|keyboard) +\\\\([0-9]+\\\\)\\\\]'\n )\n", (18390, 18483), False, 'import re\n'), ((18492, 18615), 're.compile', 're.compile', (['"""^. Virtual core (pointer|keyboard) +\\\\tid=([0-9]+)\\\\t\\\\[master +(pointer|keyboard) +\\\\([0-9]+\\\\)\\\\]"""'], {}), "(\n '^. Virtual core (pointer|keyboard) +\\\\tid=([0-9]+)\\\\t\\\\[master +(pointer|keyboard) +\\\\([0-9]+\\\\)\\\\]'\n )\n", (18502, 18615), False, 'import re\n'), ((19002, 19019), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (19013, 19019), False, 'from collections import defaultdict, namedtuple\n'), ((19519, 19617), 'subprocess.run', 'subprocess.run', (["['xinput', 'list']"], {'check': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['xinput', 'list'], check=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (19533, 19617), False, 'import subprocess\n'), ((20568, 20672), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Uses xinput to configure available devices to my preferences"""'}), "(description=\n 'Uses xinput to configure available devices to my preferences')\n", (20591, 20672), False, 'import argparse\n'), ((22298, 22318), 'subprocess.run', 'subprocess.run', (['args'], {}), '(args)\n', (22312, 22318), False, 'import subprocess\n'), ((22072, 22088), 'shlex.quote', 'shlex.quote', (['arg'], {}), '(arg)\n', (22083, 22088), False, 'import shlex\n')] |
import hashlib
from os import mkdir, rename
import re
import requests
from bs4 import BeautifulSoup as bs
import pdfkit
from cred import DOWNLOADS
# Base of the public ranking pages on the UniMi student portal.
BASE_URL = "https://studente.unimi.it/graduatorie/selezioneCorso/it/"
# Landing page listing all courses; every scrape starts from here.
ENTRY = "P.html"
def find_pdf(data):
    """Extract [label, href-suffix] pairs for the PDF links of a course page.

    The first 18 characters of each href are dropped -- presumably a fixed
    URL prefix shared by all links; TODO confirm against the live markup.
    """
    soup = bs(data, "lxml")
    return [
        [group.li.a.string.strip(), group.li.a["href"][18:]]
        for group in soup.find_all("ul", class_="list-group")
    ]
def find_course(data):
    """Return the stripped text of the first <span> containing "Corso", or None."""
    for span in bs(data, "lxml").find_all("span"):
        text = span.string
        if text and "Corso" in text:
            return text.strip()
    return None
def find_links(data):
    """Collect the href of the first-cell anchor of every row in the page's <tbody>."""
    body = bs(data, "lxml").find("tbody")
    return [row.td.a["href"] for row in body.find_all("tr")]
def download_pdfs(sess, links):
    """Download each linked PDF into DOWNLOADS, prefixing names with an MD5 hash.

    Args:
        sess: a requests.Session used for the HTTP GETs.
        links: iterable of [name, relative_href] pairs (as produced by find_pdf).
    Returns:
        List of the filenames actually written; failed downloads are skipped.
    """
    scraped = []
    try:
        mkdir(DOWNLOADS)
    except FileExistsError:
        pass  # directory left over from a previous run
    # Iterate the pairs directly instead of indexing via range(len(...)).
    for name, href in links:
        resp = sess.get("https://studente.unimi.it/graduatorie/" + href)
        if resp.status_code != 200:
            continue  # skip broken links rather than aborting the whole batch
        # The content hash prefix keeps distinct documents with the same
        # display name from overwriting each other.
        filename = hashlib.md5(resp.content).hexdigest() + '_' + name
        with open(f"{DOWNLOADS}/{filename}", "wb") as fd:
            fd.write(resp.content)
        scraped.append(filename)
    return scraped
def generate_rankings(data):
    """Render the ranking table of a course page to a PDF in DOWNLOADS.

    Parses the course name and publication date out of the page's <span>
    elements, collects the (position, student) rows from the <tbody>, sorts
    them by position and writes a minimal HTML table converted to PDF.

    Returns:
        The generated PDF filename (relative to DOWNLOADS).
    """
    try:
        mkdir(DOWNLOADS)
    except FileExistsError:
        pass  # directory already exists
    ris = "<html><body><table border='1px solid'><tr><td colspan=2><b>"
    students = []
    course_name = ""
    pub_date = ""
    soup = bs(data, "lxml")
    spans = soup.find_all("span")
    tbody = soup.find("tbody")
    # Find course name: the text after "in " of the first matching <span>.
    for span in spans:
        if span.string and "Corso " in span.string:
            course_name = re.findall("(?<=in ).*", span.string.strip())[0]
            ris += course_name + "</b></td></tr>"
            break
    # Find publish date (no break: the last matching <span> wins, as before).
    for span in spans:
        if span.string and "Data di" in span.string:
            pub_date = re.findall(r"\d+/\d+/\d+", span.string.strip())[0]
            pub_date = pub_date.replace("/", "-")
    # Scrape rankings: rows without a position number are skipped.
    for row in tbody.find_all("tr"):
        cols = row.find_all("td")
        if cols[1].span.string is None:
            continue
        students.append((int(cols[1].span.string.strip()), cols[0].a.string.strip()))
    # Stable O(n log n) sort by position, replacing the original hand-rolled
    # O(n^2) selection sort (which did not guarantee tie order anyway).
    students.sort(key=lambda entry: entry[0])
    for student in students:
        ris += "<tr>"
        ris += f"<td>{student[0]}</td>"
        ris += f"<td>{student[1]}</td>"
        ris += "</tr>"
    ris += "</table></body></html>"
    filename = f"{course_name.replace('/', '')}_{pub_date}.pdf"
    pdfkit.from_string(ris, f"{DOWNLOADS}/" + filename, options={"--log-level": "none"})
    return filename
def scrape():
    """Visit every course page reachable from ENTRY, downloading PDFs and rankings.

    Returns:
        (scraped, rankings_generated): scraped is a list of
        (downloaded-filename-list, course-name) tuples; rankings_generated
        maps course name -> generated ranking PDF filename.
    """
    sess = requests.Session()
    scraped = []
    rankings_generated = {}
    entry_page = sess.get(BASE_URL + ENTRY)
    for link in find_links(entry_page.text):
        page = sess.get(BASE_URL + link)
        if page.status_code != 200:
            continue  # course page unavailable; move on
        pdf_links = find_pdf(page.text)
        course_name = find_course(page.text)
        rankings_generated[course_name] = generate_rankings(page.text)
        scraped.append((download_pdfs(sess, pdf_links), course_name))
    return scraped, rankings_generated
def main():
    """Run the full scrape and dump both result structures to stdout."""
    scraped, rankings = scrape()
    print(scraped)
    print("------")
    print(rankings)
    print("------")
    print("Done")


if __name__ == "__main__":
    main()
| [
"requests.Session",
"hashlib.md5",
"pdfkit.from_string",
"bs4.BeautifulSoup",
"os.mkdir"
] | [((284, 300), 'bs4.BeautifulSoup', 'bs', (['data', '"""lxml"""'], {}), "(data, 'lxml')\n", (286, 300), True, 'from bs4 import BeautifulSoup as bs\n'), ((478, 494), 'bs4.BeautifulSoup', 'bs', (['data', '"""lxml"""'], {}), "(data, 'lxml')\n", (480, 494), True, 'from bs4 import BeautifulSoup as bs\n'), ((662, 678), 'bs4.BeautifulSoup', 'bs', (['data', '"""lxml"""'], {}), "(data, 'lxml')\n", (664, 678), True, 'from bs4 import BeautifulSoup as bs\n'), ((1580, 1596), 'bs4.BeautifulSoup', 'bs', (['data', '"""lxml"""'], {}), "(data, 'lxml')\n", (1582, 1596), True, 'from bs4 import BeautifulSoup as bs\n'), ((2891, 2979), 'pdfkit.from_string', 'pdfkit.from_string', (['ris', "(f'{DOWNLOADS}/' + filename)"], {'options': "{'--log-level': 'none'}"}), "(ris, f'{DOWNLOADS}/' + filename, options={'--log-level':\n 'none'})\n", (2909, 2979), False, 'import pdfkit\n'), ((3023, 3041), 'requests.Session', 'requests.Session', ([], {}), '()\n', (3039, 3041), False, 'import requests\n'), ((869, 885), 'os.mkdir', 'mkdir', (['DOWNLOADS'], {}), '(DOWNLOADS)\n', (874, 885), False, 'from os import mkdir, rename\n'), ((1382, 1398), 'os.mkdir', 'mkdir', (['DOWNLOADS'], {}), '(DOWNLOADS)\n', (1387, 1398), False, 'from os import mkdir, rename\n'), ((1112, 1137), 'hashlib.md5', 'hashlib.md5', (['resp.content'], {}), '(resp.content)\n', (1123, 1137), False, 'import hashlib\n')] |
from infi.systray import SysTrayIcon
from traylert.traylert_crypto import encrypt, decrypt
from win10toast import ToastNotifier
import click
import configparser
import json
import jsonpickle
import requests
import time
import os
from pathlib import Path
def fetch_system_info(config, endpoint_override=False):
    """Fetch system info from the configured endpoint (or an explicit override).

    When AES is enabled in the [CRYPTO] section, the response body is
    jsonpickle-decoded, decrypted with the configured key and parsed as JSON;
    otherwise the raw response text is returned unchanged.
    """
    url = endpoint_override if endpoint_override else config['CLIENT']['endpoint']
    response = requests.get(url)
    if not config['CRYPTO'].getboolean('aes'):
        return response.text
    payload = decrypt(jsonpickle.decode(response.text), config['CRYPTO']['encryption_key'])
    return json.loads(payload)
def do_nothing(sysTrayIcon):
    """Placeholder callback for tray menu entries: intentionally a no-op."""
    return None
@click.command()
@click.option('--config_file', default=False, help='A configuration .ini')
@click.option('--endpoint_override', default='http://127.0.0.1:5000', help='The endpoint to connect to.')
def main(endpoint_override=False, config_file=False):
    """Poll the endpoint forever, toasting new alerts and mirroring state in the tray."""
    # BUG FIX: the original tested `os.path.dirname(__file__) is not ''`,
    # an identity comparison with a literal (a SyntaxWarning on modern
    # CPython and not guaranteed to work); equality is the intended check.
    # The dirname is also computed once instead of three times.
    base_dir = os.path.dirname(__file__)
    # Icon
    if base_dir != '':
        icon = base_dir + '/../data/traylert.ico'
    else:
        icon = '../data/traylert.ico'
    # Config
    config = configparser.ConfigParser()
    if not config_file:
        if base_dir != '':
            config.read(base_dir + '/traylert.ini')
        else:
            config.read('traylert.ini')
    else:
        config.read(config_file)
    toaster = ToastNotifier()
    systray = SysTrayIcon(icon, 'Traylert', None)
    systray.start()
    alerts = [('None', 'None')]
    while True:  # poll until the process is killed
        if not endpoint_override:
            system_info = fetch_system_info(config)
        else:
            system_info = fetch_system_info(config, endpoint_override)
        if system_info['alerts'] != alerts:
            if len(system_info['alerts']) > 0:
                # Toast every alert of the new batch.
                for alert in system_info['alerts']:
                    toaster.show_toast('Traylert', f'{alert[0]}\n{alert[1]}')
            alerts = system_info['alerts']
        # Rebuild the tray menu from the scalar fields of the system info.
        menu_options = []
        for key, value in system_info.items():
            if type(value) != list:
                menu_options.append((f'{key} - {value}', None, do_nothing))
        # SysTrayIcon menus are immutable, so the tray icon is recreated.
        systray.shutdown()
        systray = SysTrayIcon(icon, f'Last Alert: {alerts[-1]}', tuple(menu_options))
        systray.start()
        time.sleep(config.getint('CLIENT', 'refresh_time'))


if __name__ == '__main__':
    main()
"json.loads",
"configparser.ConfigParser",
"click.option",
"win10toast.ToastNotifier",
"jsonpickle.decode",
"requests.get",
"os.path.dirname",
"infi.systray.SysTrayIcon",
"click.command"
] | [((701, 716), 'click.command', 'click.command', ([], {}), '()\n', (714, 716), False, 'import click\n'), ((719, 792), 'click.option', 'click.option', (['"""--config_file"""'], {'default': '(False)', 'help': '"""A configuration .ini"""'}), "('--config_file', default=False, help='A configuration .ini')\n", (731, 792), False, 'import click\n'), ((795, 904), 'click.option', 'click.option', (['"""--endpoint_override"""'], {'default': '"""http://127.0.0.1:5000"""', 'help': '"""The endpoint to connect to."""'}), "('--endpoint_override', default='http://127.0.0.1:5000', help=\n 'The endpoint to connect to.')\n", (807, 904), False, 'import click\n'), ((1135, 1162), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1160, 1162), False, 'import configparser\n'), ((1382, 1397), 'win10toast.ToastNotifier', 'ToastNotifier', ([], {}), '()\n', (1395, 1397), False, 'from win10toast import ToastNotifier\n'), ((1410, 1445), 'infi.systray.SysTrayIcon', 'SysTrayIcon', (['icon', '"""Traylert"""', 'None'], {}), "(icon, 'Traylert', None)\n", (1421, 1445), False, 'from infi.systray import SysTrayIcon\n'), ((360, 402), 'requests.get', 'requests.get', (["config['CLIENT']['endpoint']"], {}), "(config['CLIENT']['endpoint'])\n", (372, 402), False, 'import requests\n'), ((418, 449), 'requests.get', 'requests.get', (['endpoint_override'], {}), '(endpoint_override)\n', (430, 449), False, 'import requests\n'), ((625, 653), 'json.loads', 'json.loads', (['system_info_json'], {}), '(system_info_json)\n', (635, 653), False, 'import json\n'), ((971, 996), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (986, 996), False, 'import os\n'), ((552, 577), 'jsonpickle.decode', 'jsonpickle.decode', (['r.text'], {}), '(r.text)\n', (569, 577), False, 'import jsonpickle\n'), ((1018, 1043), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1033, 1043), False, 'import os\n'), ((1193, 1218), 'os.path.dirname', 'os.path.dirname', 
(['__file__'], {}), '(__file__)\n', (1208, 1218), False, 'import os\n'), ((1246, 1271), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1261, 1271), False, 'import os\n')] |
from django.contrib import messages
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse, reverse_lazy
from django.http.response import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from med_social.views.base import BaseEditView
from .forms import GroupForm
class GroupCreateView(CreateView):
    """Create a new auth Group through GroupForm."""

    template_name = 'ACL/create.html'
    form_class = GroupForm
    model = Group

    def get_form_kwargs(self):
        # Pass the current request through to the form.
        form_kwargs = super(GroupCreateView, self).get_form_kwargs()
        form_kwargs.update(request=self.request)
        return form_kwargs

    def get_success_url(self):
        return reverse('groups:list')
class GroupList(ListView):
    """List the groups visible to the current user, ordered by name.

    Client users see the vendor-less groups; vendor users see their own
    vendor's groups; any other kind of user gets a 404.
    """
    model = Group
    context_object_name = 'groups'
    template_name = 'ACL/list.html'

    def get_queryset(self):
        user = self.request.user
        if user.is_client:
            qs = self.model.objects.filter(vendor=None)
        elif user.is_vendor:
            qs = self.model.objects.filter(vendor=user.vendor)
        else:
            # BUG FIX: Http404() was instantiated but never raised, so `qs`
            # stayed unbound and the next line crashed with UnboundLocalError.
            raise Http404()
        return qs.order_by('name')
class GroupEditView(BaseEditView):
    """Edit or delete a Group, with per-role access restrictions.

    NOTE(review): the edit/delete flow lives in BaseEditView (not visible
    here); the hooks below only customize instance lookup and deletion.
    """
    model_form = GroupForm
    template_name = 'ACL/create.html'
    context_variable = 'group'
    success_url = reverse_lazy('groups:list')
    deleted = False

    def get_instance(self, request, *args, **kwargs):
        # Clients may only touch vendor-less groups; vendor users only their own.
        user = self.request.user
        if user.is_client:
            return get_object_or_404(self.model, pk=self.kwargs['pk'], vendor=None)
        elif user.is_vendor:
            return get_object_or_404(self.model, pk=self.kwargs['pk'], vendor=user.vendor)
        # Any other role is denied outright.
        raise Http404()

    def form_delete(self, form):
        # System-created groups must never be deleted; warn and bounce back
        # to the edit page instead.
        if self.object.kind in [Group.DEFAULT_USER, Group.DEFAULT_ADMIN]:
            messages.warning(
                self.request,
                _('{} is a system created group, it cannot be deleted.'.format(self.object.display_name)),
            )
            return HttpResponseRedirect(reverse('groups:edit', args=(self.object.id,)))
        return super(GroupEditView, self).form_delete(form)

    def save_m2m(self, form):
        # Intentionally a no-op: hook required by BaseEditView's save flow.
        pass
| [
"django.http.response.Http404",
"django.shortcuts.get_object_or_404",
"django.core.urlresolvers.reverse_lazy",
"django.core.urlresolvers.reverse"
] | [((1400, 1427), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""groups:list"""'], {}), "('groups:list')\n", (1412, 1427), False, 'from django.core.urlresolvers import reverse, reverse_lazy\n'), ((800, 822), 'django.core.urlresolvers.reverse', 'reverse', (['"""groups:list"""'], {}), "('groups:list')\n", (807, 822), False, 'from django.core.urlresolvers import reverse, reverse_lazy\n'), ((1781, 1790), 'django.http.response.Http404', 'Http404', ([], {}), '()\n', (1788, 1790), False, 'from django.http.response import HttpResponseRedirect, Http404\n'), ((1582, 1646), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.model'], {'pk': "self.kwargs['pk']", 'vendor': 'None'}), "(self.model, pk=self.kwargs['pk'], vendor=None)\n", (1599, 1646), False, 'from django.shortcuts import get_object_or_404\n'), ((1204, 1213), 'django.http.response.Http404', 'Http404', ([], {}), '()\n', (1211, 1213), False, 'from django.http.response import HttpResponseRedirect, Http404\n'), ((1695, 1766), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.model'], {'pk': "self.kwargs['pk']", 'vendor': 'user.vendor'}), "(self.model, pk=self.kwargs['pk'], vendor=user.vendor)\n", (1712, 1766), False, 'from django.shortcuts import get_object_or_404\n'), ((2120, 2166), 'django.core.urlresolvers.reverse', 'reverse', (['"""groups:edit"""'], {'args': '(self.object.id,)'}), "('groups:edit', args=(self.object.id,))\n", (2127, 2166), False, 'from django.core.urlresolvers import reverse, reverse_lazy\n')] |
#!/usr/bin/env python
""" Tests of Larch Scripts """
import unittest
import time
import ast
import numpy as np
import os
from sys import version_info
from utils import TestCase
from larch import Interpreter
class TestScripts(TestCase):
    '''Smoke tests for the example Larch scripts.'''
    def test_basic_interp(self):
        # Run the interpolation example through the shared Larch session.
        self.runscript('interp.lar', dirname='../examples/basic/')
        # The script must complete without recording any Larch errors.
        assert(len(self.session.get_errors()) == 0)
        # Spot-check one sample of each interpolant; places=3 keeps the
        # comparison coarse enough to tolerate numerical noise.
        self.isNear("y0[1]", 0.48578, places=3)
        self.isNear("y1[1]", 0.81310, places=3)
        self.isNear("y2[1]", 0.41532, places=3)
if __name__ == '__main__':  # pragma: no cover
    # Run every TestCase class through a plain-text runner when executed directly.
    for suite in (TestScripts,):
        suite = unittest.TestLoader().loadTestsFromTestCase(suite)
        unittest.TextTestRunner(verbosity=13).run(suite)
| [
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((658, 679), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (677, 679), False, 'import unittest\n'), ((717, 754), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(13)'}), '(verbosity=13)\n', (740, 754), False, 'import unittest\n')] |
import numpy as np

# Build a 3x5 matrix holding 0..14 and print it together with its shape.
values = np.arange(15)
a = values.reshape((3, 5))
print(a)
print(a.shape)
"numpy.arange"
] | [((23, 36), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (32, 36), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
############################################################
## <NAME> ##
## Copyright (C) 2019-2020 Lauro Sumoy Lab, IGTP, Spain ##
############################################################
"""
Get frequence of reads for each type, variant, etc
"""
## import useful modules
import os
import sys
import re
import time
from io import open
import pandas as pd
from collections import defaultdict
import numpy as np
import random
import argparse
## import my modules
from HCGB import functions
from HCGB.functions import fasta_functions
## get frequencies
def get_freq(given_df, col_list):
    """Return a DataFrame of per-column frequencies (cell value / row 'total').

    Cells that are exactly 0 stay 0, which also sidesteps dividing by a
    zero total on all-zero rows.
    """
    frequencies = pd.DataFrame()
    for mirna, row in given_df.iterrows():
        total = row['total']
        for column in col_list:
            value = row[column]
            frequencies.loc[mirna, column] = 0 if value == 0 else value / total
    return frequencies
#####################################################
# Command-line interface: a frequency table in, a subsampled/shuffled table out.
parser = argparse.ArgumentParser(prog='mod_freq.py', formatter_class=argparse.RawDescriptionHelpFormatter,
                         description='''
mod_freq.py: Modified given frequencies and select isomiRs
Version: 0.1
License: GPLv3
USAGE: python mod_freq.py --freq table.freq.csv --out out_name [--debug]
''', epilog="Original code: JFSanchezHerrero")
#####################################################
parser.add_argument('-f', '--freq', action='store', help='Table with original variant frequencies to modify', required=True)
parser.add_argument('-o', '--out', action='store', help='Output names', required=True)
parser.add_argument('--debug', action='store_true', default=False, help='Developer messages')
parser.add_argument('--random_rows', action='store', type=int, help='Numbers of miRNA to subset', default=100)
args = parser.parse_args()
#####################################################
## original counts
print ("# Read original frequency table")
original_counts = functions.main_functions.get_data(args.freq, ',', 'index_col=0')
col_list = list(original_counts)  ## get columns (miRNA index excluded)
## drop NAs
print ("# Remove any rows containing NAs from frequency table")
original_counts = original_counts.dropna()
## subset 100 rows (default; size controlled by --random_rows)
print ("# Randomly subsetting rows")
subset_df = original_counts.sample(n=args.random_rows)
## add missing data: blank out ~35% of each column at random
print ("# Adding missing information")
modified_counts = subset_df.copy(deep=True)
for col in col_list:
    # NOTE(review): pd.np was removed in pandas >= 2.0; np.nan is the
    # modern spelling -- confirm the pandas version pinned by this project.
    modified_counts.loc[modified_counts.sample(frac=0.35).index, col] = pd.np.nan
## randomize: shuffle each row's values across the columns
print ("# Shuffling information")
random_counts = modified_counts.apply(np.random.permutation, axis=1, result_type='broadcast')
# The NaNs introduced above become zeros before totals are computed.
random_counts[np.isnan(random_counts)] = 0
random_counts['total'] = random_counts.sum(axis=1)
## get frequencies
print ("# Get frequence")
random_freqs = get_freq(random_counts, col_list)
if (args.debug):
    print ('##########')
    print ('Random original Counts')
    print (subset_df)
    print ('##########')
    print ('')
    print ('##########')
    print ('Random original Frequence')
    subset_df['total'] = subset_df.sum(axis=1)
    original_freq = get_freq(subset_df, col_list)
    print (original_freq)
    print ('##########')
## print randomize counts & frequencies
print ('##########')
print ('Random Counts')
print (random_counts)
print ('##########')
print ('')
print ('##########')
print ('Frequence')
print (random_freqs)
print ('##########')
## adjust to 100: turn each frequency row into integer counts summing to 100
print ("# Adjusting to 100 counts")
new_random = pd.DataFrame(columns=col_list)
for miRNA, row in random_freqs.iterrows():
    for col in col_list:
        if row[col] == 0:
            new_random.loc[miRNA, col] = 0
        else:
            # int() truncates, so a row may fall short of 100 after this pass.
            new_random.loc[miRNA, col] = int(row[col]*100)
new_random['total'] = new_random.sum(axis=1)
# Add any truncation deficit to one randomly chosen column of the row.
for miRNA, row in new_random.iterrows():
    if row['total'] != 100:
        # BUG FIX: the original wrote `row[rnd] += sum`, which (a) shadowed
        # the builtin `sum` and (b) mutated only the copy yielded by
        # iterrows(), never the DataFrame -- the adjustment was a no-op.
        # Writing through .loc makes it actually stick.
        deficit = 100 - int(row['total'])
        rnd = random.sample(col_list, 1)
        new_random.loc[miRNA, rnd[0]] += deficit
new_random = new_random.drop(columns=['total'])
new_random['total'] = new_random.sum(axis=1)
print ('##########')
print ('Counts')
print (subset_df)
print ('##########')
print ('')
## print randomize counts & frequencies
print ('##########')
print ('Counts adjusted')
print (new_random)
print ('##########')
print ('')
print ("Printing frequencies in table: " + args.out)
#print (df_miRNA)
new_random.to_csv(args.out + ".csv", ',')
| [
"random.sample",
"argparse.ArgumentParser",
"numpy.isnan",
"HCGB.functions.main_functions.get_data",
"pandas.DataFrame"
] | [((997, 1336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""mod_freq.py"""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': '"""\n\nmod_freq.py: Modified given frequencies and select isomiRs\n\nVersion: 0.1\nLicense: GPLv3\n\nUSAGE: python mod_freq.py --freq table.freq.csv --out out_name [--debug] \n"""', 'epilog': '"""Original code: JFSanchezHerrero"""'}), '(prog=\'mod_freq.py\', formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n """\n\nmod_freq.py: Modified given frequencies and select isomiRs\n\nVersion: 0.1\nLicense: GPLv3\n\nUSAGE: python mod_freq.py --freq table.freq.csv --out out_name [--debug] \n"""\n , epilog=\'Original code: JFSanchezHerrero\')\n', (1020, 1336), False, 'import argparse\n'), ((1989, 2053), 'HCGB.functions.main_functions.get_data', 'functions.main_functions.get_data', (['args.freq', '""","""', '"""index_col=0"""'], {}), "(args.freq, ',', 'index_col=0')\n", (2022, 2053), False, 'from HCGB import functions\n'), ((3560, 3590), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col_list'}), '(columns=col_list)\n', (3572, 3590), True, 'import pandas as pd\n'), ((670, 684), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (682, 684), True, 'import pandas as pd\n'), ((2698, 2721), 'numpy.isnan', 'np.isnan', (['random_counts'], {}), '(random_counts)\n', (2706, 2721), True, 'import numpy as np\n'), ((3966, 3992), 'random.sample', 'random.sample', (['col_list', '(1)'], {}), '(col_list, 1)\n', (3979, 3992), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""CUBI+Snakemake wrapper code for applying the filter list.
"""
import gzip
import re
import sys
import textwrap
from snakemake import shell
config = snakemake.config["step_config"]["somatic_variant_filtration"]["filter_sets"]
params = snakemake.params.args
print("DEBUG- params = {}".format(params), file=sys.stderr)
__author__ = "<NAME> <<EMAIL>>"
shell.executable("/bin/bash")
title = re.compile(
"^#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER(\tINFO)?(\tFORMAT)?\t([^\t]+)\t([^\t]+)"
)
iTumor = -1
with gzip.open(snakemake.input.vcf, mode="rt") as f:
for line in f:
m = title.search(line.rstrip())
if m:
if (m.group(3) == params["normal_sample"]) and (m.group(4) == params["tumor_sample"]):
iTumor = 1
break
if (m.group(4) == params["normal_sample"]) and (m.group(3) == params["tumor_sample"]):
iTumor = 0
break
if iTumor < 0:
print(
"Can't find normal sample {} or tumor sample {} in vcf header".format(
params["normal_sample"], params["tumor_sample"]
),
file=sys.stderr,
)
sys.exit(-1)
cmd = ["zcat " + snakemake.input.vcf]
if "dkfz" in snakemake.wildcards.filter_set:
cmd.append("bcftools view -f .,PASS")
if "ebfilter" in snakemake.wildcards.filter_set:
cmd.append(
"bcftools view -e 'EB < {threshold}'".format(
threshold=config["dkfz_and_ebfilter"].get("ebfilter_threshold", 3)
)
)
if "oxog" in snakemake.wildcards.filter_set:
if snakemake.wildcards.var_caller != "scalpel":
min_vaf = config["dkfz_and_ebfilter_and_oxog"].get("vaf_threshold", 0.08)
min_cov = config["dkfz_and_ebfilter_and_oxog"].get("coverage_threshold", 5)
if snakemake.wildcards.var_caller == "mutect2":
allele_freq_str = "AF"
else:
allele_freq_str = "FA"
oxo_filter = '(FILTER != "PASS" || FORMAT/{}[{}:0]<={} || FORMAT/AD[{}:1]<{})'.format(
allele_freq_str, iTumor, min_vaf, iTumor, min_cov
)
cmd.append(
'bcftools filter -e \'{oxo_filter} && REF = "G" && ALT ~ "T"\''.format(
oxo_filter=oxo_filter
)
)
cmd.append(
'bcftools filter -e \'{oxo_filter} && REF = "C" && ALT ~ "A"\''.format(
oxo_filter=oxo_filter
)
)
script = " | ".join(cmd) + " | bgzip > " + snakemake.output.vcf
shell(
textwrap.dedent(
r"""
set -x
{script}
tabix {snakemake.output.vcf}
pushd $(dirname {snakemake.output.vcf})
md5sum $(basename {snakemake.output.vcf}) > $(basename {snakemake.output.vcf}).md5
md5sum $(basename {snakemake.output.tbi}) > $(basename {snakemake.output.tbi}).md5
"""
)
)
# TODO
# Compute MD5 sums of logs.
# shell(
# r"""
# pwd
# md5sum {snakemake.log} >{snakemake.log}.md5
# """
# )
| [
"textwrap.dedent",
"gzip.open",
"re.compile",
"snakemake.shell.executable",
"sys.exit"
] | [((380, 409), 'snakemake.shell.executable', 'shell.executable', (['"""/bin/bash"""'], {}), "('/bin/bash')\n", (396, 409), False, 'from snakemake import shell\n'), ((419, 523), 're.compile', 're.compile', (['"""^#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER(\tINFO)?(\tFORMAT)?\t([^\t]+)\t([^\t]+)"""'], {}), "(\n '^#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER(\\tINFO)?(\\tFORMAT)?\\t([^\\t]+)\\t([^\\t]+)'\n )\n", (429, 523), False, 'import re\n'), ((537, 578), 'gzip.open', 'gzip.open', (['snakemake.input.vcf'], {'mode': '"""rt"""'}), "(snakemake.input.vcf, mode='rt')\n", (546, 578), False, 'import gzip\n'), ((1165, 1177), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1173, 1177), False, 'import sys\n'), ((2498, 2818), 'textwrap.dedent', 'textwrap.dedent', (['"""\n set -x\n\n {script}\n\n tabix {snakemake.output.vcf}\n\n pushd $(dirname {snakemake.output.vcf})\n md5sum $(basename {snakemake.output.vcf}) > $(basename {snakemake.output.vcf}).md5\n md5sum $(basename {snakemake.output.tbi}) > $(basename {snakemake.output.tbi}).md5\n """'], {}), '(\n """\n set -x\n\n {script}\n\n tabix {snakemake.output.vcf}\n\n pushd $(dirname {snakemake.output.vcf})\n md5sum $(basename {snakemake.output.vcf}) > $(basename {snakemake.output.vcf}).md5\n md5sum $(basename {snakemake.output.tbi}) > $(basename {snakemake.output.tbi}).md5\n """\n )\n', (2513, 2818), False, 'import textwrap\n')] |
import hydra
import os
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
def timeToSeconds(t: str) -> int:
"""
Convert the parsed time string from config.yaml to seconds
Args:
t (str): Supported format "hh:mm:ss"
Returns:
int: Total of seconds
"""
n = [int(x) for x in t.split(":")]
n[0] = n[0] * 60 * 60
n[1] = n[1] * 60
return sum(n)
@hydra.main(config_path=".", config_name="config")
def main(cfg):
# add a prefix to the name of the trimmed video
newName = cfg["path"].split("\\")
newName[-1] = newName[-1].split(".")
newName[-1][-1] = "_trimmed." + newName[-1][-1]
newName[-1] = "".join(newName[-1])
newName = "\\".join(newName)
# parsing start and end time in seconds
start = timeToSeconds(cfg["start"])
end = timeToSeconds(cfg["end"])
# check is already a trimmed video exist
if os.path.isfile(newName):
print("A trimmed version already exist")
# ask to override the existing trimmed version
a = input("Would like to overwrit the file? [y/n] \n")
if a != "y":
# if not permited exit
return
# trim the video
ffmpeg_extract_subclip(cfg["path"], start, end, targetname=newName)
if __name__ == "__main__":
main()
| [
"os.path.isfile",
"moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip",
"hydra.main"
] | [((411, 460), 'hydra.main', 'hydra.main', ([], {'config_path': '"""."""', 'config_name': '"""config"""'}), "(config_path='.', config_name='config')\n", (421, 460), False, 'import hydra\n'), ((906, 929), 'os.path.isfile', 'os.path.isfile', (['newName'], {}), '(newName)\n', (920, 929), False, 'import os\n'), ((1200, 1267), 'moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip', 'ffmpeg_extract_subclip', (["cfg['path']", 'start', 'end'], {'targetname': 'newName'}), "(cfg['path'], start, end, targetname=newName)\n", (1222, 1267), False, 'from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip\n')] |
import argparse
import os
import yaml
import pprint
from lib.fast_rcnn.config import cfg_from_file, get_output_dir, get_log_dir
from lib.datasets.factory import get_imdb
from lib.networks.factory import get_network
from lib.fast_rcnn.train import get_training_roidb
from lib.fast_rcnn.config import cfg
from lib.fast_rcnn.train import train_net
def train_main(data_dir,model_dir,train_steps,input_yaml):
cfg_from_file(input_yaml)
print('Using config:')
pprint.pprint(cfg)
imdb = get_imdb('voc_2007_trainval')
print('Loaded dataset `{:s}` for training'.format(imdb.name))
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
log_dir = get_log_dir(imdb)
print('Output will be saved to `{:s}`'.format(output_dir))
print('Logs will be saved to `{:s}`'.format(log_dir))
device_name = '/gpu:0'
print(device_name)
network = get_network('VGGnet_train')
train_net(network, imdb, roidb,
output_dir=output_dir,
log_dir=log_dir,
pretrained_model='data/pretrain_model/VGG_imagenet.npy',
max_iters=int(cfg.TRAIN.max_steps),
restore=bool(int(cfg.TRAIN.restore)))
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser.add_argument(
'--data-dir',
default='/opt/ml/input/data/training',
type=str,
help='The directory where the CIFAR-10 input data is stored. Default: /opt/ml/input/data/training. This '
'directory corresponds to the SageMaker channel named \'training\', which was specified when creating '
'our training job on SageMaker')
# For more information:
# https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html
args_parser.add_argument(
'--model-dir',
default='/opt/ml/model',
type=str,
help='The directory where the model will be stored. Default: /opt/ml/model. This directory should contain all '
'final model artifacts as Amazon SageMaker copies all data within this directory as a single object in '
'compressed tar format.')
args_parser.add_argument(
'--train-steps',
type=int,
default=100,
help='The number of steps to use for training.')
args_parser.add_argument(
'--input-yaml',
type=str,
default='/opt/ml/input/config/text.yaml'
)
args = args_parser.parse_args()
train_main(**vars(args))
| [
"lib.fast_rcnn.config.cfg_from_file",
"lib.fast_rcnn.train.get_training_roidb",
"lib.fast_rcnn.config.get_output_dir",
"argparse.ArgumentParser",
"lib.datasets.factory.get_imdb",
"lib.fast_rcnn.config.get_log_dir",
"lib.networks.factory.get_network",
"pprint.pprint"
] | [((425, 450), 'lib.fast_rcnn.config.cfg_from_file', 'cfg_from_file', (['input_yaml'], {}), '(input_yaml)\n', (438, 450), False, 'from lib.fast_rcnn.config import cfg_from_file, get_output_dir, get_log_dir\n'), ((488, 506), 'pprint.pprint', 'pprint.pprint', (['cfg'], {}), '(cfg)\n', (501, 506), False, 'import pprint\n'), ((527, 556), 'lib.datasets.factory.get_imdb', 'get_imdb', (['"""voc_2007_trainval"""'], {}), "('voc_2007_trainval')\n", (535, 556), False, 'from lib.datasets.factory import get_imdb\n'), ((641, 665), 'lib.fast_rcnn.train.get_training_roidb', 'get_training_roidb', (['imdb'], {}), '(imdb)\n', (659, 665), False, 'from lib.fast_rcnn.train import get_training_roidb\n'), ((688, 714), 'lib.fast_rcnn.config.get_output_dir', 'get_output_dir', (['imdb', 'None'], {}), '(imdb, None)\n', (702, 714), False, 'from lib.fast_rcnn.config import cfg_from_file, get_output_dir, get_log_dir\n'), ((732, 749), 'lib.fast_rcnn.config.get_log_dir', 'get_log_dir', (['imdb'], {}), '(imdb)\n', (743, 749), False, 'from lib.fast_rcnn.config import cfg_from_file, get_output_dir, get_log_dir\n'), ((956, 983), 'lib.networks.factory.get_network', 'get_network', (['"""VGGnet_train"""'], {}), "('VGGnet_train')\n", (967, 983), False, 'from lib.networks.factory import get_network\n'), ((1324, 1349), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1347, 1349), False, 'import argparse\n')] |
import os
from openspeechcorpus_cli.utils import get_all_file_names_and_relative_paths
def execute_script(
transcription_file,
output_file,
wav_folder,
output_folder,
include_source=True,
verify_existing=True,
extension='mfc',
):
relative_paths = get_all_file_names_and_relative_paths(transcription_file)
lines = list()
for record in relative_paths:
if record.endswith(".mp4"):
record = record.replace(".mp4", ".wav")
absolute_record_path = os.path.join(os.path.abspath(wav_folder), record)
if verify_existing:
if not os.path.exists(absolute_record_path):
print(f"File {record} not found in {wav_folder}, skipping")
continue
relative_folders = record.split("/")
relative_path_without_file = "/".join(relative_folders[:-1])
new_file_name = f"{'.'.join(relative_folders[-1].split('.')[:-1])}.{extension}"
features_path = os.path.join(
os.path.abspath(output_folder),
"features",
relative_path_without_file,
new_file_name
)
if include_source:
lines.append(f"{absolute_record_path} {features_path}\n")
else:
lines.append(f"{features_path}\n")
output_file = open(output_file, 'w+')
output_file.writelines(lines, )
output_file.close()
| [
"os.path.abspath",
"os.path.exists",
"openspeechcorpus_cli.utils.get_all_file_names_and_relative_paths"
] | [((310, 367), 'openspeechcorpus_cli.utils.get_all_file_names_and_relative_paths', 'get_all_file_names_and_relative_paths', (['transcription_file'], {}), '(transcription_file)\n', (347, 367), False, 'from openspeechcorpus_cli.utils import get_all_file_names_and_relative_paths\n'), ((553, 580), 'os.path.abspath', 'os.path.abspath', (['wav_folder'], {}), '(wav_folder)\n', (568, 580), False, 'import os\n'), ((1028, 1058), 'os.path.abspath', 'os.path.abspath', (['output_folder'], {}), '(output_folder)\n', (1043, 1058), False, 'import os\n'), ((637, 673), 'os.path.exists', 'os.path.exists', (['absolute_record_path'], {}), '(absolute_record_path)\n', (651, 673), False, 'import os\n')] |
"""
Provides a default QA configuration for the projects, by reading the configuration file and the environment variables.
"""
import os
import sys
import datetime
from getpass import getuser
from pathlib import Path, PurePosixPath
from typing import Dict, Any, Tuple, List, Optional, Union
import yaml
import click
from .utils import merge, getenvs
from .git import git_head, git_show
from .conventions import slugify, get_commit_dirs, location_from_spec
from .iterators import flatten
# In case the qaboard.yaml configuration has errors, we don't want to exit directly.
# but first show all the errors that need to be fixed.
config_has_error = False
# Don't lots of verbose info if the users just wants the help, or start a new project
ignore_config_errors = len(sys.argv)==1 or '--help' in sys.argv or 'init' in sys.argv
# When the code is imported we care less about warnings...
ignore_config_errors = ignore_config_errors or not sys.argv[0].endswith('qa')
def find_configs(path : Path) -> List[Tuple[Dict, Path]]:
"""Returns the parsed content and paths of qaboard.yaml files that should be loaded for a (sub)project at the `path`.
Returns a tuple (configs, paths). Each element is a list - the root qaboard.yaml is first and the subproject's is last.
"""
configsxpaths = []
# We need a full path to iterate on the parents
path = path.resolve()
# We look for qaboard.yaml configuration files in the path folder and its parents
parents = [path, *list(path.parents)]
for parent in parents:
qatools_config_path = parent / 'qaboard.yaml'
if not qatools_config_path.exists():
qatools_config_path = parent / 'qatools.yaml' # backward compatibility
if not qatools_config_path.exists():
continue
with qatools_config_path.open('r') as f:
qatools_config = yaml.load(f, Loader=yaml.SafeLoader)
if not qatools_config: # support empty files that just mark subprojects
qatools_config = {}
configsxpaths.append((qatools_config, qatools_config_path))
if qatools_config.get('root'):
break
configsxpaths.reverse()
return configsxpaths
qatools_configsxpaths = find_configs(path=Path())
qatools_configs = [q[0] for q in qatools_configsxpaths]
qatools_config_paths = [q[1] for q in qatools_configsxpaths]
if not qatools_configsxpaths:
config_has_error = True
if not ignore_config_errors:
click.secho('ERROR: Could not find a `qaboard.yaml` configuration file.\nDid you run `qatools init` ?', fg='red', err=True)
click.secho(
'Please read the tutorial or ask <NAME> for help:\n'
'http://qa-docs/',
dim=True, err=True)
# take care not to mutate the root config, as its project.name is the git repo name
config : Dict[str, Any] = {}
for c in qatools_configs:
config = merge(c, config)
# The top-most qaboard.yaml is the root project
# The current subproject corresponds to the lowest qaboard.yaml
if not qatools_config_paths:
root_qatools = None
project_dir = None
root_qatools_config: Dict[str, Any] = {}
project = None
project_root = None
subproject = Path(".")
else:
if len(qatools_config_paths)==1:
root_qatools = qatools_config_paths[0].parent
project_dir = root_qatools
root_qatools_config = qatools_configs[0]
else:
root_qatools, *__, project_dir = [c.parent for c in qatools_config_paths]
root_qatools_config, *_ = qatools_configs
subproject = project_dir.relative_to(root_qatools) if root_qatools else Path(".")
# We check for consistency
if root_qatools_config and config:
if root_qatools_config.get('project', {}).get('url') != config.get('project', {}).get('url'):
config_has_error = True
if not ignore_config_errors:
click.secho(f"ERROR: Don't redefine the project's URL in ./qaboard.yaml.", fg='red', bold=True, err=True)
click.secho(f"Changed from {root_qatools_config.get('project', {}).get('url')} to {config.get('project', {}).get('url')}", fg='red')
# We identify sub-qatools projects using the location of qaboard.yaml related to the project root
# It's not something the user should change...
project_root = Path(root_qatools_config['project']['name'])
project = project_root / subproject
uncoherent_name = config['project']['name'] not in [root_qatools_config['project']['name'], project]
if uncoherent_name:
config_has_error = True
if not ignore_config_errors:
click.secho(f"ERROR: Don't redefine <project.name> in ./qaboard.yaml", fg='red', bold=True, err=True)
click.secho(f"Changed from {root_qatools_config['project']['name']} to {config['project']['name']})", fg='red')
config['project']['name'] = project.as_posix()
# It's useful to know what's the platform since code is often compiled a different locations.
# For instance Linux builds are often at `build/bin/` vs `/x64/Release/` on Windows.
on_windows = os.name == 'nt'
on_linux = not on_windows
# SIRC-specific hosts
on_vdi = 'HOST' in os.environ and os.environ['HOST'].endswith("vdi")
on_lsf = 'HOST' in os.environ and (os.environ['HOST'].endswith("transchip.com") or os.environ['HOST'].startswith("planet"))
platform = 'windows' if on_windows else 'linux'
user = getuser()
def storage_roots(config: Dict, project: Path, subproject: Path) -> Tuple[Path, Path]:
# we do compute it twice, but it gives us some flexibility
user = getuser()
try:
if 'ci_root' in config:
# click.secho('DEPRECATION WARNING: the config key "ci_root" was renamed "storage"', fg='yellow', err=True)
config['storage'] = config['ci_root']
config_storage: Union[str, Dict] = os.environ.get('QA_STORAGE', config.get('storage', {}))
interpolation_vars = {"project": project, "subproject": subproject, "user": user}
spec_artifacts = config_storage.get('artifacts', config_storage) if isinstance(config_storage, dict) else config_storage
spec_outputs = config_storage.get('outputs', config_storage) if isinstance(config_storage, dict) else config_storage
artifacts_root = location_from_spec(spec_artifacts, interpolation_vars)
outputs_root = location_from_spec(spec_outputs, interpolation_vars)
if not artifacts_root or not outputs_root:
raise KeyError
except KeyError:
artifacts_root = Path()
outputs_root = Path()
config_has_error = True
if not ignore_config_errors:
click.secho('ERROR: Could not find the storage settings that define where outputs & artifacts are saved.', fg='red', err=True)
click.secho('Consider adding to qaboard.yaml:\n```storage:\n linux: /net/stage/algo_data/ci\n windows: "\\\\netapp\\algo_data\\ci"\n```', fg='red', err=True, dim=True)
return outputs_root, artifacts_root
def mkdir(path: Path):
global config_has_error
if not path.exists():
try:
path.mkdir(parents=True)
click.secho(f'Created: {path}', fg='blue', err=True)
except:
config_has_error = True
if not ignore_config_errors:
click.secho(f'ERROR: The storage path does not exist: "{path}".', fg='red', err=True)
outputs_root: Optional[Path]
artifacts_root: Optional[Path]
artifacts_project_root: Optional[Path]
artifacts_project: Optional[Path]
outputs_project_root: Optional[Path]
outputs_project: Optional[Path]
if root_qatools_config:
assert project
assert project_root
outputs_root, artifacts_root = storage_roots(config, project, subproject)
mkdir(outputs_root)
mkdir(artifacts_root)
artifacts_project_root = artifacts_root / project_root
artifacts_project = artifacts_root / project
outputs_project_root = outputs_root / project_root
outputs_project = outputs_root / project
else:
outputs_root = None
artifacts_root = None
artifacts_project_root = None
artifacts_project = None
outputs_project_root = None
outputs_project = None
# This flag identifies runs that happen within the CI or tuning experiments
ci_env_variables = (
# Set by most CI tools (GitlabCI, CircleCI, TravisCI, Github Actions...) except Jenkins,
# and by the web application during tuning runs
'CI',
# set by Jenkins' git plugin
'GIT_COMMIT',
)
is_ci = any([v in os.environ for v in ci_env_variables])
if is_ci:
# This field is not used at the moment, possibly in the future we'll want to support other VCS like SVN
commit_type = config.get('project', {}).get('type', 'git')
# Different CI tools use different environment variables to tell us
# what commit and branch we're running on
commit_sha_variables = (
'CI_COMMIT_SHA', # GitlabCI
'GIT_COMMIT', # Jenkins, git plugin
'CIRCLE_SHA1', # CircleCI
'TRAVIS_COMMIT', # TravisCI
'GITHUB_SHA' # Github Actions
)
commit_id = getenvs(commit_sha_variables)
branch_env_variables = (
'CI_COMMIT_TAG', # GitlabCI, only when building tags
'CI_COMMIT_REF_NAME', # GitlabCI
'GIT_BRANCH', # Jenkins
'gitlabBranch', # Jenkins gitlab plugin
'CIRCLE_BRANCH', # CircleCI
'TRAVIS_BRANCH', # TravisCI
'GITHUB_REF' # Github Actions
)
commit_branch = getenvs(branch_env_variables)
if commit_branch:
commit_branch = commit_branch.replace('origin/', '').replace('refs/heads/', '')
tag_env_variables = (
'CI_COMMIT_TAG', # GitlabCI
'GIT_TAG_NAME', # Jenkins git plugin
'CIRCLE_TAG', # CircleCI
'TRAVIS_TAG', # TravisCI
# Github Actions uses GITHUB_REF too
)
commit_tag = getenvs(tag_env_variables)
else:
commit_type = None
# If possible we'll complete the information later
commit_id = None
commit_branch = None
commit_tag = None
# TODO: refactor in git.py, consider calling git directly...
repo_root = Path(os.environ.get('QA_REPO', str(root_qatools if root_qatools else Path())))
is_in_git_repo = False
for d in (repo_root, *list(repo_root.parents)):
if (d / '.git').exists():
is_in_git_repo = True
repo_root = d
if not commit_id or not commit_branch:
if is_in_git_repo:
commit_branch, commit_id = git_head(repo_root)
else:
if not commit_branch:
commit_branch = f'<local:{user}>'
if not commit_id:
commit_id = f'<local:{user}>'
if artifacts_project_root:
artifacts_branch_root = artifacts_project_root / 'branches' / slugify(commit_branch)
artifacts_branch = artifacts_branch_root / subproject
else:
artifacts_branch_root = Path()
artifacts_branch = Path()
commit_committer_name: Optional[str] = user
commit_committer_email: Optional[str] = None
commit_authored_datetime = datetime.datetime.now(datetime.timezone.utc).isoformat()
commit_message: Optional[str] = None
commit_parents: List[str] = []
if commit_id and is_in_git_repo:
fields = ['%cn', '%ce', '%aI', '%P', "%B"]
try:
commit_info = git_show("%n".join(fields), commit_id)
fields_values = commit_info.split('\n', maxsplit=len(fields))
commit_committer_name, commit_committer_email, commit_authored_datetime, commit_parents_str, commit_message = fields_values
commit_parents = commit_parents_str.split()
except:
# may fail when working on the first commit in a repo, like in our tests
pass
if root_qatools_config:
assert artifacts_project_root
assert outputs_project_root
commit_dirs = get_commit_dirs(commit_id, repo_root)
artifacts_commit_root = artifacts_project_root / commit_dirs
artifacts_commit = artifacts_project_root / commit_dirs / subproject
outputs_commit_root = outputs_project_root / commit_dirs
outputs_commit = outputs_project_root / commit_dirs / subproject
else:
artifacts_commit_root = Path()
artifacts_commit = Path()
outputs_commit_root = Path()
outputs_commit = Path()
# backward compatibility for HW_ALG's runs. And tof/swip_tof's runs: has to exist
commit_ci_dir = outputs_commit
# backward compatibility for HW_ALG/tools/ci_tools/find_valid_build.py
ci_dir = artifacts_project_root
# When running qa from a folder with a commit's artifacts,
# there is no information about the git commit, no .git/ folder.
# During tuning/extra runs, QA-Board will provide this info using
# the QA_OUTPUTS_COMMIT and GIT_COMMIT environment variables
if 'QA_OUTPUTS_COMMIT' in os.environ:
outputs_commit = Path(os.environ['QA_OUTPUTS_COMMIT'])
default_platform = platform
default_batch_label = 'default'
config_inputs = config.get('inputs', {})
# "batches" is prefered, but we want to stay backward compatible
default_batches_files = config_inputs.get('groups', config_inputs.get('batches'))
if not default_batches_files:
default_batches_files = []
if not (isinstance(default_batches_files, list) or isinstance(default_batches_files, tuple)):
default_batches_files = [default_batches_files]
config_inputs_types = config_inputs.get('types', {})
default_input_type = config_inputs_types.get('default', 'default')
def get_default_configuration(input_settings) -> str:
from .conventions import serialize_config
default_configuration = input_settings.get('configs', input_settings.get('configurations', input_settings.get('configuration', [])))
default_configuration = list(flatten(default_configuration))
return serialize_config(default_configuration)
def get_default_database(inputs_settings):
# All recordings used should be stored at the same location
# We will refer to them by their relative path related to the "database"
global ignore_config_errors
if 'type' in inputs_settings and inputs_settings['type'] in inputs_settings and 'database' in inputs_settings[inputs_settings['type']]:
database_spec = inputs_settings[inputs_settings['type']]['database']
else:
database_spec = inputs_settings.get('database', {})
try:
database = location_from_spec(database_spec)
except:
database = Path("/")
if not database:
database = "."
if not ignore_config_errors:
click.secho(f'WARNING: Could not find the default database location, defaulting to "."', fg='yellow', err=True)
click.secho(f'Consider adding to qaboard.yaml:\n```\ninputs:\n database:\n linux: /net/stage/algo_data\n windows: "\\\\netapp2\\algo_data"\n```', fg='yellow', err=True, dim=True)
ignore_config_errors = True
return Path(database)
_metrics: Dict = {}
available_metrics: Dict[str, Dict[str, Any]] = {}
main_metrics: List = []
metrics_file = config.get('outputs', {}).get('metrics')
if metrics_file:
metrics_file_path = Path(root_qatools / metrics_file)
if not metrics_file_path.exists():
if not ignore_config_errors:
click.secho(f'WARNING: Could not find the file containing metrics ({metrics_file})', fg='yellow', err=True)
click.secho(f' It is defined in qaboard.yaml under outputs.metrics', fg='yellow', err=True, dim=True)
ignore_config_errors = True
else:
with metrics_file_path.open(errors="surrogateescape") as f:
try:
_metrics = yaml.load(f, Loader=yaml.SafeLoader)
except Exception as e:
config_has_error = True
if not ignore_config_errors:
click.secho(f'ERROR: Unable to parse {metrics_file}', fg='red', err=True, bold=True)
click.secho(f'{e}', fg='red', err=True)
ignore_config_errors = True
available_metrics = _metrics.get('available_metrics', {})
main_metrics = _metrics.get('main_metrics', [])
# We want to allow any user to use the Gitlab API, stay compatible with usage at Samsung
# ...and remove the credentials from the repo
default_secrets_path = os.environ.get('QA_SECRETS', '/home/ispq/.secrets.yaml' if os.name != 'nt' else '//mars/raid/users/ispq/.secrets.yaml')
secrets_path = Path(config.get('secrets', default_secrets_path))
if secrets_path.exists():
with secrets_path.open() as f:
secrets = yaml.load(f, Loader=yaml.SafeLoader)
else:
secrets = {}
| [
"click.secho",
"pathlib.Path",
"os.environ.get",
"yaml.load",
"datetime.datetime.now",
"getpass.getuser"
] | [((5272, 5281), 'getpass.getuser', 'getuser', ([], {}), '()\n', (5279, 5281), False, 'from getpass import getuser\n'), ((15617, 15740), 'os.environ.get', 'os.environ.get', (['"""QA_SECRETS"""', "('/home/ispq/.secrets.yaml' if os.name != 'nt' else\n '//mars/raid/users/ispq/.secrets.yaml')"], {}), "('QA_SECRETS', '/home/ispq/.secrets.yaml' if os.name != 'nt' else\n '//mars/raid/users/ispq/.secrets.yaml')\n", (15631, 15740), False, 'import os\n'), ((3174, 3183), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (3178, 3183), False, 'from pathlib import Path, PurePosixPath\n'), ((4219, 4263), 'pathlib.Path', 'Path', (["root_qatools_config['project']['name']"], {}), "(root_qatools_config['project']['name'])\n", (4223, 4263), False, 'from pathlib import Path, PurePosixPath\n'), ((5442, 5451), 'getpass.getuser', 'getuser', ([], {}), '()\n', (5449, 5451), False, 'from getpass import getuser\n'), ((10547, 10553), 'pathlib.Path', 'Path', ([], {}), '()\n', (10551, 10553), False, 'from pathlib import Path, PurePosixPath\n'), ((10577, 10583), 'pathlib.Path', 'Path', ([], {}), '()\n', (10581, 10583), False, 'from pathlib import Path, PurePosixPath\n'), ((11758, 11764), 'pathlib.Path', 'Path', ([], {}), '()\n', (11762, 11764), False, 'from pathlib import Path, PurePosixPath\n'), ((11786, 11792), 'pathlib.Path', 'Path', ([], {}), '()\n', (11790, 11792), False, 'from pathlib import Path, PurePosixPath\n'), ((11817, 11823), 'pathlib.Path', 'Path', ([], {}), '()\n', (11821, 11823), False, 'from pathlib import Path, PurePosixPath\n'), ((11843, 11849), 'pathlib.Path', 'Path', ([], {}), '()\n', (11847, 11849), False, 'from pathlib import Path, PurePosixPath\n'), ((12376, 12413), 'pathlib.Path', 'Path', (["os.environ['QA_OUTPUTS_COMMIT']"], {}), "(os.environ['QA_OUTPUTS_COMMIT'])\n", (12380, 12413), False, 'from pathlib import Path, PurePosixPath\n'), ((14340, 14354), 'pathlib.Path', 'Path', (['database'], {}), '(database)\n', (14344, 14354), False, 'from pathlib import Path, 
PurePosixPath\n'), ((14549, 14582), 'pathlib.Path', 'Path', (['(root_qatools / metrics_file)'], {}), '(root_qatools / metrics_file)\n', (14553, 14582), False, 'from pathlib import Path, PurePosixPath\n'), ((2250, 2256), 'pathlib.Path', 'Path', ([], {}), '()\n', (2254, 2256), False, 'from pathlib import Path, PurePosixPath\n'), ((2466, 2602), 'click.secho', 'click.secho', (['"""ERROR: Could not find a `qaboard.yaml` configuration file.\nDid you run `qatools init` ?"""'], {'fg': '"""red"""', 'err': '(True)'}), '(\n """ERROR: Could not find a `qaboard.yaml` configuration file.\nDid you run `qatools init` ?"""\n , fg=\'red\', err=True)\n', (2477, 2602), False, 'import click\n'), ((2594, 2706), 'click.secho', 'click.secho', (['"""Please read the tutorial or ask <NAME> for help:\nhttp://qa-docs/"""'], {'dim': '(True)', 'err': '(True)'}), '(\n """Please read the tutorial or ask <NAME> for help:\nhttp://qa-docs/""",\n dim=True, err=True)\n', (2605, 2706), False, 'import click\n'), ((3557, 3566), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (3561, 3566), False, 'from pathlib import Path, PurePosixPath\n'), ((10701, 10745), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (10722, 10745), False, 'import datetime\n'), ((15875, 15911), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (15884, 15911), False, 'import yaml\n'), ((1861, 1897), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (1870, 1897), False, 'import yaml\n'), ((4494, 4600), 'click.secho', 'click.secho', (['f"""ERROR: Don\'t redefine <project.name> in ./qaboard.yaml"""'], {'fg': '"""red"""', 'bold': '(True)', 'err': '(True)'}), '(f"ERROR: Don\'t redefine <project.name> in ./qaboard.yaml", fg=\n \'red\', bold=True, err=True)\n', (4505, 4600), False, 'import click\n'), ((4602, 4723), 'click.secho', 'click.secho', (['f"""Changed from 
{root_qatools_config[\'project\'][\'name\']} to {config[\'project\'][\'name\']})"""'], {'fg': '"""red"""'}), '(\n f"Changed from {root_qatools_config[\'project\'][\'name\']} to {config[\'project\'][\'name\']})"\n , fg=\'red\')\n', (4613, 4723), False, 'import click\n'), ((6328, 6334), 'pathlib.Path', 'Path', ([], {}), '()\n', (6332, 6334), False, 'from pathlib import Path, PurePosixPath\n'), ((6354, 6360), 'pathlib.Path', 'Path', ([], {}), '()\n', (6358, 6360), False, 'from pathlib import Path, PurePosixPath\n'), ((6889, 6941), 'click.secho', 'click.secho', (['f"""Created: {path}"""'], {'fg': '"""blue"""', 'err': '(True)'}), "(f'Created: {path}', fg='blue', err=True)\n", (6900, 6941), False, 'import click\n'), ((13906, 13915), 'pathlib.Path', 'Path', (['"""/"""'], {}), "('/')\n", (13910, 13915), False, 'from pathlib import Path, PurePosixPath\n'), ((13993, 14114), 'click.secho', 'click.secho', (['f"""WARNING: Could not find the default database location, defaulting to ".\\""""'], {'fg': '"""yellow"""', 'err': '(True)'}), '(\n f\'WARNING: Could not find the default database location, defaulting to "."\'\n , fg=\'yellow\', err=True)\n', (14004, 14114), False, 'import click\n'), ((14111, 14304), 'click.secho', 'click.secho', (['f"""Consider adding to qaboard.yaml:\n```\ninputs:\n database:\n linux: /net/stage/algo_data\n windows: "\\\\\\\\netapp2\\\\algo_data"\n```"""'], {'fg': '"""yellow"""', 'err': '(True)', 'dim': '(True)'}), '(\n f"""Consider adding to qaboard.yaml:\n```\ninputs:\n database:\n linux: /net/stage/algo_data\n windows: "\\\\\\\\netapp2\\\\algo_data"\n```"""\n , fg=\'yellow\', err=True, dim=True)\n', (14122, 14304), False, 'import click\n'), ((14659, 14775), 'click.secho', 'click.secho', (['f"""WARNING: Could not find the file containing metrics ({metrics_file})"""'], {'fg': '"""yellow"""', 'err': '(True)'}), "(\n f'WARNING: Could not find the file containing metrics ({metrics_file})',\n fg='yellow', err=True)\n", (14670, 14775), False, 'import 
click\n'), ((14773, 14886), 'click.secho', 'click.secho', (['f""" It is defined in qaboard.yaml under outputs.metrics"""'], {'fg': '"""yellow"""', 'err': '(True)', 'dim': '(True)'}), "(f' It is defined in qaboard.yaml under outputs.metrics',\n fg='yellow', err=True, dim=True)\n", (14784, 14886), False, 'import click\n'), ((3805, 3914), 'click.secho', 'click.secho', (['f"""ERROR: Don\'t redefine the project\'s URL in ./qaboard.yaml."""'], {'fg': '"""red"""', 'bold': '(True)', 'err': '(True)'}), '(f"ERROR: Don\'t redefine the project\'s URL in ./qaboard.yaml.",\n fg=\'red\', bold=True, err=True)\n', (3816, 3914), False, 'import click\n'), ((6428, 6564), 'click.secho', 'click.secho', (['"""ERROR: Could not find the storage settings that define where outputs & artifacts are saved."""'], {'fg': '"""red"""', 'err': '(True)'}), "(\n 'ERROR: Could not find the storage settings that define where outputs & artifacts are saved.'\n , fg='red', err=True)\n", (6439, 6564), False, 'import click\n'), ((6561, 6740), 'click.secho', 'click.secho', (['"""Consider adding to qaboard.yaml:\n```storage:\n linux: /net/stage/algo_data/ci\n windows: "\\\\\\\\netapp\\\\algo_data\\\\ci"\n```"""'], {'fg': '"""red"""', 'err': '(True)', 'dim': '(True)'}), '(\n """Consider adding to qaboard.yaml:\n```storage:\n linux: /net/stage/algo_data/ci\n windows: "\\\\\\\\netapp\\\\algo_data\\\\ci"\n```"""\n , fg=\'red\', err=True, dim=True)\n', (6572, 6740), False, 'import click\n'), ((9928, 9934), 'pathlib.Path', 'Path', ([], {}), '()\n', (9932, 9934), False, 'from pathlib import Path, PurePosixPath\n'), ((15019, 15055), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (15028, 15055), False, 'import yaml\n'), ((7027, 7116), 'click.secho', 'click.secho', (['f"""ERROR: The storage path does not exist: "{path}"."""'], {'fg': '"""red"""', 'err': '(True)'}), '(f\'ERROR: The storage path does not exist: "{path}".\', fg=\'red\',\n err=True)\n', (7038, 7116), 
False, 'import click\n'), ((15164, 15252), 'click.secho', 'click.secho', (['f"""ERROR: Unable to parse {metrics_file}"""'], {'fg': '"""red"""', 'err': '(True)', 'bold': '(True)'}), "(f'ERROR: Unable to parse {metrics_file}', fg='red', err=True,\n bold=True)\n", (15175, 15252), False, 'import click\n'), ((15259, 15298), 'click.secho', 'click.secho', (['f"""{e}"""'], {'fg': '"""red"""', 'err': '(True)'}), "(f'{e}', fg='red', err=True)\n", (15270, 15298), False, 'import click\n')] |
# Copyright (c) 2020, <NAME>.
# Distributed under the MIT License. See LICENSE for more info.
"""An example of generating a heat map of correlations."""
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.datasets import load_wine as load_data
from psynlig import plot_correlation_heatmap
plt.style.use('seaborn-talk')
data_set = load_data()
data = pd.DataFrame(data_set['data'], columns=data_set['feature_names'])
kwargs = {
'heatmap': {
'vmin': -1,
'vmax': 1,
'cmap': 'viridis',
}
}
plot_correlation_heatmap(data, textcolors=['white', 'black'], **kwargs)
plt.show()
| [
"psynlig.plot_correlation_heatmap",
"matplotlib.pyplot.style.use",
"sklearn.datasets.load_wine",
"pandas.DataFrame",
"matplotlib.pyplot.show"
] | [((307, 336), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-talk"""'], {}), "('seaborn-talk')\n", (320, 336), True, 'from matplotlib import pyplot as plt\n'), ((350, 361), 'sklearn.datasets.load_wine', 'load_data', ([], {}), '()\n', (359, 361), True, 'from sklearn.datasets import load_wine as load_data\n'), ((369, 434), 'pandas.DataFrame', 'pd.DataFrame', (["data_set['data']"], {'columns': "data_set['feature_names']"}), "(data_set['data'], columns=data_set['feature_names'])\n", (381, 434), True, 'import pandas as pd\n'), ((537, 608), 'psynlig.plot_correlation_heatmap', 'plot_correlation_heatmap', (['data'], {'textcolors': "['white', 'black']"}), "(data, textcolors=['white', 'black'], **kwargs)\n", (561, 608), False, 'from psynlig import plot_correlation_heatmap\n'), ((609, 619), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (617, 619), True, 'from matplotlib import pyplot as plt\n')] |
###############################################
##### Multiple Choice Question Autograder #####
###############################################
import os
import string
import re
import runpy
from utils import *
class Notebook:
    """Multiple choice question autograder for Jupyter Notebook"""
    def __init__(self, tests=".tests.py", scored=False, max_attempts=None):
        """
        Initializes multiple choice autograder.
        Args:
        * `tests` (`str`): The relative filepath to tests file
        Kwargs:
        * `scored` (`bool`): Whether or not the assignment is scored
        * `max_attempts` (`int`): The maximum number of takes allowed; default `None`
        Returns:
        * `Notebook`. The `Notebook` instance for the autograder
        """
        # Resume a previous session if a serialized state file exists.
        # NOTE(review): `pickle`, `serialize` and `repeat` are not imported
        # here — presumably they come from `from utils import *`; verify.
        if os.path.exists(".MCAUTOGRADER_STATUS") and os.path.isfile(".MCAUTOGRADER_STATUS"):
            with open(".MCAUTOGRADER_STATUS", "rb") as f:
                self.__dict__ = pickle.load(f).__dict__
        else:
            # Fresh session: load the answer bank from the tests script.
            # runpy executes the file and exposes its "answers" global.
            self._tests_raw = runpy.run_path(tests)["answers"]
            self._identifiers = [answer["identifier"] for answer in self._tests_raw]
            self._tests = {identifier : test for identifier, test in zip(
                self._identifiers,
                self._tests_raw
            )}
            self._scored = scored
            if self._scored:
                try:
                    self._points = {identifier : self._tests[identifier]["points"] for identifier in self._identifiers}
                except KeyError:
                    assert False, "One or more identifiers missing points value in scored notebook"
                # Track which questions have already been credited.
                self._answered = {identifier : false for identifier, false in zip(
                    self._identifiers,
                    repeat(False, len(self._identifiers))
                )}
                self._possible = sum(self._points.values())
                assert self._possible > 0, "Scored notebooks must have > 0 points possible"
                self._earned = 0
            self._inf_retakes = True
            if max_attempts:
                assert max_attempts > 0 and type(max_attempts) == int, "max_attempts must be a positive integer"
                self._inf_retakes = False
                # Per-question attempt counters, all starting at zero.
                self._retakes = {identifier : zero for identifier, zero in zip(
                    self._identifiers,
                    repeat(0, len(self._identifiers))
                )}
                self._max_attempts = max_attempts
        # Rebuild __dict__ with only the fields relevant to this notebook's
        # configuration (scored / attempt-limited), so the serialized state
        # stays minimal.
        if self._scored and not self._inf_retakes:
            self.__dict__ = dict(
                _identifiers = self._identifiers,
                _tests = self._tests,
                _scored = self._scored,
                _inf_retakes = self._inf_retakes,
                _points = self._points,
                _answered = self._answered,
                _possible = self._possible,
                _earned = self._earned,
                _retakes = self._retakes,
                _max_attempts = self._max_attempts
            )
        elif self._scored:
            self.__dict__ = dict(
                _identifiers = self._identifiers,
                _tests = self._tests,
                _scored = self._scored,
                _inf_retakes = self._inf_retakes,
                _points = self._points,
                _answered = self._answered,
                _possible = self._possible,
                _earned = self._earned
            )
        elif not self._inf_retakes:
            self.__dict__ = dict(
                _identifiers = self._identifiers,
                _tests = self._tests,
                _scored = self._scored,
                _inf_retakes = self._inf_retakes,
                _retakes = self._retakes,
                _max_attempts = self._max_attempts
            )
        else:
            self.__dict__ = dict(
                _identifiers = self._identifiers,
                _tests = self._tests,
                _scored = self._scored,
                _inf_retakes = self._inf_retakes
            )
    def _serialize(self):
        # Persist the autograder state so progress survives kernel restarts.
        # NOTE(review): `serialize` presumably comes from `utils` — confirm.
        with open(".MCAUTOGRADER_STATUS", "wb+") as f:
            serialize(self, f)
    def _check_answer(self, identifier, answer):
        """
        Checks whether or not answer is correct; returns boolean
        Args:
        * `identifier` (`str`): The question identifier
        * `answer` (`str`, `int`): The student's answer
        Returns:
        * `bool`. Whether or not the answer is correct, or `None` if the
          attempt limit has been reached
        """
        assert identifier in self._identifiers, "{} is not in the question bank".format(identifier)
        assert type(answer) in [str, int], "Answer must be a string or integer"
        if type(answer) == str:
            assert len(answer) == 1, "Answer must be of length 1"
        else:
            assert 0 <= answer < 10, "Answer must be a single digit"
        # Enforce the per-question attempt limit before grading.
        if not self._inf_retakes:
            if self._retakes[identifier] >= self._max_attempts:
                print("No more retakes allowed.")
                return None
        correct_answer = self._tests[identifier]["answer"]
        assert type(correct_answer) == type(answer), "Answer is not a(n) {}".format(type(correct_answer))
        if correct_answer == answer:
            # Award points only the first time a question is answered right.
            if self._scored and not self._answered[identifier]:
                self._answered[identifier] = True
                self._earned += self._points[identifier]
            if not self._inf_retakes:
                self._retakes[identifier] += 1
            return True
        else:
            if not self._inf_retakes:
                self._retakes[identifier] += 1
            return False
    def check(self, identifier, answer):
        """
        Visible wrapper for `Notebook._check_answer` to print output based on whether or not student's
        answer is correct
        Args:
        * `identifier` (`str`): The question identifier
        * `answer` (`str`, `int`): The student's answer
        Returns:
        * `None`. Prints out student's result on question
        """
        result = self._check_answer(identifier, answer)
        if self._scored:
            if result:
                print("Correct. {} points added to your score.".format(self._points[identifier]))
            elif result == None:
                # Attempt limit reached; skip serialization and bail out.
                return None
            else:
                print("Try again.")
        else:
            if result:
                print("Correct.")
            elif result == None:
                return None
            else:
                print("Try again.")
        # Persist updated scores/attempt counters after every graded check.
        self._serialize()
    def score(self):
        """
        If assignment is scored, displays student's score as fraction and percentage.
        """
        if self._scored:
            print("{}/{}: {:.3f}%".format(self._earned, self._possible, self._earned/self._possible*100))
        else:
            print("This notebook is not scored.")
        self._serialize() | [
"os.path.isfile",
"os.path.exists",
"runpy.run_path"
] | [((727, 765), 'os.path.exists', 'os.path.exists', (['""".MCAUTOGRADER_STATUS"""'], {}), "('.MCAUTOGRADER_STATUS')\n", (741, 765), False, 'import os\n'), ((770, 808), 'os.path.isfile', 'os.path.isfile', (['""".MCAUTOGRADER_STATUS"""'], {}), "('.MCAUTOGRADER_STATUS')\n", (784, 808), False, 'import os\n'), ((933, 954), 'runpy.run_path', 'runpy.run_path', (['tests'], {}), '(tests)\n', (947, 954), False, 'import runpy\n')] |
"""
Blizzard API CLI - Cache class.
<NAME> 01/07/19
"""
# built-in
import argparse
import json
import logging
import os
class CacheError(Exception):
    """Raised by Cache operations on missing or duplicate buckets."""
class Cache:
    """ Disk-based cache implementation.

    Each bucket is a JSON file inside ``self.directory`` holding a JSON
    list of items.
    """

    log = logging.getLogger(__name__)

    def __init__(self, cache_directory):
        """
        :param cache_directory: Path to an existing cache directory or a
                                location that one should be created
        :raises: argparse.ArgumentTypeError, for use with argparse
        """
        # ensure access to this directory (or viability of creation)
        if not os.path.isdir(cache_directory):
            try:
                os.makedirs(cache_directory)
            except OSError as exc:
                Cache.log.error(exc)
                raise argparse.ArgumentTypeError(exc)
        # check for read and write access
        # BUGFIX: os.access() takes R_OK/W_OK mode flags. The previous
        # os.O_RDWR is an open() flag that happens to equal os.W_OK, so
        # read permission was silently never verified.
        if not os.access(cache_directory, os.R_OK | os.W_OK):
            error_str = "Don't have read and write access to '{0}'.".format(cache_directory)
            Cache.log.error(error_str)
            raise argparse.ArgumentTypeError(error_str)
        self.directory = cache_directory

    def bucket_path(self, name):
        """
        :param name: String name of the bucket
        :returns: String path to the fully-qualified bucket file
        """
        return os.path.join(self.directory, "{0}.json".format(name))

    def add_bucket(self, name):
        """
        :param name: String name of the new bucket to add
        :raises: CacheError if the bucket is already present
        """
        # ensure bucket isn't already present
        if self.is_bucket(name):
            error_str = "Can't add bucket '{0}', it exists.".format(name)
            Cache.log.error(error_str)
            raise CacheError(error_str)
        # add empty list to new bucket file
        with open(self.bucket_path(name), "w") as bucket_file:
            bucket_file.write(json.dumps([]))
            bucket_file.flush()

    def get_bucket(self, name):
        """
        :param name: String name of the bucket to retrieve
        :returns: deserialized content that the bucket contained
        :raises: CacheError if the bucket does not exist
        """
        # ensure bucket is present
        if not self.is_bucket(name):
            # BUGFIX: message previously said "Can't remove bucket"
            # (copy/paste from remove_bucket)
            error_str = "Can't get bucket '{0}', it doesn't exist.".format(name)
            Cache.log.error(error_str)
            raise CacheError(error_str)
        # read and return the deserialized contents
        with open(self.bucket_path(name), "r") as bucket_file:
            return json.load(bucket_file)

    def remove_bucket(self, name):
        """
        :param name: String name of the bucket to be removed
        :raises: CacheError if the bucket does not exist
        """
        # ensure bucket is present
        if not self.is_bucket(name):
            error_str = "Can't remove bucket '{0}', it doesn't exist.".format(name)
            Cache.log.error(error_str)
            raise CacheError(error_str)
        # remove file
        os.remove(self.bucket_path(name))

    def remove_all_buckets(self):
        """ Clear the cache of all existing data. """
        for bucket in self.get_bucket_names():
            self.remove_bucket(bucket)

    def add_item(self, bucket_name, item):
        """
        :param bucket_name: String name of bucket to add item to
        :param item: arbitrary object to be serialized into JSON and added
        :raises: CacheError if the bucket does not exist or 'item' cannot
                 be serialized into JSON
        """
        # ensure bucket is present
        if not self.is_bucket(bucket_name):
            error_str = "Can't add item to '{0}', it doesn't exist.".format(bucket_name)
            Cache.log.error(error_str)
            raise CacheError(error_str)
        # append the item to the existing bucket contents
        existing_contents = self.get_bucket(bucket_name)
        existing_contents.append(item)
        try:
            string_content = json.dumps(existing_contents, indent=4)
        except TypeError as exc:
            Cache.log.error(exc)
            raise CacheError(exc)
        # write contents back
        with open(self.bucket_path(bucket_name), "w") as bucket_file:
            bucket_file.write(string_content)
            bucket_file.flush()

    def is_bucket(self, name):
        """
        :param name: String name of bucket to query
        :returns: Boolean status, true if the bucket exists
        """
        return name in self.get_bucket_names()

    def get_bucket_names(self):
        """
        :returns: list of Strings of all existing buckets in this cache
        """
        return [file_name.replace(".json", "") for file_name in os.listdir(self.directory)]
| [
"logging.getLogger",
"os.listdir",
"os.makedirs",
"os.access",
"json.dumps",
"argparse.ArgumentTypeError",
"os.path.isdir",
"json.load"
] | [((282, 309), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (299, 309), False, 'import logging\n'), ((669, 699), 'os.path.isdir', 'os.path.isdir', (['cache_directory'], {}), '(cache_directory)\n', (682, 699), False, 'import os\n'), ((947, 984), 'os.access', 'os.access', (['cache_directory', 'os.O_RDWR'], {}), '(cache_directory, os.O_RDWR)\n', (956, 984), False, 'import os\n'), ((1136, 1173), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['error_str'], {}), '(error_str)\n', (1162, 1173), False, 'import argparse\n'), ((2686, 2708), 'json.load', 'json.load', (['bucket_file'], {}), '(bucket_file)\n', (2695, 2708), False, 'import json\n'), ((4135, 4174), 'json.dumps', 'json.dumps', (['existing_contents'], {'indent': '(4)'}), '(existing_contents, indent=4)\n', (4145, 4174), False, 'import json\n'), ((734, 762), 'os.makedirs', 'os.makedirs', (['cache_directory'], {}), '(cache_directory)\n', (745, 762), False, 'import os\n'), ((2003, 2017), 'json.dumps', 'json.dumps', (['[]'], {}), '([])\n', (2013, 2017), False, 'import json\n'), ((4890, 4916), 'os.listdir', 'os.listdir', (['self.directory'], {}), '(self.directory)\n', (4900, 4916), False, 'import os\n'), ((857, 888), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['exc'], {}), '(exc)\n', (883, 888), False, 'import argparse\n')] |
# sqlite3 is part of python base install, still must import
import sqlite3
# Open (or create) the on-disk database file and grab a cursor, which is
# what actually executes queries against the connection.
connection = sqlite3.connect('data.db')
cursor = connection.cursor()

# Define the users table schema: integer id plus text username/password.
cursor.execute("CREATE TABLE users (id int, username text, password text)")

# Parameterized insert - sqlite substitutes each ? with a tuple element.
insert_query = "INSERT INTO users VALUES (?, ?, ?)"

# Store a single user row.
cursor.execute(insert_query, (1, 'jose', 'asdf'))

# Store several users in one call.
cursor.executemany(insert_query, [
    (2, 'jan', 'hfds'),
    (3, 'jakob', 'jdd'),
])

# Read everything back; the cursor is iterable like a list of row tuples.
for row in cursor.execute("SELECT * FROM users"):
    print(row)

# Commit to persist all changes, then close so the connection stops
# consuming resources.
connection.commit()
connection.close()
| [
"sqlite3.connect"
] | [((156, 182), 'sqlite3.connect', 'sqlite3.connect', (['"""data.db"""'], {}), "('data.db')\n", (171, 182), False, 'import sqlite3\n')] |
import os
import argparse
import torch
import numpy
import random
from datetime import datetime
def format_time():
    """Return the current local time formatted as ``MM-DD-HH:MM:SS``."""
    return datetime.now().strftime("%m-%d-%H:%M:%S")
def ensure_dir(path):
    """Create directory *path* (with parents) if it does not already exist.

    ``exist_ok=True`` closes the TOCTOU race in the original code: if
    another process created the directory between the existence check and
    ``makedirs``, this no longer raises ``FileExistsError``.
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
def str2bool(v):
    """Parse a boolean command-line flag for argparse.

    Accepts an actual bool, or the strings ``"true"``/``"false"`` in any
    case; anything else raises ``argparse.ArgumentTypeError``.

    BUGFIX: the original tested ``v.lower() in ('true')`` -- membership in
    a *string*, which matches any substring, so ``'t'``, ``'ru'`` or even
    ``'e'`` were accepted as True. Compare against the exact word instead.
    """
    if isinstance(v, bool):
        return v
    value = v.lower()
    if value == 'true':
        return True
    elif value == 'false':
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def set_seed(seed):
    """Seed every random number generator used in this project.

    Covers Python's ``random``, NumPy, and PyTorch (CPU and CUDA) so that
    runs are reproducible for a fixed *seed*.
    """
    # torch.backends.cudnn.deterministic = True ## this one is controversial
    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
| [
"torch.manual_seed",
"os.path.exists",
"os.makedirs",
"random.seed",
"argparse.ArgumentTypeError",
"datetime.datetime.now",
"numpy.random.seed",
"torch.cuda.manual_seed"
] | [((127, 141), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (139, 141), False, 'from datetime import datetime\n'), ((668, 691), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (685, 691), False, 'import torch\n'), ((696, 719), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (713, 719), False, 'import numpy\n'), ((724, 741), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (735, 741), False, 'import random\n'), ((746, 774), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (768, 774), False, 'import torch\n'), ((270, 290), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (284, 290), False, 'import os\n'), ((300, 317), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (311, 317), False, 'import os\n'), ((510, 563), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (536, 563), False, 'import argparse\n')] |
import os
import boto3
from boto3.s3.transfer import S3Transfer
import settings
class Uploader(object):
    """Amazon S3 File Uploader"""

    def __init__(self,
                 file_path,
                 s3_filepath=None,
                 s3_http_prefix=None,
                 s3_bucket=None,
                 s3_region=None,
                 aws_access_key=None,
                 aws_secret=None):
        # Any option left unset (or falsy) falls back to the project settings.
        self.file_path = file_path
        self.s3_filepath = s3_filepath or settings.S3_FILEPATH
        self.s3_http_prefix = s3_http_prefix or settings.S3_HTTP_PREFIX
        self.s3_bucket = s3_bucket or settings.AWS_BUCKET
        self.s3_region = s3_region or settings.AWS_REGION
        self.aws_access_key = aws_access_key or settings.AWS_ACCESS_KEY_ID
        self.aws_secret = aws_secret or settings.AWS_SECRET_ACCESS_KEY
        self.client = boto3.client(
            's3',
            aws_access_key_id=self.aws_access_key,
            aws_secret_access_key=self.aws_secret
        )

    # TODO: Implement error handling
    def upload(self, file_path=None):
        """Upload *file_path* (default: self.file_path) and return its URL."""
        target = file_path or self.file_path
        # Make sure the destination "directory" key exists first.
        self.__create_directory()
        file_name = os.path.basename(target)
        transfer = S3Transfer(self.client)
        transfer.upload_file(
            target,
            self.s3_bucket,
            self.s3_filepath + "/" + file_name
        )
        return "%s/%s/%s" % (self.s3_http_prefix, self.s3_filepath, file_name)

    def __create_directory(self, directory=None):
        """Create an empty S3 key ending in '/' that acts as a folder marker."""
        target_dir = directory or self.s3_filepath
        return self.client.put_object(
            Bucket=self.s3_bucket,
            Key="%s/" % target_dir,
        )
| [
"boto3.s3.transfer.S3Transfer",
"boto3.client",
"os.path.basename"
] | [((1289, 1389), 'boto3.client', 'boto3.client', (['"""s3"""'], {'aws_access_key_id': 'self.aws_access_key', 'aws_secret_access_key': 'self.aws_secret'}), "('s3', aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret)\n", (1301, 1389), False, 'import boto3\n'), ((1669, 1696), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (1685, 1696), False, 'import os\n'), ((1716, 1739), 'boto3.s3.transfer.S3Transfer', 'S3Transfer', (['self.client'], {}), '(self.client)\n', (1726, 1739), False, 'from boto3.s3.transfer import S3Transfer\n')] |
###############################################################################
# Copyright (c) 2019 Uber Technologies, Inc. #
# #
# Licensed under the Uber Non-Commercial License (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at the root directory of this project. #
# #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import math
import sys
from copy import deepcopy
import gpytorch
import numpy as np
import torch
from .gp import train_gp
from .turbo_1 import Turbo1
from .utils import from_unit_cube, latin_hypercube, to_unit_cube
class TurboM(Turbo1):
    """The TuRBO-m algorithm.
    Parameters
    ----------
    f : function handle
    lb : Lower variable bounds, numpy.array, shape (d,).
    ub : Upper variable bounds, numpy.array, shape (d,).
    n_init : Number of initial points *FOR EACH TRUST REGION* (2*dim is recommended), int.
    max_evals : Total evaluation budget, int.
    n_trust_regions : Number of trust regions
    batch_size : Number of points in each batch, int.
    verbose : If you want to print information about the optimization progress, bool.
    use_ard : If you want to use ARD for the GP kernel.
    max_cholesky_size : Largest number of training points where we use Cholesky, int
    n_training_steps : Number of training steps for learning the GP hypers, int
    min_cuda : We use float64 on the CPU if we have this or fewer datapoints
    device : Device to use for GP fitting ("cpu" or "cuda")
    dtype : Dtype to use for GP fitting ("float32" or "float64")
    Example usage:
        turbo5 = TurboM(f=f, lb=lb, ub=ub, n_init=n_init, max_evals=max_evals, n_trust_regions=5)
        turbo5.optimize()  # Run optimization
        X, fX = turbo5.X, turbo5.fX  # Evaluated points

    NOTE(review): attributes such as self.dim, self.length_init/min/max,
    self.n_cand and _create_candidates() presumably come from Turbo1 --
    confirm against turbo_1.py.
    """
    def __init__(
        self,
        f,
        lb,
        ub,
        n_init,
        max_evals,
        n_trust_regions,
        batch_size=1,
        verbose=True,
        use_ard=True,
        max_cholesky_size=2000,
        n_training_steps=50,
        min_cuda=1024,
        device="cpu",
        dtype="float64",
    ):
        self.n_trust_regions = n_trust_regions
        super().__init__(
            f=f,
            lb=lb,
            ub=ub,
            n_init=n_init,
            max_evals=max_evals,
            batch_size=batch_size,
            verbose=verbose,
            use_ard=use_ard,
            max_cholesky_size=max_cholesky_size,
            n_training_steps=n_training_steps,
            min_cuda=min_cuda,
            device=device,
            dtype=dtype,
        )
        # Success/failure thresholds that trigger trust-region resizing.
        self.succtol = 3
        self.failtol = max(5, self.dim)
        # Very basic input checks
        assert n_trust_regions > 1 and isinstance(max_evals, int)
        assert max_evals > n_trust_regions * n_init, "Not enough trust regions to do initial evaluations"
        assert max_evals > batch_size, "Not enough evaluations to do a single batch"
        # Remember the hypers for trust regions we don't sample from
        self.hypers = [{} for _ in range(self.n_trust_regions)]
        # Initialize parameters
        self._restart()
    def _restart(self):
        # Reset per-trust-region bookkeeping (index map, counters, lengths).
        self._idx = np.zeros((0, 1), dtype=int)  # Track what trust region proposed what using an index vector
        self.failcount = np.zeros(self.n_trust_regions, dtype=int)
        self.succcount = np.zeros(self.n_trust_regions, dtype=int)
        self.length = self.length_init * np.ones(self.n_trust_regions)
    def _adjust_length(self, fX_next, i):
        # Grow/shrink trust region i based on whether the latest batch
        # improved on that region's best observed value.
        assert i >= 0 and i <= self.n_trust_regions - 1
        fX_min = self.fX[self._idx[:, 0] == i, 0].min()  # Target value
        if fX_next.min() < fX_min - 1e-3 * math.fabs(fX_min):
            self.succcount[i] += 1
            self.failcount[i] = 0
        else:
            self.succcount[i] = 0
            self.failcount[i] += len(fX_next)  # NOTE: Add size of the batch for this TR
        if self.succcount[i] == self.succtol:  # Expand trust region
            self.length[i] = min([2.0 * self.length[i], self.length_max])
            self.succcount[i] = 0
        elif self.failcount[i] >= self.failtol:  # Shrink trust region (we may have exceeded the failtol)
            self.length[i] /= 2.0
            self.failcount[i] = 0
    def _select_candidates(self, X_cand, y_cand):
        """Select candidates from samples from all trust regions.

        Greedily picks, for each batch slot k, the global minimizer over
        all regions' candidates, then marks it as taken (np.inf).
        NOTE: mutates y_cand in place.
        """
        assert X_cand.shape == (self.n_trust_regions, self.n_cand, self.dim)
        assert y_cand.shape == (self.n_trust_regions, self.n_cand, self.batch_size)
        assert X_cand.min() >= 0.0 and X_cand.max() <= 1.0 and np.all(np.isfinite(y_cand))
        X_next = np.zeros((self.batch_size, self.dim))
        idx_next = np.zeros((self.batch_size, 1), dtype=int)
        for k in range(self.batch_size):
            i, j = np.unravel_index(np.argmin(y_cand[:, :, k]), (self.n_trust_regions, self.n_cand))
            assert y_cand[:, :, k].min() == y_cand[i, j, k]
            X_next[k, :] = deepcopy(X_cand[i, j, :])
            idx_next[k, 0] = i
            assert np.isfinite(y_cand[i, j, k])  # Just to make sure we never select nan or inf
            # Make sure we never pick this point again
            y_cand[i, j, :] = np.inf
        return X_next, idx_next
    def optimize(self):
        """Run the full optimization process."""
        # Create initial points for each TR
        for i in range(self.n_trust_regions):
            X_init = latin_hypercube(self.n_init, self.dim)
            X_init = from_unit_cube(X_init, self.lb, self.ub)
            fX_init = np.array([[self.f(x)] for x in X_init])
            # Update budget and set as initial data for this TR
            self.X = np.vstack((self.X, X_init))
            self.fX = np.vstack((self.fX, fX_init))
            self._idx = np.vstack((self._idx, i * np.ones((self.n_init, 1), dtype=int)))
            self.n_evals += self.n_init
            if self.verbose:
                fbest = fX_init.min()
                print(f"TR-{i} starting from: {fbest:.4}")
                sys.stdout.flush()
        # Thompson sample to get next suggestions
        while self.n_evals < self.max_evals:
            # Generate candidates from each TR
            X_cand = np.zeros((self.n_trust_regions, self.n_cand, self.dim))
            y_cand = np.inf * np.ones((self.n_trust_regions, self.n_cand, self.batch_size))
            for i in range(self.n_trust_regions):
                idx = np.where(self._idx == i)[0]  # Extract all "active" indices
                # Get the points, values the active values
                X = deepcopy(self.X[idx, :])
                X = to_unit_cube(X, self.lb, self.ub)
                # Get the values from the standardized data
                fX = deepcopy(self.fX[idx, 0].ravel())
                # Don't retrain the model if the training data hasn't changed
                n_training_steps = 0 if self.hypers[i] else self.n_training_steps
                # Create new candidates
                X_cand[i, :, :], y_cand[i, :, :], self.hypers[i] = self._create_candidates(
                    X, fX, length=self.length[i], n_training_steps=n_training_steps, hypers=self.hypers[i]
                )
            # Select the next candidates
            X_next, idx_next = self._select_candidates(X_cand, y_cand)
            assert X_next.min() >= 0.0 and X_next.max() <= 1.0
            # Undo the warping
            X_next = from_unit_cube(X_next, self.lb, self.ub)
            # Evaluate batch
            fX_next = np.array([[self.f(x)] for x in X_next])
            # Update trust regions
            for i in range(self.n_trust_regions):
                idx_i = np.where(idx_next == i)[0]
                if len(idx_i) > 0:
                    self.hypers[i] = {}  # Remove model hypers
                    fX_i = fX_next[idx_i]
                    if self.verbose and fX_i.min() < self.fX.min() - 1e-3 * math.fabs(self.fX.min()):
                        n_evals, fbest = self.n_evals, fX_i.min()
                        print(f"{n_evals}) New best @ TR-{i}: {fbest:.4}")
                        sys.stdout.flush()
                    self._adjust_length(fX_i, i)
            # Update budget and append data
            self.n_evals += self.batch_size
            self.X = np.vstack((self.X, deepcopy(X_next)))
            self.fX = np.vstack((self.fX, deepcopy(fX_next)))
            self._idx = np.vstack((self._idx, deepcopy(idx_next)))
            # Check if any TR needs to be restarted
            for i in range(self.n_trust_regions):
                if self.length[i] < self.length_min:  # Restart trust region if converged
                    idx_i = self._idx[:, 0] == i
                    if self.verbose:
                        n_evals, fbest = self.n_evals, self.fX[idx_i, 0].min()
                        print(f"{n_evals}) TR-{i} converged to: : {fbest:.4}")
                        sys.stdout.flush()
                    # Reset length and counters, remove old data from trust region
                    self.length[i] = self.length_init
                    self.succcount[i] = 0
                    self.failcount[i] = 0
                    self._idx[idx_i, 0] = -1  # Remove points from trust region
                    self.hypers[i] = {}  # Remove model hypers
                    # Create a new initial design
                    X_init = latin_hypercube(self.n_init, self.dim)
                    X_init = from_unit_cube(X_init, self.lb, self.ub)
                    fX_init = np.array([[self.f(x)] for x in X_init])
                    # Print progress
                    if self.verbose:
                        n_evals, fbest = self.n_evals, fX_init.min()
                        print(f"{n_evals}) TR-{i} is restarting from: : {fbest:.4}")
                        sys.stdout.flush()
                    # Append data to local history
                    self.X = np.vstack((self.X, X_init))
                    self.fX = np.vstack((self.fX, fX_init))
                    self._idx = np.vstack((self._idx, i * np.ones((self.n_init, 1), dtype=int)))
                    self.n_evals += self.n_init
| [
"numpy.ones",
"numpy.where",
"numpy.zeros",
"numpy.isfinite",
"numpy.vstack",
"math.fabs",
"copy.deepcopy",
"numpy.argmin",
"sys.stdout.flush"
] | [((3611, 3638), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'int'}), '((0, 1), dtype=int)\n', (3619, 3638), True, 'import numpy as np\n'), ((3727, 3768), 'numpy.zeros', 'np.zeros', (['self.n_trust_regions'], {'dtype': 'int'}), '(self.n_trust_regions, dtype=int)\n', (3735, 3768), True, 'import numpy as np\n'), ((3794, 3835), 'numpy.zeros', 'np.zeros', (['self.n_trust_regions'], {'dtype': 'int'}), '(self.n_trust_regions, dtype=int)\n', (3802, 3835), True, 'import numpy as np\n'), ((5089, 5126), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.dim)'], {}), '((self.batch_size, self.dim))\n', (5097, 5126), True, 'import numpy as np\n'), ((5146, 5187), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1)'], {'dtype': 'int'}), '((self.batch_size, 1), dtype=int)\n', (5154, 5187), True, 'import numpy as np\n'), ((3877, 3906), 'numpy.ones', 'np.ones', (['self.n_trust_regions'], {}), '(self.n_trust_regions)\n', (3884, 3906), True, 'import numpy as np\n'), ((5417, 5442), 'copy.deepcopy', 'deepcopy', (['X_cand[i, j, :]'], {}), '(X_cand[i, j, :])\n', (5425, 5442), False, 'from copy import deepcopy\n'), ((5493, 5521), 'numpy.isfinite', 'np.isfinite', (['y_cand[i, j, k]'], {}), '(y_cand[i, j, k])\n', (5504, 5521), True, 'import numpy as np\n'), ((6130, 6157), 'numpy.vstack', 'np.vstack', (['(self.X, X_init)'], {}), '((self.X, X_init))\n', (6139, 6157), True, 'import numpy as np\n'), ((6180, 6209), 'numpy.vstack', 'np.vstack', (['(self.fX, fX_init)'], {}), '((self.fX, fX_init))\n', (6189, 6209), True, 'import numpy as np\n'), ((6666, 6721), 'numpy.zeros', 'np.zeros', (['(self.n_trust_regions, self.n_cand, self.dim)'], {}), '((self.n_trust_regions, self.n_cand, self.dim))\n', (6674, 6721), True, 'import numpy as np\n'), ((5050, 5069), 'numpy.isfinite', 'np.isfinite', (['y_cand'], {}), '(y_cand)\n', (5061, 5069), True, 'import numpy as np\n'), ((5265, 5291), 'numpy.argmin', 'np.argmin', (['y_cand[:, :, k]'], {}), '(y_cand[:, :, k])\n', (5274, 5291), True, 'import numpy as 
np\n'), ((6482, 6500), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6498, 6500), False, 'import sys\n'), ((6752, 6813), 'numpy.ones', 'np.ones', (['(self.n_trust_regions, self.n_cand, self.batch_size)'], {}), '((self.n_trust_regions, self.n_cand, self.batch_size))\n', (6759, 6813), True, 'import numpy as np\n'), ((7026, 7050), 'copy.deepcopy', 'deepcopy', (['self.X[idx, :]'], {}), '(self.X[idx, :])\n', (7034, 7050), False, 'from copy import deepcopy\n'), ((4122, 4139), 'math.fabs', 'math.fabs', (['fX_min'], {}), '(fX_min)\n', (4131, 4139), False, 'import math\n'), ((6886, 6910), 'numpy.where', 'np.where', (['(self._idx == i)'], {}), '(self._idx == i)\n', (6894, 6910), True, 'import numpy as np\n'), ((8112, 8135), 'numpy.where', 'np.where', (['(idx_next == i)'], {}), '(idx_next == i)\n', (8120, 8135), True, 'import numpy as np\n'), ((8744, 8760), 'copy.deepcopy', 'deepcopy', (['X_next'], {}), '(X_next)\n', (8752, 8760), False, 'from copy import deepcopy\n'), ((8805, 8822), 'copy.deepcopy', 'deepcopy', (['fX_next'], {}), '(fX_next)\n', (8813, 8822), False, 'from copy import deepcopy\n'), ((8871, 8889), 'copy.deepcopy', 'deepcopy', (['idx_next'], {}), '(idx_next)\n', (8879, 8889), False, 'from copy import deepcopy\n'), ((10350, 10377), 'numpy.vstack', 'np.vstack', (['(self.X, X_init)'], {}), '((self.X, X_init))\n', (10359, 10377), True, 'import numpy as np\n'), ((10408, 10437), 'numpy.vstack', 'np.vstack', (['(self.fX, fX_init)'], {}), '((self.fX, fX_init))\n', (10417, 10437), True, 'import numpy as np\n'), ((6260, 6296), 'numpy.ones', 'np.ones', (['(self.n_init, 1)'], {'dtype': 'int'}), '((self.n_init, 1), dtype=int)\n', (6267, 6296), True, 'import numpy as np\n'), ((8547, 8565), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8563, 8565), False, 'import sys\n'), ((9354, 9372), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9370, 9372), False, 'import sys\n'), ((10250, 10268), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), 
'()\n', (10266, 10268), False, 'import sys\n'), ((10496, 10532), 'numpy.ones', 'np.ones', (['(self.n_init, 1)'], {'dtype': 'int'}), '((self.n_init, 1), dtype=int)\n', (10503, 10532), True, 'import numpy as np\n')] |
import random
from tts import tts
def who_are_you():
    """Speak a randomly chosen self-introduction."""
    intro_lines = [
        "I'm Aida, your personal assistant",
        "Aida, I thought I told you before",
        "My Name is Aida and I'm a personal assistant",
    ]
    tts(random.choice(intro_lines))
def how_am_i():
    """Speak a randomly chosen compliment about the user."""
    compliments = [
        "You Seem Nice",
        "A Good Person",
        "You Sound Kind",
    ]
    tts(random.choice(compliments))
def skills_list():
    """Speak a summary of everything the assistant can do."""
    summary = "I can take notes for you, read the latest news headlines, play audio files on your computer, tell you the time & weather outside, gather information about anything from Wikipedia, search for anything online, check your e-mail & open Firefox"
    tts(summary)
def tell_joke():
    """Decline to tell a joke, in a randomly chosen way."""
    excuses = [
        "I'm afraid I'm not that funny",
        "Jokes are dead, look at memes instead",
        "No, I always forget the punch line",
    ]
    tts(random.choice(excuses))
def who_am_i():
    """Speak a randomly chosen remark about who the user is."""
    remarks = [
        "You sound like a nice person. I wish you all the best.",
        "Is that a philosophical question or do you suffer from amnesia?",
        "Obviously you are my user!",
    ]
    tts(random.choice(remarks))
def where_born():
    """Speak a randomly chosen reply about the assistant's origin."""
    origins = [
        "I wasn't exactly born. I'm a computer program remember?",
        "Technically inside a computer",
        "Computer programs aren't born, they are written by programmers",
    ]
    tts(random.choice(origins))
def how_are_you():
    """Speak a randomly chosen description of the assistant's mood."""
    moods = [
        "I'm fine, thanks for asking",
        "I'm OK",
        "Having A Great Day So Far!",
    ]
    tts(random.choice(moods))
def awake():
    """Speak a randomly chosen prompt asking how the assistant can help."""
    prompts = [
        "How Can I Help You ?",
        "Is There Something I Can Do For You ?",
        "Need Help With Something ?",
    ]
    tts(random.choice(prompts))
def undefined():
    """Log that the spoken input could not be recognized."""
    message = "Aida couldn't understand audio"
    print(message)
| [
"tts.tts",
"random.choice"
] | [((365, 617), 'tts.tts', 'tts', (['"""I can take notes for you, read the latest news headlines, play audio files on your computer, tell you the time & weather outside, gather information about anything from Wikipedia, search for anything online, check your e-mail & open Firefox"""'], {}), "('I can take notes for you, read the latest news headlines, play audio files on your computer, tell you the time & weather outside, gather information about anything from Wikipedia, search for anything online, check your e-mail & open Firefox'\n )\n", (368, 617), False, 'from tts import tts\n'), ((200, 223), 'random.choice', 'random.choice', (['messages'], {}), '(messages)\n', (213, 223), False, 'import random\n'), ((317, 339), 'random.choice', 'random.choice', (['replies'], {}), '(replies)\n', (330, 339), False, 'import random\n'), ((764, 784), 'random.choice', 'random.choice', (['jokes'], {}), '(jokes)\n', (777, 784), False, 'import random\n'), ((982, 1005), 'random.choice', 'random.choice', (['insights'], {}), '(insights)\n', (995, 1005), False, 'import random\n'), ((1207, 1229), 'random.choice', 'random.choice', (['answers'], {}), '(answers)\n', (1220, 1229), False, 'import random\n'), ((1346, 1369), 'random.choice', 'random.choice', (['feelings'], {}), '(feelings)\n', (1359, 1369), False, 'import random\n'), ((1505, 1529), 'random.choice', 'random.choice', (['responses'], {}), '(responses)\n', (1518, 1529), False, 'import random\n')] |
import pygame
import random
import sys
import model as m
import view as v
v.initialize_pygame()
def game_loop():
    """Run the falling-blocks game until the player collides with an enemy.

    Each frame: process keyboard input (left/right, clamped to the screen
    via the model's overflow checks), spawn/advance enemies, update the
    score and speed, detect collisions, and redraw everything through the
    view module.
    """
    #declare width and height of screen
    WIDTH = 800
    HEIGHT = 600
    #rgb codes of various colours
    RED = (255, 0, 0)
    YELLOW = (255, 255, 0)
    BLUE = (0, 0, 255)
    BACKGROUND_COLOUR = (0, 0, 0) #black
    #player variables
    player_size = 50
    player_pos = [WIDTH/2, HEIGHT-2*player_size]
    #enemy variables
    enemy_size = 50
    enemy_pos = [random.randint(0, WIDTH-enemy_size), 0]
    enemy_list = [enemy_pos]
    SPEED = 10
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    score = 0
    clock = pygame.time.Clock()
    myFont = pygame.font.SysFont("monospace", 35)
    game_over = False
    while not game_over:
        # Handle window close and left/right movement (one player_size step
        # per key press, only if it stays on screen).
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.KEYDOWN:
                x = player_pos[0]
                y = player_pos[1]
                if event.key == pygame.K_LEFT and m.not_overflow_left(player_pos, player_size, WIDTH):
                    x -= player_size
                elif event.key == pygame.K_RIGHT and m.not_overflow_right(player_pos, player_size, WIDTH):
                    x += player_size
                player_pos = [x, y]
        # Advance the game state, then redraw the frame.
        v.fill_background(screen, BACKGROUND_COLOUR)
        enemy_list = m.drop_enemies(enemy_list, WIDTH, enemy_size)
        enemy_list, score = m.update_enemy_positions(enemy_list, score, SPEED, HEIGHT)
        SPEED = m.set_level(score, SPEED)
        v.print_score(score, myFont, YELLOW, screen, WIDTH, HEIGHT)
        game_over = m.collision_check(enemy_list, player_pos, player_size, enemy_size)
        v.draw_enemies(enemy_list, screen, enemy_pos, enemy_size, BLUE)
        v.draw_player(screen, RED, player_pos, player_size)
        v.set_fps(clock)
        v.update_screen()
if __name__ == '__main__':
    # Entry point: start the game only when this file is run as a script.
    game_loop()
"model.collision_check",
"view.update_screen",
"view.draw_player",
"sys.exit",
"model.drop_enemies",
"pygame.display.set_mode",
"view.fill_background",
"random.randint",
"model.not_overflow_left",
"view.set_fps",
"view.print_score",
"view.initialize_pygame",
"pygame.time.Clock",
"pygame.fo... | [((76, 97), 'view.initialize_pygame', 'v.initialize_pygame', ([], {}), '()\n', (95, 97), True, 'import view as v\n'), ((588, 628), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (611, 628), False, 'import pygame\n'), ((657, 676), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (674, 676), False, 'import pygame\n'), ((691, 727), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""monospace"""', '(35)'], {}), "('monospace', 35)\n", (710, 727), False, 'import pygame\n'), ((489, 526), 'random.randint', 'random.randint', (['(0)', '(WIDTH - enemy_size)'], {}), '(0, WIDTH - enemy_size)\n', (503, 526), False, 'import random\n'), ((797, 815), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (813, 815), False, 'import pygame\n'), ((1351, 1395), 'view.fill_background', 'v.fill_background', (['screen', 'BACKGROUND_COLOUR'], {}), '(screen, BACKGROUND_COLOUR)\n', (1368, 1395), True, 'import view as v\n'), ((1426, 1471), 'model.drop_enemies', 'm.drop_enemies', (['enemy_list', 'WIDTH', 'enemy_size'], {}), '(enemy_list, WIDTH, enemy_size)\n', (1440, 1471), True, 'import model as m\n'), ((1500, 1558), 'model.update_enemy_positions', 'm.update_enemy_positions', (['enemy_list', 'score', 'SPEED', 'HEIGHT'], {}), '(enemy_list, score, SPEED, HEIGHT)\n', (1524, 1558), True, 'import model as m\n'), ((1575, 1600), 'model.set_level', 'm.set_level', (['score', 'SPEED'], {}), '(score, SPEED)\n', (1586, 1600), True, 'import model as m\n'), ((1618, 1677), 'view.print_score', 'v.print_score', (['score', 'myFont', 'YELLOW', 'screen', 'WIDTH', 'HEIGHT'], {}), '(score, myFont, YELLOW, screen, WIDTH, HEIGHT)\n', (1631, 1677), True, 'import view as v\n'), ((1699, 1765), 'model.collision_check', 'm.collision_check', (['enemy_list', 'player_pos', 'player_size', 'enemy_size'], {}), '(enemy_list, player_pos, player_size, enemy_size)\n', (1716, 1765), True, 'import model as m\n'), ((1783, 1846), 
'view.draw_enemies', 'v.draw_enemies', (['enemy_list', 'screen', 'enemy_pos', 'enemy_size', 'BLUE'], {}), '(enemy_list, screen, enemy_pos, enemy_size, BLUE)\n', (1797, 1846), True, 'import view as v\n'), ((1856, 1907), 'view.draw_player', 'v.draw_player', (['screen', 'RED', 'player_pos', 'player_size'], {}), '(screen, RED, player_pos, player_size)\n', (1869, 1907), True, 'import view as v\n'), ((1917, 1933), 'view.set_fps', 'v.set_fps', (['clock'], {}), '(clock)\n', (1926, 1933), True, 'import view as v\n'), ((1943, 1960), 'view.update_screen', 'v.update_screen', ([], {}), '()\n', (1958, 1960), True, 'import view as v\n'), ((875, 885), 'sys.exit', 'sys.exit', ([], {}), '()\n', (883, 885), False, 'import sys\n'), ((1063, 1114), 'model.not_overflow_left', 'm.not_overflow_left', (['player_pos', 'player_size', 'WIDTH'], {}), '(player_pos, player_size, WIDTH)\n', (1082, 1114), True, 'import model as m\n'), ((1206, 1258), 'model.not_overflow_right', 'm.not_overflow_right', (['player_pos', 'player_size', 'WIDTH'], {}), '(player_pos, player_size, WIDTH)\n', (1226, 1258), True, 'import model as m\n')] |
import numpy as np
import pathlib
import Vox
import os
import sys
sys.path.append("../base")
import JSONHelper
def save_output(batch_size, rootdir, samples, outputs, is_testtime=False):
for i in range(batch_size):
is_match = outputs["match"][i].item()
if True:
sdf_scan = samples["sdf_scan"][i].numpy()
df_cad = samples["df_cad"][i].numpy()
heatmap_pred = outputs["heatmap"][i].data.cpu().numpy()
grid2world_scan = samples["grid2world_scan"][i].numpy()
grid2world_cad = samples["grid2world_cad"][i].numpy()
basename_save = samples["basename_save"][i]
voxres_scan = samples["voxres_scan"][i]
voxres_cad = samples["voxres_cad"][i]
scale = outputs["scale"][i].data.cpu().numpy().tolist()
p_scan = samples["p_scan"][i].numpy().tolist()
savedir = rootdir + "/" + basename_save
pathlib.Path(savedir).mkdir(parents=False, exist_ok=True)
dims_cad = [df_cad.shape[1], df_cad.shape[2], df_cad.shape[3]]
vox = Vox.Vox(dims_cad, voxres_cad, grid2world_cad, df_cad, heatmap_pred)
Vox.write_vox(savedir + "/predict-heatmap.vox2", vox)
item = {"match" : is_match, "scale" : scale, "p_scan" : p_scan}
JSONHelper.write(savedir + "/predict.json", item)
force_symlink(savedir + "/input-center.vox", samples["filename_vox_center"][i])
if is_testtime:
continue
#if is_match > 0.95:
# print(savedir)
# print(scale)
# dim_scan = [sdf_scan.shape[1], sdf_scan.shape[2], sdf_scan.shape[3]]
# vox = Vox.Vox(dim_scan, voxres_scan, grid2world_scan, sdf_scan)
# Vox.write_vox(savedir + "/input-center.vox", vox)
# quit()
heatmap_gt = outputs["heatmap_gt"][i].data.cpu().numpy()
dim_cad = [df_cad.shape[1], df_cad.shape[2], df_cad.shape[3]]
vox = Vox.Vox(dim_cad, voxres_cad, grid2world_cad, df_cad, heatmap_gt)
Vox.write_vox(savedir + "/gt-heatmap.vox2", vox)
def force_symlink(linkname, target):
try:
os.symlink(target, linkname)
except:
os.remove(linkname)
os.symlink(target, linkname)
| [
"pathlib.Path",
"JSONHelper.write",
"os.symlink",
"Vox.write_vox",
"Vox.Vox",
"sys.path.append",
"os.remove"
] | [((67, 93), 'sys.path.append', 'sys.path.append', (['"""../base"""'], {}), "('../base')\n", (82, 93), False, 'import sys\n'), ((2292, 2320), 'os.symlink', 'os.symlink', (['target', 'linkname'], {}), '(target, linkname)\n', (2302, 2320), False, 'import os\n'), ((1135, 1202), 'Vox.Vox', 'Vox.Vox', (['dims_cad', 'voxres_cad', 'grid2world_cad', 'df_cad', 'heatmap_pred'], {}), '(dims_cad, voxres_cad, grid2world_cad, df_cad, heatmap_pred)\n', (1142, 1202), False, 'import Vox\n'), ((1215, 1268), 'Vox.write_vox', 'Vox.write_vox', (["(savedir + '/predict-heatmap.vox2')", 'vox'], {}), "(savedir + '/predict-heatmap.vox2', vox)\n", (1228, 1268), False, 'import Vox\n'), ((1358, 1407), 'JSONHelper.write', 'JSONHelper.write', (["(savedir + '/predict.json')", 'item'], {}), "(savedir + '/predict.json', item)\n", (1374, 1407), False, 'import JSONHelper\n'), ((2111, 2175), 'Vox.Vox', 'Vox.Vox', (['dim_cad', 'voxres_cad', 'grid2world_cad', 'df_cad', 'heatmap_gt'], {}), '(dim_cad, voxres_cad, grid2world_cad, df_cad, heatmap_gt)\n', (2118, 2175), False, 'import Vox\n'), ((2188, 2236), 'Vox.write_vox', 'Vox.write_vox', (["(savedir + '/gt-heatmap.vox2')", 'vox'], {}), "(savedir + '/gt-heatmap.vox2', vox)\n", (2201, 2236), False, 'import Vox\n'), ((2341, 2360), 'os.remove', 'os.remove', (['linkname'], {}), '(linkname)\n', (2350, 2360), False, 'import os\n'), ((2369, 2397), 'os.symlink', 'os.symlink', (['target', 'linkname'], {}), '(target, linkname)\n', (2379, 2397), False, 'import os\n'), ((970, 991), 'pathlib.Path', 'pathlib.Path', (['savedir'], {}), '(savedir)\n', (982, 991), False, 'import pathlib\n')] |
#
# file: gd_1d.py
#
# 1D example of GD
#
# RTK, 14-Feb-2021
# Last update: 14-Feb-2021
#
################################################################
import sys
import os
import numpy as np
import matplotlib.pylab as plt
# The function and its derivative
def f(x):
return 6*x**2 - 12*x + 3
def d(x):
return 12*x - 12
# Show the function, derivative, and minimum
x = np.linspace(-1,3,1000)
y = f(x)
plt.plot(x,y,color='#1f77b4')
x = np.linspace(0,3,10)
z = d(x)
plt.plot(x,z,color='#ff7f0e')
plt.plot([-1,3],[0,0],linestyle=(0,(1,1)),color='k')
plt.plot([1,1],[-10,25],linestyle=(0,(1,1)),color='k')
plt.plot([1,1],[f(1),f(1)],marker='o',color='#1f77b4')
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("gd_1d_plot.png", dpi=300)
#plt.show()
plt.close()
# Show a series of gradient descent steps
x = np.linspace(-1,3,1000)
plt.plot(x,f(x))
x = -0.9
eta = 0.03
for i in range(15):
plt.plot(x, f(x), marker='o', color='r')
x = x - eta * d(x)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("gd_1d_steps.png", dpi=300)
#plt.show()
plt.close()
print("Minimum at (%0.6f, %0.6f)" % (x, f(x)))
# Show oscillation if step size too large
x = np.linspace(0.75,1.25,1000)
plt.plot(x,f(x))
x = xold = 0.75
for i in range(14):
plt.plot([xold,x], [f(xold),f(x)], marker='o', linestyle='dotted', color='r')
xold = x
x = x - 0.15 * d(x)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("gd_1d_oscillating.png", dpi=300)
#plt.show()
| [
"matplotlib.pylab.savefig",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.xlabel",
"numpy.linspace",
"matplotlib.pylab.plot",
"matplotlib.pylab.close",
"matplotlib.pylab.ylabel"
] | [((392, 416), 'numpy.linspace', 'np.linspace', (['(-1)', '(3)', '(1000)'], {}), '(-1, 3, 1000)\n', (403, 416), True, 'import numpy as np\n'), ((424, 455), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {'color': '"""#1f77b4"""'}), "(x, y, color='#1f77b4')\n", (432, 455), True, 'import matplotlib.pylab as plt\n'), ((458, 479), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (469, 479), True, 'import numpy as np\n'), ((487, 518), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'z'], {'color': '"""#ff7f0e"""'}), "(x, z, color='#ff7f0e')\n", (495, 518), True, 'import matplotlib.pylab as plt\n'), ((517, 576), 'matplotlib.pylab.plot', 'plt.plot', (['[-1, 3]', '[0, 0]'], {'linestyle': '(0, (1, 1))', 'color': '"""k"""'}), "([-1, 3], [0, 0], linestyle=(0, (1, 1)), color='k')\n", (525, 576), True, 'import matplotlib.pylab as plt\n'), ((570, 631), 'matplotlib.pylab.plot', 'plt.plot', (['[1, 1]', '[-10, 25]'], {'linestyle': '(0, (1, 1))', 'color': '"""k"""'}), "([1, 1], [-10, 25], linestyle=(0, (1, 1)), color='k')\n", (578, 631), True, 'import matplotlib.pylab as plt\n'), ((680, 697), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (690, 697), True, 'import matplotlib.pylab as plt\n'), ((698, 715), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (708, 715), True, 'import matplotlib.pylab as plt\n'), ((716, 757), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)', 'w_pad': '(0)', 'h_pad': '(0)'}), '(pad=0, w_pad=0, h_pad=0)\n', (732, 757), True, 'import matplotlib.pylab as plt\n'), ((758, 796), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""gd_1d_plot.png"""'], {'dpi': '(300)'}), "('gd_1d_plot.png', dpi=300)\n", (769, 796), True, 'import matplotlib.pylab as plt\n'), ((809, 820), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (818, 820), True, 'import matplotlib.pylab as plt\n'), ((869, 893), 'numpy.linspace', 'np.linspace', (['(-1)', '(3)', '(1000)'], {}), 
'(-1, 3, 1000)\n', (880, 893), True, 'import numpy as np\n'), ((1019, 1036), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1029, 1036), True, 'import matplotlib.pylab as plt\n'), ((1037, 1054), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1047, 1054), True, 'import matplotlib.pylab as plt\n'), ((1055, 1096), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)', 'w_pad': '(0)', 'h_pad': '(0)'}), '(pad=0, w_pad=0, h_pad=0)\n', (1071, 1096), True, 'import matplotlib.pylab as plt\n'), ((1097, 1136), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""gd_1d_steps.png"""'], {'dpi': '(300)'}), "('gd_1d_steps.png', dpi=300)\n", (1108, 1136), True, 'import matplotlib.pylab as plt\n'), ((1149, 1160), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (1158, 1160), True, 'import matplotlib.pylab as plt\n'), ((1256, 1285), 'numpy.linspace', 'np.linspace', (['(0.75)', '(1.25)', '(1000)'], {}), '(0.75, 1.25, 1000)\n', (1267, 1285), True, 'import numpy as np\n'), ((1457, 1474), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1467, 1474), True, 'import matplotlib.pylab as plt\n'), ((1475, 1492), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1485, 1492), True, 'import matplotlib.pylab as plt\n'), ((1493, 1534), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)', 'w_pad': '(0)', 'h_pad': '(0)'}), '(pad=0, w_pad=0, h_pad=0)\n', (1509, 1534), True, 'import matplotlib.pylab as plt\n'), ((1535, 1580), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""gd_1d_oscillating.png"""'], {'dpi': '(300)'}), "('gd_1d_oscillating.png', dpi=300)\n", (1546, 1580), True, 'import matplotlib.pylab as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 14:33:25 2020
@author: <NAME>
"""
import json
# Ask the user for the cookie file and the site names used in the two formats.
name = input('Please give the name of cookie file,like xxxx.json: \n')
site = input("Please give the name of PH's website,like AdidasGB: \n")
site_pd = input(
    "Please give the name of PD's website,like adidas-gb(can be found in cookie file): \n")
try:
    # Load the PD-style cookie file and re-shape every entry for PH.
    with open(name, 'r') as f:
        pd_cookie = json.load(f)
    ph_cookie = [
        {"cookie": entry['cookie'], "site": site}
        for entry in pd_cookie[site_pd]
    ]
    with open('ph_cookie.json', 'w') as f:
        json.dump(ph_cookie, f)
    print('完成')
except Exception as e:
    print("失败,exception {} ".format(e))
| [
"json.load",
"json.dump"
] | [((423, 435), 'json.load', 'json.load', (['f'], {}), '(f)\n', (432, 435), False, 'import json\n'), ((644, 667), 'json.dump', 'json.dump', (['ph_cookie', 'f'], {}), '(ph_cookie, f)\n', (653, 667), False, 'import json\n')] |
import unittest
from src import wheel
from src import table
from src import bet
from src import outcome
from src import invalid_bet_exception
from src import roulette_game
class TestTable(unittest.TestCase):
    """Unit tests for table.Table bet placement and its string representations."""

    def setUp(self):
        """Build a wheel, a table, a game and four straight bets."""
        self.wheel = wheel.Wheel()
        self.table = table.Table(minimum=1, maximum=1000)
        self.game = roulette_game.RouletteGame(table=self.table, wheel=self.wheel)
        stakes = [
            (5, "Number 1"),
            (20, "Number 2"),
            (50, "Number 3"),
            (490, "Number 4"),
        ]
        self.bets = [
            bet.Bet(amount, outcome.Outcome(name, 35))
            for amount, name in stakes
        ]

    def test_placeBet(self):
        """Placing two bets within the table limits should succeed."""
        print("\nTesting placeBet")
        self.table.placeBet(self.bets[0], self.game)
        self.table.placeBet(self.bets[1], self.game)
        print(self.table)

    def test___str__(self):
        """str() of the table should reflect the bets placed on it."""
        print("\nTesting __str__")
        self.table.placeBet(self.bets[2], self.game)
        self.table.placeBet(self.bets[3], self.game)
        print(self.table)

    def test___repr__(self):
        """repr() of the table should reflect the bets placed on it."""
        self.table.placeBet(self.bets[2], self.game)
        self.table.placeBet(self.bets[3], self.game)
        print("\nTesting __repr__")
        print(self.table.__repr__())

    def tearDown(self):
        """Drop references so every test starts from a clean state."""
        self.table = None
        self.bets = None
if __name__ == '__main__':
    # Run the unittest test runner when this file is executed directly.
    unittest.main()
| [
"src.table.Table",
"src.wheel.Wheel",
"src.outcome.Outcome",
"unittest.main",
"src.roulette_game.RouletteGame"
] | [((1382, 1397), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1395, 1397), False, 'import unittest\n'), ((252, 265), 'src.wheel.Wheel', 'wheel.Wheel', ([], {}), '()\n', (263, 265), False, 'from src import wheel\n'), ((287, 323), 'src.table.Table', 'table.Table', ([], {'minimum': '(1)', 'maximum': '(1000)'}), '(minimum=1, maximum=1000)\n', (298, 323), False, 'from src import table\n'), ((345, 407), 'src.roulette_game.RouletteGame', 'roulette_game.RouletteGame', ([], {'table': 'self.table', 'wheel': 'self.wheel'}), '(table=self.table, wheel=self.wheel)\n', (371, 407), False, 'from src import roulette_game\n'), ((453, 484), 'src.outcome.Outcome', 'outcome.Outcome', (['"""Number 1"""', '(35)'], {}), "('Number 1', 35)\n", (468, 484), False, 'from src import outcome\n'), ((510, 541), 'src.outcome.Outcome', 'outcome.Outcome', (['"""Number 2"""', '(35)'], {}), "('Number 2', 35)\n", (525, 541), False, 'from src import outcome\n'), ((567, 598), 'src.outcome.Outcome', 'outcome.Outcome', (['"""Number 3"""', '(35)'], {}), "('Number 3', 35)\n", (582, 598), False, 'from src import outcome\n'), ((625, 656), 'src.outcome.Outcome', 'outcome.Outcome', (['"""Number 4"""', '(35)'], {}), "('Number 4', 35)\n", (640, 656), False, 'from src import outcome\n')] |
"""
Datetime operations.
date_to_str,
str_to_datetime,
datetime_to_str,
datetime_to_min,
min_to_datetime,
to_day_of_week_int,
working_day,
now_str,
deltatime_str,
timestamp_to_millis,
millis_to_timestamp,
time_to_str,
str_to_time,
elapsed_time_dt,
diff_time,
create_time_slot_in_minute,
generate_time_statistics,
threshold_time_statistics
"""
from __future__ import annotations
from datetime import datetime
import holidays
from pandas import DataFrame, Timestamp
from pymove.utils.constants import (
COUNT,
DATETIME,
LOCAL_LABEL,
MAX,
MEAN,
MIN,
PREV_LOCAL,
STD,
SUM,
THRESHOLD,
TIME_SLOT,
TIME_TO_PREV,
)
def date_to_str(dt: datetime) -> str:
    """
    Format the date part of a datetime as a '%Y-%m-%d' string.

    Parameters
    ----------
    dt : datetime
        Represents a date

    Returns
    -------
    str
        The date formatted as 'YYYY-MM-DD'

    Example
    -------
    >>> from datetime import datetime
    >>> date_to_str(datetime(2021, 4, 29, 11, 1, 29))
    '2021-04-29'
    """
    # f-string format specs on datetimes delegate to strftime.
    return f'{dt:%Y-%m-%d}'
def str_to_datetime(dt_str: str) -> datetime:
    """
    Parse a string into a datetime.

    Accepts either a bare date '%Y-%m-%d' or a full timestamp
    '%Y-%m-%d %H:%M:%S'; the format is chosen by the string length.

    Parameters
    ----------
    dt_str : str
        A datetime in string format, "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S"

    Returns
    -------
    datetime
        The parsed datetime

    Example
    -------
    >>> str_to_datetime('2020-06-29')
    datetime.datetime(2020, 6, 29, 0, 0)
    >>> str_to_datetime('2020-06-29 12:45:59')
    datetime.datetime(2020, 6, 29, 12, 45, 59)
    """
    # A bare date is exactly 10 characters ('YYYY-MM-DD').
    fmt = '%Y-%m-%d' if len(dt_str) == 10 else '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(dt_str, fmt)
def datetime_to_str(dt: datetime) -> str:
    """
    Format a datetime as a '%Y-%m-%d %H:%M:%S' string.

    Parameters
    ----------
    dt : datetime
        Represents a datetime in datetime format.

    Returns
    -------
    str
        The datetime formatted as 'YYYY-MM-DD HH:MM:SS'.

    Example
    -------
    >>> from datetime import datetime
    >>> datetime_to_str(datetime(2021, 4, 29, 14, 15, 29))
    '2021-04-29 14:15:29'
    """
    # f-string format specs on datetimes delegate to strftime.
    return f'{dt:%Y-%m-%d %H:%M:%S}'
def datetime_to_min(dt: datetime) -> int:
    """
    Converts a datetime to an int representation in minutes since the Unix epoch.

    To do the reverse use: min_to_datetime.

    Parameters
    ----------
    dt : datetime
        A naive datetime, interpreted as UTC

    Returns
    -------
    int
        Minutes elapsed since 1970-01-01 00:00:00

    Example
    -------
    >>> from datetime import datetime
    >>> datetime_to_min(datetime(2021, 4, 30, 13, 37))
    26996497
    """
    # The original code used datetime.utcfromtimestamp(0), which is
    # deprecated since Python 3.12; the naive epoch is spelled out instead
    # and yields exactly the same value.
    epoch = datetime(1970, 1, 1)
    # int() truncates toward zero, preserving the original behavior.
    return int((dt - epoch).total_seconds() / 60)
def min_to_datetime(minutes: int) -> datetime:
    """
    Converts an int representation in minutes since the Unix epoch to a datetime.

    To do the reverse use: datetime_to_min.

    Parameters
    ----------
    minutes : int
        Minutes elapsed since 1970-01-01 00:00:00

    Returns
    -------
    datetime
        The corresponding naive UTC datetime

    Example
    -------
    >>> min_to_datetime(26996497)
    datetime.datetime(2021, 4, 30, 13, 37)
    """
    # Local import: the module only imports `datetime` at the top level.
    from datetime import timedelta

    # Equivalent to the original datetime.utcfromtimestamp(minutes * 60),
    # which is deprecated since Python 3.12; this form additionally works
    # for negative minute values on all platforms.
    return datetime(1970, 1, 1) + timedelta(minutes=minutes)
def to_day_of_week_int(dt: datetime) -> int:
    """
    Return the weekday index of a datetime.

    Monday == 0, Tuesday == 1, ..., Sunday == 6.

    Parameters
    ----------
    dt : datetime
        Represents a datetime in datetime format.

    Returns
    -------
    int
        The weekday index in the range 0..6.

    Example
    -------
    >>> from datetime import datetime
    >>> to_day_of_week_int(datetime(2021, 5, 3))   # a Monday
    0
    >>> to_day_of_week_int(datetime(2021, 5, 7))   # a Friday
    4
    """
    return dt.weekday()
def working_day(
    dt: str | datetime,
    country: str = 'BR',
    state: str | None = None
) -> bool:
    """
    Tell whether a given day is a working day.

    A day is a working day when it is neither a holiday in the given
    country/state nor a Saturday or Sunday.

    Parameters
    ----------
    dt : str or datetime
        The day to check.
    country : str
        Country whose holiday calendar is consulted, by default 'BR'
    state: str
        State whose holiday calendar is consulted, by default None

    Returns
    -------
    boolean
        True when the day is a working day, False otherwise.

    References
    ----------
    Countries and States names available in https://pypi.org/project/holidays/
    """
    day = dt
    if isinstance(day, str):
        day = str_to_datetime(day)
    if isinstance(day, datetime):
        # Normalize to midnight so holiday membership is checked by date.
        day = datetime(day.year, day.month, day.day)
    if day in holidays.CountryHoliday(country=country, prov=None, state=state):
        return False
    # 5 == Saturday, 6 == Sunday
    return to_day_of_week_int(day) not in (5, 6)
def now_str() -> str:
    """
    Return the current datetime formatted as '%Y-%m-%d %H:%M:%S'.

    Returns
    -------
    str
        The current datetime as a string

    Examples
    --------
    >>> now_str()
    '2019-09-02 13:54:16'
    """
    # Inlined equivalent of datetime_to_str(datetime.now()).
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def deltatime_str(deltatime_seconds: float) -> str:
    """
    Convert an elapsed time in seconds to a compact hh/mm/ss string.

    Leading units are omitted when zero: 'HHh:MMm:SS.ss s', 'MMm:SS.ss s'
    or 'SS.ss s'.

    Parameters
    ----------
    deltatime_seconds : float
        The elapsed time in seconds

    Returns
    -------
    time_str : str
        The elapsed time formatted, e.g. '18m:02.72s'

    Examples
    --------
    >>> from pymove.utils.datetime import deltatime_str
    >>> deltatime_str(1082.7180936336517)
    '18m:02.72s'

    Notes
    -----
    The previous docstring example showed '18m:02.718s', which the
    '%05.2f' seconds format cannot produce; the example is now correct.
    https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module
    """
    hours, rem = divmod(deltatime_seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    if hours:
        return f'{int(hours):0>2}h:{int(minutes):0>2}m:{seconds:05.2f}s'
    if minutes:
        return f'{int(minutes):0>2}m:{seconds:05.2f}s'
    return f'{seconds:05.2f}s'
def timestamp_to_millis(timestamp: str) -> int:
    """
    Convert a local datetime string to a POSIX timestamp in milliseconds.

    This mirrors the millisecond epoch representation used in Java.

    Parameters
    ----------
    timestamp : str
        Represents a date

    Returns
    -------
    int
        Milliseconds since the Unix epoch

    Examples
    --------
    >>> timestamp_to_millis('2015-12-12 08:00:00.123000')
    1449907200123
    """
    # Timestamp.value is nanoseconds since the epoch; floor-divide to ms.
    nanos = Timestamp(timestamp).value
    return nanos // 1000000
def millis_to_timestamp(milliseconds: float) -> Timestamp:
    """
    Convert milliseconds since the Unix epoch to a pandas Timestamp.

    Parameters
    ----------
    milliseconds : int
        Milliseconds since the epoch.

    Returns
    -------
    Timestamp
        The corresponding timestamp.

    Examples
    --------
    >>> millis_to_timestamp(1449907200123)
    Timestamp('2015-12-12 08:00:00.123000')
    """
    ts = Timestamp(milliseconds, unit='ms')
    return ts
def time_to_str(time: Timestamp) -> str:
    """
    Format the time-of-day part of a timestamp as '%H:%M:%S'.

    Parameters
    ----------
    time : Timestamp
        Represents a time

    Returns
    -------
    str
        The time formatted as 'HH:MM:SS'

    Examples
    --------
    >>> from pandas import Timestamp
    >>> time_to_str(Timestamp('2015-12-12 08:00:00.123000'))
    '08:00:00'
    """
    # Format specs on Timestamp (a datetime subclass) delegate to strftime.
    return f'{time:%H:%M:%S}'
def str_to_time(dt_str: str) -> datetime:
    """
    Parse a time string in "%H:%M:%S" format into a datetime.

    The date part defaults to 1900-01-01, as strptime does.

    Parameters
    ----------
    dt_str : str
        A time in string format

    Returns
    -------
    datetime
        The parsed time as a datetime

    Examples
    --------
    >>> str_to_time("08:00:00")
    datetime.datetime(1900, 1, 1, 8, 0)
    """
    time_format = '%H:%M:%S'
    return datetime.strptime(dt_str, time_format)
def elapsed_time_dt(start_time: datetime) -> int:
    """
    Compute the time elapsed since a given start time, in milliseconds.

    Parameters
    ----------
    start_time : datetime
        The start of the time range to measure

    Returns
    -------
    int
        Milliseconds elapsed between start_time and the moment this
        function is called.
    """
    # Inlined equivalent of diff_time(start_time, datetime.now()).
    return int((datetime.now() - start_time).total_seconds() * 1000)
def diff_time(start_time: datetime, end_time: datetime) -> int:
    """
    Compute the elapsed time between two datetimes, in milliseconds.

    Parameters
    ----------
    start_time : datetime
        The start of the time range
    end_time : datetime
        The end of the time range

    Returns
    -------
    int
        Milliseconds elapsed from start_time to end_time.

    Examples
    --------
    >>> from datetime import datetime
    >>> diff_time(datetime(2020, 1, 1), datetime(2020, 1, 1, 0, 0, 1))
    1000
    """
    delta = end_time - start_time
    millis = delta.total_seconds() * 1000
    return int(millis)
def create_time_slot_in_minute(
    data: DataFrame,
    slot_interval: int = 15,
    initial_slot: int = 0,
    label_datetime: str = DATETIME,
    label_time_slot: str = TIME_SLOT,
    inplace: bool = False
) -> DataFrame | None:
    """
    Partition the time of day into slot windows.

    The slot index of a row is its minute-of-day divided by the slot
    width, offset by ``initial_slot``.

    Parameters
    ----------
    data : DataFrame
        dataframe with datetime column
    slot_interval : int, optional
        size of the slot window in minutes, by default 15
    initial_slot : int, optional
        initial window time, by default 0
    label_datetime : str, optional
        name of the datetime column, by default DATETIME
    label_time_slot : str, optional
        name of the time slot column, by default TIME_SLOT
    inplace : boolean, optional
        wether the operation will be done in the original dataframe,
        by default False

    Returns
    -------
    DataFrame
        data with converted time slots or None

    Raises
    ------
    ValueError
        If the datetime column is not of dtype datetime64[ns].

    Examples
    --------
    >>> from pymove.utils.datetime import create_time_slot_in_minute
    >>> create_time_slot_in_minute(data, inplace=False)
             lat          lon             datetime   id  time_slot
    0  39.984094   116.319236  2008-10-23 05:44:05    1         22
    1  39.984198   116.319322  2008-10-23 05:56:06    1         23
    """
    if data.dtypes[label_datetime] != 'datetime64[ns]':
        # Fixed typo in the error message ('colum' -> 'column').
        raise ValueError(f'{label_datetime} column must be of type datetime')
    if not inplace:
        data = data.copy()
    # Minute of day (0..1439), then the integer slot index.
    minute_day = data[label_datetime].dt.hour * 60 + data[label_datetime].dt.minute
    data[label_time_slot] = minute_day // slot_interval + initial_slot
    if not inplace:
        return data
def generate_time_statistics(
    data: DataFrame,
    local_label: str = LOCAL_LABEL
):
    """
    Compute transition-time statistics for each pair of adjacent local labels.

    Aggregates mean, std, min, max, sum and count of the time spent between
    pairwise local labels of a symbolic trajectory.

    Parameters
    ----------
    data : DataFrame
        The input trajectories data.
    local_label : str, optional
        The name of the feature with local id, by default LOCAL_LABEL

    Returns
    -------
    DataFrame
        One row per (local_label, prev_local) pair with the aggregated
        statistics; missing values are filled with 0.
    """
    aggregations = {TIME_TO_PREV: [MEAN, STD, MIN, MAX, SUM, COUNT]}
    stats = data.groupby([local_label, PREV_LOCAL]).agg(aggregations)
    # Drop the outer column level so the columns read mean/std/min/...
    stats.columns = stats.columns.droplevel(0)
    return stats.fillna(0).reset_index()
def _calc_time_threshold(seg_mean: float, seg_std: float) -> float:
"""
Auxiliary function for calculating the threshold.
Based on the mean and standard deviation of the time transitions
between adjacent places on discrete MoveDataFrame.
Parameters
----------
seg_mean : float
The time mean between two local labels (segment).
seg_std : float
The time mean between two local labels (segment).
Return
------
float
The threshold based on the mean and standard deviation
of transition time for the segment.
Examples
--------
>>> from pymove.utils.datetime import _calc_time_threshold
>>> print(_calc_time_threshold(12.3, 2.1))
14.4
>>> print(_calc_time_threshold(1, 1.5))
2.5
>>> print(_calc_time_threshold(-2, 2))
0.0
"""
threshold = seg_std + seg_mean
threshold = float(f'{threshold:.1f}')
return threshold
def threshold_time_statistics(
    df_statistics: DataFrame,
    mean_coef: float = 1.0,
    std_coef: float = 1.0,
    inplace: bool = False
) -> DataFrame | None:
    """
    Add a per-segment time threshold column to the statistics dataframe.

    For every pairwise local-label segment, a threshold is derived from
    the (scaled) mean and standard deviation of its transition times and
    stored in the threshold column.

    Parameters
    ----------
    df_statistics : DataFrame
        Time statistics of the pairwise local labels.
    mean_coef : float
        Multiplication coefficient of the mean time for the segment, by default 1.0
    std_coef : float
        Multiplication coefficient of sdt time for the segment, by default 1.0
    inplace : boolean, optional
        whether the operation will be done in the original dataframe,
        by default False

    Return
    ------
    DataFrame
        DataFrame of time statistics with the additional feature: threshold,
        which indicates the time limit of the trajectory segment, or None

    Example
    -------
    >>> from pymove.utils.datetime import generate_time_statistics
    >>> df
      local_label  prev_local  time_to_prev  id
    0       house         NaN           NaN   1
    1      market       house         720.0   1
    2      market      market           5.0   1
    3      market      market           1.0   1
    4      school      market         844.0   1
    >>> statistics = generate_time_statistics(df)
    >>> statistics
      local_label  prev_local   mean       std    min    max    sum  count
    0       house      market  844.0  0.000000  844.0  844.0  844.0      1
    1      market       house  720.0  0.000000  720.0  720.0  720.0      1
    2      market      market    3.0  2.828427    1.0    5.0    6.0      2
    >>> threshold_time_statistics(statistics)
      local_label    prev_local     mean       std      min \
          max      sum    count    threshold
    0       house        market    844.0  0.000000    844.0 \
        844.0    844.0        1        844.0
    1      market         house    720.0  0.000000    720.0 \
        720.0    720.0        1        720.0
    2      market        market      3.0  2.828427      1.0 \
          5.0      6.0        2          5.8
    """
    # Work on a copy unless the caller asked for an in-place update.
    frame = df_statistics if inplace else df_statistics.copy()

    def _row_threshold(row):
        # Scale mean and std independently before combining them.
        return _calc_time_threshold(row[MEAN] * mean_coef, row[STD] * std_coef)

    frame[THRESHOLD] = frame.apply(_row_threshold, axis=1)
    return None if inplace else frame
| [
"datetime.datetime.utcfromtimestamp",
"holidays.CountryHoliday",
"datetime.datetime",
"datetime.datetime.strptime",
"datetime.datetime.now",
"pandas.Timestamp"
] | [((4317, 4356), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(minutes * 60)'], {}), '(minutes * 60)\n', (4342, 4356), False, 'from datetime import datetime\n'), ((8948, 8982), 'pandas.Timestamp', 'Timestamp', (['milliseconds'], {'unit': '"""ms"""'}), "(milliseconds, unit='ms')\n", (8957, 8982), False, 'from pandas import DataFrame, Timestamp\n'), ((9886, 9923), 'datetime.datetime.strptime', 'datetime.strptime', (['dt_str', '"""%H:%M:%S"""'], {}), "(dt_str, '%H:%M:%S')\n", (9903, 9923), False, 'from datetime import datetime\n'), ((2172, 2209), 'datetime.datetime.strptime', 'datetime.strptime', (['dt_str', '"""%Y-%m-%d"""'], {}), "(dt_str, '%Y-%m-%d')\n", (2189, 2209), False, 'from datetime import datetime\n'), ((2235, 2281), 'datetime.datetime.strptime', 'datetime.strptime', (['dt_str', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(dt_str, '%Y-%m-%d %H:%M:%S')\n", (2252, 2281), False, 'from datetime import datetime\n'), ((6461, 6496), 'datetime.datetime', 'datetime', (['dt.year', 'dt.month', 'dt.day'], {}), '(dt.year, dt.month, dt.day)\n', (6469, 6496), False, 'from datetime import datetime\n'), ((6511, 6575), 'holidays.CountryHoliday', 'holidays.CountryHoliday', ([], {'country': 'country', 'prov': 'None', 'state': 'state'}), '(country=country, prov=None, state=state)\n', (6534, 6575), False, 'import holidays\n'), ((7035, 7049), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7047, 7049), False, 'from datetime import datetime\n'), ((10713, 10727), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10725, 10727), False, 'from datetime import datetime\n'), ((8449, 8469), 'pandas.Timestamp', 'Timestamp', (['timestamp'], {}), '(timestamp)\n', (8458, 8469), False, 'from pandas import DataFrame, Timestamp\n')] |
from nn_analysis.constants import ACTS_CONFIGS_PATH
from nn_analysis import utils
from nn_analysis import acts as ac
def main():
acts_configs = utils.load_config(ACTS_CONFIGS_PATH)
for acts_name in acts_configs.keys():
for version in acts_configs[acts_name].keys():
version = int(version)
ac.utils.assert_consistent_x(acts_name, version)
if __name__ == '__main__':
main() | [
"nn_analysis.utils.load_config",
"nn_analysis.acts.utils.assert_consistent_x"
] | [((149, 185), 'nn_analysis.utils.load_config', 'utils.load_config', (['ACTS_CONFIGS_PATH'], {}), '(ACTS_CONFIGS_PATH)\n', (166, 185), False, 'from nn_analysis import utils\n'), ((335, 383), 'nn_analysis.acts.utils.assert_consistent_x', 'ac.utils.assert_consistent_x', (['acts_name', 'version'], {}), '(acts_name, version)\n', (363, 383), True, 'from nn_analysis import acts as ac\n')] |
import threading
import MqttProcessor
import logging
import time
class Heartbeat(threading.Thread):
def __init__(self, config, pubQueue):
super().__init__()
self.config = config
self.pubQueue = pubQueue
# self.daemon = True
self.logger = logging.getLogger('Heartbeat')
def run(self):
cnt = 0
while True:
cnt += 1
pubItem = MqttProcessor.PublishItem(self.config.heartbeatTopic, str(cnt))
self.pubQueue.put(pubItem)
time.sleep(self.config.heartbeatPeriod)
| [
"logging.getLogger",
"time.sleep"
] | [((284, 314), 'logging.getLogger', 'logging.getLogger', (['"""Heartbeat"""'], {}), "('Heartbeat')\n", (301, 314), False, 'import logging\n'), ((529, 568), 'time.sleep', 'time.sleep', (['self.config.heartbeatPeriod'], {}), '(self.config.heartbeatPeriod)\n', (539, 568), False, 'import time\n')] |
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
# Create your models here.
class User(models.Model):
sex_choice = (
(0, '男'),
(1, '女'),
)
id = models.AutoField(primary_key=True) # 用户ID
role = models.ForeignKey('Role', on_delete=models.CASCADE, default=1) # 权限ID
avatar = models.CharField(max_length=2083) # 头像
real_name = models.CharField(max_length=20) # 真实姓名
student_id = models.CharField(max_length=11) # 学号 用于学生身份认证
card_id = models.CharField(max_length=18, unique=True) # 身份证 用于学生身份认证
sex = models.PositiveSmallIntegerField(choices=sex_choice) # 男0 女1
email = models.EmailField() # E-mail
is_active = models.BooleanField(default=0) # 账号是否已激活(邮箱验证)
name = models.CharField(max_length=20, unique=True) # 用户名
password = models.CharField(max_length=256) # 密码
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return self.name
# 登陆
def login(self, request):
request.session['user2_id'] = self.id
# 注销
def logout(self, request):
request.session.flush()
class Meta:
ordering = ["-create_at"]
verbose_name = "用户"
verbose_name_plural = "用户"
class Role(models.Model):
# permission = (
# ('normal_user', '普通用户'),
#
# )
id = models.AutoField(primary_key=True) # 角色id
role_name = models.CharField(max_length=255) # 角色名称
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
# role_permission = models.TextField(choices=permission, default='[]') # 角色拥有的权限ID(json数组形式存储permission_no)
role_permission = models.TextField(default='[]') # 角色拥有的权限ID(json数组形式存储permission_no)
def __str__(self):
return self.role_name
class Meta:
ordering = ["-create_at"]
verbose_name = "角色"
verbose_name_plural = "角色"
class Permission(models.Model):
id = models.AutoField(primary_key=True) # 权限id
permission_no = models.CharField(max_length=255, unique=True) # 权限编号
permission_name = models.CharField(max_length=255, blank=True) # 权限名称 用于说明权限(最好不要为空)
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return self.permission_name
class Meta:
ordering = ["-create_at"]
verbose_name = "权限"
verbose_name_plural = "权限"
class GoodsType(models.Model):
id = models.AutoField(primary_key=True) # 商品分类ID
name = models.CharField(max_length=20, unique=True) # 分类名称
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return self.name
class Meta:
ordering = ["-create_at"]
verbose_name = "物品分类"
verbose_name_plural = "物品分类"
class Goods(models.Model):
sale_way_choice = (
(0, '一口价'),
(1, '拍卖'),
)
is_new_choice = (
(0, '非全新'),
(1, '全新'),
)
status_choice = (
(0, '未出售'),
(1, '交易中'),
(2, '已出售'),
)
goods_id = models.AutoField(primary_key=True) # 商品 ID
title = models.CharField(max_length=30) # 标题
description = models.CharField(max_length=3000) # 商品描述
picture = models.TextField() # 图片URL
sale_way = models.PositiveSmallIntegerField(choices=sale_way_choice) # 出售方式
is_new = models.BooleanField(choices=is_new_choice) # 是否全新 0 非全新 1 全新
fixed_price = models.DecimalField(max_digits=8, decimal_places=2) # 一口价价格
purchase_price = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True) # 入手价格
goods_type_id = models.ForeignKey('GoodsType', on_delete=models.CASCADE, blank=True, null=True) # 商品ID
customer_id = models.ForeignKey('User', on_delete=models.CASCADE) # 购买者
status = models.PositiveSmallIntegerField(choices=status_choice) # 商品状态 0 未出售 1 交易中 2 已出售
visits = models.PositiveIntegerField(default=0) # 访问量
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return self.title
class Meta:
ordering = ["-create_at"]
verbose_name = "商品标题"
verbose_name_plural = "商品标题"
class GoodsMessage(models.Model):
msg_id = models.AutoField(primary_key=True) # 留言ID
goods_id = models.ForeignKey('Goods', models.CASCADE) # 商品ID
send_user_id = models.ForeignKey('User', models.CASCADE, related_name='it_send_this_message') # 发表用户ID
recv_user_id = models.ForeignKey('User', models.CASCADE, related_name='it_recv_this_message') # 接收用户ID
content = models.CharField(max_length=256) # 留言内容
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return self.content
class Meta:
ordering = ["-create_at"]
verbose_name = "商品留言"
verbose_name_plural = "商品留言"
class ChatRecord(models.Model):
msg_id = models.AutoField(primary_key=True) # 留言ID
goods_id = models.ForeignKey('Goods', models.CASCADE) # 商品ID
send_user_id = models.ForeignKey('User', models.CASCADE, related_name='it_send_this_record') # 发表用户ID
recv_user_id = models.ForeignKey('User', models.CASCADE, related_name='it_recv_this_record') # 接收用户ID
content = models.CharField(max_length=256) # 留言内容
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return self.content
class Meta:
ordering = ["-create_at"]
verbose_name = "聊天记录"
verbose_name_plural = "聊天记录"
class Follow(models.Model):
id = models.AutoField(primary_key=True) # 关注ID
fans_user_id = models.ForeignKey('User', models.CASCADE, related_name='its_fans') # 粉丝ID
user_id = models.ForeignKey('User', models.CASCADE, related_name='it_love') # 被关注者 ID
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return "关注ID:" + str(self.id)
class Meta:
ordering = ["-create_at"]
verbose_name = "关注者"
verbose_name_plural = "关注者"
class Collection(models.Model):
id = models.AutoField(primary_key=True) # 收藏记录ID
user_id = models.ForeignKey('User', models.CASCADE) # 用户ID
goods_id = models.ForeignKey('Goods', models.CASCADE) # 商品ID
create_at = models.DateTimeField(auto_now_add=True) # 创建时间
update_at = models.DateTimeField(auto_now=True) # 更新时间
def __str__(self):
return "收藏记录ID:" + str(self.id)
class Meta:
ordering = ["-create_at"]
verbose_name = "收藏记录"
verbose_name_plural = "收藏记录"
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.PositiveSmallIntege... | [((208, 242), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (224, 242), False, 'from django.db import models\n'), ((262, 324), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Role"""'], {'on_delete': 'models.CASCADE', 'default': '(1)'}), "('Role', on_delete=models.CASCADE, default=1)\n", (279, 324), False, 'from django.db import models\n'), ((346, 379), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2083)'}), '(max_length=2083)\n', (362, 379), False, 'from django.db import models\n'), ((402, 433), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (418, 433), False, 'from django.db import models\n'), ((459, 490), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)'}), '(max_length=11)\n', (475, 490), False, 'from django.db import models\n'), ((520, 564), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(18)', 'unique': '(True)'}), '(max_length=18, unique=True)\n', (536, 564), False, 'from django.db import models\n'), ((591, 643), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'sex_choice'}), '(choices=sex_choice)\n', (623, 643), False, 'from django.db import models\n'), ((665, 684), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (682, 684), False, 'from django.db import models\n'), ((711, 741), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)'}), '(default=0)\n', (730, 741), False, 'from django.db import models\n'), ((770, 814), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (786, 814), False, 'from django.db import models\n'), ((837, 869), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (853, 
869), False, 'from django.db import models\n'), ((892, 931), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (912, 931), False, 'from django.db import models\n'), ((956, 991), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (976, 991), False, 'from django.db import models\n'), ((1429, 1463), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1445, 1463), False, 'from django.db import models\n'), ((1488, 1520), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1504, 1520), False, 'from django.db import models\n'), ((1545, 1584), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1565, 1584), False, 'from django.db import models\n'), ((1609, 1644), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1629, 1644), False, 'from django.db import models\n'), ((1788, 1818), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""[]"""'}), "(default='[]')\n", (1804, 1818), False, 'from django.db import models\n'), ((2068, 2102), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2084, 2102), False, 'from django.db import models\n'), ((2131, 2176), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (2147, 2176), False, 'from django.db import models\n'), ((2207, 2251), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (2223, 2251), False, 'from django.db import models\n'), ((2291, 2330), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), 
'(auto_now_add=True)\n', (2311, 2330), False, 'from django.db import models\n'), ((2355, 2390), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2375, 2390), False, 'from django.db import models\n'), ((2615, 2649), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2631, 2649), False, 'from django.db import models\n'), ((2671, 2715), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (2687, 2715), False, 'from django.db import models\n'), ((2740, 2779), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2760, 2779), False, 'from django.db import models\n'), ((2804, 2839), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2824, 2839), False, 'from django.db import models\n'), ((3283, 3317), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (3299, 3317), False, 'from django.db import models\n'), ((3339, 3370), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (3355, 3370), False, 'from django.db import models\n'), ((3395, 3428), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3000)'}), '(max_length=3000)\n', (3411, 3428), False, 'from django.db import models\n'), ((3451, 3469), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3467, 3469), False, 'from django.db import models\n'), ((3494, 3551), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'sale_way_choice'}), '(choices=sale_way_choice)\n', (3526, 3551), False, 'from django.db import models\n'), ((3573, 3615), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'choices': 
'is_new_choice'}), '(choices=is_new_choice)\n', (3592, 3615), False, 'from django.db import models\n'), ((3653, 3704), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(8)', 'decimal_places': '(2)'}), '(max_digits=8, decimal_places=2)\n', (3672, 3704), False, 'from django.db import models\n'), ((3735, 3809), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(8)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=8, decimal_places=2, blank=True, null=True)\n', (3754, 3809), False, 'from django.db import models\n'), ((3838, 3917), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""GoodsType"""'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)'}), "('GoodsType', on_delete=models.CASCADE, blank=True, null=True)\n", (3855, 3917), False, 'from django.db import models\n'), ((3944, 3995), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""'], {'on_delete': 'models.CASCADE'}), "('User', on_delete=models.CASCADE)\n", (3961, 3995), False, 'from django.db import models\n'), ((4016, 4071), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'status_choice'}), '(choices=status_choice)\n', (4048, 4071), False, 'from django.db import models\n'), ((4111, 4149), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4138, 4149), False, 'from django.db import models\n'), ((4173, 4212), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4193, 4212), False, 'from django.db import models\n'), ((4237, 4272), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4257, 4272), False, 'from django.db import models\n'), ((4498, 4532), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', 
(4514, 4532), False, 'from django.db import models\n'), ((4556, 4598), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Goods"""', 'models.CASCADE'], {}), "('Goods', models.CASCADE)\n", (4573, 4598), False, 'from django.db import models\n'), ((4626, 4704), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {'related_name': '"""it_send_this_message"""'}), "('User', models.CASCADE, related_name='it_send_this_message')\n", (4643, 4704), False, 'from django.db import models\n'), ((4734, 4812), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {'related_name': '"""it_recv_this_message"""'}), "('User', models.CASCADE, related_name='it_recv_this_message')\n", (4751, 4812), False, 'from django.db import models\n'), ((4837, 4869), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (4853, 4869), False, 'from django.db import models\n'), ((4894, 4933), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4914, 4933), False, 'from django.db import models\n'), ((4958, 4993), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4978, 4993), False, 'from django.db import models\n'), ((5219, 5253), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (5235, 5253), False, 'from django.db import models\n'), ((5277, 5319), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Goods"""', 'models.CASCADE'], {}), "('Goods', models.CASCADE)\n", (5294, 5319), False, 'from django.db import models\n'), ((5347, 5424), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {'related_name': '"""it_send_this_record"""'}), "('User', models.CASCADE, related_name='it_send_this_record')\n", (5364, 5424), False, 'from django.db import models\n'), ((5454, 
5531), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {'related_name': '"""it_recv_this_record"""'}), "('User', models.CASCADE, related_name='it_recv_this_record')\n", (5471, 5531), False, 'from django.db import models\n'), ((5556, 5588), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (5572, 5588), False, 'from django.db import models\n'), ((5613, 5652), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5633, 5652), False, 'from django.db import models\n'), ((5677, 5712), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (5697, 5712), False, 'from django.db import models\n'), ((5930, 5964), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (5946, 5964), False, 'from django.db import models\n'), ((5992, 6058), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {'related_name': '"""its_fans"""'}), "('User', models.CASCADE, related_name='its_fans')\n", (6009, 6058), False, 'from django.db import models\n'), ((6081, 6146), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {'related_name': '"""it_love"""'}), "('User', models.CASCADE, related_name='it_love')\n", (6098, 6146), False, 'from django.db import models\n'), ((6174, 6213), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6194, 6213), False, 'from django.db import models\n'), ((6238, 6273), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (6258, 6273), False, 'from django.db import models\n'), ((6504, 6538), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (6520, 6538), False, 'from 
django.db import models\n'), ((6563, 6604), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""', 'models.CASCADE'], {}), "('User', models.CASCADE)\n", (6580, 6604), False, 'from django.db import models\n'), ((6628, 6670), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Goods"""', 'models.CASCADE'], {}), "('Goods', models.CASCADE)\n", (6645, 6670), False, 'from django.db import models\n'), ((6695, 6734), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6715, 6734), False, 'from django.db import models\n'), ((6759, 6794), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (6779, 6794), False, 'from django.db import models\n')] |
"""This module provides a writer for serialising data to the local filesystem."""
import json
import os
import re
from csv import DictWriter
from deprecation import deprecated
from hashlib import sha256
from lxml import etree
from pandas import DataFrame
from polymatheia import __version__
class JSONWriter():
"""The :class:`~polymatheia.data.writer.JSONWriter` writes records to the local filesystem as JSON files."""
def __init__(self, directory, id_path):
"""Create a new :class:`~polymatheia.data.writer.JSONWriter`.
For each record the identifier is used to create a directory structure. In the leaf directory the identifier
is then used as the filename.
:param directory: The base directory within which to create the files
:type directory: ``str``
:param id_path: The path used to access the identifier in the record
:type id_path: ``str`` or ``list``
"""
self._directory = directory
if isinstance(id_path, str):
self._id_path = id_path.split('.')
else:
self._id_path = id_path
def write(self, records):
"""Write the records to the file-system.
:param records: The records to write
:type records: Iterable of :class:`~polymatheia.data.NavigableDict`
"""
for record in records:
identifier = record.get(self._id_path)
if identifier:
hash = sha256(identifier.encode('utf-8'))
hex = hash.hexdigest()
file_path = os.path.join(
self._directory,
*[hex[idx:idx+4] for idx in range(0, len(hex), 4)],
hex)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f'{file_path}.json', 'w') as out_f:
json.dump(record, out_f)
@deprecated(deprecated_in='0.2.0', removed_in='1.0.0', current_version=__version__,
details='Replaced by the polymatheia.data.writer.JSONWriter')
class LocalWriter(JSONWriter):
"""Deprecated. Use :class:`~polymatheia.data.writer.JSONWriter`."""
pass
class XMLWriter():
"""The :class:`~polymatheia.data.writer.XMLWriter` writes records to the local filesystem as XML."""
def __init__(self, directory, id_path):
"""Create a new :class:`~polymatheia.data.writer.XMLWriter`.
For each record the identifier is used to create a directory structure. In the leaf directory the identifier
is then used as the filename.
:param directory: The base directory within which to create the files
:type directory: ``str``
:param id_path: The path used to access the identifier in the record
:type id_path: ``str`` or ``list``
"""
self._directory = directory
if isinstance(id_path, str):
self._id_path = id_path.split('.')
else:
self._id_path = id_path
def write(self, records):
"""Write the records to the file-system.
:param records: The records to write
:type records: Iterable of :class:`~polymatheia.data.NavigableDict`
"""
for record in records:
identifier = record.get(self._id_path)
if identifier:
hash = sha256(identifier.encode('utf-8'))
hex = hash.hexdigest()
file_path = os.path.join(
self._directory,
*[hex[idx:idx+4] for idx in range(0, len(hex), 4)],
hex)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f'{file_path}.xml', 'wb') as out_f:
root = etree.Element('record')
self._build_xml_doc(root, record)
out_f.write(etree.tostring(root))
def _build_xml_doc(self, parent, data):
"""Build the XML document tree.
Tag names are generated from the keys in the ``data``, ensuring that they are valid XML tag names.
Handles nested ``data`` trees by nesting elements and lists by generating the same tag repeatedly.
:param parent: The parent node to attach elements to
:type parent: :class:`~lxml.etree.Element`
:param data: The data to build the tree from
:type data: :class:`~polymatheia.data.NavigableDict`
"""
for key, value in data.items():
if isinstance(value, list):
for sub_value in value:
element = etree.Element(self._valid_xml_tag(key))
if isinstance(sub_value, dict):
self._build_xml_doc(element, sub_value)
else:
element.text = str(value)
parent.append(element)
elif isinstance(value, dict):
element = etree.Element(self._valid_xml_tag(key))
self._build_xml_doc(element, value)
parent.append(element)
else:
element = etree.Element(self._valid_xml_tag(key))
element.text = str(value)
parent.append(element)
def _valid_xml_tag(self, tag):
"""Generate a valid XML tag for the given ``tag``.
:param tag: The tag to generate a valid XML tag for
:type tag: ``str``
:return: A valid XML tag
:rtype: ``str``
"""
tag = re.sub(r'\s+', '-', tag)
tag = ''.join(re.findall(r'\w|\d|-|_|\.', tag))
while not re.match(r'\w', tag) or re.match(r'\d', tag):
tag = tag[1:]
if tag.lower().startswith('xml'):
tag = tag[3:]
return tag
class CSVWriter():
"""The :class:`~polymatheia.data.writer.CSVWriter` writes records into a CSV file.
The :class:`~polymatheia.data.writer.CSVWriter` assumes that no record contains any kind of nested data. If
it is passed nested data, then the behaviour is undefined.
"""
def __init__(self, target, default_value='', extras_action='ignore', column_names=None):
"""Create a new :class:`~polymatheia.data.writer.CSVWriter`.
:param target: The target to write the CSV to. Can either be a ``str`` filename or an existing file-like object
:param default_value: The default value to output if a record does not contain a value for a specified CSV
column name
:param extras_action: The action to take if a record contains keys that are not in the CVS fieldnames. Set to
``'ignore'`` to just ignore this (the default). Set to ``'raise'`` to raise a
``ValueError``.
:type extras_action: ``str``
:param fieldnames: The CSV column names to use. If ``None`` is specified, then the column names are derived
from the first record's keys.
:type fieldnames: ``list`` of ``str``
"""
self._target = target
self._default_value = default_value
self._extras_action = extras_action
self._column_names = column_names
def write(self, records):
"""Write the ``records`` to the CSV file.
:param records: The records to write
:type records: Iterable of :class:`~polymatheia.data.NavigableDict`
"""
if isinstance(self._target, str):
with open(self._target, 'w') as out_file:
self._write_csv(out_file, records)
else:
self._write_csv(self._target, records)
def _write_csv(self, file, records):
"""Perform the actual writing of the CSV file.
:param file: The file-like object to write to
:param records: The records to write
:type records: Iterable of :class:`~polymatheia.data.NavigableDict`
"""
csv_writer = None
for record in records:
if not csv_writer:
csv_writer = DictWriter(file,
fieldnames=record.keys() if self._column_names is None
else self._column_names,
restval=self._default_value,
extrasaction=self._extras_action)
csv_writer.writeheader()
csv_writer.writerow(record)
class PandasDFWriter(object):
    """The :class:`~polymatheia.data.writer.PandasDFWriter` writes records to a Pandas :class:`~pandas.DataFrame`.

    The :class:`~polymatheia.data.writer.PandasDFWriter` attempts to automatically coerce columns to integers or
    floats.

    The :class:`~polymatheia.data.writer.PandasDFWriter` assumes that no record contains any kind of nested data. If
    it is passed nested data, then the behaviour is undefined.
    """

    def __init__(self):
        """Create a new :class:`~polymatheia.data.writer.PandasDFWriter`."""
        # Holds the most recently built DataFrame (None until write() is called).
        self.df = None

    def write(self, records):
        """Write the ``records`` to the Pandas :class:`~pandas.DataFrame`.

        A column whose values all parse as integers is coerced to ``int``; failing
        that, a column whose values all parse as floats is coerced to ``float``;
        otherwise the values are kept unchanged. The resulting frame is also stored
        on :attr:`df` for later access.

        :param records: The records to write
        :type records: Iterable of :class:`~polymatheia.data.NavigableDict`
        :return: The Pandas dataframe
        :rtype: :class:`~pandas.DataFrame`
        """
        columns = {}
        for record in records:
            for key, value in record.items():
                columns.setdefault(key, []).append(value)
        for key, values in columns.items():
            # TypeError is caught alongside ValueError so that non-coercible values
            # such as None fall through to the raw values instead of crashing.
            try:
                columns[key] = [int(v) for v in values]
            except (ValueError, TypeError):
                try:
                    columns[key] = [float(v) for v in values]
                except (ValueError, TypeError):
                    pass
        # Fix: the frame was previously returned but never assigned to self.df.
        self.df = DataFrame(columns)
        return self.df
| [
"lxml.etree.Element",
"deprecation.deprecated",
"re.match",
"os.path.dirname",
"pandas.DataFrame",
"re.sub",
"re.findall",
"json.dump",
"lxml.etree.tostring"
] | [((1890, 2039), 'deprecation.deprecated', 'deprecated', ([], {'deprecated_in': '"""0.2.0"""', 'removed_in': '"""1.0.0"""', 'current_version': '__version__', 'details': '"""Replaced by the polymatheia.data.writer.JSONWriter"""'}), "(deprecated_in='0.2.0', removed_in='1.0.0', current_version=\n __version__, details='Replaced by the polymatheia.data.writer.JSONWriter')\n", (1900, 2039), False, 'from deprecation import deprecated\n'), ((5453, 5477), 're.sub', 're.sub', (['"""\\\\s+"""', '"""-"""', 'tag'], {}), "('\\\\s+', '-', tag)\n", (5459, 5477), False, 'import re\n'), ((9806, 9824), 'pandas.DataFrame', 'DataFrame', (['columns'], {}), '(columns)\n', (9815, 9824), False, 'from pandas import DataFrame\n'), ((5500, 5534), 're.findall', 're.findall', (['"""\\\\w|\\\\d|-|_|\\\\."""', 'tag'], {}), "('\\\\w|\\\\d|-|_|\\\\.', tag)\n", (5510, 5534), False, 'import re\n'), ((5576, 5596), 're.match', 're.match', (['"""\\\\d"""', 'tag'], {}), "('\\\\d', tag)\n", (5584, 5596), False, 'import re\n'), ((5552, 5572), 're.match', 're.match', (['"""\\\\w"""', 'tag'], {}), "('\\\\w', tag)\n", (5560, 5572), False, 'import re\n'), ((1737, 1763), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (1752, 1763), False, 'import os\n'), ((1862, 1886), 'json.dump', 'json.dump', (['record', 'out_f'], {}), '(record, out_f)\n', (1871, 1886), False, 'import json\n'), ((3594, 3620), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (3609, 3620), False, 'import os\n'), ((3726, 3749), 'lxml.etree.Element', 'etree.Element', (['"""record"""'], {}), "('record')\n", (3739, 3749), False, 'from lxml import etree\n'), ((3836, 3856), 'lxml.etree.tostring', 'etree.tostring', (['root'], {}), '(root)\n', (3850, 3856), False, 'from lxml import etree\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import json
from django import forms
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from .django_conf import APP_CODE, DATA_LIMIT_SIZE, DEFAULT_ACTIVITY_TYPE
from .utils import get_uuid, data_truncation
class CustomForm(forms.Form):

    def error_message(self, errors):
        """Join all field errors into a single ``;``-separated message string."""
        parts = ['%s: %s' % (field, force_unicode(messages[0]))
                 for field, messages in sorted(errors.items())]
        return ';'.join(parts)
class CustomField(forms.Field):
    """Field that normalises incoming values of arbitrary type to strings.

    Non-string values are serialised to JSON where possible; anything that
    cannot be JSON-encoded falls back to its ``%s`` string representation.
    """
    # NOTE: the previous no-op __init__ override (it only forwarded to super)
    # was removed; the inherited constructor behaves identically.
    def to_python(self, value):
        """Convert ``value`` to a string suitable for storage.

        :param value: the raw submitted value, of any type
        :return: ``''`` for falsy values, the value itself if it is already a
                 string, otherwise a JSON (or ``%s``) representation of it
        """
        if not value:
            return ''
        if not isinstance(value, basestring):
            try:
                value = json.dumps(value)
            except Exception:
                # Fall back for values that cannot be JSON-serialised.
                value = '%s' % value
        return value
class LogFieldHandler(CustomForm):
    """Validate and normalise the request parameters for one audit-log entry."""
    # Optional fields fall back to generated/configured defaults in clean().
    log_id = forms.CharField(label=_(u'记录唯一标识'), required=False)
    app_code = forms.CharField(label=_(u'应用编码'), required=False)
    username = forms.CharField(label=_(u'用户名称'), required=True)
    activity_type = forms.IntegerField(label=_(u'活动类型'), required=False)
    activity_name = forms.CharField(label=_(u'活动名称'), required=True)
    request_params = CustomField(label=_(u'请求参数'), required=False)
    before_data = CustomField(label=_(u'活动前的数据'), required=False)
    after_data = CustomField(label=_(u'活动后的数据'), required=False)
    remarks = CustomField(label=_(u'其它信息'), required=False)
    data_limit_size = forms.IntegerField(label=_(u'存储数据大小'), required=False)
    def clean(self):
        """Build the final log record, filling in defaults and truncating large payloads.

        :return: dict with the normalised log fields
        """
        data = self.cleaned_data
        # Per-request size limit overrides the module-level default when supplied.
        data_limit_size = data['data_limit_size'] or DATA_LIMIT_SIZE
        return {
            'log_id': data['log_id'] or get_uuid(),
            'username': data['username'],
            'app_code': data['app_code'] or APP_CODE,
            'activity_type': data['activity_type'] or DEFAULT_ACTIVITY_TYPE,
            'activity_name': data['activity_name'],
            'request_params': data['request_params'],
            # Large before/after/remarks payloads are truncated to the size limit.
            'before_data': data_truncation(data['before_data'], data_limit_size=data_limit_size),
            'after_data': data_truncation(data['after_data'], data_limit_size=data_limit_size),
            'remarks': data_truncation(data['remarks'], data_limit_size=data_limit_size)
        }
| [
"json.dumps",
"django.utils.encoding.force_unicode",
"django.utils.translation.ugettext_lazy"
] | [((1800, 1812), 'django.utils.translation.ugettext_lazy', '_', (['u"""记录唯一标识"""'], {}), "(u'记录唯一标识')\n", (1801, 1812), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1867, 1877), 'django.utils.translation.ugettext_lazy', '_', (['u"""应用编码"""'], {}), "(u'应用编码')\n", (1868, 1877), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1932, 1942), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户名称"""'], {}), "(u'用户名称')\n", (1933, 1942), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2004, 2014), 'django.utils.translation.ugettext_lazy', '_', (['u"""活动类型"""'], {}), "(u'活动类型')\n", (2005, 2014), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2074, 2084), 'django.utils.translation.ugettext_lazy', '_', (['u"""活动名称"""'], {}), "(u'活动名称')\n", (2075, 2084), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2140, 2150), 'django.utils.translation.ugettext_lazy', '_', (['u"""请求参数"""'], {}), "(u'请求参数')\n", (2141, 2150), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2204, 2216), 'django.utils.translation.ugettext_lazy', '_', (['u"""活动前的数据"""'], {}), "(u'活动前的数据')\n", (2205, 2216), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2269, 2281), 'django.utils.translation.ugettext_lazy', '_', (['u"""活动后的数据"""'], {}), "(u'活动后的数据')\n", (2270, 2281), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2331, 2341), 'django.utils.translation.ugettext_lazy', '_', (['u"""其它信息"""'], {}), "(u'其它信息')\n", (2332, 2341), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2406, 2418), 'django.utils.translation.ugettext_lazy', '_', (['u"""存储数据大小"""'], {}), "(u'存储数据大小')\n", (2407, 2418), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1575, 1592), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (1585, 1592), False, 'import json\n'), ((1190, 1209), 
'django.utils.encoding.force_unicode', 'force_unicode', (['v[0]'], {}), '(v[0])\n', (1203, 1209), False, 'from django.utils.encoding import force_unicode\n')] |
###
# Copyright 2022-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
"""Test file for checking Python linting."""
import logging
import sys
import unittest.mock
import mypy.api
import pydocstyle.cli
import pylint.lint
import pytest
@pytest.mark.filterwarnings(r"ignore:In astroid 3.0.0 NodeNG.statement\(\).*:DeprecationWarning")
def test_linting() -> None:
    """Check code and tests for Python linting errors."""
    result = pylint.lint.Run(
        ["--rcfile=../pyproject.toml", "../gdbmongo/", "../stubs/", "../tests/"], exit=False)
    # A non-zero message status means pylint emitted at least one complaint.
    assert result.linter.msg_status == 0, "Changes are needed to address linting issues"
def test_typechecking() -> None:
    """Check code and tests for Python type errors."""
    stdout_report, stderr_report, exit_status = mypy.api.run(
        ["--config-file=../pyproject.toml", "../gdbmongo/", "../stubs/", "../tests/"])

    # Surface mypy's own output so failures are diagnosable from the test log.
    if stdout_report:
        print("\nType checking report:\n", file=sys.stdout)
        print(stdout_report, file=sys.stdout)

    if stderr_report:
        print("\nError report:\n", file=sys.stderr)
        print(stderr_report, file=sys.stderr)

    assert exit_status == 0, "Changes are needed to address type annotation issues"
def test_docstrings() -> None:
    """Check docstrings for Python style errors."""
    fake_argv = ["", "--config=../pyproject.toml", "../gdbmongo/", "../stubs/", "../tests/"]
    with unittest.mock.patch("sys.argv", fake_argv):
        pydocstyle_logger = logging.getLogger("pydocstyle.utils")
        # pydocstyle sets its logger to level DEBUG, which would make pytest capture
        # and dump a large volume of log messages on any assertion failure. Stubbing
        # out Logger.setLevel() on pydocstyle's logger prevents that; pytest still
        # captures messages at level WARNING and above automatically.
        with unittest.mock.patch.object(pydocstyle_logger, "setLevel"):
            exit_code = pydocstyle.cli.run_pydocstyle()

    assert exit_code == 0, "Changes are needed to address docstring issues"
| [
"logging.getLogger",
"pytest.mark.filterwarnings"
] | [((761, 863), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:In astroid 3.0.0 NodeNG.statement\\\\(\\\\).*:DeprecationWarning"""'], {}), "(\n 'ignore:In astroid 3.0.0 NodeNG.statement\\\\(\\\\).*:DeprecationWarning')\n", (787, 863), False, 'import pytest\n'), ((2027, 2064), 'logging.getLogger', 'logging.getLogger', (['"""pydocstyle.utils"""'], {}), "('pydocstyle.utils')\n", (2044, 2064), False, 'import logging\n')] |
import sys
import mock
import pytest
def test_setup_not_present():
    """setup() should raise RuntimeError when no real device answers on the bus."""
    sys.modules['smbus'] = mock.MagicMock()
    from lsm303d import LSM303D

    sensor = LSM303D()
    with pytest.raises(RuntimeError):
        sensor.setup()
def test_setup_mock_present():
    """setup() should succeed when the fake SMBus device is installed."""
    from tools import SMBusFakeDevice

    fake_smbus = mock.Mock()
    fake_smbus.SMBus = SMBusFakeDevice
    sys.modules['smbus'] = fake_smbus

    from lsm303d import LSM303D
    sensor = LSM303D()
    sensor.setup()
| [
"mock.Mock",
"lsm303d.LSM303D",
"pytest.raises",
"mock.MagicMock"
] | [((96, 112), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (110, 112), False, 'import mock\n'), ((159, 168), 'lsm303d.LSM303D', 'LSM303D', ([], {}), '()\n', (166, 168), False, 'from lsm303d import LSM303D\n'), ((314, 325), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (323, 325), False, 'import mock\n'), ((439, 448), 'lsm303d.LSM303D', 'LSM303D', ([], {}), '()\n', (446, 448), False, 'from lsm303d import LSM303D\n'), ((178, 205), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (191, 205), False, 'import pytest\n')] |