code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import torch
import torch.nn as nn
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
class LogisticRegression(nn.Module):
    """Binary classifier: one linear layer followed by a sigmoid.

    Maps ``n_input_features`` inputs to a single probability in (0, 1).
    """

    def __init__(self, n_input_features):
        super().__init__()
        # Single output unit: the logit for the positive class.
        self.linear = nn.Linear(n_input_features, 1)

    def forward(self, x):
        # Squash the raw logit into a probability.
        return torch.sigmoid(self.linear(x))
def main():
    """Train a logistic-regression classifier on the breast-cancer dataset.

    Loads the sklearn breast-cancer data, standardizes the features, fits
    the model with SGD on binary cross-entropy, then prints test accuracy.
    """
    # 1. Prepare data
    bc = datasets.load_breast_cancer()
    X, y = bc.data, bc.target
    n_samples, n_features = X.shape
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)
    # Scale features to zero mean / unit variance. Fit on the training split
    # only so no test-set statistics leak into training.
    sc = StandardScaler()
    x_train = sc.fit_transform(x_train)
    x_test = sc.transform(x_test)
    x_train = torch.from_numpy(x_train.astype(np.float32))
    x_test = torch.from_numpy(x_test.astype(np.float32))
    y_train = torch.from_numpy(y_train.astype(np.float32))
    y_test = torch.from_numpy(y_test.astype(np.float32))
    # BCELoss expects targets shaped (N, 1), matching the model output.
    y_train = y_train.view(y_train.shape[0], 1)
    y_test = y_test.view(y_test.shape[0], 1)
    # 2. Model
    model = LogisticRegression(n_features)
    # 3. Loss and Optimizer
    learning_rate = 0.01
    criterion = nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # 4. Training Loop
    num_epochs = 100
    for epoch in range(num_epochs):
        # Forward pass
        y_predicted = model(x_train)
        loss = criterion(y_predicted, y_train)
        # Backward pass
        loss.backward()
        # Parameter update
        optimizer.step()
        # Bug fix: reset gradients every step. Without zero_grad() the
        # gradients of all previous epochs accumulate in .grad and every
        # update uses the running sum instead of the current gradient.
        optimizer.zero_grad()
        if (epoch + 1) % 10 == 0:
            print(f'epoch: {epoch + 1}, loss = {loss.item():.4f}')
    # Evaluate on the held-out split without tracking gradients.
    with torch.no_grad():
        y_predicted = model(x_test)
        y_predicted_cls = y_predicted.round()
        acc = y_predicted_cls.eq(y_test).sum() / float(y_test.shape[0])
        print(f'accuracy = {acc:.4f}')
if __name__ == '__main__':
    main()
| [
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_breast_cancer",
"sklearn.preprocessing.StandardScaler",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.no_grad"
] | [((558, 587), 'sklearn.datasets.load_breast_cancer', 'datasets.load_breast_cancer', ([], {}), '()\n', (585, 587), False, 'from sklearn import datasets\n'), ((695, 751), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(1234)'}), '(X, y, test_size=0.2, random_state=1234)\n', (711, 751), False, 'from sklearn.model_selection import train_test_split\n'), ((783, 799), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (797, 799), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1330, 1342), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1340, 1342), True, 'import torch.nn as nn\n'), ((374, 404), 'torch.nn.Linear', 'nn.Linear', (['n_input_features', '(1)'], {}), '(n_input_features, 1)\n', (383, 404), True, 'import torch.nn as nn\n'), ((1808, 1823), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1821, 1823), False, 'import torch\n')] |
from pyqchem.structure import Structure
import numpy as np
# Ethene parallel position
def dimer_ethene(distance, slide_y, slide_z):
    """Build an ethene dimer in a parallel arrangement.

    The second monomer is the first one displaced by ``distance`` along x
    and by ``slide_y`` / ``slide_z`` along y / z.

    Returns the dimer Structure and a dict of diabatization settings.
    """
    monomer = np.array([[0.0000000, 0.0000000, 0.6660120],
                        [0.0000000, 0.0000000, -0.6660120],
                        [0.0000000, 0.9228100, 1.2279200],
                        [0.0000000, -0.9228100, 1.2279200],
                        [0.0000000, -0.9228100, -1.2279200],
                        [0.0000000, 0.9228100, -1.2279200]])

    # Second copy shifted as a rigid body.
    shifted = monomer + np.array([distance, slide_y, slide_z])
    coordinates = np.vstack([monomer, shifted])

    symbols = ['C', 'C', 'H', 'H', 'H', 'H'] * 2

    molecule = Structure(coordinates=coordinates,
                         symbols=symbols,
                         charge=0)

    return molecule, {'state_threshold': 0.2,
                      'n_mon': 6}
# Tetracloroethene
def dimer_tetrafluoroethene(distance, slide_y, slide_z):
    """Build a tetrafluoroethene dimer.

    Axis mapping of the arguments: ``distance`` displaces the second
    monomer along z, ``slide_y`` along y and ``slide_z`` along x.

    Returns the dimer Structure and a dict of diabatization settings.
    """
    monomer = [[ 0.6624670117, 0.0000000000, 0.0000000000],
               [-0.6624670117, 0.0000000000, 0.0000000000],
               [ 1.3834661472, 1.0993897934, 0.0000000000],
               [ 1.3834661472, -1.0993897934, 0.0000000000],
               [-1.3834661472, -1.0993897934, 0.0000000000],
               [-1.3834661472, 1.0993897934, 0.0000000000]]

    symbols = ['C', 'C', 'F', 'F', 'F', 'F']

    # Second copy shifted as a rigid body (note the x/z axis swap above).
    shifted = np.array(monomer) + np.array([slide_z, slide_y, distance])
    coordinates = np.vstack([monomer, shifted])

    molecule = Structure(coordinates=coordinates,
                         symbols=symbols * 2,
                         charge=0)

    return molecule, {'state_threshold': 0.2,
                      'n_mon': len(monomer)}
# Tetracloroethene
def dimer_mix(distance, slide_y, slide_z):
    """Build a mixed ethene / tetrafluoroethene dimer.

    Axis mapping of the arguments (same as the other dimer builders):
    ``distance`` displaces the fluorinated monomer along z, ``slide_y``
    along y and ``slide_z`` along x.

    Returns the dimer Structure and a dict of diabatization settings.
    """
    ethene = [[ 0.6660120, 0.0000000, 0.0000000],
              [-0.6660120, 0.0000000, 0.0000000],
              [ 1.2279200, 0.9228100, 0.0000000],
              [ 1.2279200, -0.9228100, 0.0000000],
              [-1.2279200, -0.9228100, 0.0000000],
              [-1.2279200, 0.9228100, 0.0000000]]

    tfe = np.array([[ 0.6624670117, 0.0000000000, 0.0000000000],
                    [-0.6624670117, 0.0000000000, 0.0000000000],
                    [ 1.3834661472, 1.0993897934, 0.0000000000],
                    [ 1.3834661472, -1.0993897934, 0.0000000000],
                    [-1.3834661472, -1.0993897934, 0.0000000000],
                    [-1.3834661472, 1.0993897934, 0.0000000000]])

    # Shift the fluorinated monomer as a rigid body.
    tfe = tfe + np.array([slide_z, slide_y, distance])

    coordinates = np.vstack([ethene, tfe])
    symbols = ['C', 'C', 'H', 'H', 'H', 'H'] + ['C', 'C', 'F', 'F', 'F', 'F']

    molecule = Structure(coordinates=coordinates,
                         symbols=symbols,
                         charge=0)

    return molecule, {'state_threshold': 0.4,
                      'n_mon': 6}
"numpy.array",
"pyqchem.structure.Structure",
"numpy.vstack"
] | [((826, 847), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (834, 847), True, 'import numpy as np\n'), ((1048, 1109), 'pyqchem.structure.Structure', 'Structure', ([], {'coordinates': 'coordinates', 'symbols': 'symbols', 'charge': '(0)'}), '(coordinates=coordinates, symbols=symbols, charge=0)\n', (1057, 1109), False, 'from pyqchem.structure import Structure\n'), ((1748, 1765), 'numpy.array', 'np.array', (['monomer'], {}), '(monomer)\n', (1756, 1765), True, 'import numpy as np\n'), ((1995, 2025), 'numpy.vstack', 'np.vstack', (['[monomer, monomer2]'], {}), '([monomer, monomer2])\n', (2004, 2025), True, 'import numpy as np\n'), ((2042, 2107), 'pyqchem.structure.Structure', 'Structure', ([], {'coordinates': 'coordinates', 'symbols': '(symbols * 2)', 'charge': '(0)'}), '(coordinates=coordinates, symbols=symbols * 2, charge=0)\n', (2051, 2107), False, 'from pyqchem.structure import Structure\n'), ((3128, 3146), 'numpy.array', 'np.array', (['monomer2'], {}), '(monomer2)\n', (3136, 3146), True, 'import numpy as np\n'), ((3306, 3337), 'numpy.vstack', 'np.vstack', (['[monomer1, monomer2]'], {}), '([monomer1, monomer2])\n', (3315, 3337), True, 'import numpy as np\n'), ((3388, 3449), 'pyqchem.structure.Structure', 'Structure', ([], {'coordinates': 'coordinates', 'symbols': 'symbols', 'charge': '(0)'}), '(coordinates=coordinates, symbols=symbols, charge=0)\n', (3397, 3449), False, 'from pyqchem.structure import Structure\n')] |
#!/usr/bin/env python3
"""
Adaptive Affine Control:
My favorite myopic (not MPC, DP, or RL) control-law when absolutely nothing is known about your system except
that the control is additive and fully-actuated:
```
dx/dt = f(x,t) + u # drift f unknown, state x at time t known, choose control u to make x=r
u = W.dot(x) + b # policy is affine function of state
dW/dt = outer(k*(r-x), x) # parameters are adapted (learned online) to oppose the...
db/dt = k*(r-x) # ... gradient of the error-energy-rate d/dt((k/2)*(r-x)^2)
```
Try this with any crazy f. Even throw in a B(x,t) transformation on u (though no guarantees for that).
It's basically PID but with the PD gains evolving according to the regression-like dW/dt I gave.
PID with stationary PD gains fails when the f is reasonably nonlinear. This law still works.
Of course, additive-control fully-actuated systems pretty much only model lame low-level problems, but still neat.
"""
# Dependencies
import numpy as np
from matplotlib import pyplot
##################################################
# Controller
class C:
    """Adaptive affine controller: u = W x + b with W, b adapted online.

    Each call to ``u`` first nudges the parameters along k*(r - x)*dt and
    then evaluates the (freshly updated) affine policy.
    """

    def __init__(self, n, k):
        # State dimension and adaptation gain.
        self.n = int(n)
        self.k = float(k)
        # Affine policy parameters, initialized at zero.
        self.W = np.zeros((n, n), dtype=float)
        self.b = np.zeros(n, dtype=float)

    def u(self, r, x, dt):
        # Error-driven parameter step over this time slice.
        step = self.k * (r - x) * dt
        self.W += np.outer(step, x)
        self.b += step
        # Evaluate the affine control law with the adapted parameters.
        return self.W.dot(x) + self.b
##################################################
# Drift dynamic
n = 3
def f(x, t):
    """Drift term the controller never sees (a Lorenz-type chaotic field)."""
    dx0 = 10.0 * (x[1] - x[0])
    dx1 = x[0] * (28.0 - x[2]) - x[1]
    dx2 = x[0] * x[1] - 2.6 * x[2]
    return np.array([dx0, dx1, dx2])
# Actuator dynamic
# (needs to be identity for the Lyapunov proof, but might still work otherwise)
def B(x, t):
    """Diagonal, state-dependent actuator transformation applied to u."""
    return np.diag([x[1], 2 * x[0], 1.0])
##################################################
# Time grid for the simulation (fixed-step, 3 seconds).
dt = 0.001
T = np.arange(0.0, 3.0, dt)
# State trajectory; row i is the state at time T[i].
X = np.zeros((len(T), n), dtype=float)
X[0] = [-1.0, 2.0, 3.0]
# Control trajectory plus one adaptive controller instance (gain k = 1.0).
U = np.zeros((len(T), n), dtype=float)
c = C(n, 1.0)
# Constant setpoint reference, repeated for every time step.
R = np.array([[6.0, 7.0, -7.0]] * len(T))
##################################################
# Simulation: explicit (forward) Euler integration of dx/dt = f + B.u.
control = True
for i in range(len(T)-1):
    if control: U[i] = c.u(R[i], X[i], dt)
    dxdt = f(X[i], T[i]) + B(X[i], T[i]).dot(U[i])
    X[i+1] = X[i] + dxdt*dt
##################################################
# Plot: one subplot per state dimension, sharing the time axis.
fig = pyplot.figure()
if control: fig.suptitle("Controlled Response", fontsize=26)
else: fig.suptitle("Natural Response", fontsize=26)
ax = None
for i in range(n):
    ax = fig.add_subplot(n, 1, i+1, sharex=ax)
    ax.plot(T, X[:, i], color='b', linewidth=2, label="state")
    ax.plot(T, R[:, i], color='g', linewidth=3, linestyle=':', label="desire")
    # U[-1] is never computed (loop stops at len(T)-1), so plot one fewer point;
    # scaley=False keeps the action from rescaling the state axis.
    ax.plot(T[:-1], U[:-1, i], color='r', linewidth=0.5, label="action", scaley=False)
    ax.set_xlim([T[0], T[-1]])
    ax.set_ylabel("state "+str(i), fontsize=20)
    ax.grid(True)
ax.set_xlabel("time", fontsize=20)
ax.legend()
pyplot.show()
| [
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.outer",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1963, 1986), 'numpy.arange', 'np.arange', (['(0.0)', '(3.0)', 'dt'], {}), '(0.0, 3.0, dt)\n', (1972, 1986), True, 'import numpy as np\n'), ((2473, 2488), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (2486, 2488), False, 'from matplotlib import pyplot\n'), ((3051, 3064), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (3062, 3064), False, 'from matplotlib import pyplot\n'), ((1530, 1622), 'numpy.array', 'np.array', (['[10.0 * (x[1] - x[0]), x[0] * (28.0 - x[2]) - x[1], x[0] * x[1] - 2.6 * x[2]]'], {}), '([10.0 * (x[1] - x[0]), x[0] * (28.0 - x[2]) - x[1], x[0] * x[1] - \n 2.6 * x[2]])\n', (1538, 1622), True, 'import numpy as np\n'), ((1772, 1839), 'numpy.array', 'np.array', (['[[x[1], 0.0, 0.0], [0.0, 2 * x[0], 0.0], [0.0, 0.0, 1.0]]'], {}), '([[x[1], 0.0, 0.0], [0.0, 2 * x[0], 0.0], [0.0, 0.0, 1.0]])\n', (1780, 1839), True, 'import numpy as np\n'), ((1205, 1234), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'float'}), '((n, n), dtype=float)\n', (1213, 1234), True, 'import numpy as np\n'), ((1252, 1276), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (1260, 1276), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.outer', 'np.outer', (['ked', 'x'], {}), '(ked, x)\n', (1362, 1370), True, 'import numpy as np\n')] |
"""MongoDB instance classes and logic."""
import datetime
import json
import logging
import time
import pymongo
import requests
from concurrent import futures
from distutils.version import LooseVersion
from objectrocket import bases
from objectrocket import util
logger = logging.getLogger(__name__)
class MongodbInstance(bases.BaseInstance, bases.Extensible, bases.InstanceAclsInterface):
    """An ObjectRocket MongoDB service instance.

    :param dict instance_document: A dictionary representing the instance object, most likely
        coming from the ObjectRocket API.
    :param objectrocket.instances.Instances instances: An instance of
        :py:class:`objectrocket.instances.Instances`.
    """

    def __init__(self, instance_document, instances):
        super(MongodbInstance, self).__init__(
            instance_document=instance_document,
            instances=instances
        )

        # Bind required pseudo private attributes from API response document.
        # Smallest plans may not have an SSL/TLS connection string.
        self._ssl_connect_string = instance_document.get('ssl_connect_string')

        # Register any extensions for this class.
        self._register_extensions('objectrocket.instances.mongodb.MongodbInstance')

        # On-demand initialization of new_relic_stats (memoized by the property below).
        # TODO: (kmagge) we can get rid of this when we have newer version of stats
        self._new_relic_stats = None

    #####################
    # Public interface. #
    #####################
    @util.token_auto_auth
    def compaction(self, request_compaction=False):
        """Retrieve a report on, or request compaction for this instance.

        :param bool request_compaction: A boolean indicating whether or not to request compaction.
        """
        url = self._service_url + 'compaction/'

        if request_compaction:
            response = requests.post(url, **self._instances._default_request_kwargs)
        else:
            response = requests.get(url, **self._instances._default_request_kwargs)

        return response.json()

    def get_authenticated_connection(self, user, passwd, db='admin', ssl=True, **kwargs):
        """Get an authenticated connection to this instance.

        :param str user: The username to use for authentication.
        :param str passwd: The password to use for authentication.
        :param str db: The name of the database to authenticate against. Defaults to ``'admin'``.
        :param bool ssl: Use SSL/TLS if available for this instance. Defaults to ``True``.
        :param kwargs: Additional keyword arguments to pass to MongoClient.
        :raises: :py:class:`pymongo.errors.OperationFailure` if authentication fails.
        """
        # Attempt to establish an authenticated connection.
        try:
            connection = self.get_connection(ssl=ssl, **kwargs)
            connection[db].authenticate(user, passwd)
            return connection

        # Catch exception here for logging, then just re-raise.
        except pymongo.errors.OperationFailure as ex:
            logger.exception(ex)
            raise

    def get_connection(self, ssl=True, **kwargs):
        """Get a live connection to this instance.

        :param bool ssl: Use SSL/TLS if available for this instance.
        """
        return self._get_connection(ssl=ssl, **kwargs)

    @util.token_auto_auth
    def shards(self, add_shard=False):
        """Get a list of shards belonging to this instance.

        :param bool add_shard: A boolean indicating whether to add a new shard to the specified
            instance.
        """
        url = self._service_url + 'shards/'

        if add_shard:
            response = requests.post(url, **self._instances._default_request_kwargs)
        else:
            response = requests.get(url, **self._instances._default_request_kwargs)

        return response.json()

    def get_aggregate_database_stats(self):
        """Fetch the aggregated database statistics document for this instance."""
        return requests.get(self._service_url + 'aggregate_database_stats/',
                            **self._instances._default_request_kwargs).json()['data']

    @property
    @util.token_auto_auth
    def new_relic_stats(self):
        """Stats document for this instance, computed once and memoized.

        For sharded instances the shard stats are sampled twice, one second
        apart, so per-second rates can be derived; other instance types
        delegate to the ``new-relic-stats`` API resource.
        """
        if self._new_relic_stats is None:

            # If this is a sharded instance, fetch shard stats in parallel.
            if self.type == 'mongodb_sharded':
                shards = [Shard(self.name, self._service_url + 'shards/',
                                self._client, shard_doc)
                          for shard_doc in self.shards().get('data')]

                # First sample.
                fs = []
                with futures.ThreadPoolExecutor(len(shards)) as executor:
                    for shard in shards:
                        fs.append(executor.submit(shard.get_shard_stats))
                    futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
                stats_this_second = self._rollup_shard_stats_to_instance_stats(
                    {shard.name: future.result() for (shard, future) in zip(shards, fs)})

                # Power nap, then take the second sample for rate computation.
                time.sleep(1)

                fs = []
                with futures.ThreadPoolExecutor(len(shards)) as executor:
                    for shard in shards:
                        fs.append(executor.submit(shard.get_shard_stats))
                    futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
                stats_next_second = self._rollup_shard_stats_to_instance_stats(
                    {shard.name: future.result() for (shard, future) in zip(shards, fs)})

                self._new_relic_stats = self._compile_new_relic_stats(stats_this_second, stats_next_second)

            else:
                # Fetch stats like we did before (by hitting the new-relic-stats API resource).
                response = requests.get('{}{}'.format(self._url,
                                                        'new-relic-stats'),
                                        **self._instances._default_request_kwargs)
                self._new_relic_stats = json.loads(response.content).get(
                    'data') if response.status_code == 200 else {}

        return self._new_relic_stats

    def _rollup_shard_stats_to_instance_stats(self, shard_stats):
        """Roll all shard-level stats up into one instance-level document.

        :param dict shard_stats: dict of {shard_name: shard level stats}
        """
        instance_stats = {}
        opcounters_per_node = []

        # The instance lags as much as its worst-lagging shard.
        instance_stats['replication_lag'] = max(map(lambda s: s['replication_lag'], shard_stats.values()))

        aggregate_server_statistics = {}
        for shard_name, stats in shard_stats.items():
            for statistic_key in stats.get('shard_stats'):
                # 'connections' is deliberately never summed across shards;
                # the last shard's value wins (pre-existing behavior).
                if statistic_key != 'connections' and statistic_key in aggregate_server_statistics:
                    aggregate_server_statistics[statistic_key] = util.sum_values(
                        aggregate_server_statistics[statistic_key],
                        stats.get('shard_stats')[statistic_key])
                else:
                    aggregate_server_statistics[statistic_key] = stats.get('shard_stats')[statistic_key]

            # Collect per-member opcounters for this shard.
            opcounters_per_node.append({shard_name: {member: node_stats['opcounters']
                                                     for member, node_stats in stats.get('per_node_stats').items()}})

        instance_stats['opcounters_per_node'] = opcounters_per_node
        instance_stats['aggregate_server_statistics'] = aggregate_server_statistics
        return instance_stats

    def _compile_new_relic_stats(self, stats_this_second, stats_next_second):
        """Derive per-second metrics from two instance samples taken 1 second apart.

        :param dict stats_this_second:
        :param dict stats_next_second:
        :return: compiled instance stats that has metrics
            {'opcounters_per_node_per_second': {...},
             'server_statistics_per_second': {...},
             'aggregate_server_statistics': {...},
             'replication_lag': 0.0,
             'aggregate_database_statistics': {}
            }
        """
        # Per-second deltas of the integer counters in these sub-documents.
        server_statistics_per_second = {}
        opcounters_per_node_per_second = []
        for subdoc in ["opcounters", "network"]:
            first_doc = stats_this_second['aggregate_server_statistics'][subdoc]
            second_doc = stats_next_second['aggregate_server_statistics'][subdoc]
            keys = set(first_doc.keys()) | set(second_doc.keys())
            server_statistics_per_second[subdoc] = {key: int(second_doc[key]) - int(first_doc[key]) for key in keys if isinstance(first_doc[key], int)}

        # Same delta computation, but per replica-set member.
        for node1, node2 in zip(stats_this_second['opcounters_per_node'], stats_next_second['opcounters_per_node']):
            node_opcounters_per_second = {}
            for repl, members in node2.items():
                node_opcounters_per_second[repl] = {}
                for member, ops in members.items():
                    node_opcounters_per_second[repl][member] = {}
                    for op, count in ops.items():
                        node_opcounters_per_second[repl][member][op] = count - node1[repl][member][op]
            opcounters_per_node_per_second.append(node_opcounters_per_second)

        return {'opcounters_per_node_per_second': opcounters_per_node_per_second,
                'server_statistics_per_second': server_statistics_per_second,
                'aggregate_server_statistics': stats_next_second.get('aggregate_server_statistics'),
                'replication_lag': stats_next_second.get('replication_lag'),
                'aggregate_database_statistics': self.get_aggregate_database_stats()}

    @property
    def ssl_connect_string(self):
        """This instance's SSL connection string (may be None on small plans)."""
        return self._ssl_connect_string

    @util.token_auto_auth
    def get_stepdown_window(self):
        """Get information on this instance's stepdown window."""
        url = self._service_url + 'stepdown/'

        response = requests.get(url, **self._instances._default_request_kwargs)
        return response.json()

    @util.token_auto_auth
    def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True):
        """Set the stepdown window for this instance.

        Date times are assumed to be UTC, so use UTC date times.

        :param datetime.datetime start: The datetime which the stepdown window is to open.
        :param datetime.datetime end: The datetime which the stepdown window is to close.
        :param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
        :param bool scheduled: A boolean indicating whether or not to schedule stepdown.
        :param bool weekly: A boolean indicating whether or not to schedule compaction weekly.
        :raises TypeError: If ``start`` is not before ``end``, or the window exceeds one week.
        """
        # Ensure a logical start and endtime is requested.
        if not start < end:
            raise TypeError('Parameter "start" must occur earlier in time than "end".')

        # Ensure specified window is less than a week in length.
        week_delta = datetime.timedelta(days=7)
        if not ((end - start) <= week_delta):
            raise TypeError('Stepdown windows can not be longer than 1 week in length.')

        url = self._service_url + 'stepdown/'
        data = {
            'start': self._to_epoch_seconds(start),
            'end': self._to_epoch_seconds(end),
            'enabled': enabled,
            'scheduled': scheduled,
            'weekly': weekly,
        }

        response = requests.post(
            url,
            data=json.dumps(data),
            **self._instances._default_request_kwargs
        )

        return response.json()

    ######################
    # Private interface. #
    ######################
    @staticmethod
    def _to_epoch_seconds(dt):
        """Convert a datetime to integer seconds since the Unix epoch.

        Naive datetimes are interpreted as UTC, per this API's documented
        convention. This replaces ``int(dt.strftime('%s'))``, which is not
        portable (``%s`` is undefined on e.g. Windows) and interpreted naive
        datetimes in the server's local timezone instead of UTC.
        """
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=datetime.timezone.utc)
        return int(dt.timestamp())

    def _get_connection(self, ssl, **kwargs):
        """Get a live connection to this instance."""
        # Use SSL/TLS if requested and available.
        connect_string = self.connect_string
        if ssl and self.ssl_connect_string:
            connect_string = self.ssl_connect_string

        return pymongo.MongoClient(connect_string, **kwargs)
class Shard(bases.Extensible):
    """A single shard of an ObjectRocket MongoDB instance.

    :param dict instance_name: Name of the instance the shard belongs to
    :param string stats_base_url: Base url to fetch information and stats for this shard.
    :param objectrocket.client.Client or_client: handle to talk to OR API
    :param dict shard_document: a dictionary representing a mongodb shard
    """

    def __init__(self, instance_name, stats_base_url, or_client, shard_document):
        self._instance_name = instance_name
        self._stats_base_url = stats_base_url
        self._client = or_client
        # Pull the fields of interest out of the shard document.
        self._shardstr = shard_document['shardstr']
        self._plan = shard_document['plan']
        self._id = shard_document['id']
        self._name = shard_document['name']

    @property
    def instance_name(self):
        """Name of the parent instance."""
        return self._instance_name

    @property
    def shard_string(self):
        """The raw shard string taken from the shard document."""
        return self._shardstr

    @property
    def plan(self):
        """The ObjectRocket plan that the parent instance is on."""
        return self._plan

    @property
    def name(self):
        """This shard's name."""
        return self._name

    @property
    def id(self):
        """This shard's unique ID."""
        return self._id

    def get_shard_stats(self):
        """Fetch the stats document for this shard from the ObjectRocket API."""
        response = requests.get(
            self._stats_url,
            params={'include_stats': True},
            headers={'X-Auth-Token': self._client.auth._token},
        )
        return response.json()['data']['stats']

    @property
    def _stats_url(self):
        """ObjectRocket API endpoint that serves this shard's stats."""
        return '{0}{1}/'.format(self._stats_base_url, self.name)
| [
"logging.getLogger",
"json.loads",
"requests.post",
"json.dumps",
"requests.get",
"time.sleep",
"concurrent.futures.wait",
"pymongo.MongoClient",
"datetime.timedelta"
] | [((276, 303), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (293, 303), False, 'import logging\n'), ((10249, 10309), 'requests.get', 'requests.get', (['url'], {}), '(url, **self._instances._default_request_kwargs)\n', (10261, 10309), False, 'import requests\n'), ((11310, 11336), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (11328, 11336), False, 'import datetime\n'), ((12309, 12354), 'pymongo.MongoClient', 'pymongo.MongoClient', (['connect_string'], {}), '(connect_string, **kwargs)\n', (12328, 12354), False, 'import pymongo\n'), ((1889, 1950), 'requests.post', 'requests.post', (['url'], {}), '(url, **self._instances._default_request_kwargs)\n', (1902, 1950), False, 'import requests\n'), ((1988, 2048), 'requests.get', 'requests.get', (['url'], {}), '(url, **self._instances._default_request_kwargs)\n', (2000, 2048), False, 'import requests\n'), ((3706, 3767), 'requests.post', 'requests.post', (['url'], {}), '(url, **self._instances._default_request_kwargs)\n', (3719, 3767), False, 'import requests\n'), ((3805, 3865), 'requests.get', 'requests.get', (['url'], {}), '(url, **self._instances._default_request_kwargs)\n', (3817, 3865), False, 'import requests\n'), ((5117, 5130), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5127, 5130), False, 'import time\n'), ((11805, 11821), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (11815, 11821), False, 'import json\n'), ((3958, 4067), 'requests.get', 'requests.get', (["(self._service_url + 'aggregate_database_stats/')"], {}), "(self._service_url + 'aggregate_database_stats/', **self.\n _instances._default_request_kwargs)\n", (3970, 4067), False, 'import requests\n'), ((4837, 4902), 'concurrent.futures.wait', 'futures.wait', (['fs'], {'timeout': 'None', 'return_when': 'futures.ALL_COMPLETED'}), '(fs, timeout=None, return_when=futures.ALL_COMPLETED)\n', (4849, 4902), False, 'from concurrent import futures\n'), ((5394, 5459), 
'concurrent.futures.wait', 'futures.wait', (['fs'], {'timeout': 'None', 'return_when': 'futures.ALL_COMPLETED'}), '(fs, timeout=None, return_when=futures.ALL_COMPLETED)\n', (5406, 5459), False, 'from concurrent import futures\n'), ((13914, 14032), 'requests.get', 'requests.get', (['self._stats_url'], {'params': "{'include_stats': True}", 'headers': "{'X-Auth-Token': self._client.auth._token}"}), "(self._stats_url, params={'include_stats': True}, headers={\n 'X-Auth-Token': self._client.auth._token})\n", (13926, 14032), False, 'import requests\n'), ((6095, 6123), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (6105, 6123), False, 'import json\n')] |
from springleaf.utils.file_handler import FileHandler
from springleaf.utils.template_util import TemplateUtil
from .base_generator import BaseGenerator
class Generator(BaseGenerator):
    """Prepares template data for every Spring file type selected for generation."""

    def __init__(self, selected_file, files_to_create, attributes, structure):
        """Store the user's generation choices.

        :param selected_file: base (entity) name the files are generated for
        :param files_to_create: list of file types to generate (e.g. "Controller")
        :param attributes: attributes to render into the templates
        :param structure: name of the selected project structure
        """
        super().__init__()
        self.file = selected_file
        self.files = files_to_create
        self.attributes = attributes
        self.structure = structure

    def prepare_templates_data(self):
        """Instantiate one TemplateUtil per file to create.

        Reads the root package, accessor style (standard getters/setters vs
        Lombok), controller type and response wrapper from the config file,
        looks up each file type's sub-package in the project-structure
        definition, and wires up the dependency string each template needs.

        :return: list - List of TemplateUtil objects
        """
        # Root package so we can append the corresponding sub-package of each
        # file type defined in project_structures.json.
        root_package = FileHandler.get_from_config_file('package')
        # Accessor style, so templates can check for Standard vs Lombok.
        methods = FileHandler.get_from_config_file('methods')
        structure_content = FileHandler.get_project_structure_content(
            self.structure)
        controller_type = FileHandler.get_from_config_file('controller-type')
        response = FileHandler.get_from_config_file('response')
        autowire = self.autowire()

        template_utils = []
        for file_name in self.files:
            # Service implementations get the conventional "Impl" suffix.
            suffix = 'Impl' if file_name == 'Service' else ''
            package = root_package + '.' + structure_content[file_name.lower()]
            dependency = self._dependency(file_name, autowire, root_package,
                                          structure_content)
            template_utils.append(TemplateUtil(
                self.file + file_name + suffix,
                file_name + suffix,
                self.attributes,
                methods,
                package,
                controller_type,
                response,
                self.file,
                dependency))
        return template_utils

    def _dependency(self, file_name, autowire, root_package, structure_content):
        """Compute the dependency string a template should inject, or None.

        :param file_name: the file type ("Controller", "Service", ...)
        :param autowire: whether cross-references can be wired automatically
        :param root_package: the project's root package
        :param structure_content: sub-package lookup for the chosen structure
        """
        # Mappers always map between the entity and its DTO.
        if file_name == 'Mapper':
            return (root_package + '.' + structure_content['entity'] + ':' +
                    root_package + '.' + structure_content['dto'])
        if file_name == 'Service':
            if autowire:
                return root_package + '.' + structure_content['repository']
            # Without full autowiring, reference the repository by name only.
            return self.file + 'Repository'
        if autowire:
            if file_name == 'Controller':
                # '&' in the service sub-package is a placeholder for the
                # lower-cased entity name.
                return (root_package + '.' +
                        structure_content['service'].replace('&', self.file.lower()))
            if file_name in ('Repository', 'DTO'):
                return root_package + '.' + structure_content['entity']
        return None

    def autowire(self):
        """Return True when every file type is selected.

        With all 7 file types present, references between the generated
        classes can be wired automatically.

        :return: boolean - True if all are selected, else False
        """
        return len(self.files) == 7
| [
"springleaf.utils.file_handler.FileHandler.get_from_config_file",
"springleaf.utils.file_handler.FileHandler.get_project_structure_content"
] | [((787, 830), 'springleaf.utils.file_handler.FileHandler.get_from_config_file', 'FileHandler.get_from_config_file', (['"""package"""'], {}), "('package')\n", (819, 830), False, 'from springleaf.utils.file_handler import FileHandler\n'), ((968, 1011), 'springleaf.utils.file_handler.FileHandler.get_from_config_file', 'FileHandler.get_from_config_file', (['"""methods"""'], {}), "('methods')\n", (1000, 1011), False, 'from springleaf.utils.file_handler import FileHandler\n'), ((1076, 1133), 'springleaf.utils.file_handler.FileHandler.get_project_structure_content', 'FileHandler.get_project_structure_content', (['self.structure'], {}), '(self.structure)\n', (1117, 1133), False, 'from springleaf.utils.file_handler import FileHandler\n'), ((1173, 1224), 'springleaf.utils.file_handler.FileHandler.get_from_config_file', 'FileHandler.get_from_config_file', (['"""controller-type"""'], {}), "('controller-type')\n", (1205, 1224), False, 'from springleaf.utils.file_handler import FileHandler\n'), ((1244, 1288), 'springleaf.utils.file_handler.FileHandler.get_from_config_file', 'FileHandler.get_from_config_file', (['"""response"""'], {}), "('response')\n", (1276, 1288), False, 'from springleaf.utils.file_handler import FileHandler\n')] |
from mpd_connection import MPDConnection
from processing_metadata import process_metadata, join_metadata
from lastfm_api import LastFmMetadataGetter, LastFmApiException
from multiprocessing.managers import SyncManager
from multiprocessing import Event
import time
# Shared objects: module-level state that the SyncManager below serves to
# other processes (each register() call exposes one of these via a proxy).
shared_song_metadata = {}
shared_mpd_status = {}
shared_playlist = {}
control_panel_new_song_event = Event()
control_panel_new_player_metadata_event = Event()
webapp_new_status_event = Event()
DEBUG_MODE = True
def initializer(self):
    """Serve the manager's shared objects forever (blocking call).

    NOTE(review): despite the parameter name ``self`` this is a module-level
    function; it expects any object exposing a ``manager`` attribute
    (presumably a MetadataProvider) — confirm against the caller.
    """
    server = self.manager.get_server()
    server.serve_forever()
class MetadataProvider:
    """Shares MPD player state and Last.fm song metadata with other processes.

    Registers the module-level dictionaries/events with a SyncManager
    listening on port 50000 so separate consumer processes can attach,
    and wires MPD callbacks that refresh the shared state on changes.
    """

    def __init__(self):
        # Set shared objects
        # Each register() call exposes one module-level container (or event)
        # under a typed accessor on the manager.
        SyncManager.register('SharedSongMetadata', callable=lambda: shared_song_metadata)
        SyncManager.register('SharedMpdStatus', callable=lambda: shared_mpd_status)
        SyncManager.register('SharedPlaylist', callable=lambda: shared_playlist)
        SyncManager.register('ControlPanelNewSongEvent', callable=lambda: control_panel_new_song_event)
        SyncManager.register('ControlPanelNewPlayerMetadataEvent', callable=lambda: control_panel_new_player_metadata_event)
        SyncManager.register('WebappNewStatusEvent', callable=lambda: webapp_new_status_event)
        # Start the manager server; consumers connect to ('', 50000) with the
        # same authkey to obtain proxies for the objects registered above.
        self.manager = SyncManager(address=('', 50000), authkey=b'abc')
        self.manager.start()
        # Local proxy handles used by the callbacks below.
        self.song_metadata = self.manager.SharedSongMetadata()
        self.mpd_status = self.manager.SharedMpdStatus()
        self.current_playlist = self.manager.SharedPlaylist()
        self.webapp_new_status_event = self.manager.WebappNewStatusEvent()
        self.control_panel_new_song_event = self.manager.ControlPanelNewSongEvent()
        self.control_panel_new_player_metadata_event = self.manager.ControlPanelNewPlayerMetadataEvent()
        self.lastfm = LastFmMetadataGetter()
        # MPDConnection invokes the callbacks below when the player changes.
        self.mpd_connection = MPDConnection(host='localhost', port=6600,
                                            new_player_status_callback=self.new_player_status_callback,
                                            new_song_callback=self.new_song_callback,
                                            playlist_callback=self.playlist_change_callback)

    def new_song_callback(self):
        """Rebuild the shared song metadata when MPD reports a new song."""
        metadata_from_mpd = self.mpd_connection.get_current_song_metadata()
        processed_metadata = process_metadata(metadata_from_mpd)
        track = processed_metadata.get('track')
        artist = processed_metadata.get('artist')
        # album = processed_metadata.get('album')
        try:
            album_metadata, track_metadata = \
                self.lastfm.get_metadata(artist_name=artist, track_name=track)
        except LastFmApiException:
            # Best-effort: fall back to MPD-only metadata if Last.fm fails.
            track_metadata = {}
            album_metadata = {}
        joined_metadata = join_metadata(
            metadata_from_mpd=processed_metadata,
            track_metadata=track_metadata,
            album_metadata=album_metadata
        )
        self.song_metadata.update(joined_metadata)
        # NOTE(review): purpose of this fixed delay is unclear — presumably
        # it lets consumers settle before the "new song" event fires; confirm.
        time.sleep(3)
        self.control_panel_new_song_event.set()
        # if DEBUG_MODE:
        #     import pprint
        #     pprint.pprint(joined_metadata)
        # print(joined_metadata)

    def new_player_status_callback(self):
        """Refresh the shared MPD status and notify both consumer groups."""
        player_status = self.mpd_connection.get_player_status()
        self.mpd_status.update(player_status)
        self.webapp_new_status_event.set()
        self.control_panel_new_player_metadata_event.set()

    def playlist_change_callback(self):
        """Refresh the shared playlist when MPD reports a playlist change."""
        playlist = self.mpd_connection.get_playlist()
        # NOTE(review): unlike the other callbacks this *rebinds* the
        # attribute instead of calling .update() on the manager proxy, so
        # already-attached consumers may never see the new playlist — verify.
        self.current_playlist = playlist.copy()
        self.webapp_new_status_event.set()
if __name__ == '__main__':
    # Start the provider and keep the main process alive; all work happens
    # in the manager server and MPD callback threads.
    provider = MetadataProvider()
    while True:
        time.sleep(10000)
| [
"multiprocessing.managers.SyncManager.register",
"multiprocessing.Event",
"lastfm_api.LastFmMetadataGetter",
"processing_metadata.join_metadata",
"time.sleep",
"mpd_connection.MPDConnection",
"multiprocessing.managers.SyncManager",
"processing_metadata.process_metadata"
] | [((383, 390), 'multiprocessing.Event', 'Event', ([], {}), '()\n', (388, 390), False, 'from multiprocessing import Event\n'), ((433, 440), 'multiprocessing.Event', 'Event', ([], {}), '()\n', (438, 440), False, 'from multiprocessing import Event\n'), ((467, 474), 'multiprocessing.Event', 'Event', ([], {}), '()\n', (472, 474), False, 'from multiprocessing import Event\n'), ((672, 758), 'multiprocessing.managers.SyncManager.register', 'SyncManager.register', (['"""SharedSongMetadata"""'], {'callable': '(lambda : shared_song_metadata)'}), "('SharedSongMetadata', callable=lambda :\n shared_song_metadata)\n", (692, 758), False, 'from multiprocessing.managers import SyncManager\n'), ((762, 838), 'multiprocessing.managers.SyncManager.register', 'SyncManager.register', (['"""SharedMpdStatus"""'], {'callable': '(lambda : shared_mpd_status)'}), "('SharedMpdStatus', callable=lambda : shared_mpd_status)\n", (782, 838), False, 'from multiprocessing.managers import SyncManager\n'), ((846, 919), 'multiprocessing.managers.SyncManager.register', 'SyncManager.register', (['"""SharedPlaylist"""'], {'callable': '(lambda : shared_playlist)'}), "('SharedPlaylist', callable=lambda : shared_playlist)\n", (866, 919), False, 'from multiprocessing.managers import SyncManager\n'), ((927, 1027), 'multiprocessing.managers.SyncManager.register', 'SyncManager.register', (['"""ControlPanelNewSongEvent"""'], {'callable': '(lambda : control_panel_new_song_event)'}), "('ControlPanelNewSongEvent', callable=lambda :\n control_panel_new_song_event)\n", (947, 1027), False, 'from multiprocessing.managers import SyncManager\n'), ((1031, 1152), 'multiprocessing.managers.SyncManager.register', 'SyncManager.register', (['"""ControlPanelNewPlayerMetadataEvent"""'], {'callable': '(lambda : control_panel_new_player_metadata_event)'}), "('ControlPanelNewPlayerMetadataEvent', callable=lambda :\n control_panel_new_player_metadata_event)\n", (1051, 1152), False, 'from multiprocessing.managers import 
SyncManager\n'), ((1156, 1247), 'multiprocessing.managers.SyncManager.register', 'SyncManager.register', (['"""WebappNewStatusEvent"""'], {'callable': '(lambda : webapp_new_status_event)'}), "('WebappNewStatusEvent', callable=lambda :\n webapp_new_status_event)\n", (1176, 1247), False, 'from multiprocessing.managers import SyncManager\n'), ((1266, 1314), 'multiprocessing.managers.SyncManager', 'SyncManager', ([], {'address': "('', 50000)", 'authkey': "b'abc'"}), "(address=('', 50000), authkey=b'abc')\n", (1277, 1314), False, 'from multiprocessing.managers import SyncManager\n'), ((1813, 1835), 'lastfm_api.LastFmMetadataGetter', 'LastFmMetadataGetter', ([], {}), '()\n', (1833, 1835), False, 'from lastfm_api import LastFmMetadataGetter, LastFmApiException\n'), ((1866, 2068), 'mpd_connection.MPDConnection', 'MPDConnection', ([], {'host': '"""localhost"""', 'port': '(6600)', 'new_player_status_callback': 'self.new_player_status_callback', 'new_song_callback': 'self.new_song_callback', 'playlist_callback': 'self.playlist_change_callback'}), "(host='localhost', port=6600, new_player_status_callback=self.\n new_player_status_callback, new_song_callback=self.new_song_callback,\n playlist_callback=self.playlist_change_callback)\n", (1879, 2068), False, 'from mpd_connection import MPDConnection\n'), ((2331, 2366), 'processing_metadata.process_metadata', 'process_metadata', (['metadata_from_mpd'], {}), '(metadata_from_mpd)\n', (2347, 2366), False, 'from processing_metadata import process_metadata, join_metadata\n'), ((2779, 2897), 'processing_metadata.join_metadata', 'join_metadata', ([], {'metadata_from_mpd': 'processed_metadata', 'track_metadata': 'track_metadata', 'album_metadata': 'album_metadata'}), '(metadata_from_mpd=processed_metadata, track_metadata=\n track_metadata, album_metadata=album_metadata)\n', (2792, 2897), False, 'from processing_metadata import process_metadata, join_metadata\n'), ((2998, 3011), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3008, 
3011), False, 'import time\n'), ((3704, 3721), 'time.sleep', 'time.sleep', (['(10000)'], {}), '(10000)\n', (3714, 3721), False, 'import time\n')] |
#!/usr/bin/env python
import rospy
import tf
from nav_msgs.msg import Odometry
import numpy as np
import sys
import math
def qv_mult(q, v):
    """Rotate vector ``v`` by quaternion ``q``.

    Computes q * v * q^-1 and returns the resulting 4-element quaternion
    (the rotated vector lives in its first three components).  ``v`` is
    normalized first; a zero vector is passed through as-is.
    """
    if v == [0, 0, 0]:
        unit = [0.0, 0.0, 0.0]
    else:
        unit = tf.transformations.unit_vector(v)
    vector_quat = list(unit) + [0.0]
    conjugate = tf.transformations.quaternion_conjugate(q)
    return tf.transformations.quaternion_multiply(
        tf.transformations.quaternion_multiply(q, vector_quat), conjugate)
class TransformOdometry:
    """Republishes odometry messages from one topic to another.

    The position/orientation shift arguments are parsed and stored, but the
    actual transform math is currently commented out, so messages pass
    through unchanged.
    """

    def __init__(self, args):
        # argv layout: [script, x, y, z, qx, qy, qz, qw, sub_topic, pub_topic]
        self.x_pos_shift = float(args[1])
        self.y_pos_shift = float(args[2])
        self.z_pos_shift = float(args[3])
        self.x_ori_shift = float(args[4])
        self.y_ori_shift = float(args[5])
        self.z_ori_shift = float(args[6])
        self.w_ori_shift = float(args[7])
        self.odom_sub_topic = args[8]
        self.odom_pub_topic = args[9]
        self.sub_odom_transform = rospy.Subscriber(self.odom_sub_topic, Odometry, self._transform_callback)
        self.pub_odom_transform = rospy.Publisher(self.odom_pub_topic, Odometry, queue_size=1)

    def get_node_name(self):
        # NOTE(review): self.odom_node_name is never assigned anywhere in
        # this class, so calling this raises AttributeError — confirm intent.
        return self.odom_node_name

    def _transform_callback(self, odom_msg):
        # The shift/rotation logic below is disabled; the message is
        # republished as-is.
        #odom_msg.pose.pose.position.x = odom_msg.pose.pose.position.x * math.cos(self.z_ori_shift) - odom_msg.pose.pose.position.y * math.sin(self.z_ori_shift)
        #odom_msg.pose.pose.position.y = odom_msg.pose.pose.position.x * math.sin(self.z_ori_shift) + odom_msg.pose.pose.position.y * math.cos(self.z_ori_shift)
        #odom_msg.pose.pose.position.x = odom_msg.pose.pose.position.x * math.cos(self.y_ori_shift) - odom_msg.pose.pose.position.z * math.sin(self.y_ori_shift)
        #odom_msg.pose.pose.position.y = odom_msg.pose.pose.position.x * math.sin(self.y_ori_shift) + odom_msg.pose.pose.position.z * math.cos(self.y_ori_shift)
        #odom_msg.pose.pose.position.z = 0
        #q = [odom_msg.pose.pose.orientation.x,
        #     odom_msg.pose.pose.orientation.y,
        #     odom_msg.pose.pose.orientation.z,
        #     odom_msg.pose.pose.orientation.w]
        #new_vec = qv_mult(q, [self.x_pos_shift, self.y_pos_shift, self.z_pos_shift])
        #tx = self.x_pos_shift - new_vec[0]
        #ty = self.y_pos_shift - new_vec[1]
        #tz = self.z_pos_shift - new_vec[2]
        #odom_pub_msg = Odometry()
        #odom_pub_msg.header.seq = odom_msg.header.seq
        #odom_pub_msg.header.stamp = odom_msg.header.stamp
        #odom_pub_msg.header.frame_id = odom_msg.header.frame_id
        #odom_pub_msg.child_frame_id = ""
        #odom_pub_msg.pose.pose.position.x = odom_msg.pose.pose.position.x + tx - self.x_pos_shift
        #odom_pub_msg.pose.pose.position.y = odom_msg.pose.pose.position.y + ty - self.y_pos_shift
        #odom_pub_msg.pose.pose.position.z = odom_msg.pose.pose.position.z + tz - self.z_pos_shift
        #odom_pub_msg.pose.pose.orientation.x = odom_msg.pose.pose.orientation.x
        #odom_pub_msg.pose.pose.orientation.y = odom_msg.pose.pose.orientation.y
        #odom_pub_msg.pose.pose.orientation.z = odom_msg.pose.pose.orientation.z
        #odom_pub_msg.pose.pose.orientation.w = odom_msg.pose.pose.orientation.w
        #odom_pub_msg.pose.covariance = odom_msg.pose.covariance
        self.pub_odom_transform.publish(odom_msg)
if __name__ == "__main__":
rospy.init_node("odom_transform")
# Parse input args
def parse_args(args):
if len(args) != 10: return False
for arg in args[1:8]:
try: float(arg)
except: return False
return True
args = rospy.myargv(argv=sys.argv)
if not parse_args(args):
rospy.logfatal("Invalid commandline arguments specified")
rospy.signal_shutdown("Invalid commandline arguments specified")
to = TransformOdometry(args)
rospy.spin()
| [
"tf.transformations.unit_vector",
"tf.transformations.quaternion_multiply",
"rospy.Publisher",
"rospy.logfatal",
"rospy.signal_shutdown",
"rospy.init_node",
"rospy.myargv",
"rospy.spin",
"tf.transformations.quaternion_conjugate",
"rospy.Subscriber"
] | [((3337, 3370), 'rospy.init_node', 'rospy.init_node', (['"""odom_transform"""'], {}), "('odom_transform')\n", (3352, 3370), False, 'import rospy\n'), ((3583, 3610), 'rospy.myargv', 'rospy.myargv', ([], {'argv': 'sys.argv'}), '(argv=sys.argv)\n', (3595, 3610), False, 'import rospy\n'), ((3818, 3830), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3828, 3830), False, 'import rospy\n'), ((228, 261), 'tf.transformations.unit_vector', 'tf.transformations.unit_vector', (['v'], {}), '(v)\n', (258, 261), False, 'import tf\n'), ((358, 403), 'tf.transformations.quaternion_multiply', 'tf.transformations.quaternion_multiply', (['q', 'qp'], {}), '(q, qp)\n', (396, 403), False, 'import tf\n'), ((405, 447), 'tf.transformations.quaternion_conjugate', 'tf.transformations.quaternion_conjugate', (['q'], {}), '(q)\n', (444, 447), False, 'import tf\n'), ((920, 993), 'rospy.Subscriber', 'rospy.Subscriber', (['self.odom_sub_topic', 'Odometry', 'self._transform_callback'], {}), '(self.odom_sub_topic, Odometry, self._transform_callback)\n', (936, 993), False, 'import rospy\n'), ((1028, 1088), 'rospy.Publisher', 'rospy.Publisher', (['self.odom_pub_topic', 'Odometry'], {'queue_size': '(1)'}), '(self.odom_pub_topic, Odometry, queue_size=1)\n', (1043, 1088), False, 'import rospy\n'), ((3649, 3706), 'rospy.logfatal', 'rospy.logfatal', (['"""Invalid commandline arguments specified"""'], {}), "('Invalid commandline arguments specified')\n", (3663, 3706), False, 'import rospy\n'), ((3715, 3779), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['"""Invalid commandline arguments specified"""'], {}), "('Invalid commandline arguments specified')\n", (3736, 3779), False, 'import rospy\n')] |
from json import load, dumps

REPORT_FILE = "report.txt"

# Load the JSON report produced elsewhere.
with open(REPORT_FILE) as report_file:
    data = load(report_file)

# Example of accessing the JSON data report.
ethernet = data["INTERFACES_INFO"]["Ethernet adapter Ethernet"]
print(dumps(ethernet, indent=4))
print(dumps(ethernet["IPv4 Address"]))
print(dumps(ethernet["Physical Address"]))
| [
"json.load",
"json.dumps"
] | [((113, 123), 'json.load', 'load', (['file'], {}), '(file)\n', (117, 123), False, 'from json import load, dumps\n'), ((177, 246), 'json.dumps', 'dumps', (["data['INTERFACES_INFO']['Ethernet adapter Ethernet']"], {'indent': '(4)'}), "(data['INTERFACES_INFO']['Ethernet adapter Ethernet'], indent=4)\n", (182, 246), False, 'from json import load, dumps\n'), ((255, 330), 'json.dumps', 'dumps', (["data['INTERFACES_INFO']['Ethernet adapter Ethernet']['IPv4 Address']"], {}), "(data['INTERFACES_INFO']['Ethernet adapter Ethernet']['IPv4 Address'])\n", (260, 330), False, 'from json import load, dumps\n'), ((339, 418), 'json.dumps', 'dumps', (["data['INTERFACES_INFO']['Ethernet adapter Ethernet']['Physical Address']"], {}), "(data['INTERFACES_INFO']['Ethernet adapter Ethernet']['Physical Address'])\n", (344, 418), False, 'from json import load, dumps\n')] |
"""
This is a stub and used as the default processor.
It doesn't do anything but it can be used to build out another
interface.
See the authorizenet module for the reference implementation
"""
from django.utils.translation import ugettext_lazy as _
from payment.modules.base import BasePaymentProcessor, ProcessorResult
class PaymentProcessor(BasePaymentProcessor):
    """Dummy payment processor used as the default no-op implementation.

    Every operation is recorded locally and reported as successful, except
    for two magic test values (card number '4222222222222' and CCV '222')
    that force declined authorizations.
    """

    def __init__(self, settings):
        super(PaymentProcessor, self).__init__('dummy', settings)

    def authorize_payment(self, order=None, testing=False, amount=None):
        """
        Make an authorization for an order. This payment will then be captured when the order
        is set marked 'shipped'.

        Card number '4222222222222' forces a decline: with CCV '222' the
        decline is reported as a CCV error, otherwise as a bad card number.
        """
        # Fix: use identity comparison with None (was `order == None`),
        # consistent with the `amount is None` check below.
        if order is None:
            order = self.order
        if amount is None:
            amount = order.balance
        cc = order.credit_card
        if cc:
            ccn = cc.decryptedCC
            ccv = cc.ccv
            if ccn == '4222222222222':
                if ccv == '222':
                    self.log_extra('Bad CCV forced')
                    payment = self.record_failure(amount=amount, transaction_id='2',
                        reason_code='2', details='CCV error forced')
                    return ProcessorResult(self.key, False, _('Bad CCV - order declined'), payment)
                else:
                    self.log_extra('Setting a bad credit card number to force an error')
                    payment = self.record_failure(amount=amount, transaction_id='2',
                        reason_code='2', details='Credit card number error forced')
                    return ProcessorResult(self.key, False, _('Bad credit card number - order declined'), payment)
        orderauth = self.record_authorization(amount=amount, reason_code="0")
        return ProcessorResult(self.key, True, _('Success'), orderauth)

    def can_authorize(self):
        """This processor supports the authorize/capture flow."""
        return True

    def capture_payment(self, testing=False, amount=None):
        """
        Process the transaction and return a ProcessorResult:

        Example:
        >>> from livesettings.functions import config_get_group
        >>> settings = config_get_group('PAYMENT_DUMMY')
        >>> from payment.modules.dummy.processor import PaymentProcessor
        >>> processor = PaymentProcessor(settings)
        # If using a normal payment module, data should be an Order object.
        >>> data = {}
        >>> processor.prepare_data(data)
        >>> processor.process()
        ProcessorResult: DUMMY [Success] Success
        """
        orderpayment = self.record_payment(amount=amount, reason_code="0")
        return ProcessorResult(self.key, True, _('Success'), orderpayment)

    def capture_authorized_payment(self, authorization, amount=None):
        """
        Given a prior authorization, capture remaining amount not yet captured.
        """
        if amount is None:
            amount = authorization.remaining()
        orderpayment = self.record_payment(amount=amount, reason_code="0",
            transaction_id="dummy", authorization=authorization)
        return ProcessorResult(self.key, True, _('Success'), orderpayment)

    def release_authorized_payment(self, order=None, auth=None, testing=False):
        """Release a previously authorized payment."""
        auth.complete = True
        auth.save()
        return ProcessorResult(self.key, True, _('Success'))
| [
"django.utils.translation.ugettext_lazy"
] | [((1820, 1832), 'django.utils.translation.ugettext_lazy', '_', (['"""Success"""'], {}), "('Success')\n", (1821, 1832), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2647, 2659), 'django.utils.translation.ugettext_lazy', '_', (['"""Success"""'], {}), "('Success')\n", (2648, 2659), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3114, 3126), 'django.utils.translation.ugettext_lazy', '_', (['"""Success"""'], {}), "('Success')\n", (3115, 3126), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3374, 3386), 'django.utils.translation.ugettext_lazy', '_', (['"""Success"""'], {}), "('Success')\n", (3375, 3386), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1259, 1288), 'django.utils.translation.ugettext_lazy', '_', (['"""Bad CCV - order declined"""'], {}), "('Bad CCV - order declined')\n", (1260, 1288), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1639, 1683), 'django.utils.translation.ugettext_lazy', '_', (['"""Bad credit card number - order declined"""'], {}), "('Bad credit card number - order declined')\n", (1640, 1683), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
"""Manage the directories
This includes options to store the hashtable in a file or keep
it temporarily, where it will be returned from the function...
"""
import json
import os
import platform
from typing import Tuple, Union
from .errors import FilenameError, PathError, SystemNotSupported
# Public API of this module.
__all__ = [
    "set_directory",
    "set_filename",
    "get_directory",
    "get_filename",
    "get_full_path",
    "create_directory",
    "create_file"
]

# Module-level settings for where the hash-table file lives and what it is
# called; mutated only through set_directory() / set_filename().
directory = os.getcwd()
filename = "hash_table.json"
def check_for_invalid_characters(string, os, path_input=False) -> Tuple[bool, str]:
    """Check ``string`` for characters that are illegal on the given OS.

    Slashes and backslashes are permitted when ``path_input`` is True, since
    they delimit the sections of a path.  On Windows a colon is only legal
    as the drive separator (index 1, e.g. "C:/...").

    :param string: the filename or path to validate
    :param os: the OS name, as returned by ``platform.system()``
        (note: this parameter shadows the ``os`` module inside the function)
    :param path_input: True when ``string`` is a path, not a bare filename
    :return: ``(True, offending_character)`` if an illegal character is
        found, otherwise ``(False, "")``.  Unknown OSes are not validated.
    """
    if os == "Windows":
        if path_input:
            illegal_characters = ["<", ">", ":", '"', "|", "?", "*"]
        else:
            illegal_characters = ["<", ">", ":", '"', "/", "\\", "|", "?", "*"]
    elif os == "Linux":
        # Fix: previously a Linux path left ``illegal_characters`` unbound,
        # raising UnboundLocalError.  "/" is the path separator on Linux, so
        # nothing is illegal inside a Linux *path* string.
        if path_input:
            illegal_characters = []
        else:
            illegal_characters = ["/"]
    else:
        return (False, "")
    for character in illegal_characters:
        if character in string:
            if character == ":":
                # A colon is only permitted at index 1 (the drive letter).
                for index in range(len(string)):
                    if string[index] == ":" and index != 1:
                        return (True, ":")
            else:
                return (True, character)
    return (False, "")
def check_for_reserved_names(string, os) -> Tuple[bool, str]:
    """Check ``string`` for OS-reserved device names (e.g. "CON", "LPT1").

    A reserved name only counts when it forms a whole path component, i.e.
    it is bounded by the start/end of the string or by a slash/backslash.

    :param string: the filename or path to validate
    :param os: the OS name, as returned by ``platform.system()``
    :return: ``(True, reserved_name)`` if a reserved component is found,
        otherwise ``(False, "")``
    :raises SystemNotSupported: if ``os`` is neither Windows nor Linux
    """
    if os == "Windows":
        illegal_names = [
            "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3",
            "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1",
            "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"
        ]
    elif os == "Linux":
        """Linux has no reserved names."""
        return (False, "")
    else:
        raise SystemNotSupported(f"Unsupported OS {os}.")
    """An exception should only be given if an illegal name is found on its own with no other characters."""
    upper = string.upper()
    for reserved_name in illegal_names:
        # Fix: examine *every* occurrence (the old code only checked the
        # first .find() hit, missing e.g. "CONx/CON"), and treat the start
        # of the string as a component boundary (the old ``index - 1``
        # wrapped around to the last character for a match at index 0).
        start = 0
        while True:
            index = upper.find(reserved_name, start)
            if index == -1:
                break
            end = index + len(reserved_name)
            preceded_by_boundary = index == 0 or string[index - 1] in "\\/"
            followed_by_boundary = end == len(string) or string[end] in "\\/"
            if preceded_by_boundary and followed_by_boundary:
                return (True, reserved_name)
            start = index + 1
    return (False, "")
def set_directory(file_directory, full_path=False) -> None:
    """Set where the hash-table file will be stored.

    With ``full_path=True`` the argument replaces the directory outright;
    otherwise it is appended to the current directory.  If this is never
    called, the file is stored in the program location.

    :raises PathError: if the path contains illegal characters or an
        OS-reserved name
    """
    global directory
    system = platform.system()
    has_invalid, bad_character = check_for_invalid_characters(
        file_directory, system, path_input=True)
    if has_invalid:
        raise PathError(f"Invalid character ({bad_character}) found in file path.")
    has_reserved, reserved = check_for_reserved_names(file_directory, system)
    if has_reserved:
        raise PathError(f"OS reserved name ({reserved}) found in file path.")
    if full_path:
        directory = file_directory
    else:
        directory = directory + file_directory
def set_filename(file_name) -> None:
    """Set the name of the hash-table file (".json" is appended).

    If this is never called, the default name is kept.

    :raises FilenameError: for empty names, illegal characters, or
        OS-reserved names
    """
    global filename
    cleaned = file_name.strip()
    if cleaned == "":
        raise FilenameError("The filename cannot be set as an empty value.")
    system = platform.system()
    has_invalid, bad_character = check_for_invalid_characters(cleaned, system)
    if has_invalid:
        raise FilenameError(f"Invalid character ({bad_character}) found in filename.")
    has_reserved, reserved = check_for_reserved_names(cleaned, system)
    if has_reserved:
        raise FilenameError(f"OS reserved name ({reserved}) found in filename.")
    filename = cleaned + ".json"
def get_directory() -> str:
    """Return the directory currently configured for the hash-table file."""
    return directory
def get_filename(file_extension=False) -> str:
    """Return the configured filename, with or without its ".json" suffix."""
    if file_extension:
        return filename
    return filename[:filename.find(".json")]
def get_full_path(file_extension=True) -> str:
    """Return the configured directory joined with the configured filename."""
    return "{}/{}".format(get_directory(),
                          get_filename(file_extension=file_extension))
def create_directory(path=None) -> None:
    """Create the configured directory (or ``path``), including parents.

    Replaces a fragile hand-rolled slash-scanning loop (which left
    ``existing_directory`` unused and mishandled slash-free directories)
    with ``os.makedirs``.  Already-existing directories are not an error.

    :param path: optional explicit directory to create; defaults to the
        module-level ``directory`` setting (backward compatible).
    """
    target = directory if path is None else path
    os.makedirs(target, exist_ok=True)
def create_file(overwrite_existing=False) -> Union[bool, None]:
    """Create the hash-table JSON file inside the configured directory.

    Returns False (without touching the file) when the file already exists
    and ``overwrite_existing`` is False; otherwise writes an empty table.

    :raises PathError: if the configured directory does not exist yet
    """
    full_path = get_full_path()
    if os.path.isfile(full_path) and not overwrite_existing:
        return False
    if not os.path.isdir(directory):
        raise PathError("The directory must be created (.create_directory()) before the file is.")
    with open(full_path, "w") as file_handle:
        file_handle.write(json.dumps([{}, {}, {}, []]))
"json.dumps",
"os.getcwd",
"platform.system",
"os.path.isdir",
"os.mkdir"
] | [((467, 478), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (476, 478), False, 'import os\n'), ((6132, 6156), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (6145, 6156), False, 'import os\n'), ((3201, 3218), 'platform.system', 'platform.system', ([], {}), '()\n', (3216, 3218), False, 'import platform\n'), ((3430, 3447), 'platform.system', 'platform.system', ([], {}), '()\n', (3445, 3447), False, 'import platform\n'), ((4069, 4086), 'platform.system', 'platform.system', ([], {}), '()\n', (4084, 4086), False, 'import platform\n'), ((4279, 4296), 'platform.system', 'platform.system', ([], {}), '()\n', (4294, 4296), False, 'import platform\n'), ((5811, 5825), 'os.mkdir', 'os.mkdir', (['_dir'], {}), '(_dir)\n', (5819, 5825), False, 'import os\n'), ((5550, 5598), 'os.path.isdir', 'os.path.isdir', (['directory[0:slash_indexes[x + 1]]'], {}), '(directory[0:slash_indexes[x + 1]])\n', (5563, 5598), False, 'import os\n'), ((6263, 6291), 'json.dumps', 'json.dumps', (['[{}, {}, {}, []]'], {}), '([{}, {}, {}, []])\n', (6273, 6291), False, 'import json\n')] |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from enum import Enum
import re
import os
from autohandshake.src.exceptions import (
InvalidURLError, NoSuchElementError, WrongPageForMethodError,
InsufficientPermissionsError, InvalidUserTypeError
)
from autohandshake.src.constants import MAX_WAIT_TIME
from autohandshake.src.constants import BASE_URL
class UserType(Enum):
    """
    The possible user types in Handshake

    * Employer - a Handshake employer account
    * Staff - a career services staff/admin account
    * Student - a student or alumni account
    """
    # NOTE: the enum *values* are matched against the text Handshake reports
    # in its "current_user_type" meta tag (see HandshakeBrowser
    # .update_constants) — do not change them.
    EMPLOYER = 'Employers'
    STAFF = 'Career Services'
    STUDENT = 'Students'
class HandshakeBrowser:
"""
An automated browser for navigating Handshake.
Since a logged-in instance of this class is returned by HandshakeSession's
__enter__ method, it does not usually need to be manually instantiated. Additionally,
for most use cases, the user only needs to pass a HandshakeBrowser object to
a Page object, then let the Page's methods do the heavy-lifting.
For example, you almost never need to write:
::
browser = HandshakeBrowser()
The vast majority of use cases look something like:
::
with HandshakeSession(school_url, email) as browser:
some_page = SomePage(browser)
some_page.do_something()
If you need to specify a custom max_wait_time, that can be done through the
HandshakeSession object:
::
# this
with HandshakeSession(school_url, email, max_wait_time = 60) as browser:
some_page = SomePage(browser)
# not this
browser = HandshakeBrowser(max_wait_time = 60)
"""
    def __init__(self, max_wait_time: int = MAX_WAIT_TIME, chromedriver_path: str = None, download_dir: str = None):
        """
        Launch a Chrome instance configured for scraping Handshake.

        :param max_wait_time: the maximum time (in seconds) to wait for an element
                              to load before throwing a timeout error
        :type max_wait_time: int
        :param chromedriver_path: the filepath to chromedriver.exe. If not specified, the package's own driver will be used
        :type chromedriver_path: str
        :param download_dir: the directory in which to download any files. If not
                             specified, defaults to system's default download location.
        :type download_dir: str
        """
        options = webdriver.ChromeOptions()
        # Fixed window size keeps element visibility deterministic regardless
        # of the host display.
        options.add_argument('--window-size=1920,1080')
        if download_dir:
            options.add_experimental_option('prefs', {'download.default_directory': download_dir})
        dirname = os.path.dirname(__file__)
        if not chromedriver_path:
            # Fall back to the driver bundled next to this package.
            # NOTE(review): the '.exe' suffix assumes Windows — confirm
            # behavior on other platforms.
            chromedriver_path = os.path.join(dirname, '../chromedriver.exe')
        self._browser = webdriver.Chrome(executable_path=chromedriver_path,
                                         options=options)
        self.max_wait_time = max_wait_time
        # Populated by update_constants() once a user is logged in.
        self._user_type = None
    def get(self, url: str):
        """Go to the web page specified by the given Handshake url.

        :param url: the url to visit. Must be of the form
                    "https://[...].joinhandshake.com[/...]"
        :type url: str
        """
        self._validate_url_str(url)
        self._browser.get(url)
        # After navigation, confirm the page loaded and the logged-in user
        # is allowed to view it (validators defined later in this class).
        self._validate_page_exists()
        self._validate_permissions()
    def quit(self):
        """Close the browser and end the automated session."""
        self._browser.quit()
def element_exists_by_xpath(self, xpath: str) -> bool:
"""
Determine whether or not an element with the given xpath exists in the page.
:param xpath: the xpath of the element to search for
:type xpath: str
:return: True if the element exists, false otherwise
:rtype: bool
"""
try:
self._browser.find_element_by_xpath(xpath)
return True
except NoSuchElementException:
return False
def wait_until_element_exists_by_xpath(self, xpath: str):
"""
Wait until an element with the given xpath exists on the page.
:param xpath: the xpath of the element to wait for
:type xpath: str
"""
try:
WebDriverWait(self._browser, self.max_wait_time).until(
EC.visibility_of_element_located((By.XPATH, xpath)))
except TimeoutException:
raise TimeoutError(f"Element with xpath {xpath} did not appear in time")
def wait_until_element_does_not_exist_by_xpath(self, xpath: str):
"""
Wait until an element with the given xpath exists on the page.
:param xpath: the xpath of the element to wait for
:type xpath: str
"""
try:
WebDriverWait(self._browser, self.max_wait_time).until(
EC.invisibility_of_element_located((By.XPATH, xpath)))
except TimeoutException:
raise TimeoutError(f"Element with xpath {xpath} did not disappear in tme")
def wait_until_element_is_clickable_by_xpath(self, xpath: str):
"""
Wait until an element with the given xpath is clickable.
:param xpath: the xpath of the element to wait for
:type xpath: str
"""
try:
WebDriverWait(self._browser, self.max_wait_time).until(
EC.element_to_be_clickable((By.XPATH, xpath)))
except TimeoutException:
raise TimeoutError(f"Element with xpath {xpath} did not become clickable")
def send_text_to_element_by_xpath(self, xpath: str, text: str, clear: bool = True):
"""
Send a string to an input field identified by the given xpath
:param text: the text to send
:type text: str
:param xpath: the xpath of the input field to which to send the text
:type xpath: str
:param clear: whether or not to clear the field before sending text. If
False, text will be appended to any text already present.
:type clear: bool
"""
try:
element = self._browser.find_element_by_xpath(xpath)
if clear:
element.clear()
element.send_keys(text)
except NoSuchElementException:
raise NoSuchElementError(f'No element found for xpath: "{xpath}"')
def click_element_by_xpath(self, xpath):
"""
Click an element on the page given its xpath
:param xpath: the xpath of the element to click
:type xpath: str
"""
try:
self._browser.find_element_by_xpath(xpath).click()
except NoSuchElementException:
raise NoSuchElementError(f'No element found for xpath: "{xpath}"')
    def wait_then_click_element_by_xpath(self, xpath):
        """
        Click an element on the page given its xpath after waiting to make sure
        it exists

        :param xpath: the xpath of the element to click
        :type xpath: str
        :raises TimeoutError: if the element does not become visible in time
        :raises NoSuchElementError: if the element vanishes before the click
        """
        self.wait_until_element_exists_by_xpath(xpath)
        self.click_element_by_xpath(xpath)
def get_element_attribute_by_xpath(self, xpath: str, attribute: str) -> str:
"""
Get the value of the given attribute from the element with the given xpath
:param xpath: the xpath of the element of interest
:type xpath: str
:param attribute: the name of the attribute of interest, e.g. 'value'
:type attribute: str
:return: the value of the attribute on the element of interest
:rtype: str
"""
try:
if attribute.lower() == 'text':
return self._browser.find_element_by_xpath(xpath).text
return self._browser.find_element_by_xpath(xpath).get_attribute(attribute)
except NoSuchElementException:
raise NoSuchElementError(f'No element found for xpath: "{xpath}"')
def get_elements_attribute_by_xpath(self, xpath: str, attribute: str) -> list:
"""
Get the value of a given attribute for all elements with the given xpath
:param xpath: the xpath of the elements of interest
:type xpath: str
:param attribute: the name of the attribute of interest, e.g. 'value'
:type attribute: str
:return: a list of values of the given attribute for each matching element
:rtype: list
"""
try:
elements = self._browser.find_elements_by_xpath(xpath)
if attribute.lower() == 'text':
return [element.text for element in elements]
return [element.get_attribute(attribute) for element in elements]
except NoSuchElementException:
raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
def execute_script_on_element_by_xpath(self, script: str, xpath: str = None):
"""
Execute the given javascript expression. If xpath of element is provided,
the element becomes available to use in the script, and can be accessed
using arguments[0].
:param script: the javascript to be executed
:type script: str
:param xpath: the xpath of the optional element to be passed to the script
:type xpath: str
"""
if not xpath:
self._browser.execute_script(script)
else:
try:
self._browser.execute_script(script, self._browser.find_element_by_xpath(xpath))
except NoSuchElementException:
raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
def element_is_selected_by_xpath(self, xpath: str) -> bool:
    """Get whether or not the element specified by the given xpath is selected

    :param xpath: the xpath of the elements of interest
    :type xpath: str
    :return: True if the element is selected, False otherwise
    :rtype: bool
    :raises NoSuchElementError: if no element matches the xpath
    """
    try:
        return self._browser.find_element_by_xpath(xpath).is_selected()
    # FIX: the original bare ``except:`` swallowed *every* error (even
    # KeyboardInterrupt) and misreported it as a missing element; only
    # catch the lookup failure, matching the sibling *_by_xpath methods
    except NoSuchElementException:
        raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
def switch_to_frame_by_xpath(self, xpath):
    """
    Switch the browser's context to the iframe matching the given xpath.

    :param xpath: the xpath of the frame to switch into
    :raises NoSuchElementError: if no frame matches the xpath
    """
    try:
        target_frame = self._browser.find_element_by_xpath(xpath)
    except NoSuchElementException:
        raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
    self._browser.switch_to.frame(target_frame)
def update_constants(self):
    """
    Update any Handshake environment constants such as School ID or User ID.

    This should be done every time the browser switches user types and upon
    initial login.
    """
    # the institution meta tag holds the employer id for employer accounts
    # and the school id for everyone else
    institution_id = self._get_meta_constant('logged_in_user_institution_id')
    if self._user_type == UserType.EMPLOYER:
        self.employer_id = institution_id
    else:
        self.school_id = institution_id
    self.user_id = self._get_meta_constant('logged_in_user_id')
    self._user_type = UserType(self._get_meta_constant('current_user_type'))
def _get_meta_constant(self, name: str) -> str:
    """
    Get the content of a meta tag with the given name.

    Handshake stores data such as the current school id, user id, or
    employer id in tags of the form ``<meta content="foo" name="bar">``.
    Given ``"bar"``, this method returns ``"foo"``.

    :param name: the name of the meta tag to query
    :type name: str
    :return: the content of the meta tag
    :rtype: str
    """
    meta_xpath = f'//meta[@name="{name}"]'
    return self.get_element_attribute_by_xpath(meta_xpath, 'content')
def switch_to_new_tab(self):
    """Wait until a second tab exists, then make it the active window."""
    waiter = WebDriverWait(self._browser, self.max_wait_time)
    waiter.until(EC.number_of_windows_to_be(2))
    newest_handle = self._browser.window_handles[-1]
    self._browser.switch_to.window(newest_handle)
def return_to_main_tab(self):
    """With a second tab open, close the current tab and return to the main tab."""
    browser = self._browser
    browser.execute_script('window.close();')
    browser.switch_to.window(browser.window_handles[0])
def maximize_window(self):
    """Maximize the browser window."""
    # delegate straight to the underlying selenium webdriver
    self._browser.maximize_window()
@property
def user_type(self) -> UserType:
    """
    Get the user type of the account currently logged into Handshake.

    :return: the browser's currently-logged-in user type
    :rtype: UserType
    """
    # kept in sync by update_constants() after login and user switches
    return self._user_type
def switch_users(self, user_type: UserType):
    """
    Switch to the system view specified by the given user type.

    This method automates the built-in "Switch Users" function in Handshake.

    :param user_type: the user type to which to switch
    :type user_type: UserType
    :raises InvalidUserTypeError: if the account has no linked account of
        the requested type
    """
    if self._user_type == user_type:
        # nothing to do: already viewing Handshake as the requested type
        return
    self.get(f'{BASE_URL}/user_switcher/options')
    option_xpath = f'//a[div[h4[contains(text(), "{user_type.value}")]]]'
    try:
        self.click_element_by_xpath(option_xpath)
    except NoSuchElementError:
        raise InvalidUserTypeError("User does not have a linked account of the given type")
    self.update_constants()
@property
def current_url(self):
    """Get the url of the browser's current page"""
    # delegate to the underlying selenium webdriver
    return self._browser.current_url
@staticmethod
def _validate_url_str(url: str):
    """
    Raise an error if the given Handshake URL is not valid.

    :param url: the URL to test
    :type url: str
    :raises InvalidURLError: if the URL is not an https joinhandshake.com URL
    """
    # FIX: the docstring previously documented a nonexistent ``login_url``
    # parameter, and the code relied on calling .group(0) on a possible
    # None to trigger an AttributeError; test the match result explicitly
    if re.match(r'^https://[a-zA-Z]+\.joinhandshake\.com', url) is None:
        raise InvalidURLError('URL must be of the form '
                              '"https://app.joinhandshake.com[/...]" or '
                              '"https://[school name].joinhandshake.com[/...]"')
def _validate_page_exists(self):
    """Determine whether the current page exists or gave a 404 error."""
    # Handshake's 404 page carries this exact apology paragraph
    not_found_xpath = ("//p[contains(text(), 'You may want "
                       "to head back to the homepage.')]")
    if self.element_exists_by_xpath(not_found_xpath):
        raise InvalidURLError(self._browser.current_url)
def _validate_permissions(self):
    """Determine whether or not the logged in user has permission to view the current page"""
    # Handshake's access-denied page carries this exact message
    denial_xpath = ("//div[contains(text(), 'You do not "
                    "have permission to do that.')]")
    if self.element_exists_by_xpath(denial_xpath):
        raise InsufficientPermissionsError
| [
"selenium.webdriver.ChromeOptions",
"autohandshake.src.exceptions.InvalidURLError",
"selenium.webdriver.support.expected_conditions.invisibility_of_element_located",
"selenium.webdriver.support.ui.WebDriverWait",
"autohandshake.src.exceptions.NoSuchElementError",
"selenium.webdriver.Chrome",
"os.path.jo... | [((2670, 2695), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (2693, 2695), False, 'from selenium import webdriver\n'), ((2895, 2920), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2910, 2920), False, 'import os\n'), ((3056, 3124), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'chromedriver_path', 'options': 'options'}), '(executable_path=chromedriver_path, options=options)\n', (3072, 3124), False, 'from selenium import webdriver\n'), ((2987, 3031), 'os.path.join', 'os.path.join', (['dirname', '"""../chromedriver.exe"""'], {}), "(dirname, '../chromedriver.exe')\n", (2999, 3031), False, 'import os\n'), ((12091, 12120), 'selenium.webdriver.support.expected_conditions.number_of_windows_to_be', 'EC.number_of_windows_to_be', (['(2)'], {}), '(2)\n', (12117, 12120), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((14610, 14652), 'autohandshake.src.exceptions.InvalidURLError', 'InvalidURLError', (['self._browser.current_url'], {}), '(self._browser.current_url)\n', (14625, 14652), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((4549, 4600), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (['(By.XPATH, xpath)'], {}), '((By.XPATH, xpath))\n', (4581, 4600), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((5068, 5121), 'selenium.webdriver.support.expected_conditions.invisibility_of_element_located', 'EC.invisibility_of_element_located', (['(By.XPATH, xpath)'], {}), '((By.XPATH, xpath))\n', (5102, 5121), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((5583, 5628), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, xpath)'], {}), 
'((By.XPATH, xpath))\n', (5609, 5628), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((6509, 6569), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No element found for xpath: "{xpath}\\""""'], {}), '(f\'No element found for xpath: "{xpath}"\')\n', (6527, 6569), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((6908, 6968), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No element found for xpath: "{xpath}\\""""'], {}), '(f\'No element found for xpath: "{xpath}"\')\n', (6926, 6968), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((8071, 8131), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No element found for xpath: "{xpath}\\""""'], {}), '(f\'No element found for xpath: "{xpath}"\')\n', (8089, 8131), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((8939, 9000), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No elements found for xpath: "{xpath}\\""""'], {}), '(f\'No elements found for xpath: "{xpath}"\')\n', (8957, 9000), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((10268, 10329), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No elements found for xpath: "{xpath}\\""""'], {}), '(f\'No elements found for xpath: "{xpath}"\')\n', (10286, 10329), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, 
InvalidUserTypeError\n'), ((10560, 10621), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No elements found for xpath: "{xpath}\\""""'], {}), '(f\'No elements found for xpath: "{xpath}"\')\n', (10578, 10621), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((12023, 12071), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self._browser', 'self.max_wait_time'], {}), '(self._browser, self.max_wait_time)\n', (12036, 12071), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((13486, 13563), 'autohandshake.src.exceptions.InvalidUserTypeError', 'InvalidUserTypeError', (['"""User does not have a linked account of the given type"""'], {}), "('User does not have a linked account of the given type')\n", (13506, 13563), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((14116, 14257), 'autohandshake.src.exceptions.InvalidURLError', 'InvalidURLError', (['"""URL must be of the form "https://app.joinhandshake.com[/...]" or "https://[school name].joinhandshake.com[/...]\\""""'], {}), '(\n \'URL must be of the form "https://app.joinhandshake.com[/...]" or "https://[school name].joinhandshake.com[/...]"\'\n )\n', (14131, 14257), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((4477, 4525), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self._browser', 'self.max_wait_time'], {}), '(self._browser, self.max_wait_time)\n', (4490, 4525), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((4996, 5044), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self._browser', 'self.max_wait_time'], {}), '(self._browser, 
self.max_wait_time)\n', (5009, 5044), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((5511, 5559), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self._browser', 'self.max_wait_time'], {}), '(self._browser, self.max_wait_time)\n', (5524, 5559), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((9750, 9811), 'autohandshake.src.exceptions.NoSuchElementError', 'NoSuchElementError', (['f"""No elements found for xpath: "{xpath}\\""""'], {}), '(f\'No elements found for xpath: "{xpath}"\')\n', (9768, 9811), False, 'from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, WrongPageForMethodError, InsufficientPermissionsError, InvalidUserTypeError\n'), ((13982, 14039), 're.match', 're.match', (['"""^https://[a-zA-Z]+\\\\.joinhandshake\\\\.com"""', 'url'], {}), "('^https://[a-zA-Z]+\\\\.joinhandshake\\\\.com', url)\n", (13990, 14039), False, 'import re\n')] |
################################################################################
#
# Provide embeddings from raw audio with the wav2vec2 model from huggingface.
#
# Author(s): <NAME>
################################################################################
from typing import Optional, List
import torch as t
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from src.networks.wav2vec2_components.network import Wav2vec2
from src.util.torch import reset_model
from src.networks.wav2vec2_components.base_components import (
Wav2Vec2NetworkRegularisationConfig,
retrieve_pretrained_wav2vec2_config,
)
########################################################################################
# wrapper which holds all components of wav2vec2 network
class Wav2Vec2Network(pl.LightningModule):
    """Wrapper holding all components of the huggingface wav2vec2 network.

    Produces frame-level embeddings from raw audio, optionally prepending a
    (learnable) CLS token to the token sequence before the transformer encoder.
    """

    def __init__(
        self,
        wav2vec2_huggingface_id: str,
        reset_weights: bool,
        reg_cfg: Optional[Wav2Vec2NetworkRegularisationConfig] = None,
        insert_cls_token: bool = False,
        learnable_cls_token: bool = False,
        cls_token_constant: float = 1,
        gradient_checkpointing: bool = False,
        shuffling_location: Optional[str] = None,
        shuffling_std: Optional[float] = None,
    ):
        super().__init__()

        self.wav2vec2 = Wav2vec2(
            cfg=retrieve_pretrained_wav2vec2_config(wav2vec2_huggingface_id, reg_cfg),
            enable_gradient_checkpointing=gradient_checkpointing,
            pretrained_weights=wav2vec2_huggingface_id,
            shuffling_location=shuffling_location,
            shuffling_std=shuffling_std,
        )

        self.insert_cls_token = insert_cls_token
        self.cls_token_constant = cls_token_constant

        # embedding width follows the pretrained architecture variant
        if "base" in wav2vec2_huggingface_id:
            self.num_features = 768
        elif "large" in wav2vec2_huggingface_id:
            self.num_features = 1024
        else:
            raise ValueError("cannot determine num features")

        if self.insert_cls_token:
            # constant-initialised CLS token, optionally trainable
            cls_token = t.ones((self.num_embedding_features,)) * t.Tensor(
                [self.cls_token_constant]
            )
            self.cls_token = t.nn.parameter.Parameter(
                cls_token, requires_grad=learnable_cls_token
            )

        if reset_weights:
            reset_model(self.wav2vec2)

    @property
    def num_embedding_features(self):
        # width of the embeddings produced by the encoder (768 base, 1024 large)
        return self.num_features

    def forward(
        self, wav_input: t.Tensor, num_audio_samples: Optional[List[int]] = None
    ):
        """Embed raw audio.

        :param wav_input: raw audio of shape [batch_size, num_audio_samples]
        :param num_audio_samples: actual (unpadded) number of samples per
            batch element; defaults to the full padded length for every row
        :return: tuple (embeddings, number of valid feature frames per row)
        """
        # assume wav_input is of shape [batch_size, num_audio_features]
        assert len(wav_input.shape) == 2

        if num_audio_samples is None:
            num_audio_samples = [wav_input.shape[1] for _ in range(wav_input.shape[0])]
        else:
            assert len(num_audio_samples) == wav_input.shape[0]
            # FIX: each length must be positive and at most the padded length.
            # The previous condition (`0 >= le > wav_input.shape[1]`) could
            # never hold for any value, so this assert always failed whenever
            # explicit lengths were given.
            assert all([0 < le <= wav_input.shape[1] for le in num_audio_samples])

        audio_features, num_audio_features, attention_mask = self.extract_features(
            wav_input, num_audio_samples
        )

        wav2vec2_embeddings = self.compute_wav2vec2_embeddings(
            audio_features, attention_mask
        )

        return wav2vec2_embeddings, num_audio_features

    def extract_features(self, wav_input: t.Tensor, num_audio_samples: List[int]):
        """Run the CNN feature extractor and project to input token embeddings."""
        # wav input should be of shape [BATCH_SIZE, NUM_AUDIO_SAMPLES]

        # first compute audio features with CNN
        features = self.wav2vec2.feature_extractor(wav_input)
        features = features.transpose(1, 2)

        # project channels of CNN output into a sequence of input token embeddings
        features, _ = self.wav2vec2.feature_projector(features)

        num_feature_tokens = self.compute_feature_extractor_lengths(num_audio_samples)
        attention_mask = self.construct_attention_mask(
            num_audio_samples, max(num_feature_tokens), device=wav_input.device
        )

        # optionally apply masking to sequence (in time and feature axis)
        features = self.wav2vec2._mask_hidden_states(
            features, attention_mask=attention_mask
        )

        # features should be of shape [BATCH_SIZE, NUM_FRAMES, NUM_FEATURES]
        bs, num_frames, num_features = features.shape
        assert bs == wav_input.shape[0]
        assert num_frames == max(num_feature_tokens)
        assert num_features == self.num_embedding_features

        return features, num_feature_tokens, attention_mask

    def compute_wav2vec2_embeddings(
        self, input_token_sequence: t.Tensor, attention_mask: t.Tensor = None
    ):
        """Run the transformer encoder over the projected token sequence."""
        # input token sequence is of shape [BATCH_SIZE, NUM_FRAMES, NUM_FEATURES]
        # optional attention mask is of shape [BATCH_SIZE, NUM_FRAMES], where
        # 1 means `pay attention` and 0 means `skip processing this frame`.
        if self.insert_cls_token:
            batched_cls_token = self.cls_token.repeat(
                input_token_sequence.shape[0],
                1,
                1,
            )
            input_token_sequence = t.cat(
                [batched_cls_token, input_token_sequence],
                dim=1,
            ).to(device=input_token_sequence.device)

            if attention_mask is not None:
                # we want to attend to the class token
                attention_mask = t.cat(
                    [
                        t.ones(
                            (attention_mask.shape[0], 1),
                            dtype=attention_mask.dtype,
                            device=attention_mask.device,
                        ),
                        attention_mask,
                    ],
                    dim=1,
                ).to(input_token_sequence.device)

        encoder_output = self.wav2vec2.encoder(
            input_token_sequence, attention_mask=attention_mask
        )
        embedding = encoder_output.last_hidden_state

        # embedding should be of shape [BATCH_SIZE, NUM_FRAMES, NUM_FEATURES]
        # NOTE(review): these asserts index attention_mask and therefore
        # require a non-None mask, which extract_features always supplies
        bs, num_frames, num_features = embedding.shape
        assert bs == input_token_sequence.shape[0] == attention_mask.shape[0]
        assert num_frames == input_token_sequence.shape[1] == attention_mask.shape[1]
        assert num_features == self.num_embedding_features

        return embedding

    def construct_attention_mask(
        self, num_audio_samples: List[int], feature_sequence_length: int, device: str
    ):
        """Build a frame-level attention mask from per-row sample counts."""
        assert len(num_audio_samples) >= 1

        # init assumes all tokens are attended to
        bs = len(num_audio_samples)
        max_num_audio_samples = max(num_audio_samples)
        attention_mask = t.ones((bs, max_num_audio_samples), dtype=t.long)

        for idx, length in enumerate(num_audio_samples):
            assert length >= 0

            # set each token which is 'padding' to 0
            attention_mask[idx, length:] = 0

        attention_mask = self.wav2vec2._get_feature_vector_attention_mask(
            feature_sequence_length, attention_mask
        )

        return attention_mask.to(device=device)

    def compute_feature_extractor_lengths(self, num_audio_samples: List[int]):
        """Map raw-sample counts to the number of CNN output frames."""
        num_feature_lengths = self.wav2vec2._get_feat_extract_output_lengths(
            t.LongTensor(num_audio_samples)
        ).tolist()

        return num_feature_lengths
########################################################################################
# freezing logic
def freeze_wav2vec2_on_train_start(
    module: LightningModule,
    network: Wav2Vec2Network,
    wav2vec2_initially_frozen: bool,
    completely_freeze_feature_extractor: bool,
) -> None:
    """Initialise the freeze bookkeeping on ``module`` at training start.

    Installs the step counter and frozen flag, optionally freezes the whole
    wav2vec2 network, and optionally freezes its CNN feature extractor for
    the entire run.
    """
    # the bookkeeping attributes must not exist yet — they are owned here
    for attribute_name in ("_steps_wav2vec2_freeze", "_is_wav2vec_frozen"):
        if hasattr(module, attribute_name):
            raise ValueError(
                f"expected to initialize the attribute '{attribute_name}'"
            )

    module._steps_wav2vec2_freeze = 0

    if wav2vec2_initially_frozen:
        network.freeze()
    module._is_wav2vec_frozen = bool(wav2vec2_initially_frozen)

    if completely_freeze_feature_extractor:
        network.wav2vec2.feature_extractor.requires_grad_(False)
def freeze_wav2vec2_on_after_backward(
    module: LightningModule,
    network: Wav2Vec2Network,
    num_frozen_steps: int,
    completely_freeze_feature_extractor: bool,
) -> None:
    """Advance the freeze step counter and thaw the network when it expires.

    After ``num_frozen_steps`` optimisation steps the wav2vec2 network is
    unfrozen; the CNN feature extractor can stay frozen permanently.
    """
    # both bookkeeping attributes must have been installed on train start
    for attribute_name in ("_steps_wav2vec2_freeze", "_is_wav2vec_frozen"):
        if not hasattr(module, attribute_name):
            raise ValueError(f"expected attribute '{attribute_name}'")

    module._steps_wav2vec2_freeze += 1

    thaw_is_due = (
        module._is_wav2vec_frozen
        and num_frozen_steps is not None
        and module._steps_wav2vec2_freeze >= num_frozen_steps
    )
    if thaw_is_due:
        network.unfreeze()
        module._is_wav2vec_frozen = False

        if completely_freeze_feature_extractor:
            network.wav2vec2.feature_extractor.requires_grad_(False)
| [
"torch.nn.parameter.Parameter",
"torch.LongTensor",
"torch.Tensor",
"src.networks.wav2vec2_components.base_components.retrieve_pretrained_wav2vec2_config",
"src.util.torch.reset_model",
"torch.cat",
"torch.ones"
] | [((6645, 6694), 'torch.ones', 't.ones', (['(bs, max_num_audio_samples)'], {'dtype': 't.long'}), '((bs, max_num_audio_samples), dtype=t.long)\n', (6651, 6694), True, 'import torch as t\n'), ((2195, 2265), 'torch.nn.parameter.Parameter', 't.nn.parameter.Parameter', (['cls_token'], {'requires_grad': 'learnable_cls_token'}), '(cls_token, requires_grad=learnable_cls_token)\n', (2219, 2265), True, 'import torch as t\n'), ((2335, 2361), 'src.util.torch.reset_model', 'reset_model', (['self.wav2vec2'], {}), '(self.wav2vec2)\n', (2346, 2361), False, 'from src.util.torch import reset_model\n'), ((1357, 1426), 'src.networks.wav2vec2_components.base_components.retrieve_pretrained_wav2vec2_config', 'retrieve_pretrained_wav2vec2_config', (['wav2vec2_huggingface_id', 'reg_cfg'], {}), '(wav2vec2_huggingface_id, reg_cfg)\n', (1392, 1426), False, 'from src.networks.wav2vec2_components.base_components import Wav2Vec2NetworkRegularisationConfig, retrieve_pretrained_wav2vec2_config\n'), ((2059, 2097), 'torch.ones', 't.ones', (['(self.num_embedding_features,)'], {}), '((self.num_embedding_features,))\n', (2065, 2097), True, 'import torch as t\n'), ((2100, 2135), 'torch.Tensor', 't.Tensor', (['[self.cls_token_constant]'], {}), '([self.cls_token_constant])\n', (2108, 2135), True, 'import torch as t\n'), ((5073, 5128), 'torch.cat', 't.cat', (['[batched_cls_token, input_token_sequence]'], {'dim': '(1)'}), '([batched_cls_token, input_token_sequence], dim=1)\n', (5078, 5128), True, 'import torch as t\n'), ((7240, 7271), 'torch.LongTensor', 't.LongTensor', (['num_audio_samples'], {}), '(num_audio_samples)\n', (7252, 7271), True, 'import torch as t\n'), ((5400, 5499), 'torch.ones', 't.ones', (['(attention_mask.shape[0], 1)'], {'dtype': 'attention_mask.dtype', 'device': 'attention_mask.device'}), '((attention_mask.shape[0], 1), dtype=attention_mask.dtype, device=\n attention_mask.device)\n', (5406, 5499), True, 'import torch as t\n')] |
"""Main module."""
from functools import reduce
class Calc:
    """A small calculator with variadic add/multiply and a filtered average."""

    def add(self, *args):
        """Return the sum of all positional arguments (0 when called with none)."""
        return sum(args)

    def subtract(self, a, b):
        """Return ``a - b``."""
        return a - b

    def multiply(self, *args):
        """Return the product of all positional arguments.

        Raises:
            ValueError: if any argument is falsy (e.g. 0) — the class
                deliberately refuses zero factors.
        """
        if not all(args):
            raise ValueError
        return reduce(lambda x, y: x * y, args)

    def divide(self, a, b):
        """Return ``a / b``, or the string ``"inf"`` on division by zero."""
        try:
            return a / b
        # FIX: narrowed from a bare ``except:`` which also swallowed
        # unrelated errors (TypeError, even KeyboardInterrupt)
        except ZeroDivisionError:
            return "inf"

    def avg(self, args, ut=None, lt=None):
        """Return the average of ``args``, optionally bounded.

        Args:
            args: sequence of numbers (never modified; a copy is filtered).
            ut: upper threshold — values greater than ``ut`` are excluded.
            lt: lower threshold — values smaller than ``lt`` are excluded.

        Returns:
            The mean of the remaining values, or 0 if none remain.
        """
        filtered = list(args)
        # FIX: compare against None explicitly — the old truthiness tests
        # (`if ut:` / `if lt:`) silently ignored a legitimate bound of 0
        if ut is not None:
            filtered = [x for x in filtered if x <= ut]
        if lt is not None:
            filtered = [x for x in filtered if x >= lt]
        if not filtered:
            return 0
        return sum(filtered) / len(filtered)
| [
"functools.reduce"
] | [((269, 301), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'args'], {}), '(lambda x, y: x * y, args)\n', (275, 301), False, 'from functools import reduce\n')] |
import re
def le_assinatura():
    """Prompt for the six linguistic-trait values of the infected-student
    model and return them as the reference signature.

    Returns:
        [list] -- [wal, ttr, hlr, sal, sac, pal]: mean word length,
            type-token ratio, hapax legomena ratio, mean sentence length,
            mean sentence complexity and mean phrase length
    """
    print("Bem-vindo ao detector automático de COH-PIAH.")
    print("Informe a assinatura típica de um aluno infectado:")

    wal = float(input("Entre o tamanho médio de palavra:"))
    ttr = float(input("Entre a relação Type-Token:"))
    hlr = float(input("Entre a Razão Hapax Legomana:"))
    sal = float(input("Entre o tamanho médio de sentença:"))
    sac = float(input("Entre a complexidade média da sentença:"))
    pal = float(input("Entre o tamanho medio de frase:"))

    return [wal, ttr, hlr, sal, sac, pal]
def le_textos():
    """Prompt for the texts to be analysed, one per prompt, stopping at the
    first empty input.

    Returns:
        [list] -- each entered text as one element
    """
    i = 1
    textos = []
    texto = input("Digite o texto " + str(i) + " (aperte enter para sair):")
    while texto:
        textos.append(texto)
        i += 1
        texto = input("Digite o texto " + str(i) +
                      " (aperte enter para sair):")

    return textos
def separa_sentencas(texto):
    """Split a text into sentences at runs of '.', '!' or '?'.

    Arguments:
        texto {str} -- the full text

    Returns:
        list -- the sentences, without the empty trailing chunk that
            ``re.split`` produces when the text ends in punctuation
    """
    pedacos = re.split(r'[.!?]+', texto)
    if pedacos[-1] == '':
        pedacos.pop()
    return pedacos
def separa_frases(sentenca):
    """Split a sentence into phrases at commas, colons and semicolons.

    Arguments:
        sentenca {str} -- a sentence

    Returns:
        list -- the phrases contained in the sentence
    """
    delimitadores = r'[,:;]+'
    return re.split(delimitadores, sentenca)
def separa_palavras(frase):
    """Split a phrase on whitespace.

    Arguments:
        frase {str} -- a phrase

    Returns:
        list -- the words contained in the phrase
    """
    palavras = frase.split()
    return palavras
def n_palavras_unicas(lista_palavras):
    """Return how many words occur exactly once (case-insensitively).

    Replaces the original single pass that interleaved counting with
    incremental uniqueness bookkeeping — a Counter plus a filter pass is
    equally O(n) and far harder to get wrong.

    Arguments:
        lista_palavras {list} -- list of words

    Returns:
        int -- number of hapax legomena
    """
    contagens = Counter(palavra.lower() for palavra in lista_palavras)
    return sum(1 for total in contagens.values() if total == 1)
def n_palavras_diferentes(lista_palavras):
    """Return the number of distinct words, ignoring case.

    The original built a full frequency dict only to take its length; a set
    comprehension expresses the same thing directly.

    Arguments:
        lista_palavras {list} -- list of words

    Returns:
        int -- number of distinct (lowercased) words
    """
    return len({palavra.lower() for palavra in lista_palavras})
def compara_assinatura(as_a, as_b):
    """Return the degree of similarity between two signatures.

    Computed as the mean absolute difference over the six linguistic traits;
    smaller values mean more similar signatures.

    Arguments:
        as_a {list} -- first six-trait signature
        as_b {list} -- second six-trait signature

    Returns:
        float -- mean absolute trait difference
    """
    return sum(abs(as_a[i] - as_b[i]) for i in range(6)) / 6
def calcula_assinatura(texto):
    """Compute the six linguistic traits of a text.

    Arguments:
        texto {str} -- the text to analyse

    Returns:
        list -- [mean word length, type-token ratio, hapax legomena ratio,
            mean sentence length (chars), mean sentence complexity
            (phrases per sentence), mean phrase length (chars)]
    """
    sentencas = separa_sentencas(texto)

    # flatten sentences into phrases, and phrases into words
    frases = []
    for sentenca in sentencas:
        frases.extend(separa_frases(sentenca))
    palavras = []
    for frase in frases:
        palavras.extend(separa_palavras(frase))

    tamanho_medio_palavra = sum(len(p) for p in palavras) / len(palavras)
    relacao_type_token = n_palavras_diferentes(palavras) / len(palavras)
    razao_hapax = n_palavras_unicas(palavras) / len(palavras)
    tamanho_medio_sentenca = sum(len(s) for s in sentencas) / len(sentencas)
    complexidade_media = len(frases) / len(sentencas)
    tamanho_medio_frase = sum(len(f) for f in frases) / len(frases)

    return [tamanho_medio_palavra, relacao_type_token, razao_hapax,
            tamanho_medio_sentenca, complexidade_media, tamanho_medio_frase]
def avalia_textos(textos, ass_cp):
    """Return the 1-based number of the text most likely infected by COH-PIAH.

    The most likely text is the one whose signature differs *least* from the
    COH-PIAH reference signature ``ass_cp``.

    Arguments:
        textos {list} -- the candidate texts
        ass_cp {list} -- the COH-PIAH six-trait signature

    Returns:
        int -- number (1 to n) of the closest text
    """
    # FIX: the original loop never updated its running minimum and used an
    # inverted comparison (`menor < inf[i]`), so it could return the index
    # of a *larger* difference; track the true argmin instead
    menor_diferenca = None
    numero_do_texto = 1
    for posicao, texto in enumerate(textos):
        diferenca = compara_assinatura(calcula_assinatura(texto), ass_cp)
        if menor_diferenca is None or diferenca < menor_diferenca:
            menor_diferenca = diferenca
            numero_do_texto = posicao + 1
    return numero_do_texto
# Main program: read the COH-PIAH reference signature, read the candidate
# texts, then report which text is most likely infected.
assinatura = le_assinatura()
textos = le_textos()
avaliar = avalia_textos(textos, assinatura)
print(f'O autor do texto {avaliar} está infectado com COH-PIAH')
| [
"re.split"
] | [((1470, 1495), 're.split', 're.split', (['"""[.!?]+"""', 'texto'], {}), "('[.!?]+', texto)\n", (1478, 1495), False, 'import re\n'), ((1847, 1875), 're.split', 're.split', (['"""[,:;]+"""', 'sentenca'], {}), "('[,:;]+', sentenca)\n", (1855, 1875), False, 'import re\n')] |
import cv2
def webcam_gui(filter_func, video_src=0):
    """Show a live preview of ``filter_func`` applied to webcam frames.

    Runs until any key is pressed, then releases the capture device and
    closes all OpenCV windows.

    :param filter_func: callable taking a BGR frame and returning an image
    :param video_src: index of the capture device (default 0)
    """
    capture = cv2.VideoCapture(video_src)
    pressed_key = -1
    while pressed_key == -1:
        # grab the next frame from the camera
        ret, frame = capture.read()
        # apply the user-supplied filter
        filtered_frame = filter_func(frame)
        # display the result
        cv2.imshow('Press any key to exit', filtered_frame)
        # poll the keyboard for 10 ms; -1 means no key was pressed
        pressed_key = cv2.waitKey(10)

    capture.release()
    cv2.destroyAllWindows()
def edge_filter(frame_in):
    """Return a Canny edge map of the input BGR frame.

    The frame is converted to grayscale and blurred with a 3x3 box filter
    to reduce noise before edge detection.
    """
    grayscale = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.blur(grayscale, (3, 3))
    return cv2.Canny(smoothed, 30, 120)
def gray_filter(frame_in):
    """Return the grayscale version of a BGR input frame."""
    return cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
if __name__ == "__main__":
cv2.VideoCapture.set(CV_CAP_PROP_FPS, 10)
webcam_gui(edge_filter) | [
"cv2.VideoCapture.set",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.Canny",
"cv2.waitKey",
"cv2.blur"
] | [((66, 93), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_src'], {}), '(video_src)\n', (82, 93), False, 'import cv2\n'), ((470, 493), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (491, 493), False, 'import cv2\n'), ((580, 622), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_in', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_in, cv2.COLOR_BGR2GRAY)\n', (592, 622), False, 'import cv2\n'), ((677, 705), 'cv2.blur', 'cv2.blur', (['frame_gray', '(3, 3)'], {}), '(frame_gray, (3, 3))\n', (685, 705), False, 'import cv2\n'), ((748, 778), 'cv2.Canny', 'cv2.Canny', (['frame_blur', '(30)', '(120)'], {}), '(frame_blur, 30, 120)\n', (757, 778), False, 'import cv2\n'), ((889, 931), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_in', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_in, cv2.COLOR_BGR2GRAY)\n', (901, 931), False, 'import cv2\n'), ((1004, 1045), 'cv2.VideoCapture.set', 'cv2.VideoCapture.set', (['CV_CAP_PROP_FPS', '(10)'], {}), '(CV_CAP_PROP_FPS, 10)\n', (1024, 1045), False, 'import cv2\n'), ((329, 375), 'cv2.imshow', 'cv2.imshow', (['"""Press any key to exit"""', 'frame_out'], {}), "('Press any key to exit', frame_out)\n", (339, 375), False, 'import cv2\n'), ((431, 446), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (442, 446), False, 'import cv2\n')] |
#!/usr/bin/python3.7
#Author: <NAME>
import sys
import os
import math
import re
import numpy as np
#print('usage: <>.py <file.pdb> \nexecute nsc to generate point-based surface and create tables and if verbose==1 files dotslabel1.xyzrgb dotslabel2.xyzrgb dotslabel3.xyzrgb and dotslabel4.xyzrgb\n')
def pdbsurface(filepdb,nscexe):
verbose=0
#label1 {H, Cl, Br, I} white/grey 0.9 0.9 0.9
#label2 {O, N, S, F} red 1 0 0
#label3 {C, P, B} green 0 1 0
#label4 {others} blue 0 0 1
tabR= {'C':'%.2f' % 1.70, 'O':1.52, 'N':1.55, 'S':1.80, 'P':1.80, 'B':1.72, 'Br':1.85, 'Cl':1.75, 'I':1.98, 'F':1.47, 'H':'%.2f' % 1.20, 'Hp':'%.2f' % 1.10, 'X':'%.2f' % 1.10}
label= {'C':3, 'P':3, 'B':3, 'O':2, 'N':2, 'S':2, 'F':2, 'Hp':2, 'H':1, 'Cl':1, 'Br':1, 'I':1}
rgb= np.array([[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
espace5=' '
espace6=' '
fichier2D=0
filepdb=open(filepdb,'r')
getstr=filepdb.read().split('\n')
filepdb.close()
tabLignesPdb=[]
tabLignesPdb.append('')
compt=1
while (compt < len(getstr)):
tabLignesPdb.append(re.split('\s+', getstr[compt].strip()))
compt=compt+1
compt=1
comptatomes=0
getx=[]
getx.append('')
gety=[]
gety.append('')
getz=[]
getz.append('')
getA=[]
getA.append('')
getRayon=[]
getRayon.append('')
while (compt < len(tabLignesPdb)):
if (tabLignesPdb[compt][0] == 'HETATM' or tabLignesPdb[compt][0] == 'ATOM'):
xAtome=float(tabLignesPdb[compt][5])
yAtome=float(tabLignesPdb[compt][6])
zAtome=float(tabLignesPdb[compt][7])
getx.append(xAtome)
gety.append(yAtome)
getz.append(zAtome)
if (float(zAtome) == 0):
fichier2D=fichier2D+1
getA.append(tabLignesPdb[compt][2])
if(getA[compt]!='C' and getA[compt]!='O' and getA[compt]!='N' and getA[compt]!='P' and getA[compt]!='B' and getA[compt]!='H' and getA[compt]!='F' and getA[compt]!='Br' and getA[compt]!='Cl' and getA[compt]!='S' and getA[compt]!='I' and getA[compt]!='X' and getA[compt]!='Hp'):
print("Warning: atom %s set as C because it is not the tab (unusual in medchem)" % getA[compt])
getA[compt]='C'
getRayon.append(tabR[getA[compt]])
comptatomes=comptatomes+1
compt=compt+1
nbatomes=comptatomes
if (fichier2D==int(nbatomes)):
print("Warning: pdb file in 2D; SenSaaS needs 3D coordinates to work properly")
compt=1
while (compt <= nbatomes):
if (getA[compt] == 'H'):
compt2=1
while(compt2 <= nbatomes):
if (getA[compt2] == 'N' or getA[compt2] == 'O'):
distHp= math.sqrt((getx[compt] - getx[compt2])**2 + (gety[compt] - gety[compt2])**2 + (getz[compt] - getz[compt2])**2)
if (distHp <= 1.2):
getRayon[compt]=tabR['Hp']
compt2=compt2+1
compt=compt+1
#nsc:
compt=1
psaIn=open('psa.in','w')
psaIn.write('* XYZR\n')
psaIn.write(espace6+str(nbatomes)+'\n')
while (compt <= nbatomes):
x='%.2f' % getx[compt]
y='%.2f' % gety[compt]
z='%.2f' % getz[compt]
psaIn.write('%8s %8s %8s %8s %8s \n'%(x,y,z,getRayon[compt],getA[compt]))
compt=compt+1
psaIn.close()
cmd = '%s psa.in ' % (nscexe)
os.system(cmd)
psaOut=open('psa.out', 'r')
lignepsaOut= psaOut.readlines()
psaOut.close()
tabLignesPsaOut=[]
compt=3
while (compt < len(lignepsaOut)):
tabLignesPsaOut.append(re.split('\s+', lignepsaOut[compt].strip()))
compt=compt+1
nbDots= int(tabLignesPsaOut[0][2])
#print("nbDots= %6s" % (nbDots))
del tabLignesPsaOut[0]
del tabLignesPsaOut[0]
getDots=np.empty(shape=[nbDots,3], dtype='float64')
getrgb=np.empty(shape=[nbDots,3], dtype='float64')
compt=nbatomes+2
comptDots=0
ligneFicDots=[]
label1=[]
label2=[]
label3=[]
label4=[]
if(verbose==1):
dotsFic=open('dots.xyzrgb', 'w')
while (compt < nbatomes+nbDots+2):
xDot=float(tabLignesPsaOut[compt][2])
yDot=float(tabLignesPsaOut[compt][3])
zDot=float(tabLignesPsaOut[compt][4])
compt2=1
m=100
mi=0
while(compt2 <= nbatomes):
xa=getx[compt2]
ya=gety[compt2]
za=getz[compt2]
goodDots= math.sqrt((xDot - xa)**2 + (yDot - ya)**2 + (zDot - za)**2)
if(goodDots < m):
m=goodDots
mi=compt2
compt2=compt2+1
atomeCorrespondant=getA[mi]
rgbi=label[atomeCorrespondant]
if(getRayon[mi]==tabR['Hp']):
rgbi=label['O']
getrgb[comptDots,:]=[rgb[rgbi,0], rgb[rgbi,1], rgb[rgbi,2]]
getDots[comptDots,:]=[xDot,yDot,zDot]
if (rgbi == 1):
label1.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 2):
label2.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 3):
label3.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 4):
label4.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
else:
print("no label for dot no %5s ?\n" %(comptDots))
if(verbose==1):
dotsFic.write('%8s'%xDot+'%8s'%yDot+'%8s'%zDot+espace5+'%5s'%(rgb[rgbi,0])+'%5s'%(rgb[rgbi,1])+'%5s'%(rgb[rgbi,2])+'\n')
comptDots=comptDots+1
compt=compt+1
if(verbose==1):
dotsFic.close()
dotslabel1=open('dotslabel1.xyzrgb', 'w')
dotslabel2=open('dotslabel2.xyzrgb', 'w')
dotslabel3=open('dotslabel3.xyzrgb', 'w')
dotslabel4=open('dotslabel4.xyzrgb', 'w')
getDots1=np.empty(shape=[len(label1),3], dtype='float64')
getrgb1=np.empty(shape=[len(label1),3], dtype='float64')
getDots2=np.empty(shape=[len(label2),3], dtype='float64')
getrgb2=np.empty(shape=[len(label2),3], dtype='float64')
getDots3=np.empty(shape=[len(label3),3], dtype='float64')
getrgb3=np.empty(shape=[len(label3),3], dtype='float64')
getDots4=np.empty(shape=[len(label4),3], dtype='float64')
getrgb4=np.empty(shape=[len(label4),3], dtype='float64')
compt=0
while(compt < len(label1)):
getDots1[compt]= label1[compt][0]
getrgb1[compt]= label1[compt][1]
if(verbose==1):
dotslabel1.write('%8s'%getDots1[compt,0]+'%8s'%getDots1[compt,1]+'%8s'%getDots1[compt,2]+espace5+'%5s'%getrgb1[compt,0]+'%5s'%getrgb1[compt,1]+'%5s'%getrgb1[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots2)):
getDots2[compt]= label2[compt][0]
getrgb2[compt]= label2[compt][1]
if(verbose==1):
dotslabel2.write('%8s'%getDots2[compt,0]+'%8s'%getDots2[compt,1]+'%8s'%getDots2[compt,2]+espace5+'%5s'%getrgb2[compt,0]+'%5s'%getrgb2[compt,1]+'%5s'%getrgb2[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots3)):
getDots3[compt]= label3[compt][0]
getrgb3[compt]= label3[compt][1]
if(verbose==1):
dotslabel3.write('%8s'%getDots3[compt,0]+'%8s'%getDots3[compt,1]+'%8s'%getDots3[compt,2]+espace5+'%5s'%getrgb3[compt,0]+'%5s'%getrgb3[compt,1]+'%5s'%getrgb3[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots4)):
getDots4[compt]= label4[compt][0]
getrgb4[compt]= label4[compt][1]
if(verbose==1):
dotslabel4.write('%8s'%getDots4[compt,0]+'%8s'%getDots4[compt,1]+'%8s'%getDots4[compt,2]+espace5+'%5s'%getrgb4[compt,0]+'%5s'%getrgb4[compt,1]+'%5s'%getrgb4[compt,2]+'\n')
compt=compt+1
if(verbose==1):
dotslabel1.close()
dotslabel2.close()
dotslabel3.close()
dotslabel4.close()
else:
os.remove("psa.in")
os.remove("psa.out")
return getDots, getrgb, getDots1, getrgb1, getDots2, getrgb2, getDots3, getrgb3, getDots4, getrgb4
| [
"math.sqrt",
"numpy.array",
"numpy.empty",
"numpy.vstack",
"os.system",
"os.remove"
] | [((776, 847), 'numpy.array', 'np.array', (['[[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (784, 847), True, 'import numpy as np\n'), ((3459, 3473), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (3468, 3473), False, 'import os\n'), ((3878, 3922), 'numpy.empty', 'np.empty', ([], {'shape': '[nbDots, 3]', 'dtype': '"""float64"""'}), "(shape=[nbDots, 3], dtype='float64')\n", (3886, 3922), True, 'import numpy as np\n'), ((3933, 3977), 'numpy.empty', 'np.empty', ([], {'shape': '[nbDots, 3]', 'dtype': '"""float64"""'}), "(shape=[nbDots, 3], dtype='float64')\n", (3941, 3977), True, 'import numpy as np\n'), ((7968, 7987), 'os.remove', 'os.remove', (['"""psa.in"""'], {}), "('psa.in')\n", (7977, 7987), False, 'import os\n'), ((7996, 8016), 'os.remove', 'os.remove', (['"""psa.out"""'], {}), "('psa.out')\n", (8005, 8016), False, 'import os\n'), ((4515, 4580), 'math.sqrt', 'math.sqrt', (['((xDot - xa) ** 2 + (yDot - ya) ** 2 + (zDot - za) ** 2)'], {}), '((xDot - xa) ** 2 + (yDot - ya) ** 2 + (zDot - za) ** 2)\n', (4524, 4580), False, 'import math\n'), ((4994, 5044), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5003, 5044), True, 'import numpy as np\n'), ((2791, 2911), 'math.sqrt', 'math.sqrt', (['((getx[compt] - getx[compt2]) ** 2 + (gety[compt] - gety[compt2]) ** 2 + (\n getz[compt] - getz[compt2]) ** 2)'], {}), '((getx[compt] - getx[compt2]) ** 2 + (gety[compt] - gety[compt2]) **\n 2 + (getz[compt] - getz[compt2]) ** 2)\n', (2800, 2911), False, 'import math\n'), ((5098, 5148), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5107, 5148), True, 'import numpy as np\n'), ((5202, 5252), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5211, 
5252), True, 'import numpy as np\n'), ((5306, 5356), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5315, 5356), True, 'import numpy as np\n')] |
"""
Test script for utils.py function.
"""
import os
import numpy as np
import pytest
from astropy import units as u
from cwinpy.utils import (
ellipticity_to_q22,
gcd_array,
get_psr_name,
initialise_ephemeris,
int_to_alpha,
is_par_file,
logfactorial,
q22_to_ellipticity,
)
from lalpulsar.PulsarParametersWrapper import PulsarParametersPy
def test_logfactorial():
    """Check logfactorial against np.log of 3! for both int and float input."""
    expected = np.log(3 * 2 * 1)
    for value in (3, 3.0):
        assert logfactorial(value) == expected
def test_gcd_array():
    """Check gcd_array's input validation and a valid multi-value case."""
    # a non-list value must be rejected
    with pytest.raises(TypeError):
        gcd_array(1)

    # a single-element list must be rejected
    with pytest.raises(ValueError):
        gcd_array([1])

    # greatest common divisor of a valid list
    assert gcd_array([5, 25, 90]) == 5
def test_int_to_alpha():
    """Check integer-to-alphabetical-string conversion and its error handling."""
    # a non-integer position must be rejected
    with pytest.raises(TypeError):
        int_to_alpha(2.3)

    # a negative position must be rejected
    with pytest.raises(ValueError):
        int_to_alpha(-1)

    # default (upper) case conversions
    for pos, expected in [(1, "A"), (26, "Z"), (27, "AA"), (28, "AB"), (200, "GR"), (1000, "ALL")]:
        assert int_to_alpha(pos) == expected

    # explicit lower-case conversions
    for pos, expected in [(1, "a"), (26, "z")]:
        assert int_to_alpha(pos, case="lower") == expected
def test_is_par_file():
    """Check that is_par_file rejects missing files and incomplete par files."""
    # a non-existent path is not a par file
    assert is_par_file("blah_blah_blah") is False

    # par files missing any one required attribute must be rejected
    brokenpar = "broken.par"
    values = {
        "F": [100.0],
        "RAJ": 0.1,
        "DECJ": -0.1,
        "PSRJ": "J0101-0101",
    }
    for leavekey in list(values.keys()):
        psr = PulsarParametersPy()
        # copy every attribute except the one being left out
        for key in values:
            if key != leavekey:
                psr[key] = values[key]
        psr.pp_to_par(brokenpar)

        assert is_par_file(brokenpar) is False

        os.remove(brokenpar)
def test_get_psr_name():
    """Check pulsar name extraction from each supported attribute."""
    cases = [
        ("PSRJ", "J0123+1234"),
        ("PSRB", "B0124+12"),
        ("PSR", "J0123+1234"),
        ("NAME", "B0124+12"),
    ]
    for attribute, expected in cases:
        psr = PulsarParametersPy()
        psr[attribute] = expected
        assert get_psr_name(psr) == expected
def test_ellipticity_to_q22():
    """Check ellipticity-to-mass-quadrupole conversion for scalars and arrays."""
    epsilon = [1e-9, 1e-8]
    conversion_factor = np.sqrt(15.0 / (8.0 * np.pi))
    expected_q22 = np.array([1e29, 1e30]) * conversion_factor

    # scalar input
    assert np.isclose(ellipticity_to_q22(epsilon[0]), expected_q22[0])

    # with astropy units attached
    q22units = ellipticity_to_q22(epsilon[0], units=True)
    assert np.isclose(q22units.value, expected_q22[0])
    assert q22units.unit == u.Unit("kg m2")

    # array-like input
    q22 = ellipticity_to_q22(epsilon)
    assert len(q22) == len(epsilon)
    assert np.allclose(q22, expected_q22)
def test_q22_to_ellipticity_to_q22():
    """Check mass-quadrupole-to-ellipticity conversion for scalars and arrays."""
    q22 = [1e29, 1e30]
    conversion_factor = np.sqrt(15.0 / (8.0 * np.pi))
    expected_epsilon = np.array([1e-9, 1e-8]) / conversion_factor

    # scalar input
    assert np.isclose(q22_to_ellipticity(q22[0]), expected_epsilon[0])

    # array-like input
    epsilon = q22_to_ellipticity(q22)
    assert len(q22) == len(epsilon)
    assert np.allclose(epsilon, expected_epsilon)

    # a Quantity input comes back as a plain (unitless) value
    epsilon = q22_to_ellipticity(q22[0] * u.kg * u.m ** 2)
    assert np.isclose(epsilon, expected_epsilon[0])
    assert not hasattr(epsilon, "unit")
def test_initialise_ephemeris():
    """
    Test reading of ephemeris files.

    Checks that invalid units and non-existent file paths are rejected, and
    that the default ephemerides load with the expected table sizes.
    """
    # an unrecognised units string must be rejected
    with pytest.raises(ValueError):
        initialise_ephemeris(units="lhfld")

    # non-existent earth/sun/time files must raise an IOError
    # (the original repeated this identical check twice; once suffices)
    with pytest.raises(IOError):
        initialise_ephemeris(
            earthfile="jksgdksg", sunfile="lhlbca", timefile="lshdldgls"
        )

    # a non-existent time file on its own must also raise an IOError
    with pytest.raises(IOError):
        initialise_ephemeris(timefile="lshdldgls")

    # defaults should load successfully with the known table sizes
    edat, tdat = initialise_ephemeris()

    assert edat.nentriesE == 175322
    assert edat.nentriesS == 17534
    assert edat.dtEtable == 7200.0
    assert edat.dtStable == 72000.0
    assert edat.etype == 2

    assert tdat.nentriesT == 87660
    assert tdat.dtTtable == 14400.0
| [
"cwinpy.utils.int_to_alpha",
"numpy.allclose",
"numpy.isclose",
"numpy.sqrt",
"astropy.units.Unit",
"numpy.log",
"cwinpy.utils.q22_to_ellipticity",
"os.remove",
"numpy.array",
"cwinpy.utils.is_par_file",
"cwinpy.utils.ellipticity_to_q22",
"pytest.raises",
"lalpulsar.PulsarParametersWrapper.P... | [((2618, 2648), 'cwinpy.utils.ellipticity_to_q22', 'ellipticity_to_q22', (['epsilon[0]'], {}), '(epsilon[0])\n', (2636, 2648), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2661, 2693), 'numpy.isclose', 'np.isclose', (['q22', 'expected_q22[0]'], {}), '(q22, expected_q22[0])\n', (2671, 2693), True, 'import numpy as np\n'), ((2727, 2769), 'cwinpy.utils.ellipticity_to_q22', 'ellipticity_to_q22', (['epsilon[0]'], {'units': '(True)'}), '(epsilon[0], units=True)\n', (2745, 2769), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2782, 2825), 'numpy.isclose', 'np.isclose', (['q22units.value', 'expected_q22[0]'], {}), '(q22units.value, expected_q22[0])\n', (2792, 2825), True, 'import numpy as np\n'), ((2903, 2930), 'cwinpy.utils.ellipticity_to_q22', 'ellipticity_to_q22', (['epsilon'], {}), '(epsilon)\n', (2921, 2930), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2979, 3009), 'numpy.allclose', 'np.allclose', (['q22', 'expected_q22'], {}), '(q22, expected_q22)\n', (2990, 3009), True, 'import numpy as np\n'), ((3234, 3260), 'cwinpy.utils.q22_to_ellipticity', 'q22_to_ellipticity', (['q22[0]'], {}), '(q22[0])\n', (3252, 3260), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3273, 3313), 'numpy.isclose', 'np.isclose', (['epsilon', 'expected_epsilon[0]'], {}), '(epsilon, expected_epsilon[0])\n', (3283, 3313), True, 'import numpy as np\n'), ((3351, 3374), 'cwinpy.utils.q22_to_ellipticity', 'q22_to_ellipticity', (['q22'], {}), '(q22)\n', (3369, 3374), False, 'from 
cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3423, 3461), 'numpy.allclose', 'np.allclose', (['epsilon', 'expected_epsilon'], {}), '(epsilon, expected_epsilon)\n', (3434, 3461), True, 'import numpy as np\n'), ((3496, 3540), 'cwinpy.utils.q22_to_ellipticity', 'q22_to_ellipticity', (['(q22[0] * u.kg * u.m ** 2)'], {}), '(q22[0] * u.kg * u.m ** 2)\n', (3514, 3540), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3553, 3593), 'numpy.isclose', 'np.isclose', (['epsilon', 'expected_epsilon[0]'], {}), '(epsilon, expected_epsilon[0])\n', (3563, 3593), True, 'import numpy as np\n'), ((4200, 4222), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {}), '()\n', (4220, 4222), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((470, 485), 'cwinpy.utils.logfactorial', 'logfactorial', (['a'], {}), '(a)\n', (482, 485), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((489, 506), 'numpy.log', 'np.log', (['(3 * 2 * 1)'], {}), '(3 * 2 * 1)\n', (495, 506), True, 'import numpy as np\n'), ((531, 546), 'cwinpy.utils.logfactorial', 'logfactorial', (['a'], {}), '(a)\n', (543, 546), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((550, 567), 'numpy.log', 'np.log', (['(3 * 2 * 1)'], {}), '(3 * 2 * 1)\n', (556, 567), True, 'import numpy as np\n'), ((689, 713), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (702, 713), False, 'import pytest\n'), ((723, 735), 'cwinpy.utils.gcd_array', 
'gcd_array', (['a'], {}), '(a)\n', (732, 735), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((774, 799), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (787, 799), False, 'import pytest\n'), ((809, 821), 'cwinpy.utils.gcd_array', 'gcd_array', (['a'], {}), '(a)\n', (818, 821), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((854, 866), 'cwinpy.utils.gcd_array', 'gcd_array', (['a'], {}), '(a)\n', (863, 866), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((991, 1015), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1004, 1015), False, 'import pytest\n'), ((1025, 1042), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['pos'], {}), '(pos)\n', (1037, 1042), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1066, 1091), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1079, 1091), False, 'import pytest\n'), ((1101, 1118), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['pos'], {}), '(pos)\n', (1113, 1118), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1131, 1146), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(1)'], {}), '(1)\n', (1143, 1146), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1165, 1194), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(1)'], {'case': '"""lower"""'}), "(1, 
case='lower')\n", (1177, 1194), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1213, 1229), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(26)'], {}), '(26)\n', (1225, 1229), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1248, 1278), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(26)'], {'case': '"""lower"""'}), "(26, case='lower')\n", (1260, 1278), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1297, 1313), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(27)'], {}), '(27)\n', (1309, 1313), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1333, 1349), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(28)'], {}), '(28)\n', (1345, 1349), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1369, 1386), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(200)'], {}), '(200)\n', (1381, 1386), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1406, 1424), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(1000)'], {}), '(1000)\n', (1418, 1424), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1521, 1550), 'cwinpy.utils.is_par_file', 'is_par_file', (['"""blah_blah_blah"""'], {}), "('blah_blah_blah')\n", (1532, 1550), False, 'from cwinpy.utils import 
ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1857, 1877), 'lalpulsar.PulsarParametersWrapper.PulsarParametersPy', 'PulsarParametersPy', ([], {}), '()\n', (1875, 1877), False, 'from lalpulsar.PulsarParametersWrapper import PulsarParametersPy\n'), ((2065, 2085), 'os.remove', 'os.remove', (['brokenpar'], {}), '(brokenpar)\n', (2074, 2085), False, 'import os\n'), ((2317, 2337), 'lalpulsar.PulsarParametersWrapper.PulsarParametersPy', 'PulsarParametersPy', ([], {}), '()\n', (2335, 2337), False, 'from lalpulsar.PulsarParametersWrapper import PulsarParametersPy\n'), ((2553, 2577), 'numpy.array', 'np.array', (['[1e+29, 1e+30]'], {}), '([1e+29, 1e+30])\n', (2561, 2577), True, 'import numpy as np\n'), ((2578, 2607), 'numpy.sqrt', 'np.sqrt', (['(15.0 / (8.0 * np.pi))'], {}), '(15.0 / (8.0 * np.pi))\n', (2585, 2607), True, 'import numpy as np\n'), ((2854, 2869), 'astropy.units.Unit', 'u.Unit', (['"""kg m2"""'], {}), "('kg m2')\n", (2860, 2869), True, 'from astropy import units as u\n'), ((3165, 3189), 'numpy.array', 'np.array', (['[1e-09, 1e-08]'], {}), '([1e-09, 1e-08])\n', (3173, 3189), True, 'import numpy as np\n'), ((3190, 3219), 'numpy.sqrt', 'np.sqrt', (['(15.0 / (8.0 * np.pi))'], {}), '(15.0 / (8.0 * np.pi))\n', (3197, 3219), True, 'import numpy as np\n'), ((3732, 3757), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3745, 3757), False, 'import pytest\n'), ((3767, 3802), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'units': '"""lhfld"""'}), "(units='lhfld')\n", (3787, 3802), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3813, 3835), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (3826, 3835), False, 'import pytest\n'), ((3845, 3932), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', 
([], {'earthfile': '"""jksgdksg"""', 'sunfile': '"""lhlbca"""', 'timefile': '"""lshdldgls"""'}), "(earthfile='jksgdksg', sunfile='lhlbca', timefile=\n 'lshdldgls')\n", (3865, 3932), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3960, 3982), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (3973, 3982), False, 'import pytest\n'), ((3992, 4079), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'earthfile': '"""jksgdksg"""', 'sunfile': '"""lhlbca"""', 'timefile': '"""lshdldgls"""'}), "(earthfile='jksgdksg', sunfile='lhlbca', timefile=\n 'lshdldgls')\n", (4012, 4079), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((4107, 4129), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (4120, 4129), False, 'import pytest\n'), ((4139, 4181), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'timefile': '"""lshdldgls"""'}), "(timefile='lshdldgls')\n", (4159, 4181), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2024, 2046), 'cwinpy.utils.is_par_file', 'is_par_file', (['brokenpar'], {}), '(brokenpar)\n', (2035, 2046), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2379, 2396), 'cwinpy.utils.get_psr_name', 'get_psr_name', (['psr'], {}), '(psr)\n', (2391, 2396), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n')] |
import ast
import os
from mr_proper.public_api import is_function_pure
from mr_proper.utils.ast import get_ast_tree
def test_ok_for_destructive_assignment():
    """A function using destructive (tuple) assignment is still pure."""
    source = """
def foo(a):
    b, c = a
    return b * c
    """.strip()
    module = ast.parse(source)
    funcdef = module.body[0]
    assert is_function_pure(funcdef)
def test_is_function_pure_fail_case():
    """A function that performs I/O (print) is not pure."""
    source = """
def foo(a):
    print(a)
    """.strip()
    funcdef = ast.parse(source).body[0]
    assert not is_function_pure(funcdef)
def test_is_function_pure_fail_case_for_recursive():
    """Recursive analysis over the helper test file must flag impurity."""
    test_file_path = os.path.join(os.path.dirname(__file__), 'test_files/test.py')
    ast_tree = get_ast_tree(test_file_path)
    foo_node = ast_tree.body[0]
    result = is_function_pure(
        foo_node,
        file_ast_tree=ast_tree,
        pyfilepath=test_file_path,
        recursive=True,
    )
    assert not result
| [
"mr_proper.public_api.is_function_pure",
"mr_proper.utils.ast.get_ast_tree",
"os.path.dirname"
] | [((267, 292), 'mr_proper.public_api.is_function_pure', 'is_function_pure', (['funcdef'], {}), '(funcdef)\n', (283, 292), False, 'from mr_proper.public_api import is_function_pure\n'), ((606, 634), 'mr_proper.utils.ast.get_ast_tree', 'get_ast_tree', (['test_file_path'], {}), '(test_file_path)\n', (618, 634), False, 'from mr_proper.utils.ast import get_ast_tree\n'), ((427, 452), 'mr_proper.public_api.is_function_pure', 'is_function_pure', (['funcdef'], {}), '(funcdef)\n', (443, 452), False, 'from mr_proper.public_api import is_function_pure\n'), ((542, 567), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (557, 567), False, 'import os\n'), ((682, 780), 'mr_proper.public_api.is_function_pure', 'is_function_pure', (['foo_node'], {'file_ast_tree': 'ast_tree', 'pyfilepath': 'test_file_path', 'recursive': '(True)'}), '(foo_node, file_ast_tree=ast_tree, pyfilepath=\n test_file_path, recursive=True)\n', (698, 780), False, 'from mr_proper.public_api import is_function_pure\n')] |
from __future__ import annotations
import re
from datetime import datetime, timezone
from decimal import Decimal, InvalidOperation
from typing import TYPE_CHECKING, Iterable, List, Mapping, Optional, Sequence, Union
import flask_babel
from babel import numbers
from dateutil.relativedelta import relativedelta
from flask_babel import ngettext
from flask_wtf import FlaskForm
from structlog import get_logger
from wtforms import Field, StringField, validators
from wtforms.compat import string_types
from app.forms import error_messages
from app.forms.fields import (
DateField,
DecimalFieldWithSeparator,
IntegerFieldWithSeparator,
)
from app.jinja_filters import format_number, get_formatted_currency
from app.questionnaire.rules.utils import parse_datetime
from app.utilities import safe_content
if TYPE_CHECKING:
from app.forms.questionnaire_form import QuestionnaireForm # pragma: no cover
# Module-level structured logger for this validators module.
logger = get_logger()

# Matches a single top-level-domain label: either 2-63 letters, or a
# punycode ("xn--") label; case-insensitive.
tld_part_regex = re.compile(
    r"^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$", re.IGNORECASE
)
# Loose email shape check: anything, then "@", then a domain part that does
# not start with "." or "@" and contains no whitespace.
email_regex = re.compile(r"^.+@([^.@][^@\s]+)$")

# Type aliases shared by the validators below.
OptionalMessage = Optional[Mapping[str, str]]
NumType = Union[int, Decimal]
PeriodType = Mapping[str, int]
class NumberCheck:
    """
    Validator that stops the validation chain when the raw input cannot be
    parsed as a plain (non-scientific-notation) number.
    """

    def __init__(self, message: Optional[str] = None):
        # Fall back to the generic invalid-number message when none is given.
        self.message = message or error_messages["INVALID_NUMBER"]

    def __call__(
        self,
        form: FlaskForm,
        field: Union[DecimalFieldWithSeparator, IntegerFieldWithSeparator],
    ) -> None:
        try:
            # Strip the locale's thousands separator before parsing.
            group_symbol = numbers.get_group_symbol(flask_babel.get_locale())
            Decimal(field.raw_data[0].replace(group_symbol, ""))
        except (ValueError, TypeError, InvalidOperation, AttributeError) as exc:
            raise validators.StopValidation(self.message) from exc

        # Decimal happily accepts scientific notation ("1e3"); reject it here.
        if "e" in field.raw_data[0].lower():
            raise validators.StopValidation(self.message)
class ResponseRequired:
    """
    Validates that input was provided for this field. This is a copy of the
    InputRequired validator provided by wtforms, which checks that form-input data
    was provided, but additionally adds a kwarg to strip whitespace, as is available
    on the Optional() validator wtforms provides. Oddly, stripping whitespace is not
    an option for DataRequired or InputRequired validators in wtforms.
    """

    field_flags = ("required",)

    def __init__(self, message: str, strip_whitespace: bool = True):
        self.message = message
        # Optionally ignore surrounding whitespace when deciding "empty".
        self.string_check = (
            (lambda s: s.strip()) if strip_whitespace else (lambda s: s)
        )

    def __call__(self, form: "QuestionnaireForm", field: Field) -> None:
        raw = field.raw_data
        # Any non-empty (post-check) value satisfies the requirement.
        if raw and raw[0] and self.string_check(raw[0]):
            return
        field.errors[:] = []
        raise validators.StopValidation(self.message)
class NumberRange:
    """
    Validate that a number lies within an (optionally exclusive) range.

    Works with any comparable number type (integers, Decimals, floats),
    not just integers.

    :param minimum:
        Lower bound for the value; not checked when None.
    :param maximum:
        Upper bound for the value; not checked when None.
    """

    def __init__(
        self,
        minimum: Optional[NumType] = None,
        minimum_exclusive: bool = False,
        maximum: Optional[NumType] = None,
        maximum_exclusive: bool = False,
        messages: OptionalMessage = None,
        currency: Optional[str] = None,
    ):
        self.minimum = minimum
        self.maximum = maximum
        self.minimum_exclusive = minimum_exclusive
        self.maximum_exclusive = maximum_exclusive
        # Custom messages override the defaults on a per-key basis.
        self.messages: Mapping[str, str] = {**error_messages, **(messages or {})}
        self.currency = currency

    def __call__(
        self,
        form: "QuestionnaireForm",
        field: Union[DecimalFieldWithSeparator, IntegerFieldWithSeparator],
    ) -> None:
        value: Union[int, Decimal] = field.data
        if value is None:
            # Nothing to range-check; presence is enforced by other validators.
            return
        error_message = self.validate_minimum(value) or self.validate_maximum(value)
        if error_message:
            raise validators.ValidationError(error_message)

    def validate_minimum(self, value: NumType) -> Optional[str]:
        """Return an error string when *value* is below the lower bound, else None."""
        if self.minimum is None:
            return None

        if self.minimum_exclusive and value <= self.minimum:
            return self.messages["NUMBER_TOO_SMALL_EXCLUSIVE"] % {
                "min": format_playback_value(self.minimum, self.currency)
            }
        if value < self.minimum:
            return self.messages["NUMBER_TOO_SMALL"] % {
                "min": format_playback_value(self.minimum, self.currency)
            }
        return None

    def validate_maximum(self, value: NumType) -> Optional[str]:
        """Return an error string when *value* is above the upper bound, else None."""
        if self.maximum is None:
            return None

        if self.maximum_exclusive and value >= self.maximum:
            return self.messages["NUMBER_TOO_LARGE_EXCLUSIVE"] % {
                "max": format_playback_value(self.maximum, self.currency)
            }
        if value > self.maximum:
            return self.messages["NUMBER_TOO_LARGE"] % {
                "max": format_playback_value(self.maximum, self.currency)
            }
        return None
class DecimalPlaces:
    """
    Validate that an input has at most ``max_decimals`` decimal places.

    :param max_decimals:
        The maximum allowed number of decimal places.
    """

    def __init__(self, max_decimals: int = 0, messages: OptionalMessage = None):
        self.max_decimals = max_decimals
        self.messages = {**error_messages, **(messages or {})}

    def __call__(
        self, form: "QuestionnaireForm", field: DecimalFieldWithSeparator
    ) -> None:
        locale = flask_babel.get_locale()
        group_symbol = numbers.get_group_symbol(locale)
        decimal_symbol = numbers.get_decimal_symbol(locale)

        # Normalise: drop the thousands separator and any spaces.
        data = field.raw_data[0].replace(group_symbol, "").replace(" ", "")

        if not data or decimal_symbol not in data:
            # No fractional part present, so nothing to validate.
            return

        if self.max_decimals == 0:
            raise validators.ValidationError(self.messages["INVALID_INTEGER"])

        # Count the digits after the first decimal symbol.
        if len(data.split(decimal_symbol)[1]) > self.max_decimals:
            raise validators.ValidationError(
                self.messages["INVALID_DECIMAL"] % dict(max=self.max_decimals)
            )
class OptionalForm:
    """
    Allows completely empty form and stops the validation chain from continuing.
    Will not stop the validation chain if any one of the fields is populated.
    """

    field_flags = ("optional",)

    def __call__(self, form: Sequence["QuestionnaireForm"], field: Field) -> None:
        # Stop further validation only when *every* field is empty.
        if all(self._is_empty_field(formfield) for formfield in form):
            raise validators.StopValidation()

    @staticmethod
    def _is_empty_field(formfield: Field) -> bool:
        """Return True when the field carries no posted data or a blank string."""
        if not hasattr(formfield, "raw_data"):
            return False
        raw = formfield.raw_data
        # By default we'll receive empty arrays for values not posted, so need
        # to allow empty lists.
        if len(raw) == 0:
            return True
        return isinstance(raw[0], string_types) and not raw[0]
class DateRequired:
    """Require at least one of the present date parts (day/month/year) to be filled."""

    field_flags = ("required",)

    def __init__(self, message: Optional[str] = None):
        self.message = message or error_messages["MANDATORY_DATE"]

    def __call__(self, form: "QuestionnaireForm", field: DateField) -> None:
        """
        Raise exception if ALL fields have not been filled out.
        Not having that field is the same as not filling it out
        as the remaining fields would also have to be empty for
        exception to be raised.
        """
        # Year is always present; day/month only exist on full/month-year dates.
        entered = [bool(form.year.data)]
        if hasattr(form, "day"):
            entered.append(bool(form.day.data))
        if hasattr(form, "month"):
            entered.append(bool(form.month.data))

        if not any(entered):
            raise validators.StopValidation(self.message)
class DateCheck:
    """Validate that the combined form value parses as a real calendar date."""

    def __init__(self, message: Optional[str] = None):
        self.message = message or error_messages["INVALID_DATE"]

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        if not form.data:
            raise validators.StopValidation(self.message)

        # Pick the expected format from which date parts the form exposes.
        if hasattr(form, "day"):
            date_format = "%Y-%m-%d"
        elif hasattr(form, "month"):
            date_format = "%Y-%m"
        else:
            date_format = "%Y"

        try:
            datetime.strptime(form.data, date_format).replace(tzinfo=timezone.utc)
        except ValueError as exc:
            raise validators.StopValidation(self.message) from exc
class SingleDatePeriodCheck:
    """Validate that a single date falls between optional minimum/maximum dates."""

    def __init__(
        self,
        messages: OptionalMessage = None,
        date_format: str = "d MMMM yyyy",
        minimum_date: Optional[datetime] = None,
        maximum_date: Optional[datetime] = None,
    ):
        self.messages = {**error_messages, **(messages or {})}
        self.minimum_date = minimum_date
        self.maximum_date = maximum_date
        self.date_format = date_format

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        date = parse_datetime(form.data)
        if not date:
            # Nothing parseable to compare against the bounds.
            return

        if self.minimum_date and date < self.minimum_date:
            # Play back the last date *before* the allowed range.
            earliest = self._format_playback_date(
                self.minimum_date + relativedelta(days=-1), self.date_format
            )
            raise validators.ValidationError(
                self.messages["SINGLE_DATE_PERIOD_TOO_EARLY"] % dict(min=earliest)
            )

        if self.maximum_date and date > self.maximum_date:
            # Play back the first date *after* the allowed range.
            latest = self._format_playback_date(
                self.maximum_date + relativedelta(days=+1), self.date_format
            )
            raise validators.ValidationError(
                self.messages["SINGLE_DATE_PERIOD_TOO_LATE"] % dict(max=latest)
            )

    @staticmethod
    def _format_playback_date(date: datetime, date_format: str = "d MMMM yyyy") -> str:
        formatted_date: str = flask_babel.format_date(date, format=date_format)
        return formatted_date
class DateRangeCheck:
    """Validator for a from/to date range: the range must be ascending and,
    optionally, its length must fall between ``period_min`` and ``period_max``.
    """
    def __init__(
        self,
        messages: OptionalMessage = None,
        period_min: Optional[dict[str, int]] = None,
        period_max: Optional[dict[str, int]] = None,
    ):
        # period_min/period_max are dicts with optional "years"/"months"/"days"
        # keys (see _return_relative_delta below).
        self.messages = {**error_messages, **(messages or {})}
        self.period_min = period_min
        self.period_max = period_max
    def __call__(
        self, form: "QuestionnaireForm", from_field: DateField, to_field: DateField
    ) -> None:
        """Validate the answered range; silently skips when either date is missing."""
        from_date = parse_datetime(from_field.data)
        to_date = parse_datetime(to_field.data)
        if from_date and to_date:
            # The range must be strictly ascending; a zero-length range is invalid.
            if from_date >= to_date:
                raise validators.ValidationError(self.messages["INVALID_DATE_RANGE"])
            answered_range_relative = relativedelta(to_date, from_date)
            if self.period_min:
                # Answered range shorter than the configured minimum.
                min_range = self._return_relative_delta(self.period_min)
                if self._is_first_relative_delta_largest(
                    min_range, answered_range_relative
                ):
                    raise validators.ValidationError(
                        self.messages["DATE_PERIOD_TOO_SMALL"]
                        % dict(min=self._build_range_length_error(self.period_min))
                    )
            if self.period_max:
                # Answered range longer than the configured maximum.
                max_range = self._return_relative_delta(self.period_max)
                if self._is_first_relative_delta_largest(
                    answered_range_relative, max_range
                ):
                    raise validators.ValidationError(
                        self.messages["DATE_PERIOD_TOO_LARGE"]
                        % dict(max=self._build_range_length_error(self.period_max))
                    )
    @staticmethod
    def _return_relative_delta(period_object: PeriodType) -> relativedelta:
        """Convert a {"years", "months", "days"} dict into a relativedelta."""
        return relativedelta(
            years=period_object.get("years", 0),
            months=period_object.get("months", 0),
            days=period_object.get("days", 0),
        )
    @staticmethod
    def _is_first_relative_delta_largest(
        relativedelta1: relativedelta, relativedelta2: relativedelta
    ) -> bool:
        """Return True if relativedelta1 spans more time than relativedelta2.

        relativedelta objects are not directly comparable, so both are added
        to a common reference date and the resulting dates are compared.
        """
        epoch = datetime.min  # generic epoch for comparison purposes only
        date1 = epoch + relativedelta1
        date2 = epoch + relativedelta2
        return date1 > date2
    @staticmethod
    def _build_range_length_error(period_object: PeriodType) -> str:
        """Build a localised, comma-separated "X years, Y months, Z days" string."""
        error_message = ""
        if "years" in period_object:
            error_message = ngettext(
                "%(num)s year", "%(num)s years", period_object["years"]
            )
        if "months" in period_object:
            message_addition = ngettext(
                "%(num)s month", "%(num)s months", period_object["months"]
            )
            # Only prefix a comma when a previous component was emitted.
            error_message += (
                message_addition if error_message == "" else ", " + message_addition
            )
        if "days" in period_object:
            message_addition = ngettext(
                "%(num)s day", "%(num)s days", period_object["days"]
            )
            error_message += (
                message_addition if error_message == "" else ", " + message_addition
            )
        return error_message
class SumCheck:
    """Validate that a calculated total satisfies a condition against a target.

    ``conditions`` comes from the schema; when it holds more than one entry
    it must contain "equals" plus one inequality, which is collapsed into a
    single "<condition> or equals" check.

    :param messages: optional overrides for the shared error messages
    :param currency: optional currency code used when playing back the target
    """

    def __init__(
        self, messages: "OptionalMessage" = None, currency: Optional[str] = None
    ):
        self.messages = {**error_messages, **(messages or {})}
        self.currency = currency

    def __call__(
        self,
        form: "QuestionnaireForm",
        conditions: List[str],
        total: Union[Decimal, int],
        target_total: Union[Decimal, float],
    ) -> None:
        if len(conditions) > 1:
            try:
                # NOTE: mutates the caller's list, as the original code did.
                conditions.remove("equals")
            except ValueError as exc:
                raise Exception(
                    "There are multiple conditions, but equals is not one of them. "
                    "We only support <= and >="
                ) from exc
            condition = f"{conditions[0]} or equals"
        else:
            condition = conditions[0]
        is_valid, message = self._is_valid(condition, total, target_total)
        if not is_valid:
            raise validators.ValidationError(
                self.messages[message]
                % dict(total=format_playback_value(target_total, self.currency))
            )

    @staticmethod
    def _is_valid(
        condition: str,
        total: Union[Decimal, float],
        target_total: Union[Decimal, float],
    ) -> tuple[bool, str]:
        """Return ``(is_valid, error_message_key)`` for the given condition.

        Raises ValueError for an unsupported condition instead of implicitly
        returning None, which previously surfaced as an opaque unpacking
        TypeError in ``__call__``.
        """
        if condition == "equals":
            return total == target_total, "TOTAL_SUM_NOT_EQUALS"
        if condition == "less than":
            return total < target_total, "TOTAL_SUM_NOT_LESS_THAN"
        if condition == "greater than":
            return total > target_total, "TOTAL_SUM_NOT_GREATER_THAN"
        if condition == "greater than or equals":
            return total >= target_total, "TOTAL_SUM_NOT_GREATER_THAN_OR_EQUALS"
        if condition == "less than or equals":
            return total <= target_total, "TOTAL_SUM_NOT_LESS_THAN_OR_EQUALS"
        raise ValueError(f"Unsupported sum condition: {condition!r}")
def format_playback_value(
    value: Union[float, Decimal], currency: Optional[str] = None
) -> str:
    """Render *value* for playback, with currency formatting when one is given."""
    if not currency:
        return format_number(value)
    return get_formatted_currency(value, currency)
def format_message_with_title(error_message: str, question_title: str) -> str:
    """Interpolate the safely-escaped question title into *error_message*."""
    safe_title = safe_content(question_title)
    return error_message % {"question_title": safe_title}
class MutuallyExclusiveCheck:
    """Validator for a mutually exclusive answer group.

    At most one answer may be provided; when the question is mandatory,
    exactly one must be.
    """

    def __init__(self, question_title: str, messages: OptionalMessage = None):
        self.messages = {**error_messages, **(messages or {})}
        self.question_title = question_title

    def __call__(
        self, answer_values: Iterable, is_mandatory: bool, is_only_checkboxes: bool
    ) -> None:
        answered_count = sum(1 for answer in answer_values if answer)
        if answered_count > 1:
            raise validators.ValidationError(self.messages["MUTUALLY_EXCLUSIVE"])
        if is_mandatory and answered_count < 1:
            # Checkbox-only questions get a checkbox-specific mandatory message.
            key = "MANDATORY_CHECKBOX" if is_only_checkboxes else "MANDATORY_QUESTION"
            raise validators.ValidationError(
                format_message_with_title(self.messages[key], self.question_title)
            )
def sanitise_mobile_number(data: str) -> str:
    """Strip separator characters and any UK dialling prefix from *data*."""
    stripped = re.sub(r"[\s.,\t\-{}\[\]()/]", "", data)
    # Drop a leading "0044"/"044"/"+44" international prefix or a single "0".
    return re.sub(r"^(0{1,2}44|\+44|0)", "", stripped)
class MobileNumberCheck:
    """Validator for UK mobile numbers: after sanitising, the value must be
    ten digits starting with 7.
    """

    def __init__(self, message: OptionalMessage = None):
        self.message = message or error_messages["INVALID_MOBILE_NUMBER"]

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        number = sanitise_mobile_number(field.data)
        is_valid = len(number) == 10 and re.match("^7[0-9]+$", number)
        if not is_valid:
            raise validators.ValidationError(self.message)
class EmailTLDCheck:
    """Validator that checks the top-level domain of an email address."""

    def __init__(self, message: Optional[str] = None):
        self.message = message or error_messages["INVALID_EMAIL_FORMAT"]

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        match = email_regex.match(field.data)
        if not match:
            # Overall email shape is checked elsewhere; nothing to do here.
            return
        hostname = match.group(1)
        try:
            # IDNA-encode so internationalised domains validate like ASCII ones.
            hostname = hostname.encode("idna").decode("ascii")
        except UnicodeError as exc:
            raise validators.StopValidation(self.message) from exc
        parts = hostname.split(".")
        if len(parts) > 1 and not tld_part_regex.match(parts[-1]):
            raise validators.StopValidation(self.message)
| [
"structlog.get_logger",
"app.jinja_filters.get_formatted_currency",
"dateutil.relativedelta.relativedelta",
"re.compile",
"wtforms.validators.ValidationError",
"datetime.datetime.strptime",
"wtforms.validators.StopValidation",
"app.questionnaire.rules.utils.parse_datetime",
"re.match",
"flask_babe... | [((925, 937), 'structlog.get_logger', 'get_logger', ([], {}), '()\n', (935, 937), False, 'from structlog import get_logger\n'), ((956, 1027), 're.compile', 're.compile', (['"""^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$"""', 're.IGNORECASE'], {}), "('^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$', re.IGNORECASE)\n", (966, 1027), False, 'import re\n'), ((1049, 1083), 're.compile', 're.compile', (['"""^.+@([^.@][^@\\\\s]+)$"""'], {}), "('^.+@([^.@][^@\\\\s]+)$')\n", (1059, 1083), False, 'import re\n'), ((16000, 16020), 'app.jinja_filters.format_number', 'format_number', (['value'], {}), '(value)\n', (16013, 16020), False, 'from app.jinja_filters import format_number, get_formatted_currency\n'), ((17133, 17177), 're.sub', 're.sub', (['"""[\\\\s.,\\\\t\\\\-{}\\\\[\\\\]()/]"""', '""""""', 'data'], {}), "('[\\\\s.,\\\\t\\\\-{}\\\\[\\\\]()/]', '', data)\n", (17139, 17177), False, 'import re\n'), ((17185, 17224), 're.sub', 're.sub', (['"""^(0{1,2}44|\\\\+44|0)"""', '""""""', 'data'], {}), "('^(0{1,2}44|\\\\+44|0)', '', data)\n", (17191, 17224), False, 'import re\n'), ((9759, 9784), 'app.questionnaire.rules.utils.parse_datetime', 'parse_datetime', (['form.data'], {}), '(form.data)\n', (9773, 9784), False, 'from app.questionnaire.rules.utils import parse_datetime\n'), ((10705, 10754), 'flask_babel.format_date', 'flask_babel.format_date', (['date'], {'format': 'date_format'}), '(date, format=date_format)\n', (10728, 10754), False, 'import flask_babel\n'), ((11271, 11302), 'app.questionnaire.rules.utils.parse_datetime', 'parse_datetime', (['from_field.data'], {}), '(from_field.data)\n', (11285, 11302), False, 'from app.questionnaire.rules.utils import parse_datetime\n'), ((11321, 11350), 'app.questionnaire.rules.utils.parse_datetime', 'parse_datetime', (['to_field.data'], {}), '(to_field.data)\n', (11335, 11350), False, 'from app.questionnaire.rules.utils import parse_datetime\n'), ((11544, 11577), 'dateutil.relativedelta.relativedelta', 'relativedelta', 
(['to_date', 'from_date'], {}), '(to_date, from_date)\n', (11557, 11577), False, 'from dateutil.relativedelta import relativedelta\n'), ((15931, 15970), 'app.jinja_filters.get_formatted_currency', 'get_formatted_currency', (['value', 'currency'], {}), '(value, currency)\n', (15953, 15970), False, 'from app.jinja_filters import format_number, get_formatted_currency\n'), ((1880, 1919), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (1905, 1919), False, 'from wtforms import Field, StringField, validators\n'), ((2909, 2948), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (2934, 2948), False, 'from wtforms import Field, StringField, validators\n'), ((6208, 6232), 'flask_babel.get_locale', 'flask_babel.get_locale', ([], {}), '()\n', (6230, 6232), False, 'import flask_babel\n'), ((7596, 7623), 'wtforms.validators.StopValidation', 'validators.StopValidation', ([], {}), '()\n', (7621, 7623), False, 'from wtforms import Field, StringField, validators\n'), ((8425, 8464), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (8450, 8464), False, 'from wtforms import Field, StringField, validators\n'), ((8728, 8767), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (8753, 8767), False, 'from wtforms import Field, StringField, validators\n'), ((13217, 13282), 'flask_babel.ngettext', 'ngettext', (['"""%(num)s year"""', '"""%(num)s years"""', "period_object['years']"], {}), "('%(num)s year', '%(num)s years', period_object['years'])\n", (13225, 13282), False, 'from flask_babel import ngettext\n'), ((13382, 13450), 'flask_babel.ngettext', 'ngettext', (['"""%(num)s month"""', '"""%(num)s months"""', "period_object['months']"], {}), "('%(num)s month', '%(num)s months', period_object['months'])\n", (13390, 13450), False, 'from flask_babel import 
ngettext\n'), ((13678, 13740), 'flask_babel.ngettext', 'ngettext', (['"""%(num)s day"""', '"""%(num)s days"""', "period_object['days']"], {}), "('%(num)s day', '%(num)s days', period_object['days'])\n", (13686, 13740), False, 'from flask_babel import ngettext\n'), ((16176, 16204), 'app.utilities.safe_content', 'safe_content', (['question_title'], {}), '(question_title)\n', (16188, 16204), False, 'from app.utilities import safe_content\n'), ((16660, 16723), 'wtforms.validators.ValidationError', 'validators.ValidationError', (["self.messages['MUTUALLY_EXCLUSIVE']"], {}), "(self.messages['MUTUALLY_EXCLUSIVE'])\n", (16686, 16723), False, 'from wtforms import Field, StringField, validators\n'), ((17038, 17073), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['message'], {}), '(message)\n', (17064, 17073), False, 'from wtforms import Field, StringField, validators\n'), ((17595, 17635), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['self.message'], {}), '(self.message)\n', (17621, 17635), False, 'from wtforms import Field, StringField, validators\n'), ((1767, 1806), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (1792, 1806), False, 'from wtforms import Field, StringField, validators\n'), ((4366, 4407), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['error_message'], {}), '(error_message)\n', (4392, 4407), False, 'from wtforms import Field, StringField, validators\n'), ((6339, 6399), 'wtforms.validators.ValidationError', 'validators.ValidationError', (["self.messages['INVALID_INTEGER']"], {}), "(self.messages['INVALID_INTEGER'])\n", (6365, 6399), False, 'from wtforms import Field, StringField, validators\n'), ((9179, 9218), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (9204, 9218), False, 'from wtforms import Field, StringField, validators\n'), ((11445, 11508), 
'wtforms.validators.ValidationError', 'validators.ValidationError', (["self.messages['INVALID_DATE_RANGE']"], {}), "(self.messages['INVALID_DATE_RANGE'])\n", (11471, 11508), False, 'from wtforms import Field, StringField, validators\n'), ((17548, 17575), 're.match', 're.match', (['"""^7[0-9]+$"""', 'data'], {}), "('^7[0-9]+$', data)\n", (17556, 17575), False, 'import re\n'), ((18284, 18323), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (18309, 18323), False, 'from wtforms import Field, StringField, validators\n'), ((18102, 18141), 'wtforms.validators.StopValidation', 'validators.StopValidation', (['self.message'], {}), '(self.message)\n', (18127, 18141), False, 'from wtforms import Field, StringField, validators\n'), ((1606, 1630), 'flask_babel.get_locale', 'flask_babel.get_locale', ([], {}), '()\n', (1628, 1630), False, 'import flask_babel\n'), ((6085, 6109), 'flask_babel.get_locale', 'flask_babel.get_locale', ([], {}), '()\n', (6107, 6109), False, 'import flask_babel\n'), ((8835, 8875), 'datetime.datetime.strptime', 'datetime.strptime', (['form.data', '"""%Y-%m-%d"""'], {}), "(form.data, '%Y-%m-%d')\n", (8852, 8875), False, 'from datetime import datetime, timezone\n'), ((8962, 8999), 'datetime.datetime.strptime', 'datetime.strptime', (['form.data', '"""%Y-%m"""'], {}), "(form.data, '%Y-%m')\n", (8979, 8999), False, 'from datetime import datetime, timezone\n'), ((9063, 9097), 'datetime.datetime.strptime', 'datetime.strptime', (['form.data', '"""%Y"""'], {}), "(form.data, '%Y')\n", (9080, 9097), False, 'from datetime import datetime, timezone\n'), ((10082, 10104), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (10095, 10104), False, 'from dateutil.relativedelta import relativedelta\n'), ((10473, 10495), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+1)'}), '(days=+1)\n', (10486, 10495), False, 'from dateutil.relativedelta 
import relativedelta\n')] |
from avalon import io
def publish(asset_id, subset_name, families):
    """Ensure a subset document exists and return its id.

    :param asset_id: database id of the parent asset
    :param subset_name: name of the subset to publish
    :param families: list of family names stored on the subset
    :return: the subset's ``_id``
    """
    existing = io.find_one({"parent": asset_id, "name": subset_name})
    if existing is not None:
        return existing['_id']
    subset_document = {
        'name': subset_name,
        'parent': asset_id,
        'type': 'subset',
        'data': {
            'families': families,
            'subsetGroup': ''
        },
        'schema': 'avalon-core:subset-3.0'}
    return io.insert_one(subset_document).inserted_id
| [
"avalon.io.find_one",
"avalon.io.insert_one"
] | [((526, 546), 'avalon.io.find_one', 'io.find_one', (['_filter'], {}), '(_filter)\n', (537, 546), False, 'from avalon import io\n'), ((596, 625), 'avalon.io.insert_one', 'io.insert_one', (['subset_context'], {}), '(subset_context)\n', (609, 625), False, 'from avalon import io\n')] |
import unittest
from typing import Tuple
from neofoodclub import NeoFoodClub # type: ignore
from neofoodclub.types import RoundData # type: ignore
# i picked the smallest round I could quickly find
# Fixture payload for round 7956 (chosen, per the note above, for its small size).
test_round_data: RoundData = {
    # Five rows of five integers each (currentOdds/openingOdds differ only
    # where the "changes" entries below apply).
    "currentOdds": [
        [1, 2, 13, 3, 5],
        [1, 4, 2, 4, 6],
        [1, 3, 13, 7, 2],
        [1, 13, 2, 3, 3],
        [1, 8, 2, 4, 13],
    ],
    "foods": [
        [26, 25, 4, 9, 21, 1, 33, 11, 7, 10],
        [12, 9, 14, 35, 25, 6, 21, 19, 40, 37],
        [17, 30, 21, 39, 37, 15, 29, 40, 31, 10],
        [10, 18, 35, 9, 34, 23, 27, 32, 28, 12],
        [11, 20, 9, 33, 7, 14, 4, 23, 31, 26],
    ],
    "lastChange": "2021-02-16T23:47:18+00:00",
    "openingOdds": [
        [1, 2, 13, 3, 5],
        [1, 4, 2, 4, 6],
        [1, 3, 13, 7, 2],
        [1, 13, 2, 3, 3],
        [1, 8, 2, 4, 12],
    ],
    "pirates": [
        [2, 8, 14, 11],
        [20, 7, 6, 10],
        [19, 4, 12, 15],
        [3, 1, 5, 13],
        [17, 16, 18, 9],
    ],
    "round": 7956,
    "start": "2021-02-15T23:47:41+00:00",
    "timestamp": "2021-02-16T23:47:37+00:00",
    "winners": [1, 3, 4, 2, 4],
    # Each change records an arena/pirate odds move with its timestamp.
    "changes": [
        {"arena": 1, "new": 6, "old": 5, "pirate": 4, "t": "2021-02-16T23:47:18+00:00"},
        {
            "arena": 4,
            "new": 8,
            "old": 12,
            "pirate": 1,
            "t": "2021-02-16T23:47:18+00:00",
        },
        {"arena": 4, "new": 4, "old": 6, "pirate": 3, "t": "2021-02-16T23:47:18+00:00"},
        {
            "arena": 4,
            "new": 12,
            "old": 13,
            "pirate": 4,
            "t": "2021-02-16T23:47:18+00:00",
        },
    ],
}
# Three encodings expected to represent the same set of ten bets
# (the assertions in the test cases below check they all round-trip
# to the same (hash, indices, binaries) triple).
test_bet_hash = "ltqvqwgimhqtvrnywrwvijwnn"
test_indices: Tuple[Tuple[int, ...], ...] = (
    (2, 1, 3, 4, 3),
    (1, 4, 1, 3, 1),
    (4, 2, 1, 1, 1),
    (3, 2, 2, 1, 2),
    (3, 1, 3, 4, 4),
    (1, 3, 2, 2, 3),
    (4, 4, 4, 2, 3),
    (2, 4, 2, 4, 1),
    (1, 3, 1, 4, 4),
    (2, 2, 3, 2, 3),
)
test_binaries: Tuple[int, ...] = (
    0x48212,
    0x81828,
    0x14888,
    0x24484,
    0x28211,
    0x82442,
    0x11142,
    0x41418,
    0x82811,
    0x44242,
)
# Canonical expected triple shared by all decoding assertions below.
test_expected_results = (test_bet_hash, test_indices, test_binaries)
# Build one bets object per input representation; the test cases below
# assert that all three expose identical hash/indices/binaries views.
test_nfc = NeoFoodClub(test_round_data)
hash_bets = test_nfc.make_bets_from_hash(test_bet_hash)
indices_bets = test_nfc.make_bets_from_indices(test_indices)  # type: ignore
binaries_bets = test_nfc.make_bets_from_binaries(*test_binaries)
########################################################################################################################
class BetDecodingTest(unittest.TestCase):
    """Each construction route must decode to the expected triple."""

    def _assert_round_trip(self, bets):
        # Every bets object must expose the same three representations.
        self.assertEqual(
            (bets.bets_hash, bets.indices, tuple(bets)),
            test_expected_results,
        )

    def test_bet_hash_encoding(self):
        self._assert_round_trip(hash_bets)

    def test_bet_indices_encoding(self):
        self._assert_round_trip(indices_bets)

    def test_bet_binary_encoding(self):
        self._assert_round_trip(binaries_bets)
class BetEquivalenceTest(unittest.TestCase):
    """All three construction routes must produce equal bets objects."""

    def test_bet_equivalence(self):
        # Chained comparison is exactly `a == b and b == c`.
        self.assertTrue(hash_bets == indices_bets == binaries_bets)
| [
"neofoodclub.NeoFoodClub"
] | [((2219, 2247), 'neofoodclub.NeoFoodClub', 'NeoFoodClub', (['test_round_data'], {}), '(test_round_data)\n', (2230, 2247), False, 'from neofoodclub import NeoFoodClub\n')] |
"""Here are defined Python functions of views.
Views are binded to URLs in :mod:`.urls`.
"""
import datetime
import hashlib
import json
import os
from distutils.version import LooseVersion
from json.encoder import JSONEncoder
from urllib.parse import quote
from django import forms
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse
from django.db.models import Q
from django.http import HttpResponse, Http404, QueryDict, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, resolve_url
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.http import is_safe_url, urlencode
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _, ugettext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.debug import sensitive_post_parameters
from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, \
PackageType, normalize_str
from pythonnest.xmlrpc import XMLRPCSite, site
__author__ = '<NAME>'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'  # timestamp format emitted by the JSON views (via JSONDatetime)
# NOTE(review): XML_RPC_SITE appears unused in this module — the `xmlrpc`
# view dispatches through the imported `site` instead; confirm before removing.
XML_RPC_SITE = XMLRPCSite()
def prepare_query(previous_query, prefix, key, value, global_and=True):
    """Combine a lookup on ``prefix + key`` into ``previous_query``.

    :param previous_query: an existing Q object, or None to start a new one
    :param prefix: lookup prefix (may be '', or a related-field path)
    :param key: field name to filter on
    :param value: a string, or a list/tuple of strings; a multi-element
        sequence becomes a normalized ``__in`` lookup, anything else an
        ``__icontains`` lookup
    :param global_and: combine with AND when True, with OR when False
    :return: a Q object
    """
    kwargs = {}
    if isinstance(value, (list, tuple)) and len(value) > 1:
        # Several values: exact membership test on the normalized values.
        value = [normalize_str(x) for x in value]
        kwargs = {prefix + key + '__in': value}
    elif isinstance(value, (list, tuple)):
        # Zero or one element: guard against an empty sequence (previously
        # raised IndexError) and fall through to the icontains lookup below.
        value = normalize_str(value[0]) if value else ''
    if not kwargs:
        # NOTE(review): a plain string value is NOT passed through
        # normalize_str here, unlike the sequence branches — confirm intent.
        kwargs = {prefix + key + '__icontains': value}
    if previous_query is None:
        return Q(**kwargs)
    if global_and:
        return previous_query & Q(**kwargs)
    return previous_query | Q(**kwargs)
class JSONDatetime(JSONEncoder):
    """JSON encoder that serialises datetimes using the module DATE_FORMAT."""

    def default(self, o):
        if not isinstance(o, datetime.datetime):
            # Defer to the base encoder (which raises TypeError for
            # unsupported objects).
            return super(JSONDatetime, self).default(o)
        return o.strftime(DATE_FORMAT)
@csrf_exempt
def xmlrpc(request):
    """Entry point for XML-RPC requests; delegates to the imported dispatcher."""
    return site.dispatch(request)
# noinspection PyUnusedLocal
def package_json(request, package_name):
    """Return PyPI-style JSON metadata for the latest release of a package.

    Raises Http404 when the package has no release at all.
    """
    package = get_object_or_404(Package, normalized_name__iexact=normalize_str(package_name))
    latest = list(Release.objects.filter(package=package).order_by('-id')[0:1])
    if not latest:
        raise Http404
    release = latest[0]
    payload = {
        'info': release.data(),
        'urls': [download.data() for download in ReleaseDownload.objects.filter(release=release)],
    }
    return HttpResponse(json.dumps(payload, ensure_ascii=False, cls=JSONDatetime, indent=4),
                        content_type='application/json')
# noinspection PyUnusedLocal
def version_json(request, package_name, version):
    """Return PyPI-style JSON metadata for one specific release of a package."""
    release = get_object_or_404(Release, package__normalized_name__iexact=normalize_str(package_name), version=version)
    urls = [download.data() for download in ReleaseDownload.objects.filter(release=release)]
    payload = {'info': release.data(), 'urls': urls}
    return HttpResponse(json.dumps(payload, ensure_ascii=False, cls=JSONDatetime, indent=4),
                        content_type='application/json')
class SearchForm(forms.Form):
    """Search form with a single free-text query field."""
    # NOTE(review): the original docstring said "Upload form", which did not
    # match this form's content.
    search = forms.CharField(max_length=255)
@never_cache
def simple(request, package_name=None, version=None):
    """Simple index view listing downloads: all of them, those of one
    package, or those of one specific release.
    """
    if package_name is None:
        package = None
        downloads = ReleaseDownload.objects.all()
    else:
        package = get_object_or_404(Package, normalized_name__iexact=normalize_str(package_name))
        if version is None:
            downloads = ReleaseDownload.objects.filter(package=package)
        else:
            release = get_object_or_404(Release, package=package, version__iexact=version)
            downloads = ReleaseDownload.objects.filter(release=release)
    context = {'package': package, 'downloads': downloads}
    return TemplateResponse(request, 'pythonnest/simple.html', context)
@csrf_exempt
def setup(request):
    """Distutils/setuptools upload endpoint (``python setup.py register/upload``).

    Parses the multipart POST body by hand, then handles the ``submit`` and
    ``file_upload`` actions: creating/updating the package and release
    metadata and, for ``file_upload``, storing the uploaded archive on disk.
    Returns 401 responses for authentication/authorisation failures and
    raises PermissionDenied for malformed requests.
    """
    if request.method != 'POST':
        raise PermissionDenied(_('Only POST request are allowed'))
    # Extract the multipart boundary and charset from the Content-Type header.
    ct_type = request.META.get('CONTENT_TYPE', '')
    infos = [x.strip().partition('=') for x in ct_type.split(';')]
    boundary, encoding = None, 'ascii'
    for info in infos:
        if info[0] == 'boundary':
            boundary = info[2]
        elif info[0] == 'charset':
            encoding = info[2]
    if boundary is None:
        raise PermissionDenied(_('Invalid POST form'))
    # parse the POST query by hand
    # NOTE(review): boundaries are matched with bare '\n' separators; RFC 2046
    # multipart uses '\r\n' — presumably distutils clients send '\n' here, but
    # confirm against real uploads.
    mid_boundary = ('\n--' + boundary + '\n').encode(encoding)
    end_boundary = ('\n--' + boundary + '--\n').encode(encoding)
    fields = request.body.split(mid_boundary)
    values = QueryDict('', mutable=True, encoding=encoding)
    files = {}
    for part in fields:
        # Each part is "headers\n\nbody"; anything else is skipped.
        lines = part.split(b'\n\n', 1)
        if len(lines) != 2:
            continue
        infos = [x.strip().partition('=') for x in lines[0].decode(encoding).split(';')]
        key, filename = None, None
        for info in infos:
            if info[0] == 'name':
                key = info[2][1:-1]  # strip the surrounding quotes
            elif info[0] == 'filename':
                filename = info[2][1:-1]
        if key is None:
            continue
        value = lines[1]
        # The last part carries the closing boundary; trim it off.
        if value.endswith(end_boundary):
            value = value[:-len(end_boundary)]
        if filename is None:
            # Plain form value (may repeat, e.g. classifiers).
            values.setlistdefault(key, [])
            values.appendlist(key, value)
        else:
            # Uploaded file: keep (filename, raw bytes).
            files[key] = filename, value
    # the POST data are parsed, let's go
    action = values.get(':action')
    if action in ('submit', 'file_upload'):
        package_name = values.get('name', '')
        version_name = values.get('version', '')
        if not package_name or not version_name:
            raise PermissionDenied(_('No package name provided'))
        if request.user.is_anonymous:
            return HttpResponse(ugettext('You must be authenticated'), status=401)
        package, package_created = Package.objects.get_or_create(name=package_name)
        if package_created:
            # The first uploader becomes the package owner.
            PackageRole(package=package, user=request.user, role=PackageRole.OWNER).save()
        elif not request.user.is_superuser:
            # Existing package: require any role on it (superusers bypass).
            if PackageRole.objects.filter(package=package, user=request.user).count() == 0:
                return HttpResponse(ugettext('You are not allowed to update this package'), status=401)
        # Copy non-empty metadata fields onto the package.
        for attr_name in ('name', 'home_page', 'author_email', 'download_url', 'author', 'license', 'summary',
                          'maintainer', 'maintainer_email', 'project_url', ):
            if values.get(attr_name):
                setattr(package, attr_name, values.get(attr_name))
        package.save()
        release, created = Release.objects.get_or_create(package=package, version=version_name)
        # NOTE(review): this loop sets the attributes on `package` (after its
        # save above) although the names look release-level — likely meant to
        # be setattr(release, ...); confirm against the models.
        for attr_name in ('stable_version', 'description', 'platform', 'keywords', 'docs_url',):
            if values.get(attr_name):
                setattr(package, attr_name, values.get(attr_name))
        # Replace the release's classifiers and dependency lists wholesale.
        release.classifiers.clear()
        for classifier in values.getlist('classifiers', []):
            release.classifiers.add(Classifier.get(classifier))
        for attr_name in ('requires', 'requires_dist', 'provides', 'provides_dist', 'obsoletes', 'obsoletes_dist',
                          'requires_external', 'requires_python'):
            getattr(release, attr_name).clear()
            for dep in values.getlist(attr_name, []):
                getattr(release, attr_name).add(Dependence.get(dep))
        release.save()
        if action == 'file_upload':
            if 'content' not in files:
                raise PermissionDenied
            filename, content = files['content']
            # Refuse to overwrite an already-uploaded file of the same name.
            # noinspection PyUnboundLocalVariable
            if ReleaseDownload.objects.filter(package=package, release=release, filename=filename).count() > 0:
                raise PermissionDenied
            # The client-supplied md5_digest must match the received bytes.
            md5 = hashlib.md5(content).hexdigest()
            if md5 != values.get('md5_digest'):
                raise PermissionDenied
            download = ReleaseDownload(package=package, release=release, filename=filename)
            path = download.abspath
            path_dirname = os.path.dirname(path)
            if not os.path.isdir(path_dirname):
                os.makedirs(path_dirname)
            with open(path, 'wb') as out_fd:
                out_fd.write(content)
            download.md5_digest = md5
            download.size = len(content)
            download.upload_time = datetime.datetime.utcnow().replace(tzinfo=utc)
            download.url = settings.MEDIA_URL + path[MEDIA_ROOT_LEN:]
            download.file = download.relpath
            download.package_type = PackageType.get(values.get('filetype', 'source'))
            download.comment_text = values.get('comment', '')
            download.python_version = values.get('pyversion')
            download.log()
    template_values = {}
    return TemplateResponse(request, 'pythonnest/simple.html', template_values)
def search_result(request, query, alt_text):
    """Paginate *query* (30 per page) and render the shared search template.

    :param request: current HttpRequest
    :param query: queryset of results to display
    :param alt_text: optional alternative-search hint, or None
    """
    filtered_params = {name: val for name, val in request.GET.items() if name != 'page'}
    nav_url = '%s?%s' % (request.path, urlencode(filtered_params))
    paginator = Paginator(query, 30)
    requested_page = request.GET.get('page')
    try:
        page = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric page parameter: show the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page (e.g. 9999): clamp to the last page.
        page = paginator.page(paginator.num_pages)
    context = {'result_page': page, 'nav_url': nav_url, 'alt_text': alt_text, 'query': query, }
    return TemplateResponse(request, 'pythonnest/search.html', context)
def index(request):
    """Home page: run a package search when the form is valid, otherwise
    render the landing template.
    """
    form = SearchForm(request.GET)
    if form.is_valid():
        raw_pattern = form.cleaned_data['search']
        combined_query = None
        # AND together one icontains lookup per whitespace-separated token.
        for token in raw_pattern.split():
            combined_query = prepare_query(combined_query, '', 'normalized_name', token, global_and=True)
        query = Package.objects.filter(combined_query).distinct().select_related()
        alt_text = None
        if '*' not in raw_pattern:
            # Suggest a wildcard-wrapped variant of the same search.
            alt_text = _('You should search “<a href="?search=%%2A%(pattern)s%%2A">*%(text)s*</a>” '
                         'to find more packages.') % {'pattern': quote(raw_pattern), 'text': escape(raw_pattern)}
        return search_result(request, query, alt_text)
    full_uri = settings.SERVER_BASE_URL[:-1]
    base_url = settings.SERVER_NAME
    use_ssl = settings.USE_SSL
    template_values = {'base_url': base_url, 'use_ssl': use_ssl, 'full_uri': full_uri, }
    return TemplateResponse(request, 'pythonnest/index.html', template_values)
def all_packages(request, order_by='normalized_name'):
    """List every package, ordered by *order_by*."""
    packages = Package.objects.all().select_related().order_by(order_by)
    return search_result(request, packages, _('No package matched your query.'))
def show_classifier(request, classifier_id):
    """List every release tagged with the given classifier."""
    classifier = get_object_or_404(Classifier, id=classifier_id)
    releases = classifier.release_set.all().select_related('package')
    return search_result(request, releases, _('No package matched your query.'))
def show_package(request, package_id, release_id=None):
    """Package detail page: releases, downloads, roles, and (for admins)
    a small form to grant a role to another user.

    :param package_id: id of the package to display
    :param release_id: optional id of the release to select; defaults to
        the newest release by version number
    """
    package = get_object_or_404(Package, id=package_id)
    roles = PackageRole.objects.filter(package=package).order_by('role').select_related()
    releases = list(Release.objects.filter(package=package).order_by('-id').select_related())
    release = None
    # Re-sort by parsed version number, newest first (ids may not follow
    # version order).
    releases = sorted(releases, key=lambda x: LooseVersion(str(x.version)), reverse=True)
    if release_id is not None:
        release = get_object_or_404(Release, id=release_id, package=package)
    elif releases:
        release = releases[0]
    # Inline form: grant a role on this package to a (possibly new) user.
    class RoleForm(forms.Form):
        username = forms.CharField(max_length=255, label=_('Username'))
        role = forms.ChoiceField(required=False, widget=forms.Select(), choices=PackageRole.ROLES, label=_('Role'))
    downloads = ReleaseDownload.objects.filter(release=release).order_by('filename')
    is_admin = package.is_admin(request.user)
    add_user_form = None
    if is_admin:
        if request.method == 'POST':
            add_user_form = RoleForm(request.POST)
            if add_user_form.is_valid():
                username = add_user_form.cleaned_data['username']
                role = add_user_form.cleaned_data['role']
                # Create the user on the fly if it does not exist yet.
                user = User.objects.get_or_create(username=username)[0]
                PackageRole.objects.get_or_create(package=package, user=user, role=int(role))
                # NOTE(review): redirect by dotted view path is deprecated in
                # modern Django — confirm the project's Django version.
                return redirect('pythonnest.views.show_package', package_id=package_id)
        else:
            add_user_form = RoleForm()
    template_values = {'title': _('PythonNest'),
                       'package': package, 'roles': roles, 'is_admin': is_admin, 'add_user_form': add_user_form,
                       'is_editable': request.user in set([x.user for x in roles]),
                       'release': release, 'releases': releases, 'downloads': downloads, }
    return TemplateResponse(request, 'pythonnest/package.html', template_values)
@login_required
def delete_download(request, download_id):
    """Delete a download (file + DB row), cascading to the release and the
    package when they become empty, and redirect to the surviving page.

    Only package owners and superusers may delete downloads.
    """
    download = get_object_or_404(ReleaseDownload, id=download_id)
    package = download.package
    release = download.release
    if PackageRole.objects.filter(package=package, role=PackageRole.OWNER, user=request.user).count() == 0 and \
            not request.user.is_superuser:
        raise PermissionDenied
    # Capture the file path before the row disappears, then remove the file.
    abspath = download.abspath
    download.delete()
    if os.path.isfile(abspath):
        os.remove(abspath)
    if ReleaseDownload.objects.filter(release=release).count() == 0:
        # Last download of this release: drop the release and go back to
        # the package page.
        release.delete()
        response = HttpResponseRedirect(reverse('pythonnest.views.show_package', kwargs={'package_id': package.id}))
    else:
        response = HttpResponseRedirect(reverse('pythonnest.views.show_package',
                                              kwargs={'package_id': package.id, 'release_id': release.id}))
    if Release.objects.filter(package=package).count() == 0:
        # Last release of this package: drop the package and go home.
        package.delete()
        response = HttpResponseRedirect(reverse('pythonnest.views.index'))
    return response
@login_required
def delete_role(request, role_id):
    """Remove a role from a package; only package admins may do this."""
    role = get_object_or_404(PackageRole, id=role_id)
    target_package = role.package
    if not target_package.is_admin(request.user):
        raise PermissionDenied
    package_id = target_package.id
    role.delete()
    return redirect('pythonnest.views.show_package', package_id=package_id)
@sensitive_post_parameters()
@never_cache
def create_user(request, template_name='create_user.html',
                redirect_field_name=REDIRECT_FIELD_NAME,
                user_creation_form=UserCreationForm,
                current_app=None, extra_context=None):
    """
    Display the user-creation form and handle its submission.

    Mirrors Django's login view structure: on a valid POST the account is
    created and the user is redirected; otherwise the form is re-rendered.
    """
    # noinspection PyUnusedLocal
    current_app = current_app
    # NOTE(review): request.REQUEST was removed in Django 1.9 — confirm the
    # project's Django version or switch to request.POST/request.GET.
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    if request.method == "POST":
        form = user_creation_form(data=request.POST)
        if form.is_valid():
            # Ensure the user-originating redirection url is safe.
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
            form.save(commit=True)
            # Clean up the cookie-support probe set on the first render.
            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()
            return HttpResponseRedirect(redirect_to)
    else:
        form = user_creation_form()
    # Probe that the client accepts cookies before the next POST.
    request.session.set_test_cookie()
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
| [
"django.http.QueryDict",
"pythonnest.xmlrpc.site.dispatch",
"django.urls.reverse",
"pythonnest.models.ReleaseDownload.objects.all",
"pythonnest.models.Package.objects.filter",
"os.remove",
"django.http.HttpResponseRedirect",
"pythonnest.models.Release.objects.get_or_create",
"pythonnest.models.Packa... | [((1617, 1629), 'pythonnest.xmlrpc.XMLRPCSite', 'XMLRPCSite', ([], {}), '()\n', (1627, 1629), False, 'from pythonnest.xmlrpc import XMLRPCSite, site\n'), ((15152, 15179), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ([], {}), '()\n', (15177, 15179), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((2486, 2508), 'pythonnest.xmlrpc.site.dispatch', 'site.dispatch', (['request'], {}), '(request)\n', (2499, 2508), False, 'from pythonnest.xmlrpc import XMLRPCSite, site\n'), ((3622, 3653), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3637, 3653), False, 'from django import forms\n'), ((4296, 4364), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""pythonnest/simple.html"""', 'template_values'], {}), "(request, 'pythonnest/simple.html', template_values)\n", (4312, 4364), False, 'from django.template.response import TemplateResponse\n'), ((5117, 5163), 'django.http.QueryDict', 'QueryDict', (['""""""'], {'mutable': '(True)', 'encoding': 'encoding'}), "('', mutable=True, encoding=encoding)\n", (5126, 5163), False, 'from django.http import HttpResponse, Http404, QueryDict, HttpResponseRedirect\n'), ((9372, 9440), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""pythonnest/simple.html"""', 'template_values'], {}), "(request, 'pythonnest/simple.html', template_values)\n", (9388, 9440), False, 'from django.template.response import TemplateResponse\n'), ((9637, 9657), 'django.core.paginator.Paginator', 'Paginator', (['query', '(30)'], {}), '(query, 30)\n', (9646, 9657), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((10164, 10232), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""pythonnest/search.html"""', 'template_values'], {}), "(request, 'pythonnest/search.html', template_values)\n", 
(10180, 10232), False, 'from django.template.response import TemplateResponse\n'), ((11251, 11318), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""pythonnest/index.html"""', 'template_values'], {}), "(request, 'pythonnest/index.html', template_values)\n", (11267, 11318), False, 'from django.template.response import TemplateResponse\n'), ((11391, 11426), 'django.utils.translation.ugettext_lazy', '_', (['"""No package matched your query."""'], {}), "('No package matched your query.')\n", (11392, 11426), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((11612, 11659), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Classifier'], {'id': 'classifier_id'}), '(Classifier, id=classifier_id)\n', (11629, 11659), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((11675, 11710), 'django.utils.translation.ugettext_lazy', '_', (['"""No package matched your query."""'], {}), "('No package matched your query.')\n", (11676, 11710), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((11901, 11942), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Package'], {'id': 'package_id'}), '(Package, id=package_id)\n', (11918, 11942), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((13696, 13765), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""pythonnest/package.html"""', 'template_values'], {}), "(request, 'pythonnest/package.html', template_values)\n", (13712, 13765), False, 'from django.template.response import TemplateResponse\n'), ((13842, 13892), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ReleaseDownload'], {'id': 'download_id'}), '(ReleaseDownload, id=download_id)\n', (13859, 13892), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((14202, 14225), 'os.path.isfile', 'os.path.isfile', (['abspath'], {}), 
'(abspath)\n', (14216, 14225), False, 'import os\n'), ((14911, 14953), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['PackageRole'], {'id': 'role_id'}), '(PackageRole, id=role_id)\n', (14928, 14953), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((15084, 15148), 'django.shortcuts.redirect', 'redirect', (['"""pythonnest.views.show_package"""'], {'package_id': 'package.id'}), "('pythonnest.views.show_package', package_id=package.id)\n", (15092, 15148), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((16242, 16267), 'django.contrib.sites.shortcuts.get_current_site', 'get_current_site', (['request'], {}), '(request)\n', (16258, 16267), False, 'from django.contrib.sites.shortcuts import get_current_site\n'), ((16507, 16556), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', 'template_name', 'context'], {}), '(request, template_name, context)\n', (16523, 16556), False, 'from django.template.response import TemplateResponse\n'), ((2121, 2132), 'django.db.models.Q', 'Q', ([], {}), '(**kwargs)\n', (2122, 2132), False, 'from django.db.models import Q\n'), ((2226, 2237), 'django.db.models.Q', 'Q', ([], {}), '(**kwargs)\n', (2227, 2237), False, 'from django.db.models import Q\n'), ((2965, 3031), 'json.dumps', 'json.dumps', (['result'], {'ensure_ascii': '(False)', 'cls': 'JSONDatetime', 'indent': '(4)'}), '(result, ensure_ascii=False, cls=JSONDatetime, indent=4)\n', (2975, 3031), False, 'import json\n'), ((3430, 3496), 'json.dumps', 'json.dumps', (['result'], {'ensure_ascii': '(False)', 'cls': 'JSONDatetime', 'indent': '(4)'}), '(result, ensure_ascii=False, cls=JSONDatetime, indent=4)\n', (3440, 3496), False, 'import json\n'), ((4188, 4217), 'pythonnest.models.ReleaseDownload.objects.all', 'ReleaseDownload.objects.all', ([], {}), '()\n', (4215, 4217), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, 
MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((6410, 6458), 'pythonnest.models.Package.objects.get_or_create', 'Package.objects.get_or_create', ([], {'name': 'package_name'}), '(name=package_name)\n', (6439, 6458), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((7162, 7230), 'pythonnest.models.Release.objects.get_or_create', 'Release.objects.get_or_create', ([], {'package': 'package', 'version': 'version_name'}), '(package=package, version=version_name)\n', (7191, 7230), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((12285, 12343), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Release'], {'id': 'release_id', 'package': 'package'}), '(Release, id=release_id, package=package)\n', (12302, 12343), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((13380, 13395), 'django.utils.translation.ugettext_lazy', '_', (['"""PythonNest"""'], {}), "('PythonNest')\n", (13381, 13395), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((14235, 14253), 'os.remove', 'os.remove', (['abspath'], {}), '(abspath)\n', (14244, 14253), False, 'import os\n'), ((1818, 1834), 'pythonnest.models.normalize_str', 'normalize_str', (['x'], {}), '(x)\n', (1831, 1834), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((1977, 2000), 'pythonnest.models.normalize_str', 'normalize_str', (['value[0]'], {}), '(value[0])\n', (1990, 2000), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((2646, 2673), 'pythonnest.models.normalize_str', 'normalize_str', (['package_name'], {}), '(package_name)\n', 
(2659, 2673), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((3245, 3272), 'pythonnest.models.normalize_str', 'normalize_str', (['package_name'], {}), '(package_name)\n', (3258, 3272), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((3908, 3976), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Release'], {'package': 'package', 'version__iexact': 'version'}), '(Release, package=package, version__iexact=version)\n', (3925, 3976), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((4001, 4048), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'release': 'release'}), '(release=release)\n', (4031, 4048), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((4087, 4134), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'package': 'package'}), '(package=package)\n', (4117, 4134), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((4464, 4498), 'django.utils.translation.ugettext_lazy', '_', (['"""Only POST request are allowed"""'], {}), "('Only POST request are allowed')\n", (4465, 4498), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((4867, 4889), 'django.utils.translation.ugettext_lazy', '_', (['"""Invalid POST form"""'], {}), "('Invalid POST form')\n", (4868, 4889), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((8496, 8564), 'pythonnest.models.ReleaseDownload', 'ReleaseDownload', ([], {'package': 'package', 'release': 'release', 'filename': 
'filename'}), '(package=package, release=release, filename=filename)\n', (8511, 8564), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((8628, 8649), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (8643, 8649), False, 'import os\n'), ((12631, 12678), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'release': 'release'}), '(release=release)\n', (12661, 12678), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((14388, 14463), 'django.urls.reverse', 'reverse', (['"""pythonnest.views.show_package"""'], {'kwargs': "{'package_id': package.id}"}), "('pythonnest.views.show_package', kwargs={'package_id': package.id})\n", (14395, 14463), False, 'from django.urls import reverse\n'), ((14515, 14620), 'django.urls.reverse', 'reverse', (['"""pythonnest.views.show_package"""'], {'kwargs': "{'package_id': package.id, 'release_id': release.id}"}), "('pythonnest.views.show_package', kwargs={'package_id': package.id,\n 'release_id': release.id})\n", (14522, 14620), False, 'from django.urls import reverse\n'), ((14792, 14825), 'django.urls.reverse', 'reverse', (['"""pythonnest.views.index"""'], {}), "('pythonnest.views.index')\n", (14799, 14825), False, 'from django.urls import reverse\n'), ((16105, 16138), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['redirect_to'], {}), '(redirect_to)\n', (16125, 16138), False, 'from django.http import HttpResponse, Http404, QueryDict, HttpResponseRedirect\n'), ((2186, 2197), 'django.db.models.Q', 'Q', ([], {}), '(**kwargs)\n', (2187, 2197), False, 'from django.db.models import Q\n'), ((2891, 2938), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'release': 'release'}), '(release=release)\n', (2921, 2938), 
False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((3356, 3403), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'release': 'release'}), '(release=release)\n', (3386, 3403), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((3825, 3852), 'pythonnest.models.normalize_str', 'normalize_str', (['package_name'], {}), '(package_name)\n', (3838, 3852), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((6223, 6252), 'django.utils.translation.ugettext_lazy', '_', (['"""No package name provided"""'], {}), "('No package name provided')\n", (6224, 6252), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((6324, 6361), 'django.utils.translation.ugettext', 'ugettext', (['"""You must be authenticated"""'], {}), "('You must be authenticated')\n", (6332, 6361), False, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7566, 7592), 'pythonnest.models.Classifier.get', 'Classifier.get', (['classifier'], {}), '(classifier)\n', (7580, 7592), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((8669, 8696), 'os.path.isdir', 'os.path.isdir', (['path_dirname'], {}), '(path_dirname)\n', (8682, 8696), False, 'import os\n'), ((8714, 8739), 'os.makedirs', 'os.makedirs', (['path_dirname'], {}), '(path_dirname)\n', (8725, 8739), False, 'import os\n'), ((10789, 10894), 'django.utils.translation.ugettext_lazy', '_', (['"""You should search “<a href="?search=%%2A%(pattern)s%%2A">*%(text)s*</a>” to find more packages."""'], {}), '(\'You should search “<a 
href="?search=%%2A%(pattern)s%%2A">*%(text)s*</a>” to find more packages.\'\n )\n', (10790, 10894), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((12483, 12496), 'django.utils.translation.ugettext_lazy', '_', (['"""Username"""'], {}), "('Username')\n", (12484, 12496), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((12554, 12568), 'django.forms.Select', 'forms.Select', ([], {}), '()\n', (12566, 12568), False, 'from django import forms\n'), ((12603, 12612), 'django.utils.translation.ugettext_lazy', '_', (['"""Role"""'], {}), "('Role')\n", (12604, 12612), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((13230, 13294), 'django.shortcuts.redirect', 'redirect', (['"""pythonnest.views.show_package"""'], {'package_id': 'package_id'}), "('pythonnest.views.show_package', package_id=package_id)\n", (13238, 13294), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((14261, 14308), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'release': 'release'}), '(release=release)\n', (14291, 14308), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((14673, 14712), 'pythonnest.models.Release.objects.filter', 'Release.objects.filter', ([], {'package': 'package'}), '(package=package)\n', (14695, 14712), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((15903, 15943), 'django.shortcuts.resolve_url', 'resolve_url', (['settings.LOGIN_REDIRECT_URL'], {}), '(settings.LOGIN_REDIRECT_URL)\n', (15914, 15943), False, 'from django.shortcuts import get_object_or_404, redirect, resolve_url\n'), ((2695, 2734), 'pythonnest.models.Release.objects.filter', 'Release.objects.filter', ([], {'package': 'package'}), 
'(package=package)\n', (2717, 2734), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((6499, 6570), 'pythonnest.models.PackageRole', 'PackageRole', ([], {'package': 'package', 'user': 'request.user', 'role': 'PackageRole.OWNER'}), '(package=package, user=request.user, role=PackageRole.OWNER)\n', (6510, 6570), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((7926, 7945), 'pythonnest.models.Dependence.get', 'Dependence.get', (['dep'], {}), '(dep)\n', (7940, 7945), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((8353, 8373), 'hashlib.md5', 'hashlib.md5', (['content'], {}), '(content)\n', (8364, 8373), False, 'import hashlib\n'), ((8937, 8963), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8961, 8963), False, 'import datetime\n'), ((10932, 10951), 'urllib.parse.quote', 'quote', (['orig_pattern'], {}), '(orig_pattern)\n', (10937, 10951), False, 'from urllib.parse import quote\n'), ((10961, 10981), 'django.utils.html.escape', 'escape', (['orig_pattern'], {}), '(orig_pattern)\n', (10967, 10981), False, 'from django.utils.html import escape\n'), ((11439, 11460), 'pythonnest.models.Package.objects.all', 'Package.objects.all', ([], {}), '()\n', (11458, 11460), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((11955, 11998), 'pythonnest.models.PackageRole.objects.filter', 'PackageRole.objects.filter', ([], {'package': 'package'}), '(package=package)\n', (11981, 11998), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, 
normalize_str\n'), ((13064, 13109), 'django.contrib.auth.models.User.objects.get_or_create', 'User.objects.get_or_create', ([], {'username': 'username'}), '(username=username)\n', (13090, 13109), False, 'from django.contrib.auth.models import User\n'), ((13962, 14053), 'pythonnest.models.PackageRole.objects.filter', 'PackageRole.objects.filter', ([], {'package': 'package', 'role': 'PackageRole.OWNER', 'user': 'request.user'}), '(package=package, role=PackageRole.OWNER, user=\n request.user)\n', (13988, 14053), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((6750, 6804), 'django.utils.translation.ugettext', 'ugettext', (['"""You are not allowed to update this package"""'], {}), "('You are not allowed to update this package')\n", (6758, 6804), False, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((8199, 8287), 'pythonnest.models.ReleaseDownload.objects.filter', 'ReleaseDownload.objects.filter', ([], {'package': 'package', 'release': 'release', 'filename': 'filename'}), '(package=package, release=release, filename=\n filename)\n', (8229, 8287), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((10639, 10672), 'pythonnest.models.Package.objects.filter', 'Package.objects.filter', (['sub_query'], {}), '(sub_query)\n', (10661, 10672), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((12053, 12092), 'pythonnest.models.Release.objects.filter', 'Release.objects.filter', ([], {'package': 'package'}), '(package=package)\n', (12075, 12092), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n'), ((6637, 6699), 
'pythonnest.models.PackageRole.objects.filter', 'PackageRole.objects.filter', ([], {'package': 'package', 'user': 'request.user'}), '(package=package, user=request.user)\n', (6663, 6699), False, 'from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, PackageType, normalize_str\n')] |
from scrapy.item import Item, Field
class MyImage(Item):
    """Scrapy item carrying image-download fields for the images pipeline."""

    # URLs of the images to download.
    image_urls = Field()
    # Download results -- typically filled in by scrapy's ImagesPipeline.
    images = Field()
    # Paths of the stored image files.
    image_paths = Field()
    # presumably a label/category for the image -- TODO confirm against the spider
    image_label = Field()
| [
"scrapy.item.Field"
] | [((77, 84), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (82, 84), False, 'from scrapy.item import Item, Field\n'), ((98, 105), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (103, 105), False, 'from scrapy.item import Item, Field\n'), ((124, 131), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (129, 131), False, 'from scrapy.item import Item, Field\n'), ((150, 157), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (155, 157), False, 'from scrapy.item import Item, Field\n')] |
"""Setup inaccel-vitis package."""
from setuptools import find_namespace_packages, setup
# Build/metadata configuration for the inaccel-vitis distribution.
setup(
    name="inaccel-vitis",
    version="0.2",
    description="InAccel Vitis Libraries",
    author="InAccel",
    author_email="<EMAIL>",
    url="https://docs.inaccel.com",
    license="Apache-2.0",
    keywords=["InAccel Coral", "FPGA", "inaccel", "Vitis"],
    packages=find_namespace_packages(include=["inaccel.*"]),
    namespace_packages=["inaccel"],
    install_requires=[
        "coral-api==2.*",
        "opencv-python",
    ],
    include_package_data=True,
    zip_safe=True,
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)
| [
"setuptools.find_namespace_packages"
] | [((140, 186), 'setuptools.find_namespace_packages', 'find_namespace_packages', ([], {'include': "['inaccel.*']"}), "(include=['inaccel.*'])\n", (163, 186), False, 'from setuptools import find_namespace_packages, setup\n')] |
# pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
import panel as pn
from panel import Template
from awesome_panel_extensions.frameworks.fast import FastTemplate
def test_constructor():
    """FastTemplate subclasses Template and keeps the main area it is given."""
    main_area = [pn.Column()]
    fast_template = FastTemplate(main=main_area)
    assert issubclass(FastTemplate, Template)
    assert fast_template.main == main_area
| [
"awesome_panel_extensions.frameworks.fast.FastTemplate",
"panel.Column"
] | [((314, 325), 'panel.Column', 'pn.Column', ([], {}), '()\n', (323, 325), True, 'import panel as pn\n'), ((372, 395), 'awesome_panel_extensions.frameworks.fast.FastTemplate', 'FastTemplate', ([], {'main': 'main'}), '(main=main)\n', (384, 395), False, 'from awesome_panel_extensions.frameworks.fast import FastTemplate\n')] |
from typing import Dict, Any
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
class OnePhaseRGBPPOExperimentConfig(OnePhaseRGBBaseExperimentConfig):
    """One-phase rearrangement experiment: raw RGB observations trained with PPO."""

    # Use the simple CNN rather than a ResNet encoder.
    USE_RESNET_CNN = False

    @classmethod
    def tag(cls) -> str:
        return "OnePhaseRGBPPO"

    @classmethod
    def num_train_processes(cls) -> int:
        return 40

    @classmethod
    def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
        """Define how the model trains."""
        training_steps = cls.TRAINING_STEPS
        # Single PPO loss with a linearly decaying clip parameter.
        ppo_loss = PPO(clip_decay=LinearDecay(training_steps), **PPOConfig)
        stages = [
            PipelineStage(loss_names=["ppo_loss"], max_stage_steps=training_steps)
        ]
        return dict(
            named_losses={"ppo_loss": ppo_loss},
            pipeline_stages=stages,
            num_steps=64,
            num_mini_batch=1,
            update_repeats=3,
            use_lr_decay=True,
            lr=3e-4,
        )
| [
"allenact.utils.experiment_utils.LinearDecay",
"allenact.utils.experiment_utils.PipelineStage"
] | [((943, 1013), 'allenact.utils.experiment_utils.PipelineStage', 'PipelineStage', ([], {'loss_names': "['ppo_loss']", 'max_stage_steps': 'training_steps'}), "(loss_names=['ppo_loss'], max_stage_steps=training_steps)\n", (956, 1013), False, 'from allenact.utils.experiment_utils import LinearDecay, PipelineStage\n'), ((840, 867), 'allenact.utils.experiment_utils.LinearDecay', 'LinearDecay', (['training_steps'], {}), '(training_steps)\n', (851, 867), False, 'from allenact.utils.experiment_utils import LinearDecay, PipelineStage\n')] |
from setuptools import setup, find_packages
from malias import __version__
# Package metadata and build configuration for the `malias` CLI tool.
setup(
    name="malias",
    version=__version__,
    description="",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    keywords="malias system alias",
    url="https://github.com/sfable/malias",
    download_url="https://github.com/sfable/malias/archive/master.zip",
    packages=find_packages(),
    install_requires=["docopt"],
    zip_safe=True,
    entry_points={"console_scripts": ["malias=malias.cli:main"]},
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
    ],
)
| [
"setuptools.find_packages"
] | [((401, 416), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (414, 416), False, 'from setuptools import setup, find_packages\n')] |
#BSD 3-Clause License
#
#Copyright (c) 2019, The Regents of the University of Minnesota
#
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 21:16:37 2019
This is the main code that runs simulated annleaing to generate the data
@author: <NAME>
"""
import sys
import numpy as np
from create_template import define_templates
from T6_PSI_settings import T6_PSI_settings
from simulated_annealer import simulated_annealer
import random
from tqdm import tqdm
#from eliminate_templates import load_template_list
def main():
# Read the json user input file and the current maps that need to be run
# taken as an argument from the scripts
settings_obj = T6_PSI_settings.load_obj()
if len(sys.argv) == 3:
map_start = int(sys.argv[1])
num_maps = int(sys.argv[2])
congestion_enabled = 1
elif len(sys.argv) == 4:
map_start = int(sys.argv[1])
num_maps = int(sys.argv[2])
if sys.argv[3] == "no_congestion":
congestion_enabled = 0
else:
congestion_enabled = 1
else:
map_start = 1
num_maps = 15
congestion_enabled = 1
print("Warning defaulting to %d %d and with congestion" % (map_start, num_maps))
print(sys.argv)
#print(num_maps)
# Initialize the SA parameters
T_init = 70
T_final = 0.0005
alpha_temp = 0.95
num_moves_per_step = 5
state = [] #np.zeros((num_maps, settings_obj.NUM_REGIONS))
e = []#np.zeros(num_maps)
max_drop = [] #np.zeros((num_maps, settings_obj.NUM_REGIONS))
template_list = define_templates(settings_obj, generate_g=0)
congestion = []
all_templates = settings_obj.load_template_list()#range(settings_obj.NUM_TEMPLATES))
size_region_x = int(settings_obj.WIDTH_REGION * 1e6)
size_region_y = int(settings_obj.LENGTH_REGION * 1e6)
current_maps = []
for i in tqdm(range(num_maps)):
# print(i)
power_map_file = settings_obj.map_dir + "current_map_%d.csv" % (
i + map_start)
currents = np.genfromtxt(power_map_file, delimiter=',')
for y in range(settings_obj.current_map_num_regions):
for x in range(settings_obj.current_map_num_regions):
print("%d %d "%(x,y))
#region and neighbors
current_region = np.zeros((3*size_region_x,3*size_region_y))
init_state = np.zeros(9, int)
if congestion_enabled == 1 :
signal_cong = [0.3 + 0.7*random.uniform(0, 1) for _ in range(9) ]
else:
signal_cong = [0 for _ in range(9) ]
if x == 0:
x_start = 0
x_end = x_start+2*size_region_x
if y == 0:
y_start = 0
y_end = y_start+2*size_region_y
current_region[size_region_x:,size_region_y:] = (
currents[x_start:x_end,y_start:y_end])
elif y == settings_obj.current_map_num_regions-1:
y_start = (y-1)*size_region_y
y_end = y_start+2*size_region_y
current_region[size_region_x:,0:2*size_region_y] = (
currents[x_start:x_end,y_start:y_end])
else:
y_start = (y-1)*size_region_y
y_end = y_start+3*size_region_y
current_region[size_region_x:,:] = (
currents[x_start:x_end,y_start:y_end])
elif x == settings_obj.current_map_num_regions-1:
x_start = (x-1)*size_region_x
x_end = x_start+2*size_region_x
if y == 0:
y_start = 0
y_end = y_start+2*size_region_y
current_region[0:2*size_region_x,size_region_y:] = (
currents[x_start:x_end,y_start:y_end])
elif y == settings_obj.current_map_num_regions-1:
y_start = (y-1)*size_region_y
y_end = y_start+2*size_region_y
current_region[0:2*size_region_x,0:2*size_region_y] = (
currents[x_start:x_end,y_start:y_end])
else:
y_start = (y-1)*size_region_y
y_end = y_start+3*size_region_y
current_region[0:2*size_region_x,:] = (
currents[x_start:x_end,y_start:y_end])
else:
x_start = (x-1)*size_region_x
x_end = x_start+3*size_region_x
if y == 0:
y_start = 0
y_end = y_start+2*size_region_y
current_region[:,size_region_y:] = (
currents[x_start:x_end,y_start:y_end])
elif y == settings_obj.current_map_num_regions-1:
y_start = (y-1)*size_region_y
y_end = y_start+2*size_region_y
current_region[:,0:2*size_region_y] = (
currents[x_start:x_end,y_start:y_end])
else:
y_start = (y-1)*size_region_y
y_end = y_start+3*size_region_y
current_region[:,:] = (
currents[x_start:x_end,y_start:y_end])
pdn_opt = simulated_annealer(init_state, T_init, T_final,
alpha_temp, num_moves_per_step,
current_region,congestion_enabled)
n_state, n_e, n_max_drop = pdn_opt.sim_anneal(
all_templates, template_list,signal_cong)
state.append(n_state)
max_drop.append(n_max_drop)
congestion.append(signal_cong)
current_maps.append(current_region.reshape(-1))
e.append(n_e)
#print(n_state,n_max_drop,signal_cong,n_e)
with open(
settings_obj.parallel_run_dir + 'max_drop_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile, max_drop, delimiter=',', fmt='%f')
with open(
settings_obj.parallel_run_dir + 'state_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile, state, delimiter=',', fmt='%d')
with open(
settings_obj.parallel_run_dir + 'energy_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile, e, delimiter=',', fmt='%f')
if congestion_enabled ==1:
with open(
settings_obj.parallel_run_dir + 'congest_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile,congestion, delimiter=',', fmt='%f')
with open(
settings_obj.parallel_run_dir + 'current_maps_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile,current_maps, delimiter=',', fmt='%f')
# Script entry point: run the simulated-annealing driver only when this
# file is executed directly, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"create_template.define_templates",
"random.uniform",
"simulated_annealer.simulated_annealer",
"numpy.zeros",
"numpy.savetxt",
"T6_PSI_settings.T6_PSI_settings.load_obj",
"numpy.genfromtxt"
] | [((2165, 2191), 'T6_PSI_settings.T6_PSI_settings.load_obj', 'T6_PSI_settings.load_obj', ([], {}), '()\n', (2189, 2191), False, 'from T6_PSI_settings import T6_PSI_settings\n'), ((3075, 3119), 'create_template.define_templates', 'define_templates', (['settings_obj'], {'generate_g': '(0)'}), '(settings_obj, generate_g=0)\n', (3091, 3119), False, 'from create_template import define_templates\n'), ((3539, 3583), 'numpy.genfromtxt', 'np.genfromtxt', (['power_map_file'], {'delimiter': '""","""'}), "(power_map_file, delimiter=',')\n", (3552, 3583), True, 'import numpy as np\n'), ((7825, 7879), 'numpy.savetxt', 'np.savetxt', (['outfile', 'max_drop'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, max_drop, delimiter=',', fmt='%f')\n", (7835, 7879), True, 'import numpy as np\n'), ((8034, 8085), 'numpy.savetxt', 'np.savetxt', (['outfile', 'state'], {'delimiter': '""","""', 'fmt': '"""%d"""'}), "(outfile, state, delimiter=',', fmt='%d')\n", (8044, 8085), True, 'import numpy as np\n'), ((8241, 8288), 'numpy.savetxt', 'np.savetxt', (['outfile', 'e'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, e, delimiter=',', fmt='%f')\n", (8251, 8288), True, 'import numpy as np\n'), ((8709, 8767), 'numpy.savetxt', 'np.savetxt', (['outfile', 'current_maps'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, current_maps, delimiter=',', fmt='%f')\n", (8719, 8767), True, 'import numpy as np\n'), ((8492, 8548), 'numpy.savetxt', 'np.savetxt', (['outfile', 'congestion'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, congestion, delimiter=',', fmt='%f')\n", (8502, 8548), True, 'import numpy as np\n'), ((3821, 3869), 'numpy.zeros', 'np.zeros', (['(3 * size_region_x, 3 * size_region_y)'], {}), '((3 * size_region_x, 3 * size_region_y))\n', (3829, 3869), True, 'import numpy as np\n'), ((3894, 3910), 'numpy.zeros', 'np.zeros', (['(9)', 'int'], {}), '(9, int)\n', (3902, 3910), True, 'import numpy as np\n'), ((7097, 7216), 
'simulated_annealer.simulated_annealer', 'simulated_annealer', (['init_state', 'T_init', 'T_final', 'alpha_temp', 'num_moves_per_step', 'current_region', 'congestion_enabled'], {}), '(init_state, T_init, T_final, alpha_temp,\n num_moves_per_step, current_region, congestion_enabled)\n', (7115, 7216), False, 'from simulated_annealer import simulated_annealer\n'), ((4001, 4021), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4015, 4021), False, 'import random\n')] |
#
# Copyright 2013 <NAME>
# Copyright 2014 Red Hat, Inc
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import six
from six.moves.urllib import parse as urlparse
from stevedore import extension
from ceilometer.openstack.common import context
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer.openstack.common import service as os_service
from ceilometer import pipeline
LOG = log.getLogger(__name__)
class Resources(object):
    """Accumulates the static resources and discovery URLs of pipelines.

    Used per polling task: pipelines contribute their statically
    configured resources and their discovery URLs; the ``resources``
    property combines the static list with whatever the agent manager
    discovers from the collected URLs.
    """

    def __init__(self, agent_manager):
        self.agent_manager = agent_manager
        self._resources = []
        self._discovery = []

    def extend(self, pipeline):
        # Fold in both the pipeline's static resources and its
        # discovery URLs.
        self._resources += pipeline.resources
        self._discovery += pipeline.discovery

    @property
    def resources(self):
        """Static resources plus any dynamically discovered ones."""
        if self._discovery:
            discovered = self.agent_manager.discover(self._discovery)
        else:
            discovered = []
        return self._resources + discovered
class PollingTask(object):
    """Polling task for polling samples and inject into pipeline.

    A polling task can be invoked periodically or only once.
    """

    def __init__(self, agent_manager):
        self.manager = agent_manager
        self.pollsters = set()

        # The amalgamation of all static resources for this set of
        # pollsters (sharing a common interval) is extended with any
        # dynamically discovered resources specific to the matching
        # pipelines; if either is present, the per-agent default
        # discovery is overridden.
        def resource_factory():
            return Resources(agent_manager)

        self.resources = collections.defaultdict(resource_factory)
        self.publish_context = pipeline.PublishContext(
            agent_manager.context)

    def add(self, pollster, pipelines):
        """Attach a pollster and the pipelines it should feed."""
        self.publish_context.add_pipelines(pipelines)
        for pipe_line in pipelines:
            self.resources[pollster.name].extend(pipe_line)
        self.pollsters.update([pollster])

    def poll_and_publish(self):
        """Poll samples from every pollster and publish into pipeline."""
        agent_resources = self.manager.discover()
        cache = {}
        with self.publish_context as publisher:
            for pollster in self.pollsters:
                LOG.info(_("Polling pollster %s"), pollster.name)
                source_resources = list(
                    self.resources[pollster.name].resources)
                try:
                    samples = list(pollster.obj.get_samples(
                        manager=self.manager,
                        cache=cache,
                        resources=source_resources or agent_resources,
                    ))
                    publisher(samples)
                except Exception as err:
                    # Keep polling the remaining pollsters even if one fails.
                    LOG.warning(_(
                        'Continue after error from %(name)s: %(error)s')
                        % ({'name': pollster.name, 'error': err}),
                        exc_info=True)
class AgentManager(os_service.Service):
    """Service that groups pollsters by pipeline interval and runs them."""

    def __init__(self, namespace, default_discovery=None):
        super(AgentManager, self).__init__()
        self.default_discovery = default_discovery or []
        self.pollster_manager = self._extensions('poll', namespace)
        self.discovery_manager = self._extensions('discover')
        self.context = context.RequestContext('admin', 'admin', is_admin=True)

    @staticmethod
    def _extensions(category, agent_ns=None):
        # Namespace is either 'ceilometer.<category>.<agent>' or just
        # 'ceilometer.<category>' when no agent namespace is given.
        if agent_ns:
            namespace = 'ceilometer.%s.%s' % (category, agent_ns)
        else:
            namespace = 'ceilometer.%s' % category
        return extension.ExtensionManager(
            namespace=namespace,
            invoke_on_load=True,
        )

    def create_polling_task(self):
        """Create an initially empty polling task."""
        return PollingTask(self)

    def setup_polling_tasks(self):
        """Build one PollingTask per distinct pipeline interval."""
        polling_tasks = {}
        pairs = itertools.product(self.pipeline_manager.pipelines,
                                  self.pollster_manager.extensions)
        for pipe_line, pollster in pairs:
            if not pipe_line.support_meter(pollster.name):
                continue
            interval = pipe_line.get_interval()
            if interval not in polling_tasks:
                polling_tasks[interval] = self.create_polling_task()
            polling_tasks[interval].add(pollster, [pipe_line])
        return polling_tasks

    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline()
        # Schedule one timer per distinct polling interval.
        for interval, task in six.iteritems(self.setup_polling_tasks()):
            self.tg.add_timer(interval,
                              self.interval_task,
                              task=task)

    @staticmethod
    def interval_task(task):
        task.poll_and_publish()

    @staticmethod
    def _parse_discoverer(url):
        # 'scheme://netloc/path' -> (scheme, netloc+path);
        # a bare name (no scheme) -> (name, None).
        parsed = urlparse.urlparse(url)
        name = parsed.scheme or parsed.path
        param = parsed.netloc + parsed.path if parsed.scheme else None
        return name, param

    def _discoverer(self, name):
        """Return the loaded discovery extension matching *name*, or None."""
        for ext in self.discovery_manager:
            if ext.name == name:
                return ext.obj
        return None

    def discover(self, discovery=None):
        """Run the given (or default) discovery URLs and collect resources."""
        resources = []
        for url in (discovery or self.default_discovery):
            name, param = self._parse_discoverer(url)
            discoverer = self._discoverer(name)
            if not discoverer:
                LOG.warning(_('Unknown discovery extension: %s') % name)
                continue
            try:
                resources.extend(discoverer.discover(param))
            except Exception as err:
                LOG.exception(_('Unable to discover resources: %s') % err)
        return resources
| [
"ceilometer.openstack.common.gettextutils._",
"stevedore.extension.ExtensionManager",
"ceilometer.openstack.common.log.getLogger",
"ceilometer.pipeline.setup_pipeline",
"itertools.product",
"ceilometer.openstack.common.context.RequestContext",
"six.moves.urllib.parse.urlparse",
"ceilometer.pipeline.Pu... | [((1038, 1061), 'ceilometer.openstack.common.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1051, 1061), False, 'from ceilometer.openstack.common import log\n'), ((2256, 2297), 'collections.defaultdict', 'collections.defaultdict', (['resource_factory'], {}), '(resource_factory)\n', (2279, 2297), False, 'import collections\n'), ((2329, 2375), 'ceilometer.pipeline.PublishContext', 'pipeline.PublishContext', (['agent_manager.context'], {}), '(agent_manager.context)\n', (2352, 2375), False, 'from ceilometer import pipeline\n'), ((3995, 4050), 'ceilometer.openstack.common.context.RequestContext', 'context.RequestContext', (['"""admin"""', '"""admin"""'], {'is_admin': '(True)'}), "('admin', 'admin', is_admin=True)\n", (4017, 4050), False, 'from ceilometer.openstack.common import context\n'), ((4260, 4328), 'stevedore.extension.ExtensionManager', 'extension.ExtensionManager', ([], {'namespace': 'namespace', 'invoke_on_load': '(True)'}), '(namespace=namespace, invoke_on_load=True)\n', (4286, 4328), False, 'from stevedore import extension\n'), ((4585, 4674), 'itertools.product', 'itertools.product', (['self.pipeline_manager.pipelines', 'self.pollster_manager.extensions'], {}), '(self.pipeline_manager.pipelines, self.pollster_manager.\n extensions)\n', (4602, 4674), False, 'import itertools\n'), ((5148, 5173), 'ceilometer.pipeline.setup_pipeline', 'pipeline.setup_pipeline', ([], {}), '()\n', (5171, 5173), False, 'from ceilometer import pipeline\n'), ((5522, 5544), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (5539, 5544), True, 'from six.moves.urllib import parse as urlparse\n'), ((2937, 2961), 'ceilometer.openstack.common.gettextutils._', '_', (['"""Polling pollster %s"""'], {}), "('Polling pollster %s')\n", (2938, 2961), False, 'from ceilometer.openstack.common.gettextutils import _\n'), ((6326, 6362), 'ceilometer.openstack.common.gettextutils._', '_', (['"""Unknown discovery 
extension: %s"""'], {}), "('Unknown discovery extension: %s')\n", (6327, 6362), False, 'from ceilometer.openstack.common.gettextutils import _\n'), ((3410, 3460), 'ceilometer.openstack.common.gettextutils._', '_', (['"""Continue after error from %(name)s: %(error)s"""'], {}), "('Continue after error from %(name)s: %(error)s')\n", (3411, 3460), False, 'from ceilometer.openstack.common.gettextutils import _\n'), ((6235, 6272), 'ceilometer.openstack.common.gettextutils._', '_', (['"""Unable to discover resources: %s"""'], {}), "('Unable to discover resources: %s')\n", (6236, 6272), False, 'from ceilometer.openstack.common.gettextutils import _\n')] |
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import pretend
import pytest
from werkzeug.exceptions import NotFound, BadRequest
from werkzeug.routing import Map
from warehouse.legacy import pypi, xmlrpc
from warehouse.packaging import urls
@pytest.mark.parametrize("content_type", [None, "text/html", "__empty__"])
def test_pypi_index(content_type):
    """A /pypi request with no :action permanently redirects to the root."""
    headers = {}
    if content_type != "__empty__":
        headers["Content-Type"] = content_type

    app = pretend.stub()
    request = pretend.stub(
        args={},
        headers=headers,
        url_adapter=pretend.stub(
            build=pretend.call_recorder(lambda *a, **kw: "/"),
        ),
    )

    resp = pypi.pypi(app, request)

    # Expect a permanent redirect to the URL built for the index view.
    assert resp.status_code == 301
    assert resp.headers["Location"] == "/"
    assert request.url_adapter.build.calls == [
        pretend.call("warehouse.views.index", {}, force_external=False),
    ]
def test_pypi_route_action(monkeypatch):
    """A ?:action=<name> request dispatches to the registered handler."""
    app = pretend.stub()
    request = pretend.stub(
        args={':action': 'test'},
        headers={},
    )

    # Start from an empty registry so this test fully controls what
    # handlers exist.
    _action_methods = {}
    monkeypatch.setattr(pypi, '_action_methods', _action_methods)

    # Register a handler for the 'test' action; pypi.pypi should invoke it
    # and return its result unchanged.
    @pypi.register('test')
    def test(app, request):
        test.called = True
        return 'success'

    resp = pypi.pypi(app, request)

    assert resp == 'success'
    assert test.called
def test_pypi_route_action_double(monkeypatch):
    """Registering the same action name twice raises KeyError."""
    monkeypatch.setattr(pypi, '_action_methods', {'test': None})
    with pytest.raises(KeyError):
        pypi.register('test')
def test_daytime(monkeypatch):
    """The daytime action renders the (frozen) current time."""
    # Pin time.time() to the epoch so the response is deterministic.
    monkeypatch.setattr(time, 'time', lambda: 0)
    resp = pypi.daytime(pretend.stub(), pretend.stub())
    assert resp.response[0] == b'19700101T00:00:00\n'
@pytest.mark.parametrize(("version", "callback"), [
    (None, None),
    (None, 'yes'),
    ('1.0', 'yes'),
    ('1.0', None),
])
def test_json(monkeypatch, version, callback):
    """project_json returns info/releases/urls JSON, optionally JSONP-wrapped."""
    # Recorded stubs for the packaging-layer queries so the calls can be
    # asserted afterwards.
    get_project = pretend.call_recorder(lambda n: {'name': 'spam'})
    get_project_versions = pretend.call_recorder(lambda n: ['2.0', '1.0'])
    get_last_serial = pretend.call_recorder(lambda *n: 42)
    app = pretend.stub(
        config=pretend.stub(cache=pretend.stub(browser=False, varnish=False)),
        db=pretend.stub(
            packaging=pretend.stub(
                get_project=get_project,
                get_project_versions=get_project_versions,
                get_last_serial=get_last_serial,
            )
        )
    )
    request = pretend.stub(args={})
    if callback:
        request.args['callback'] = callback

    # Stub out the XML-RPC interface the JSON view delegates to.
    release_data = pretend.call_recorder(lambda n, v: dict(some='data'))
    release_urls = pretend.call_recorder(lambda n, v: [dict(
        some='url',
        upload_time=datetime.date(1970, 1, 1)
    )])
    all_release_urls = pretend.call_recorder(lambda *n: {
        '1.0': [dict(some='data', upload_time=datetime.date(1970, 1, 1))],
        '2.0': [dict(some='data', upload_time=datetime.date(1970, 1, 1))],
    })
    Interface = pretend.call_recorder(lambda a, r: pretend.stub(
        release_data=release_data,
        release_urls=release_urls,
        all_release_urls=all_release_urls,
    ))
    monkeypatch.setattr(xmlrpc, 'Interface', Interface)

    resp = pypi.project_json(app, request, project_name='spam',
                             version=version)

    # When no explicit version is requested the latest ('2.0') is used.
    assert get_project.calls == [pretend.call('spam')]
    assert get_project_versions.calls == [pretend.call('spam')]
    assert release_data.calls == [pretend.call('spam', version or '2.0')]
    assert release_urls.calls == [pretend.call('spam', version or '2.0')]
    assert all_release_urls.calls == [pretend.call('spam')]
    assert get_last_serial.calls == [pretend.call()]

    expected = '{"info": {"some": "data"}, ' \
        '"releases": ' \
        '{"1.0": [{"some": "data", "upload_time": "1970-01-01T00:00:00"}], ' \
        '"2.0": [{"some": "data", "upload_time": "1970-01-01T00:00:00"}]}, ' \
        '"urls": [{"some": "url", "upload_time": "1970-01-01T00:00:00"}]}'

    # With a callback the payload is wrapped as JSONP.
    if callback:
        expected = '/**/ %s(%s);' % (callback, expected)

    assert resp.data == expected.encode("utf8")
def test_jsonp_invalid():
    """A JSONP callback that is not a valid identifier is rejected."""
    request = pretend.stub(args={'callback': 'quite invalid'})
    with pytest.raises(BadRequest):
        pypi.project_json(pretend.stub(), request, project_name='spam')
@pytest.mark.parametrize(("project", "version"), [
    (None, None),
    (pretend.stub(name="spam"), None),
    (pretend.stub(name="spam"), '1'),
])
def test_json_missing(monkeypatch, project, version):
    """An unknown project or version makes the JSON view raise NotFound."""
    if project:
        return_value = {'name': project}
    else:
        return_value = None
    get_project = pretend.call_recorder(lambda n: return_value)
    # No versions exist for the project in any of these cases.
    get_project_versions = pretend.call_recorder(lambda n: [])

    app = pretend.stub(
        db=pretend.stub(
            packaging=pretend.stub(
                get_project=get_project,
                get_project_versions=get_project_versions,
            )
        )
    )
    request = pretend.stub(args={})

    with pytest.raises(NotFound):
        pypi.project_json(app, request, project_name='spam', version=version)
def test_rss(app, monkeypatch):
    """The updates RSS view renders recently updated releases in order."""
    now = datetime.datetime.utcnow()
    # Three recent updates, including two versions of the same project.
    get_recently_updated = pretend.call_recorder(lambda num=10: [
        dict(name='spam', version='1.0', summary='hai spam', created=now),
        dict(name='ham', version='2.0', summary='hai ham', created=now),
        dict(name='spam', version='2.0', summary='hai spam v2', created=now),
    ])
    app.db = pretend.stub(
        packaging=pretend.stub(
            get_recently_updated=get_recently_updated,
        )
    )
    app.config = pretend.stub(
        site={"url": "http://test.server/", "name": "PyPI"},
    )
    request = pretend.stub(
        url_adapter=Map(urls.urls).bind('test.server', '/'),
    )

    resp = pypi.rss(app, request)

    # Each release gets a project/<name>/<version>/ URL built from urls.urls.
    assert resp.response.context == {
        "description": "package updates",
        "site": {"name": "PyPI", "url": "http://test.server/"},
        "releases": [
            {
                "url": "http://test.server/project/spam/1.0/",
                "version": "1.0",
                "name": "spam",
                "summary": "hai spam",
                "created": now,
            },
            {
                "url": "http://test.server/project/ham/2.0/",
                "version": "2.0",
                "name": "ham",
                "summary": "hai ham",
                "created": now,
            },
            {
                "url": "http://test.server/project/spam/2.0/",
                "version": "2.0",
                "name": "spam",
                "summary": "hai spam v2",
                "created": now,
            }
        ],
    }
    # The view asks the database for the 40 most recent updates.
    assert get_recently_updated.calls == [pretend.call(num=40)]
def test_packages_rss(app, monkeypatch):
    """The new-projects RSS view renders recently created projects."""
    now = datetime.datetime.utcnow()
    get_recent_projects = pretend.call_recorder(lambda num=10: [
        dict(name='spam', version='1.0', summary='hai spam', created=now),
        dict(name='ham', version='2.0', summary='hai ham', created=now),
        dict(name='eggs', version='21.0', summary='hai eggs!', created=now),
    ])
    app.db = pretend.stub(
        packaging=pretend.stub(
            get_recent_projects=get_recent_projects,
        )
    )
    app.config = pretend.stub(
        site={"url": "http://test.server/", "name": "PyPI"},
    )
    request = pretend.stub(
        url_adapter=Map(urls.urls).bind('test.server', '/'),
    )

    resp = pypi.packages_rss(app, request)

    # Unlike the updates feed, URLs here point at the project (no version).
    assert resp.response.context == {
        "description": "new projects",
        "site": {"name": "PyPI", "url": "http://test.server/"},
        "releases": [
            {
                "url": "http://test.server/project/spam/",
                "version": "1.0",
                "name": "spam",
                "summary": "hai spam",
                "created": now,
            },
            {
                "url": "http://test.server/project/ham/",
                "version": "2.0",
                "name": "ham",
                "summary": "hai ham",
                "created": now,
            },
            {
                "url": "http://test.server/project/eggs/",
                "version": "21.0",
                "name": "eggs",
                "summary": "hai eggs!",
                "created": now,
            },
        ],
    }
    # The view asks the database for the 40 most recent projects.
    assert get_recent_projects.calls == [pretend.call(num=40)]
def test_rss_xml_template(app, monkeypatch):
    """Rendering legacy/rss.xml produces the expected RSS 0.91 document."""
    template = app.templates.get_template('legacy/rss.xml')
    content = template.render(
        site=dict(url='http://test.server/', name="PyPI"),
        description='package updates',
        releases=[
            {
                'url': 'http://test.server/project/spam/',
                'version': u'1.0',
                'name': u'spam',
                'summary': u'hai spam',
                'created': datetime.date(1970, 1, 1),
            }, {
                'url': 'http://test.server/project/ham/',
                'version': u'2.0',
                'name': u'ham',
                'summary': u'hai ham',
                'created': datetime.date(1970, 1, 1),
            }, {
                'url': 'http://test.server/project/eggs/',
                'version': u'21.0',
                'name': u'eggs',
                'summary': u'hai eggs!',
                'created': datetime.date(1970, 1, 1),
            }
        ],
    )
    # The expected output is compared byte-for-byte, including the literal
    # blank-ish lines the template emits between items.
    assert content == '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN" \
"http://my.netscape.com/publish/formats/rss-0.91.dtd">
<rss version="0.91">
 <channel>
  <title>PyPI Recent Package Updates</title>
  <link>http://test.server/</link>
  <description>Recent package updates at PyPI</description>
  <language>en</language>
  \n\
  <item>
   <title>spam 1.0</title>
   <link>http://test.server/project/spam/</link>
   <guid>http://test.server/project/spam/</guid>
   <description>hai spam</description>
   <pubDate>01 Jan 1970 00:00:00 GMT</pubDate>
  </item>
  \n\
  <item>
   <title>ham 2.0</title>
   <link>http://test.server/project/ham/</link>
   <guid>http://test.server/project/ham/</guid>
   <description>hai ham</description>
   <pubDate>01 Jan 1970 00:00:00 GMT</pubDate>
  </item>
  \n\
  <item>
   <title>eggs 21.0</title>
   <link>http://test.server/project/eggs/</link>
   <guid>http://test.server/project/eggs/</guid>
   <description>hai eggs!</description>
   <pubDate>01 Jan 1970 00:00:00 GMT</pubDate>
  </item>
  \n\
 </channel>
</rss>'''
| [
"warehouse.legacy.pypi.rss",
"werkzeug.routing.Map",
"warehouse.legacy.pypi.project_json",
"datetime.datetime.utcnow",
"warehouse.legacy.pypi.packages_rss",
"warehouse.legacy.pypi.pypi",
"pytest.mark.parametrize",
"warehouse.legacy.pypi.daytime",
"pytest.raises",
"pretend.stub",
"pretend.call",
... | [((795, 868), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""content_type"""', "[None, 'text/html', '__empty__']"], {}), "('content_type', [None, 'text/html', '__empty__'])\n", (818, 868), False, 'import pytest\n'), ((2528, 2642), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('version', 'callback')", "[(None, None), (None, 'yes'), ('1.0', 'yes'), ('1.0', None)]"], {}), "(('version', 'callback'), [(None, None), (None,\n 'yes'), ('1.0', 'yes'), ('1.0', None)])\n", (2551, 2642), False, 'import pytest\n'), ((1016, 1030), 'pretend.stub', 'pretend.stub', ([], {}), '()\n', (1028, 1030), False, 'import pretend\n'), ((1355, 1378), 'warehouse.legacy.pypi.pypi', 'pypi.pypi', (['app', 'request'], {}), '(app, request)\n', (1364, 1378), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((1684, 1698), 'pretend.stub', 'pretend.stub', ([], {}), '()\n', (1696, 1698), False, 'import pretend\n'), ((1713, 1763), 'pretend.stub', 'pretend.stub', ([], {'args': "{':action': 'test'}", 'headers': '{}'}), "(args={':action': 'test'}, headers={})\n", (1725, 1763), False, 'import pretend\n'), ((1885, 1906), 'warehouse.legacy.pypi.register', 'pypi.register', (['"""test"""'], {}), "('test')\n", (1898, 1906), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((1999, 2022), 'warehouse.legacy.pypi.pypi', 'pypi.pypi', (['app', 'request'], {}), '(app, request)\n', (2008, 2022), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((2337, 2351), 'pretend.stub', 'pretend.stub', ([], {}), '()\n', (2349, 2351), False, 'import pretend\n'), ((2366, 2380), 'pretend.stub', 'pretend.stub', ([], {}), '()\n', (2378, 2380), False, 'import pretend\n'), ((2443, 2469), 'warehouse.legacy.pypi.daytime', 'pypi.daytime', (['app', 'request'], {}), '(app, request)\n', (2455, 2469), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((2723, 2772), 'pretend.call_recorder', 'pretend.call_recorder', (["(lambda n: {'name': 'spam'})"], {}), "(lambda n: {'name': 'spam'})\n", (2744, 
2772), False, 'import pretend\n'), ((2800, 2847), 'pretend.call_recorder', 'pretend.call_recorder', (["(lambda n: ['2.0', '1.0'])"], {}), "(lambda n: ['2.0', '1.0'])\n", (2821, 2847), False, 'import pretend\n'), ((2870, 2906), 'pretend.call_recorder', 'pretend.call_recorder', (['(lambda *n: 42)'], {}), '(lambda *n: 42)\n', (2891, 2906), False, 'import pretend\n'), ((3264, 3285), 'pretend.stub', 'pretend.stub', ([], {'args': '{}'}), '(args={})\n', (3276, 3285), False, 'import pretend\n'), ((4025, 4094), 'warehouse.legacy.pypi.project_json', 'pypi.project_json', (['app', 'request'], {'project_name': '"""spam"""', 'version': 'version'}), "(app, request, project_name='spam', version=version)\n", (4042, 4094), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((4970, 4984), 'pretend.stub', 'pretend.stub', ([], {}), '()\n', (4982, 4984), False, 'import pretend\n'), ((4999, 5047), 'pretend.stub', 'pretend.stub', ([], {'args': "{'callback': 'quite invalid'}"}), "(args={'callback': 'quite invalid'})\n", (5011, 5047), False, 'import pretend\n'), ((5426, 5471), 'pretend.call_recorder', 'pretend.call_recorder', (['(lambda n: return_value)'], {}), '(lambda n: return_value)\n', (5447, 5471), False, 'import pretend\n'), ((5499, 5534), 'pretend.call_recorder', 'pretend.call_recorder', (['(lambda n: [])'], {}), '(lambda n: [])\n', (5520, 5534), False, 'import pretend\n'), ((5764, 5785), 'pretend.stub', 'pretend.stub', ([], {'args': '{}'}), '(args={})\n', (5776, 5785), False, 'import pretend\n'), ((5943, 5969), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5967, 5969), False, 'import datetime\n'), ((6418, 6483), 'pretend.stub', 'pretend.stub', ([], {'site': "{'url': 'http://test.server/', 'name': 'PyPI'}"}), "(site={'url': 'http://test.server/', 'name': 'PyPI'})\n", (6430, 6483), False, 'import pretend\n'), ((6607, 6629), 'warehouse.legacy.pypi.rss', 'pypi.rss', (['app', 'request'], {}), '(app, request)\n', (6615, 6629), False, 'from 
warehouse.legacy import pypi, xmlrpc\n'), ((7617, 7643), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7641, 7643), False, 'import datetime\n'), ((8087, 8152), 'pretend.stub', 'pretend.stub', ([], {'site': "{'url': 'http://test.server/', 'name': 'PyPI'}"}), "(site={'url': 'http://test.server/', 'name': 'PyPI'})\n", (8099, 8152), False, 'import pretend\n'), ((8276, 8307), 'warehouse.legacy.pypi.packages_rss', 'pypi.packages_rss', (['app', 'request'], {}), '(app, request)\n', (8293, 8307), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((2239, 2262), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2252, 2262), False, 'import pytest\n'), ((2272, 2293), 'warehouse.legacy.pypi.register', 'pypi.register', (['"""test"""'], {}), "('test')\n", (2285, 2293), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((5057, 5082), 'pytest.raises', 'pytest.raises', (['BadRequest'], {}), '(BadRequest)\n', (5070, 5082), False, 'import pytest\n'), ((5092, 5144), 'warehouse.legacy.pypi.project_json', 'pypi.project_json', (['app', 'request'], {'project_name': '"""spam"""'}), "(app, request, project_name='spam')\n", (5109, 5144), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((5796, 5819), 'pytest.raises', 'pytest.raises', (['NotFound'], {}), '(NotFound)\n', (5809, 5819), False, 'import pytest\n'), ((5829, 5898), 'warehouse.legacy.pypi.project_json', 'pypi.project_json', (['app', 'request'], {'project_name': '"""spam"""', 'version': 'version'}), "(app, request, project_name='spam', version=version)\n", (5846, 5898), False, 'from warehouse.legacy import pypi, xmlrpc\n'), ((1513, 1576), 'pretend.call', 'pretend.call', (['"""warehouse.views.index"""', '{}'], {'force_external': '(False)'}), "('warehouse.views.index', {}, force_external=False)\n", (1525, 1576), False, 'import pretend\n'), ((3822, 3927), 'pretend.stub', 'pretend.stub', ([], {'release_data': 'release_data', 'release_urls': 'release_urls', 'all_release_urls': 
'all_release_urls'}), '(release_data=release_data, release_urls=release_urls,\n all_release_urls=all_release_urls)\n', (3834, 3927), False, 'import pretend\n'), ((4158, 4178), 'pretend.call', 'pretend.call', (['"""spam"""'], {}), "('spam')\n", (4170, 4178), False, 'import pretend\n'), ((4222, 4242), 'pretend.call', 'pretend.call', (['"""spam"""'], {}), "('spam')\n", (4234, 4242), False, 'import pretend\n'), ((4278, 4316), 'pretend.call', 'pretend.call', (['"""spam"""', "(version or '2.0')"], {}), "('spam', version or '2.0')\n", (4290, 4316), False, 'import pretend\n'), ((4352, 4390), 'pretend.call', 'pretend.call', (['"""spam"""', "(version or '2.0')"], {}), "('spam', version or '2.0')\n", (4364, 4390), False, 'import pretend\n'), ((4430, 4450), 'pretend.call', 'pretend.call', (['"""spam"""'], {}), "('spam')\n", (4442, 4450), False, 'import pretend\n'), ((4489, 4503), 'pretend.call', 'pretend.call', ([], {}), '()\n', (4501, 4503), False, 'import pretend\n'), ((5221, 5246), 'pretend.stub', 'pretend.stub', ([], {'name': '"""spam"""'}), "(name='spam')\n", (5233, 5246), False, 'import pretend\n'), ((5260, 5285), 'pretend.stub', 'pretend.stub', ([], {'name': '"""spam"""'}), "(name='spam')\n", (5272, 5285), False, 'import pretend\n'), ((6316, 6371), 'pretend.stub', 'pretend.stub', ([], {'get_recently_updated': 'get_recently_updated'}), '(get_recently_updated=get_recently_updated)\n', (6328, 6371), False, 'import pretend\n'), ((7542, 7562), 'pretend.call', 'pretend.call', ([], {'num': '(40)'}), '(num=40)\n', (7554, 7562), False, 'import pretend\n'), ((7987, 8040), 'pretend.stub', 'pretend.stub', ([], {'get_recent_projects': 'get_recent_projects'}), '(get_recent_projects=get_recent_projects)\n', (7999, 8040), False, 'import pretend\n'), ((9204, 9224), 'pretend.call', 'pretend.call', ([], {'num': '(40)'}), '(num=40)\n', (9216, 9224), False, 'import pretend\n'), ((1153, 1196), 'pretend.call_recorder', 'pretend.call_recorder', (["(lambda *a, **kw: '/')"], {}), "(lambda *a, 
**kw: '/')\n", (1174, 1196), False, 'import pretend\n'), ((2965, 3007), 'pretend.stub', 'pretend.stub', ([], {'browser': '(False)', 'varnish': '(False)'}), '(browser=False, varnish=False)\n', (2977, 3007), False, 'import pretend\n'), ((3057, 3175), 'pretend.stub', 'pretend.stub', ([], {'get_project': 'get_project', 'get_project_versions': 'get_project_versions', 'get_last_serial': 'get_last_serial'}), '(get_project=get_project, get_project_versions=\n get_project_versions, get_last_serial=get_last_serial)\n', (3069, 3175), False, 'import pretend\n'), ((5606, 5691), 'pretend.stub', 'pretend.stub', ([], {'get_project': 'get_project', 'get_project_versions': 'get_project_versions'}), '(get_project=get_project, get_project_versions=get_project_versions\n )\n', (5618, 5691), False, 'import pretend\n'), ((6548, 6562), 'werkzeug.routing.Map', 'Map', (['urls.urls'], {}), '(urls.urls)\n', (6551, 6562), False, 'from werkzeug.routing import Map\n'), ((8217, 8231), 'werkzeug.routing.Map', 'Map', (['urls.urls'], {}), '(urls.urls)\n', (8220, 8231), False, 'from werkzeug.routing import Map\n'), ((9689, 9714), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (9702, 9714), False, 'import datetime\n'), ((9924, 9949), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (9937, 9949), False, 'import datetime\n'), ((10164, 10189), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (10177, 10189), False, 'import datetime\n'), ((3522, 3547), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (3535, 3547), False, 'import datetime\n'), ((3660, 3685), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (3673, 3685), False, 'import datetime\n'), ((3735, 3760), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (3748, 3760), False, 'import datetime\n')] |
import numpy as np
class SequenceTools(object):
    """Static helpers for DNA/RNA/protein sequence conversions.

    Bundles lookup tables (standard genetic code, DNA base <-> 2-bit Gray
    code, codon <-> integer index) and conversion utilities between string,
    one-hot (N x 4) array, and Gray-code (2N x 2) representations.
    """

    # DNA base <-> 2-bit Gray-code pair; keys/values are lowercase bases.
    dna2gray_ = {'c': (0, 0), 't': (1, 0), 'g': (1, 1), 'a': (0, 1)}
    gray2dna_ = {(0, 0): 'c', (1, 0): 't', (1, 1): 'g', (0, 1): 'a'}

    # Standard genetic code; '!' marks a stop codon.
    codon2protein_ = {'ttt': 'f', 'ttc': 'f', 'tta': 'l', 'ttg': 'l', 'tct': 's', 'tcc': 's', 'tca': 's',
                      'tcg': 's', 'tat': 'y', 'tac': 'y', 'taa': '!', 'tag': '!', 'tgt': 'c', 'tgc': 'c',
                      'tga': '!', 'tgg': 'w', 'ctt': 'l', 'ctc': 'l', 'cta': 'l', 'ctg': 'l', 'cct': 'p',
                      'ccc': 'p', 'cca': 'p', 'ccg': 'p', 'cat': 'h', 'cac': 'h', 'caa': 'q', 'cag': 'q',
                      'cgt': 'r', 'cgc': 'r', 'cga': 'r', 'cgg': 'r', 'att': 'i', 'atc': 'i', 'ata': 'i',
                      'atg': 'm', 'act': 't', 'acc': 't', 'aca': 't', 'acg': 't', 'aat': 'n', 'aac': 'n',
                      'aaa': 'k', 'aag': 'k', 'agt': 's', 'agc': 's', 'aga': 'r', 'agg': 'r', 'gtt': 'v',
                      'gtc': 'v', 'gta': 'v', 'gtg': 'v', 'gct': 'a', 'gcc': 'a', 'gca': 'a', 'gcg': 'a',
                      'gat': 'd', 'gac': 'd', 'gaa': 'e', 'gag': 'e', 'ggt': 'g', 'ggc': 'g', 'gga': 'g',
                      'ggg': 'g'}

    # Inverse genetic code: amino acid -> list of synonymous codons.
    protein2codon_ = {
        'l': ['tta', 'ttg', 'ctt', 'ctc', 'cta', 'ctg'],
        's': ['tct', 'tcc', 'tca', 'tcg', 'agt', 'agc'],
        'r': ['cgt', 'cgc', 'cga', 'cgg', 'aga', 'agg'],
        'v': ['gtt', 'gtc', 'gta', 'gtg'],
        'a': ['gct', 'gcc', 'gca', 'gcg'],
        'p': ['cct', 'ccc', 'cca', 'ccg'],
        't': ['act', 'acc', 'aca', 'acg'],
        'g': ['ggt', 'ggc', 'gga', 'ggg'],
        'stop': ['taa', 'tag', 'tga'],
        'i': ['att', 'atc', 'ata'],
        'y': ['tat', 'tac'],
        'f': ['ttt', 'ttc'],
        'c': ['tgt', 'tgc'],
        'h': ['cat', 'cac'],
        'q': ['caa', 'cag'],
        'n': ['aat', 'aac'],
        'k': ['aaa', 'aag'],
        'd': ['gat', 'gac'],
        'e': ['gaa', 'gag'],
        'w': ['tgg'],
        'm': ['atg']
    }

    # Per-amino-acid constraints: codon position tuple -> allowed base tuples.
    protein2constraint_ = {
        'l': {(1,): {('t',)}, (0, 2): {('t', 'a'), ('t', 'g'), ('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g')}},
        's': {(0, 1, 2): {('t', 'c', 't'), ('t', 'c', 'c'), ('t', 'c', 'a'), ('t', 'c', 'g'), ('a', 'g', 't'),
                          ('a', 'g', 'c')}},
        'r': {(1,): {('g',)}, (0, 2): {('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g'), ('a', 'a'), ('a', 'g')}},
        'v': {(0,): {('g',)}, (1,): {('t',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
        'a': {(0,): {('g',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
        'p': {(0,): {('c',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
        't': {(0,): {('a',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
        'g': {(0,): {('g',)}, (1,): {('g',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
        'stop': {(0,): {('t',)}, (1, 2): {('a', 'a'), ('a', 'g'), ('g', 'a')}},
        'i': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('t',), ('a',), ('c',)}},
        'y': {(0,): {('t',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
        'f': {(0,): {('t',)}, (1,): {('t',)}, (2,): {('t',), ('c',)}},
        'c': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('t',), ('c',)}},
        'h': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
        'q': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
        'n': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
        'k': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
        'd': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
        'e': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
        'w': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('g',)}},
        'm': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('g',)}},
    }

    # Integer mapping from Fernandes and Vinga (2016)
    codon2idx_ = {'aaa': 1, 'aac': 2, 'aag': 3, 'aat': 4, 'aca': 5, 'acc': 6, 'acg': 7, 'act': 8, 'aga': 9,
                  'agc': 10, 'agg': 11, 'agt': 12, 'ata': 13, 'atc': 14, 'atg': 15, 'att': 16, 'caa': 17,
                  'cac': 18, 'cag': 19, 'cat': 20, 'cca': 21, 'ccc': 22, 'ccg': 23, 'cct': 24, 'cga': 25,
                  'cgc': 26, 'cgg': 27, 'cgt': 28, 'cta': 29, 'ctc': 30, 'ctg': 31, 'ctt': 32, 'gaa': 33,
                  'gac': 34, 'gag': 35, 'gat': 36, 'gca': 37, 'gcc': 38, 'gcg': 39, 'gct': 40, 'gga': 41,
                  'ggc': 42, 'ggg': 43, 'ggt': 44, 'gta': 45, 'gtc': 46, 'gtg': 47, 'gtt': 48, 'taa': 49,
                  'tac': 50, 'tag': 51, 'tat': 52, 'tca': 53, 'tcc': 54, 'tcg': 55, 'tct': 56, 'tga': 57,
                  'tgc': 58, 'tgg': 59, 'tgt': 60, 'tta': 61, 'ttc': 62, 'ttg': 63, 'ttt': 64}

    @staticmethod
    def convert_dna_to_rna(seq):
        """Return the RNA transcript of a lowercase DNA string (t -> u)."""
        dna2rna = {'t': 'u', 'a': 'a', 'g': 'g', 'c': 'c'}
        return "".join([dna2rna[s] for s in seq])

    @staticmethod
    def convert_dna_arr_to_str(dna_arr, base_order='ATCG'):
        """ Convert N x 4 tokenized array into length N string """
        dna_seq_str = ''
        for i in range(dna_arr.shape[0]):
            token = np.argmax(dna_arr[i, :])
            dna_seq_str += base_order[token]
        return dna_seq_str

    @staticmethod
    def get_aa_codons():
        """Return a (num_aa, 6, 3, 4) one-hot array of every codon per amino acid.

        Amino acids are ordered alphabetically; unused codon slots (for amino
        acids with fewer than 6 synonymous codons) remain all-zero.
        """
        aa_list = sorted(list(SequenceTools.protein2codon_.keys()))
        aa_codons = np.zeros((len(aa_list), 6, 3, 4))
        i = 0
        for aa in aa_list:
            cods = SequenceTools.protein2codon_[aa]
            j = 0
            for c in cods:
                cod_arr = SequenceTools.convert_dna_str_to_arr(c)
                aa_codons[i, j] = cod_arr
                j += 1
            i += 1
        return aa_codons

    @staticmethod
    def convert_dna_str_to_arr(dna_str, base_order='ATCG'):
        """ Convert length N string into N x 4 tokenized array"""
        dna_str = dna_str.upper()
        N = len(dna_str)
        dna_arr = np.zeros((N, 4))
        for i in range(N):
            idx = base_order.index(dna_str[i])
            dna_arr[i, idx] = 1.
        return dna_arr

    @staticmethod
    def convert_dna_arr_to_gray(dna_arr, base_order='ATCG'):
        """ Convert N x 4 tokenized array into 2N x 2 tokenized gray code array"""
        N = dna_arr.shape[0]
        gray_arr = np.zeros((2 * N, 2))
        for i in range(N):
            token = np.argmax(dna_arr[i, :])
            # Lower-case before the lookup: base_order defaults to upper case
            # while dna2gray_ is keyed on lower-case bases (previously this
            # raised KeyError for any uppercase base_order).
            dna_i = base_order[token].lower()
            gray_i = SequenceTools.dna2gray_[dna_i]
            for j in range(2):
                gray_arr[2 * i + j, gray_i[j]] = 1
        return gray_arr

    @staticmethod
    def convert_gray_to_dna_str(gray_arr):
        """Convert a 2N x 2 tokenized gray code array into a length N string."""
        Ngray = gray_arr.shape[0]
        dna_str = ''
        i = 0
        while i < Ngray:
            g1 = int(np.argmax(gray_arr[i, :]))
            g2 = int(np.argmax(gray_arr[i + 1, :]))
            dna_str += SequenceTools.gray2dna_[(g1, g2)]
            i += 2
        return dna_str

    @staticmethod
    def convert_dna_str_to_gray(dna_str):
        """Convert length N string into 2N x 2 tokenized gray code array"""
        dna_str = dna_str.lower()
        N = len(dna_str)
        gray_arr = np.zeros((2 * N, 2))
        for i in range(N):
            gray_i = SequenceTools.dna2gray_[dna_str[i]]
            for j in range(2):
                gray_arr[2 * i + j, gray_i[j]] = 1
        return gray_arr

    @staticmethod
    def convert_rna_to_dna(seq):
        """Return the DNA equivalent of a lowercase RNA string (u -> t)."""
        rna2dna = {'u': 't', 'a': 'a', 'g': 'g', 'c': 'c'}
        return "".join([rna2dna[s] for s in seq])

    @classmethod
    def get_codon_from_idx(cls, idx):
        """Return the codon string for a Fernandes–Vinga integer index."""
        idx2codon = {val: key for key, val in SequenceTools.codon2idx_.items()}
        return idx2codon[idx]

    @classmethod
    def get_start_codon_int(cls):
        """Return the integer index of the start codon 'atg'."""
        return SequenceTools.codon2idx_['atg']

    @classmethod
    def get_stop_codon_ints(cls):
        """Return the integer indices of the three stop codons."""
        stop_codons = SequenceTools.protein2codon_['stop']
        return [SequenceTools.codon2idx_[s] for s in stop_codons]

    @classmethod
    def translate_dna_str(cls, dna_seq):
        """Translate a DNA string into a protein string ('!' = stop codon)."""
        dna_seq = dna_seq.lower()
        prot_seq = []
        i = 0
        while i < len(dna_seq):
            cod = dna_seq[i:i + 3]
            prot_seq.append(SequenceTools.codon2protein_[cod])
            i += 3
        prot_seq = "".join(prot_seq)
        return prot_seq
| [
"numpy.zeros",
"numpy.argmax"
] | [((5834, 5850), 'numpy.zeros', 'np.zeros', (['(N, 4)'], {}), '((N, 4))\n', (5842, 5850), True, 'import numpy as np\n'), ((6192, 6212), 'numpy.zeros', 'np.zeros', (['(2 * N, 2)'], {}), '((2 * N, 2))\n', (6200, 6212), True, 'import numpy as np\n'), ((7051, 7071), 'numpy.zeros', 'np.zeros', (['(2 * N, 2)'], {}), '((2 * N, 2))\n', (7059, 7071), True, 'import numpy as np\n'), ((5036, 5060), 'numpy.argmax', 'np.argmax', (['dna_arr[i, :]'], {}), '(dna_arr[i, :])\n', (5045, 5060), True, 'import numpy as np\n'), ((6260, 6284), 'numpy.argmax', 'np.argmax', (['dna_arr[i, :]'], {}), '(dna_arr[i, :])\n', (6269, 6284), True, 'import numpy as np\n'), ((6658, 6683), 'numpy.argmax', 'np.argmax', (['gray_arr[i, :]'], {}), '(gray_arr[i, :])\n', (6667, 6683), True, 'import numpy as np\n'), ((6706, 6735), 'numpy.argmax', 'np.argmax', (['gray_arr[i + 1, :]'], {}), '(gray_arr[i + 1, :])\n', (6715, 6735), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates and prints a few examples for illustration purposes.
"""
import random
from absl import app
import rules
import splits
def _show_examples(header, examples):
  """Print the inputs/targets of the first five examples under *header*."""
  for example in examples[:5]:
    print(header)
    print(" inputs: " + example.inputs)
    print(" targets: " + example.targets)


def main(_):
  """Generate IID train/test splits and print a few examples of each."""
  random.seed(0)
  rules.precompute_rules()
  train, test = splits.generate_training_and_test_sets_iid(
      50, 20, 500, 0.9, answer_at_the_end=False)
  print(f"train: {len(train)}, test: {len(test)}")
  _show_examples("- Train ---------------", train)
  _show_examples("- Test ---------------", test)


if __name__ == "__main__":
  app.run(main)
| [
"rules.precompute_rules",
"splits.generate_training_and_test_sets_iid",
"absl.app.run",
"random.seed"
] | [((759, 773), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (770, 773), False, 'import random\n'), ((776, 800), 'rules.precompute_rules', 'rules.precompute_rules', ([], {}), '()\n', (798, 800), False, 'import rules\n'), ((820, 909), 'splits.generate_training_and_test_sets_iid', 'splits.generate_training_and_test_sets_iid', (['(50)', '(20)', '(500)', '(0.9)'], {'answer_at_the_end': '(False)'}), '(50, 20, 500, 0.9,\n answer_at_the_end=False)\n', (862, 909), False, 'import splits\n'), ((1258, 1271), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1265, 1271), False, 'from absl import app\n')] |
import importlib
import subprocess

# Best-effort bootstrap: try to import each dependency and pip-install it
# when missing. importlib.import_module(lib_name) imports the module named
# by the *string*; the previous ``import lib_name`` looked for a module
# literally called "lib_name" and therefore always failed.
lib_list = ['numpy', 'ymmsl', 'sobol_seq', 'csv', 'seaborn', 'zenodo_get']
for lib_name in lib_list:
    try:
        importlib.import_module(lib_name)
    except ImportError:
        print(lib_name, ' Module not installed')
        # csv ships with Python; its PyPI backport is published as 'python-csv'.
        pkg_name = 'python-csv' if lib_name == 'csv' else lib_name
        subprocess.run(['pip', 'install', pkg_name])
import numpy as np
import ymmsl
import sobol_seq
import csv
import os
import seaborn as sns
import zenodo_get
def dim_transform(sobol_vector, uncertain_list):
    """Rescale each column of a unit-interval sample matrix in place.

    Column ``i`` of *sobol_vector* (values in [0, 1]) is mapped linearly onto
    the [min, max] range of the i-th entry of *uncertain_list*. The input
    array is modified in place and also returned.
    """
    for col, spec in enumerate(uncertain_list):
        hi = spec.get('max')
        lo = spec.get('min')
        sobol_vector[:, col] = lo + (hi - lo) * sobol_vector[:, col]
    return sobol_vector
####################################################################################
##### Sample generation and UQ campaign creation (including instances folder)#######
####################################################################################
# Note:
# This is used to generate UQ samples for only four biological parameters:
# 1) Endothelium endpoint 2)smc max stran 3)balloon extension 4) Fenestration probability
# Naming of Folder and files for samples
# Level 0: UQtest (UQ campaign name)
# Level 1: UQtest/A (sample matrix of sobol sequence)
# Level 2: UQtest/A/A_X where X vary from 1 -> N (N: number of samples)
# Level 3: UQtest/A/A_X/input.ymmsl
### Main function
# Number of samples for UQ
# Note that ISR3D is a computationally intensive application.
# Running 128 instances would need some cluster resources
# You can start with a small number, 16 for instances.
NumSample = 128
# Template path to the ymmsl file (relative path from ISR3D/Result/UQtest/ to ISR3D/UQ/template/input_stage4.ymmsl)
input_path = '../../UQ/template/'
input_ymmsl_filename = 'input_stage4.ymmsl'
# Output directory for UQ campaign folder and name
output_path = './'
experiment_name = 'UQtest'
# Read in the data of template ymmsl file
with open(input_path+input_ymmsl_filename,'r') as f:
    ymmsl_data = ymmsl.load(f)
# Take out the unchanged model part and need-for-change settings part for ymmsl model
model = ymmsl_data.model
settings = ymmsl_data.settings
# Set uncertain parameters and its ranges as a list of {name, min, max} dicts
ymmsl_uncertain_parameters = [
    {
        'name': 'smc.endo_endpoint',
        'min': 10.0,
        'max': 20.0
    },
    {
        'name': 'smc.balloon_extension',
        'min': 0.5,
        'max': 1.5
    },
    {
        'name': 'smc.smc_max_strain',
        'min': 1.2,
        'max': 1.8
    },
    {
        'name': 'smc.fenestration_probability',
        'min': 0.0,
        'max': 0.1
    }]
# Count the total uncertain input dimensions (here 4 parameters)
num_uncer_para = len(ymmsl_uncertain_parameters)
print('Number of uncertain parameter: '+str(num_uncer_para))
# Generate Sobol sequence in range (0,1), save the file and transform to (min,max)
A = sobol_seq.i4_sobol_generate(num_uncer_para,NumSample)
A = dim_transform(A,ymmsl_uncertain_parameters)
np.savetxt("A.csv",A)
# Create corresponding directory and folders
try:
    os.mkdir(output_path+experiment_name)
except OSError:
    # NOTE(review): '%' binds tighter than '+', so only output_path is
    # interpolated into the message and experiment_name is appended after.
    print ("Creation of the directory %s failed" % output_path+experiment_name)
else:
    print ("Successfully created the directory %s" % output_path+experiment_name)
# A: Replace the corresponding value within the dict and output the file
os.mkdir(output_path+experiment_name+'/A')
checklist = ['A']
for n in range(NumSample):
    sample_path = output_path+experiment_name+'/A'+'/A_'+str(n)
    os.mkdir(sample_path)
    # Generate file for ymmsl: overwrite each uncertain setting with row n of A
    num_para = 0
    for para in ymmsl_uncertain_parameters:
        settings[para.get('name')] = float(A[n,num_para])
        num_para = num_para + 1
    config = ymmsl.Configuration(model, settings, None, None)
    with open(sample_path+'/input_stage4.ymmsl', 'w') as f:
        ymmsl.save(config, f)
print('ymmsl input for each UQ instance has been generated')
####################################################################################
##### Run shell script to broadcast other input files to each sample folder#########
####################################################################################
import subprocess
# Download Other input files from Zenodo
print('Start to download other input files for ISR3D from Zenodo')
subprocess.run(['wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat'],shell = True)
subprocess.run(['wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat'],shell = True)
subprocess.run(['wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv'],shell = True)
print('Start to broadcast the input to each UQ instance directory')
# Template path to the ymmsl file (relative path from ISR3D/Result/UQtest/ to ISR3D/UQ/function/BCastStage3.sh)
pass_arg = str(NumSample)
subprocess.run(['bash','../../UQ/function/BCastStage3.sh', '%s'%pass_arg])
print('Sample generation done')
print('Sample generation done') | [
"ymmsl.load",
"ymmsl.save",
"ymmsl.Configuration",
"subprocess.run",
"os.mkdir",
"numpy.savetxt",
"sobol_seq.i4_sobol_generate"
] | [((3344, 3398), 'sobol_seq.i4_sobol_generate', 'sobol_seq.i4_sobol_generate', (['num_uncer_para', 'NumSample'], {}), '(num_uncer_para, NumSample)\n', (3371, 3398), False, 'import sobol_seq\n'), ((3446, 3468), 'numpy.savetxt', 'np.savetxt', (['"""A.csv"""', 'A'], {}), "('A.csv', A)\n", (3456, 3468), True, 'import numpy as np\n'), ((3820, 3866), 'os.mkdir', 'os.mkdir', (["(output_path + experiment_name + '/A')"], {}), "(output_path + experiment_name + '/A')\n", (3828, 3866), False, 'import os\n'), ((4791, 4899), 'subprocess.run', 'subprocess.run', (["['wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat']"], {'shell': '(True)'}), "([\n 'wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat'],\n shell=True)\n", (4805, 4899), False, 'import subprocess\n'), ((4892, 5004), 'subprocess.run', 'subprocess.run', (["['wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat']"], {'shell': '(True)'}), "([\n 'wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat'\n ], shell=True)\n", (4906, 5004), False, 'import subprocess\n'), ((4996, 5109), 'subprocess.run', 'subprocess.run', (["['wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv']"], {'shell': '(True)'}), "([\n 'wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv'\n ], shell=True)\n", (5010, 5109), False, 'import subprocess\n'), ((5307, 5384), 'subprocess.run', 'subprocess.run', (["['bash', '../../UQ/function/BCastStage3.sh', '%s' % pass_arg]"], {}), "(['bash', '../../UQ/function/BCastStage3.sh', '%s' % pass_arg])\n", (5321, 5384), False, 'import subprocess\n'), ((2312, 2325), 'ymmsl.load', 'ymmsl.load', (['f'], {}), '(f)\n', (2322, 2325), False, 'import ymmsl\n'), ((3523, 3562), 'os.mkdir', 'os.mkdir', (['(output_path + experiment_name)'], {}), '(output_path + experiment_name)\n', (3531, 3562), False, 'import os\n'), ((3977, 3998), 'os.mkdir', 'os.mkdir', (['sample_path'], {}), '(sample_path)\n', (3985, 3998), False, 
'import os\n'), ((4208, 4256), 'ymmsl.Configuration', 'ymmsl.Configuration', (['model', 'settings', 'None', 'None'], {}), '(model, settings, None, None)\n', (4227, 4256), False, 'import ymmsl\n'), ((4325, 4346), 'ymmsl.save', 'ymmsl.save', (['config', 'f'], {}), '(config, f)\n', (4335, 4346), False, 'import ymmsl\n'), ((229, 277), 'subprocess.run', 'subprocess.run', (["['pip', 'install', 'python-csv']"], {}), "(['pip', 'install', 'python-csv'])\n", (243, 277), False, 'import subprocess\n'), ((331, 382), 'subprocess.run', 'subprocess.run', (["['pip', 'install', '%s' % lib_name]"], {}), "(['pip', 'install', '%s' % lib_name])\n", (345, 382), False, 'import subprocess\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.proto_messages_pb2 import (
Location, Motif, MPFOutput
)
def get_matrix_attributes(matrix):
    """Extract (rows, cols, flattened data) from a numpy array.

    Used to serialize arrays into the MPFOutput protobuf message.

    Parameters
    ----------
    matrix : np.ndarray
        The numpy array to extract the attributes from.

    Returns
    -------
    tuple :
        (rows, cols, flattened array), or (None, None, None) when the input
        is not a non-empty array-like. cols is 0 for 1-D arrays.
    """
    if not core.is_array_like(matrix) or len(matrix) < 1:
        return None, None, None

    shape = matrix.shape
    col_count = shape[1] if len(shape) > 1 else 0
    return shape[0], col_count, matrix.flatten()
def get_windows(profile):
    """Return the window size(s) of a profile as a list.

    Parameters
    ----------
    profile : dict
        The MatrixProfile or PMP profile.

    Returns
    -------
    list :
        A single-element list for a MatrixProfile, the profile's windows
        for a PMP, otherwise an empty list.
    """
    if core.is_mp_obj(profile):
        return [profile.get('w')]
    if core.is_pmp_obj(profile):
        return profile.get('windows')
    return []
def get_proto_motif(motif):
    """Convert a MatrixProfile/PMP motif dict into a protobuf Motif.

    Note
    ----
    A single dimensional motif location will only have a row index and
    a column index of 0.

    Parameters
    ----------
    motif : dict
        The motif to convert (keys 'motifs' and 'neighbors').

    Returns
    -------
    Motif :
        The motif object for the MPFOutput message.
    """
    def _to_location(index):
        # (row, col) pairs come through as array-likes; bare ints are 1-D.
        loc = Location()
        loc.row = 0
        loc.col = 0
        if core.is_array_like(index):
            loc.row = index[0]
            loc.col = index[1]
        else:
            loc.row = index
        return loc

    out_motif = Motif()
    for indices in motif['motifs']:
        out_motif.motifs.append(_to_location(indices))
    for neighbor in motif['neighbors']:
        out_motif.neighbors.append(_to_location(neighbor))

    return out_motif
def get_proto_discord(discord):
    """Convert a discord location into a protobuf Location.

    Note
    ----
    A single dimensional discord location will only have a row index and
    a column index of 0.

    Parameters
    ----------
    discord : int or tuple
        The discord with row, col index or single index.

    Returns
    -------
    Location :
        The Location message used in the MPFOutput protobuf message.
    """
    location = Location()
    location.row = 0
    location.col = 0
    # (row, col) pairs come through as array-likes; bare ints are 1-D.
    if core.is_array_like(discord):
        location.row = discord[0]
        location.col = discord[1]
    else:
        location.row = discord

    return location
def profile_to_proto(profile):
    """
    Utility function that takes a MatrixProfile or PMP profile data structure
    and converts it to the MPFOutput protobuf message object.

    Parameters
    ----------
    profile : dict
        The profile to convert.

    Returns
    -------
    MPFOutput :
        The MPFOutput protobuf message object.

    Raises
    ------
    ValueError
        When the profile is neither a MatrixProfile nor a PMP object.
    """
    output = MPFOutput()

    # add higher level attributes that work for PMP and MP
    output.klass = profile.get('class')
    output.algorithm = profile.get('algorithm')
    output.metric = profile.get('metric')
    output.sample_pct = profile.get('sample_pct')

    # add time series data
    ts = profile.get('data').get('ts')
    query = profile.get('data').get('query')
    rows, cols, data = get_matrix_attributes(ts)
    output.ts.rows = rows
    output.ts.cols = cols
    output.ts.data.extend(data)

    # add query data (only when a non-empty query array is present)
    query = profile.get('data').get('query')
    rows, cols, data = get_matrix_attributes(query)

    if rows and cols and core.is_array_like(data):
        output.query.rows = rows
        output.query.cols = cols
        output.query.data.extend(data)

    # add window(s)
    output.windows.extend(get_windows(profile))

    # add motifs
    motifs = profile.get('motifs')
    if not isinstance(motifs, type(None)):
        for motif in motifs:
            output.motifs.append(get_proto_motif(motif))

    # add discords
    discords = profile.get('discords')
    if not isinstance(discords, type(None)):
        for discord in discords:
            output.discords.append(get_proto_discord(discord))

    # add cmp (corrected matrix profile)
    cmp = profile.get('cmp')
    if not isinstance(cmp, type(None)):
        rows, cols, data = get_matrix_attributes(cmp)
        output.cmp.rows = rows
        output.cmp.cols = cols
        output.cmp.data.extend(data)

    # add av (annotation vector)
    av = profile.get('av')
    if not isinstance(av, type(None)):
        rows, cols, data = get_matrix_attributes(av)
        output.av.rows = rows
        output.av.cols = cols
        output.av.data.extend(data)

    # add av_type
    av_type = profile.get('av_type')
    if not isinstance(av_type, type(None)) and len(av_type) > 0:
        output.av_type = av_type

    # add the matrix profile specific attributes
    if core.is_mp_obj(profile):
        output.mp.ez = profile.get('ez')
        output.mp.join = profile.get('join')

        # add mp
        rows, cols, data = get_matrix_attributes(profile.get('mp'))
        output.mp.mp.rows = rows
        output.mp.mp.cols = cols
        output.mp.mp.data.extend(data)

        # add pi
        rows, cols, data = get_matrix_attributes(profile.get('pi'))
        output.mp.pi.rows = rows
        output.mp.pi.cols = cols
        output.mp.pi.data.extend(data)

        # add lmp (left matrix profile, optional)
        rows, cols, data = get_matrix_attributes(profile.get('lmp'))
        if rows and cols and core.is_array_like(data):
            output.mp.lmp.rows = rows
            output.mp.lmp.cols = cols
            output.mp.lmp.data.extend(data)

        # add lpi (optional)
        rows, cols, data = get_matrix_attributes(profile.get('lpi'))
        if rows and cols and core.is_array_like(data):
            output.mp.lpi.rows = rows
            output.mp.lpi.cols = cols
            output.mp.lpi.data.extend(data)

        # add rmp (right matrix profile, optional)
        rows, cols, data = get_matrix_attributes(profile.get('rmp'))
        if rows and cols and core.is_array_like(data):
            output.mp.rmp.rows = rows
            output.mp.rmp.cols = cols
            output.mp.rmp.data.extend(data)

        # add rpi (optional)
        rows, cols, data = get_matrix_attributes(profile.get('rpi'))
        if rows and cols and core.is_array_like(data):
            output.mp.rpi.rows = rows
            output.mp.rpi.cols = cols
            output.mp.rpi.data.extend(data)
    # add the pan matrix profile specific attributes
    elif core.is_pmp_obj(profile):
        # add pmp
        rows, cols, data = get_matrix_attributes(profile.get('pmp'))
        output.pmp.pmp.rows = rows
        output.pmp.pmp.cols = cols
        output.pmp.pmp.data.extend(data)

        # add pmpi
        rows, cols, data = get_matrix_attributes(profile.get('pmpi'))
        output.pmp.pmpi.rows = rows
        output.pmp.pmpi.cols = cols
        output.pmp.pmpi.data.extend(data)
    else:
        raise ValueError('Expecting Pan-MatrixProfile or MatrixProfile!')

    return output
def to_mpf(profile):
    """Serialize a profile into the MPF binary file format.

    Parameters
    ----------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.

    Returns
    -------
    str :
        The profile as a binary formatted string.
    """
    return profile_to_proto(profile).SerializeToString()
def from_proto_to_array(value):
    """Rebuild a numpy array with its original dimensions from a protobuf matrix.

    Parameters
    ----------
    value : array_like
        A protobuf matrix message with ``rows``, ``cols`` and ``data``.

    Returns
    -------
    np.ndarray :
        The reconstructed array (2-D when cols > 0, else 1-D), or None for
        a missing/empty message.
    """
    if isinstance(value, type(None)) or len(value.data) < 1:
        return None

    arr = np.array(value.data)
    if value.cols > 0:
        arr = arr.reshape((value.rows, value.cols))

    return arr
def discords_from_proto(discords, is_one_dimensional=False):
    """Transform protobuf discord locations back to native indices.

    Parameter
    ---------
    discords : array_like
        The protobuf formatted array of Location messages.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    np.ndarray :
        Row indices when 1-D, otherwise (row, col) pairs, as an int array.
    """
    if is_one_dimensional:
        locations = [discord.row for discord in discords]
    else:
        locations = [(discord.row, discord.col) for discord in discords]

    return np.array(locations, dtype=int)
def motifs_from_proto(motifs, is_one_dimensional=False):
    """Transform protobuf motif messages back to native motif dicts.

    Parameter
    ---------
    motifs : array_like
        The protobuf formatted array of Motif messages.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    list :
        One dict per motif with 'motifs' and 'neighbors' location lists;
        locations are row indices when 1-D, otherwise (row, col) pairs.
    """
    def _to_index(location):
        return location.row if is_one_dimensional else (location.row, location.col)

    return [
        {
            'motifs': [_to_index(location) for location in motif.motifs],
            'neighbors': [_to_index(neighbor) for neighbor in motif.neighbors],
        }
        for motif in motifs
    ]
def from_mpf(profile):
    """
    Converts binary formatted MPFOutput message into a profile data structure.

    Parameters
    ----------
    profile : str
        The profile as a binary formatted MPFOutput message.

    Returns
    -------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.
    """
    obj = MPFOutput()
    obj.ParseFromString(profile)
    out = {}
    is_one_dimensional = False

    # load in all higher level attributes
    out['class'] = obj.klass
    out['algorithm'] = obj.algorithm
    out['metric'] = obj.metric
    out['sample_pct'] = obj.sample_pct
    out['data'] = {
        'ts': from_proto_to_array(obj.ts),
        'query': from_proto_to_array(obj.query)
    }

    if obj.klass == 'MatrixProfile':
        out['mp'] = from_proto_to_array(obj.mp.mp)
        out['pi'] = from_proto_to_array(obj.mp.pi)
        out['lmp'] = from_proto_to_array(obj.mp.lmp)
        out['lpi'] = from_proto_to_array(obj.mp.lpi)
        out['rmp'] = from_proto_to_array(obj.mp.rmp)
        out['rpi'] = from_proto_to_array(obj.mp.rpi)
        out['ez'] = obj.mp.ez
        out['join'] = obj.mp.join
        out['w'] = obj.windows[0]
        # a 1-D mp means motif/discord locations were stored as bare rows
        is_one_dimensional = len(out['mp'].shape) == 1
    elif obj.klass == 'PMP':
        out['pmp'] = from_proto_to_array(obj.pmp.pmp)
        out['pmpi'] = from_proto_to_array(obj.pmp.pmpi)
        out['windows'] = np.array(obj.windows)

    if not isinstance(obj.discords, type(None)) and len(obj.discords) > 0:
        out['discords'] = discords_from_proto(
            obj.discords, is_one_dimensional=is_one_dimensional)

    if not isinstance(obj.motifs, type(None)) and len(obj.motifs) > 0:
        out['motifs'] = motifs_from_proto(
            obj.motifs, is_one_dimensional=is_one_dimensional)

    if not isinstance(obj.cmp, type(None)) and len(obj.cmp.data) > 0:
        out['cmp'] = from_proto_to_array(obj.cmp)

    if not isinstance(obj.av, type(None)) and len(obj.av.data) > 0:
        out['av'] = from_proto_to_array(obj.av)

    if not isinstance(obj.av_type, type(None)) and len(obj.av_type) > 0:
        out['av_type'] = obj.av_type

    return out
| [
"matrixprofile.core.is_array_like",
"matrixprofile.io.protobuf.proto_messages_pb2.Location",
"matrixprofile.core.is_pmp_obj",
"numpy.array",
"matrixprofile.io.protobuf.proto_messages_pb2.Motif",
"matrixprofile.core.is_mp_obj",
"matrixprofile.io.protobuf.proto_messages_pb2.MPFOutput"
] | [((1392, 1415), 'matrixprofile.core.is_mp_obj', 'core.is_mp_obj', (['profile'], {}), '(profile)\n', (1406, 1415), False, 'from matrixprofile import core\n'), ((2035, 2042), 'matrixprofile.io.protobuf.proto_messages_pb2.Motif', 'Motif', ([], {}), '()\n', (2040, 2042), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((3230, 3240), 'matrixprofile.io.protobuf.proto_messages_pb2.Location', 'Location', ([], {}), '()\n', (3238, 3240), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((3297, 3324), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['discord'], {}), '(discord)\n', (3315, 3324), False, 'from matrixprofile import core\n'), ((3840, 3851), 'matrixprofile.io.protobuf.proto_messages_pb2.MPFOutput', 'MPFOutput', ([], {}), '()\n', (3849, 3851), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((5743, 5766), 'matrixprofile.core.is_mp_obj', 'core.is_mp_obj', (['profile'], {}), '(profile)\n', (5757, 5766), False, 'from matrixprofile import core\n'), ((8684, 8704), 'numpy.array', 'np.array', (['value.data'], {}), '(value.data)\n', (8692, 8704), True, 'import numpy as np\n'), ((9430, 9454), 'numpy.array', 'np.array', (['out'], {'dtype': 'int'}), '(out, dtype=int)\n', (9438, 9454), True, 'import numpy as np\n'), ((10811, 10822), 'matrixprofile.io.protobuf.proto_messages_pb2.MPFOutput', 'MPFOutput', ([], {}), '()\n', (10820, 10822), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((1467, 1491), 'matrixprofile.core.is_pmp_obj', 'core.is_pmp_obj', (['profile'], {}), '(profile)\n', (1482, 1491), False, 'from matrixprofile import core\n'), ((2094, 2104), 'matrixprofile.io.protobuf.proto_messages_pb2.Location', 'Location', ([], {}), '()\n', (2102, 2104), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((2198, 2225), 
'matrixprofile.core.is_array_like', 'core.is_array_like', (['indices'], {}), '(indices)\n', (2216, 2225), False, 'from matrixprofile import core\n'), ((2430, 2440), 'matrixprofile.io.protobuf.proto_messages_pb2.Location', 'Location', ([], {}), '()\n', (2438, 2440), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((2534, 2562), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['neighbor'], {}), '(neighbor)\n', (2552, 2562), False, 'from matrixprofile import core\n'), ((4482, 4506), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (4500, 4506), False, 'from matrixprofile import core\n'), ((7351, 7375), 'matrixprofile.core.is_pmp_obj', 'core.is_pmp_obj', (['profile'], {}), '(profile)\n', (7366, 7375), False, 'from matrixprofile import core\n'), ((825, 851), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['matrix'], {}), '(matrix)\n', (843, 851), False, 'from matrixprofile import core\n'), ((6353, 6377), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (6371, 6377), False, 'from matrixprofile import core\n'), ((6616, 6640), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (6634, 6640), False, 'from matrixprofile import core\n'), ((6879, 6903), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (6897, 6903), False, 'from matrixprofile import core\n'), ((7142, 7166), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (7160, 7166), False, 'from matrixprofile import core\n'), ((11868, 11889), 'numpy.array', 'np.array', (['obj.windows'], {}), '(obj.windows)\n', (11876, 11889), True, 'import numpy as np\n')] |
from decouple import config
# Media root directory; read from the MEDIA_PATH environment variable (or
# settings file) with a developer-local default as fallback.
MEDIA_PATH = config("MEDIA_PATH", "/home/mdcg/Documents/")
| [
"decouple.config"
] | [((43, 88), 'decouple.config', 'config', (['"""MEDIA_PATH"""', '"""/home/mdcg/Documents/"""'], {}), "('MEDIA_PATH', '/home/mdcg/Documents/')\n", (49, 88), False, 'from decouple import config\n')] |
from flask import Flask, jsonify, request, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['CUSTOM_VAR'] = 5
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///web_app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
class Tweet(db.Model):
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
#Routing
@app.route('/')
def index():
#return 'hello world'
return render_template('homepage.html')
@app.route('/about')
def about():
return 'about me'
@app.route('/users')
@app.route('/users.json')
def users():
users = User.query.all()
users_response = []
for u in users:
user_dict = u.__dict__
del user_dict['_sa_instance_state']
users_response.append(user_dict)
return jsonify(users_response)
@app.route('/users/create', methods=['Post'])
def create_user():
print('Creating a new user...')
print('Form data:', dict(request.form))
if 'name' in request.form:
name = request.form['name']
print(name)
db.session.add(User(name=name))
db.session.commit()
return jsonify({'message': 'created ok', 'name': name})
else:
return jsonify({'message': 'oops please specify a name'})
@app.route('/hello')
def hello(name=None):
print('Visiting the hello page')
print('Request params:', dict(request.args))
if 'name' in request.args:
name = request.args['name']
message = f'hello, {name}'
else:
message = 'hello world'
return render_template('hello.html', message=message)
| [
"flask.render_template",
"flask.Flask",
"flask_migrate.Migrate",
"flask_sqlalchemy.SQLAlchemy",
"flask.jsonify"
] | [((141, 156), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'from flask import Flask, jsonify, request, render_template\n'), ((255, 270), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (265, 270), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((282, 298), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {}), '(app, db)\n', (289, 298), False, 'from flask_migrate import Migrate\n'), ((654, 686), 'flask.render_template', 'render_template', (['"""homepage.html"""'], {}), "('homepage.html')\n", (669, 686), False, 'from flask import Flask, jsonify, request, render_template\n'), ((1007, 1030), 'flask.jsonify', 'jsonify', (['users_response'], {}), '(users_response)\n', (1014, 1030), False, 'from flask import Flask, jsonify, request, render_template\n'), ((1764, 1810), 'flask.render_template', 'render_template', (['"""hello.html"""'], {'message': 'message'}), "('hello.html', message=message)\n", (1779, 1810), False, 'from flask import Flask, jsonify, request, render_template\n'), ((1348, 1396), 'flask.jsonify', 'jsonify', (["{'message': 'created ok', 'name': name}"], {}), "({'message': 'created ok', 'name': name})\n", (1355, 1396), False, 'from flask import Flask, jsonify, request, render_template\n'), ((1422, 1472), 'flask.jsonify', 'jsonify', (["{'message': 'oops please specify a name'}"], {}), "({'message': 'oops please specify a name'})\n", (1429, 1472), False, 'from flask import Flask, jsonify, request, render_template\n')] |
import pathing
import collections
import itertools
import copy
__all__ = ('Tree', 'Cache', 'Entry', 'EntryCache', 'RowCache', 'BulkRowCache',
'AlikeBulkRowCache')
class Tree(dict):
"""
Simple **BTree** implementation.
"""
__slots__ = ()
def _make(self):
"""
Create a subtree; called with no arguments.
"""
return ()
def _crawl(self, keys, stop = True, fail = False):
"""
Reach the node corresponding to ``keys``.
If set to not ``stop``, create subtrees.
Yield are nodes along the path.
"""
node = self
keys = iter(keys)
while True:
yield node
try:
key = next(keys)
except StopIteration:
break
try:
node = node[key]
except KeyError:
if stop:
if fail:
raise
return
args = self._make()
node[key] = node = self.__class__(*args)
def select(self, keys):
"""
Get the value corresponding to ``keys``.
"""
(*trees, value) = self._crawl(keys, fail = True)
return value
def create(self, keys, value):
"""
Place a value at ``keys``, creating subtrees.
"""
(*keys, key) = keys
(*trees, tree) = self._crawl(keys, stop = False)
tree[key] = value
def _brush(self, keys):
"""
Remove the final value and all empty subtrees along ``keys``.
"""
trees = self._crawl(keys)
pairs = tuple(zip(trees, keys))
for (tree, key) in reversed(pairs):
yield tree.pop(key)
if tree:
break
def delete(self, keys):
"""
Remove and return the value at ``keys``, deleting empty subtrees.
"""
(value, *trees) = self._brush(keys)
return value
def switch(self, old, new):
"""
Move the value at ``old`` to ``new``, deleting and creating subtrees.
"""
node = Tree.delete(self, old)
Tree.create(self, new, node)
return node
class Cache(Tree):
"""
Same as :class:`.Tree`, but declaring its expected depth allows for floor
traversing.
"""
__slots__ = ('_depth',)
def __init__(self, depth):
super().__init__()
self._depth = depth
@property
def depth(self):
"""
Get the expected depth.
"""
return self._depth
def _make(self):
depth = self._depth - 1
return (depth,)
def traverse(self):
"""
Yield ``(keys, value)`` pairs.
"""
yield from pathing.derive(self, min = self._depth, max = self._depth)
def entries(self):
"""
Yield all floor values.
"""
for (keys, value) in self.traverse():
yield value
def __repr__(self):
entries = tuple(self.entries())
size = len(entries)
return f'<Cache({size})>'
class Entry:
"""
:class:`dict`\-like attribute-accessible and read-only representation of
data.
.. code-block:: py
>>> entry = Entry({'name': 'Pup', 'age': 6})
>>> entry.name
'Pup'
>>> list(entry)
['name', 'age']
"""
__slots__ = ('__data__',)
def __init__(self, data = None, direct = False):
self.__data__ = {} if data is None else data if direct else data.copy()
def __getitem__(self, key):
return self.__data__[key]
def __getattr__(self, key):
try:
value = self[key]
except KeyError as error:
raise AttributeError(*error.args) from None
return value
def __copy__(self):
data = self.__data__.copy()
fake = self.__class__(data, True)
return fake
def __deepcopy__(self, memo):
fake = copy.copy(self)
data = fake.__data__
for (key, value) in data.items():
data[key] = copy.deepcopy(value)
return fake
def __iter__(self):
yield from self.__data__.keys()
def __repr__(self):
items = self.__data__.items()
make = '{0}={1}'.format
show = '|'.join(itertools.starmap(make, items))
return f'<{self.__class__.__name__}({show})>'
def _create(value):
"""
Create or return an entry from the value.
"""
return value if isinstance(value, Entry) else Entry(value)
def _modify(entry, data):
"""
Modify an :class:`.Entry` with the data.
"""
entry.__data__.update(data)
class EntryCache(Cache):
"""
Store, create and modify :class:`.Entry` instances.
"""
__slots__ = ('_manage',)
_Asset = collections.namedtuple('Asset', 'create modify')
def __init__(self, depth, create = _create, modify = _modify):
super().__init__(depth)
self._manage = self._Asset(create, modify)
def _make(self, *args, **kwargs):
args = super()._make()
return (*args, self._manage.create, self._manage.modify)
def create(self, keys, data):
"""
Create and put an entry at ``keys``.
"""
entry = self._manage.create(data)
super().create(keys, entry)
return entry
def modify(self, keys, data):
"""
Modify the entry at ``keys``\'s with ``data`` and return ``(old, new)``.
"""
entry = Tree.select(self, keys)
dummy = copy.deepcopy(entry)
self._manage.modify(entry, data)
return (dummy, entry)
class RowCache(EntryCache):
"""
Knowing primary allows for better handling.
"""
__slots__ = ('_primary',)
def __init__(self, primary):
super().__init__(len(primary))
self._primary = primary
@property
def primary(self):
"""
Get the primary keys.
"""
return self._primary
def _make(self):
primary = self._primary[:1]
return (primary,)
def query(self, data):
"""
Get all available values against the ``data``\'s primary keys.
"""
store = []
try:
store.extend(data[key] for key in self._primary)
except KeyError:
pass
return tuple(store)
def create(self, data):
"""
Create and put an entry in the spot designated by its primary keys.
"""
keys = self.query(data)
result = super().create(keys, data)
return result
def modify(self, keys, data):
"""
Modify an entry and change its spot if necessary.
"""
result = super().modify(keys, data)
new = self.query(data)
size = len(new)
old = keys[:size]
if not tuple(old) == new:
super().switch(old, new)
return result
class BulkRowCache(RowCache):
"""
Similar to its base, but data inputs should be arrays of data.
"""
__slots__ = ()
def create(self, data):
"""
Create and return all entries.
"""
results = []
for data in data:
result = super().create(data)
results.append(result)
return tuple(results)
def modify(self, keys, data):
"""
Modify all entries at ``keys`` and change their spot if necessary.
The keys don't need to be a full path to specific entries.
.. tip::
- Assume two entries exist at ``(0, 1, 2)`` and ``(0, 1, 4)``.
- Updating ``(0, 1)`` requires ``data`` to be a two-item array.
- That array should contain the respective data in order.
"""
node = super().select(keys)
depth = self._depth - len(keys)
items = pathing.derive(node, max = depth, min = depth)
try:
(subkeys, _entries) = zip(*items)
except ValueError:
subkeys = ()
results = []
for (subkeys, data) in zip(subkeys, data):
curkeys = (*keys, *subkeys)
result = super().modify(curkeys, data)
results.append(result)
return results
class SoftBulkRowCache(BulkRowCache):
"""
Lookup methods return an array of entries, or empty instead of failing.
"""
__slots__ = ()
@classmethod
def _flat(cls, node):
"""
Turn the node into either a tree's entries or a single-item array.
"""
return tuple(node.entries()) if isinstance(node, cls) else (node,)
def select(self, keys):
"""
Refer to :meth:`.Tree.select`.
"""
try:
node = super().select(keys)
except KeyError:
result = ()
else:
result = self._flat(node)
return result
def modify(self, keys, data):
"""
Refer to :meth:`.BulkRowCache.modify`.
"""
try:
result = super().modify(keys, data)
except KeyError:
result = ()
return result
def delete(self, keys):
"""
Refer to :meth:`.Tree.delete`.
"""
try:
node = super().delete(keys)
except KeyError:
result = ()
else:
result = self._flat(node)
return result
class AlikeBulkRowCache(SoftBulkRowCache):
"""
Active methods accept ``(keys, data)`` for consistency.
"""
__slots__ = ()
def create(self, keys, data):
"""
``data`` is used, ``keys`` is not.
Refer to :meth:`.BulkRowCache.create`.
"""
result = super().create(data)
return result
def modify(self, keys, data):
"""
``data`` and ``keys`` are used.
Refer to :meth:`.SoftBulkRowCache.modify`.
"""
result = super().modify(keys, data)
return result
def delete(self, keys, data):
"""
``keys`` is used, ``data`` is not.
Refer to :meth:`.SoftBulkRowCache.delete`.
"""
result = super().delete(keys)
return result
| [
"collections.namedtuple",
"itertools.starmap",
"copy.deepcopy",
"copy.copy",
"pathing.derive"
] | [((4868, 4916), 'collections.namedtuple', 'collections.namedtuple', (['"""Asset"""', '"""create modify"""'], {}), "('Asset', 'create modify')\n", (4890, 4916), False, 'import collections\n'), ((4026, 4041), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (4035, 4041), False, 'import copy\n'), ((5610, 5630), 'copy.deepcopy', 'copy.deepcopy', (['entry'], {}), '(entry)\n', (5623, 5630), False, 'import copy\n'), ((7926, 7968), 'pathing.derive', 'pathing.derive', (['node'], {'max': 'depth', 'min': 'depth'}), '(node, max=depth, min=depth)\n', (7940, 7968), False, 'import pathing\n'), ((2804, 2858), 'pathing.derive', 'pathing.derive', (['self'], {'min': 'self._depth', 'max': 'self._depth'}), '(self, min=self._depth, max=self._depth)\n', (2818, 2858), False, 'import pathing\n'), ((4137, 4157), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (4150, 4157), False, 'import copy\n'), ((4365, 4395), 'itertools.starmap', 'itertools.starmap', (['make', 'items'], {}), '(make, items)\n', (4382, 4395), False, 'import itertools\n')] |
#!/usr/bin/env python
"""
Manually tweak a talk's status, leaving a note about why.
"""
import sys
import argparse
import pycon_bot.mongo
from pycon_bot.models import TalkProposal, Note
p = argparse.ArgumentParser()
p.add_argument('talk_id', type=int)
p.add_argument('new_status', choices=[c[0] for c in TalkProposal.STATUSES])
p.add_argument('note')
p.add_argument('--dsn')
args = p.parse_args()
if not pycon_bot.mongo.connect(args.dsn):
sys.stderr.write("Need to pass --dsn or set env[MONGO_DSN].")
sys.exit(1)
t = TalkProposal.objects.get(talk_id=args.talk_id)
t.update(
push__notes = Note(text=args.note),
set__status = args.new_status
)
| [
"pycon_bot.models.TalkProposal.objects.get",
"argparse.ArgumentParser",
"sys.stderr.write",
"sys.exit",
"pycon_bot.models.Note"
] | [((192, 217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (215, 217), False, 'import argparse\n'), ((528, 574), 'pycon_bot.models.TalkProposal.objects.get', 'TalkProposal.objects.get', ([], {'talk_id': 'args.talk_id'}), '(talk_id=args.talk_id)\n', (552, 574), False, 'from pycon_bot.models import TalkProposal, Note\n'), ((445, 506), 'sys.stderr.write', 'sys.stderr.write', (['"""Need to pass --dsn or set env[MONGO_DSN]."""'], {}), "('Need to pass --dsn or set env[MONGO_DSN].')\n", (461, 506), False, 'import sys\n'), ((511, 522), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (519, 522), False, 'import sys\n'), ((603, 623), 'pycon_bot.models.Note', 'Note', ([], {'text': 'args.note'}), '(text=args.note)\n', (607, 623), False, 'from pycon_bot.models import TalkProposal, Note\n')] |
from __future__ import division, print_function, absolute_import, unicode_literals
import os
from mog_commons.case_class import CaseClass
from mog_commons.functional import omap, oget
class JavaSetting(CaseClass):
class Memory(CaseClass):
def __init__(self,
java_version, # just use for assertion
heap_min=None,
heap_max=None,
perm_min=None,
perm_max=None,
metaspace_min=None,
metaspace_max=None,
new_min=None,
new_max=None,
survivor_ratio=None,
target_survivor_ratio=None):
# constraints
assert perm_min is None or java_version < 1.8, 'java.memory.perm_min is not applicable to java >= 1.8'
assert perm_max is None or java_version < 1.8, 'java.memory.perm_max is not applicable to java >= 1.8'
assert metaspace_min is None or java_version >= 1.8, \
'java.memory.metaspace_min is not applicable to java < 1.8'
assert metaspace_max is None or java_version >= 1.8, \
'java.memory.metaspace_max is not applicable to java < 1.8'
CaseClass.__init__(
self,
('heap_min', heap_min),
('heap_max', heap_max),
('perm_min', perm_min),
('perm_max', perm_max),
('metaspace_min', metaspace_min),
('metaspace_max', metaspace_max),
('new_min', new_min),
('new_max', new_max),
('survivor_ratio', survivor_ratio),
('target_survivor_ratio', target_survivor_ratio)
)
def get_opts(self):
xs = [
omap(lambda s: '-Xms%s' % s, self.heap_min),
omap(lambda s: '-Xmx%s' % s, self.heap_max),
omap(lambda s: '-XX:PermSize=%s' % s, self.perm_min),
omap(lambda s: '-XX:MaxPermSize=%s' % s, self.perm_max),
omap(lambda s: '-XX:MetaspaceSize=%s' % s, self.metaspace_min),
omap(lambda s: '-XX:MaxMetaspaceSize=%s' % s, self.metaspace_max),
omap(lambda s: '-Xmn%s' % s, self.new_min),
omap(lambda s: '-XX:MaxNewSize=%s' % s, self.new_max),
omap(lambda x: '-XX:SurvivorRatio=%d' % x, self.survivor_ratio),
omap(lambda x: '-XX:TargetSurvivorRatio=%d' % x, self.target_survivor_ratio),
]
return [x for x in xs if x is not None]
class JMX(CaseClass):
def __init__(self, port=None, ssl=None, authenticate=None):
CaseClass.__init__(self, ('port', port), ('ssl', ssl), ('authenticate', authenticate))
def get_opts(self):
if self.port is None:
return []
xs = [
'-Dcom.sun.management.jmxremote',
omap(lambda x: '-Dcom.sun.management.jmxremote.port=%d' % x, self.port),
omap(lambda b: '-Dcom.sun.management.jmxremote.ssl=%s' % JavaSetting.py_to_java_str(b), self.ssl),
omap(lambda b: '-Dcom.sun.management.jmxremote.authenticate=%s' % JavaSetting.py_to_java_str(b), self.authenticate),
]
return [x for x in xs if x is not None]
def __init__(self,
home=None,
version=None,
server=None,
memory=None,
jmx=None,
prop=None,
option=None):
# constraints
assert home is not None and os.path.isabs(home), 'java.home is required and must be an absolute path'
assert version is not None, 'java.version is required'
# TODO: check value types and format
assert prop is None or isinstance(prop, dict), 'java.prop must be a dict'
assert option is None or isinstance(option, list), 'java.option must be a list'
CaseClass.__init__(
self,
('home', home),
('version', version),
('server', server),
('memory', JavaSetting.Memory(version, **oget(memory, {}))),
('jmx', JavaSetting.JMX(**oget(jmx, {}))),
('prop', oget(prop, {})),
('option', oget(option, [])),
)
def get_executable(self):
return os.path.join(self.home, 'bin', 'java')
def get_opts(self):
sv = ['-server'] if self.server else []
pr = ['-D%s=%s' % (k, JavaSetting.py_to_java_str(v)) for k, v in sorted(self.prop.items())]
return sv + self.memory.get_opts() + self.jmx.get_opts() + pr + self.option
def get_args(self):
return [self.get_executable()] + self.get_opts()
@staticmethod
def py_to_java_str(value):
"""Convert python data to Java-like string"""
if isinstance(value, bool):
return str(value).lower()
else:
return str(value)
| [
"os.path.isabs",
"mog_commons.functional.omap",
"mog_commons.case_class.CaseClass.__init__",
"os.path.join",
"mog_commons.functional.oget"
] | [((4434, 4472), 'os.path.join', 'os.path.join', (['self.home', '"""bin"""', '"""java"""'], {}), "(self.home, 'bin', 'java')\n", (4446, 4472), False, 'import os\n'), ((1277, 1613), 'mog_commons.case_class.CaseClass.__init__', 'CaseClass.__init__', (['self', "('heap_min', heap_min)", "('heap_max', heap_max)", "('perm_min', perm_min)", "('perm_max', perm_max)", "('metaspace_min', metaspace_min)", "('metaspace_max', metaspace_max)", "('new_min', new_min)", "('new_max', new_max)", "('survivor_ratio', survivor_ratio)", "('target_survivor_ratio', target_survivor_ratio)"], {}), "(self, ('heap_min', heap_min), ('heap_max', heap_max), (\n 'perm_min', perm_min), ('perm_max', perm_max), ('metaspace_min',\n metaspace_min), ('metaspace_max', metaspace_max), ('new_min', new_min),\n ('new_max', new_max), ('survivor_ratio', survivor_ratio), (\n 'target_survivor_ratio', target_survivor_ratio))\n", (1295, 1613), False, 'from mog_commons.case_class import CaseClass\n'), ((2741, 2831), 'mog_commons.case_class.CaseClass.__init__', 'CaseClass.__init__', (['self', "('port', port)", "('ssl', ssl)", "('authenticate', authenticate)"], {}), "(self, ('port', port), ('ssl', ssl), ('authenticate',\n authenticate))\n", (2759, 2831), False, 'from mog_commons.case_class import CaseClass\n'), ((3676, 3695), 'os.path.isabs', 'os.path.isabs', (['home'], {}), '(home)\n', (3689, 3695), False, 'import os\n'), ((1850, 1893), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-Xms%s' % s)", 'self.heap_min'], {}), "(lambda s: '-Xms%s' % s, self.heap_min)\n", (1854, 1893), False, 'from mog_commons.functional import omap, oget\n'), ((1911, 1954), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-Xmx%s' % s)", 'self.heap_max'], {}), "(lambda s: '-Xmx%s' % s, self.heap_max)\n", (1915, 1954), False, 'from mog_commons.functional import omap, oget\n'), ((1972, 2024), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-XX:PermSize=%s' % s)", 'self.perm_min'], {}), "(lambda s: '-XX:PermSize=%s' % s, 
self.perm_min)\n", (1976, 2024), False, 'from mog_commons.functional import omap, oget\n'), ((2042, 2097), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-XX:MaxPermSize=%s' % s)", 'self.perm_max'], {}), "(lambda s: '-XX:MaxPermSize=%s' % s, self.perm_max)\n", (2046, 2097), False, 'from mog_commons.functional import omap, oget\n'), ((2115, 2177), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-XX:MetaspaceSize=%s' % s)", 'self.metaspace_min'], {}), "(lambda s: '-XX:MetaspaceSize=%s' % s, self.metaspace_min)\n", (2119, 2177), False, 'from mog_commons.functional import omap, oget\n'), ((2195, 2260), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-XX:MaxMetaspaceSize=%s' % s)", 'self.metaspace_max'], {}), "(lambda s: '-XX:MaxMetaspaceSize=%s' % s, self.metaspace_max)\n", (2199, 2260), False, 'from mog_commons.functional import omap, oget\n'), ((2278, 2320), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-Xmn%s' % s)", 'self.new_min'], {}), "(lambda s: '-Xmn%s' % s, self.new_min)\n", (2282, 2320), False, 'from mog_commons.functional import omap, oget\n'), ((2338, 2391), 'mog_commons.functional.omap', 'omap', (["(lambda s: '-XX:MaxNewSize=%s' % s)", 'self.new_max'], {}), "(lambda s: '-XX:MaxNewSize=%s' % s, self.new_max)\n", (2342, 2391), False, 'from mog_commons.functional import omap, oget\n'), ((2409, 2472), 'mog_commons.functional.omap', 'omap', (["(lambda x: '-XX:SurvivorRatio=%d' % x)", 'self.survivor_ratio'], {}), "(lambda x: '-XX:SurvivorRatio=%d' % x, self.survivor_ratio)\n", (2413, 2472), False, 'from mog_commons.functional import omap, oget\n'), ((2490, 2566), 'mog_commons.functional.omap', 'omap', (["(lambda x: '-XX:TargetSurvivorRatio=%d' % x)", 'self.target_survivor_ratio'], {}), "(lambda x: '-XX:TargetSurvivorRatio=%d' % x, self.target_survivor_ratio)\n", (2494, 2566), False, 'from mog_commons.functional import omap, oget\n'), ((3002, 3073), 'mog_commons.functional.omap', 'omap', (["(lambda x: 
'-Dcom.sun.management.jmxremote.port=%d' % x)", 'self.port'], {}), "(lambda x: '-Dcom.sun.management.jmxremote.port=%d' % x, self.port)\n", (3006, 3073), False, 'from mog_commons.functional import omap, oget\n'), ((4319, 4333), 'mog_commons.functional.oget', 'oget', (['prop', '{}'], {}), '(prop, {})\n', (4323, 4333), False, 'from mog_commons.functional import omap, oget\n'), ((4359, 4375), 'mog_commons.functional.oget', 'oget', (['option', '[]'], {}), '(option, [])\n', (4363, 4375), False, 'from mog_commons.functional import omap, oget\n'), ((4223, 4239), 'mog_commons.functional.oget', 'oget', (['memory', '{}'], {}), '(memory, {})\n', (4227, 4239), False, 'from mog_commons.functional import omap, oget\n'), ((4281, 4294), 'mog_commons.functional.oget', 'oget', (['jmx', '{}'], {}), '(jmx, {})\n', (4285, 4294), False, 'from mog_commons.functional import omap, oget\n')] |
# Monitor and control Apache web server workers from Python.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 27, 2019
# URL: https://apache-manager.readthedocs.io
"""
A ``top`` like interactive viewer for Apache web server metrics.
The :mod:`~apache_manager.interactive` module implements a ``top`` like
interactive viewer for Apache web server metrics using curses_. It can be
invoked from the command line using ``apache-manager --watch``.
Please note that the functions in this module are not included in the test
suite and are excluded from coverage calculations because:
1. For now this module is just an interesting experiment. It might disappear
completely or I might change it significantly, it all depends on time and
interest. For example it would be cool to have a tool like mytop_ or
innotop_ for Apache workers, but it's going to take time to build something
like that and I have 40+ open source projects and limited spare time, so I'm
not going to commit to anything :-).
2. This is my first time working with Python's :mod:`curses` module (and
curses_ interfaces in general) and it's not yet clear to me how feasible it
is to test an interactive command line interface that's not line based.
.. _curses: https://en.wikipedia.org/wiki/Curses_(programming_library)
.. _innotop: https://github.com/innotop/innotop
.. _mytop: http://jeremy.zawodny.com/mysql/mytop/
"""
# Standard library modules.
import curses
import logging
import sys
import time
# External dependencies.
import coloredlogs
from humanfriendly.terminal import connected_to_terminal, warning
def watch_metrics(manager):
"""Watch Apache web server metrics in a ``top`` like interface."""
if connected_to_terminal(sys.stdout):
try:
curses.wrapper(redraw_loop, manager)
except KeyboardInterrupt:
pass
else:
warning("Error: The 'apache-manager --watch' command requires an interactive terminal!")
sys.exit(1)
def redraw_loop(screen, manager):
"""The main loop that continuously redraws Apache web server metrics."""
# Ugly workaround to avoid circular import errors due to interdependencies
# between the apache_manager.cli and apache_manager.interactive modules.
from apache_manager.cli import report_metrics, line_is_heading
# Hide warnings (they'll mess up the curses layout).
coloredlogs.set_level(logging.ERROR)
# Hide the text cursor.
cursor_mode = curses.curs_set(0)
# Make Control-C behave normally.
curses.noraw()
# Enable non-blocking getch().
screen.nodelay(True)
try:
# Repeat until the user aborts.
while True:
lnum = 0
for line in report_metrics(manager):
attributes = 0
if line_is_heading(line):
attributes |= curses.A_BOLD
screen.addstr(lnum, 0, line, attributes)
lnum += 1
# Redraw screen.
screen.refresh()
# Wait a while before refreshing the screen, but enable the user to
# quit in the mean time.
for i in range(10):
if screen.getch() == ord('q'):
return
# Don't burn through CPU like crazy :-).
time.sleep(0.1)
# Update metrics in next iteration.
manager.refresh()
# Clear screen for next iteration.
screen.erase()
finally:
# Restore cursor mode.
curses.curs_set(cursor_mode)
# Clean up the screen after we're done.
screen.erase()
| [
"apache_manager.cli.report_metrics",
"curses.wrapper",
"apache_manager.cli.line_is_heading",
"curses.curs_set",
"time.sleep",
"sys.exit",
"coloredlogs.set_level",
"humanfriendly.terminal.warning",
"humanfriendly.terminal.connected_to_terminal",
"curses.noraw"
] | [((1712, 1745), 'humanfriendly.terminal.connected_to_terminal', 'connected_to_terminal', (['sys.stdout'], {}), '(sys.stdout)\n', (1733, 1745), False, 'from humanfriendly.terminal import connected_to_terminal, warning\n'), ((2384, 2420), 'coloredlogs.set_level', 'coloredlogs.set_level', (['logging.ERROR'], {}), '(logging.ERROR)\n', (2405, 2420), False, 'import coloredlogs\n'), ((2467, 2485), 'curses.curs_set', 'curses.curs_set', (['(0)'], {}), '(0)\n', (2482, 2485), False, 'import curses\n'), ((2528, 2542), 'curses.noraw', 'curses.noraw', ([], {}), '()\n', (2540, 2542), False, 'import curses\n'), ((1878, 1976), 'humanfriendly.terminal.warning', 'warning', (['"""Error: The \'apache-manager --watch\' command requires an interactive terminal!"""'], {}), '(\n "Error: The \'apache-manager --watch\' command requires an interactive terminal!"\n )\n', (1885, 1976), False, 'from humanfriendly.terminal import connected_to_terminal, warning\n'), ((1975, 1986), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1983, 1986), False, 'import sys\n'), ((3520, 3548), 'curses.curs_set', 'curses.curs_set', (['cursor_mode'], {}), '(cursor_mode)\n', (3535, 3548), False, 'import curses\n'), ((1772, 1808), 'curses.wrapper', 'curses.wrapper', (['redraw_loop', 'manager'], {}), '(redraw_loop, manager)\n', (1786, 1808), False, 'import curses\n'), ((2717, 2740), 'apache_manager.cli.report_metrics', 'report_metrics', (['manager'], {}), '(manager)\n', (2731, 2740), False, 'from apache_manager.cli import report_metrics, line_is_heading\n'), ((2792, 2813), 'apache_manager.cli.line_is_heading', 'line_is_heading', (['line'], {}), '(line)\n', (2807, 2813), False, 'from apache_manager.cli import report_metrics, line_is_heading\n'), ((3300, 3315), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3310, 3315), False, 'import time\n')] |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from users.views import LogIn, LogOut, Settings, SignUp
urlpatterns = [
path(f"{settings.ADMIN_URL}/", admin.site.urls),
path("", include("questions.urls")),
path("api/v1/", include("api.urls")),
path("login", LogIn.as_view(), name="login"),
path("logout", LogOut.as_view(), name="logout"),
path("settings", Settings.as_view(), name="settings"),
path("signup", SignUp.as_view(), name="signup"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.urls.include",
"users.views.SignUp.as_view",
"django.conf.urls.static.static",
"users.views.LogOut.as_view",
"users.views.LogIn.as_view",
"django.urls.path",
"users.views.Settings.as_view"
] | [((226, 273), 'django.urls.path', 'path', (['f"""{settings.ADMIN_URL}/"""', 'admin.site.urls'], {}), "(f'{settings.ADMIN_URL}/', admin.site.urls)\n", (230, 273), False, 'from django.urls import include, path\n'), ((614, 675), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (620, 675), False, 'from django.conf.urls.static import static\n'), ((288, 313), 'django.urls.include', 'include', (['"""questions.urls"""'], {}), "('questions.urls')\n", (295, 313), False, 'from django.urls import include, path\n'), ((336, 355), 'django.urls.include', 'include', (['"""api.urls"""'], {}), "('api.urls')\n", (343, 355), False, 'from django.urls import include, path\n'), ((376, 391), 'users.views.LogIn.as_view', 'LogIn.as_view', ([], {}), '()\n', (389, 391), False, 'from users.views import LogIn, LogOut, Settings, SignUp\n'), ((427, 443), 'users.views.LogOut.as_view', 'LogOut.as_view', ([], {}), '()\n', (441, 443), False, 'from users.views import LogIn, LogOut, Settings, SignUp\n'), ((482, 500), 'users.views.Settings.as_view', 'Settings.as_view', ([], {}), '()\n', (498, 500), False, 'from users.views import LogIn, LogOut, Settings, SignUp\n'), ((539, 555), 'users.views.SignUp.as_view', 'SignUp.as_view', ([], {}), '()\n', (553, 555), False, 'from users.views import LogIn, LogOut, Settings, SignUp\n')] |
from numpy import *
from math import log
# x: features, y: classes
def infogain(x, y):
info_gains = zeros(x.shape[1]) # features of x
# calculate entropy of the data *hy*
# with regards to class y
cl = unique(y)
hy = 0
for i in range(len(cl)):
c = cl[i]
py = float(sum(y==c))/len(y) # probability of the class c in the data
hy = hy+py*log(py,2)
hy = -hy
# compute IG for each feature (columns)
for col in range(x.shape[1]): # features are on the columns
values = unique(x[:,col]) # the distinct values of each feature
# calculate conditional entropy *hyx = H(Y|X)*
hyx = 0
for i in range(len(values)): # for all values of the feature
f = values[i] # value of the specific feature
yf = y[where(x[:,col]==f)] # array with the the data points index where feature i = f
# calculate h for classes given feature f
yclasses = unique(yf) # number of classes
# hyx = 0; # conditional class probability initialization
for j in range(len(yclasses)):
yc = yclasses[j]
pyf = float(sum(yf==yc))/len(yf) # probability calls condition on the feature value
hyx = hyx+pyf*log(pyf,2) # conditional entropy
hyx = -hyx
# Information gain
info_gains[col] = hy - hyx
return info_gains
| [
"math.log"
] | [((389, 399), 'math.log', 'log', (['py', '(2)'], {}), '(py, 2)\n', (392, 399), False, 'from math import log\n'), ((1277, 1288), 'math.log', 'log', (['pyf', '(2)'], {}), '(pyf, 2)\n', (1280, 1288), False, 'from math import log\n')] |
import unittest
from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, \
timedelta_from_field_dict
from qpanel.convert import convert_time_when_param
import time
import datetime
class UtilsTestClass(unittest.TestCase):
    """Unit tests for qpanel's string/time utilities and the
    time-when parameter converter."""

    def test_clean_str_to_div(self):
        # Slashes, dots and '@' must be rewritten into div-id-safe characters.
        self.assertEqual(clean_str_to_div_id('ro/.d. i _@l_k/d_@'),
                         'ro-_d_ i __l_k-d__')

    def test_underscore_to_camelcase(self):
        cases = [
            ('rodrigoRamirez', 'Rodrigoramirez'),
            ('rodrigo_Ramirez', 'RodrigoRamirez'),
            ('rodrigo_ramirez', 'RodrigoRamirez'),
            ('_rodrigo_ramirez', '_RodrigoRamirez'),
        ]
        for raw, expected in cases:
            self.assertEqual(underscore_to_camelcase(raw), expected)

    def test_timedelta_from_field_dict(self):
        now = time.time()
        data = {'time': now, 'time2': 'hola'}
        self.assertEqual(timedelta_from_field_dict('time', data, now + 1),
                         datetime.timedelta(0, 1))
        self.assertNotEqual(timedelta_from_field_dict('time', data, now + 1),
                            datetime.timedelta(0, 10))
        self.assertEqual(timedelta_from_field_dict('time', data, now + 100),
                         datetime.timedelta(0, 100))
        # A key that is missing from the dict yields a zero delta.
        self.assertEqual(timedelta_from_field_dict('timeno', data, now + 100),
                         datetime.timedelta(0, 0))
        self.assertEqual(
            str(timedelta_from_field_dict('time', data, now + 1)), '0:00:01')
        self.assertEqual(timedelta_from_field_dict('time', data, now),
                         datetime.timedelta(0, 0))
        self.assertEqual(
            str(timedelta_from_field_dict('time', data, now)), '0:00:00')
        data2 = {'time': 60, 'time2': 6001}
        self.assertEqual(str(timedelta_from_field_dict(
            'time', data2, None, True)), '0:01:00')
        self.assertEqual(str(timedelta_from_field_dict(
            'time2', data2, None, True)), '1:40:01')

    def test_convert_time_when_param(self):
        cases = [
            ('test1,00:00:00', {'when': 'test1', 'hour': '00:00:00'}),
            ('test1', {'when': 'test1', 'hour': '00:00:00'}),
            ('test1, 00:00:01', {'when': 'test1', 'hour': '00:00:01'}),
            # An unparseable hour part falls back to midnight.
            ('test1, string_wrong', {'when': 'test1', 'hour': '00:00:00'}),
        ]
        for raw, expected in cases:
            self.assertEqual(convert_time_when_param(raw), expected)
        self.assertEqual(convert_time_when_param('test1; 00:00:01', splitter=';'),
                         {'when': 'test1', 'hour': '00:00:01'})
# Run the suite when this file is executed directly (e.g. `python test_utils.py`).
if __name__ == '__main__':
    unittest.main()
| [
"qpanel.utils.clean_str_to_div_id",
"qpanel.utils.underscore_to_camelcase",
"qpanel.utils.timedelta_from_field_dict",
"unittest.main",
"datetime.timedelta",
"qpanel.convert.convert_time_when_param",
"time.time"
] | [((3181, 3196), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3194, 3196), False, 'import unittest\n'), ((898, 909), 'time.time', 'time.time', ([], {}), '()\n', (907, 909), False, 'import time\n'), ((339, 363), 'qpanel.utils.clean_str_to_div_id', 'clean_str_to_div_id', (['div'], {}), '(div)\n', (358, 363), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((486, 512), 'qpanel.utils.underscore_to_camelcase', 'underscore_to_camelcase', (['a'], {}), '(a)\n', (509, 512), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((587, 613), 'qpanel.utils.underscore_to_camelcase', 'underscore_to_camelcase', (['a'], {}), '(a)\n', (610, 613), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((688, 714), 'qpanel.utils.underscore_to_camelcase', 'underscore_to_camelcase', (['a'], {}), '(a)\n', (711, 714), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((790, 816), 'qpanel.utils.underscore_to_camelcase', 'underscore_to_camelcase', (['a'], {}), '(a)\n', (813, 816), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((978, 1023), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd', '(now + 1)'], {}), "('time', d, now + 1)\n", (1003, 1023), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((1050, 1074), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(1)'], {}), '(0, 1)\n', (1068, 1074), False, 'import datetime\n'), ((1117, 1162), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd', '(now + 1)'], {}), "('time', d, now + 1)\n", (1142, 1162), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, 
timedelta_from_field_dict\n'), ((1225, 1250), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(10)'], {}), '(0, 10)\n', (1243, 1250), False, 'import datetime\n'), ((1323, 1370), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd', '(now + 100)'], {}), "('time', d, now + 100)\n", (1348, 1370), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((1433, 1459), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(100)'], {}), '(0, 100)\n', (1451, 1459), False, 'import datetime\n'), ((1532, 1581), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""timeno"""', 'd', '(now + 100)'], {}), "('timeno', d, now + 100)\n", (1557, 1581), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((1644, 1668), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(0)'], {}), '(0, 0)\n', (1662, 1668), False, 'import datetime\n'), ((1842, 1883), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd', 'now'], {}), "('time', d, now)\n", (1867, 1883), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((1902, 1926), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(0)'], {}), '(0, 0)\n', (1920, 1926), False, 'import datetime\n'), ((2398, 2428), 'qpanel.convert.convert_time_when_param', 'convert_time_when_param', (['value'], {}), '(value)\n', (2421, 2428), False, 'from qpanel.convert import convert_time_when_param\n'), ((2544, 2574), 'qpanel.convert.convert_time_when_param', 'convert_time_when_param', (['value'], {}), '(value)\n', (2567, 2574), False, 'from qpanel.convert import convert_time_when_param\n'), ((2700, 2730), 'qpanel.convert.convert_time_when_param', 'convert_time_when_param', (['value'], {}), '(value)\n', (2723, 2730), False, 'from qpanel.convert import convert_time_when_param\n'), ((2860, 
2890), 'qpanel.convert.convert_time_when_param', 'convert_time_when_param', (['value'], {}), '(value)\n', (2883, 2890), False, 'from qpanel.convert import convert_time_when_param\n'), ((3016, 3060), 'qpanel.convert.convert_time_when_param', 'convert_time_when_param', (['value'], {'splitter': '""";"""'}), "(value, splitter=';')\n", (3039, 3060), False, 'from qpanel.convert import convert_time_when_param\n'), ((1745, 1790), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd', '(now + 1)'], {}), "('time', d, now + 1)\n", (1770, 1790), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((1987, 2028), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd', 'now'], {}), "('time', d, now)\n", (2012, 2028), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((2113, 2162), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time"""', 'd2', 'None', '(True)'], {}), "('time', d2, None, True)\n", (2138, 2162), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n'), ((2218, 2268), 'qpanel.utils.timedelta_from_field_dict', 'timedelta_from_field_dict', (['"""time2"""', 'd2', 'None', '(True)'], {}), "('time2', d2, None, True)\n", (2243, 2268), False, 'from qpanel.utils import clean_str_to_div_id, underscore_to_camelcase, timedelta_from_field_dict\n')] |
import sys
import subprocess
import_dir = sys.argv[1]
sys.path.append(import_dir)
# https://python-chess.readthedocs.io/en/v0.23.10/
import chess
import chess.uci
def main():
    """Command loop: read '#'-separated commands from stdin, drive the
    chess engine, and flush stdout after every command so the caller
    (chess_displayable.rpy) can read the reply immediately."""
    engine = ChessEngine()
    while True:
        # Split token must match the one used in chess_displayable.rpy.
        fields = raw_input().split('#')
        if not fields:
            continue
        command = fields[0]
        if command == 'quit':
            engine.kill_stockfish()
            break
        # Commands that consume the raw field list.
        with_args = {
            'fen': engine.init_board,
            'stockfish': engine.init_stockfish,
            'piece_at': engine.get_piece_at,
            'is_capture': engine.get_is_capture,
            'push_move': engine.push_move,
        }
        # Commands that take no extra arguments.
        no_args = {
            'stockfish_move': engine.get_stockfish_move,
            'game_status': engine.get_game_status,
            'legal_moves': engine.get_legal_moves,
            'pop_move': engine.pop_move,
        }
        if command in with_args:
            with_args[command](fields)
        elif command in no_args:
            no_args[command]()
        sys.stdout.flush()
class ChessEngine():
    """Wraps a python-chess board plus a stockfish UCI engine and
    answers the commands relayed by main(), printing results to stdout."""

    def __init__(self):
        # Game-status codes; must stay in sync with chess_displayable.rpy.
        self.INCHECK = 1
        self.THREEFOLD = 2
        self.FIFTYMOVES = 3
        self.DRAW = 4
        self.CHECKMATE = 5
        self.STALEMATE = 6
        self.board = None        # the chess.Board being played
        self.stockfish = None    # UCI engine handle
        self.stockfish_movetime = None
        self.stockfish_depth = None

    def init_board(self, args):
        """Create the board from the FEN string in args[1]."""
        self.board = chess.Board(fen=args[1])

    def init_stockfish(self, args):
        """Launch stockfish; args = [_, path, is_windows, movetime, depth]."""
        stockfish_path = args[1]
        is_os_windows = eval(args[2])  # value comes from our own caller, not user input
        self.stockfish_movetime = int(args[3])
        self.stockfish_depth = int(args[4])
        # Stop stockfish from opening a console window on Windows,
        # see https://stackoverflow.com/a/63538680
        startupinfo = None
        if is_os_windows:
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
        self.stockfish = chess.uci.popen_engine(stockfish_path, startupinfo=startupinfo)
        self.stockfish.uci()
        self.stockfish.position(self.board)

    def kill_stockfish(self):
        """Shut down the engine subprocess."""
        self.stockfish.quit()

    def get_piece_at(self, args):
        """Print the symbol of the piece at (file, rank), or 'None'."""
        square = chess.square(int(args[1]), int(args[2]))
        piece = self.board.piece_at(square)
        print(piece.symbol() if piece else 'None')

    def get_is_capture(self, args):
        """Print whether the UCI move in args[1] would capture a piece."""
        print(self.board.is_capture(chess.Move.from_uci(args[1])))

    def get_game_status(self):
        """Print the first matching status code; -1 means no change."""
        ordered_checks = (
            (self.board.is_checkmate, self.CHECKMATE),
            (self.board.is_stalemate, self.STALEMATE),
            (self.board.can_claim_threefold_repetition, self.THREEFOLD),
            (self.board.can_claim_fifty_moves, self.FIFTYMOVES),
            (self.board.is_check, self.INCHECK),
        )
        for predicate, status in ordered_checks:
            if predicate():
                print(status)
                return
        print('-1')

    def get_stockfish_move(self):
        """Ask stockfish for its best move and print it in UCI form."""
        self.stockfish.position(self.board)
        result = self.stockfish.go(movetime=self.stockfish_movetime,
                                   depth=self.stockfish_depth)
        print(result.bestmove.uci())

    def get_legal_moves(self):
        """Print every legal move in UCI notation, '#'-separated."""
        print('#'.join(move.uci() for move in self.board.legal_moves))

    def push_move(self, args):
        """Apply the UCI move in args[1], then print whose turn it is."""
        self.board.push(chess.Move.from_uci(args[1]))
        print(self.board.turn)

    def pop_move(self):
        """Undo the last move and print whose turn it is; the caller
        guarantees the move stack is non-empty."""
        self.board.pop()
        print(self.board.turn)
if __name__ == '__main__':
main() | [
"chess.Move.from_uci",
"chess.uci.popen_engine",
"chess.Board",
"subprocess.STARTUPINFO",
"sys.stdout.flush",
"sys.path.append",
"chess.square"
] | [((55, 82), 'sys.path.append', 'sys.path.append', (['import_dir'], {}), '(import_dir)\n', (70, 82), False, 'import sys\n'), ((1233, 1251), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1249, 1251), False, 'import sys\n'), ((1770, 1790), 'chess.Board', 'chess.Board', ([], {'fen': 'fen'}), '(fen=fen)\n', (1781, 1790), False, 'import chess\n'), ((2298, 2361), 'chess.uci.popen_engine', 'chess.uci.popen_engine', (['stockfish_path'], {'startupinfo': 'startupinfo'}), '(stockfish_path, startupinfo=startupinfo)\n', (2320, 2361), False, 'import chess\n'), ((2828, 2857), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move_uci'], {}), '(move_uci)\n', (2847, 2857), False, 'import chess\n'), ((3865, 3894), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move_uci'], {}), '(move_uci)\n', (3884, 3894), False, 'import chess\n'), ((2181, 2205), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (2203, 2205), False, 'import subprocess\n'), ((2623, 2655), 'chess.square', 'chess.square', (['file_idx', 'rank_idx'], {}), '(file_idx, rank_idx)\n', (2635, 2655), False, 'import chess\n')] |
# -*- coding: utf-8 -*-
"""Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
from builtins import object
from abc import ABCMeta, abstractmethod, abstractproperty
from future.utils import with_metaclass
class DomainInterface(with_metaclass(ABCMeta, object)):
    """Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
    @abstractproperty
    def dim(self):
        """Return the number of spatial dimensions of the domain."""
        pass
    @abstractmethod
    def check_point_inside(self, point):
        r"""Check if a point is inside the domain/on its boundary or outside.
        :param point: point to check
        :type point: array of float64 with shape (dim)
        :return: true if point is inside the domain
        :rtype: bool
        """
        pass
    @abstractmethod
    def get_bounding_box(self):
        """Return a list of ClosedIntervals representing a bounding box for this domain."""
        pass
    @abstractmethod
    def get_constraint_list(self):
        """Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA.
        :return: a list of lambda functions corresponding to constraints
        :rtype: array of lambda functions with shape (dim * 2)
        """
        pass
    @abstractmethod
    def generate_random_point_in_domain(self, random_source=None):
        """Generate ``point`` uniformly at random such that ``self.check_point_inside(point)`` is True.
        .. Note:: if you need multiple points, use generate_uniform_random_points_in_domain instead;
          depending on implementation, it may yield better distributions over many points. For example,
          tensor product type domains use latin hypercube sampling instead of repeated random draws
          which guarantees that no non-uniform clusters may arise (in subspaces) versus this method
          which treats all draws independently.
        :return: point in domain
        :rtype: array of float64 with shape (dim)
        """
        pass
    @abstractmethod
    def generate_uniform_random_points_in_domain(self, num_points, random_source):
        r"""Generate AT MOST ``num_points`` uniformly distributed points from the domain.
        .. NOTE::
             The number of points returned may be LESS THAN ``num_points``!
        Implementations may use rejection sampling. In such cases, generating the requested
        number of points may be unreasonably slow, so implementers are allowed to generate
        fewer than ``num_points`` results.
        :param num_points: max number of points to generate
        :type num_points: int >= 0
        :param random_source: source of uniform random numbers used for sampling
        :type random_source: callable yielding uniform random numbers in [0,1]
        :return: uniform random sampling of points from the domain; may be fewer than ``num_points``!
        :rtype: array of float64 with shape (num_points_generated, dim)
        """
        pass
    @abstractmethod
    def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
        r"""Compute a new update so that CheckPointInside(``current_point`` + ``new_update``) is true.
        Changes new_update_vector so that:
          ``point_new = point + new_update_vector``
        has coordinates such that ``CheckPointInside(point_new)`` returns true.
        ``new_update_vector`` is a function of ``update_vector``.
        ``new_update_vector`` is just a copy of ``update_vector`` if ``current_point`` is already inside the domain.
        .. NOTE::
            We modify update_vector (instead of returning point_new) so that further update
            limiting/testing may be performed.
        :param max_relative_change: max change allowed per update (as a relative fraction of current distance to boundary)
        :type max_relative_change: float64 in (0, 1]
        :param current_point: starting point
        :type current_point: array of float64 with shape (dim)
        :param update_vector: proposed update
        :type update_vector: array of float64 with shape (dim)
        :return: new update so that the final point remains inside the domain
        :rtype: array of float64 with shape (dim)
        """
        pass
| [
"future.utils.with_metaclass"
] | [((294, 325), 'future.utils.with_metaclass', 'with_metaclass', (['ABCMeta', 'object'], {}), '(ABCMeta, object)\n', (308, 325), False, 'from future.utils import with_metaclass\n')] |
import re
# $ babyLisp(‘(add 1 2)’)
# $ 3
# $ babyLisp(‘(multiply 4 (add 2 3))’)
# $ 20
def add(a, b):
    """Return the sum of *a* and *b*."""
    return a + b
def subtract(a, b):
    """Return *a* minus *b*."""
    return a - b
def multiply(a, b):
    """Return the product of *a* and *b*."""
    return a * b
def divide(a, b):
    """Return *a* divided by *b* (true division)."""
    return a / b
def baby_lisp(lisp_string):
    """Evaluate a tiny prefix-notation Lisp expression.

    Supported forms are nested two-argument calls to the operators
    defined above, e.g. ``'(multiply 4 (add 2 3))'`` -> ``20``.

    The previous implementation rewrote the string with regexes and ran
    it through ``eval()``, which executes arbitrary Python for crafted
    input; this version tokenizes and evaluates the expression directly.

    :param lisp_string: expression such as ``'(add 1 2)'``
    :return: the numeric result of evaluating the expression
    :raises ValueError: if the expression is malformed
    :raises KeyError: if an unknown operator name is used
    """
    operators = {'add': add, 'subtract': subtract,
                 'multiply': multiply, 'divide': divide}
    # Pad parentheses with spaces so a plain split() tokenizes cleanly.
    tokens = lisp_string.replace('(', ' ( ').replace(')', ' ) ').split()

    def parse(index):
        # Evaluate the expression starting at tokens[index];
        # return (value, index just past the expression).
        if tokens[index] == '(':
            op = operators[tokens[index + 1]]
            left, index = parse(index + 2)
            right, index = parse(index)
            if index >= len(tokens) or tokens[index] != ')':
                raise ValueError('malformed expression: %r' % lisp_string)
            return op(left, right), index + 1
        return int(tokens[index]), index + 1

    value, _ = parse(0)
    return value
# Smoke tests: run at import time so any regression fails immediately.
assert baby_lisp("(add 1 2)") == 3
assert baby_lisp("(multiply 4 (add 2 3))") == 20
assert baby_lisp("(multiply (add (subtract 2 1) (multiply 5 1)) (add 2 3))") == 30
| [
"re.sub"
] | [((272, 304), 're.sub', 're.sub', (['"""\\\\)"""', '"""))"""', 'lisp_string'], {}), "('\\\\)', '))', lisp_string)\n", (278, 304), False, 'import re\n'), ((320, 357), 're.sub', 're.sub', (['"""([a-z]+) """', '"""\\\\1("""', 'brackets'], {}), "('([a-z]+) ', '\\\\1(', brackets)\n", (326, 357), False, 'import re\n'), ((371, 397), 're.sub', 're.sub', (['""" """', '""","""', 'commands'], {}), "(' ', ',', commands)\n", (377, 397), False, 'import re\n')] |
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
from sqlite3 import Error
#creating window
class Fp(Tk):
    """'Forgot Password' window.

    Verifies the admin's user id + security question/answer against the
    ``admin`` table in library_administration.db, then lets them set a
    new password that satisfies the password policy.
    """
    def __init__(self):
        super().__init__()
        self.iconbitmap(r'libico.ico')
        self.maxsize(480, 320)
        self.title("Forget Password")
        self.canvas = Canvas(width=500, height=500, bg='black')
        self.canvas.pack()
        self.photo = PhotoImage(file='forgot.png')
        self.canvas.create_image(-20, -20, image=self.photo, anchor=NW)
        # Form state: user id, security question, answer, new password, retype.
        a = StringVar()
        b = StringVar()
        c = StringVar()
        d = StringVar()
        e = StringVar()

        def password_ok(pwd):
            """Return True when pwd satisfies the policy: at least 8
            characters, one lowercase, one uppercase, one digit, one of
            [_@$], and no whitespace."""
            import re  # local import: this chunk's module header does not import re
            if len(pwd) < 8 or re.search(r"\s", pwd):
                return False
            return all(re.search(pattern, pwd)
                       for pattern in ("[a-z]", "[A-Z]", "[0-9]", "[_@$]"))

        def ins():
            """Validate the new password and write it to the database.

            Bug fix vs. the original: ``flag`` was only assigned inside
            the ``len < 8`` branch, so any password of 8+ characters
            raised a NameError, and short passwords that happened to pass
            the character-class checks were accepted despite the stated
            8-character minimum.
            """
            if len(d.get()) == 0:
                messagebox.showinfo("Error","Please Enter Your Password")
            elif not password_ok(d.get()):
                messagebox.showinfo("Error","Minimum 8 characters.\nThe alphabets must be between [a-z]\nAt least one alphabet should be of Upper Case [A-Z]\nAt least 1 number or digit between [0-9].\nAt least 1 character from [ _ or @ or $ ].")
            elif d.get() != e.get():
                messagebox.showinfo("Error","New and retype password are not some")
            else:
                try:
                    self.conn = sqlite3.connect('library_administration.db')
                    self.myCursor = self.conn.cursor()
                    # Parameterized query: never interpolate user input into SQL.
                    self.myCursor.execute("Update admin set password = ? where id = ?",[e.get(),a.get()])
                    self.conn.commit()
                    self.myCursor.close()
                    self.conn.close()
                    messagebox.showinfo("Confirm","Password Updated Successfuly")
                    self.destroy()
                except Error:
                    messagebox.showerror("Error","Something Goes Wrong")

        def check():
            """Look the user up; when id, question and answer all match,
            reveal the new-password widgets.

            Bug fix vs. the original: the match used ``or``, so any row
            returned for the id passed the test and the security answer
            was never actually verified; it now requires all three
            fields to match and reports a failure otherwise.
            """
            if len(a.get()) < 5:
                messagebox.showinfo("Error","Please Enter User Id")
            elif len(b.get()) == 0:
                messagebox.showinfo("Error","Please Choose a question")
            elif len(c.get()) == 0:
                messagebox.showinfo("Error", "Please Enter a answer")
            else:
                try:
                    self.conn = sqlite3.connect('library_administration.db')
                    self.myCursor = self.conn.cursor()
                    self.myCursor.execute("Select id,secQuestion,secAnswer from admin where id = ?",[a.get()])
                    pc = self.myCursor.fetchone()
                    if not pc:
                        messagebox.showinfo("Error", "Something Wrong in the Details")
                    elif str(pc[0]) == a.get() and str(pc[1]) == b.get() and str(pc[2]) == c.get():
                        Label(self, text="New Password", font=('arial', 15, 'bold')).place(x=40, y=220)
                        Entry(self, show = "*", textvariable=d, width=40).place(x=230, y=224)
                        Label(self, text="Retype Password", font=('arial', 15, 'bold')).place(x=40, y=270)
                        Entry(self, show = "*", textvariable=e, width=40).place(x=230, y=274)
                        Button(self, text="Submit", width=15, command=ins).place(x=230, y=324)
                    else:
                        messagebox.showinfo("Error", "Something Wrong in the Details")
                except Error:
                    messagebox.showerror("Error","Something Goes Wrong")

        # Static form widgets.
        Label(self, text="Enter User Id",bg='black',fg='white', font=('arial', 15, 'bold')).place(x=40, y=20)
        Label(self, text="Security Question",bg='black',fg='white',font=('arial', 15, 'bold')).place(x=40, y= 70)
        Label(self, text="Security Answer",bg='black',fg='white',font=('arial', 15, 'bold')).place(x=40, y= 120)
        Entry(self, textvariable=a, width=40).place(x=230, y=24)
        ttk.Combobox(self, textvariable = b,values=["What is your school name?", "What is your home name?","What is your Father name?", "What is your pet name?"], width=37,state="readonly").place(x=230, y=74)
        Entry(self, show = "*", textvariable=c, width=40).place(x=230, y=124)
        Button(self, text='Verify', width=15,command = check).place(x=275, y=170)
Fp().mainloop()
| [
"tkinter.messagebox.showerror",
"tkinter.messagebox.showinfo",
"sqlite3.connect",
"tkinter.ttk.Combobox"
] | [((2763, 2815), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Please Enter User Id"""'], {}), "('Error', 'Please Enter User Id')\n", (2782, 2815), False, 'from tkinter import messagebox\n'), ((4641, 4831), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self'], {'textvariable': 'b', 'values': "['What is your school name?', 'What is your home name?',\n 'What is your Father name?', 'What is your pet name?']", 'width': '(37)', 'state': '"""readonly"""'}), "(self, textvariable=b, values=['What is your school name?',\n 'What is your home name?', 'What is your Father name?',\n 'What is your pet name?'], width=37, state='readonly')\n", (4653, 4831), False, 'from tkinter import ttk\n'), ((1594, 1652), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Please Enter Your Password"""'], {}), "('Error', 'Please Enter Your Password')\n", (1613, 1652), False, 'from tkinter import messagebox\n'), ((1992, 2060), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""New and retype password are not some"""'], {}), "('Error', 'New and retype password are not some')\n", (2011, 2060), False, 'from tkinter import messagebox\n'), ((2869, 2925), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Please Choose a question"""'], {}), "('Error', 'Please Choose a question')\n", (2888, 2925), False, 'from tkinter import messagebox\n'), ((1707, 1946), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Minimum 8 characters.\nThe alphabets must be between [a-z]\nAt least one alphabet should be of Upper Case [A-Z]\nAt least 1 number or digit between [0-9].\nAt least 1 character from [ _ or @ or $ ]."""'], {}), '(\'Error\',\n """Minimum 8 characters.\nThe alphabets must be between [a-z]\nAt least one alphabet should be of Upper Case [A-Z]\nAt least 1 number or digit between [0-9].\nAt least 1 character from [ _ or @ or $ ]."""\n )\n', (1726, 1946), False, 'from tkinter import 
messagebox\n'), ((2134, 2178), 'sqlite3.connect', 'sqlite3.connect', (['"""library_administration.db"""'], {}), "('library_administration.db')\n", (2149, 2178), False, 'import sqlite3\n'), ((2485, 2547), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Confirm"""', '"""Password Updated Successfuly"""'], {}), "('Confirm', 'Password Updated Successfuly')\n", (2504, 2547), False, 'from tkinter import messagebox\n'), ((2979, 3032), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Please Enter a answer"""'], {}), "('Error', 'Please Enter a answer')\n", (2998, 3032), False, 'from tkinter import messagebox\n'), ((2635, 2688), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something Goes Wrong"""'], {}), "('Error', 'Something Goes Wrong')\n", (2655, 2688), False, 'from tkinter import messagebox\n'), ((3107, 3151), 'sqlite3.connect', 'sqlite3.connect', (['"""library_administration.db"""'], {}), "('library_administration.db')\n", (3122, 3151), False, 'import sqlite3\n'), ((3428, 3490), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Something Wrong in the Details"""'], {}), "('Error', 'Something Wrong in the Details')\n", (3447, 3490), False, 'from tkinter import messagebox\n'), ((4141, 4194), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something Goes Wrong"""'], {}), "('Error', 'Something Goes Wrong')\n", (4161, 4194), False, 'from tkinter import messagebox\n')] |
import config
from selectsql import SelectSql
class RssSql(object):
    """Async data-access layer for the rss_user / xpath / rss tables.

    Every method opens a connection via SelectSql, runs one query, and
    closes the connection before returning.

    NOTE(review): the original "not success, return []" comments were
    only accurate for the ``fetch`` methods; ``fetchrow`` presumably
    returns ``None`` (not ``[]``) when no row matches — confirm against
    SelectSql's underlying driver.
    """
    def __init__(self):
        self.database = config.get_database_config()
        self.select_sql = SelectSql(self.database)
        # Sentinel strings used by the success/failure-reporting methods.
        self.do_not_success = "do_not_success"
        self.do_success = "do_success"
        # Scratch state; not read by any of the methods below.
        self.user = {}
        self.xpath = {}
        self.xpath_id = -1
    async def get_user_id_password(self,user_name):
        """Fetch the (user_id, user_name, password) row for user_name."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        SELECT user_id,user_name,password FROM rss_user WHERE user_name = $1
        """,user_name)
        await conn.close()
        return res
    async def insert_xpath(self,user_id,
            site_url,
            entry_css,
            entry_link_css,
            add_base_url,
            rss_link_prefix,
            site_title_css,
            site_motto_css,
            entry_content_css,
            author_css,
            datetime_css,
            interval_time,
            rss_link,
            base_url):
        """Insert a new xpath row; returns a record holding its xpath_id."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        INSERT INTO xpath (user_id,site_url,
        entry_css,entry_link_css,add_base_url,
        rss_link_prefix,site_title_css,site_motto_css,
        entry_content_css,author_css,datetime_css,
        interval_time,rss_link,base_url)
        VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)
        RETURNING xpath_id;
        """,user_id,site_url,entry_css,entry_link_css,
        add_base_url,rss_link_prefix,
        site_title_css,site_motto_css,entry_content_css,
        author_css,datetime_css,interval_time,rss_link,base_url)
        await conn.close()
        return res
    async def get_xpath_interval_one(self,xpath_id):
        """Fetch (xpath_id, interval_time) for a single xpath row."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        SELECT xpath_id,interval_time FROM xpath WHERE xpath_id = $1
        """,xpath_id)
        await conn.close()
        return res
    async def get_xpath_id_interval_all(self):
        """Fetch (xpath_id, interval_time) for every xpath row ([] if none)."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetch("""
        SELECT xpath_id,interval_time FROM xpath
        """)
        await conn.close()
        return res
    async def get_xpath_from_user_id(self,user_id):
        """Fetch all xpath rows owned by user_id ([] if none)."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetch("""
        SELECT * FROM xpath WHERE user_id = $1
        """, user_id)
        await conn.close()
        return res
    async def get_xpath_one_from_xpath_id(self,xpath_id):
        """Fetch the single xpath row keyed by xpath_id."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        SELECT * FROM xpath WHERE xpath_id = $1
        """, xpath_id)
        await conn.close()
        return res
    async def get_xpath_one_from_url_name(self,url_name):
        """Fetch xpath rows whose rss_link equals url_name.

        NOTE(review): despite the name this uses fetch(), so the result
        is a list of rows (possibly empty), not a single record.
        """
        conn = await self.select_sql.sql_conn()
        res = await conn.fetch("""
        SELECT * FROM xpath WHERE rss_link = $1
        """, url_name)
        await conn.close()
        return res
    async def update_xpath_one_from_rss_link(self,
            site_url,
            entry_css,
            entry_link_css,
            add_base_url,
            site_title_css,
            site_motto_css,
            entry_content_css,
            author_css,
            datetime_css,
            interval_time,
            rss_link,
            base_url
            ):
        """Update the xpath row keyed by rss_link; returns its xpath_id."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        UPDATE xpath SET site_url = $1,
        entry_css = $2,entry_link_css = $3,add_base_url = $4,
        site_title_css = $5,site_motto_css = $6,entry_content_css = $7,
        author_css = $8,datetime_css = $9,interval_time = $10,
        base_url = $11
        WHERE rss_link = $12 RETURNING xpath_id
        """,site_url,entry_css,entry_link_css,add_base_url,
        site_title_css,site_motto_css,entry_content_css,
        author_css,datetime_css,interval_time,base_url,
        rss_link)
        await conn.close()
        return res
    async def insert_rss(self,user_id,xpath_id,site_title,rss_url_name,
            rss_content,rss_last_build_time,rss_sha256sum):
        """Insert a generated feed row; returns a record with its xpath_id."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        INSERT INTO rss (user_id,xpath_id,site_title,rss_url_name,
        rss_content,rss_last_build_time,rss_sha256sum)
        VALUES ($1,$2,$3,$4,$5,$6,$7) RETURNING xpath_id
        """, user_id,
        xpath_id,
        site_title,
        rss_url_name,
        rss_content,
        rss_last_build_time,
        rss_sha256sum)
        await conn.close()
        return res
    async def get_one_rss_from_userid_xpathid(self,user_id,xpath_id):
        """Fetch the rss row for this (user_id, xpath_id) pair."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetchrow("""
        SELECT * FROM rss WHERE user_id = $1 AND xpath_id = $2;
        """, user_id,xpath_id)
        await conn.close()
        return res
    async def get_all_rss_from_userid(self,user_id):
        """Fetch all rss rows for user_id; a placeholder row when none exist."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetch("""
        SELECT * FROM rss WHERE user_id = $1
        """, user_id)
        await conn.close()
        # Placeholder keeps the caller's template rendering simple.
        if len(res) == 0:
            res = [{"site_title": "rss_is_none","rss_url_name": "no_url"}]
        return res
    async def get_one_rss_from_url_name(self,url_name):
        """Fetch rss rows by public url name; placeholder row when missing."""
        conn = await self.select_sql.sql_conn()
        res = await conn.fetch("""
        SELECT * FROM rss WHERE rss_url_name = $1
        """, url_name)
        await conn.close()
        if len(res) == 0:
            res = [{"rss_content": "no rss,maybe deleted","rss_url_name": "no_url"}]
        return res
    async def update_one_rss_xpath_id(self,rss_content,
            rss_sha256sum,xpath_id):
        """Update one rss row's content/checksum by xpath_id.

        Returns the driver's status string on success, or
        ``self.do_not_success`` on any error.
        NOTE(review): the bare ``except`` hides the real failure and
        skips ``conn.close()`` on error (connection leak); narrow the
        exception and close in a ``finally`` block.
        """
        conn = await self.select_sql.sql_conn()
        try:
            res = await conn.execute("""
            UPDATE rss SET rss_content = $1,
            rss_sha256sum = $2 WHERE xpath_id = $3
            """,rss_content,
            rss_sha256sum,xpath_id)
            await conn.close()
        except:
            res = self.do_not_success
            return res
        else:
            return res
    async def delete_one_rss_from_url_name(self,url_name):
        """Delete the rss row and its xpath row keyed by url_name.

        Returns ``self.do_success`` when both deletes removed a row.
        NOTE(review): ``fetchrow`` presumably returns ``None`` when no
        row matched, so ``len(res1)``/``len(res2)`` would raise
        TypeError for a missing row — guard with ``is not None``.
        """
        conn = await self.select_sql.sql_conn()
        res1 = await conn.fetchrow("""
        DELETE FROM rss WHERE rss_url_name = $1 RETURNING *
        """, url_name)
        res2 = await conn.fetchrow("""
        DELETE FROM xpath WHERE rss_link = $1 RETURNING *
        """,url_name)
        await conn.close()
        if len(res1) != 0 and len(res2) != 0:
            res = self.do_success
        else:
            res = self.do_not_success
        return res
| [
"selectsql.SelectSql",
"config.get_database_config"
] | [((117, 145), 'config.get_database_config', 'config.get_database_config', ([], {}), '()\n', (143, 145), False, 'import config\n'), ((172, 196), 'selectsql.SelectSql', 'SelectSql', (['self.database'], {}), '(self.database)\n', (181, 196), False, 'from selectsql import SelectSql\n')] |
import unittest
import os, sys
import communication_tests
# Rank 0 prints compiler/toolchain info once before the run starts.
if communication_tests.MPI.Rank() == 0:
    communication_tests.CompilerInfo()
    print()
# Make sure the banner is finished before any rank proceeds.
communication_tests.MPI.Barrier()
# Rank 1 (or any process launched with --tests-slave) acts as the data
# sender; every other rank discovers and runs the unit tests.
is_slave_process = (("--tests-slave" in sys.argv[1:]) or (communication_tests.MPI.Rank() == 1))
if is_slave_process:
    from base_communication_test import BaseCommunicationTestDataSender
    BaseCommunicationTestDataSender().Execute()
else:
    loader = unittest.TestLoader()
    tests = loader.discover(os.path.dirname(__file__)) # automatically discover all tests in this directory
    testRunner = unittest.runner.TextTestRunner(verbosity=1)
sys.exit(not testRunner.run(tests).wasSuccessful()) | [
"communication_tests.MPI.Rank",
"communication_tests.MPI.Barrier",
"base_communication_test.BaseCommunicationTestDataSender",
"communication_tests.CompilerInfo",
"os.path.dirname",
"unittest.runner.TextTestRunner",
"unittest.TestLoader"
] | [((151, 184), 'communication_tests.MPI.Barrier', 'communication_tests.MPI.Barrier', ([], {}), '()\n', (182, 184), False, 'import communication_tests\n'), ((62, 92), 'communication_tests.MPI.Rank', 'communication_tests.MPI.Rank', ([], {}), '()\n', (90, 92), False, 'import communication_tests\n'), ((103, 137), 'communication_tests.CompilerInfo', 'communication_tests.CompilerInfo', ([], {}), '()\n', (135, 137), False, 'import communication_tests\n'), ((443, 464), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (462, 464), False, 'import unittest\n'), ((590, 633), 'unittest.runner.TextTestRunner', 'unittest.runner.TextTestRunner', ([], {'verbosity': '(1)'}), '(verbosity=1)\n', (620, 633), False, 'import unittest\n'), ((244, 274), 'communication_tests.MPI.Rank', 'communication_tests.MPI.Rank', ([], {}), '()\n', (272, 274), False, 'import communication_tests\n'), ((493, 518), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (508, 518), False, 'import os, sys\n'), ((380, 413), 'base_communication_test.BaseCommunicationTestDataSender', 'BaseCommunicationTestDataSender', ([], {}), '()\n', (411, 413), False, 'from base_communication_test import BaseCommunicationTestDataSender\n')] |
#!/usr/bin/env python3
# Advent of Code 2020
# <NAME>
import sys # .stdin
import re # .search
# ======================================================================
# HELPER FUNCTIONS
# ======================================================================
def does_rule_allow_num(rule, num):
assert isinstance(rule, list)
assert len(rule) == 5
assert isinstance(rule[0], str)
for k in rule[1:]:
assert isinstance(k, int)
assert rule[1] <= rule[2]
assert rule[3] <= rule[4]
assert isinstance(num, int)
return rule[1] <= num <= rule[2] or rule[3] <= num <= rule[4]
# check_valid_ticket - returns list of all invalid numbers found in the
# given ticket, according to the given rules. For a valid ticket, this
# list will be empty.
def check_valid_ticket(rules, ticket):
assert isinstance(rules, list)
assert isinstance(ticket, list)
invalid_numbers = []
for n in ticket:
for r in rules:
if does_rule_allow_num(r, n):
break
else:
invalid_numbers.append(n)
return invalid_numbers
# ======================================================================
# MAIN PROGRAM
# ======================================================================
# *** Process Input ***
# Read rules
rules = []
for line in sys.stdin:
line = line.rstrip()
if not line:
break
re1 = r"^([a-z][a-z ]*[a-z]|[a-z]): (\d+)-(\d+) or (\d+)-(\d+)$"
mo1 = re.search(re1, line)
assert mo1
the_rule = [mo1[1],
int(mo1[2]), int(mo1[3]), int(mo1[4]), int(mo1[5])]
rules.append(the_rule)
rules_count = len(rules)
# Read your ticket
line = sys.stdin.readline()
line = line.rstrip()
assert line == "your ticket:"
line = sys.stdin.readline()
line = line.rstrip()
ticket_strs = line.split(",")
your_ticket = [ int(n_str) for n_str in ticket_strs ]
assert len(your_ticket) == rules_count
line = sys.stdin.readline()
line = line.rstrip()
assert not line
# Read nearby tickets
line = sys.stdin.readline()
line = line.rstrip()
assert line == "nearby tickets:"
nearby_tickets = []
for line in sys.stdin:
line = line.rstrip()
assert line
ticket_strs = line.split(",")
the_ticket = [ int(n_str) for n_str in ticket_strs ]
assert len(the_ticket) == rules_count
nearby_tickets.append(the_ticket)
# *** Do Computation ***
all_invalid_numbers = []
for nt in nearby_tickets:
invalid_numbers = check_valid_ticket(rules, nt)
all_invalid_numbers += invalid_numbers
# *** Print Answer ***
print(f"Answer: {sum(all_invalid_numbers)}")
| [
"sys.stdin.readline",
"re.search"
] | [((1701, 1721), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1719, 1721), False, 'import sys\n'), ((1780, 1800), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1798, 1800), False, 'import sys\n'), ((1952, 1972), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1970, 1972), False, 'import sys\n'), ((2040, 2060), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2058, 2060), False, 'import sys\n'), ((1494, 1514), 're.search', 're.search', (['re1', 'line'], {}), '(re1, line)\n', (1503, 1514), False, 'import re\n')] |
# Starts from OS Mastermap base map and:
# 1. Assigns CEH Landcover map (LCM) definition of either Arable or Improved grassland to agricultural land polygons
# 2. Assigns Rural Payments Agency CROME Crop map data (input must be dissolved by land use code and joined to description
# and simplified description (Arable, Improved grassland, Short-rotation coppice)
# 3. Assigns Natural England Priority Habitat data.
# Set up to loop through a set of Local Authority Districts
# -----------------------------------------------------------------------------------------------------------------
import time
import arcpy
import os
import MyFunctions
arcpy.CheckOutExtension("Spatial")
print(''.join(["## Started on : ", time.ctime()]))
arcpy.env.overwriteOutput = True # Overwrites files
arcpy.env.qualifiedFieldNames = False # Joined fields will be exported without the join table name
arcpy.env.XYTolerance = "0.001 Meters"
# region = "Oxon"
region = "Arc"
# method = "HLU"
method = "CROME_LCM_PHI"
if method == "CROME_LCM_PHI":
folder = r"C:\Users\cenv0389\Documents\Oxon_GIS\OxCamArc"
if region == "Arc":
LADs_included = ["Bedfordshire", "Buckinghamshire", "Cambridgeshire", "Northamptonshire"]
Hab_field = "Interpreted_habitat"
elif region == "Oxon":
LADs_included = ["Oxfordshire"]
Hab_field = "Interpreted_Habitat"
data_gdb = os.path.join(folder, "Data\Data.gdb")
LAD_table = os.path.join(data_gdb, "Arc_LADs")
CROME_data = os.path.join(data_gdb, "CROME_Arc_dissolve")
elif region == "Oxon" and method == "HLU":
# Operate in the Oxon_county folder
folder = r"C:\Users\cenv0389\Documents\Oxon_GIS\Oxon_county\Data"
data_gdb = os.path.join(folder, "Data.gdb")
LAD_table = os.path.join(folder, "Data.gdb", "Oxon_LADs")
CROME_data = os.path.join(data_gdb, "CROME_Oxon_dissolve")
Hab_field = "BAP_Habitat"
else:
print("ERROR: you cannot combine region " + region + " with method " + method)
exit()
LAD_names = []
needed_fields = ["TOID", "Theme", "DescriptiveGroup", "DescriptiveTerm", "Make", "OSMM_hab"]
# What method are we using to create the base map? Merge or intersect? This affects the processing stages used.
# merge_or_intersect = "intersect"
merge_or_intersect = "merge"
# Which stages of the code do we want to run? Depends on whether we are using merge or intersect to create the base map,
# as the merge is a two-stage process in which this script is called twice. Also useful for debugging or updates.
if merge_or_intersect == "intersect":
process_LCM = False
process_CROME = True
process_PHI = True
delete_landform = False
intersect_PHI = False
interpret_PHI = True
out_fc = "OSMM_LCM_PHI_intersect"
elif merge_or_intersect == "merge":
# Change step = 1 to step = 2 after running Merge_into_base_map to merge OSMM_LCM with PHI
step = 2
if step == 1:
process_LCM = True
process_CROME = True
process_PHI = True
delete_landform = True
intersect_PHI = False
interpret_PHI = False
elif step == 2:
process_LCM = False
process_CROME = False
process_PHI = True
delete_landform = False
intersect_PHI = False
interpret_PHI = True
out_fc = "OSMM_LCM_PHI_merge"
arcpy.env.workspace = data_gdb
LADs = arcpy.SearchCursor(os.path.join(data_gdb, LAD_table))
for LAD in LADs:
LAD_full_name = LAD.getValue("desc_")
LAD_county = LAD.getValue("county")
if LAD_county in LADs_included:
LAD_name = LAD_full_name.replace(" ", "")
LAD_names.append(LAD_name)
# Now process each LAD gdb
# Use CEH LCM to determine whether OSMM 'Agricultural land' is arable or improved grassland.
if process_LCM:
for LAD in LAD_names:
print ("Processing " + LAD)
arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
print("Copying OSMM to OSMM_LCM")
arcpy.CopyFeatures_management("OSMM", "OSMM_LCM")
print ("Adding LCM farmland interpretation to " + LAD)
MyFunctions.delete_fields("OSMM_LCM", needed_fields, "")
print (" Adding habitat fields")
MyFunctions.check_and_add_field("OSMM_LCM", "LCM_farmland", "TEXT", 100)
MyFunctions.check_and_add_field("OSMM_LCM", Hab_field, "TEXT", 100)
arcpy.CalculateField_management("OSMM_LCM", Hab_field, "!OSMM_hab!", "PYTHON_9.3")
print (" Identifying arable land")
arcpy.MakeFeatureLayer_management("OSMM_LCM", "ag_lyr")
arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="OSMM_hab = 'Agricultural land' OR OSMM_hab = 'Natural surface'")
arcpy.SelectLayerByLocation_management("ag_lyr", "HAVE_THEIR_CENTER_IN", "LCM_arable", selection_type="SUBSET_SELECTION")
arcpy.CalculateField_management("ag_lyr","LCM_farmland", "'Arable'", "PYTHON_9.3")
arcpy.CalculateField_management("ag_lyr", Hab_field, "'Arable'", "PYTHON_9.3")
arcpy.Delete_management("ag_lyr")
print (" Identifying improved grassland")
arcpy.MakeFeatureLayer_management("OSMM_LCM", "ag_lyr2")
arcpy.SelectLayerByAttribute_management("ag_lyr2", where_clause="OSMM_hab = 'Agricultural land' OR OSMM_hab = 'Natural surface'")
arcpy.SelectLayerByLocation_management("ag_lyr2", "HAVE_THEIR_CENTER_IN", "LCM_improved_grassland",
selection_type="SUBSET_SELECTION")
arcpy.CalculateField_management("ag_lyr2", "LCM_farmland", "'Improved grassland'", "PYTHON_9.3")
arcpy.Delete_management("ag_lyr2")
# Set interpreted habitat to Improved grassland if this is 'agricultural land'or Amenity grassland if this is 'Natural surface'
# unless it is railside (do not want to flag this as amenity grassland because it is not generally accessible)
expression = "LCM_farmland = 'Improved grassland' AND " + Hab_field + " = 'Agricultural land'"
MyFunctions.select_and_copy("OSMM_LCM", Hab_field, expression, "'Improved grassland'")
expression = "LCM_farmland = 'Improved grassland' AND " + Hab_field + " = 'Natural surface' AND DescriptiveGroup <> 'Rail'"
MyFunctions.select_and_copy("OSMM_LCM", Hab_field, expression, "'Amenity grassland'")
print(''.join(["## Finished on : ", time.ctime()]))
# Add crop type from CROME map, but only for agricultural land. This is probably better data then LCM and is freely available.
# This assumes we are adding CROME after adding LCM (so the Interpreted habitat field is already added and populated in the process_LCM
# step above), but in fact it is probably best just to use CROME (once we have tested vs LCM), so need to modify this step to include
# adding the interpreted habitat field
if process_CROME:
for LAD in LAD_names:
print ("Processing " + LAD)
arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
in_map = "OSMM_LCM"
out_map = in_map + "_CROME"
print("Copying " + in_map + " to " + out_map)
arcpy.CopyFeatures_management(in_map, out_map)
print ("Adding CROME farmland interpretation to " + LAD)
print (" Adding habitat fields")
MyFunctions.check_and_add_field(out_map, "CROME_farmland", "TEXT", 50)
print(" Copying OBJECTID for base map")
MyFunctions.check_and_add_field(out_map, "BaseID_CROME", "LONG", 0)
arcpy.CalculateField_management(out_map, "BaseID_CROME", "!OBJECTID!", "PYTHON_9.3")
print (" Identifying farmland")
arcpy.MakeFeatureLayer_management(out_map, "ag_lyr")
expression = "Interpreted_hab IN ('Agricultural land', 'Natural surface') OR Interpreted_hab LIKE 'Arable%'"
expression = expression + " OR Interpreted_hab LIKE 'Improved grassland%'"
arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause=expression)
print(" Calculating percentage of farmland features within CROME polygons")
arcpy.TabulateIntersection_analysis(CROME_data, ["LUCODE", "Land Use Description", "field", "Shape_Area"],
"ag_lyr", "CROME_TI", ["BaseID_CROME", Hab_field, "Shape_Area"])
print(" Sorting TI table by size so that larger intersections are first in the list")
arcpy.Sort_management("CROME_TI", "CROME_TI_sort", [["AREA", "ASCENDING"]])
print (" Adding fields for CROME data")
MyFunctions.check_and_add_field(out_map, "CROME_desc", "TEXT", 50)
MyFunctions.check_and_add_field(out_map, "CROME_simple", "TEXT", 30)
print (" Joining CROME info for base map polygons that are >50% inside CROME polygons")
arcpy.AddJoin_management("ag_lyr", "BaseID_CROME", "CROME_TI_sort", "BaseID_CROME", "KEEP_ALL")
print(" Copying CROME data")
arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="CROME_TI_sort.PERCENTAGE > 50")
arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_desc", "!CROME_TI_sort.Land Use Description!", "PYTHON_9.3")
arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_simple", "!CROME_TI_sort.field!", "PYTHON_9.3")
# Remove the join
arcpy.RemoveJoin_management("ag_lyr", "CROME_TI_sort")
arcpy.Delete_management("ag_lyr")
# Set interpreted habitat to Improved grassland if this is 'agricultural land'or Amenity grassland if this is 'Natural surface'
# unless it is railside (do not want to flag this as amenity grassland because it is not generally accessible)
expression = "CROME_desc IN ('Grass', 'Fallow Land') AND " + Hab_field + " IN ('Agricultural land', 'Arable')"
MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Improved grassland'")
expression = "CROME_desc IN ('Grass', 'Fallow Land') AND " + Hab_field + " = 'Natural surface' AND DescriptiveGroup <> 'Rail'"
MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Amenity grassland'")
expression = "CROME_desc = 'Arable' AND " + Hab_field + " = 'Agricultural land'"
MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Arable'")
expression = "CROME_desc = 'Short Rotation Coppice' AND " + Hab_field + " = 'Agricultural land'"
MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Arable'")
print(''.join(["## Finished on : ", time.ctime()]))
if process_PHI:
for LAD in LAD_names:
arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
if delete_landform:
print(" Deleting overlapping 'Landform' and 'Pylon' from OSMM for " + LAD)
arcpy.MakeFeatureLayer_management("OSMM_LCM", "OSMM_layer")
expression = "DescriptiveGroup LIKE '%Landform%' OR DescriptiveTerm IN ('Cliff','Slope','Pylon')"
arcpy.SelectLayerByAttribute_management("OSMM_layer", where_clause=expression)
arcpy.DeleteFeatures_management("OSMM_layer")
arcpy.Delete_management("OSMM_layer")
if intersect_PHI:
print ("Intersecting " + LAD)
arcpy.Identity_analysis("OSMM_LCM", "PHI", out_fc, "NO_FID")
if interpret_PHI:
print ("Interpreting " + LAD)
# Copy PHI habitat across, but not for manmade, gardens, water, unidentified PHI, wood pasture or OMHD (dealt with later)
expression = "Make = 'Natural' AND DescriptiveGroup NOT LIKE '%water%' AND DescriptiveGroup NOT LIKE '%Water%' AND " \
"OSMM_hab <> 'Roadside - unknown surface' AND OSMM_hab <> 'Track' AND OSMM_hab <> 'Standing water' "
expression2 = expression + " AND PHI IS NOT NULL AND PHI <> '' AND PHI NOT LIKE 'No main%' AND " \
"PHI NOT LIKE 'Wood-pasture%' AND PHI NOT LIKE 'Open Mosaic%'"
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression2, "!PHI!")
# Correction for traditional orchards in large gardens
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "PHI = 'Traditional orchard' AND OSMM_hab = 'Garden'",
"'Traditional orchards'")
# Other corrections / consolidations
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Deciduous woodland'",
"'Broadleaved woodland - semi-natural'")
expression3 = "Interpreted_habitat LIKE '%grazing marsh%' OR Interpreted_habitat LIKE 'Purple moor grass%'"
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression3, "'Marshy grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%semi-improved grassland%'",
"'Semi-natural grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%meadow%'",
"'Neutral grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Traditional orchard'",
"'Traditional orchards'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%alcareous%'",
"'Calcareous grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Lowland heathland'",
"'Heathland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Reedbeds'",
"'Reedbed'")
# Copy over OMHD only if the habitat is fairly generic (OMHD dataset covers areas of mixed habitats)
expression5 = "(OMHD IS NOT NULL AND OMHD <> '') AND (Interpreted_habitat IN ('Arable', 'Agricultural land'," \
" 'Improved grassland', 'Natural surface', 'Cultivated/disturbed land', 'Bare ground', 'Landfill (inactive)'," \
"'Quarry or spoil (disused)', 'Sealed surface'))"
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression5, "'Open mosaic habitats'")
# Copy over Wood pasture only if the habitat is fairly generic (WPP dataset covers very large areas of mixed habitats)
expression4 = "(WPP IS NOT NULL AND WPP <> '') AND (Interpreted_habitat IN ('Arable', 'Agricultural land', " \
"'Improved grassland', 'Natural surface', 'Cultivated/disturbed land') OR " \
"Interpreted_habitat LIKE 'Scattered%' OR Interpreted_habitat LIKE 'Semi-natural grassland%')"
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression4, "'Parkland and scattered trees - broadleaved'")
print(''.join(["## Finished on : ", time.ctime()]))
if merge_or_intersect == "merge":
if step == 1:
print ("Now run Merge_into_Base_Map.py to merge OSMM_LCM with PHI, then set step = 2 in this code and re-run to interpret habitats")
exit() | [
"time.ctime",
"arcpy.CopyFeatures_management",
"arcpy.CalculateField_management",
"MyFunctions.check_and_add_field",
"arcpy.SelectLayerByLocation_management",
"arcpy.MakeFeatureLayer_management",
"arcpy.CheckOutExtension",
"os.path.join",
"arcpy.Sort_management",
"arcpy.Identity_analysis",
"MyFu... | [((647, 681), 'arcpy.CheckOutExtension', 'arcpy.CheckOutExtension', (['"""Spatial"""'], {}), "('Spatial')\n", (670, 681), False, 'import arcpy\n'), ((1394, 1432), 'os.path.join', 'os.path.join', (['folder', '"""Data\\\\Data.gdb"""'], {}), "(folder, 'Data\\\\Data.gdb')\n", (1406, 1432), False, 'import os\n'), ((1448, 1482), 'os.path.join', 'os.path.join', (['data_gdb', '"""Arc_LADs"""'], {}), "(data_gdb, 'Arc_LADs')\n", (1460, 1482), False, 'import os\n'), ((1500, 1544), 'os.path.join', 'os.path.join', (['data_gdb', '"""CROME_Arc_dissolve"""'], {}), "(data_gdb, 'CROME_Arc_dissolve')\n", (1512, 1544), False, 'import os\n'), ((3375, 3408), 'os.path.join', 'os.path.join', (['data_gdb', 'LAD_table'], {}), '(data_gdb, LAD_table)\n', (3387, 3408), False, 'import os\n'), ((1713, 1745), 'os.path.join', 'os.path.join', (['folder', '"""Data.gdb"""'], {}), "(folder, 'Data.gdb')\n", (1725, 1745), False, 'import os\n'), ((1762, 1807), 'os.path.join', 'os.path.join', (['folder', '"""Data.gdb"""', '"""Oxon_LADs"""'], {}), "(folder, 'Data.gdb', 'Oxon_LADs')\n", (1774, 1807), False, 'import os\n'), ((1825, 1870), 'os.path.join', 'os.path.join', (['data_gdb', '"""CROME_Oxon_dissolve"""'], {}), "(data_gdb, 'CROME_Oxon_dissolve')\n", (1837, 1870), False, 'import os\n'), ((3859, 3893), 'os.path.join', 'os.path.join', (['folder', "(LAD + '.gdb')"], {}), "(folder, LAD + '.gdb')\n", (3871, 3893), False, 'import os\n'), ((3944, 3993), 'arcpy.CopyFeatures_management', 'arcpy.CopyFeatures_management', (['"""OSMM"""', '"""OSMM_LCM"""'], {}), "('OSMM', 'OSMM_LCM')\n", (3973, 3993), False, 'import arcpy\n'), ((4065, 4121), 'MyFunctions.delete_fields', 'MyFunctions.delete_fields', (['"""OSMM_LCM"""', 'needed_fields', '""""""'], {}), "('OSMM_LCM', needed_fields, '')\n", (4090, 4121), False, 'import MyFunctions\n'), ((4172, 4244), 'MyFunctions.check_and_add_field', 'MyFunctions.check_and_add_field', (['"""OSMM_LCM"""', '"""LCM_farmland"""', '"""TEXT"""', '(100)'], {}), "('OSMM_LCM', 
'LCM_farmland', 'TEXT', 100)\n", (4203, 4244), False, 'import MyFunctions\n'), ((4253, 4320), 'MyFunctions.check_and_add_field', 'MyFunctions.check_and_add_field', (['"""OSMM_LCM"""', 'Hab_field', '"""TEXT"""', '(100)'], {}), "('OSMM_LCM', Hab_field, 'TEXT', 100)\n", (4284, 4320), False, 'import MyFunctions\n'), ((4329, 4415), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['"""OSMM_LCM"""', 'Hab_field', '"""!OSMM_hab!"""', '"""PYTHON_9.3"""'], {}), "('OSMM_LCM', Hab_field, '!OSMM_hab!',\n 'PYTHON_9.3')\n", (4360, 4415), False, 'import arcpy\n'), ((4465, 4520), 'arcpy.MakeFeatureLayer_management', 'arcpy.MakeFeatureLayer_management', (['"""OSMM_LCM"""', '"""ag_lyr"""'], {}), "('OSMM_LCM', 'ag_lyr')\n", (4498, 4520), False, 'import arcpy\n'), ((4529, 4662), 'arcpy.SelectLayerByAttribute_management', 'arcpy.SelectLayerByAttribute_management', (['"""ag_lyr"""'], {'where_clause': '"""OSMM_hab = \'Agricultural land\' OR OSMM_hab = \'Natural surface\'"""'}), '(\'ag_lyr\', where_clause=\n "OSMM_hab = \'Agricultural land\' OR OSMM_hab = \'Natural surface\'")\n', (4568, 4662), False, 'import arcpy\n'), ((4666, 4791), 'arcpy.SelectLayerByLocation_management', 'arcpy.SelectLayerByLocation_management', (['"""ag_lyr"""', '"""HAVE_THEIR_CENTER_IN"""', '"""LCM_arable"""'], {'selection_type': '"""SUBSET_SELECTION"""'}), "('ag_lyr', 'HAVE_THEIR_CENTER_IN',\n 'LCM_arable', selection_type='SUBSET_SELECTION')\n", (4704, 4791), False, 'import arcpy\n'), ((4796, 4883), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['"""ag_lyr"""', '"""LCM_farmland"""', '"""\'Arable\'"""', '"""PYTHON_9.3"""'], {}), '(\'ag_lyr\', \'LCM_farmland\', "\'Arable\'",\n \'PYTHON_9.3\')\n', (4827, 4883), False, 'import arcpy\n'), ((4887, 4965), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['"""ag_lyr"""', 'Hab_field', '"""\'Arable\'"""', '"""PYTHON_9.3"""'], {}), '(\'ag_lyr\', Hab_field, "\'Arable\'", \'PYTHON_9.3\')\n', (4918, 4965), 
False, 'import arcpy\n'), ((4974, 5007), 'arcpy.Delete_management', 'arcpy.Delete_management', (['"""ag_lyr"""'], {}), "('ag_lyr')\n", (4997, 5007), False, 'import arcpy\n'), ((5068, 5124), 'arcpy.MakeFeatureLayer_management', 'arcpy.MakeFeatureLayer_management', (['"""OSMM_LCM"""', '"""ag_lyr2"""'], {}), "('OSMM_LCM', 'ag_lyr2')\n", (5101, 5124), False, 'import arcpy\n'), ((5133, 5267), 'arcpy.SelectLayerByAttribute_management', 'arcpy.SelectLayerByAttribute_management', (['"""ag_lyr2"""'], {'where_clause': '"""OSMM_hab = \'Agricultural land\' OR OSMM_hab = \'Natural surface\'"""'}), '(\'ag_lyr2\', where_clause=\n "OSMM_hab = \'Agricultural land\' OR OSMM_hab = \'Natural surface\'")\n', (5172, 5267), False, 'import arcpy\n'), ((5271, 5409), 'arcpy.SelectLayerByLocation_management', 'arcpy.SelectLayerByLocation_management', (['"""ag_lyr2"""', '"""HAVE_THEIR_CENTER_IN"""', '"""LCM_improved_grassland"""'], {'selection_type': '"""SUBSET_SELECTION"""'}), "('ag_lyr2', 'HAVE_THEIR_CENTER_IN',\n 'LCM_improved_grassland', selection_type='SUBSET_SELECTION')\n", (5309, 5409), False, 'import arcpy\n'), ((5461, 5561), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['"""ag_lyr2"""', '"""LCM_farmland"""', '"""\'Improved grassland\'"""', '"""PYTHON_9.3"""'], {}), '(\'ag_lyr2\', \'LCM_farmland\',\n "\'Improved grassland\'", \'PYTHON_9.3\')\n', (5492, 5561), False, 'import arcpy\n'), ((5566, 5600), 'arcpy.Delete_management', 'arcpy.Delete_management', (['"""ag_lyr2"""'], {}), "('ag_lyr2')\n", (5589, 5600), False, 'import arcpy\n'), ((5968, 6058), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['"""OSMM_LCM"""', 'Hab_field', 'expression', '"""\'Improved grassland\'"""'], {}), '(\'OSMM_LCM\', Hab_field, expression,\n "\'Improved grassland\'")\n', (5995, 6058), False, 'import MyFunctions\n'), ((6195, 6284), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['"""OSMM_LCM"""', 'Hab_field', 'expression', '"""\'Amenity grassland\'"""'], 
{}), '(\'OSMM_LCM\', Hab_field, expression,\n "\'Amenity grassland\'")\n', (6222, 6284), False, 'import MyFunctions\n'), ((6889, 6923), 'os.path.join', 'os.path.join', (['folder', "(LAD + '.gdb')"], {}), "(folder, LAD + '.gdb')\n", (6901, 6923), False, 'import os\n'), ((7050, 7096), 'arcpy.CopyFeatures_management', 'arcpy.CopyFeatures_management', (['in_map', 'out_map'], {}), '(in_map, out_map)\n', (7079, 7096), False, 'import arcpy\n'), ((7212, 7282), 'MyFunctions.check_and_add_field', 'MyFunctions.check_and_add_field', (['out_map', '"""CROME_farmland"""', '"""TEXT"""', '(50)'], {}), "(out_map, 'CROME_farmland', 'TEXT', 50)\n", (7243, 7282), False, 'import MyFunctions\n'), ((7345, 7412), 'MyFunctions.check_and_add_field', 'MyFunctions.check_and_add_field', (['out_map', '"""BaseID_CROME"""', '"""LONG"""', '(0)'], {}), "(out_map, 'BaseID_CROME', 'LONG', 0)\n", (7376, 7412), False, 'import MyFunctions\n'), ((7421, 7509), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['out_map', '"""BaseID_CROME"""', '"""!OBJECTID!"""', '"""PYTHON_9.3"""'], {}), "(out_map, 'BaseID_CROME', '!OBJECTID!',\n 'PYTHON_9.3')\n", (7452, 7509), False, 'import arcpy\n'), ((7556, 7608), 'arcpy.MakeFeatureLayer_management', 'arcpy.MakeFeatureLayer_management', (['out_map', '"""ag_lyr"""'], {}), "(out_map, 'ag_lyr')\n", (7589, 7608), False, 'import arcpy\n'), ((7817, 7891), 'arcpy.SelectLayerByAttribute_management', 'arcpy.SelectLayerByAttribute_management', (['"""ag_lyr"""'], {'where_clause': 'expression'}), "('ag_lyr', where_clause=expression)\n", (7856, 7891), False, 'import arcpy\n'), ((7990, 8170), 'arcpy.TabulateIntersection_analysis', 'arcpy.TabulateIntersection_analysis', (['CROME_data', "['LUCODE', 'Land Use Description', 'field', 'Shape_Area']", '"""ag_lyr"""', '"""CROME_TI"""', "['BaseID_CROME', Hab_field, 'Shape_Area']"], {}), "(CROME_data, ['LUCODE',\n 'Land Use Description', 'field', 'Shape_Area'], 'ag_lyr', 'CROME_TI', [\n 'BaseID_CROME', Hab_field, 
'Shape_Area'])\n", (8025, 8170), False, 'import arcpy\n'), ((8314, 8389), 'arcpy.Sort_management', 'arcpy.Sort_management', (['"""CROME_TI"""', '"""CROME_TI_sort"""', "[['AREA', 'ASCENDING']]"], {}), "('CROME_TI', 'CROME_TI_sort', [['AREA', 'ASCENDING']])\n", (8335, 8389), False, 'import arcpy\n'), ((8452, 8518), 'MyFunctions.check_and_add_field', 'MyFunctions.check_and_add_field', (['out_map', '"""CROME_desc"""', '"""TEXT"""', '(50)'], {}), "(out_map, 'CROME_desc', 'TEXT', 50)\n", (8483, 8518), False, 'import MyFunctions\n'), ((8527, 8595), 'MyFunctions.check_and_add_field', 'MyFunctions.check_and_add_field', (['out_map', '"""CROME_simple"""', '"""TEXT"""', '(30)'], {}), "(out_map, 'CROME_simple', 'TEXT', 30)\n", (8558, 8595), False, 'import MyFunctions\n'), ((8706, 8805), 'arcpy.AddJoin_management', 'arcpy.AddJoin_management', (['"""ag_lyr"""', '"""BaseID_CROME"""', '"""CROME_TI_sort"""', '"""BaseID_CROME"""', '"""KEEP_ALL"""'], {}), "('ag_lyr', 'BaseID_CROME', 'CROME_TI_sort',\n 'BaseID_CROME', 'KEEP_ALL')\n", (8730, 8805), False, 'import arcpy\n'), ((8853, 8953), 'arcpy.SelectLayerByAttribute_management', 'arcpy.SelectLayerByAttribute_management', (['"""ag_lyr"""'], {'where_clause': '"""CROME_TI_sort.PERCENTAGE > 50"""'}), "('ag_lyr', where_clause=\n 'CROME_TI_sort.PERCENTAGE > 50')\n", (8892, 8953), False, 'import arcpy\n'), ((8957, 9081), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['"""ag_lyr"""', "(out_map + '.CROME_desc')", '"""!CROME_TI_sort.Land Use Description!"""', '"""PYTHON_9.3"""'], {}), "('ag_lyr', out_map + '.CROME_desc',\n '!CROME_TI_sort.Land Use Description!', 'PYTHON_9.3')\n", (8988, 9081), False, 'import arcpy\n'), ((9086, 9197), 'arcpy.CalculateField_management', 'arcpy.CalculateField_management', (['"""ag_lyr"""', "(out_map + '.CROME_simple')", '"""!CROME_TI_sort.field!"""', '"""PYTHON_9.3"""'], {}), "('ag_lyr', out_map + '.CROME_simple',\n '!CROME_TI_sort.field!', 'PYTHON_9.3')\n", (9117, 9197), False, 'import 
arcpy\n'), ((9229, 9283), 'arcpy.RemoveJoin_management', 'arcpy.RemoveJoin_management', (['"""ag_lyr"""', '"""CROME_TI_sort"""'], {}), "('ag_lyr', 'CROME_TI_sort')\n", (9256, 9283), False, 'import arcpy\n'), ((9292, 9325), 'arcpy.Delete_management', 'arcpy.Delete_management', (['"""ag_lyr"""'], {}), "('ag_lyr')\n", (9315, 9325), False, 'import arcpy\n'), ((9709, 9796), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_map', 'Hab_field', 'expression', '"""\'Improved grassland\'"""'], {}), '(out_map, Hab_field, expression,\n "\'Improved grassland\'")\n', (9736, 9796), False, 'import MyFunctions\n'), ((9936, 10022), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_map', 'Hab_field', 'expression', '"""\'Amenity grassland\'"""'], {}), '(out_map, Hab_field, expression,\n "\'Amenity grassland\'")\n', (9963, 10022), False, 'import MyFunctions\n'), ((10116, 10187), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_map', 'Hab_field', 'expression', '"""\'Arable\'"""'], {}), '(out_map, Hab_field, expression, "\'Arable\'")\n', (10143, 10187), False, 'import MyFunctions\n'), ((10301, 10372), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_map', 'Hab_field', 'expression', '"""\'Arable\'"""'], {}), '(out_map, Hab_field, expression, "\'Arable\'")\n', (10328, 10372), False, 'import MyFunctions\n'), ((10507, 10541), 'os.path.join', 'os.path.join', (['folder', "(LAD + '.gdb')"], {}), "(folder, LAD + '.gdb')\n", (10519, 10541), False, 'import os\n'), ((718, 730), 'time.ctime', 'time.ctime', ([], {}), '()\n', (728, 730), False, 'import time\n'), ((10672, 10731), 'arcpy.MakeFeatureLayer_management', 'arcpy.MakeFeatureLayer_management', (['"""OSMM_LCM"""', '"""OSMM_layer"""'], {}), "('OSMM_LCM', 'OSMM_layer')\n", (10705, 10731), False, 'import arcpy\n'), ((10854, 10932), 'arcpy.SelectLayerByAttribute_management', 'arcpy.SelectLayerByAttribute_management', (['"""OSMM_layer"""'], {'where_clause': 'expression'}), 
"('OSMM_layer', where_clause=expression)\n", (10893, 10932), False, 'import arcpy\n'), ((10945, 10990), 'arcpy.DeleteFeatures_management', 'arcpy.DeleteFeatures_management', (['"""OSMM_layer"""'], {}), "('OSMM_layer')\n", (10976, 10990), False, 'import arcpy\n'), ((11003, 11040), 'arcpy.Delete_management', 'arcpy.Delete_management', (['"""OSMM_layer"""'], {}), "('OSMM_layer')\n", (11026, 11040), False, 'import arcpy\n'), ((11122, 11182), 'arcpy.Identity_analysis', 'arcpy.Identity_analysis', (['"""OSMM_LCM"""', '"""PHI"""', 'out_fc', '"""NO_FID"""'], {}), "('OSMM_LCM', 'PHI', out_fc, 'NO_FID')\n", (11145, 11182), False, 'import arcpy\n'), ((11871, 11956), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', 'expression2', '"""!PHI!"""'], {}), "(out_fc, 'Interpreted_habitat', expression2, '!PHI!'\n )\n", (11898, 11956), False, 'import MyFunctions\n'), ((12032, 12179), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""PHI = \'Traditional orchard\' AND OSMM_hab = \'Garden\'"""', '"""\'Traditional orchards\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "PHI = \'Traditional orchard\' AND OSMM_hab = \'Garden\'",\n "\'Traditional orchards\'")\n', (12059, 12179), False, 'import MyFunctions\n'), ((12274, 12427), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""Interpreted_habitat = \'Deciduous woodland\'"""', '"""\'Broadleaved woodland - semi-natural\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat = \'Deciduous woodland\'",\n "\'Broadleaved woodland - semi-natural\'")\n', (12301, 12427), False, 'import MyFunctions\n'), ((12592, 12689), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', 'expression3', '"""\'Marshy grassland\'"""'], {}), '(out_fc, \'Interpreted_habitat\', expression3,\n "\'Marshy grassland\'")\n', (12619, 12689), False, 
'import MyFunctions\n'), ((12698, 12848), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""Interpreted_habitat LIKE \'%semi-improved grassland%\'"""', '"""\'Semi-natural grassland\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat LIKE \'%semi-improved grassland%\'",\n "\'Semi-natural grassland\'")\n', (12725, 12848), False, 'import MyFunctions\n'), ((12893, 13017), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""Interpreted_habitat LIKE \'%meadow%\'"""', '"""\'Neutral grassland\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat LIKE \'%meadow%\'", "\'Neutral grassland\'")\n', (12920, 13017), False, 'import MyFunctions\n'), ((13066, 13201), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""Interpreted_habitat = \'Traditional orchard\'"""', '"""\'Traditional orchards\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat = \'Traditional orchard\'", "\'Traditional orchards\'")\n', (13093, 13201), False, 'import MyFunctions\n'), ((13250, 13380), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""Interpreted_habitat LIKE \'%alcareous%\'"""', '"""\'Calcareous grassland\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat LIKE \'%alcareous%\'", "\'Calcareous grassland\'")\n', (13277, 13380), False, 'import MyFunctions\n'), ((13429, 13551), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', '"""Interpreted_habitat = \'Lowland heathland\'"""', '"""\'Heathland\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat = \'Lowland heathland\'", "\'Heathland\'")\n', (13456, 13551), False, 'import MyFunctions\n'), ((13600, 13711), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', 
'"""Interpreted_habitat"""', '"""Interpreted_habitat = \'Reedbeds\'"""', '"""\'Reedbed\'"""'], {}), '(out_fc, \'Interpreted_habitat\',\n "Interpreted_habitat = \'Reedbeds\'", "\'Reedbed\'")\n', (13627, 13711), False, 'import MyFunctions\n'), ((14214, 14315), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', 'expression5', '"""\'Open mosaic habitats\'"""'], {}), '(out_fc, \'Interpreted_habitat\', expression5,\n "\'Open mosaic habitats\'")\n', (14241, 14315), False, 'import MyFunctions\n'), ((14804, 14927), 'MyFunctions.select_and_copy', 'MyFunctions.select_and_copy', (['out_fc', '"""Interpreted_habitat"""', 'expression4', '"""\'Parkland and scattered trees - broadleaved\'"""'], {}), '(out_fc, \'Interpreted_habitat\', expression4,\n "\'Parkland and scattered trees - broadleaved\'")\n', (14831, 14927), False, 'import MyFunctions\n'), ((6326, 6338), 'time.ctime', 'time.ctime', ([], {}), '()\n', (6336, 6338), False, 'import time\n'), ((10418, 10430), 'time.ctime', 'time.ctime', ([], {}), '()\n', (10428, 10430), False, 'import time\n'), ((14973, 14985), 'time.ctime', 'time.ctime', ([], {}), '()\n', (14983, 14985), False, 'import time\n')] |
# Generated by Django 3.2.3 on 2022-02-09 12:51
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: tightens field definitions on the `video` model.

    # Must be applied after the app's initial migration.
    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='video',
            name='percent',
            # Progress percentage constrained to the inclusive range [1, 100].
            field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
        ),
        migrations.AlterField(
            model_name='video',
            name='video_240',
            # Optional 240p rendition, stored under a date-based upload path.
            field=models.FileField(blank=True, null=True, upload_to='Videos/240p/%Y/%m/%d'),
        ),
        migrations.AlterField(
            model_name='video',
            name='video_360',
            # Optional 360p rendition, stored under a date-based upload path.
            field=models.FileField(blank=True, null=True, upload_to='Videos/360p/%Y/%m/%d'),
        ),
    ]
| [
"django.db.models.FileField"
] | [((619, 692), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""Videos/240p/%Y/%m/%d"""'}), "(blank=True, null=True, upload_to='Videos/240p/%Y/%m/%d')\n", (635, 692), False, 'from django.db import migrations, models\n'), ((816, 889), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""Videos/360p/%Y/%m/%d"""'}), "(blank=True, null=True, upload_to='Videos/360p/%Y/%m/%d')\n", (832, 889), False, 'from django.db import migrations, models\n')] |
# Code to transform the driver sensor OGMs to the ego vehicle's OGM frame of reference.
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
from utils.grid_utils import global_grid
import time
from scipy.spatial import cKDTree
import pdb
def mask_in_EgoGrid(global_grid_x, global_grid_y, ref_xy, ego_xy, pred_egoGrid, pred_maps, res, mask_unk=None, tolerance=1):
    """Copy reference-grid predictions into the unknown cells of the ego grid.

    Both grids are mapped into the shared global grid's integer index space; a
    KD-tree match between those index pairs decides which reference cell fills
    which ego cell.  Relies on the module globals x_min/x_max/y_min/y_max that
    Transfer_to_EgoGrid assigns before calling this function.

    :param global_grid_x: global grid x-coordinate array (its shape sets the column count)
    :param global_grid_y: global grid y-coordinate array (its shape sets the row count)
    :param ref_xy: x/y coordinates of the reference (driver sensor) grid cells
    :param ego_xy: x/y coordinates of the ego sensor grid cells (indexed per cell)
    :param pred_egoGrid: ego-frame grid to fill (modified copy is returned)
    :param pred_maps: predicted values in the reference grid
    :param res: local grid resolution (same units as the coordinates)
    :param mask_unk: boolean mask selecting the ego cells considered unknown
    :param tolerance: max index-space distance (integer) for a ref/ego cell match
    :return: pred_egoGrid with matched unknown cells overwritten by pred_maps values
    """
    # Consider only the unknown cells in pred_egoGrid (ego sensor grid before transferring values).
    indices = np.where(mask_unk)
    ego_x = ego_xy[0][indices]
    ego_y = ego_xy[1][indices]
    ego_xy = [ego_x, ego_y]
    # Flattened (row-major) positions of the unknown cells within pred_egoGrid.
    flat_indicies = indices[0]*pred_egoGrid.shape[1]+indices[1]
    # ref indx --> global indx
    ref_x_ind = np.floor(global_grid_x.shape[1]*(ref_xy[0]-x_min+res/2.)/(x_max-x_min+res)).astype(int) # column index
    ref_y_ind = np.floor(global_grid_y.shape[0]*(ref_xy[1]-y_min+res/2.)/(y_max-y_min+res)).astype(int) # row index
    ref_global_ind = np.vstack((ref_y_ind.flatten(), ref_x_ind.flatten())).T
    # ego indx --> global indx
    ego_x_ind = np.floor(global_grid_x.shape[1]*(ego_xy[0]-x_min+res/2.)/(x_max-x_min+res)).astype(int) # column index
    ego_y_ind = np.floor(global_grid_y.shape[0]*(ego_xy[1]-y_min+res/2.)/(y_max-y_min+res)).astype(int) # row index
    ego_global_ind = np.vstack((ego_y_ind.flatten(), ego_x_ind.flatten())).T
    # Look for the matching global_grid indices between the ref_grid and ego_grid.
    kdtree = cKDTree(ref_global_ind)
    dists, inds = kdtree.query(ego_global_ind)
    pred_egoGrid_flat = pred_egoGrid.flatten()
    pred_maps_flat = pred_maps.flatten()
    # Back to the local grid indices. Tolerance should be an integer because the kd-tree is comparing indices.
    ego_ind = flat_indicies[np.where(dists<=tolerance)]
    ref_ind = inds[np.where(dists<=tolerance)]
    # Assign the values for the corresponding cells.
    pred_egoGrid_flat[ego_ind] = pred_maps_flat[ref_ind]
    pred_egoGrid = pred_egoGrid_flat.reshape(pred_egoGrid.shape)
    return pred_egoGrid
def Transfer_to_EgoGrid(ref_local_xy, pred_maps, ego_local_xy, ego_sensor_grid, endpoint, res=0.1, mask_unk=None):
    """Transfer driver-sensor grid predictions into the ego vehicle's grid frame.

    Goal: transfer pred_maps (driver sensor's grid) cell information to the
    unknown cells of the ego car's sensor grid, using the global grid as an
    intermediate index space (ref indx --> global indx --> ego indx).

    :param ref_local_xy: x/y coordinates of the reference (driver sensor) grid cells,
        or None-valued when the agent has no data
    :param pred_maps: predicted values in the reference grid
    :param ego_local_xy: x/y coordinates of the ego sensor grid cells
    :param ego_sensor_grid: ego sensor grid (only its shape is used here)
    :param endpoint: (x_min, y_min, x_max, y_max) bounds of the shared global grid
    :param res: local grid resolution
    :param mask_unk: boolean mask of unknown ego cells, forwarded to mask_in_EgoGrid
    :return: ego-frame grid with transferred values, or None when the reference
        data is missing
    """
    global x_min, x_max, y_min, y_max
    x_min = endpoint[0]
    x_max = endpoint[2]
    y_min = endpoint[1]
    y_max = endpoint[3]
    global_res = 1.0
    global_grid_x, global_grid_y = global_grid(np.array([x_min, y_min]), np.array([x_max, y_max]), global_res)
    # No reference data for this agent: nothing can be transferred.
    # BUGFIX: the original appended to an undefined `pred_maps_egoGrid` list here
    # (NameError) and then fell through to `return pred_egoGrid` with
    # `pred_egoGrid` unbound; returning None keeps the "no data" intent explicit.
    if np.any(ref_local_xy[0] == None):
        return None
    # Start from an all-unknown grid (value 2 marks unknown cells) and fill the
    # cells that can be matched through the global grid.  (The dead
    # `copy.copy(ego_sensor_grid)` assignment, immediately overwritten, is removed.)
    pred_egoGrid = np.ones(ego_sensor_grid.shape) * 2
    pred_egoGrid = mask_in_EgoGrid(global_grid_x, global_grid_y, ref_local_xy, ego_local_xy, pred_egoGrid, pred_maps, res, mask_unk)
    return pred_egoGrid
"numpy.ones",
"scipy.spatial.cKDTree",
"numpy.where",
"numpy.floor",
"numpy.any",
"numpy.array",
"copy.copy"
] | [((501, 519), 'numpy.where', 'np.where', (['mask_unk'], {}), '(mask_unk)\n', (509, 519), True, 'import numpy as np\n'), ((1460, 1483), 'scipy.spatial.cKDTree', 'cKDTree', (['ref_global_ind'], {}), '(ref_global_ind)\n', (1467, 1483), False, 'from scipy.spatial import cKDTree\n'), ((3095, 3126), 'numpy.any', 'np.any', (['(ref_local_xy[0] == None)'], {}), '(ref_local_xy[0] == None)\n', (3101, 3126), True, 'import numpy as np\n'), ((1757, 1785), 'numpy.where', 'np.where', (['(dists <= tolerance)'], {}), '(dists <= tolerance)\n', (1765, 1785), True, 'import numpy as np\n'), ((1804, 1832), 'numpy.where', 'np.where', (['(dists <= tolerance)'], {}), '(dists <= tolerance)\n', (1812, 1832), True, 'import numpy as np\n'), ((3027, 3051), 'numpy.array', 'np.array', (['[x_min, y_min]'], {}), '([x_min, y_min])\n', (3035, 3051), True, 'import numpy as np\n'), ((3051, 3075), 'numpy.array', 'np.array', (['[x_max, y_max]'], {}), '([x_max, y_max])\n', (3059, 3075), True, 'import numpy as np\n'), ((3201, 3227), 'copy.copy', 'copy.copy', (['ego_sensor_grid'], {}), '(ego_sensor_grid)\n', (3210, 3227), False, 'import copy\n'), ((722, 816), 'numpy.floor', 'np.floor', (['(global_grid_x.shape[1] * (ref_xy[0] - x_min + res / 2.0) / (x_max - x_min +\n res))'], {}), '(global_grid_x.shape[1] * (ref_xy[0] - x_min + res / 2.0) / (x_max -\n x_min + res))\n', (730, 816), True, 'import numpy as np\n'), ((841, 935), 'numpy.floor', 'np.floor', (['(global_grid_y.shape[0] * (ref_xy[1] - y_min + res / 2.0) / (y_max - y_min +\n res))'], {}), '(global_grid_y.shape[0] * (ref_xy[1] - y_min + res / 2.0) / (y_max -\n y_min + res))\n', (849, 935), True, 'import numpy as np\n'), ((1067, 1161), 'numpy.floor', 'np.floor', (['(global_grid_x.shape[1] * (ego_xy[0] - x_min + res / 2.0) / (x_max - x_min +\n res))'], {}), '(global_grid_x.shape[1] * (ego_xy[0] - x_min + res / 2.0) / (x_max -\n x_min + res))\n', (1075, 1161), True, 'import numpy as np\n'), ((1186, 1280), 'numpy.floor', 'np.floor', 
(['(global_grid_y.shape[0] * (ego_xy[1] - y_min + res / 2.0) / (y_max - y_min +\n res))'], {}), '(global_grid_y.shape[0] * (ego_xy[1] - y_min + res / 2.0) / (y_max -\n y_min + res))\n', (1194, 1280), True, 'import numpy as np\n'), ((3252, 3282), 'numpy.ones', 'np.ones', (['ego_sensor_grid.shape'], {}), '(ego_sensor_grid.shape)\n', (3259, 3282), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from emailhub.utils.email import send_unsent_emails
log = logging.getLogger('emailhub')
class Command(BaseCommand):
    """Entry point for the ``emailhub`` management command."""

    help = 'EmailHub management command'

    def add_arguments(self, parser):
        """Register the command-line flags; each is a simple on/off switch."""
        # (long flag, destination name, help text)
        switches = (
            ('--send', 'send', 'Send unsent emails'),
            ('--status', 'status', 'EmailHub system status'),
            ('--create-template', 'create_template', 'Create a new template'),
            ('--list-templates', 'list_templates', 'List templates'),
        )
        for flag, dest, help_text in switches:
            parser.add_argument(
                flag,
                dest=dest,
                action='store_true',
                default=False,
                help=help_text)

    def handle(self, *args, **options):
        """Dispatch on the parsed options; currently only --send does work."""
        if options.get('send'):
            send_unsent_emails()
| [
"logging.getLogger",
"emailhub.utils.email.send_unsent_emails"
] | [((194, 223), 'logging.getLogger', 'logging.getLogger', (['"""emailhub"""'], {}), "('emailhub')\n", (211, 223), False, 'import logging\n'), ((1215, 1235), 'emailhub.utils.email.send_unsent_emails', 'send_unsent_emails', ([], {}), '()\n', (1233, 1235), False, 'from emailhub.utils.email import send_unsent_emails\n')] |
from django.contrib import admin
from .models import soc, osc, univ_soc_woc
# Expose the society models in the Django admin, in declaration order.
for _model in (soc, osc, univ_soc_woc):
    admin.site.register(_model)
| [
"django.contrib.admin.site.register"
] | [((80, 104), 'django.contrib.admin.site.register', 'admin.site.register', (['soc'], {}), '(soc)\n', (99, 104), False, 'from django.contrib import admin\n'), ((106, 130), 'django.contrib.admin.site.register', 'admin.site.register', (['osc'], {}), '(osc)\n', (125, 130), False, 'from django.contrib import admin\n'), ((132, 165), 'django.contrib.admin.site.register', 'admin.site.register', (['univ_soc_woc'], {}), '(univ_soc_woc)\n', (151, 165), False, 'from django.contrib import admin\n')] |
'''
This script cleans the OB datasets and combines all the cleaned data into one.
Dataset name: O-27-Da Yan
Semi-automated code; it still needs some hand work. LOL But God is so good to me.
1. There are 9 different buildings in this dataset, and each building has different rooms.
2. Each room has its own window, door, AC, indoor, and outdoor info.
3. Buildings A to F were processed by hand; then I figured out the files can be renamed first and processed by code.
4. Rename the files by type and number, such as window1, indoor1, ac1, door1, etc.
5. Buildings G, H, and I were processed automatically by code.
6. The folders contain multiple data file types (csv and xlsx); figure out the file type, then read it into pandas.
7. Concatenate the outdoor datetime and temperature with the AC data, then judge whether the AC is on or off.
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# --- Paths and setup: input/output locations, building names, result frames ---
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# Building folders are named 'A' through 'I' (first 9 uppercase letters).
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# Accumulator dataframes; every per-building/per-room file is concatenated into these.
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path)  # work from the dataset root
sub_folders = next(os.walk('.'))[1]  # child directories = individual rooms
root_files = next(os.walk('.'))[2]  # files directly under the root
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files))  # filter out the outdoor files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manual processed data '''
# --- Buildings A-F were pre-processed by hand; their CSVs already carry ID columns,
# so each category just needs reading and appending to the combined frames. ---
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
    print(f'Reading the data under building folder {bld_name}')
    building_path = data_path + bld_name + '/'
    os.chdir(building_path)  # work inside this building's folder
    sub_folders = next(os.walk('.'))[1]  # child directories = individual rooms (unused here)
    root_files = next(os.walk('.'))[2]  # files directly under the building folder
    # Split the file list by measurement category based on filename keywords.
    indoor_files = list(filter(lambda name: 'indoor' in name, root_files))  # indoor measurement files
    window_files = list(filter(lambda name: 'window' in name, root_files))  # window status files
    hvac_files = list(filter(lambda name: 'hvac' in name, root_files))  # AC/HVAC measurement files
    door_files = list(filter(lambda name: 'door_status' in name, root_files))  # door status files
    # Read and combine the files under this folder, one category at a time.
    if indoor_files:  # make sure it is not empty
        indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
        combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True)  # append to the accumulator
    else:
        pass
    if window_files:
        window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
        combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True)  # append to the accumulator
    else:
        pass
    if hvac_files:
        hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
        combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True)  # append to the accumulator
        # print(combined_hvac.isnull().sum())
        # print(index)
    else:
        pass
    if door_files:
        door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
        combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True)  # append to the accumulator
        # print(combined_door.isnull().sum())
        # print(index)
    else:
        pass
pass
''' auto mated process by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
'''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
name, extension = os.path.splitext(window_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
name, extension = os.path.splitext(door_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
else:
temp_df = pd.read_excel(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status']
temp_df['Door_ID'] = int(name.split('_')[0][4:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_door = pd.concat([combined_door, temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
for hvac_name in hvac_files:
name, extension = os.path.splitext(hvac_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
else:
temp_df = pd.read_excel(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
temp_df['HVAC_Zone_ID'] = int(name.split('_')[0][2:]) # get the number of ac
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_hvac = pd.concat([combined_hvac, temp_df], ignore_index=True) # concat the data
else:
pass
# --- Clean the combined frames, normalize types, and save intermediate CSVs ---
# Drop rows whose Date_Time is null (unusable without a timestamp).
combined_indoor = combined_indoor[combined_indoor['Date_Time'].notnull()]
combined_outdoor = combined_outdoor[combined_outdoor['Date_Time'].notnull()]
combined_window = combined_window[combined_window['Date_Time'].notnull()]
combined_door = combined_door[combined_door['Date_Time'].notnull()]
combined_hvac = combined_hvac[combined_hvac['Date_Time'].notnull()]
# Recode window/door status: raw 0 -> 1 (open), raw 1 or 2 -> 0 (closed).
# NOTE(review): the raw-value semantics are assumed from this mapping — confirm with the data dictionary.
combined_door['Door_Status'] = combined_door['Door_Status'].replace([0, 1, 2], [1, 0, 0])
combined_window['Window_Status'] = combined_window['Window_Status'].replace([0, 1, 2], [1, 0, 0])
# Parse Date_Time strings into proper datetimes.
print("Formatting datetime!")
combined_indoor['Date_Time'] = pd.to_datetime(combined_indoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_outdoor['Date_Time'] = pd.to_datetime(combined_outdoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_window['Date_Time'] = pd.to_datetime(combined_window['Date_Time'], infer_datetime_format=True)
combined_door['Date_Time'] = pd.to_datetime(combined_door['Date_Time'], infer_datetime_format=True)
combined_hvac['Date_Time'] = pd.to_datetime(combined_hvac['Date_Time'], infer_datetime_format=True)
# Inspect the inferred dtypes, then force all ID columns to int.
print(combined_indoor.dtypes)
print(combined_outdoor.dtypes)
print(combined_window.dtypes)
print(combined_door.dtypes)
print(combined_hvac.dtypes)
combined_indoor['Building_ID'] = combined_indoor['Building_ID'].astype(int)
combined_indoor['Room_ID'] = combined_indoor['Room_ID'].astype(int)
combined_outdoor['Building_ID'] = combined_outdoor['Building_ID'].astype(int)
combined_window['Building_ID'] = combined_window['Building_ID'].astype(int)
combined_window['Room_ID'] = combined_window['Room_ID'].astype(int)
combined_window['Window_ID'] = combined_window['Window_ID'].astype(int)
combined_door['Building_ID'] = combined_door['Building_ID'].astype(int)
combined_door['Room_ID'] = combined_door['Room_ID'].astype(int)
combined_door['Door_ID'] = combined_door['Door_ID'].astype(int)
combined_hvac['Building_ID'] = combined_hvac['Building_ID'].astype(int)
combined_hvac['Room_ID'] = combined_hvac['Room_ID'].astype(int)
combined_hvac['HVAC_Zone_ID'] = combined_hvac['HVAC_Zone_ID'].astype(int)
# replace null with empty
# # check combined data
# print('check null values')
# print(combined_window.isnull().sum())
# print(combined_door.isnull().sum())
# print(combined_hvac.isnull().sum())
#
# # check the unique IDs
# print(combined_window.Window_ID.unique())
# print(combined_door.Door_ID.unique())
# print(combined_hvac.HVAC_Zone_ID.unique())
#
# print(combined_hvac.Building_ID.unique())
# print(combined_window.Building_ID.unique())
# print(combined_door.Building_ID.unique())
# Persist the cleaned combined frames as intermediate CSVs.
combined_indoor.to_csv(save_path + 'combined_indoor.csv', index=False)
combined_outdoor.to_csv(save_path + 'combined_outdoor.csv', index=False)
combined_window.to_csv(save_path + 'combined_window.csv', index=False)
combined_door.to_csv(save_path + 'combined_door.csv', index=False)
combined_hvac.to_csv(save_path + 'combined_hvac.csv', index=False)
''' read templates and save data into the standard templates '''
# --- Pour the cleaned intermediate CSVs into the Annex 79 standard templates ---
# Reload the intermediate results saved above.
combined_indoor = pd.read_csv(save_path + 'combined_indoor.csv')
combined_outdoor = pd.read_csv(save_path + 'combined_outdoor.csv')
combined_window = pd.read_csv(save_path + 'combined_window.csv')
combined_door = pd.read_csv(save_path + 'combined_door.csv')
combined_hvac = pd.read_csv(save_path + 'combined_hvac.csv')
# Read the empty standard templates into pandas (they define the target schema).
template_window = pd.read_csv(template_path+'Window_Status.csv')
template_door = pd.read_csv(template_path+'Door_Status.csv')
template_hvac = pd.read_csv(template_path+'HVAC_Measurement.csv')
template_indoor = pd.read_csv(template_path+'Indoor_Measurement.csv')
template_outdoor = pd.read_csv(template_path+'Outdoor_Measurement.csv')
# Compare template vs. combined columns by eye before concatenating.
print(template_window.columns)
print(combined_window.columns)
print(template_door.columns)
print(combined_door.columns)
print(template_hvac.columns)
print(combined_hvac.columns)
print(template_indoor.columns)
print(combined_indoor.columns)
print(template_outdoor.columns)
print(combined_outdoor.columns)
# Append the combined data beneath the template headers.
template_window = pd.concat([template_window, combined_window], ignore_index=True)
template_door = pd.concat([template_door, combined_door], ignore_index=True)
template_hvac = pd.concat([template_hvac, combined_hvac], ignore_index=True)
template_indoor = pd.concat([template_indoor, combined_indoor], ignore_index=True)
template_outdoor = pd.concat([template_outdoor, combined_outdoor], ignore_index=True)
# Drop template columns with no data here ('Buiulding_ID' is a typo in the template itself).
template_door = template_door.drop(columns=['Study_ID'])
template_outdoor = template_outdoor.drop(columns=['Buiulding_ID'])
# Sanity-check the merged columns and dtypes.
print(template_window.columns)
print(template_door.columns)
print(template_hvac.columns)
print(template_indoor.columns)
print(template_outdoor.columns)
print(template_window.dtypes)
print(template_door.dtypes)
print(template_hvac.dtypes)
print(template_indoor.dtypes)
print(template_outdoor.dtypes)
# Re-parse Date_Time (read_csv loads it back as strings).
print("Formatting datetime!")
template_indoor['Date_Time'] = pd.to_datetime(template_indoor['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_outdoor['Date_Time'] = pd.to_datetime(template_outdoor['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_window['Date_Time'] = pd.to_datetime(template_window['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_door['Date_Time'] = pd.to_datetime(template_door['Date_Time'], format='%Y-%m-%d %H:%M:%S')
template_hvac['Date_Time'] = pd.to_datetime(template_hvac['Date_Time'], format='%Y-%m-%d %H:%M:%S')
# Force all ID columns back to int after the round-trip through CSV.
template_indoor['Building_ID'] = template_indoor['Building_ID'].astype(int)
template_indoor['Room_ID'] = template_indoor['Room_ID'].astype(int)
template_outdoor['Building_ID'] = template_outdoor['Building_ID'].astype(int)
template_window['Building_ID'] = template_window['Building_ID'].astype(int)
template_window['Room_ID'] = template_window['Room_ID'].astype(int)
template_window['Window_ID'] = template_window['Window_ID'].astype(int)
template_door['Building_ID'] = template_door['Building_ID'].astype(int)
template_door['Room_ID'] = template_door['Room_ID'].astype(int)
template_door['Door_ID'] = template_door['Door_ID'].astype(int)
template_hvac['Building_ID'] = template_hvac['Building_ID'].astype(int)
template_hvac['Room_ID'] = template_hvac['Room_ID'].astype(int)
template_hvac['HVAC_Zone_ID'] = template_hvac['HVAC_Zone_ID'].astype(int)
# Save the populated templates.
template_window.to_csv(save_path+'Window_Status.csv', index=False)
template_door.to_csv(save_path+'Door_Status.csv', index=False)
template_hvac.to_csv(save_path+'HVAC_Measurement.csv', index=False)
template_indoor.to_csv(save_path+'Indoor_Measurement.csv', index=False)
template_outdoor.to_csv(save_path+'Outdoor_Measurement.csv', index=False)
# --- Final checks, then derive AC on/off status from supply vs. outdoor temperature ---
# Check the unique room ids and building ids.
print(template_window['Room_ID'].unique())
print(template_window['Building_ID'].unique())
print(template_door['Room_ID'].unique())
print(template_door['Building_ID'].unique())
print(template_hvac['Room_ID'].unique())
print(template_hvac['Building_ID'].unique())
print(template_indoor['Room_ID'].unique())
print(template_indoor['Building_ID'].unique())
print(template_outdoor['Building_ID'].unique())
''' convert ac measurement to on/off status '''
# Reload the saved templates so this section can run standalone.
template_hvac = pd.read_csv(save_path+'HVAC_Measurement.csv')
template_outdoor = pd.read_csv(save_path+'Outdoor_Measurement.csv')
# check columns
print(template_hvac.columns)
print(template_outdoor.columns)
# Check which buildings have both AC data and outdoor data (results not stored;
# these lines are interactive-inspection leftovers).
template_hvac.groupby(['Room_ID', 'Building_ID']).size().reset_index()
template_outdoor.groupby(['Building_ID']).size().reset_index()
# check datetime
template_hvac['Date_Time']
template_outdoor['Date_Time']
# Attach each HVAC reading's matching outdoor temperature by building and timestamp.
hvac_df = template_hvac.merge(template_outdoor, how='left', on=['Building_ID', 'Date_Time'])
# use below two columns to calculate ac status
# hvac_df[['yapan_supply _t', 'Outdoor_Temp']]
hvac_df = hvac_df[hvac_df['Outdoor_Temp'].notnull()]
# Supply air colder than outdoor air implies cooling is running.
hvac_df['Cooling_Status'] = hvac_df.loc[:, 'Outdoor_Temp'] - hvac_df.loc[:, 'yapan_supply _t']
# Convert negative differences to 0 (off), positive to 1 (on).
hvac_df.loc[hvac_df['Cooling_Status'] < 0, 'Cooling_Status'] = 0
hvac_df.loc[hvac_df['Cooling_Status'] > 0, 'Cooling_Status'] = 1
# Keep only the original template columns (plus Cooling_Status) and save.
cols = list(template_hvac)  # get the column names as a list
hvac_df = hvac_df[cols]  # keep only desired columns
hvac_df.drop(['yapan_supply _t'], axis=1, inplace=True)  # drop the raw supply temperature
hvac_df.to_csv(save_path+'/final/HVAC_Measurement.csv', index=False)
| [
"os.listdir",
"pandas.read_csv",
"os.walk",
"os.path.splitext",
"os.chdir",
"datetime.datetime.now",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] | [((1490, 1513), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1511, 1513), False, 'import datetime\n'), ((1570, 1584), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1582, 1584), True, 'import pandas as pd\n'), ((1601, 1615), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1613, 1615), True, 'import pandas as pd\n'), ((1632, 1646), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1644, 1646), True, 'import pandas as pd\n'), ((1665, 1679), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1677, 1679), True, 'import pandas as pd\n'), ((1699, 1713), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1711, 1713), True, 'import pandas as pd\n'), ((1775, 1794), 'os.chdir', 'os.chdir', (['data_path'], {}), '(data_path)\n', (1783, 1794), False, 'import os\n'), ((8796, 8865), 'pandas.to_datetime', 'pd.to_datetime', (["combined_indoor['Date_Time']"], {'format': '"""%m/%d/%Y %H:%M"""'}), "(combined_indoor['Date_Time'], format='%m/%d/%Y %H:%M')\n", (8810, 8865), True, 'import pandas as pd\n'), ((8898, 8968), 'pandas.to_datetime', 'pd.to_datetime', (["combined_outdoor['Date_Time']"], {'format': '"""%m/%d/%Y %H:%M"""'}), "(combined_outdoor['Date_Time'], format='%m/%d/%Y %H:%M')\n", (8912, 8968), True, 'import pandas as pd\n'), ((9000, 9072), 'pandas.to_datetime', 'pd.to_datetime', (["combined_window['Date_Time']"], {'infer_datetime_format': '(True)'}), "(combined_window['Date_Time'], infer_datetime_format=True)\n", (9014, 9072), True, 'import pandas as pd\n'), ((9102, 9172), 'pandas.to_datetime', 'pd.to_datetime', (["combined_door['Date_Time']"], {'infer_datetime_format': '(True)'}), "(combined_door['Date_Time'], infer_datetime_format=True)\n", (9116, 9172), True, 'import pandas as pd\n'), ((9202, 9272), 'pandas.to_datetime', 'pd.to_datetime', (["combined_hvac['Date_Time']"], {'infer_datetime_format': '(True)'}), "(combined_hvac['Date_Time'], infer_datetime_format=True)\n", (9216, 9272), True, 'import pandas as pd\n'), 
((11236, 11282), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'combined_indoor.csv')"], {}), "(save_path + 'combined_indoor.csv')\n", (11247, 11282), True, 'import pandas as pd\n'), ((11302, 11349), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'combined_outdoor.csv')"], {}), "(save_path + 'combined_outdoor.csv')\n", (11313, 11349), True, 'import pandas as pd\n'), ((11368, 11414), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'combined_window.csv')"], {}), "(save_path + 'combined_window.csv')\n", (11379, 11414), True, 'import pandas as pd\n'), ((11431, 11475), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'combined_door.csv')"], {}), "(save_path + 'combined_door.csv')\n", (11442, 11475), True, 'import pandas as pd\n'), ((11492, 11536), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'combined_hvac.csv')"], {}), "(save_path + 'combined_hvac.csv')\n", (11503, 11536), True, 'import pandas as pd\n'), ((11597, 11645), 'pandas.read_csv', 'pd.read_csv', (["(template_path + 'Window_Status.csv')"], {}), "(template_path + 'Window_Status.csv')\n", (11608, 11645), True, 'import pandas as pd\n'), ((11660, 11706), 'pandas.read_csv', 'pd.read_csv', (["(template_path + 'Door_Status.csv')"], {}), "(template_path + 'Door_Status.csv')\n", (11671, 11706), True, 'import pandas as pd\n'), ((11721, 11772), 'pandas.read_csv', 'pd.read_csv', (["(template_path + 'HVAC_Measurement.csv')"], {}), "(template_path + 'HVAC_Measurement.csv')\n", (11732, 11772), True, 'import pandas as pd\n'), ((11789, 11842), 'pandas.read_csv', 'pd.read_csv', (["(template_path + 'Indoor_Measurement.csv')"], {}), "(template_path + 'Indoor_Measurement.csv')\n", (11800, 11842), True, 'import pandas as pd\n'), ((11860, 11914), 'pandas.read_csv', 'pd.read_csv', (["(template_path + 'Outdoor_Measurement.csv')"], {}), "(template_path + 'Outdoor_Measurement.csv')\n", (11871, 11914), True, 'import pandas as pd\n'), ((12265, 12329), 'pandas.concat', 'pd.concat', (['[template_window, combined_window]'], 
{'ignore_index': '(True)'}), '([template_window, combined_window], ignore_index=True)\n', (12274, 12329), True, 'import pandas as pd\n'), ((12346, 12406), 'pandas.concat', 'pd.concat', (['[template_door, combined_door]'], {'ignore_index': '(True)'}), '([template_door, combined_door], ignore_index=True)\n', (12355, 12406), True, 'import pandas as pd\n'), ((12423, 12483), 'pandas.concat', 'pd.concat', (['[template_hvac, combined_hvac]'], {'ignore_index': '(True)'}), '([template_hvac, combined_hvac], ignore_index=True)\n', (12432, 12483), True, 'import pandas as pd\n'), ((12502, 12566), 'pandas.concat', 'pd.concat', (['[template_indoor, combined_indoor]'], {'ignore_index': '(True)'}), '([template_indoor, combined_indoor], ignore_index=True)\n', (12511, 12566), True, 'import pandas as pd\n'), ((12586, 12652), 'pandas.concat', 'pd.concat', (['[template_outdoor, combined_outdoor]'], {'ignore_index': '(True)'}), '([template_outdoor, combined_outdoor], ignore_index=True)\n', (12595, 12652), True, 'import pandas as pd\n'), ((13182, 13254), 'pandas.to_datetime', 'pd.to_datetime', (["template_indoor['Date_Time']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(template_indoor['Date_Time'], format='%Y-%m-%d %H:%M:%S')\n", (13196, 13254), True, 'import pandas as pd\n'), ((13287, 13360), 'pandas.to_datetime', 'pd.to_datetime', (["template_outdoor['Date_Time']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(template_outdoor['Date_Time'], format='%Y-%m-%d %H:%M:%S')\n", (13301, 13360), True, 'import pandas as pd\n'), ((13392, 13464), 'pandas.to_datetime', 'pd.to_datetime', (["template_window['Date_Time']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(template_window['Date_Time'], format='%Y-%m-%d %H:%M:%S')\n", (13406, 13464), True, 'import pandas as pd\n'), ((13494, 13564), 'pandas.to_datetime', 'pd.to_datetime', (["template_door['Date_Time']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(template_door['Date_Time'], format='%Y-%m-%d %H:%M:%S')\n", (13508, 13564), True, 'import pandas as 
pd\n'), ((13594, 13664), 'pandas.to_datetime', 'pd.to_datetime', (["template_hvac['Date_Time']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(template_hvac['Date_Time'], format='%Y-%m-%d %H:%M:%S')\n", (13608, 13664), True, 'import pandas as pd\n'), ((15422, 15469), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'HVAC_Measurement.csv')"], {}), "(save_path + 'HVAC_Measurement.csv')\n", (15433, 15469), True, 'import pandas as pd\n'), ((15487, 15537), 'pandas.read_csv', 'pd.read_csv', (["(save_path + 'Outdoor_Measurement.csv')"], {}), "(save_path + 'Outdoor_Measurement.csv')\n", (15498, 15537), True, 'import pandas as pd\n'), ((2489, 2512), 'os.chdir', 'os.chdir', (['building_path'], {}), '(building_path)\n', (2497, 2512), False, 'import os\n'), ((4508, 4531), 'os.chdir', 'os.chdir', (['building_path'], {}), '(building_path)\n', (4516, 4531), False, 'import os\n'), ((1821, 1833), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (1828, 1833), False, 'import os\n'), ((1915, 1927), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (1922, 1927), False, 'import os\n'), ((2114, 2128), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (2125, 2128), True, 'import pandas as pd\n'), ((3333, 3396), 'pandas.concat', 'pd.concat', (['[combined_indoor, indoor_temp_df]'], {'ignore_index': '(True)'}), '([combined_indoor, indoor_temp_df], ignore_index=True)\n', (3342, 3396), True, 'import pandas as pd\n'), ((3561, 3624), 'pandas.concat', 'pd.concat', (['[combined_window, window_temp_df]'], {'ignore_index': '(True)'}), '([combined_window, window_temp_df], ignore_index=True)\n', (3570, 3624), True, 'import pandas as pd\n'), ((3781, 3840), 'pandas.concat', 'pd.concat', (['[combined_hvac, hvac_temp_df]'], {'ignore_index': '(True)'}), '([combined_hvac, hvac_temp_df], ignore_index=True)\n', (3790, 3840), True, 'import pandas as pd\n'), ((4066, 4125), 'pandas.concat', 'pd.concat', (['[combined_door, door_temp_df]'], {'ignore_index': '(True)'}), '([combined_door, door_temp_df], 
ignore_index=True)\n', (4075, 4125), True, 'import pandas as pd\n'), ((4889, 4908), 'os.chdir', 'os.chdir', (['room_path'], {}), '(room_path)\n', (4897, 4908), False, 'import os\n'), ((4937, 4949), 'os.listdir', 'os.listdir', ([], {}), '()\n', (4947, 4949), False, 'import os\n'), ((2543, 2555), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (2550, 2555), False, 'import os\n'), ((2641, 2653), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (2648, 2653), False, 'import os\n'), ((4562, 4574), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (4569, 4574), False, 'import os\n'), ((4660, 4672), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (4667, 4672), False, 'import os\n'), ((3268, 3282), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (3279, 3282), True, 'import pandas as pd\n'), ((3496, 3510), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (3507, 3510), True, 'import pandas as pd\n'), ((3720, 3734), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (3731, 3734), True, 'import pandas as pd\n'), ((4005, 4019), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (4016, 4019), True, 'import pandas as pd\n'), ((5428, 5457), 'os.path.splitext', 'os.path.splitext', (['window_name'], {}), '(window_name)\n', (5444, 5457), False, 'import os\n'), ((6145, 6201), 'pandas.concat', 'pd.concat', (['[combined_window, temp_df]'], {'ignore_index': '(True)'}), '([combined_window, temp_df], ignore_index=True)\n', (6154, 6201), True, 'import pandas as pd\n'), ((6351, 6378), 'os.path.splitext', 'os.path.splitext', (['door_name'], {}), '(door_name)\n', (6367, 6378), False, 'import os\n'), ((7054, 7108), 'pandas.concat', 'pd.concat', (['[combined_door, temp_df]'], {'ignore_index': '(True)'}), '([combined_door, temp_df], ignore_index=True)\n', (7063, 7108), True, 'import pandas as pd\n'), ((7258, 7285), 'os.path.splitext', 'os.path.splitext', (['hvac_name'], {}), '(hvac_name)\n', (7274, 7285), False, 'import os\n'), ((7976, 8030), 'pandas.concat', 
'pd.concat', (['[combined_hvac, temp_df]'], {'ignore_index': '(True)'}), '([combined_hvac, temp_df], ignore_index=True)\n', (7985, 8030), True, 'import pandas as pd\n'), ((5595, 5635), 'pandas.read_csv', 'pd.read_csv', (['window_name'], {'usecols': '[0, 1]'}), '(window_name, usecols=[0, 1])\n', (5606, 5635), True, 'import pandas as pd\n'), ((5779, 5821), 'pandas.read_excel', 'pd.read_excel', (['window_name'], {'usecols': '[0, 1]'}), '(window_name, usecols=[0, 1])\n', (5792, 5821), True, 'import pandas as pd\n'), ((6516, 6554), 'pandas.read_csv', 'pd.read_csv', (['door_name'], {'usecols': '[0, 1]'}), '(door_name, usecols=[0, 1])\n', (6527, 6554), True, 'import pandas as pd\n'), ((6696, 6736), 'pandas.read_excel', 'pd.read_excel', (['door_name'], {'usecols': '[0, 1]'}), '(door_name, usecols=[0, 1])\n', (6709, 6736), True, 'import pandas as pd\n'), ((7423, 7461), 'pandas.read_csv', 'pd.read_csv', (['hvac_name'], {'usecols': '[0, 1]'}), '(hvac_name, usecols=[0, 1])\n', (7434, 7461), True, 'import pandas as pd\n'), ((7585, 7625), 'pandas.read_excel', 'pd.read_excel', (['hvac_name'], {'usecols': '[0, 1]'}), '(hvac_name, usecols=[0, 1])\n', (7598, 7625), True, 'import pandas as pd\n')] |
# URL routes for the consultants app.
from django.urls import path
from . views import ConsultantCreateView, UserConsultantPageView, ConsultantDetailView, ConsultantPageView, ConsultantDeleteView
from . import views as consultant_views
# NOTE(review): ConsultantCreateView is imported but never routed; the
# 'new/' route uses the function-based consultant_views.consultantCreate
# instead -- confirm which one is intended.
urlpatterns = [
    # Consultant list at the app root.
    path('', ConsultantPageView.as_view(), name='consultants-home'),
    # Detail page for a single consultant by primary key.
    path('<int:pk>/', ConsultantDetailView.as_view(), name='consultant-detail'),
    path('new/', consultant_views.consultantCreate, name='consultant-create'),
    # All consultants belonging to one user.
    path('<str:username>/consultants', UserConsultantPageView.as_view(), name='user-consultant'),
    path('<int:pk>/update/',consultant_views.consultantUpdateView, name='consultant-update'),
    path('<int:pk>/consultant/delete/', ConsultantDeleteView.as_view(), name='consultant-delete'),
]
| [
"django.urls.path"
] | [((371, 444), 'django.urls.path', 'path', (['"""new/"""', 'consultant_views.consultantCreate'], {'name': '"""consultant-create"""'}), "('new/', consultant_views.consultantCreate, name='consultant-create')\n", (375, 444), False, 'from django.urls import path\n'), ((548, 642), 'django.urls.path', 'path', (['"""<int:pk>/update/"""', 'consultant_views.consultantUpdateView'], {'name': '"""consultant-update"""'}), "('<int:pk>/update/', consultant_views.consultantUpdateView, name=\n 'consultant-update')\n", (552, 642), False, 'from django.urls import path\n')] |
from enum import Enum
from typing import Optional
import pkg_resources
from gi.repository import Gtk
from gi.repository import Pango
# Shared CSS provider: loads the stylesheet (shipped inside the 'cct'
# package) that assigns one colour per indicator state.
cssprovider = Gtk.CssProvider()
cssprovider.load_from_path(pkg_resources.resource_filename('cct', 'resource/css/indicatorcolors.css'))
class IndicatorState(Enum):
    """Colour states an Indicator can display.

    The values double as CSS widget-name suffixes: an indicator in state
    ``OK`` gets the widget name ``indicator_ok`` (see Indicator below).
    """
    OK = 'ok'
    WARNING = 'warning'
    ERROR = 'error'
    NEUTRAL = 'neutral'
    UNKNOWN = 'unknown'
    def __str__(self):
        # str(state) is used to build the CSS widget name.
        return self.value
class Indicator(Gtk.Box):
    """A labelled, colour-coded status field.

    A caption (``_label``) is packed above an event box containing the
    value text.  The event box's CSS widget name is set to
    ``indicator_<state>`` so the colours defined in indicatorcolors.css
    apply to it.
    """

    def __init__(self, label: str, value: object, state: IndicatorState, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if 'orientation' not in kwargs:
            self.set_orientation(Gtk.Orientation.VERTICAL)
        self._label = Gtk.Label(label=label)
        self.pack_start(self._label, True, True, 0)
        # The event box carries the CSS widget name selecting the state colour.
        self._eventbox = Gtk.EventBox()
        self.pack_start(self._eventbox, True, True, 0)
        self._valuelabel = Gtk.Label(label=str(value))
        # Ellipsize in the middle so over-long values shrink instead of
        # widening the whole widget.
        self._valuelabel.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
        self._valuelabel.set_max_width_chars(1)
        self._value = value
        self._eventbox.add(self._valuelabel)
        self._eventbox.set_border_width(5)
        self._eventbox.set_name('indicator_' + str(state))
        self._eventbox.get_style_context().add_provider(cssprovider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
        self.set_hexpand(True)
        self.set_hexpand_set(True)
        self._eventbox.queue_draw()

    def set_label(self, text: str):
        """Set the caption text."""
        return self._label.set_text(text)

    def get_label(self) -> str:
        """Return the caption text."""
        return self._label.get_text()

    def set_value(self, value: object, state: Optional[IndicatorState] = None):
        """Display *value* and optionally switch the colour state.

        Bug fix: the tooltips previously concatenated the raw *value*
        (``': ' + value``), raising TypeError for any non-str value even
        though the label itself used ``str(value)``; both now use
        ``str(value)`` consistently.
        """
        self._value = value
        res = self._valuelabel.set_text(str(value))
        tooltip = self._label.get_text() + ': ' + str(value)
        self._eventbox.set_tooltip_text(tooltip)
        self._valuelabel.set_tooltip_text(tooltip)
        if state is not None:
            self.set_state(state)
        return res

    def get_value(self):
        """Return the value last passed to __init__()/set_value()."""
        return self._value

    def set_state(self, state):
        """Recolour the widget by renaming it to ``indicator_<state>``."""
        res = self._eventbox.set_name('indicator_' + str(state))
        self._valuelabel.set_name('indicator_' + str(state))
        self._eventbox.queue_draw()
        self._valuelabel.queue_draw()
        return res

    def get_state(self):
        """Recover the current IndicatorState from the widget's CSS name."""
        return IndicatorState(self._eventbox.get_name().split('_', 1)[-1])
| [
"gi.repository.Gtk.CssProvider",
"gi.repository.Gtk.Label",
"gi.repository.Gtk.EventBox",
"pkg_resources.resource_filename"
] | [((149, 166), 'gi.repository.Gtk.CssProvider', 'Gtk.CssProvider', ([], {}), '()\n', (164, 166), False, 'from gi.repository import Gtk\n'), ((194, 268), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""cct"""', '"""resource/css/indicatorcolors.css"""'], {}), "('cct', 'resource/css/indicatorcolors.css')\n", (225, 268), False, 'import pkg_resources\n'), ((738, 760), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'label'}), '(label=label)\n', (747, 760), False, 'from gi.repository import Gtk\n'), ((936, 950), 'gi.repository.Gtk.EventBox', 'Gtk.EventBox', ([], {}), '()\n', (948, 950), False, 'from gi.repository import Gtk\n')] |
# chapter Matplotlib Plotting
'''
The plot() function is used to draw points (markers) in a diagram.
By default, the plot() function draws a line from point to point.
The function takes parameters for specifying points in the diagram.
Parameter 1 is an array containing the points on the x-axis.
Parameter 2 is an array containing the points on the y-axis.
If we need to plot a line from (1, 3) to (8, 10), we have to pass two arrays [1, 8] and [3, 10] to the plot function.
'''
# Draw a line in a diagram from position (1, 4) to position (9, 10):
import matplotlib.pyplot as plt
import numpy as r
import sys

x = r.array([1, 9])
y = r.array([4, 10])
plt.plot(x, y)
plt.show()

'''
Plotting Without Line
To plot only the markers, you can use shortcut string notation parameter 'o', which means 'rings'.
'''
x = r.array([3, 10])
y = r.array([0, 34])
plt.plot(x, y, 'o')
plt.show()

'''
Multiple Points
You can plot as many points as you like, just make sure you have the same number of points on both axes.
Example
Draw a line in a diagram from position (1, 3) to (2, 6), then to (4, 8) and finally to position (9, 10):
'''
x = r.array([1, 2, 4, 9])
y = r.array([3, 6, 8, 10])
plt.plot(x, y, label="red")
# Two lines to make an online compiler able to draw.
# Bug fix: savefig() must come *before* show() -- show() clears the
# current figure, so saving afterwards wrote an empty image.
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
plt.show()

'''
Default X-Points
If we do not specify the points on the x-axis, they get the default values 0, 1, 2, 3, ... (depending on the length of the y-points).
So, if we take the same example as above and leave out the x-points, the diagram will look like this:
'''
# Plotting without x-points:
ypoints = r.array([0, 2, 3, 5, 6, 7, 99])
plt.plot(ypoints)
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
plt.show()

# CHAPTER Matplotlib Markers
'''
Markers
You can use the keyword argument marker to emphasize each point with a specified marker:
'''
x = r.array([0, 3, 5, 6, 8, 9])
y = r.array([2, 4, 6, 7, 8, 10])
plt.plot(x, y, marker="*")
plt.show()

'''
Marker Reference
You can choose any of these markers:
Marker	Description
'o'	Circle
'*'	Star
'.'	Point
','	Pixel
'x'	X
'X'	X (filled)
'+'	Plus
'P'	Plus (filled)
's'	Square
'D'	Diamond
'd'	Diamond (thin)
'p'	Pentagon
'H'	Hexagon
'h'	Hexagon
'v'	Triangle Down
'^'	Triangle Up
'<'	Triangle Left
'>'	Triangle Right
'1'	Tri Down
'2'	Tri Up
'3'	Tri Left
'4'	Tri Right
'|'	Vline
'_'	Hline
'''
'''
Format Strings fmt
You can also use the shortcut string notation parameter to specify the marker.
This parameter is also called fmt, and is written with this syntax:
marker|line|color
Example
Plot with a dash-dot red line:
'''
x = r.array([3, 5, 5, 6, 7, 8])
y = r.array([1, 3, 5, 6, 7, 8])
plt.plot(x, y, '-.r')
plt.show()

'''
The marker value can be anything from the Marker Reference above.
The line value can be one of the following:
Line Reference
Line Syntax	Description
'-'	Solid line
':'	Dotted line
'--'	Dashed line
'-.'	Dashed/dotted line
Note: If you leave out the line value in the fmt parameter, no line will be plotted.
'''
'''
Color Reference
Color Syntax	Description
'r'	Red
'g'	Green
'b'	Blue
'c'	Cyan
'm'	Magenta
'y'	Yellow
'k'	Black
'w'	White
'''
'''
Marker Size
You can use the keyword argument markersize or the shorter version, ms, to set the size of the markers:
'''
x = r.array([1, 3, 4, 5, 9, 5])
y = r.array([0, 3, 6, 8, 8, 9])
# Bug fix: ms must be a number (the original passed the string '17'),
# and the original dropped y because x and y had mismatched lengths.
plt.plot(x, y, marker='o', ms=17)
plt.show()

'''
Marker Color
You can use the keyword argument markeredgecolor or the shorter mec to set the color of the edge of the markers:
Example
Set the EDGE color to red:
'''
x = r.array([2, 3, 5, 6])
# Bug fix: the original called r.array('[0,3,5,6,8]') with a *string*
# literal, producing a 0-d string array that was never usable.
y = r.array([0, 3, 5, 6])
plt.plot(x, y, marker='*', ms=34, mec='r')
plt.show()

'''
You can use the keyword argument markerfacecolor or the shorter mfc to set the color inside the edge of the markers:
Example
Set the FACE color to red:
'''
x = r.array([1, 3, 5, 6])
y = r.array([2, 3, 5, 6])
plt.plot(x, y, marker='*', ms=34, mfc='r')
plt.show()

'''
# Use both the mec and mfc arguments to color the entire marker:
# Example
# Set the color of both the edge and the face to red:
'''
y = r.array([0, 4, 6, 7, 7, 8])
plt.plot(y, marker='*', ms=30, mec='r', mfc='r')
plt.show()

'''
You can also use Hexadecimal color values:
Example
Mark each point with a beautiful green color:
...
plt.plot(ypoints, marker = 'o', ms = 20, mec = '#4CAF50', mfc = '#4CAF50')
...
'''
y = r.array([1, 2, 4, 5, 5, 6])
plt.plot(y, ms=34, marker='*', mec='hotpink', mfc='hotpink', linestyle=':')
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.array",
"sys.stdout.flush",
"matplotlib.pyplot.show"
] | [((646, 661), 'numpy.array', 'r.array', (['[1, 9]'], {}), '([1, 9])\n', (653, 661), True, 'import numpy as r\n'), ((665, 681), 'numpy.array', 'r.array', (['[4, 10]'], {}), '([4, 10])\n', (672, 681), True, 'import numpy as r\n'), ((684, 698), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (692, 698), True, 'import matplotlib.pyplot as plt\n'), ((699, 709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (707, 709), True, 'import matplotlib.pyplot as plt\n'), ((852, 868), 'numpy.array', 'r.array', (['[3, 10]'], {}), '([3, 10])\n', (859, 868), True, 'import numpy as r\n'), ((873, 889), 'numpy.array', 'r.array', (['[0, 34]'], {}), '([0, 34])\n', (880, 889), True, 'import numpy as r\n'), ((892, 911), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (900, 911), True, 'import matplotlib.pyplot as plt\n'), ((911, 921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (919, 921), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1202), 'numpy.array', 'r.array', (['[1, 2, 4, 9]'], {}), '([1, 2, 4, 9])\n', (1188, 1202), True, 'import numpy as r\n'), ((1203, 1225), 'numpy.array', 'r.array', (['[3, 6, 8, 10]'], {}), '([3, 6, 8, 10])\n', (1210, 1225), True, 'import numpy as r\n'), ((1226, 1253), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""red"""'}), "(x, y, label='red')\n", (1234, 1253), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1261, 1263), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1349), 'matplotlib.pyplot.savefig', 'plt.savefig', (['sys.stdout.buffer'], {}), '(sys.stdout.buffer)\n', (1330, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1369), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1367, 1369), False, 'import sys\n'), ((1689, 1720), 'numpy.array', 'r.array', (['[0, 2, 3, 5, 6, 7, 99]'], {}), '([0, 2, 3, 5, 6, 7, 99])\n', (1696, 1720), True, 'import numpy as r\n'), 
((1718, 1735), 'matplotlib.pyplot.plot', 'plt.plot', (['ypoints'], {}), '(ypoints)\n', (1726, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1745, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1781), 'matplotlib.pyplot.savefig', 'plt.savefig', (['sys.stdout.buffer'], {}), '(sys.stdout.buffer)\n', (1762, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1801), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1799, 1801), False, 'import sys\n'), ((1950, 1977), 'numpy.array', 'r.array', (['[0, 3, 5, 6, 8, 9]'], {}), '([0, 3, 5, 6, 8, 9])\n', (1957, 1977), True, 'import numpy as r\n'), ((1978, 2006), 'numpy.array', 'r.array', (['[2, 4, 6, 7, 8, 10]'], {}), '([2, 4, 6, 7, 8, 10])\n', (1985, 2006), True, 'import numpy as r\n'), ((2005, 2031), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""*"""'}), "(x, y, marker='*')\n", (2013, 2031), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2041, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2821), 'numpy.array', 'r.array', (['[3, 5, 5, 6, 7, 8]'], {}), '([3, 5, 5, 6, 7, 8])\n', (2801, 2821), True, 'import numpy as r\n'), ((2820, 2847), 'numpy.array', 'r.array', (['[1, 3, 5, 6, 7, 8]'], {}), '([1, 3, 5, 6, 7, 8])\n', (2827, 2847), True, 'import numpy as r\n'), ((2846, 2867), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-.r"""'], {}), "(x, y, '-.r')\n", (2854, 2867), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2877), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2875, 2877), True, 'import matplotlib.pyplot as plt\n'), ((3525, 3552), 'numpy.array', 'r.array', (['[1, 3, 4, 5, 9, 5]'], {}), '([1, 3, 4, 5, 9, 5])\n', (3532, 3552), True, 'import numpy as r\n'), ((3551, 3575), 'numpy.array', 'r.array', (['[0, 3, 6, 8, 8]'], {}), '([0, 3, 6, 8, 8])\n', (3558, 3575), True, 'import numpy as r\n'), ((3575, 
3607), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'marker': '"""o"""', 'ms': '"""17"""'}), "(x, marker='o', ms='17')\n", (3583, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3617), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3615, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3823), 'numpy.array', 'r.array', (['[2, 3, 5, 6]'], {}), '([2, 3, 5, 6])\n', (3809, 3823), True, 'import numpy as r\n'), ((3824, 3846), 'numpy.array', 'r.array', (['"""[0,3,5,6,8]"""'], {}), "('[0,3,5,6,8]')\n", (3831, 3846), True, 'import numpy as r\n'), ((3850, 3889), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'marker': '"""*"""', 'ms': '(34)', 'mec': '"""r"""'}), "(x, marker='*', ms=34, mec='r')\n", (3858, 3889), True, 'import matplotlib.pyplot as plt\n'), ((3888, 3898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3896, 3898), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4094), 'numpy.array', 'r.array', (['[1, 3, 5, 6]'], {}), '([1, 3, 5, 6])\n', (4080, 4094), True, 'import numpy as r\n'), ((4095, 4116), 'numpy.array', 'r.array', (['[2, 3, 5, 6]'], {}), '([2, 3, 5, 6])\n', (4102, 4116), True, 'import numpy as r\n'), ((4117, 4156), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'marker': '"""*"""', 'ms': '(34)', 'mfc': '"""r"""'}), "(x, marker='*', ms=34, mfc='r')\n", (4125, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4163, 4165), True, 'import matplotlib.pyplot as plt\n'), ((4370, 4397), 'numpy.array', 'r.array', (['[0, 4, 6, 7, 7, 8]'], {}), '([0, 4, 6, 7, 7, 8])\n', (4377, 4397), True, 'import numpy as r\n'), ((4396, 4444), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'marker': '"""*"""', 'ms': '(30)', 'mec': '"""r"""', 'mfc': '"""r"""'}), "(y, marker='*', ms=30, mec='r', mfc='r')\n", (4404, 4444), True, 'import matplotlib.pyplot as plt\n'), ((4442, 4452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4450, 4452), True, 'import 
matplotlib.pyplot as plt\n'), ((4715, 4749), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 5, 7]'], {}), '([1, 2, 3, 4, 5, 6, 5, 7])\n', (4723, 4749), True, 'import numpy as np\n'), ((4746, 4774), 'numpy.array', 'np.array', (['[1, 2, 4, 5, 5, 6]'], {}), '([1, 2, 4, 5, 5, 6])\n', (4754, 4774), True, 'import numpy as np\n'), ((4774, 4849), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'ms': '(34)', 'marker': '"""*"""', 'mec': '"""hotpink"""', 'mfc': '"""hotpink"""', 'linestyle': '""":"""'}), "(y, ms=34, marker='*', mec='hotpink', mfc='hotpink', linestyle=':')\n", (4782, 4849), True, 'import matplotlib.pyplot as plt\n'), ((4848, 4858), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4856, 4858), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2019-04-25 06:10:39
# @Last Modified by: Administrator
# @Last Modified time: 2019-05-21 15:30:34
# Public API of this utility module.
__all__ = [
    "mkdir",
    "get_abspath",
    "read_file",
    "json_load",
    "json_dump",
    "b",
    "u",
    "Singleton",
    "CachedProperty",
]
import os
from requests.compat import json
__ROOT_DIR = os.path.join(os.path.dirname(__file__), "../") # tools/ is the root directory
def mkdir(path):
    """Create directory *path* if it does not already exist.

    Bug fix: the original ``exists()`` check followed by ``os.mkdir``
    was racy -- another process could create the directory between the
    two calls and the mkdir would raise.  EAFP: attempt the mkdir and
    swallow the error only if the directory is in fact there.
    """
    try:
        os.mkdir(path)
    except OSError:
        # Re-raise real failures (permissions, missing parent, ...).
        if not os.path.isdir(path):
            raise
def get_abspath(*path):
    """Resolve *path* components relative to the project root directory."""
    joined = os.path.join(__ROOT_DIR, *path)
    return os.path.abspath(joined)
def read_file(file, encoding="utf-8-sig"):
    """Return the full contents of *file* decoded with *encoding*."""
    with open(file, "r", encoding=encoding) as handle:
        content = handle.read()
    return content
def json_load(file, **kwargs):
    """Parse and return the JSON document stored in *file* (utf-8-sig)."""
    with open(file, "r", encoding="utf-8-sig") as handle:
        text = handle.read()
    return json.loads(text, **kwargs)
def json_dump(obj, file, **kwargs):
    """Serialize *obj* as JSON into *file*.

    The output encoding may be overridden via an ``encoding`` keyword
    (default utf-8); all remaining keywords go to the JSON serializer.
    """
    encoding = kwargs.pop("encoding", "utf-8")
    with open(file, "w", encoding=encoding) as handle:
        handle.write(json.dumps(obj, **kwargs))
def b(s):
    """Coerce bytes/str/int/float to utf-8 encoded bytes.

    Raises TypeError for any other input type.
    """
    if isinstance(s, (str, int, float)):
        return str(s).encode("utf-8")
    if isinstance(s, bytes):
        return s
    raise TypeError(s)
def u(s):
    """Coerce bytes/str/int/float to a (unicode) str.

    Bytes are decoded as utf-8; raises TypeError for other input types.
    """
    if isinstance(s, bytes):
        return s.decode("utf-8")
    if isinstance(s, (str, int, float)):
        return str(s)
    raise TypeError(s)
class Singleton(type):
    """
    Singleton Metaclass: every class using it yields one shared instance.

    Bug fix: keyword arguments are now forwarded to the underlying
    constructor -- the original ``super().__call__(*args)`` silently
    dropped **kwargs on first instantiation.

    @link https://github.com/jhao104/proxy_pool/blob/428359c8dada998481f038dbdc8d3923e5850c0e/Util/utilClass.py
    """

    # Maps each class to its single cached instance.
    _inst = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._inst:
            cls._inst[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._inst[cls]
class _Missing(object):
"""
from werkzeug._internal
"""
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_MISSING = _Missing()
class CachedProperty(property):
"""
from werkzeug.utils
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __set__(self, obj, value):
obj.__dict__[self.__name__] = value
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _MISSING)
if value is _MISSING:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value | [
"os.path.exists",
"os.path.join",
"os.path.dirname",
"requests.compat.json.dump",
"os.mkdir",
"requests.compat.json.load"
] | [((399, 424), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (414, 424), False, 'import os\n'), ((477, 497), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (491, 497), False, 'import os\n'), ((507, 521), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (515, 521), False, 'import os\n'), ((574, 605), 'os.path.join', 'os.path.join', (['__ROOT_DIR', '*path'], {}), '(__ROOT_DIR, *path)\n', (586, 605), False, 'import os\n'), ((828, 851), 'requests.compat.json.load', 'json.load', (['fp'], {}), '(fp, **kwargs)\n', (837, 851), False, 'from requests.compat import json\n'), ((995, 1023), 'requests.compat.json.dump', 'json.dump', (['obj', 'fp'], {}), '(obj, fp, **kwargs)\n', (1004, 1023), False, 'from requests.compat import json\n')] |
#from tree_GA import TreeGA
import os
import time
import json
import pandas as pd
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from treeGA import TreeGA
def printChart(toFile=False,filename="",lang="en"):
    """Draw a Gantt chart of the best schedule found by the GA.

    Reads the module-level globals set in ``__main__`` below:
    ``bestMachineSchedules`` (per machine, a list of (jobId, opNumber)
    couples), ``bestJobTimings`` (per job/operation, (start, end) times)
    and ``minimumMakespan`` (x-axis limit).

    :param toFile:   also save the figure when True
    :param filename: target path used when toFile is True
    :param lang:     "it" for Italian axis labels, anything else English
                     (NOTE(review): the legend title 'Nomi Job' stays
                     Italian in both cases -- confirm if intended)
    """
    iteration = 0  # NOTE(review): incremented but never read
    # jobsForMachines[i]            : list of (start, duration) per bar
    # jobsForMachinesWithJobId[i]   : list of (start, end, jobId)
    jobsForMachines = []
    jobsForMachinesWithJobId = []
    for machineSchedule in bestMachineSchedules:
        jobsForMachine = []
        jobsForMachineWithJobId = []
        for jobOperationCouple in machineSchedule:
            jobId = jobOperationCouple[0]
            operationNumber = jobOperationCouple[1]
            # (start time, end time) of this operation of this job
            jobStartAndEndTime = bestJobTimings[jobId][operationNumber]
            # broken_barh wants (start, duration) pairs
            jobsForMachine.append((jobStartAndEndTime[0],jobStartAndEndTime[1] - jobStartAndEndTime[0]))
            jobsForMachineWithJobId.append((jobStartAndEndTime[0],jobStartAndEndTime[1],jobId))
        jobsForMachinesWithJobId.append(jobsForMachineWithJobId)
        jobsForMachines.append(jobsForMachine)
        iteration = iteration + 1
    font = {'family' : 'sans-serif',
            'weight' : 'normal',
            'size'   : 22}
    plt.rc('font', **font)
    chartPatches = []
    # One fixed colour per job, reused for its legend patch and bars.
    colors = ["#696969","#8b4513","#808000","#483d8b","#008000","#008b8b","#00008b","#8fbc8f","#800080","#b03060","#ff0000","#ffa500","#ffff00","#00ff00","#8a2be2","#00ff7f","#dc143c","#00ffff","#00bfff","#0000ff","#adff2f","#b0c4de","#ff7f50","#ff00ff","#1e90ff","#90ee90","#ff1493","#ee82ee","#ffe4b5","#ffb6c1"]
    for j in range(len(bestJobTimings)):
        colorHex = colors[j]
        chartPatches.append(mpatches.Patch(color=colorHex, label='Job' + str(j)))
    fig, schedule = plt.subplots()
    fig.set_figheight(18)
    fig.set_figwidth(25)
    numOfMachines = len(bestMachineSchedules)
    # Setting Y-axis limits
    schedule.set_ylim(0, numOfMachines * 20)
    # Setting X-axis limits
    schedule.set_xlim(0, minimumMakespan)
    # Axis labels in the requested language
    if lang == "it":
        schedule.set_xlabel("Minuti sin dall'inizio")
        schedule.set_ylabel('Macchina')
        machineString = "Macchina"
    else:
        schedule.set_xlabel("Minutes since the start")
        schedule.set_ylabel('Machine')
        machineString = "Machine"
    schedule.grid(True)
    # One horizontal lane of bars per machine, 20 units apart.
    ytiks = []
    yticksLabels = []
    verticalOffeset = 20
    for i in range(numOfMachines):
        ytiks.append(i * verticalOffeset)
        yticksLabels.append(machineString + " " + str(i))
        colorsForChart = []
        jobIds = []
        for j in range(len(jobsForMachinesWithJobId[i])):
            jobId = jobsForMachinesWithJobId[i][j][2]
            colorsForChart.append(colors[jobId])
            jobIds.append(jobId)
        schedule.broken_barh(jobsForMachines[i], (i * verticalOffeset, verticalOffeset/2), facecolors = tuple(colorsForChart))
        # Print the job id centred inside each bar.
        for j in range(len(jobsForMachines[i])):
            x1,x2 = jobsForMachines[i][j]
            schedule.text(x=x1 + x2/2, y=(i * verticalOffeset) + 5,s=jobIds[j],ha='center', va='center',color='white',fontsize=18,fontweight="bold")
    schedule.set_yticks(ytiks)
    # Labelling ticks of y-axis
    schedule.set_yticklabels(yticksLabels)
    fig.legend(handles=chartPatches,title='Nomi Job', bbox_to_anchor=(0.9, 0.9), loc='upper left')
    if toFile:
        plt.savefig(filename)
    plt.show()
if __name__ == '__main__':
    # CLI: <instance file> <population size> <iterations> <power-of-two>
    #      <plot | plot_to_file [output file]>
    start_time = time.time()
    fileInstanceName = sys.argv[1]
    populationNumber = int(sys.argv[2])
    iterationNumber = int(sys.argv[3])
    numberThatIsAPowerOfTwo = int(sys.argv[4])
    treeGA = TreeGA(fileName = fileInstanceName,populationNumber = populationNumber,iterationNumber = iterationNumber,numberThatIsAPowerOfTwo = numberThatIsAPowerOfTwo)
    treeGA.execute()
    # These module-level names are read by printChart() above.
    minimumMakespan = treeGA.getMinimumMakespan()
    bestSolution = treeGA.getBestSolution()
    bestMachineSchedules = treeGA.getBestSolutionMachineSchedules()
    bestJobTimings = treeGA.getBestSolutionJobTimings()
    print("")
    print("Minimum Makespan:")
    print(minimumMakespan)
    print("")
    print("Best solution")
    print(bestSolution.values)
    end_time = time.time()
    print("Execution time in seconds: ")
    print(end_time - start_time)
    if sys.argv[5] == "plot":
        printChart()
    elif sys.argv[5] == "plot_to_file":
        # Italian labels when writing the chart to the file in argv[6].
        printChart(toFile=True,filename=sys.argv[6],lang="it")
| [
"matplotlib.pyplot.savefig",
"treeGA.TreeGA",
"time.time",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] | [((1257, 1279), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (1263, 1279), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2064), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2062, 2064), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3841, 3843), True, 'import matplotlib.pyplot as plt\n'), ((3890, 3901), 'time.time', 'time.time', ([], {}), '()\n', (3899, 3901), False, 'import time\n'), ((4078, 4237), 'treeGA.TreeGA', 'TreeGA', ([], {'fileName': 'fileInstanceName', 'populationNumber': 'populationNumber', 'iterationNumber': 'iterationNumber', 'numberThatIsAPowerOfTwo': 'numberThatIsAPowerOfTwo'}), '(fileName=fileInstanceName, populationNumber=populationNumber,\n iterationNumber=iterationNumber, numberThatIsAPowerOfTwo=\n numberThatIsAPowerOfTwo)\n', (4084, 4237), False, 'from treeGA import TreeGA\n'), ((4638, 4649), 'time.time', 'time.time', ([], {}), '()\n', (4647, 4649), False, 'import time\n'), ((3797, 3818), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3808, 3818), True, 'import matplotlib.pyplot as plt\n')] |
from fastapi import APIRouter, File, Header, HTTPException, UploadFile
from fastapi.responses import FileResponse, HTMLResponse
from pydantic.main import List
from api import controller
from api.config import application_name
from api.models.TTS_model import TTSModel
# Router collecting all TTS endpoints; mounted by the application.
router = APIRouter()
# Maximum text length accepted by the /taco endpoint.
MAX_CHARACTERS = 550
class NotVIPplsPAYError(Exception):
    """Raised when a request exceeds the limits of the free tier."""
@router.get("/", response_class=HTMLResponse)
def home():
    """Landing page: a minimal HTML banner naming the service."""
    return "<body><h1>API of {}</h1></body>".format(application_name)
@router.post("/taco")
def text_to_tacotron_audio_file(data: TTSModel, model=Header(None)):
    """Synthesize ``data.text`` with the Tacotron model named in the
    ``model`` request header and return the generated WAV file.

    Texts longer than MAX_CHARACTERS are rejected.
    NOTE(review): the blanket ``except Exception`` turns that client
    error (and any controller failure) into a 500 -- a 4xx would be more
    appropriate for the length check; confirm before changing the API.
    """
    try:
        text = data.text
        if len(text) > MAX_CHARACTERS:
            raise NotVIPplsPAYError(
                "Too many chararacters."
            )
        wav_audio_file_path = controller.text_to_tacotron_audio_file(data.text, model)
        return FileResponse(str(wav_audio_file_path))
    except Exception as e:
        # Surface any failure as a 500 with the exception message.
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/taco_audio")
async def audio_to_tacotron_audio_file(
    file: UploadFile = File(...), model=Header(None)
):
    """Transcribe the uploaded audio, resynthesize it with the Tacotron
    model named in the ``model`` header, and return the WAV file with
    the recognised text in a ``text`` response header.

    Empty uploads and uploads above 120000 bytes are rejected.
    NOTE(review): ``bytes`` shadows the builtin of the same name, and
    the blanket ``except Exception`` maps every failure (including the
    size checks) to a 500 -- confirm before changing the API.
    """
    try:
        bytes = await file.read()
        if len(bytes) < 1:
            raise NotImplementedError(
                "No audio has been provided, check your microphone."
            )
        if len(bytes) > 120000:
            raise NotVIPplsPAYError(
                "Too many bytes."
            )
        wav_audio_file_path, text = await controller.audio_to_tacotron_audio_file(
            bytes, model
        )
        return FileResponse(str(wav_audio_file_path), headers={'text': text})
    except Exception as e:
        # Surface any failure as a 500 with the exception message.
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/get_models", response_model=List[str])
def get_available_models():
    """Return the identifiers of all TTS models the service can load."""
    try:
        models = controller.get_models()
    except Exception as e:
        # Surface any controller failure as a 500 with the message.
        raise HTTPException(status_code=500, detail=str(e))
    return models
| [
"fastapi.Header",
"fastapi.APIRouter",
"api.controller.text_to_tacotron_audio_file",
"api.controller.get_models",
"api.controller.audio_to_tacotron_audio_file",
"fastapi.File"
] | [((279, 290), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (288, 290), False, 'from fastapi import APIRouter, File, Header, HTTPException, UploadFile\n'), ((559, 571), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (565, 571), False, 'from fastapi import APIRouter, File, Header, HTTPException, UploadFile\n'), ((1060, 1069), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (1064, 1069), False, 'from fastapi import APIRouter, File, Header, HTTPException, UploadFile\n'), ((1077, 1089), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (1083, 1089), False, 'from fastapi import APIRouter, File, Header, HTTPException, UploadFile\n'), ((769, 825), 'api.controller.text_to_tacotron_audio_file', 'controller.text_to_tacotron_audio_file', (['data.text', 'model'], {}), '(data.text, model)\n', (807, 825), False, 'from api import controller\n'), ((1794, 1817), 'api.controller.get_models', 'controller.get_models', ([], {}), '()\n', (1815, 1817), False, 'from api import controller\n'), ((1445, 1498), 'api.controller.audio_to_tacotron_audio_file', 'controller.audio_to_tacotron_audio_file', (['bytes', 'model'], {}), '(bytes, model)\n', (1484, 1498), False, 'from api import controller\n')] |
# coding: utf-8
import SocketServer,os
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(SocketServer.BaseRequestHandler):
    """Minimal HTTP handler (Python 2) serving GET requests from ./www.

    Non-GET methods get a 405; missing files and paths containing ".."
    (a crude path-traversal guard) get a 404.  A trailing slash serves
    that directory's index.html.
    """

    def handle(self):
        self.data = self.request.recv(1024).strip()
        parts = self.data.split()
        if len(parts) < 2:
            # Malformed/empty request line: nothing sensible to serve.
            # (The original raised IndexError here.)
            return
        method_type = parts[0]
        path = parts[1]
        if method_type != 'GET':  # only GET will be handled!
            status_code = "HTTP/1.1 405 Method not allowed\r\n"
            content_type = "Content-type: text/html\r\n\r\n"
            content = "<html><head></head><body>"+"<h1><center>HTTP/1.1 405 Method not allowed</center></h1></body></html>\n"
            self.request.sendall(status_code)
            self.request.sendall(content_type)
            self.request.sendall(content)
            return
        # Map the URL onto the ./www directory.
        if path[-1] == '/':
            Path = os.getcwd() + "/www" + path + "index.html"
        else:
            Path = os.getcwd() + "/www" + path
        # Reject missing files and any attempt to climb out of ./www.
        if (os.path.exists(Path) == False or "../" in Path or "/.." in Path):
            header = "HTTP/1.1 404 Not Found\r\n Content-type: text/html\r\n"
            file_content = "<html><head></head><body>"+"<h1><center>HTTP/1.1 404 Page Not Found!</center></h1></body></html>\n"
            self.request.sendall(header + "\r\n" + file_content)
            return
        read_file = os.path.abspath(Path)
        with open(read_file, 'r') as myfile:  # serve file in www
            file_content = myfile.read()
        # Derive the MIME subtype from the file extension.  Bug fix: the
        # original Path.split('.')[1] raised IndexError for names without
        # a dot and picked the wrong segment when the path held several.
        mime_type = os.path.splitext(read_file)[1].lstrip('.')
        header = "HTTP/1.1 200 OK\r\n" + "Content-type: text/%s\r\n" % mime_type
        self.request.sendall(header + "\r\n" + file_content)
if __name__ == "__main__":
    HOST, PORT = "localhost", 8080
    # Allow quick restarts without waiting for the TIME_WAIT socket state.
    SocketServer.TCPServer.allow_reuse_address = True
    # Create the server, binding to localhost on port 8080
    server = SocketServer.TCPServer((HOST, PORT), MyWebServer)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    server.serve_forever()
| [
"SocketServer.TCPServer",
"os.path.exists",
"os.path.abspath",
"os.getcwd"
] | [((3159, 3208), 'SocketServer.TCPServer', 'SocketServer.TCPServer', (['(HOST, PORT)', 'MyWebServer'], {}), '((HOST, PORT), MyWebServer)\n', (3181, 3208), False, 'import SocketServer, os\n'), ((2577, 2598), 'os.path.abspath', 'os.path.abspath', (['Path'], {}), '(Path)\n', (2592, 2598), False, 'import SocketServer, os\n'), ((2144, 2164), 'os.path.exists', 'os.path.exists', (['Path'], {}), '(Path)\n', (2158, 2164), False, 'import SocketServer, os\n'), ((1834, 1845), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1843, 1845), False, 'import SocketServer, os\n'), ((1764, 1775), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1773, 1775), False, 'import SocketServer, os\n')] |
import time
import threading
import sys
import pynrfjprog.HighLevel
import pynrfjprog.APIError
# Default number of bytes requested per rtt_read call.
DEFAULT_BLOCK_SIZE = 1024
# Pacing between successive reads/writes on the RTT channel.
SECONDS_PER_READ = 0.010
SECONDS_PER_WRITE = 0.010


class RTT:
    """
    RTT communication class: pumps stdin -> RTT channel and
    RTT channel -> stdout on two daemon-style worker threads.
    Based off: https://github.com/thomasstenersen/pyrtt-viewer/blob/master/pyrtt-viewer
    """

    def __init__(self, probe, channel, block_size=DEFAULT_BLOCK_SIZE):
        # probe must expose rtt_write/rtt_read (e.g. a pynrfjprog
        # DebugProbe); it is only used once run() starts the threads.
        self.probe = probe
        self.channel = channel
        self.close_event = None
        self.writer_thread = None
        self.reader_thread = None
        self.block_size = block_size

    def _writer(self):
        """Forward stdin lines to the RTT channel until closed or EOF."""
        while not self.close_event.is_set():
            data = sys.stdin.readline()
            if not data:
                # Bug fix: readline() returns '' at EOF immediately and
                # forever; the original kept looping here without
                # sleeping, busy-spinning at 100% CPU.  Stop instead.
                break
            written = self.probe.rtt_write(self.channel, data)
            assert written == len(data)
            time.sleep(SECONDS_PER_WRITE)

    def _reader(self):
        """Copy data arriving on the RTT channel to stdout until closed."""
        while not self.close_event.is_set():
            data = self.probe.rtt_read(self.channel, self.block_size)
            if not data:
                time.sleep(SECONDS_PER_READ)
                continue
            sys.stdout.write(data)
            sys.stdout.flush()

    def run(self):
        """Start both pump threads; block until they finish or Ctrl-C."""
        self.close_event = threading.Event()
        self.close_event.clear()
        self.reader_thread = threading.Thread(target=self._reader)
        self.reader_thread.start()
        self.writer_thread = threading.Thread(target=self._writer)
        self.writer_thread.start()
        try:
            while self.reader_thread.is_alive() or \
                  self.writer_thread.is_alive():
                time.sleep(0.1)
        except KeyboardInterrupt:
            self.close_event.set()
        self.reader_thread.join()
        self.writer_thread.join()
def term_nrf(mote: int, channel: int=0, block_size: int=DEFAULT_BLOCK_SIZE):
    """Open an interactive RTT terminal to the nRF device with serial
    number `mote`, bridging stdin/stdout until interrupted.

    Args:
        mote: debugger serial number passed to DebugProbe.
        channel: RTT up/down channel index to attach to.
        block_size: maximum bytes per rtt_read call.
    """
    with pynrfjprog.HighLevel.API() as api:
        with pynrfjprog.HighLevel.DebugProbe(api, mote) as probe:
            probe.rtt_start()
            # Wait for the target to publish its RTT control block before
            # touching the channels.
            while not probe.rtt_is_control_block_found():
                time.sleep(0.01)
            try:
                # Bug fix: forward the caller's channel and block size; the
                # original hard-coded RTT(probe, 0), silently ignoring both
                # parameters.
                rtt = RTT(probe, channel, block_size=block_size)
                rtt.run()
            finally:
                # Always tear down RTT, even on Ctrl-C inside rtt.run().
                probe.rtt_stop()
| [
"time.sleep",
"threading.Event",
"sys.stdin.readline",
"threading.Thread",
"sys.stdout.flush",
"sys.stdout.write"
] | [((1369, 1386), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1384, 1386), False, 'import threading\n'), ((1451, 1488), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._reader'}), '(target=self._reader)\n', (1467, 1488), False, 'import threading\n'), ((1555, 1592), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._writer'}), '(target=self._writer)\n', (1571, 1592), False, 'import threading\n'), ((700, 720), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (718, 720), False, 'import sys\n'), ((924, 953), 'time.sleep', 'time.sleep', (['SECONDS_PER_WRITE'], {}), '(SECONDS_PER_WRITE)\n', (934, 953), False, 'import time\n'), ((1250, 1272), 'sys.stdout.write', 'sys.stdout.write', (['data'], {}), '(data)\n', (1266, 1272), False, 'import sys\n'), ((1300, 1318), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1316, 1318), False, 'import sys\n'), ((1180, 1208), 'time.sleep', 'time.sleep', (['SECONDS_PER_READ'], {}), '(SECONDS_PER_READ)\n', (1190, 1208), False, 'import time\n'), ((1766, 1781), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1776, 1781), False, 'import time\n'), ((2281, 2297), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2291, 2297), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Make figures for MUSim paper
AUTHOR: <NAME>
VERSION DATE: 26 June 2019
"""
import os
from os.path import join
import numpy as np
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
import matplotlib.pyplot as plt
def binom_ci_precision(proportion, nobs, method='beta', alpha=0.05):
    """
    Half-width of the binomial proportion confidence interval above the
    point estimate (upper bound minus the observed proportion).
    """
    successes = proportion * nobs
    interval = proportion_confint(successes, nobs, method=method, alpha=alpha)
    upper_bound = interval[1]
    return upper_bound - proportion
def make_power_bar(data, colors, error_bars='se', mean_amp=True, legend=False):
    """Bar plot of statistical power per time window for each correction
    method, with optional SE or CI error bars.

    Args:
        data: DataFrame with a 'time_window' column and one column per method.
        colors: bar colors, one per method (copied, not mutated).
        error_bars: 'se', 'ci', or None for no error bars.
        mean_amp: include the 'mean_amp' column as the first bar.
        legend: draw the legend outside the axes.
    """
    use_colors = colors.copy()
    use_cols = ['Fmax', 'cluster_05', 'cluster_01', 'BH', 'BY', 'BKY']
    if mean_amp:
        use_cols.insert(0, 'mean_amp')
    #Get values for error bars
    power_data = data.loc[:, use_cols].to_numpy().T
    if error_bars is None:
        # Bug fix: None must be tested BEFORE calling .lower(); the original
        # raised AttributeError on error_bars=None before ever reaching
        # its `elif error_bars is None` branch.
        stderr = None
    elif error_bars.lower() == 'se':
        # Binomial standard error over the 10000 simulation runs.
        stderr = np.sqrt( (power_data*(1-power_data)) / 10000 )
    elif error_bars.lower() == 'ci':
        stderr = binom_ci_precision(power_data, 10000)
    else:
        raise ValueError('Incorrect input for error_bars')
    #Plot
    # Bug fix: the third label previously duplicated 'p≤0.05'; it belongs to
    # the cluster_01 column.
    labels = ['Fmax', 'cluster (p≤0.05 threshold)', 'cluster (p≤0.01 threshold)',
              'FDR (Benjamini & Hochberg, 1995)', 'FDR (Benjamini & Yekutieli, 2001)',
              'FDR (Benjamini et al., 2006)']
    if mean_amp:
        labels.insert(0, 'mean amplitude')
        use_colors.insert(0, 'black')
    data.plot.bar(x='time_window', y=use_cols, label=labels, color=use_colors,
                  fontsize=16, yerr=stderr, legend=legend)
    plt.xticks(rotation='horizontal')
    plt.xlabel('')
    plt.ylim((0,1))
    if legend:
        plt.legend(loc=(1.04,0), prop={'size': 12})
def make_power_figures(colors, results_dir):
    """For every familywise power CSV in `results_dir`, save one bar figure
    per component (N400, P300, P1) plus a shared legend figure.
    """
    #Get all results csv files
    results_files = [file for file in os.listdir(results_dir) if file.endswith('.csv')]
    for results_file in results_files:
        #Load data
        data = pd.read_csv(join(results_dir, results_file))
        if 'Power' in results_file and 'Familywise' in results_file:
            # Only the 'FamilywisePower' files contain a mean_amp column.
            mean_amp = 'FamilywisePower' in results_file
            #Make file with legend (once, shared by all figures)
            if not os.path.isfile(join(results_dir, 'legend.tif')):
                make_power_bar(data[0:3], colors, legend=True)
                img_file = join(results_dir, 'legend.tif')
                plt.savefig(img_file, bbox_inches='tight', dpi=600)
                plt.close()
            # One figure per component; rows come in groups of three time
            # windows (0-2: N400, 3-5: P300, 6-8: P1). This replaces three
            # copy-pasted plotting blocks.
            # NOTE(review): .strip('.csv') strips any of the characters
            # '.', 'c', 's', 'v' from both ends, not the suffix — kept for
            # output-name compatibility.
            base = results_file.strip('.csv')
            for start, component in ((0, 'N400'), (3, 'P300'), (6, 'P1')):
                make_power_bar(data[start:start + 3], colors,
                               error_bars='CI', mean_amp=mean_amp)
                img_file = join(results_dir, '%s_%s.tif' % (base, component))
                plt.savefig(img_file, bbox_inches='tight', dpi=600)
                plt.close()
def make_null_figures(results_dir):
    """Bar figures of familywise Type I error rates under the null, one
    figure per (time window, trial count) combination."""
    #Get data
    data = pd.read_csv(join(results_dir, 'MUSim_Null_FamilywiseTypeI.csv'))
    data[['n_trials', 'n_subjects']] = data[['n_trials', 'n_subjects']].astype(int)
    #Plotting parameters
    use_cols = ['mean_amp', 'Fmax', 'cluster_05', 'cluster_01']
    labels = ['mean amplitude', 'Fmax', 'cluster (p ≤ 0.05 threshold)', 'cluster (p ≤ 0.01 threshold)']
    use_colors = ['black', 'lightgreen', 'navy', 'cornflowerblue']
    for window in ('0 - 300', '300 - 1000'):
        for trial_count in (40, 20, 10):
            mask = (data['time_window'] == window) & (data['n_trials'] == trial_count)
            subset = data[mask]
            props = subset.loc[:, use_cols].to_numpy().T
            err = binom_ci_precision(props, 10000)
            #Make bar graph
            subset.plot.bar(x='n_subjects', y=use_cols, label=labels, color=use_colors,
                            fontsize=16, yerr=err, legend=False)
            plt.xticks(rotation='horizontal')
            plt.xlabel('')
            plt.ylim((0, 0.1))
            # Reference line at the nominal 0.05 alpha level.
            plt.axhline(y=0.05, linewidth=1, color='r', linestyle='--')
            plt.yticks(np.arange(1, 11) / 100)
            plt.xlabel('Number of Subjects', fontsize=18)
            #Save file
            fig_path = join(results_dir, 'MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (window, trial_count))
            plt.savefig(fig_path, bbox_inches='tight', dpi=600)
            plt.close()
def make_EW_figures(colors, results_dir):
    """Box plots for each 'Power_EW' CSV in results_dir, colored per method."""
    ew_files = [file for file in os.listdir(results_dir) if 'Power_EW' in file and file.endswith('.csv')]
    for ew_file in ew_files:
        #Get data
        data = pd.read_csv(join(results_dir, ew_file))
        #Rename columns to the labels used in the figure
        data.columns = ['uncorrected', 'Sidak', 'Fmax', 'Clust0.05', 'Clust0.01', 'BH FDR', 'BY FDR', 'BKY FDR']
        #Make box plot from the corrected methods only (Fmax onward)
        bplot = data.loc[:, 'Fmax':].boxplot(whis=[5, 95], showfliers=False,
                                             return_type='dict', patch_artist=True,
                                             fontsize=12)
        #For proportion measures, set standard y-scale
        # (onset/offset files hold latencies, which need an automatic scale)
        if 'onset' not in ew_file and 'offset' not in ew_file:
            plt.ylim((0,1))
        #Update colors and line sizes
        for key in bplot.keys():
            i = 0
            for item in bplot[key]:
                item.set_linewidth(4)
                if key == 'medians':
                    item.set_color('black')
                else:
                    item.set_color(colors[int(i)])
                # Whiskers and caps come in PAIRS per box, so advance the
                # color index by 0.5 each; int(i) keeps the pair on one color.
                if key in ['whiskers', 'caps']:
                    i += 0.5
                else:
                    i += 1
        #Save figure
        # NOTE(review): .strip('.csv') strips characters, not the suffix —
        # works for these file names but is fragile in general.
        img_file = join(results_dir, ew_file.strip('.csv') + '.tif')
        plt.savefig(img_file, bbox_inches='tight', dpi=600)
        plt.close()
def main():
    """Generate all MUSim paper figures from the simulation result CSVs."""
    # Root folder holding the simulation result CSVs (machine-specific path).
    results_dir = r'C:\Users\ecfne\Documents\Eric\Research\Stats Simulations\MUSim\results'
    # One color per correction method, shared across all figure types.
    colors = ['lightgreen', 'navy', 'cornflowerblue', 'red', 'lightcoral', 'firebrick']
    make_power_figures(colors, results_dir)
    make_null_figures(results_dir)
    make_EW_figures(colors, results_dir)


if __name__ == '__main__':
    main()
| [
"os.listdir",
"numpy.sqrt",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"statsmodels.stats.proportion.proportion_confint",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.ylim",
"matplotlib.pypl... | [((480, 539), 'statsmodels.stats.proportion.proportion_confint', 'proportion_confint', (['count', 'nobs'], {'method': 'method', 'alpha': 'alpha'}), '(count, nobs, method=method, alpha=alpha)\n', (498, 539), False, 'from statsmodels.stats.proportion import proportion_confint\n'), ((1754, 1787), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""horizontal"""'}), "(rotation='horizontal')\n", (1764, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1803, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1829), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (1821, 1829), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1052), 'numpy.sqrt', 'np.sqrt', (['(power_data * (1 - power_data) / 10000)'], {}), '(power_data * (1 - power_data) / 10000)\n', (1013, 1052), True, 'import numpy as np\n'), ((1854, 1898), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1.04, 0)', 'prop': "{'size': 12}"}), "(loc=(1.04, 0), prop={'size': 12})\n", (1864, 1898), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3776), 'os.path.join', 'join', (['results_dir', '"""MUSim_Null_FamilywiseTypeI.csv"""'], {}), "(results_dir, 'MUSim_Null_FamilywiseTypeI.csv')\n", (3729, 3776), False, 'from os.path import join\n'), ((6628, 6679), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (6639, 6679), True, 'import matplotlib.pyplot as plt\n'), ((6689, 6700), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6698, 6700), True, 'import matplotlib.pyplot as plt\n'), ((2029, 2052), 'os.listdir', 'os.listdir', (['results_dir'], {}), '(results_dir)\n', (2039, 2052), False, 'import os\n'), ((2183, 2214), 'os.path.join', 'join', (['results_dir', 'results_file'], {}), '(results_dir, results_file)\n', (2187, 2214), False, 'from os.path import 
join\n'), ((3011, 3062), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (3022, 3062), True, 'import matplotlib.pyplot as plt\n'), ((3076, 3087), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3085, 3087), True, 'import matplotlib.pyplot as plt\n'), ((3285, 3336), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (3296, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3361), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3359, 3361), True, 'import matplotlib.pyplot as plt\n'), ((3556, 3607), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (3567, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3621, 3632), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3630, 3632), True, 'import matplotlib.pyplot as plt\n'), ((4671, 4704), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""horizontal"""'}), "(rotation='horizontal')\n", (4681, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4718, 4732), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (4728, 4732), True, 'import matplotlib.pyplot as plt\n'), ((4746, 4764), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 0.1)'], {}), '((0, 0.1))\n', (4754, 4764), True, 'import matplotlib.pyplot as plt\n'), ((4777, 4836), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.05)', 'linewidth': '(1)', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(y=0.05, linewidth=1, color='r', linestyle='--')\n", (4788, 4836), True, 'import matplotlib.pyplot as plt\n'), ((4894, 4939), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Subjects"""'], {'fontsize': '(18)'}), "('Number of Subjects', fontsize=18)\n", (4904, 4939), True, 'import 
matplotlib.pyplot as plt\n'), ((5002, 5091), 'os.path.join', 'join', (['results_dir', "('MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (time_wind, trials))"], {}), "(results_dir, 'MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (time_wind,\n trials))\n", (5006, 5091), False, 'from os.path import join\n'), ((5101, 5152), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (5112, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5166, 5177), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5175, 5177), True, 'import matplotlib.pyplot as plt\n'), ((5271, 5294), 'os.listdir', 'os.listdir', (['results_dir'], {}), '(results_dir)\n', (5281, 5294), False, 'import os\n'), ((5437, 5463), 'os.path.join', 'join', (['results_dir', 'ew_file'], {}), '(results_dir, ew_file)\n', (5441, 5463), False, 'from os.path import join\n'), ((6035, 6051), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (6043, 6051), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2685), 'os.path.join', 'join', (['results_dir', '"""legend.tif"""'], {}), "(results_dir, 'legend.tif')\n", (2658, 2685), False, 'from os.path import join\n'), ((2703, 2754), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (2714, 2754), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2783), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2781, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2559), 'os.path.join', 'join', (['results_dir', '"""legend.tif"""'], {}), "(results_dir, 'legend.tif')\n", (2532, 2559), False, 'from os.path import join\n'), ((4860, 4876), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (4869, 4876), True, 'import numpy as np\n')] |
from __future__ import print_function
from argparse import ArgumentParser
from fastai.learner import *
from fastai.column_data import *
import numpy as np
import pandas as pd
def build_parser():
    """Build the command-line parser for the embedding-training script."""
    parser = ArgumentParser()
    # Required I/O arguments.
    parser.add_argument('--data', dest='in_path', type=str, required=True,
                        help='input file path')
    parser.add_argument('--out-prefix', dest='model', type=str, required=True,
                        help='output prefix')
    parser.add_argument('--out-dir', dest='out_dir', type=str, required=True,
                        help='output directory')
    # Optional hyper-parameters with defaults.
    parser.add_argument('--num-dim', dest='num_dim', type=int, default=50,
                        help='number of dimension of resulting embedding')
    parser.add_argument('--bs', dest='bs', type=int, default=64,
                        help='batch size')
    parser.add_argument('--num-epoch', dest='num_eps', type=int, default=3,
                        help='number of epoch(s)')
    parser.add_argument('--learning-rate', dest='lr', type=float, default=1e-5,
                        help='learning rate')
    return parser
def main():
    """Train a collaborative-filtering embedding over a samples x genes
    expression matrix with fastai, then dump the learned embeddings/biases
    to CSV files under out_dir with the given prefix.

    NOTE(review): `path` (in from_data_frame) and `sns` are not defined in
    this file; presumably they arrive via the fastai star-imports — verify.
    """
    parser = build_parser()
    opts = parser.parse_args()
    # Hard requirement: training runs on GPU 0 or not at all.
    if torch.cuda.is_available() and torch.backends.cudnn.enabled:
        torch.cuda.set_device(0)
    else:
        print('CUDA or CUDNN not available.')
        return
    in_path = opts.in_path
    n_factors = opts.num_dim
    bs = opts.bs
    num_eps = opts.num_eps
    lr = opts.lr
    out_dir = opts.out_dir
    prefix = opts.model
    outpath = out_dir+'/'+prefix+'_'
    ### data preparation
    # First CSV column becomes the index (sample ids); remaining columns are genes.
    df = pd.read_csv(in_path, sep=',', low_memory=False, index_col=[0], error_bad_lines=False)
    sids = list(df.index)
    df = df.assign(id=sids)
    df = df.reset_index(drop=True)
    # Long format: one row per (sample id, gene, expression) triple.
    mdf = pd.melt(df, id_vars=['id'], var_name='gene', value_name='log2exp')
    ### training
    val_idxs = get_cv_idxs(len(mdf))
    cd = CollabFilterDataset.from_data_frame(path, mdf, 'id', 'gene', 'log2exp')
    learn = cd.get_learner(n_factors, val_idxs, bs, opt_fn=optim.Adam)
    learn.fit(lr, num_eps)
    learn.save(outpath+'model')
    ### plot jointplot
    preds = learn.predict()
    y=learn.data.val_y
    jp = sns.jointplot(preds, y, kind='hex', stat_func=None)
    jp.set_axis_labels('ground truth log2(exp)', 'predicted log2(exp)')
    jp.savefig(outpath+'trn_metric_jointplot.png')
    ### output embedding
    # NOTE(review): only 'id' was appended to the gene columns, so [:-2]
    # appears to drop the LAST GENE as well — [:-1] looks intended; confirm
    # against the input schema before changing.
    genes = list(df.columns[:-2])
    sids = list(df['id'])
    geneidx = np.array([cd.item2idx[g] for g in genes])
    m=learn.model
    m.cuda()
    ### output gene embedding matrix and bias
    gene_emb = to_np(m.i(V(geneidx)))
    gene_emb_df = pd.DataFrame(gene_emb, index=genes)
    gene_emb_df.to_csv(outpath+'gemb.csv', sep=',')
    gene_emb_bias = to_np(m.ib(V(geneidx)))
    gene_emb_bias_df = pd.DataFrame(gene_emb_bias, index=genes)
    gene_emb_bias_df.to_csv(outpath+'gemb_bias.csv')
    ### output sample embedding matrix and bias
    sampleidx = np.array([cd.user2idx[sid] for sid in sids])
    samp_emb = to_np(m.u(V(sampleidx)))
    samp_emb_df = pd.DataFrame(samp_emb, index=sids)
    samp_emb_df.to_csv(outpath+'semb.csv', sep=',')
    samp_emb_bias = to_np(m.ub(V(sampleidx)))
    samp_emb_bias_df = pd.DataFrame(samp_emb_bias, index=sids)
    samp_emb_bias_df.to_csv(outpath+'semb_bias.csv')

if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.array",
"pandas.DataFrame",
"pandas.melt"
] | [((211, 227), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (225, 227), False, 'from argparse import ArgumentParser\n'), ((1605, 1694), 'pandas.read_csv', 'pd.read_csv', (['in_path'], {'sep': '""","""', 'low_memory': '(False)', 'index_col': '[0]', 'error_bad_lines': '(False)'}), "(in_path, sep=',', low_memory=False, index_col=[0],\n error_bad_lines=False)\n", (1616, 1694), True, 'import pandas as pd\n'), ((1790, 1856), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['id']", 'var_name': '"""gene"""', 'value_name': '"""log2exp"""'}), "(df, id_vars=['id'], var_name='gene', value_name='log2exp')\n", (1797, 1856), True, 'import pandas as pd\n'), ((2482, 2523), 'numpy.array', 'np.array', (['[cd.item2idx[g] for g in genes]'], {}), '([cd.item2idx[g] for g in genes])\n', (2490, 2523), True, 'import numpy as np\n'), ((2659, 2694), 'pandas.DataFrame', 'pd.DataFrame', (['gene_emb'], {'index': 'genes'}), '(gene_emb, index=genes)\n', (2671, 2694), True, 'import pandas as pd\n'), ((2814, 2854), 'pandas.DataFrame', 'pd.DataFrame', (['gene_emb_bias'], {'index': 'genes'}), '(gene_emb_bias, index=genes)\n', (2826, 2854), True, 'import pandas as pd\n'), ((2973, 3017), 'numpy.array', 'np.array', (['[cd.user2idx[sid] for sid in sids]'], {}), '([cd.user2idx[sid] for sid in sids])\n', (2981, 3017), True, 'import numpy as np\n'), ((3076, 3110), 'pandas.DataFrame', 'pd.DataFrame', (['samp_emb'], {'index': 'sids'}), '(samp_emb, index=sids)\n', (3088, 3110), True, 'import pandas as pd\n'), ((3232, 3271), 'pandas.DataFrame', 'pd.DataFrame', (['samp_emb_bias'], {'index': 'sids'}), '(samp_emb_bias, index=sids)\n', (3244, 3271), True, 'import pandas as pd\n')] |
from pyworkforce.shifts import MinAbsDifference, MinRequiredResources
import pytest
def test_min_abs_difference_schedule():
    """A feasible MinAbsDifference model solves to OPTIMAL with one
    non-negative staffing entry per day/shift pair."""
    required_resources = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
    ]
    # Hour-by-hour coverage masks expressed as runs instead of 24 literals.
    shifts_coverage = {"Morning": [0] * 5 + [1] * 8 + [0] * 11,
                       "Afternoon": [0] * 13 + [1] * 8 + [0] * 3,
                       "Night": [1] * 5 + [0] * 16 + [1] * 3,
                       "Mixed": [0] * 9 + [1] * 8 + [0] * 7}

    num_days = 2
    scheduler = MinAbsDifference(num_days=num_days,
                                periods=24,
                                shifts_coverage=shifts_coverage,
                                required_resources=required_resources,
                                max_period_concurrency=25,
                                max_shift_concurrency=20)
    solution = scheduler.solve()

    assert solution['status'] == 'OPTIMAL'
    assert 'cost' in solution
    assert 'resources_shifts' in solution
    assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
    assert all(row['resources'] >= 0 for row in solution['resources_shifts'])
def test_infeasible_min_abs_difference_schedule():
    """With max_period_concurrency below demand the model is INFEASIBLE and
    reports the sentinel placeholder row."""
    required_resources = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
    ]
    # Hour-by-hour coverage masks expressed as runs instead of 24 literals.
    shifts_coverage = {"Morning": [0] * 5 + [1] * 8 + [0] * 11,
                       "Afternoon": [0] * 13 + [1] * 8 + [0] * 3,
                       "Night": [1] * 5 + [0] * 16 + [1] * 3,
                       "Mixed": [0] * 9 + [1] * 8 + [0] * 7}

    num_days = 2
    scheduler = MinAbsDifference(num_days=num_days,
                                periods=24,
                                shifts_coverage=shifts_coverage,
                                required_resources=required_resources,
                                max_period_concurrency=10,
                                max_shift_concurrency=20)
    solution = scheduler.solve()

    assert solution['status'] == 'INFEASIBLE'
    assert 'cost' in solution
    assert 'resources_shifts' in solution
    assert solution['cost'] == -1
    assert len(solution['resources_shifts']) == 1
    placeholder = solution['resources_shifts'][0]
    assert placeholder['day'] == -1
    assert placeholder['shift'] == 'Unknown'
    assert placeholder['resources'] == -1
def test_min_required_resources_schedule():
    """A feasible MinRequiredResources model solves to OPTIMAL with one
    non-negative staffing entry per day/shift pair."""
    required_resources = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
    ]
    # Hour-by-hour coverage masks expressed as runs instead of 24 literals.
    shifts_coverage = {"Morning": [0] * 5 + [1] * 8 + [0] * 11,
                       "Afternoon": [0] * 13 + [1] * 8 + [0] * 3,
                       "Night": [1] * 5 + [0] * 16 + [1] * 3,
                       "Mixed": [0] * 9 + [1] * 8 + [0] * 7}

    num_days = 2
    scheduler = MinRequiredResources(num_days=num_days,
                                    periods=24,
                                    shifts_coverage=shifts_coverage,
                                    required_resources=required_resources,
                                    max_period_concurrency=25,
                                    max_shift_concurrency=25)
    solution = scheduler.solve()

    assert solution['status'] == 'OPTIMAL'
    assert 'cost' in solution
    assert 'resources_shifts' in solution
    assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
    assert all(row['resources'] >= 0 for row in solution['resources_shifts'])
def test_cost_min_required_resources_schedule():
    """MinRequiredResources with per-shift costs still solves to OPTIMAL
    with one non-negative staffing entry per day/shift pair."""
    required_resources = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
    ]
    # Hour-by-hour coverage masks expressed as runs instead of 24 literals.
    shifts_coverage = {"Morning": [0] * 5 + [1] * 8 + [0] * 11,
                       "Afternoon": [0] * 13 + [1] * 8 + [0] * 3,
                       "Night": [1] * 5 + [0] * 16 + [1] * 3,
                       "Mixed": [0] * 9 + [1] * 8 + [0] * 7}
    cost_dict = {"Morning": 8, "Afternoon": 8, "Night": 10, "Mixed": 7}

    num_days = 2
    scheduler = MinRequiredResources(num_days=num_days,
                                    periods=24,
                                    shifts_coverage=shifts_coverage,
                                    required_resources=required_resources,
                                    cost_dict=cost_dict,
                                    max_period_concurrency=25,
                                    max_shift_concurrency=25)
    solution = scheduler.solve()

    assert solution['status'] == 'OPTIMAL'
    assert 'cost' in solution
    assert 'resources_shifts' in solution
    assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
    assert all(row['resources'] >= 0 for row in solution['resources_shifts'])
def test_wrong_cost_min_required_resources_schedule():
    """MinRequiredResources must reject a cost_dict whose keys don't match
    shifts_coverage (here 'Afternoon' is deliberately missing)."""
    required_resources = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
    ]
    shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                       "Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                       "Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                       "Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
    cost_dict = {"Morning": 8, "Night": 10, "Mixed": 7}

    num_days = 2
    with pytest.raises(Exception) as excinfo:
        scheduler = MinRequiredResources(num_days=num_days,
                                        periods=24,
                                        shifts_coverage=shifts_coverage,
                                        required_resources=required_resources,
                                        cost_dict=cost_dict,
                                        max_period_concurrency=25,
                                        max_shift_concurrency=25)
        # Bug fix: solve() used to sit OUTSIDE the pytest.raises block, so
        # when the constructor raised, `scheduler` was never bound and the
        # test died with NameError instead of asserting the message.
        scheduler.solve()

    assert str(excinfo.value) == "cost_dict must have the same keys as shifts_coverage"
def test_infeasible_min_required_resources_schedule():
    """With max_shift_concurrency below demand the model is INFEASIBLE and
    reports the sentinel placeholder row."""
    required_resources = [
        [9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
        [13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
    ]
    # Hour-by-hour coverage masks expressed as runs instead of 24 literals.
    shifts_coverage = {"Morning": [0] * 5 + [1] * 8 + [0] * 11,
                       "Afternoon": [0] * 13 + [1] * 8 + [0] * 3,
                       "Night": [1] * 5 + [0] * 16 + [1] * 3,
                       "Mixed": [0] * 9 + [1] * 8 + [0] * 7}

    num_days = 2
    scheduler = MinRequiredResources(num_days=num_days,
                                    periods=24,
                                    shifts_coverage=shifts_coverage,
                                    required_resources=required_resources,
                                    max_period_concurrency=25,
                                    max_shift_concurrency=20)
    solution = scheduler.solve()

    assert solution['status'] == 'INFEASIBLE'
    assert 'cost' in solution
    assert 'resources_shifts' in solution
    assert solution['cost'] == -1
    assert len(solution['resources_shifts']) == 1
    placeholder = solution['resources_shifts'][0]
    assert placeholder['day'] == -1
    assert placeholder['shift'] == 'Unknown'
    assert placeholder['resources'] == -1
| [
"pyworkforce.shifts.MinRequiredResources",
"pytest.raises",
"pyworkforce.shifts.MinAbsDifference"
] | [((822, 1003), 'pyworkforce.shifts.MinAbsDifference', 'MinAbsDifference', ([], {'num_days': 'num_days', 'periods': '(24)', 'shifts_coverage': 'shifts_coverage', 'required_resources': 'required_resources', 'max_period_concurrency': '(25)', 'max_shift_concurrency': '(20)'}), '(num_days=num_days, periods=24, shifts_coverage=\n shifts_coverage, required_resources=required_resources,\n max_period_concurrency=25, max_shift_concurrency=20)\n', (838, 1003), False, 'from pyworkforce.shifts import MinAbsDifference, MinRequiredResources\n'), ((2257, 2438), 'pyworkforce.shifts.MinAbsDifference', 'MinAbsDifference', ([], {'num_days': 'num_days', 'periods': '(24)', 'shifts_coverage': 'shifts_coverage', 'required_resources': 'required_resources', 'max_period_concurrency': '(10)', 'max_shift_concurrency': '(20)'}), '(num_days=num_days, periods=24, shifts_coverage=\n shifts_coverage, required_resources=required_resources,\n max_period_concurrency=10, max_shift_concurrency=20)\n', (2273, 2438), False, 'from pyworkforce.shifts import MinAbsDifference, MinRequiredResources\n'), ((3757, 3942), 'pyworkforce.shifts.MinRequiredResources', 'MinRequiredResources', ([], {'num_days': 'num_days', 'periods': '(24)', 'shifts_coverage': 'shifts_coverage', 'required_resources': 'required_resources', 'max_period_concurrency': '(25)', 'max_shift_concurrency': '(25)'}), '(num_days=num_days, periods=24, shifts_coverage=\n shifts_coverage, required_resources=required_resources,\n max_period_concurrency=25, max_shift_concurrency=25)\n', (3777, 3942), False, 'from pyworkforce.shifts import MinAbsDifference, MinRequiredResources\n'), ((5287, 5494), 'pyworkforce.shifts.MinRequiredResources', 'MinRequiredResources', ([], {'num_days': 'num_days', 'periods': '(24)', 'shifts_coverage': 'shifts_coverage', 'required_resources': 'required_resources', 'cost_dict': 'cost_dict', 'max_period_concurrency': '(25)', 'max_shift_concurrency': '(25)'}), '(num_days=num_days, periods=24, shifts_coverage=\n 
shifts_coverage, required_resources=required_resources, cost_dict=\n cost_dict, max_period_concurrency=25, max_shift_concurrency=25)\n', (5307, 5494), False, 'from pyworkforce.shifts import MinAbsDifference, MinRequiredResources\n'), ((8241, 8426), 'pyworkforce.shifts.MinRequiredResources', 'MinRequiredResources', ([], {'num_days': 'num_days', 'periods': '(24)', 'shifts_coverage': 'shifts_coverage', 'required_resources': 'required_resources', 'max_period_concurrency': '(25)', 'max_shift_concurrency': '(20)'}), '(num_days=num_days, periods=24, shifts_coverage=\n shifts_coverage, required_resources=required_resources,\n max_period_concurrency=25, max_shift_concurrency=20)\n', (8261, 8426), False, 'from pyworkforce.shifts import MinAbsDifference, MinRequiredResources\n'), ((6857, 6881), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6870, 6881), False, 'import pytest\n'), ((6914, 7121), 'pyworkforce.shifts.MinRequiredResources', 'MinRequiredResources', ([], {'num_days': 'num_days', 'periods': '(24)', 'shifts_coverage': 'shifts_coverage', 'required_resources': 'required_resources', 'cost_dict': 'cost_dict', 'max_period_concurrency': '(25)', 'max_shift_concurrency': '(25)'}), '(num_days=num_days, periods=24, shifts_coverage=\n shifts_coverage, required_resources=required_resources, cost_dict=\n cost_dict, max_period_concurrency=25, max_shift_concurrency=25)\n', (6934, 7121), False, 'from pyworkforce.shifts import MinAbsDifference, MinRequiredResources\n')] |
# GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
# Plugin packaging metadata, generated by the Komand SDK (see header above).
setup(name='cisco_umbrella_enforcement-rapid7-plugin',
      version='1.0.0',
      description='Cisco Umbrella Enforcement give technology partners the ability to send security events from their platform/service/appliance within a customer environment to the Cisco security cloud for enforcement',
      author='rapid7',
      author_email='',
      url='',
      packages=find_packages(),
      install_requires=['komand'],  # Add third-party dependencies to requirements.txt, not here!
      scripts=['bin/komand_cisco_umbrella_enforcement']
      )
| [
"setuptools.find_packages"
] | [((460, 475), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (473, 475), False, 'from setuptools import setup, find_packages\n')] |
# Generated by Django 3.2.8 on 2021-10-10 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Ticket table.

    NOTE(review): field names are camelCase rather than Django's usual
    snake_case; renaming would require a follow-up migration.
    """

    # First migration of this app, so no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ticketName', models.CharField(max_length=40)),
                ('ticketGroup', models.CharField(max_length=20)),
                ('ticketKeyword', models.CharField(max_length=20)),
            ],
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.BigAutoField"
] | [((302, 398), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (321, 398), False, 'from django.db import migrations, models\n'), ((428, 459), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (444, 459), False, 'from django.db import migrations, models\n'), ((494, 525), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (510, 525), False, 'from django.db import migrations, models\n'), ((562, 593), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (578, 593), False, 'from django.db import migrations, models\n')] |
# %%
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from model.inceptionv4 import inceptionv4
from model.mobilenetv2 import mobilenetv2
from model.resnet import resnet18
from model.shufflenetv2 import shufflenetv2
from model.vgg import vgg9_bn
from s3_dataset import PlantDataSet, PlantDataSetB
# %%
def get_acc(net, device, data_loader):
    """Compute classification accuracy of ``net`` over ``data_loader``.

    Args:
        net: torch module mapping a float image batch to class scores.
        device: torch.device each batch is moved to before the forward pass.
        data_loader: iterable yielding ``(images, labels)`` batches.

    Returns:
        float: fraction of correctly classified samples in ``[0.0, 1.0]``.
        Returns 0.0 for an empty loader (the original divided by zero here).
    """
    correct = 0
    total = 0
    net.eval()  # disable dropout / use running batch-norm statistics
    with torch.no_grad():  # no autograd bookkeeping needed for evaluation
        for images, labels in data_loader:
            images = images.float().to(device)
            labels = labels.long().to(device)
            outputs = net(images)
            # Index of the max score along the class dimension = hard prediction.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Guard against an empty loader instead of raising ZeroDivisionError.
    return correct / total if total else 0.0
def get_pre(net, device, data_loader):
    """Run ``net`` over every batch and collect predictions plus ground truth.

    Returns a pair ``(predictions, labels)`` of flat lists covering the whole
    loader in iteration order.
    """
    predictions = []
    ground_truth = []
    net.eval()
    with torch.no_grad():
        for batch_images, batch_labels in data_loader:
            batch_images = batch_images.float().to(device)
            batch_labels = batch_labels.long().to(device)
            logits = net(batch_images)
            _, hard_pred = torch.max(logits.data, 1)
            ground_truth.extend(batch_labels.data.cpu().numpy())
            predictions.extend(hard_pred.data.cpu().numpy())
    return predictions, ground_truth
# %%
# Model constructors and their checkpoint paths, kept index-aligned:
# Func[i] is restored from Save_path[i].
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
    '../model_save/plant_disease2/vgg.pth',
    '../model_save/plant_disease2/resnet18.pth',
    '../model_save/plant_disease2/shufflenetv2.pth',
    '../model_save/plant_disease2/mobilenetv2.pth',
    '../model_save/plant_disease2/inceptionv4.pth'
]
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
# data_loader_val = DataLoader(PlantDataSetB(flag='val'),
#                             batch_size=64,
#                             shuffle=False)
# data_loader_test = DataLoader(PlantDataSetB(flag='test'),
#                              batch_size=64,
#                              shuffle=False)
# Domain-A (PlantDataSet) evaluation loaders; shuffling is disabled because
# sample order does not affect accuracy computation.
data_loader_val = DataLoader(PlantDataSet(flag='val'),
                            batch_size=64,
                            shuffle=False)
data_loader_test = DataLoader(PlantDataSet(flag='test'),
                             batch_size=64,
                             shuffle=False)
# Console banner (Chinese): "Domain A data set: verification".
print('A 域数据集: 校核')
for Index in range(1):
    # Load the model architecture and its trained weights.
    net = Func[Index]()
    path_saved_model = Save_path[Index]
    net.load_state_dict(torch.load(path_saved_model))
    net.to(device)
    val_acc = get_acc(net, device, data_loader_val)
    test_acc = get_acc(net, device, data_loader_test)
    print('{:d}: val_acc:{:.5f}, test_acc:{:.5f}'.format(
        Index, val_acc, test_acc))
# %%
# Compute each model's confusion matrix on the two test sets.
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
    '../model_save/plant_disease2/vgg.pth',
    '../model_save/plant_disease2/resnet18.pth',
    '../model_save/plant_disease2/shufflenetv2.pth',
    '../model_save/plant_disease2/mobilenetv2.pth',
    '../model_save/plant_disease2/inceptionv4.pth'
]
# Test loaders for domain A (PlantDataSet) and domain B (PlantDataSetB).
data_test_a = DataLoader(PlantDataSet(flag='test'),
                         batch_size=64,
                         shuffle=False)
data_test_b = DataLoader(PlantDataSetB(flag='test'),
                         batch_size=64,
                         shuffle=False)
Index = 1
# Load the model architecture and its trained weights.
net = Func[Index]()
path_saved_model = Save_path[Index]
net.load_state_dict(torch.load(path_saved_model))
net.to(device)
pre, label = get_pre(net, device, data_test_b)
pre, label = np.array(pre), np.array(label)
# %%
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score  # accuracy
from sklearn.metrics import confusion_matrix  # confusion matrix
print('预测精度为:{:.9f}'.format(accuracy_score(label, pre)))
# Inspect the confusion matrix.
# Mapping from domain-A class name to its integer label (38 classes).
domain_A_class = {
    'Apple___Apple_scab': 0,
    'Apple___Black_rot': 1,
    'Apple___Cedar_apple_rust': 2,
    'Apple___healthy': 3,
    'Blueberry___healthy': 4,
    'Cherry_(including_sour)___Powdery_mildew': 5,
    'Cherry_(including_sour)___healthy': 6,
    'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot': 7,
    'Corn_(maize)___Common_rust_': 8,
    'Corn_(maize)___Northern_Leaf_Blight': 9,
    'Corn_(maize)___healthy': 10,
    'Grape___Black_rot': 11,
    'Grape___Esca_(Black_Measles)': 12,
    'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)':13,
    'Grape___healthy':14,
    'Orange___Haunglongbing_(Citrus_greening)':15,
    'Peach___Bacterial_spot':16,
    'Peach___healthy':17,
    'Pepper,_bell___Bacterial_spot':18,
    'Pepper,_bell___healthy':19,
    'Potato___Early_blight':20,
    'Potato___Late_blight':21,
    'Potato___healthy':22,
    'Raspberry___healthy':23,
    'Soybean___healthy':24,
    'Squash___Powdery_mildew':25,
    'Strawberry___Leaf_scorch':26,
    'Strawberry___healthy':27,
    'Tomato___Bacterial_spot':28,
    'Tomato___Early_blight':29,
    'Tomato___Late_blight':30,
    'Tomato___Leaf_Mold':31,
    'Tomato___Septoria_leaf_spot':32,
    'Tomato___Spider_mites Two-spotted_spider_mite':33,
    'Tomato___Target_Spot':34,
    'Tomato___Tomato_Yellow_Leaf_Curl_Virus':35,
    'Tomato___Tomato_mosaic_virus':36,
    'Tomato___healthy':37}
c_matrix = confusion_matrix(label, pre, labels=list(range(38)))
# %% Reusable confusion-matrix plotting helper -- keep this code.
def plot_Matrix(cm, classes, title=None, cmap=plt.cm.Blues):
    """Render ``cm`` as a row-normalized confusion-matrix heat map.

    Args:
        cm: (n, n) integer confusion matrix (rows = actual, cols = predicted).
        classes: sequence of n class names used as tick labels.
        title: optional plot title.
        cmap: matplotlib colormap for the heat map.

    The matrix is normalized per row, cells that round below 1% are blanked,
    and every remaining cell is annotated with its integer percentage.
    """
    plt.rc('font', family='Times New Roman', size='8')  # global font style/size
    # Normalize each row; guard all-zero rows so we never divide by zero
    # (an absent class would otherwise produce NaNs and a runtime warning).
    row_sums = cm.sum(axis=1)[:, np.newaxis]
    cm = cm.astype('float') / np.maximum(row_sums, 1)
    print("Normalized confusion matrix")
    # BUG FIX: np.str was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin str is the documented replacement.
    str_cm = cm.astype(str).tolist()
    for row in str_cm:
        print('\t'.join(row))
    # Blank out cells that would round to below 1% so they do not show up
    # in the final color coding.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            if int(cm[i, j] * 100 + 0.5) == 0:
                cm[i, j] = 0
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax)  # optional color bar on the side
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='Actual',
           xlabel='Predicted')
    # Draw a minor-tick grid to emulate per-cell borders.
    ax.set_xticks(np.arange(cm.shape[1] + 1) - .5, minor=True)
    ax.set_yticks(np.arange(cm.shape[0] + 1) - .5, minor=True)
    ax.grid(which="minor", color="gray", linestyle='-', linewidth=0.2)
    ax.tick_params(which="minor", bottom=False, left=False)
    # Rotate the x-axis labels 45 degrees so long class names stay readable.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each visible cell with its rounded percentage.
    fmt = 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            if int(cm[i, j] * 100 + 0.5) > 0:
                ax.text(j, i, format(int(cm[i, j] * 100 + 0.5), fmt) + '%',
                        ha="center", va="center",
                        color="white" if cm[i, j] > thresh else "black")
    plt.show()
# %%
# Echo the 38 domain-A class names (notebook-cell output only).
domain_A_class.keys()
# %%
# BUG FIX: the module-level confusion matrix is named c_matrix; `cm` exists
# only as a parameter inside plot_Matrix, so plt.matshow(cm, ...) raised a
# NameError here.
plt.matshow(c_matrix, cmap=plt.cm.Blues)
# %%
| [
"sklearn.metrics.accuracy_score",
"s3_dataset.PlantDataSetB",
"numpy.arange",
"torch.load",
"torch.max",
"numpy.array",
"torch.cuda.is_available",
"s3_dataset.PlantDataSet",
"torch.no_grad",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"matplotlib.pyplot... | [((7200, 7234), 'matplotlib.pyplot.matshow', 'plt.matshow', (['cm'], {'cmap': 'plt.cm.Blues'}), '(cm, cmap=plt.cm.Blues)\n', (7211, 7234), True, 'import matplotlib.pyplot as plt\n'), ((2225, 2249), 's3_dataset.PlantDataSet', 'PlantDataSet', ([], {'flag': '"""val"""'}), "(flag='val')\n", (2237, 2249), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((2369, 2394), 's3_dataset.PlantDataSet', 'PlantDataSet', ([], {'flag': '"""test"""'}), "(flag='test')\n", (2381, 2394), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((3337, 3362), 's3_dataset.PlantDataSet', 'PlantDataSet', ([], {'flag': '"""test"""'}), "(flag='test')\n", (3349, 3362), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((3477, 3503), 's3_dataset.PlantDataSetB', 'PlantDataSetB', ([], {'flag': '"""test"""'}), "(flag='test')\n", (3490, 3503), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((3691, 3719), 'torch.load', 'torch.load', (['path_saved_model'], {}), '(path_saved_model)\n', (3701, 3719), False, 'import torch\n'), ((3796, 3809), 'numpy.array', 'np.array', (['pre'], {}), '(pre)\n', (3804, 3809), True, 'import numpy as np\n'), ((3811, 3826), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (3819, 3826), True, 'import numpy as np\n'), ((5563, 5613), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""', 'size': '"""8"""'}), "('font', family='Times New Roman', size='8')\n", (5569, 5613), True, 'import matplotlib.pyplot as plt\n'), ((6031, 6045), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6043, 6045), True, 'import matplotlib.pyplot as plt\n'), ((7154, 7164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7162, 7164), True, 'import matplotlib.pyplot as plt\n'), ((530, 545), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (543, 545), False, 'import torch\n'), ((1058, 1073), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1071, 1073), False, 
'import torch\n'), ((1859, 1884), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1882, 1884), False, 'import torch\n'), ((2632, 2660), 'torch.load', 'torch.load', (['path_saved_model'], {}), '(path_saved_model)\n', (2642, 2660), False, 'import torch\n'), ((2942, 2967), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2965, 2967), False, 'import torch\n'), ((4006, 4032), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['label', 'pre'], {}), '(label, pre)\n', (4020, 4032), False, 'from sklearn.metrics import accuracy_score\n'), ((787, 813), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (796, 813), False, 'import torch\n'), ((1315, 1341), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (1324, 1341), False, 'import torch\n'), ((6174, 6196), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (6183, 6196), True, 'import numpy as np\n'), ((6216, 6238), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (6225, 6238), True, 'import numpy as np\n'), ((6419, 6445), 'numpy.arange', 'np.arange', (['(cm.shape[1] + 1)'], {}), '(cm.shape[1] + 1)\n', (6428, 6445), True, 'import numpy as np\n'), ((6478, 6504), 'numpy.arange', 'np.arange', (['(cm.shape[0] + 1)'], {}), '(cm.shape[0] + 1)\n', (6487, 6504), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: syft_proto/execution/v1/protocol.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from syft_proto.types.syft.v1 import id_pb2 as syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2
from syft_proto.execution.v1 import role_pb2 as syft__proto_dot_execution_dot_v1_dot_role__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='syft_proto/execution/v1/protocol.proto',
package='syft_proto.execution.v1',
syntax='proto3',
serialized_options=b'\n$org.openmined.syftproto.execution.v1',
serialized_pb=b'\n&syft_proto/execution/v1/protocol.proto\x12\x17syft_proto.execution.v1\x1a!syft_proto/types/syft/v1/id.proto\x1a\"syft_proto/execution/v1/role.proto\"\x9f\x02\n\x08Protocol\x12,\n\x02id\x18\x01 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x42\n\x05roles\x18\x03 \x03(\x0b\x32,.syft_proto.execution.v1.Protocol.RolesEntryR\x05roles\x12\x12\n\x04tags\x18\x04 \x03(\tR\x04tags\x12 \n\x0b\x64\x65scription\x18\x05 \x01(\tR\x0b\x64\x65scription\x1aW\n\nRolesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32\x1d.syft_proto.execution.v1.RoleR\x05value:\x02\x38\x01\x42&\n$org.openmined.syftproto.execution.v1b\x06proto3'
,
dependencies=[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.DESCRIPTOR,syft__proto_dot_execution_dot_v1_dot_role__pb2.DESCRIPTOR,])
_PROTOCOL_ROLESENTRY = _descriptor.Descriptor(
name='RolesEntry',
full_name='syft_proto.execution.v1.Protocol.RolesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='syft_proto.execution.v1.Protocol.RolesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='syft_proto.execution.v1.Protocol.RolesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=339,
serialized_end=426,
)
_PROTOCOL = _descriptor.Descriptor(
name='Protocol',
full_name='syft_proto.execution.v1.Protocol',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='syft_proto.execution.v1.Protocol.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='id', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='syft_proto.execution.v1.Protocol.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='name', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roles', full_name='syft_proto.execution.v1.Protocol.roles', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='roles', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='syft_proto.execution.v1.Protocol.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tags', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='syft_proto.execution.v1.Protocol.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='description', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PROTOCOL_ROLESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=426,
)
_PROTOCOL_ROLESENTRY.fields_by_name['value'].message_type = syft__proto_dot_execution_dot_v1_dot_role__pb2._ROLE
_PROTOCOL_ROLESENTRY.containing_type = _PROTOCOL
_PROTOCOL.fields_by_name['id'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
_PROTOCOL.fields_by_name['roles'].message_type = _PROTOCOL_ROLESENTRY
DESCRIPTOR.message_types_by_name['Protocol'] = _PROTOCOL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Protocol = _reflection.GeneratedProtocolMessageType('Protocol', (_message.Message,), {
'RolesEntry' : _reflection.GeneratedProtocolMessageType('RolesEntry', (_message.Message,), {
'DESCRIPTOR' : _PROTOCOL_ROLESENTRY,
'__module__' : 'syft_proto.execution.v1.protocol_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.execution.v1.Protocol.RolesEntry)
})
,
'DESCRIPTOR' : _PROTOCOL,
'__module__' : 'syft_proto.execution.v1.protocol_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.execution.v1.Protocol)
})
_sym_db.RegisterMessage(Protocol)
_sym_db.RegisterMessage(Protocol.RolesEntry)
DESCRIPTOR._options = None
_PROTOCOL_ROLESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor.FileDescriptor"
] | [((400, 426), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (424, 426), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((636, 1676), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""syft_proto/execution/v1/protocol.proto"""', 'package': '"""syft_proto.execution.v1"""', 'syntax': '"""proto3"""', 'serialized_options': "b'\\n$org.openmined.syftproto.execution.v1'", 'serialized_pb': 'b\'\\n&syft_proto/execution/v1/protocol.proto\\x12\\x17syft_proto.execution.v1\\x1a!syft_proto/types/syft/v1/id.proto\\x1a"syft_proto/execution/v1/role.proto"\\x9f\\x02\\n\\x08Protocol\\x12,\\n\\x02id\\x18\\x01 \\x01(\\x0b2\\x1c.syft_proto.types.syft.v1.IdR\\x02id\\x12\\x12\\n\\x04name\\x18\\x02 \\x01(\\tR\\x04name\\x12B\\n\\x05roles\\x18\\x03 \\x03(\\x0b2,.syft_proto.execution.v1.Protocol.RolesEntryR\\x05roles\\x12\\x12\\n\\x04tags\\x18\\x04 \\x03(\\tR\\x04tags\\x12 \\n\\x0bdescription\\x18\\x05 \\x01(\\tR\\x0bdescription\\x1aW\\n\\nRolesEntry\\x12\\x10\\n\\x03key\\x18\\x01 \\x01(\\tR\\x03key\\x123\\n\\x05value\\x18\\x02 \\x01(\\x0b2\\x1d.syft_proto.execution.v1.RoleR\\x05value:\\x028\\x01B&\\n$org.openmined.syftproto.execution.v1b\\x06proto3\'', 'dependencies': '[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.DESCRIPTOR,\n syft__proto_dot_execution_dot_v1_dot_role__pb2.DESCRIPTOR]'}), '(name=\'syft_proto/execution/v1/protocol.proto\',\n package=\'syft_proto.execution.v1\', syntax=\'proto3\', serialized_options=\n b\'\\n$org.openmined.syftproto.execution.v1\', serialized_pb=\n b\'\\n&syft_proto/execution/v1/protocol.proto\\x12\\x17syft_proto.execution.v1\\x1a!syft_proto/types/syft/v1/id.proto\\x1a"syft_proto/execution/v1/role.proto"\\x9f\\x02\\n\\x08Protocol\\x12,\\n\\x02id\\x18\\x01 \\x01(\\x0b2\\x1c.syft_proto.types.syft.v1.IdR\\x02id\\x12\\x12\\n\\x04name\\x18\\x02 \\x01(\\tR\\x04name\\x12B\\n\\x05roles\\x18\\x03 
\\x03(\\x0b2,.syft_proto.execution.v1.Protocol.RolesEntryR\\x05roles\\x12\\x12\\n\\x04tags\\x18\\x04 \\x03(\\tR\\x04tags\\x12 \\n\\x0bdescription\\x18\\x05 \\x01(\\tR\\x0bdescription\\x1aW\\n\\nRolesEntry\\x12\\x10\\n\\x03key\\x18\\x01 \\x01(\\tR\\x03key\\x123\\n\\x05value\\x18\\x02 \\x01(\\x0b2\\x1d.syft_proto.execution.v1.RoleR\\x05value:\\x028\\x01B&\\n$org.openmined.syftproto.execution.v1b\\x06proto3\'\n , dependencies=[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.\n DESCRIPTOR, syft__proto_dot_execution_dot_v1_dot_role__pb2.DESCRIPTOR])\n', (662, 1676), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5875, 6050), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""RolesEntry"""', '(_message.Message,)', "{'DESCRIPTOR': _PROTOCOL_ROLESENTRY, '__module__':\n 'syft_proto.execution.v1.protocol_pb2'}"], {}), "('RolesEntry', (_message.Message,),\n {'DESCRIPTOR': _PROTOCOL_ROLESENTRY, '__module__':\n 'syft_proto.execution.v1.protocol_pb2'})\n", (5915, 6050), True, 'from google.protobuf import reflection as _reflection\n'), ((2319, 2700), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""value"""', 'full_name': '"""syft_proto.execution.v1.Protocol.RolesEntry.value"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'json_name': '"""value"""', 'file': 'DESCRIPTOR'}), "(name='value', full_name=\n 'syft_proto.execution.v1.Protocol.RolesEntry.value', index=1, number=2,\n type=11, cpp_type=10, label=1, has_default_value=False, default_value=\n None, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None,\n json_name='value', 
file=DESCRIPTOR)\n", (2346, 2700), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3120, 3481), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""id"""', 'full_name': '"""syft_proto.execution.v1.Protocol.id"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'json_name': '"""id"""', 'file': 'DESCRIPTOR'}), "(name='id', full_name=\n 'syft_proto.execution.v1.Protocol.id', index=0, number=1, type=11,\n cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, json_name='id',\n file=DESCRIPTOR)\n", (3147, 3481), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3903, 4271), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""roles"""', 'full_name': '"""syft_proto.execution.v1.Protocol.roles"""', 'index': '(2)', 'number': '(3)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'json_name': '"""roles"""', 'file': 'DESCRIPTOR'}), "(name='roles', full_name=\n 'syft_proto.execution.v1.Protocol.roles', index=2, number=3, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, json_name='roles',\n file=DESCRIPTOR)\n", (3930, 4271), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4292, 4655), 
'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""tags"""', 'full_name': '"""syft_proto.execution.v1.Protocol.tags"""', 'index': '(3)', 'number': '(4)', 'type': '(9)', 'cpp_type': '(9)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'json_name': '"""tags"""', 'file': 'DESCRIPTOR'}), "(name='tags', full_name=\n 'syft_proto.execution.v1.Protocol.tags', index=3, number=4, type=9,\n cpp_type=9, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, json_name='tags',\n file=DESCRIPTOR)\n", (4319, 4655), True, 'from google.protobuf import descriptor as _descriptor\n')] |
# -----------------------------------------------------------------------------
# NDN Repo delete client.
#
# @Author <EMAIL>
# @Date 2019-09-26
# -----------------------------------------------------------------------------
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse
import asyncio as aio
from ..command.repo_commands import RepoCommandParameter, RepoCommandResponse
from .command_checker import CommandChecker
from ..utils import PubSub
import logging
from ndn.app import NDNApp
from ndn.encoding import Name, Component, DecodeError, NonStrictName
from ndn.types import InterestNack, InterestTimeout
from ndn.utils import gen_nonce
class DeleteClient(object):
    def __init__(self, app: NDNApp, prefix: NonStrictName, repo_name: NonStrictName):
        """
        This client deletes data packets from the remote repo.
        :param app: NDNApp.
        :param prefix: NonStrictName. Name of this client, used as the
            publisher prefix for the pub/sub channel.
        :param repo_name: NonStrictName. Routable name to remote repo.
        """
        self.app = app
        self.prefix = prefix
        self.repo_name = repo_name
        # Pub/sub helper used to push delete commands to the repo's topic.
        self.pb = PubSub(self.app, self.prefix)
    async def delete_file(self, prefix: NonStrictName, start_block_id: int=None,
                          end_block_id: int=None) -> int:
        """
        Delete from repo packets between "<name_at_repo>/<start_block_id>" and\
        "<name_at_repo>/<end_block_id>" inclusively.
        :param prefix: NonStrictName. The name of the file stored in the remote repo.
        :param start_block_id: int. Default value is 0.
        :param end_block_id: int. If not specified, repo will attempt to delete all data packets\
        with segment number starting from `start_block_id` continuously.
        :return: Number of deleted packets.
        """
        # Build the command parameter object carried by the publish message.
        cmd_param = RepoCommandParameter()
        cmd_param.name = prefix
        cmd_param.start_block_id = start_block_id
        cmd_param.end_block_id = end_block_id
        cmd_param.register_prefix = prefix
        # Random nonce identifying this command; used below to poll its status.
        process_id = gen_nonce()
        cmd_param.process_id = process_id
        cmd_param_bytes = cmd_param.encode()
        # publish msg to repo's delete topic
        await self.pb.wait_for_ready()
        self.pb.publish(self.repo_name + ['delete'], cmd_param_bytes)
        # wait until repo delete all data
        return await self._wait_for_finish(process_id)
    async def _wait_for_finish(self, process_id: int):
        """
        Send delete check interest to wait until delete process completes
        :param process_id: int. The process id to check for delete process
        :return: Number of deleted packets.
        """
        checker = CommandChecker(self.app)
        n_retries = 3
        while n_retries > 0:
            response = await checker.check_delete(self.repo_name, process_id)
            if response is None:
                # No response at all: keep polling.
                # NOTE(review): this branch does not decrement n_retries, so a
                # permanently silent repo keeps this loop spinning forever --
                # confirm that is intended.
                logging.info(f'Response code is None')
                await aio.sleep(1)
            # might receive 404 if repo has not yet processed delete command msg
            elif response.status_code == 404:
                n_retries -= 1
                logging.info(f'Response code is {response.status_code}')
                await aio.sleep(1)
            elif response.status_code == 300:
                # Treated as "not finished yet": wait a second and poll again.
                logging.info(f'Response code is {response.status_code}')
                await aio.sleep(1)
            elif response.status_code == 200:
                logging.info('Delete process {} status: {}, delete_num: {}'
                             .format(process_id, response.status_code, response.delete_num))
                return response.delete_num
            else:
                # Shouldn't get here
                assert False
        # NOTE(review): falling out of the loop after three 404s returns None
        # implicitly, although the docstring promises an int -- confirm callers
        # handle that.
"os.path.join",
"logging.info",
"ndn.utils.gen_nonce",
"asyncio.sleep"
] | [((268, 299), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (280, 299), False, 'import os\n'), ((2051, 2062), 'ndn.utils.gen_nonce', 'gen_nonce', ([], {}), '()\n', (2060, 2062), False, 'from ndn.utils import gen_nonce\n'), ((2898, 2936), 'logging.info', 'logging.info', (['f"""Response code is None"""'], {}), "(f'Response code is None')\n", (2910, 2936), False, 'import logging\n'), ((2959, 2971), 'asyncio.sleep', 'aio.sleep', (['(1)'], {}), '(1)\n', (2968, 2971), True, 'import asyncio as aio\n'), ((3146, 3202), 'logging.info', 'logging.info', (['f"""Response code is {response.status_code}"""'], {}), "(f'Response code is {response.status_code}')\n", (3158, 3202), False, 'import logging\n'), ((3225, 3237), 'asyncio.sleep', 'aio.sleep', (['(1)'], {}), '(1)\n', (3234, 3237), True, 'import asyncio as aio\n'), ((3300, 3356), 'logging.info', 'logging.info', (['f"""Response code is {response.status_code}"""'], {}), "(f'Response code is {response.status_code}')\n", (3312, 3356), False, 'import logging\n'), ((3379, 3391), 'asyncio.sleep', 'aio.sleep', (['(1)'], {}), '(1)\n', (3388, 3391), True, 'import asyncio as aio\n')] |
import threading
import datetime
import serial
import functions
from queue import Queue
rf_port = 'COM4'  # Windows COM port the RF modem is attached to
ser_rf = serial.Serial(rf_port, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=1, xonxoff=0)
iver = '3089'  # Iver3 vehicle id embedded in outgoing instructions
send_through_rf_every = 2  # seconds between periodic RF transmissions
def read_rf():
    """Read the RF serial port forever, printing decoded OSI and OSD-ack frames."""
    ser_rf.reset_input_buffer()
    send_through_rf()  # start the periodic transmit loop before listening
    osi_rec, osd_ak = 0, 0  # counters: OSI frames / OSD acks received so far
    while True:
        try:
            frm_iver = ser_rf.readline().decode()
            if len(frm_iver) > 1:
                if functions.received_stream(frm_iver) == 'osi':
                    # NOTE(review): functions.osi() is parsed twice (once here,
                    # once for the None check below); presumably a single call
                    # would do -- confirm it has no side effects.
                    osi_return = functions.osi(frm_iver)
                    if functions.osi(frm_iver) is not None:
                        print(datetime.datetime.now(), ': RF: lat:', osi_return['Latitude'],
                              'lng:', osi_return['Longitude'], ', speed:', osi_return['Speed'],
                              ', Battery:', osi_return['Battery'], ', nxtWP:', osi_return['NextWp'],
                              ', DistantNxt WP: ', osi_return['DistanceToNxtWP'])
                        # rf_i is the module-level transmit counter incremented
                        # by send_through_rf().
                        print(datetime.datetime.now(), f': OSI received RF: {osi_rec} / requested: {rf_i}')
                        osi_rec += 1
                elif functions.received_stream(frm_iver) == 'osdAck':
                    if functions.osd_ack(frm_iver) == 0:
                        print(datetime.datetime.now(), ': OSI Ack received RF ', osd_ak)
                        osd_ak += 1
        except Exception as e:
            # q_log.put([datetime.datetime.now().strftime("%H:%M:%S:%f"), ':', e])
            # NOTE(review): every decode/parse/serial error is silently dropped
            # and the input buffer flushed; consider at least logging `e`.
            ser_rf.reset_input_buffer()
            continue
rf_i = 0

def send_through_rf():
    """Transmit one OSD instruction over RF and reschedule this function to
    run again after ``send_through_rf_every`` seconds."""
    global rf_i
    instruction = f'$AC;Iver3-{iver};$' + functions.osd() + '\r\n'
    ser_rf.reset_output_buffer()
    ser_rf.write(instruction.encode())
    print(datetime.datetime.now(), ': Sending through RF: ', rf_i)
    rf_i += 1
    threading.Timer(send_through_rf_every, send_through_rf).start()
read_rf() | [
"threading.Timer",
"functions.osd_ack",
"functions.received_stream",
"datetime.datetime.now",
"functions.osd",
"serial.Serial",
"functions.osi"
] | [((118, 217), 'serial.Serial', 'serial.Serial', (['rf_port'], {'baudrate': '(9600)', 'bytesize': '(8)', 'parity': '"""N"""', 'stopbits': '(1)', 'timeout': '(1)', 'xonxoff': '(0)'}), "(rf_port, baudrate=9600, bytesize=8, parity='N', stopbits=1,\n timeout=1, xonxoff=0)\n", (131, 217), False, 'import serial\n'), ((1849, 1872), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1870, 1872), False, 'import datetime\n'), ((1729, 1744), 'functions.osd', 'functions.osd', ([], {}), '()\n', (1742, 1744), False, 'import functions\n'), ((1924, 1979), 'threading.Timer', 'threading.Timer', (['send_through_rf_every', 'send_through_rf'], {}), '(send_through_rf_every, send_through_rf)\n', (1939, 1979), False, 'import threading\n'), ((508, 543), 'functions.received_stream', 'functions.received_stream', (['frm_iver'], {}), '(frm_iver)\n', (533, 543), False, 'import functions\n'), ((587, 610), 'functions.osi', 'functions.osi', (['frm_iver'], {}), '(frm_iver)\n', (600, 610), False, 'import functions\n'), ((634, 657), 'functions.osi', 'functions.osi', (['frm_iver'], {}), '(frm_iver)\n', (647, 657), False, 'import functions\n'), ((1209, 1244), 'functions.received_stream', 'functions.received_stream', (['frm_iver'], {}), '(frm_iver)\n', (1234, 1244), False, 'import functions\n'), ((701, 724), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (722, 724), False, 'import datetime\n'), ((1073, 1096), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1094, 1096), False, 'import datetime\n'), ((1281, 1308), 'functions.osd_ack', 'functions.osd_ack', (['frm_iver'], {}), '(frm_iver)\n', (1298, 1308), False, 'import functions\n'), ((1345, 1368), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1366, 1368), False, 'import datetime\n')] |
"""The Rituals Perfume Genie integration."""
from datetime import timedelta
import logging
import aiohttp
from pyrituals import Account, Diffuser
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.core import OpenPeerPower
from openpeerpower.exceptions import ConfigEntryNotReady
from openpeerpower.helpers.aiohttp_client import async_get_clientsession
from openpeerpower.helpers.update_coordinator import DataUpdateCoordinator
from .const import ACCOUNT_HASH, COORDINATORS, DEVICES, DOMAIN, HUBLOT
# Entity platforms this integration forwards the config entry to.
PLATFORMS = ["binary_sensor", "sensor", "switch"]
# Authentication uses only the stored account hash, so the pyrituals Account
# is created with empty username/password credentials.
EMPTY_CREDENTIALS = ""
_LOGGER = logging.getLogger(__name__)
# Interval between polls of the Rituals cloud endpoint.
UPDATE_INTERVAL = timedelta(seconds=30)
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up Rituals Perfume Genie from a config entry."""
    session = async_get_clientsession(opp)
    # The account hash saved by the config flow is the only credential needed.
    account = Account(EMPTY_CREDENTIALS, EMPTY_CREDENTIALS, session)
    account.data = {ACCOUNT_HASH: entry.data.get(ACCOUNT_HASH)}
    try:
        account_devices = await account.get_devices()
    except aiohttp.ClientError as err:
        # ConfigEntryNotReady tells OpenPeerPower to retry the setup later.
        raise ConfigEntryNotReady from err
    opp.data.setdefault(DOMAIN, {})[entry.entry_id] = {
        COORDINATORS: {},
        DEVICES: {},
    }
    # One device object and one update coordinator per diffuser, keyed by the
    # hub LOT number so the platforms can look them up.
    for device in account_devices:
        hublot = device.hub_data[HUBLOT]
        coordinator = RitualsDataUpdateCoordinator(opp, device)
        # Prime the coordinator with a first refresh before entities are added.
        await coordinator.async_refresh()
        opp.data[DOMAIN][entry.entry_id][DEVICES][hublot] = device
        opp.data[DOMAIN][entry.entry_id][COORDINATORS][hublot] = coordinator
    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
opp.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class RitualsDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Rituals Perufme Genie device data from single endpoint."""
def __init__(self, opp: OpenPeerPower, device: Diffuser) -> None:
"""Initialize global Rituals Perufme Genie data updater."""
self._device = device
super().__init__(
opp,
_LOGGER,
name=f"{DOMAIN}-{device.hub_data[HUBLOT]}",
update_interval=UPDATE_INTERVAL,
)
async def _async_update_data(self) -> None:
"""Fetch data from Rituals."""
await self._device.update_data()
| [
"logging.getLogger",
"datetime.timedelta",
"pyrituals.Account",
"openpeerpower.helpers.aiohttp_client.async_get_clientsession"
] | [((609, 636), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (626, 636), False, 'import logging\n'), ((656, 677), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (665, 677), False, 'from datetime import timedelta\n'), ((823, 851), 'openpeerpower.helpers.aiohttp_client.async_get_clientsession', 'async_get_clientsession', (['opp'], {}), '(opp)\n', (846, 851), False, 'from openpeerpower.helpers.aiohttp_client import async_get_clientsession\n'), ((866, 920), 'pyrituals.Account', 'Account', (['EMPTY_CREDENTIALS', 'EMPTY_CREDENTIALS', 'session'], {}), '(EMPTY_CREDENTIALS, EMPTY_CREDENTIALS, session)\n', (873, 920), False, 'from pyrituals import Account, Diffuser\n')] |
"""Copyright (c) 2021, <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple
from sqlalchemy.orm import Session
from sqlalchemy.sql.sqltypes import Boolean
from . import db_models as models
from data import schemas
def create_pet(db: Session, pet: schemas.Pet) -> schemas.Pet:
db_pet = models.Pet(
display_name=pet.display_name,
kind=pet.kind,
current_price=pet.current_price,
available_amount=pet.available_amount,
)
db.add(db_pet)
db.commit()
db.refresh(db_pet)
return schemas.Pet.from_orm(db_pet)
def get_available_pets(db: Session, limit: int, offset: int) -> List[schemas.Pet]:
db_pets = (
db.query(models.Pet)
.filter(models.Pet.available_amount > 0)
.limit(limit)
.offset(offset)
.all()
)
return [schemas.Pet.from_orm(db_pet) for db_pet in db_pets]
def reserve_pet(db: Session, pet_id: int, amount: int) -> Tuple[Boolean, int]:
db_pet = db.query(models.Pet).filter(models.Pet.id == pet_id).first()
is_success = False
if db_pet.available_amount > amount:
db_pet.available_amount = models.Pet.available_amount - amount
db.commit()
db.refresh(db_pet)
is_success = True
return is_success, schemas.Pet.from_orm(db_pet)
| [
"data.schemas.Pet.from_orm"
] | [((1063, 1091), 'data.schemas.Pet.from_orm', 'schemas.Pet.from_orm', (['db_pet'], {}), '(db_pet)\n', (1083, 1091), False, 'from data import schemas\n'), ((1350, 1378), 'data.schemas.Pet.from_orm', 'schemas.Pet.from_orm', (['db_pet'], {}), '(db_pet)\n', (1370, 1378), False, 'from data import schemas\n'), ((1790, 1818), 'data.schemas.Pet.from_orm', 'schemas.Pet.from_orm', (['db_pet'], {}), '(db_pet)\n', (1810, 1818), False, 'from data import schemas\n')] |
from configparser import ConfigParser
import os
from dotenv import load_dotenv
import pathlib
from shutil import copyfile
from execution_engine2.db.models.models import Job, JobInput, Meta
from dateutil import parser as dateparser
import requests
import json
from datetime import datetime
from execution_engine2.exceptions import MalformedTimestampException
from execution_engine2.db.models.models import Status
def get_example_job(
user: str = "boris", wsid: int = 123, authstrat: str = "kbaseworkspace"
) -> Job:
j = Job()
j.user = user
j.wsid = wsid
job_input = JobInput()
job_input.wsid = j.wsid
job_input.method = "method"
job_input.requested_release = "requested_release"
job_input.params = {}
job_input.service_ver = "dev"
job_input.app_id = "super_module.super_function"
m = Meta()
m.cell_id = "ApplePie"
job_input.narrative_cell_info = m
j.job_input = job_input
j.status = "queued"
j.authstrat = authstrat
return j
def _create_sample_params(self):
params = dict()
params["job_id"] = self.job_id
params["user"] = "kbase"
params["token"] = "<PASSWORD>"
params["client_group_and_requirements"] = "njs"
return params
def read_config_into_dict(config="deploy.cfg", section="execution_engine2"):
config_parser = ConfigParser()
config_parser.read(config)
config = dict()
for key, val in config_parser[section].items():
config[key] = val
return config
def bootstrap():
test_env = "test.env"
pwd = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
if not os.path.exists(test_env):
copyfile(f"{pwd}/test/env/{test_env}", f"{test_env}")
load_dotenv("test.env", verbose=True)
# flake8: noqa: C901
def validate_job_state(state):
"""
Validates whether a returned Job State has all the required fields with the right format.
If all is well, returns True,
otherwise this prints out errors to the command line and returns False.
Can be just used with assert in tests, like "assert validate_job_state(state)"
"""
required_fields = {
"job_id": str,
"user": str,
"wsid": int,
"authstrat": str,
"job_input": dict,
"updated": int,
"created": int,
"status": str,
}
optional_fields = {
"estimating": int,
"queued": int,
"running": int,
"finished": int,
"error_code": int,
"terminated_code": int,
"errormsg": str,
}
timestamp_fields = [
"created",
"updated",
"estimating",
"queued",
"running",
"completed",
]
# fields that have to be present based on the context of different statuses
valid_statuses = vars(Status)["_member_names_"]
status_context = {
"estimating": ["estimating"],
"running": ["running"],
"completed": ["completed"],
"error": ["error_code", "errormsg"],
"terminated": ["terminated_code"],
}
# 1. Make sure required fields are present and of the correct type
missing_reqs = list()
wrong_reqs = list()
for req in required_fields.keys():
if req not in state:
missing_reqs.append(req)
elif not isinstance(state[req], required_fields[req]):
wrong_reqs.append(req)
if missing_reqs or wrong_reqs:
print(f"Job state is missing required fields: {missing_reqs}.")
for req in wrong_reqs:
print(
f"Job state has faulty req - {req} should be of type {required_fields[req]}, but had value {state[req]}."
)
return False
# 2. Make sure that context-specific fields are present and the right type
status = state["status"]
if status not in valid_statuses:
print(f"Job state has invalid status {status}.")
return False
if status in status_context:
context_fields = status_context[status]
missing_context = list()
wrong_context = list()
for field in context_fields:
if field not in state:
missing_context.append(field)
elif not isinstance(state[field], optional_fields[field]):
wrong_context.append(field)
if missing_context or wrong_context:
print(f"Job state is missing status context fields: {missing_context}.")
for field in wrong_context:
print(
f"Job state has faulty context field - {field} should be of type {optional_fields[field]}, but had value {state[field]}."
)
return False
# 3. Make sure timestamps are really timestamps
bad_ts = list()
for ts_type in timestamp_fields:
if ts_type in state:
is_second_ts = is_timestamp(state[ts_type])
if not is_second_ts:
print(state[ts_type], "is not a second ts")
is_ms_ts = is_timestamp(state[ts_type] / 1000)
if not is_ms_ts:
print(state[ts_type], "is not a millisecond ts")
if not is_second_ts and not is_ms_ts:
bad_ts.append(ts_type)
if bad_ts:
for ts_type in bad_ts:
print(
f"Job state has a malformatted timestamp: {ts_type} with value {state[ts_type]}"
)
raise MalformedTimestampException()
return True
def is_timestamp(ts: int):
"""
Simple enough - if dateutil.parser likes the string, it's a time string and we return True.
Otherwise, return False.
"""
try:
datetime.fromtimestamp(ts)
return True
except ValueError:
return False
def custom_ws_perm_maker(user_id: str, ws_perms: dict):
"""
Returns an Adapter for requests_mock that deals with mocking workspace permissions.
:param user_id: str - the user id
:param ws_perms: dict of permissions, keys are ws ids, values are permission. Example:
{123: "a", 456: "w"} means workspace id 123 has admin permissions, and 456 has
write permission
:return: an adapter function to be passed to request_mock
"""
def perm_adapter(request):
perms_req = request.json().get("params")[0].get("workspaces")
ret_perms = []
for ws in perms_req:
ret_perms.append({user_id: ws_perms.get(ws["id"], "n")})
response = requests.Response()
response.status_code = 200
response._content = bytes(
json.dumps({"result": [{"perms": ret_perms}], "version": "1.1"}), "UTF-8"
)
return response
return perm_adapter
| [
"os.path.exists",
"execution_engine2.exceptions.MalformedTimestampException",
"datetime.datetime.fromtimestamp",
"configparser.ConfigParser",
"execution_engine2.db.models.models.JobInput",
"requests.Response",
"json.dumps",
"dotenv.load_dotenv",
"execution_engine2.db.models.models.Job",
"shutil.co... | [((529, 534), 'execution_engine2.db.models.models.Job', 'Job', ([], {}), '()\n', (532, 534), False, 'from execution_engine2.db.models.models import Job, JobInput, Meta\n'), ((587, 597), 'execution_engine2.db.models.models.JobInput', 'JobInput', ([], {}), '()\n', (595, 597), False, 'from execution_engine2.db.models.models import Job, JobInput, Meta\n'), ((835, 841), 'execution_engine2.db.models.models.Meta', 'Meta', ([], {}), '()\n', (839, 841), False, 'from execution_engine2.db.models.models import Job, JobInput, Meta\n'), ((1324, 1338), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1336, 1338), False, 'from configparser import ConfigParser\n'), ((1704, 1741), 'dotenv.load_dotenv', 'load_dotenv', (['"""test.env"""'], {'verbose': '(True)'}), "('test.env', verbose=True)\n", (1715, 1741), False, 'from dotenv import load_dotenv\n'), ((1612, 1636), 'os.path.exists', 'os.path.exists', (['test_env'], {}), '(test_env)\n', (1626, 1636), False, 'import os\n'), ((1646, 1699), 'shutil.copyfile', 'copyfile', (['f"""{pwd}/test/env/{test_env}"""', 'f"""{test_env}"""'], {}), "(f'{pwd}/test/env/{test_env}', f'{test_env}')\n", (1654, 1699), False, 'from shutil import copyfile\n'), ((5384, 5413), 'execution_engine2.exceptions.MalformedTimestampException', 'MalformedTimestampException', ([], {}), '()\n', (5411, 5413), False, 'from execution_engine2.exceptions import MalformedTimestampException\n'), ((5618, 5644), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (5640, 5644), False, 'from datetime import datetime\n'), ((6416, 6435), 'requests.Response', 'requests.Response', ([], {}), '()\n', (6433, 6435), False, 'import requests\n'), ((1573, 1598), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1588, 1598), False, 'import os\n'), ((6518, 6582), 'json.dumps', 'json.dumps', (["{'result': [{'perms': ret_perms}], 'version': '1.1'}"], {}), "({'result': [{'perms': ret_perms}], 'version': 
'1.1'})\n", (6528, 6582), False, 'import json\n')] |
# -*- coding: utf-8 -*-
import asyncio
import socket
from xTool.servers.tcp import TCPServer
def test_tcp_server(aiomisc_unused_port):
loop = asyncio.get_event_loop()
class TestTcpService(TCPServer):
DATA = []
async def handle_client(self, reader: asyncio.StreamReader,
writer: asyncio.StreamWriter):
while True:
data = await reader.readline()
writer.write(data)
self.DATA.append(data)
service = TestTcpService(
"127.0.0.1", aiomisc_unused_port, **{"loop": loop})
async def writer():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with sock:
sock.connect(("127.0.0.1", aiomisc_unused_port))
sock.sendall(b"hello server\n")
await asyncio.sleep(1)
loop.run_until_complete(service.start())
loop.run_until_complete(writer())
loop.close()
assert TestTcpService.DATA == [b"hello server\n"]
| [
"asyncio.get_event_loop",
"socket.socket",
"asyncio.sleep"
] | [((149, 173), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (171, 173), False, 'import asyncio\n'), ((638, 687), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (651, 687), False, 'import socket\n'), ((826, 842), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (839, 842), False, 'import asyncio\n')] |
import decimal
import itertools
import random
from datetime import date, timedelta
import factory
from applications.enums import ApplicationStatus, ApplicationStep, BenefitType
from applications.models import (
AhjoDecision,
Application,
APPLICATION_LANGUAGE_CHOICES,
ApplicationBasis,
ApplicationBatch,
DeMinimisAid,
Employee,
)
from calculator.models import Calculation
from companies.tests.factories import CompanyFactory
from users.tests.factories import HandlerFactory
class DeMinimisAidFactory(factory.django.DjangoModelFactory):
granter = factory.Faker("sentence", nb_words=2)
# delay evaluation of date_start and date_end so that any freeze_time takes effect
granted_at = factory.Faker(
"date_between_dates",
date_start=factory.LazyAttribute(
lambda _: date.today() - timedelta(days=365 * 2)
),
date_end=factory.LazyAttribute(lambda _: date.today()),
)
amount = factory.Faker("pyint", min_value=1, max_value=100000)
ordering = factory.Iterator(itertools.count(0))
class Meta:
model = DeMinimisAid
class ApplicationBasisFactory(factory.django.DjangoModelFactory):
identifier = factory.Sequence(
lambda id: f"basis_identifier_{id}"
) # ensure it is unique
class Meta:
model = ApplicationBasis
class ApplicationFactory(factory.django.DjangoModelFactory):
company = factory.SubFactory(CompanyFactory)
employee = factory.RelatedFactory(
"applications.tests.factories.EmployeeFactory",
factory_related_name="application",
)
company_name = factory.Faker("sentence", nb_words=2)
company_form = factory.Faker("sentence", nb_words=1)
company_department = factory.Faker("street_address")
official_company_street_address = factory.Faker("street_address")
official_company_city = factory.Faker("city")
official_company_postcode = factory.Faker("postcode")
use_alternative_address = factory.Faker("boolean")
alternative_company_street_address = factory.Faker("street_address")
alternative_company_city = factory.Faker("city")
alternative_company_postcode = factory.Faker("postcode", locale="fi_FI")
company_bank_account_number = factory.Faker("iban", locale="fi_FI")
company_contact_person_phone_number = factory.Sequence(
lambda n: f"050-10000{n}"
) # max.length in validation seems to be 10 digits
company_contact_person_email = factory.Faker("email")
company_contact_person_first_name = factory.Faker("first_name")
company_contact_person_last_name = factory.Faker("last_name")
association_has_business_activities = None
applicant_language = factory.Faker(
"random_element", elements=[v[0] for v in APPLICATION_LANGUAGE_CHOICES]
)
co_operation_negotiations = factory.Faker("boolean")
co_operation_negotiations_description = factory.LazyAttribute(
lambda o: factory.Faker("sentence") if o.co_operation_negotiations else ""
)
pay_subsidy_granted = False
pay_subsidy_percent = None
additional_pay_subsidy_percent = None
apprenticeship_program = factory.Faker("boolean")
archived = factory.Faker("boolean")
application_step = ApplicationStep.STEP_1
benefit_type = BenefitType.EMPLOYMENT_BENEFIT
start_date = factory.Faker(
"date_between_dates",
date_start=date(date.today().year, 1, 1),
date_end=date.today() + timedelta(days=100),
)
end_date = factory.LazyAttribute(
lambda o: o.start_date + timedelta(days=random.randint(31, 364))
)
de_minimis_aid = True
status = ApplicationStatus.DRAFT
@factory.post_generation
def bases(self, created, extracted, **kwargs):
if basis_count := kwargs.pop("basis_count", random.randint(1, 5)):
for bt in ApplicationBasisFactory.create_batch(basis_count, **kwargs):
self.bases.add(bt)
de_minimis_1 = factory.RelatedFactory(
DeMinimisAidFactory,
factory_related_name="application",
)
de_minimis_2 = factory.RelatedFactory(
DeMinimisAidFactory,
factory_related_name="application",
)
class Meta:
model = Application
class ReceivedApplicationFactory(ApplicationFactory):
status = ApplicationStatus.RECEIVED
applicant_terms_approval = factory.RelatedFactory(
"terms.tests.factories.ApplicantTermsApprovalFactory",
factory_related_name="application",
)
calculation = factory.RelatedFactory(
"calculator.tests.factories.CalculationFactory",
factory_related_name="application",
)
@factory.post_generation
def calculation(self, created, extracted, **kwargs):
self.calculation = Calculation.objects.create_for_application(self)
self.calculation.calculated_benefit_amount = decimal.Decimal("321.00")
self.calculation.save()
class HandlingApplicationFactory(ReceivedApplicationFactory):
status = ApplicationStatus.HANDLING
@factory.post_generation
def calculation(self, created, extracted, **kwargs):
self.calculation = Calculation.objects.create_for_application(self)
self.calculation.calculated_benefit_amount = decimal.Decimal("123.00")
self.calculation.handler = HandlerFactory()
self.calculation.save()
class DecidedApplicationFactory(HandlingApplicationFactory):
status = ApplicationStatus.ACCEPTED
class EmployeeFactory(factory.django.DjangoModelFactory):
# pass employee=None to prevent ApplicationFactory from creating another employee
application = factory.SubFactory(ApplicationFactory, employee=None)
first_name = factory.Faker("first_name")
last_name = factory.Faker("last_name")
social_security_number = factory.Faker("ssn", locale="fi_FI")
phone_number = factory.Sequence(lambda n: f"050-10000{n}")
email = factory.Faker("email")
employee_language = factory.Faker(
"random_element", elements=[v[0] for v in APPLICATION_LANGUAGE_CHOICES]
)
job_title = factory.Faker("job")
monthly_pay = factory.Faker("random_int", max=5000)
vacation_money = factory.Faker("random_int", max=5000)
other_expenses = factory.Faker("random_int", max=5000)
working_hours = factory.Faker("random_int", min=18, max=40)
is_living_in_helsinki = factory.Faker("boolean")
collective_bargaining_agreement = factory.Faker("words")
class Meta:
model = Employee
class ApplicationBatchFactory(factory.django.DjangoModelFactory):
proposal_for_decision = AhjoDecision.DECIDED_ACCEPTED
application_1 = factory.RelatedFactory(
DecidedApplicationFactory,
factory_related_name="batch",
status=factory.SelfAttribute("batch.proposal_for_decision"),
)
application_2 = factory.RelatedFactory(
DecidedApplicationFactory,
factory_related_name="batch",
status=factory.SelfAttribute("batch.proposal_for_decision"),
)
decision_maker_title = factory.Faker("sentence", nb_words=2)
decision_maker_name = factory.Faker("name")
section_of_the_law = factory.Faker("word")
decision_date = factory.Faker(
"date_between_dates",
date_start=factory.LazyAttribute(lambda _: date.today() - timedelta(days=30)),
date_end=factory.LazyAttribute(lambda _: date.today()),
)
expert_inspector_name = factory.Faker("name")
expert_inspector_email = factory.Faker("email")
class Meta:
model = ApplicationBatch
| [
"factory.SubFactory",
"random.randint",
"users.tests.factories.HandlerFactory",
"datetime.date.today",
"factory.RelatedFactory",
"datetime.timedelta",
"factory.Faker",
"itertools.count",
"calculator.models.Calculation.objects.create_for_application",
"factory.Sequence",
"factory.SelfAttribute",
... | [((581, 618), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(2)'}), "('sentence', nb_words=2)\n", (594, 618), False, 'import factory\n'), ((966, 1019), 'factory.Faker', 'factory.Faker', (['"""pyint"""'], {'min_value': '(1)', 'max_value': '(100000)'}), "('pyint', min_value=1, max_value=100000)\n", (979, 1019), False, 'import factory\n'), ((1203, 1256), 'factory.Sequence', 'factory.Sequence', (["(lambda id: f'basis_identifier_{id}')"], {}), "(lambda id: f'basis_identifier_{id}')\n", (1219, 1256), False, 'import factory\n'), ((1421, 1455), 'factory.SubFactory', 'factory.SubFactory', (['CompanyFactory'], {}), '(CompanyFactory)\n', (1439, 1455), False, 'import factory\n'), ((1471, 1581), 'factory.RelatedFactory', 'factory.RelatedFactory', (['"""applications.tests.factories.EmployeeFactory"""'], {'factory_related_name': '"""application"""'}), "('applications.tests.factories.EmployeeFactory',\n factory_related_name='application')\n", (1493, 1581), False, 'import factory\n'), ((1620, 1657), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(2)'}), "('sentence', nb_words=2)\n", (1633, 1657), False, 'import factory\n'), ((1677, 1714), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(1)'}), "('sentence', nb_words=1)\n", (1690, 1714), False, 'import factory\n'), ((1740, 1771), 'factory.Faker', 'factory.Faker', (['"""street_address"""'], {}), "('street_address')\n", (1753, 1771), False, 'import factory\n'), ((1810, 1841), 'factory.Faker', 'factory.Faker', (['"""street_address"""'], {}), "('street_address')\n", (1823, 1841), False, 'import factory\n'), ((1870, 1891), 'factory.Faker', 'factory.Faker', (['"""city"""'], {}), "('city')\n", (1883, 1891), False, 'import factory\n'), ((1924, 1949), 'factory.Faker', 'factory.Faker', (['"""postcode"""'], {}), "('postcode')\n", (1937, 1949), False, 'import factory\n'), ((1980, 2004), 'factory.Faker', 'factory.Faker', (['"""boolean"""'], {}), "('boolean')\n", (1993, 
2004), False, 'import factory\n'), ((2046, 2077), 'factory.Faker', 'factory.Faker', (['"""street_address"""'], {}), "('street_address')\n", (2059, 2077), False, 'import factory\n'), ((2109, 2130), 'factory.Faker', 'factory.Faker', (['"""city"""'], {}), "('city')\n", (2122, 2130), False, 'import factory\n'), ((2166, 2207), 'factory.Faker', 'factory.Faker', (['"""postcode"""'], {'locale': '"""fi_FI"""'}), "('postcode', locale='fi_FI')\n", (2179, 2207), False, 'import factory\n'), ((2242, 2279), 'factory.Faker', 'factory.Faker', (['"""iban"""'], {'locale': '"""fi_FI"""'}), "('iban', locale='fi_FI')\n", (2255, 2279), False, 'import factory\n'), ((2322, 2365), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'050-10000{n}')"], {}), "(lambda n: f'050-10000{n}')\n", (2338, 2365), False, 'import factory\n'), ((2465, 2487), 'factory.Faker', 'factory.Faker', (['"""email"""'], {}), "('email')\n", (2478, 2487), False, 'import factory\n'), ((2528, 2555), 'factory.Faker', 'factory.Faker', (['"""first_name"""'], {}), "('first_name')\n", (2541, 2555), False, 'import factory\n'), ((2595, 2621), 'factory.Faker', 'factory.Faker', (['"""last_name"""'], {}), "('last_name')\n", (2608, 2621), False, 'import factory\n'), ((2694, 2784), 'factory.Faker', 'factory.Faker', (['"""random_element"""'], {'elements': '[v[0] for v in APPLICATION_LANGUAGE_CHOICES]'}), "('random_element', elements=[v[0] for v in\n APPLICATION_LANGUAGE_CHOICES])\n", (2707, 2784), False, 'import factory\n'), ((2827, 2851), 'factory.Faker', 'factory.Faker', (['"""boolean"""'], {}), "('boolean')\n", (2840, 2851), False, 'import factory\n'), ((3144, 3168), 'factory.Faker', 'factory.Faker', (['"""boolean"""'], {}), "('boolean')\n", (3157, 3168), False, 'import factory\n'), ((3184, 3208), 'factory.Faker', 'factory.Faker', (['"""boolean"""'], {}), "('boolean')\n", (3197, 3208), False, 'import factory\n'), ((3950, 4029), 'factory.RelatedFactory', 'factory.RelatedFactory', (['DeMinimisAidFactory'], 
{'factory_related_name': '"""application"""'}), "(DeMinimisAidFactory, factory_related_name='application')\n", (3972, 4029), False, 'import factory\n'), ((4072, 4151), 'factory.RelatedFactory', 'factory.RelatedFactory', (['DeMinimisAidFactory'], {'factory_related_name': '"""application"""'}), "(DeMinimisAidFactory, factory_related_name='application')\n", (4094, 4151), False, 'import factory\n'), ((4347, 4464), 'factory.RelatedFactory', 'factory.RelatedFactory', (['"""terms.tests.factories.ApplicantTermsApprovalFactory"""'], {'factory_related_name': '"""application"""'}), "('terms.tests.factories.ApplicantTermsApprovalFactory',\n factory_related_name='application')\n", (4369, 4464), False, 'import factory\n'), ((4502, 4613), 'factory.RelatedFactory', 'factory.RelatedFactory', (['"""calculator.tests.factories.CalculationFactory"""'], {'factory_related_name': '"""application"""'}), "('calculator.tests.factories.CalculationFactory',\n factory_related_name='application')\n", (4524, 4613), False, 'import factory\n'), ((5604, 5657), 'factory.SubFactory', 'factory.SubFactory', (['ApplicationFactory'], {'employee': 'None'}), '(ApplicationFactory, employee=None)\n', (5622, 5657), False, 'import factory\n'), ((5675, 5702), 'factory.Faker', 'factory.Faker', (['"""first_name"""'], {}), "('first_name')\n", (5688, 5702), False, 'import factory\n'), ((5719, 5745), 'factory.Faker', 'factory.Faker', (['"""last_name"""'], {}), "('last_name')\n", (5732, 5745), False, 'import factory\n'), ((5775, 5811), 'factory.Faker', 'factory.Faker', (['"""ssn"""'], {'locale': '"""fi_FI"""'}), "('ssn', locale='fi_FI')\n", (5788, 5811), False, 'import factory\n'), ((5831, 5874), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'050-10000{n}')"], {}), "(lambda n: f'050-10000{n}')\n", (5847, 5874), False, 'import factory\n'), ((5887, 5909), 'factory.Faker', 'factory.Faker', (['"""email"""'], {}), "('email')\n", (5900, 5909), False, 'import factory\n'), ((5935, 6025), 'factory.Faker', 
'factory.Faker', (['"""random_element"""'], {'elements': '[v[0] for v in APPLICATION_LANGUAGE_CHOICES]'}), "('random_element', elements=[v[0] for v in\n APPLICATION_LANGUAGE_CHOICES])\n", (5948, 6025), False, 'import factory\n'), ((6052, 6072), 'factory.Faker', 'factory.Faker', (['"""job"""'], {}), "('job')\n", (6065, 6072), False, 'import factory\n'), ((6091, 6128), 'factory.Faker', 'factory.Faker', (['"""random_int"""'], {'max': '(5000)'}), "('random_int', max=5000)\n", (6104, 6128), False, 'import factory\n'), ((6150, 6187), 'factory.Faker', 'factory.Faker', (['"""random_int"""'], {'max': '(5000)'}), "('random_int', max=5000)\n", (6163, 6187), False, 'import factory\n'), ((6209, 6246), 'factory.Faker', 'factory.Faker', (['"""random_int"""'], {'max': '(5000)'}), "('random_int', max=5000)\n", (6222, 6246), False, 'import factory\n'), ((6267, 6310), 'factory.Faker', 'factory.Faker', (['"""random_int"""'], {'min': '(18)', 'max': '(40)'}), "('random_int', min=18, max=40)\n", (6280, 6310), False, 'import factory\n'), ((6339, 6363), 'factory.Faker', 'factory.Faker', (['"""boolean"""'], {}), "('boolean')\n", (6352, 6363), False, 'import factory\n'), ((6403, 6425), 'factory.Faker', 'factory.Faker', (['"""words"""'], {}), "('words')\n", (6416, 6425), False, 'import factory\n'), ((7006, 7043), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(2)'}), "('sentence', nb_words=2)\n", (7019, 7043), False, 'import factory\n'), ((7070, 7091), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (7083, 7091), False, 'import factory\n'), ((7117, 7138), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (7130, 7138), False, 'import factory\n'), ((7390, 7411), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (7403, 7411), False, 'import factory\n'), ((7441, 7463), 'factory.Faker', 'factory.Faker', (['"""email"""'], {}), "('email')\n", (7454, 7463), False, 'import factory\n'), ((1052, 1070), 'itertools.count', 
'itertools.count', (['(0)'], {}), '(0)\n', (1067, 1070), False, 'import itertools\n'), ((4747, 4795), 'calculator.models.Calculation.objects.create_for_application', 'Calculation.objects.create_for_application', (['self'], {}), '(self)\n', (4789, 4795), False, 'from calculator.models import Calculation\n'), ((4849, 4874), 'decimal.Decimal', 'decimal.Decimal', (['"""321.00"""'], {}), "('321.00')\n", (4864, 4874), False, 'import decimal\n'), ((5125, 5173), 'calculator.models.Calculation.objects.create_for_application', 'Calculation.objects.create_for_application', (['self'], {}), '(self)\n', (5167, 5173), False, 'from calculator.models import Calculation\n'), ((5227, 5252), 'decimal.Decimal', 'decimal.Decimal', (['"""123.00"""'], {}), "('123.00')\n", (5242, 5252), False, 'import decimal\n'), ((5288, 5304), 'users.tests.factories.HandlerFactory', 'HandlerFactory', ([], {}), '()\n', (5302, 5304), False, 'from users.tests.factories import HandlerFactory\n'), ((6726, 6778), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""batch.proposal_for_decision"""'], {}), "('batch.proposal_for_decision')\n", (6747, 6778), False, 'import factory\n'), ((6919, 6971), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""batch.proposal_for_decision"""'], {}), "('batch.proposal_for_decision')\n", (6940, 6971), False, 'import factory\n'), ((2937, 2962), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {}), "('sentence')\n", (2950, 2962), False, 'import factory\n'), ((3434, 3446), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3444, 3446), False, 'from datetime import date, timedelta\n'), ((3449, 3468), 'datetime.timedelta', 'timedelta', ([], {'days': '(100)'}), '(days=100)\n', (3458, 3468), False, 'from datetime import date, timedelta\n'), ((3789, 3809), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (3803, 3809), False, 'import random\n'), ((932, 944), 'datetime.date.today', 'date.today', ([], {}), '()\n', (942, 944), False, 'from datetime 
import date, timedelta\n'), ((3391, 3403), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3401, 3403), False, 'from datetime import date, timedelta\n'), ((7340, 7352), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7350, 7352), False, 'from datetime import date, timedelta\n'), ((833, 845), 'datetime.date.today', 'date.today', ([], {}), '()\n', (843, 845), False, 'from datetime import date, timedelta\n'), ((848, 871), 'datetime.timedelta', 'timedelta', ([], {'days': '(365 * 2)'}), '(days=365 * 2)\n', (857, 871), False, 'from datetime import date, timedelta\n'), ((3562, 3585), 'random.randint', 'random.randint', (['(31)', '(364)'], {}), '(31, 364)\n', (3576, 3585), False, 'import random\n'), ((7255, 7267), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7265, 7267), False, 'from datetime import date, timedelta\n'), ((7270, 7288), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (7279, 7288), False, 'from datetime import date, timedelta\n')] |
import random
class Play:
def __init__(self, name="Player"):
self.name = name
def print_name(self):
print("your name is ", self.name)
def TossDie(self, x=1):
for i in range(x):
print(random.randint(1, 6))
def RPC(self, x=1):
for i in range(x):
options = ["rock", "paper", "scissors"]
print(random.choice(options))
player1 = Play("Andula")
player1.print_name()
player1.RPC(3)
player1.TossDie(2)
| [
"random.choice",
"random.randint"
] | [((238, 258), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (252, 258), False, 'import random\n'), ((383, 405), 'random.choice', 'random.choice', (['options'], {}), '(options)\n', (396, 405), False, 'import random\n')] |
"""
According with BUG-132 was added table GitHubBugoutUser.
It requires additional script to generate BugoutUser for existing installations
after database migration.
"""
import argparse
import uuid
from ..models import GitHubOAuthEvent, GitHubBugoutUser
from ...broodusers import bugout_api
from ...db import yield_connection_from_env_ctx
from ...utils.settings import INSTALLATION_TOKEN, BOT_INSTALLATION_TOKEN_HEADER
def main(args: argparse.Namespace) -> None:
    """Backfill a GitHubBugoutUser (Brood user + token + group) for every
    existing GitHub bot installation that does not have one yet.

    Idempotent: installations that already have a GitHubBugoutUser row are
    skipped, so the script can be re-run safely after a partial failure.
    """
    if args.run:
        print("Starting upgrade")
        with yield_connection_from_env_ctx() as db_session:
            bot_installations = db_session.query(GitHubOAuthEvent).all()
            for bot_installation in bot_installations:
                # Skip installations that were already migrated.
                user_installation = (
                    db_session.query(GitHubBugoutUser)
                    .filter(GitHubBugoutUser.event_id == bot_installation.id)
                    .one_or_none()
                )
                if user_installation is not None:
                    continue
                # Organization name is the last path segment of the installation URL.
                org_name = bot_installation.github_installation_url.rstrip("/").split(
                    "/"
                )[-1]
                # Create Brood user
                generated_password: str = str(uuid.uuid4())
                username = f"{org_name}-{bot_installation.github_account_id}"
                email = f"{org_name}-{bot_installation.github_account_id}<EMAIL>"
                headers = {BOT_INSTALLATION_TOKEN_HEADER: INSTALLATION_TOKEN}
                bugout_user = bugout_api.create_user(
                    username, email, generated_password, headers=headers
                )
                bugout_user_token = bugout_api.create_token(
                    username, generated_password
                )
                # Persist the mapping before creating the group so a group
                # failure does not lose the user/token association.
                installation_user = GitHubBugoutUser(
                    event_id=bot_installation.id,
                    bugout_user_id=bugout_user.id,
                    bugout_access_token=bugout_user_token.id,
                )
                db_session.add(installation_user)
                db_session.commit()
                installation_group_name = (
                    f"Team group: {org_name}-{bot_installation.github_account_id}"
                )
                # TODO(kompotkot): Add group id to SlackBugoutUser
                # Only create a team group for live (non-deleted) installations.
                if bot_installation.deleted is False:
                    bugout_api.create_group(
                        installation_user.bugout_access_token, installation_group_name
                    )
                print(
                    f"Installation {bot_installation.github_installation_id} complete."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate Bugout user and group for installations"
    )
    # Default action prints help when no subcommand dispatch happens.
    parser.set_defaults(func=lambda _: parser.print_help())
    # Positional flag; its presence (any non-empty value) triggers the upgrade.
    parser.add_argument("run", help="Start upgrade existing installations")
    args = parser.parse_args()
    main(args)
| [
"argparse.ArgumentParser",
"uuid.uuid4"
] | [((2672, 2764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Bugout user and group for installations"""'}), "(description=\n 'Generate Bugout user and group for installations')\n", (2695, 2764), False, 'import argparse\n'), ((1228, 1240), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1238, 1240), False, 'import uuid\n')] |
import numpy as np
import json
import re
from Utils import *
np.random.seed(4)
def output_process(example):
    """Return the final state of *example* as a single string.

    String states are returned as-is; list states are joined with spaces.

    Fixes a bug where the body read the module-level variable ``e``
    instead of the ``example`` argument, so the function only worked by
    accident inside the loop that happened to define ``e``.
    """
    state = example['state'][-1]
    if isinstance(state, str):
        return state
    return ' '.join(state)
def polish_notation(steps):
    """Fold a list of instruction steps (referencing earlier steps via
    ``@@n@@`` markers, 1-based) into a single Polish-notation string.

    Returns the fully expanded final step with its outermost parentheses
    stripped.
    """
    # step_mapping[i] holds the (parenthesized) expansion of steps[i].
    step_mapping = {}
    for ix, s in enumerate(steps):
        references = re.findall('@@\d+@@', s)
        if len(references):
            # Convert 1-based @@n@@ markers to 0-based step indices.
            indices = [int(x.replace('@@','')) - 1 for x in references]
            if len(references) == 1:
                # Single reference: drop the marker (and its leading space)
                # and append the referenced expansion as the sole argument.
                s = '(' + s.replace(' ' + references[0], '') + ', ' + step_mapping[indices[0]] + ')'
                step_mapping[ix] = s
            else:
                # Multiple references: assumes the markers form one contiguous
                # span in the step text — cut that span out, then append every
                # referenced expansion as an argument.
                first_index, final_index = s.index(references[0]) - 1, s.index(references[-1]) + len(references[-1])
                s = '(' + s[:first_index] + s[final_index:] + ', '
                for jx in indices:
                    s += step_mapping[jx] + ', '
                s = s[:-2] + ')'  # trim trailing ', ' before closing paren
                step_mapping[ix] = s
        else:
            # Terminal step: no references, keep verbatim.
            step_mapping[ix] = s
    # Final step is the root of the DAG; strip its outer parentheses.
    return step_mapping[len(steps) - 1][1:-1]
def subgraphs_from_polish(polish_):
    """Recursively collect all parenthesized sub-expressions of a
    Polish-notation string produced by ``polish_notation``.

    A parenthesis-free, comma-separated input is treated as an argument
    list and its arguments (minus the operator head) are returned.
    """
    if polish_.count('(') == 0 and polish_.count(','):
        # Leaf argument list: drop the operator (first element).
        return [x.strip() for x in polish_.split(',')][1:]
    result_holder = []
    while True:
        try:
            first_paren = polish_.index('(')
        except ValueError:
            break  # no more parenthesized groups
        # Scan forward, tracking nesting depth, to find the matching ')'.
        open_paren = 1
        for ix, char in enumerate(polish_[first_paren+1:]):
            if char == '(':
                open_paren += 1
            elif char == ')':
                open_paren -= 1
            if open_paren == 0:
                # Capture the interior of this balanced group, then continue
                # scanning from just before its closing paren.
                result_holder.append(polish_[first_paren+1:first_paren + ix + 1])
                polish_ = polish_[first_paren + ix:]
                # print('new polish:', polish_)
                break
    while '' in result_holder:
        result_holder.remove('')
    # Recurse into each captured group and flatten one level of nesting.
    # NOTE(review): if result_holder is empty here (input with neither parens
    # nor commas), intermed_results[0] raises IndexError — confirm callers
    # never pass such input.
    intermed_results = [subgraphs_from_polish(x) for x in result_holder]
    if type(intermed_results[0]) == list:
        intermed_results = [item for sublist in intermed_results for item in sublist]
    return result_holder + intermed_results
def remove_duplicates(data_list):
    """Return *data_list* with entries having duplicate 'nlg' text removed.

    Sorts by the joined 'nlg' strings so duplicates become adjacent, then
    keeps one representative per run of equal entries.

    Fixes an IndexError on empty input (the original unconditionally
    appended ``sorted_data[-1]``).
    """
    if not data_list:
        return []
    data_set = []
    sorted_data = sorted(data_list, key=lambda x: ' '.join(x['nlg']))
    for ix in range(len(sorted_data) - 1):
        e = sorted_data[ix]
        # Keep an entry only when the next (sorted) entry differs.
        if e['nlg'] != sorted_data[ix + 1]['nlg']:
            data_set.append(e)
    # The last entry always survives (nothing after it to duplicate).
    data_set.append(sorted_data[-1])
    return data_set
def is_valid_dag(nlg):
    """Return True when the step list forms a valid DAG: every step except
    the final one must be referenced by a distinct ``@@n@@`` marker."""
    markers = set(re.findall('@@\d+@@', ' '.join(nlg)))
    return len(markers) + 1 == len(nlg)
def get_valid_subgraphs(example):
    """Return (instruction-prefix, tokenized-state) pairs, one per step.

    A prefix that already forms a valid DAG is used as-is; otherwise it is
    pruned and re-referenced first.
    """
    instructions = example['nlg']
    tokenized_states = example['tokenized_state']
    subgraphs = []
    for end in range(1, len(example['state']) + 1):
        prefix = instructions[:end]
        if not is_valid_dag(prefix):
            prefix = prune_and_reference(prefix)
        subgraphs.append((prefix, tokenized_states[end - 1]))
    return subgraphs
def prune_and_reference(instructions):
    """Prune steps not reachable from the final instruction and renumber
    the surviving ``@@n@@`` references to stay consistent.

    NOTE: mutates the *instructions* list elements in place while
    renumbering, then returns a new pruned list.
    """
    # BFS from the final step to find every step it (transitively) references.
    queue = [instructions[-1]]
    required_indices = [len(instructions) - 1]
    while len(queue):
        step = queue.pop(0)
        references = re.findall(r'@@\d+@@', step)
        # @@n@@ markers are 1-based; convert to 0-based list indices.
        indices = [int(x.replace('@@', '')) - 1 for x in references]
        required_indices += indices
        queue += [instructions[index] for index in indices]
    # Walk the list, dropping unreachable steps and shifting later
    # references down by the number of steps removed before them.
    prior_removals = 0
    pruned_instructions = []
    for index, instruction in enumerate(instructions):
        if index not in required_indices:
            prior_removals += 1
        else:
            if prior_removals > 0:
                # Rewrite every later reference to this step's old 1-based
                # position so it points at the new (shifted) position.
                for ref_index, referencer in enumerate(instructions[index + 1:]):
                    if '@@' + str(index + 1) + '@@' in referencer:
                        instructions[index + ref_index + 1] = instructions[index + ref_index + 1].replace(
                            '@@' + str(index + 1) + '@@', '@@' + str(index + 1 - prior_removals) + '@@'
                        )
            pruned_instructions.append(instruction)
    return pruned_instructions
def tokenize_string(example_state, example_vocab):
    """Greedily tokenize *example_state* against *example_vocab*.

    Tries 3-, 2-, then 1-character prefixes, matching either the chunk or
    its reversal against the vocabulary.  Every token after the first is
    prefixed with '%'.  Returns the space-separated token string (with a
    trailing space), or None when the string cannot be fully tokenized.

    Refactors the original's three near-identical branches into a single
    loop over chunk sizes.  Behavior is unchanged: the original's 1-char
    branch skipped the reversal check, but a 1-char reversal equals the
    chunk itself, so checking it is a no-op.
    """
    return_step = ''
    temp_state = example_state[:].lower()
    first_tok = True
    while len(temp_state):
        for size in (3, 2, 1):
            chunk = temp_state[:size]
            if chunk in example_vocab or chunk[::-1] in example_vocab:
                if first_tok:
                    return_step += chunk + ' '
                    first_tok = False
                else:
                    # Non-initial tokens carry a '%' continuation marker.
                    return_step += '%' + chunk + ' '
                temp_state = temp_state[size:]
                break
        else:
            # No prefix of any size matched: tokenization fails.
            return None
    return return_step
# Load the raw task data, dedupe it, and shuffle once (seeded above) so the
# later train/val/test split is randomized but reproducible.
with open('list_task_v2.json', 'r', encoding="utf-8") as input_file:
    data = json.loads(input_file.read())
data = remove_duplicates(data)
n = len(data)
np.random.shuffle(data)
# Build a per-example tokenization of every intermediate state, collecting a
# global vocabulary of introduced strings (plus their '%'-marked and reversed
# forms).  Examples whose states cannot be tokenized are left untouched and
# filtered out later.
#
# Fixes two defects in the original block:
#  * the trailing ``else: break`` was indented as a (syntactically invalid)
#    second ``else`` of the if/elif/else chain; it belongs to ``if add_bool:``
#  * the cleaned token was assigned to a typo'd name (``tokenize_step``) and
#    the raw ``tokenized_step`` was appended instead
vocab = []
for e in data:
    e_vocab, tokenized_state = [], []
    nlg, state = e['nlg'], e['state']
    add_bool = True
    for ix, step in enumerate(nlg):
        tokenized_step = ''
        # Terminal node: a step that introduces a new literal string.
        if step.startswith('the string '):
            new_string = step.split("'")[1]
            tokenized_state.append(state[ix].lower().strip())
            e_vocab.append(new_string.lower())
        # State is a single string.
        elif type(state[ix]) == str:
            # A reversal of a known string is kept verbatim.
            if state[ix][::-1].lower() in e_vocab:
                tokenized_state.append(state[ix].lower().strip())
            else:
                tokenized_step = tokenize_string(state[ix], e_vocab)
                if tokenized_step is not None:
                    tokenized_state.append(tokenized_step.strip())
                else:
                    add_bool = False
                    break
        # State is a list: tokenize each element independently.
        else:
            for list_element in state[ix]:
                temp_tok = tokenize_string(list_element, e_vocab)
                if temp_tok is None:
                    add_bool = False
                    break
                else:
                    tokenized_step += ' ' + temp_tok
            if add_bool:
                tokenized_step = remove_whitespace(tokenized_step).strip()
                tokenized_state.append(tokenized_step)
            else:
                break
    if add_bool:
        e['tokenized_state'] = tokenized_state
        vocab += e_vocab + ['%' + x for x in e_vocab] + [x[::-1] for x in e_vocab] + ['%' + x[::-1] for x in e_vocab]
vocab = list(set(vocab))
# with open('string_piece_vocabulary.txt', 'w', encoding='utf-8') as f:
#     f.write('\n'.join(vocab))
# Keep only examples that were successfully tokenized above.
filtered_data = []
for e in data:
    if 'tokenized_state' in e.keys():
        filtered_data.append(e)
# 80/10/10 split.
# NOTE(review): split boundaries use ``n`` (the PRE-filter length), so when
# examples were dropped the val/test slices can be short or empty — confirm
# whether ``len(filtered_data)`` was intended.
train = filtered_data[:int(n*0.8)]
val = filtered_data[int(n*0.8):int(n*0.9)]
test = filtered_data[int(n*0.9):]
# Training set: one line per valid subgraph prefix, instructions joined by
# the @@SEP@@ marker, paired with the corresponding tokenized state.
train_in, train_out = '', ''
for jx, e in enumerate(train):
    if jx % 5000 == 0:
        print(round(float(jx / len(train) * 100), 2), '% complete')
    subgraphs = get_valid_subgraphs(e)
    for subgraph in subgraphs:
        train_input = remove_whitespace(' @@SEP@@ '.join(subgraph[0]).lower().strip())
        train_in += train_input + '\n'
        if type(subgraph[1]) == list:
            train_out += ' '.join(subgraph[1]) + '\n'
        else:
            train_out += remove_whitespace(subgraph[1].strip()) + '\n'
    # train_in += ' @@SEP@@ '.join(e['nlg']).lower() + '\n'
    # train_out += e['tokenized_state'][-1].strip() + '\n'
# Val/test sets use only the full instruction sequence and the final state.
val_in, val_out = '', ''
for e in val:
    val_input = ' @@SEP@@ '.join(e['nlg']).lower()
    val_in += val_input + '\n'
    val_out += e['tokenized_state'][-1].strip() + '\n'
test_in, test_out = '', ''
for e in test:
    test_input = ' @@SEP@@ '.join(e['nlg']).lower()
    test_in += test_input + '\n'
    test_out += e['tokenized_state'][-1].strip() + '\n'
# Write the six model input/output files for the DAG baseline.
base_path = './dag_baseline_2a/'
with open(base_path + 'train_in.txt', 'w', encoding='utf-8') as f:
    f.write(train_in)
with open(base_path + 'train_out.txt', 'w', encoding='utf-8') as f:
    f.write(train_out)
with open(base_path + 'val_in.txt', 'w', encoding='utf-8') as f:
    f.write(val_in)
with open(base_path + 'val_out.txt', 'w', encoding='utf-8') as f:
    f.write(val_out)
with open(base_path + 'test_in.txt', 'w', encoding='utf-8') as f:
    f.write(test_in)
with open(base_path + 'test_out.txt', 'w', encoding='utf-8') as f:
    f.write(test_out)
"re.findall",
"numpy.random.seed",
"numpy.random.shuffle"
] | [((62, 79), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (76, 79), True, 'import numpy as np\n'), ((5577, 5600), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (5594, 5600), True, 'import numpy as np\n'), ((335, 360), 're.findall', 're.findall', (['"""@@\\\\d+@@"""', 's'], {}), "('@@\\\\d+@@', s)\n", (345, 360), False, 'import re\n'), ((3386, 3414), 're.findall', 're.findall', (['"""@@\\\\d+@@"""', 'step'], {}), "('@@\\\\d+@@', step)\n", (3396, 3414), False, 'import re\n')] |
#G.Benelli Feb 7 2008
#This fragment is used to have the random generator seeds saved to test
#simulation reproducibility. Anothe fragment then allows to run on the
#root output of cmsDriver.py to test reproducibility.
import FWCore.ParameterSet.Config as cms
def customise(process):
    """CMSSW customisation: save random-engine seeds during DIGI so a
    later job can reproduce the simulation, and add memory/timing
    services for reproducibility reports."""
    # Renaming the process
    process.__dict__['_Process__name']='DIGISavingSeeds'
    # Storing the random seeds
    process.rndmStore=cms.EDProducer("RandomEngineStateProducer")
    # Adding the RandomEngine seeds to the content
    process.output.outputCommands.append("keep RandomEngineStates_*_*_*")
    process.rndmStore_step=cms.Path(process.rndmStore)
    # Modifying the schedule:
    # First delete the current one:
    del process.schedule[:]
    # Then add the wanted sequences (digitisation, seed storage, output).
    process.schedule.append(process.digitisation_step)
    process.schedule.append(process.rndmStore_step)
    process.schedule.append(process.out_step)
    # Adding SimpleMemoryCheck service:
    process.SimpleMemoryCheck=cms.Service("SimpleMemoryCheck",
                                          ignoreTotal=cms.untracked.int32(1),
                                          oncePerEventMode=cms.untracked.bool(True))
    # Adding Timing service:
    process.Timing=cms.Service("Timing")
    # Add these 3 lines to put back the summary for timing information at the end of the logfile
    # (needed for TimeReport report)
    if hasattr(process,'options'):
        process.options.wantSummary = cms.untracked.bool(True)
    else:
        process.options = cms.untracked.PSet(
            wantSummary = cms.untracked.bool(True)
        )
    return(process)
| [
"FWCore.ParameterSet.Config.EDProducer",
"FWCore.ParameterSet.Config.Service",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.Path",
"FWCore.ParameterSet.Config.untracked.bool"
] | [((420, 463), 'FWCore.ParameterSet.Config.EDProducer', 'cms.EDProducer', (['"""RandomEngineStateProducer"""'], {}), "('RandomEngineStateProducer')\n", (434, 463), True, 'import FWCore.ParameterSet.Config as cms\n'), ((615, 642), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['process.rndmStore'], {}), '(process.rndmStore)\n', (623, 642), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1235, 1256), 'FWCore.ParameterSet.Config.Service', 'cms.Service', (['"""Timing"""'], {}), "('Timing')\n", (1246, 1256), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1467, 1491), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1485, 1491), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1079, 1101), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1)'], {}), '(1)\n', (1098, 1101), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1162, 1186), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1180, 1186), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1574, 1598), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1592, 1598), True, 'import FWCore.ParameterSet.Config as cms\n')] |
import rosebot
import time
def led():
    """Drive the robot forward, alternately blinking its LEDs with a blink
    period proportional to the measured proximity distance, and stop once
    an obstacle is closer than 20 inches."""
    robot = rosebot.RoseBot()
    robot.drive_system.go(30, 30)
    while True:
        distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
        # Blink delay scales with distance (inches / 500 seconds) — the
        # closer the obstacle, the faster the LEDs flash.
        delay = distance/500
        robot.led_system.left_led.turn_on()
        time.sleep(delay)
        robot.led_system.left_led.turn_off()
        robot.led_system.right_led.turn_on()
        time.sleep(delay)
        robot.led_system.left_led.turn_on()
        time.sleep(delay)
        robot.led_system.left_led.turn_off()
        robot.led_system.right_led.turn_off()
        # Stop and exit once the obstacle is within 20 inches.
        if distance < 20:
            robot.drive_system.stop()
            break
def camera():
    """Demo the camera: show its data, then spin counterclockwise until an
    object is seen, pause, and spin back clockwise until seen again."""
    robot = rosebot.RoseBot()
    robot.drive_system.display_camera_data()
    robot.drive_system.spin_counterclockwise_until_sees_object(100, 500)
    time.sleep(5)
    robot.drive_system.spin_clockwise_until_sees_object(100, 500)
| [
"rosebot.RoseBot",
"time.sleep"
] | [((54, 71), 'rosebot.RoseBot', 'rosebot.RoseBot', ([], {}), '()\n', (69, 71), False, 'import rosebot\n'), ((692, 709), 'rosebot.RoseBot', 'rosebot.RoseBot', ([], {}), '()\n', (707, 709), False, 'import rosebot\n'), ((832, 845), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (842, 845), False, 'import time\n'), ((287, 304), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (297, 304), False, 'import time\n'), ((403, 420), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (413, 420), False, 'import time\n'), ((473, 490), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (483, 490), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/lib/python/bytes.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
# protoc-generated module setup (DO NOT EDIT beyond comments): registers the
# serialized FileDescriptor and builds the Bytes message class into globals().
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x1cproto/lib/python/bytes.proto\x12\x0fsyft.lib.python"\x15\n\x05\x42ytes\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(
    DESCRIPTOR, "proto.lib.python.bytes_pb2", globals()
)
if _descriptor._USE_C_DESCRIPTORS == False:
    DESCRIPTOR._options = None
    # _BYTES is injected into this module's globals() by the builder above.
    _BYTES._serialized_start = 49
    _BYTES._serialized_end = 70
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor_pool.Default",
"google.protobuf.symbol_database.Default"
] | [((461, 487), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (485, 487), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((503, 529), 'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ([], {}), '()\n', (527, 529), True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')] |
import os
import sys
# Make the bundled client importable without installation.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../clients/python"))
import pypartisci

server, port = "localhost", 7777
apps = ["Demo App A",
        "Demo App B"]
hosts = ["host1.example.com",
         "host2.example.com"]
versions = ["1.0", "2.0"]
# Report every app from every host; each host reports its own version
# (versions is indexed by host, not by app).
for app in apps:
    for i, host in enumerate(hosts):
        pypartisci.send_http(server, port, app, versions[i], host)
| [
"os.path.dirname",
"pypartisci.send_http"
] | [((54, 79), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((351, 409), 'pypartisci.send_http', 'pypartisci.send_http', (['server', 'port', 'app', 'versions[i]', 'host'], {}), '(server, port, app, versions[i], host)\n', (371, 409), False, 'import pypartisci\n')] |
from cv2 import cv2
import numpy as np
import sys
import os
from base import normalize
# some parameters of training and testing data
train_sub_count = 40
train_img_count = 5
total_face = 200
row = 70
col = 70
def eigenfaces_train(src_path):
    """Train an eigenfaces model from the grayscale face images under
    *src_path* (expects s<subject>/<index>.png, sizes from module globals).

    Returns (mean face vector, eigenvalues desc, eigenvectors, per-image
    weight matrix).

    NOTE(review): np.mat/np.matrix are deprecated in modern NumPy —
    consider plain ndarrays with the @ operator.
    """
    img_list = np.empty((row*col, total_face))
    count = 0
    # read all the faces and flatten them
    for i in range(1, train_sub_count+1):
        for j in range(1, train_img_count+1):
            img_path = src_path + "/s" + str(i) + "/" + str(j) + ".png"
            img = cv2.imread(img_path, 0)
            img_col = np.array(img).flatten()
            img_list[:, count] = img_col[:]
            count += 1
    # compute the average of the faces
    img_mean = np.sum(img_list, axis=1) / total_face
    diff = np.empty((row*col, total_face))
    # compute the difference matrix
    for i in range(0, total_face):
        diff[:, i] = img_list[:, i] - img_mean[:]
    cov = np.mat(diff)*np.mat(diff.T) / total_face
    eigen_values, eigen_vectors = np.linalg.eigh(cov)
    # sort the eigenvalues and eigenvectors by desc
    sort_index = np.argsort(-eigen_values)
    eigen_values = eigen_values[sort_index]
    eigen_vectors = eigen_vectors[:, sort_index]
    # print(eigen_values)
    '''
    compute the coveriance matrix
    here we don't use original algrithom to avoid computing an 10000+ * 10000+ coveriance matrix later
    oringinal: cov = 1/m * A*A^T => it will be an 10000+ * 10000+ matrix
    when the dimension of the image (here we mean row*col) > the total number of the training images (here we mean total_face)
    (1)cov*v = A*A^T*v = e*v (e is eigenvalue of cov, v is eigenvector of cov) => original
    (2)let cov'*u = A^T*A*u = e*u
    thus, on both sides of the equation(2) left side multiplied by A, we can get the equation below
    (3)A*A^T*A*u = A*e2*u = e2*A*u
    compare (1) with (3), if u is eigenvector of cov' of eigenvalue e, we can find that A*u = v
    (e is not zero, cov and cov' have the same not-zero eigenvalues, but have different number of zero eigenvalue, it can be proofed)
    so we can compute A^T*A instead of A*A^T to simplify the computation (which will generate a matrix with only 200 * 200 data)
    cov = np.matrix(diff.T)*np.matrix(diff) / total_face
    # compute the eigen values and eigen vectors of cov
    eigen_values, vectors = np.linalg.eigh(cov)
    eigen_vectors = np.matrix(diff)*np.matrix(vectors)
    # sort the eigenvalues and eigenvectors by desc
    sort_index = np.argsort(-eigen_values)
    eigen_values = eigen_values[sort_index]
    eigen_vectors = eigen_vectors[:, sort_index]
    print(eigen_values)
    '''
    # for each image we compute the y (y = A^T * x, weight) and we will compare yf(the input image) with yf, find the nearest one
    eigenfaces_weight = np.matrix(eigen_vectors.T)*np.matrix(diff)
    return img_mean, eigen_values, eigen_vectors, eigenfaces_weight
"numpy.mat",
"cv2.cv2.imread",
"numpy.argsort",
"numpy.sum",
"numpy.array",
"numpy.empty",
"numpy.linalg.eigh",
"numpy.matrix"
] | [((273, 306), 'numpy.empty', 'np.empty', (['(row * col, total_face)'], {}), '((row * col, total_face))\n', (281, 306), True, 'import numpy as np\n'), ((803, 836), 'numpy.empty', 'np.empty', (['(row * col, total_face)'], {}), '((row * col, total_face))\n', (811, 836), True, 'import numpy as np\n'), ((1048, 1067), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (1062, 1067), True, 'import numpy as np\n'), ((1139, 1164), 'numpy.argsort', 'np.argsort', (['(-eigen_values)'], {}), '(-eigen_values)\n', (1149, 1164), True, 'import numpy as np\n'), ((751, 775), 'numpy.sum', 'np.sum', (['img_list'], {'axis': '(1)'}), '(img_list, axis=1)\n', (757, 775), True, 'import numpy as np\n'), ((2875, 2901), 'numpy.matrix', 'np.matrix', (['eigen_vectors.T'], {}), '(eigen_vectors.T)\n', (2884, 2901), True, 'import numpy as np\n'), ((2902, 2917), 'numpy.matrix', 'np.matrix', (['diff'], {}), '(diff)\n', (2911, 2917), True, 'import numpy as np\n'), ((547, 570), 'cv2.cv2.imread', 'cv2.imread', (['img_path', '(0)'], {}), '(img_path, 0)\n', (557, 570), False, 'from cv2 import cv2\n'), ((972, 984), 'numpy.mat', 'np.mat', (['diff'], {}), '(diff)\n', (978, 984), True, 'import numpy as np\n'), ((985, 999), 'numpy.mat', 'np.mat', (['diff.T'], {}), '(diff.T)\n', (991, 999), True, 'import numpy as np\n'), ((596, 609), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (604, 609), True, 'import numpy as np\n')] |
# Generated by Django 2.2.5 on 2019-09-24 12:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the PoliceOffice model and links requests to it via a nullable
    FK (SET_NULL keeps requests when an office is deleted)."""

    dependencies = [
        ('request', '0002_auto_20190924_1811'),
    ]

    operations = [
        migrations.CreateModel(
            name='PoliceOffice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='이름')),
            ],
        ),
        migrations.AddField(
            model_name='request',
            name='police_office',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='request.PoliceOffice', verbose_name='경찰서'),
        ),
    ]
| [
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((680, 805), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""request.PoliceOffice"""', 'verbose_name': '"""경찰서"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='request.PoliceOffice', verbose_name='경찰서')\n", (697, 805), False, 'from django.db import migrations, models\n'), ((369, 462), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (385, 462), False, 'from django.db import migrations, models\n'), ((486, 536), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'verbose_name': '"""이름"""'}), "(max_length=30, verbose_name='이름')\n", (502, 536), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
"""json_fmtr.py
This module implements a MediaTypeFormatter with JSON response.
This module was originally shipped as an example code from https://github.com/DataBooster/PyWebApi, licensed under the MIT license.
Anyone who obtains a copy of this code is welcome to modify it for any purpose, and holds all rights to the modified part only.
The above license notice and permission notice shall be included in all copies or substantial portions of the Software.
"""
from jsonpickle import dumps
from pywebapi import MediaTypeFormatter
class JsonFormatter(MediaTypeFormatter):
    """Media-type formatter that serializes response objects as JSON via
    jsonpickle."""

    @property
    def supported_media_types(self):
        # Media types this formatter can produce.
        return ['application/json', 'text/json']

    def format(self, obj, media_type:str, **kwargs):
        # Default to plain (non-round-trippable) JSON unless the caller
        # explicitly opts in to unpicklable output.
        if 'unpicklable' not in kwargs:
            kwargs['unpicklable'] = False
        return dumps(obj, **kwargs)
| [
"jsonpickle.dumps"
] | [((885, 905), 'jsonpickle.dumps', 'dumps', (['obj'], {}), '(obj, **kwargs)\n', (890, 905), False, 'from jsonpickle import dumps\n')] |
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def FindPhoto(value):
    """Template filter: True when *value* contains the '%photo%' marker.

    @stringfilter already coerces *value* to str, so the redundant
    ``str()`` call and the explicit if/else returning True/False are
    replaced by returning the membership test directly.
    """
    return "%photo%" in value
@register.filter
@stringfilter
def ReplacePhoto(value):
    # Template filter: strip the '%photo%' placeholder marker from the value.
    return value.replace("%photo%","")
| [
"django.template.Library"
] | [((96, 114), 'django.template.Library', 'template.Library', ([], {}), '()\n', (112, 114), False, 'from django import template\n')] |
"""
contains several global data classes (for log entries for example)
Author: `HBernigau <https://github.com/HBernigau>`_
Date: 01.2022
"""
import marshmallow_dataclass as mmdc
import marshmallow
from logging import StreamHandler
from typing import Any
from dataclasses import is_dataclass, dataclass, field
from datetime import datetime
import uuid
import warnings
import time
import logging
import os
import threading
UUID = mmdc.NewType("UUID", str, field=marshmallow.fields.UUID)
@dataclass
class LogEntry:
    """One persisted log record; fields mirror attributes of a logging
    record (presumably enriched by a prefect flow run — confirm against
    the emitting handler)."""
    # Flow/task identification.
    name: str
    flow_run_id: str
    task_name: str
    task_slug: str
    task_run_id: str
    map_index: int
    task_loop_count: int
    task_run_count: int
    # Process/thread context of the emitting record.
    thread: str
    threadName: str
    process: int
    processName: str
    # Log payload.
    exc_text: str
    levelname: str
    msg: str
    # Defaults filled at creation time.
    timestamp: datetime = field(default_factory=datetime.now)
    msg_uuid: UUID = field(default_factory=uuid.uuid4)
    modus: str = field(default_factory=lambda:'test')
def fill_dc_from_obj(DC, obj: Any, add_dict: dict = None, excl_lst = None, get_default = None, None_2_default = True):
    """Create an instance of dataclass *DC* from matching fields on *obj*.

    :param DC: target dataclass type
    :param obj: value source; a mapping or any object with attributes
    :param add_dict: extra field values; override values taken from obj
    :param excl_lst: field names of DC that should not be filled
    :param get_default: callable mapping a field name to its fallback value
    :param None_2_default: when True, a looked-up None is replaced by the default
    :return: instance of DC populated from obj and add_dict

    Improvements over the original: the bare ``except:`` around the
    attribute lookup is removed (``getattr`` with a default does not raise
    for missing attributes), and the dataclass guard no longer lets
    non-dataclass *instances* slip through to fail later with a confusing
    AttributeError.
    """
    def _lookup(source, key, default=None):
        # Fetch a key from a mapping, or an attribute from any other object.
        if isinstance(source, dict):
            value = source.get(key, default)
        else:
            value = getattr(source, key, default)
        if None_2_default and value is None:
            return default
        return value

    add_dict = {} if add_dict is None else add_dict
    excl_lst = [] if excl_lst is None else excl_lst
    if get_default is None:
        get_default = lambda x: None
    if not is_dataclass(DC):
        raise ValueError('"DC" must be a valid dataclass.')
    values = {key: _lookup(obj, key, get_default(key))
              for key in DC.__dataclass_fields__
              if key not in excl_lst}
    values.update(add_dict)
    return DC(**values)
def flatten_ls(ls, base=None):
    """Recursively flatten arbitrarily nested lists into *base* (a fresh
    list when not supplied) and return it; a non-list input yields a
    single-element list appended to *base*."""
    if base is None:
        base = []
    if not isinstance(ls, list):
        return base + [ls]
    for element in ls:
        base.extend(flatten_ls(element))
    return base
def get_null_logger():
    """Return the shared 'downloader logger' at DEBUG level with a
    NullHandler attached, so records are discarded unless other handlers
    are configured elsewhere."""
    logger = logging.getLogger('downloader logger')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.NullHandler())
    return logger
class TstHandler(StreamHandler):
    """Stream handler for tests: captures every emitted record (all record
    attributes plus its formatted text) in an in-memory list."""

    def __init__(self):
        self.reset_loglist()
        super().__init__()

    def reset_loglist(self):
        # Drop all captured records.
        self._log_list = []

    @property
    def log_list(self):
        # Records captured so far, newest last.
        return self._log_list

    def emit(self, record):
        # Format first: formatting also populates derived record attributes
        # (e.g. 'message') that we want in the captured dict.
        formatted = self.format(record)
        entry = dict(record.__dict__)
        entry['formated_log_msg'] = formatted
        self._log_list.append(entry)
class CustomLogHandler(StreamHandler):
    """Stores prefect logs in the project's postgresql data base"""
    def __init__(self, get_session, modus, *args, **kwargs):
        # get_session: zero-arg factory returning a DB session;
        # modus: value written into LogEntry.modus for every record.
        self.get_session=get_session
        self.modus=modus
        super().__init__(*args, **kwargs)
    def emit(self, record):
        # Map the log record onto a LogEntry row; timestamp/msg_uuid keep
        # their dataclass defaults.
        resp = fill_dc_from_obj(DC=LogEntry,
                                obj=record,
                                excl_lst = ['timestamp', 'msg_uuid'],
                                add_dict={'modus': self.modus})
        # Fill in process/thread context when the record did not carry it.
        default_dict = {'thread': threading.get_ident(),
                        'threadName': threading.current_thread().name,
                        'process': os.getpid()
                        }
        for attr, attr_def_value in default_dict.items():
            if getattr(resp, attr, None) is None:
                setattr(resp, attr, attr_def_value)
        # Coerce msg to str and truncate to fit the DB column.
        if not isinstance(resp.msg, str):
            resp.msg = str(resp.msg)[:4094]
        else:
            resp.msg=resp.msg[:4094]
        # Best-effort persistence with one retry; failures only warn so a
        # broken DB never crashes the logging caller.
        # NOTE(review): message says "trial i/3" but only 2 attempts run.
        for i in range(2):
            try:
                session = self.get_session()
                session.add(resp)
                session.commit()
                # session.close() # can be omitted now...
                break
            except Exception as exc:
                warnings.warn(f'Error when logging: "{exc}" (trial {i+1}/3)')
                time.sleep(1.0)
        else:
            warnings.warn(f'Final result: Could not log record {record}')
def get_tst_logger(tst_handler):
    """Return a fresh 'tst_logger' Logger (INFO) with *tst_handler*
    attached, unless a handler of the same type is already present."""
    logger = logging.Logger('tst_logger', level = logging.INFO)
    already_attached = any(type(h) is type(tst_handler) for h in logger.handlers)
    if not already_attached:
        logger.addHandler(tst_handler)
    return logger
def get_prod_logger(name, get_session, cust_formatting = None, modus='test'):
    """Return a fresh Logger (INFO) that writes both to the console and to
    the database via CustomLogHandler, using *cust_formatting* (or a
    default '[time] LEVEL - name | message' pattern)."""
    logger = logging.Logger(name, level = logging.INFO)
    if cust_formatting is None:
        cust_formatting = '[%(asctime)s] %(levelname)s - %(name)s | %(message)s'
    # Attach each required handler unless one of the same type exists.
    for req_hndl in [StreamHandler(), CustomLogHandler(get_session, modus=modus)]:
        #for hndl in logger.handlers:
        #    logger.removeHandler(hndl)
        for hndl in logger.handlers:
            if type(hndl) is type(req_hndl):
                break
        else:
            formatter = logging.Formatter(cust_formatting)
            req_hndl.setFormatter(formatter)
            logger.addHandler(req_hndl)
    return logger
| [
"logging.getLogger",
"logging.NullHandler",
"logging.StreamHandler",
"threading.current_thread",
"logging.Formatter",
"time.sleep",
"threading.get_ident",
"marshmallow_dataclass.NewType",
"os.getpid",
"logging.Logger",
"warnings.warn",
"dataclasses.is_dataclass",
"dataclasses.field"
] | [((431, 487), 'marshmallow_dataclass.NewType', 'mmdc.NewType', (['"""UUID"""', 'str'], {'field': 'marshmallow.fields.UUID'}), "('UUID', str, field=marshmallow.fields.UUID)\n", (443, 487), True, 'import marshmallow_dataclass as mmdc\n'), ((828, 863), 'dataclasses.field', 'field', ([], {'default_factory': 'datetime.now'}), '(default_factory=datetime.now)\n', (833, 863), False, 'from dataclasses import is_dataclass, dataclass, field\n'), ((885, 918), 'dataclasses.field', 'field', ([], {'default_factory': 'uuid.uuid4'}), '(default_factory=uuid.uuid4)\n', (890, 918), False, 'from dataclasses import is_dataclass, dataclass, field\n'), ((936, 974), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : 'test')"}), "(default_factory=lambda : 'test')\n", (941, 974), False, 'from dataclasses import is_dataclass, dataclass, field\n'), ((2654, 2692), 'logging.getLogger', 'logging.getLogger', (['"""downloader logger"""'], {}), "('downloader logger')\n", (2671, 2692), False, 'import logging\n'), ((4747, 4795), 'logging.Logger', 'logging.Logger', (['"""tst_logger"""'], {'level': 'logging.INFO'}), "('tst_logger', level=logging.INFO)\n", (4761, 4795), False, 'import logging\n'), ((5067, 5107), 'logging.Logger', 'logging.Logger', (['name'], {'level': 'logging.INFO'}), '(name, level=logging.INFO)\n', (5081, 5107), False, 'import logging\n'), ((2766, 2787), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (2785, 2787), False, 'import logging\n'), ((5244, 5259), 'logging.StreamHandler', 'StreamHandler', ([], {}), '()\n', (5257, 5259), False, 'from logging import StreamHandler\n'), ((2090, 2106), 'dataclasses.is_dataclass', 'is_dataclass', (['DC'], {}), '(DC)\n', (2102, 2106), False, 'from dataclasses import is_dataclass, dataclass, field\n'), ((3759, 3780), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (3778, 3780), False, 'import threading\n'), ((3888, 3899), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3897, 3899), False, 'import 
os\n'), ((4633, 4694), 'warnings.warn', 'warnings.warn', (['f"""Final result: Could not log record {record}"""'], {}), "(f'Final result: Could not log record {record}')\n", (4646, 4694), False, 'import warnings\n'), ((5526, 5560), 'logging.Formatter', 'logging.Formatter', (['cust_formatting'], {}), '(cust_formatting)\n', (5543, 5560), False, 'import logging\n'), ((3820, 3846), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (3844, 3846), False, 'import threading\n'), ((4513, 4576), 'warnings.warn', 'warnings.warn', (['f"""Error when logging: "{exc}" (trial {i + 1}/3)"""'], {}), '(f\'Error when logging: "{exc}" (trial {i + 1}/3)\')\n', (4526, 4576), False, 'import warnings\n'), ((4591, 4606), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (4601, 4606), False, 'import time\n')] |
from run_translation.TestModelComputer import asl_translation
from run_translation.TextToSpeech import tts
from run_translation.RunPiModelStream import rasp_translation
# from run_translation.RunPiModelTesting import rasp_translation
from run_translation.TestModelComputerLetters import asl_translation_letters
# from run_translation.SpeechToText import stt
if __name__ == "__main__":
    # Entry point: run the computer-webcam ASL translation on camera 1.
    # The commented alternatives exercise the Raspberry Pi pipeline and the
    # text-to-speech / speech-to-text stages instead.
    # sentence = rasp_translation()
    sentence = asl_translation(CAM_ID=1)
    # rasp_sentence = rasp_translation([])
    # tts(sentence)
    # print(stt())
"run_translation.TestModelComputer.asl_translation"
] | [((437, 462), 'run_translation.TestModelComputer.asl_translation', 'asl_translation', ([], {'CAM_ID': '(1)'}), '(CAM_ID=1)\n', (452, 462), False, 'from run_translation.TestModelComputer import asl_translation\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
wz_table/spreadsheet_make.py
Last updated: 2019-10-14
Create a new spreadsheet (.xlsx).
=+LICENCE=============================
Copyright 2017-2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
import os, datetime
from collections import namedtuple
from openpyxl import Workbook
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.styles import (NamedStyle, PatternFill, Alignment,
Protection, Font, Border, Side)
from openpyxl.utils import get_column_letter
from openpyxl.worksheet.properties import (WorksheetProperties,
PageSetupProperties)
class NewSpreadsheet:
    """Builder for a new .xlsx workbook based on openpyxl.

    Provides cell writing, styling (via <TableStyle>), merging, data
    validation, sheet protection, page setup and saving.  All row/column
    indexes taken by this class are 0-based; they are converted to
    openpyxl's 1-based scheme internally.
    """
    # Number format applied to date cells by <setCell> (isDate=True).
    FORMAT_DATE = 'DD.MM.YYYY'
    def __init__ (self, sheetName=None):
        """Create a workbook with one active worksheet, optionally named."""
        # Create the workbook and worksheet we'll be working with
        self._wb = Workbook ()
        self._ws = self._wb.active
        if sheetName:
            self._ws.title = sheetName
        self._unlocked = None # cache for a <Protection> instance
    @staticmethod
    def cellName (row, col):
        """Return the spreadsheet name ('A1'-style) of a 0-based cell.
        A <None> coordinate yields the placeholder '*r*' / '*c*', which can
        be filled in later by <completeCellNames>.
        """
        if row == None:
            r = '*r*'
        else:
            r = str (row+1)
        if col == None:
            c = '*c*'
        else:
            c = get_column_letter (col+1)
        return c + r
    @staticmethod
    def completeCellNames (rcstring, row=None, col=None):
        """Substitute the '*r*' / '*c*' placeholders produced by <cellName>
        with the given 0-based row/column coordinates.
        """
        if col != None:
            rcstring = rcstring.replace ('*c*', get_column_letter (col+1))
        if row != None:
            rcstring = rcstring.replace ('*r*', str (row+1))
        return rcstring
    def makeStyle (self, style):
        """Return the attributes of this style in the form needed for
        applying it to a cell. The result is cached in the style object.
        """
        cellstyle = style.cellStyle
        if cellstyle == None:
            cellstyle = {}
        cstyle = style.attributes
        # Font
        # <f> records whether any font attribute was supplied at all; only
        # then is a <Font> object attached to the cell style.
        try:
            fontname = cstyle ['font']
            f = True
        except KeyError:
            fontname = 'Arial'
            f = False
        try:
            fontsize = int (cstyle ['size'])
            f = True
        except KeyError:
            fontsize = 12
        try:
            fontbold = bool (cstyle ['bold'])
            f = True
        except KeyError:
            fontbold = False
        try:
            fontital = bool (cstyle ['emph'])
            f = True
        except KeyError:
            fontital = False
        try:
            fontcol = cstyle ['fg']
            f = True
        except KeyError:
            fontcol = '000000'
            pass
        if f:
            cellstyle ['font'] = Font (name = fontname,
                    size = fontsize, bold = fontbold,
                    italic = fontital, color=fontcol)
        # "Number format"
        try:
            cellstyle ['number_format'] = cstyle ['number_format']
        except KeyError:
            pass
        # Alignment
        # A vertical alignment letter ('b'/'m'/'t') implies rotated text.
        try:
            align = cstyle ['align']
            if align in 'bmt':
                # Vertical
                h = 'c'
                v = align
                rotate = 90
            else:
                h = align
                v = 'm'
                rotate = None
            cellstyle ['alignment'] = self.alignment (h=h, v=v,
                    rotate=rotate)
        except KeyError:
            pass
        # Border
        try:
            border = cstyle ['border']
            if border == 2:
                cellstyle ['border'] = self.border (left=0, right=0,
                        top=0, bottom=2)
            elif border == 1:
                cellstyle ['border'] = self.border ()
        except KeyError:
            pass
        # Background
        try:
            cellstyle ['fill'] = self.background (cstyle ['background'])
        except KeyError:
            pass
        # Validation is not really a style ...
        try:
            valid = cstyle ['valid']
            if valid:
                # The default is 'locked' so only if <valid> is present
                # is an action necessary.
                if not self._unlocked:
                    self._unlocked = Protection (locked=False)
                # Remove cell protection
                cellstyle ['protection'] = self._unlocked
                if type (valid) == list:
                    style.validation = self.dataValidation (valid)
        except KeyError:
            pass
        style.cellStyle = cellstyle
        return cellstyle
    def setCell (self, row, col, val, style=None, isDate=False):
        """Set the cell at the given coordinates to the given value.
        The coordinates start at 0.
        Style objects can be passed as additional arguments.
        """
        cell = self._ws.cell (row=row+1, column=col+1)
        if style:
            cellstyle = self.makeStyle (style)
            for k, v in cellstyle.items ():
                setattr (cell, k, v)
            if style.validation:
                style.validation.add (cell)
        if val != None:
            if isDate:
                # Set cell number format
                cell.number_format = self.FORMAT_DATE
                # Convert to <datetime.date> instance
                # (<val> is expected in ISO form 'YYYY-MM-DD')
                cell.value = datetime.date (*[int (v) for v in val.split ('-')])
            else:
                # Workaround for probable bug in openpyxl:
                if isinstance (val, str) and type (val) != str:
                    val = str (val)
                cell.value = val
    def setWidth (self, col, width):
        """Set a column width in mm – probably very roughly.
        """
        # The internal width parameter is related to the width of the
        # 'Normal style font'! The conversion factor tries to compensate.
        self._ws.column_dimensions [get_column_letter (col+1)].width = width * 0.5
    def setHeight (self, row, height):
        """Set a row height in mm – probably very roughly.
        """
        # The internal height parameter is related to the height of the
        # 'Normal style font'! The conversion factor tries to compensate.
        self._ws.row_dimensions [row+1].height = height * 2.8
    def merge (self, row0, col0, height, width):
        """Merge the block of <height> x <width> cells whose top-left cell
        is at the 0-based coordinates (row0, col0).
        """
        self._ws.merge_cells (start_row=row0 + 1, start_column=col0 + 1,
                end_row=row0 + height, end_column=col0 + width)
    def dataValidation (self, valList, allow_blank=True):
        """Create a data-validation object with list validation.
        """
        # Instances are cached per (value-list, allow_blank) key in
        # <self._vcache> so equal lists share one validation object.
        def newValidationList ():
            dv = DataValidation (type='list',
                    formula1 = '"' + ','.join (valList) + '"',
                    allow_blank = allow_blank)
            # Optionally set a custom error message
            #dv.error ='Your entry is not in the list'
            #dv.errorTitle = 'Invalid Entry'
            # Optionally set a custom prompt message
            #dv.prompt = 'Please select from the list'
            #dv.promptTitle = 'List Selection'
            # Add the data-validation object to the worksheet
            self._ws.add_data_validation (dv)
            return dv
        key = tuple (valList) + (allow_blank,)
        try:
            return self._vcache [key]
        except AttributeError:
            # No cache yet
            self._vcache = {}
        except KeyError:
            # No existing validation instance for this key
            pass
        dv = newValidationList ()
        self._vcache [key] = dv
        return dv
    def dataValidationLength (self, chars):
        """Create a data-validation object for a string with maximum
        length validation (chars >= 0) or exact length validation
        (-chars for chars < 0).
        """
        if chars < 0:
            op = 'equal'
            chars = - chars
        else:
            op = 'lessThanOrEqual'
        dv = DataValidation(type='textLength', operator=op, formula1=chars)
        # Optionally set a custom error message
        dv.error ='Entry is too long'
        # Add the data-validation object to the worksheet
        self._ws.add_data_validation (dv)
        return dv
    @staticmethod
    def background (colour):
        """Return a solid background fill for the colour 'RRGGBB'."""
        return PatternFill (patternType='solid', fgColor=colour)
    @staticmethod
    def alignment (h=None, v=None, rotate=None, indent=None, wrap=None):
        """Build an <Alignment>: h in 'l'/'r'/'c', v in 't'/'b'/'m',
        rotate in degrees (0–180), numeric indent, boolean wrap.
        Invalid values are silently ignored.
        """
        al = Alignment ()
        if h:
            hal = {'l': 'left', 'r': 'right', 'c': 'center'}.get (h)
            if hal:
                al.horizontal = hal
        if v:
            val = {'t': 'top', 'b': 'bottom', 'm': 'center'}.get (v)
            if val:
                al.vertical = val
        if rotate:
            try:
                ral = int (rotate)
                if ral >=0 and ral <= 180:
                    al.textRotation = ral
            except:
                pass
        if indent != None:
            al.indent = float (indent)
        if wrap != None:
            al.wrapText = wrap
        return al
    @staticmethod
    def border (left=1, right=1, top=1, bottom=1):
        """Simple borders. Only supports definition of the sides and thickness.
        The value must lie in the range 0 – 3.
        """
        # 0 maps to "no line"; 1–3 to increasingly thick line styles.
        bstyle = [None, 'thin', 'medium', 'thick']
        return Border (
                left=Side (style=bstyle [left]),
                right=Side (style=bstyle [right]),
                top=Side (style=bstyle [top]),
                bottom=Side (style=bstyle [bottom]))
    def protectSheet (self, pw=None):
        """Enable worksheet protection, optionally with a password."""
        if pw:
            self._ws.protection.set_password (pw)
        else:
            self._ws.protection.enable ()
    def sheetProperties (self, paper='A4', landscape=False,
            fitWidth=False, fitHeight=False):
        """Configure page setup: paper size (e.g. 'A4'), orientation and
        fit-to-page scaling in either or both directions.
        """
        if landscape:
            self._ws.page_setup.orientation = self._ws.ORIENTATION_LANDSCAPE
        self._ws.page_setup.paperSize = getattr (self._ws, 'PAPERSIZE_' + paper)
        # Property settings
        if fitWidth or fitHeight:
            wsprops = self._ws.sheet_properties
            wsprops.pageSetUpPr = PageSetupProperties(fitToPage=True,
                    autoPageBreaks=False)
#            self._ws.page_setup.fitToPage = True
            if not fitWidth:
                self._ws.page_setup.fitToWidth = False
            if not fitHeight:
                self._ws.page_setup.fitToHeight = False
    def freeze (self, row, col):
        """Freeze panes above/left of the 0-based cell (row, col)."""
        self._ws.freeze_panes = self.cellName (row, col)
    def save (self, filepath):
        """Write the spreadsheet to a file.
        The ending '.xlsx' is added automatically if it is not present
        already.
        Return the full filepath.
        """
        fdir = os.path.dirname (filepath)
        fname = os.path.basename (filepath).rsplit ('.', 1) [0] + '.xlsx'
        fp = os.path.join (fdir, fname)
        self._wb.save (fp)
        return fp
class TableStyle:
    def __init__ (self, base=None, **kargs):
        """Cell-style description consumed by <NewSpreadsheet.makeStyle>.

        base: an existing <TableStyle> whose attributes are copied first.
        Recognised keyword attributes:
            font, size, bold, emph: font name / size / styles.
            fg: font colour as 'RRGGBB'.
            align: horizontal ('l'/'c'/'r') OR vertical ('b'/'m'/'t',
                used for rotated text).
            background: fill colour as 'RRGGBB' (default none).
            border: 0 = none, 1 = all sides, 2 = (thicker) underline.
            number_format: cell number format; the default '@' forces text.
            valid: <True> just unlocks the cell (removes protection); a
                list of strings additionally attaches list validation.
        When no base style is given, the defaults border=1,
        number_format='@' and align='c' are filled in.  Explicit keyword
        arguments always take precedence.
        """
        if base is None:
            self.attributes = {}
            # Defaults apply only to styles built from scratch; a derived
            # style inherits everything from its base instead.
            kargs.setdefault ('border', 1)
            kargs.setdefault ('number_format', '@')
            kargs.setdefault ('align', 'c')
        else:
            self.attributes = dict (base.attributes)
        self.attributes.update (kargs)
        # Sheet-side caches, filled lazily by <NewSpreadsheet.makeStyle>.
        self.cellStyle = None
        self.validation = None
| [
"openpyxl.worksheet.datavalidation.DataValidation",
"os.path.join",
"openpyxl.styles.Font",
"os.path.dirname",
"openpyxl.utils.get_column_letter",
"openpyxl.styles.Side",
"openpyxl.styles.Protection",
"openpyxl.styles.Alignment",
"openpyxl.Workbook",
"openpyxl.styles.PatternFill",
"openpyxl.work... | [((1389, 1399), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (1397, 1399), False, 'from openpyxl import Workbook\n'), ((8720, 8782), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""textLength"""', 'operator': 'op', 'formula1': 'chars'}), "(type='textLength', operator=op, formula1=chars)\n", (8734, 8782), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((9052, 9100), 'openpyxl.styles.PatternFill', 'PatternFill', ([], {'patternType': '"""solid"""', 'fgColor': 'colour'}), "(patternType='solid', fgColor=colour)\n", (9063, 9100), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((9208, 9219), 'openpyxl.styles.Alignment', 'Alignment', ([], {}), '()\n', (9217, 9219), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((11542, 11567), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (11557, 11567), False, 'import os, datetime\n'), ((11656, 11681), 'os.path.join', 'os.path.join', (['fdir', 'fname'], {}), '(fdir, fname)\n', (11668, 11681), False, 'import os, datetime\n'), ((1778, 1804), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['(col + 1)'], {}), '(col + 1)\n', (1795, 1804), False, 'from openpyxl.utils import get_column_letter\n'), ((10925, 10982), 'openpyxl.worksheet.properties.PageSetupProperties', 'PageSetupProperties', ([], {'fitToPage': '(True)', 'autoPageBreaks': '(False)'}), '(fitToPage=True, autoPageBreaks=False)\n', (10944, 10982), False, 'from openpyxl.worksheet.properties import WorksheetProperties, PageSetupProperties\n'), ((1975, 2001), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['(col + 1)'], {}), '(col + 1)\n', (1992, 2001), False, 'from openpyxl.utils import get_column_letter\n'), ((3306, 3392), 'openpyxl.styles.Font', 'Font', ([], {'name': 'fontname', 'size': 'fontsize', 'bold': 'fontbold', 
'italic': 'fontital', 'color': 'fontcol'}), '(name=fontname, size=fontsize, bold=fontbold, italic=fontital, color=\n fontcol)\n', (3310, 3392), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((6664, 6690), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['(col + 1)'], {}), '(col + 1)\n', (6681, 6690), False, 'from openpyxl.utils import get_column_letter\n'), ((10140, 10164), 'openpyxl.styles.Side', 'Side', ([], {'style': 'bstyle[left]'}), '(style=bstyle[left])\n', (10144, 10164), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((10190, 10215), 'openpyxl.styles.Side', 'Side', ([], {'style': 'bstyle[right]'}), '(style=bstyle[right])\n', (10194, 10215), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((10239, 10262), 'openpyxl.styles.Side', 'Side', ([], {'style': 'bstyle[top]'}), '(style=bstyle[top])\n', (10243, 10262), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((10289, 10315), 'openpyxl.styles.Side', 'Side', ([], {'style': 'bstyle[bottom]'}), '(style=bstyle[bottom])\n', (10293, 10315), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((5010, 5034), 'openpyxl.styles.Protection', 'Protection', ([], {'locked': '(False)'}), '(locked=False)\n', (5020, 5034), False, 'from openpyxl.styles import NamedStyle, PatternFill, Alignment, Protection, Font, Border, Side\n'), ((11585, 11611), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (11601, 11611), False, 'import os, datetime\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
T = 200  # simulation horizon (time units)
h = 1e-2  # integration/output step size
t = np.arange(start=0, stop=T + h, step=h)  # time grid, endpoint included
bet, gam = 0.15, 1 / 50  # infection rate and recovery rate
# TODO: change this later to a random value
# S_pocz = np.random.uniform(0.7, 1)
S_start = 0.8
I_start = 1 - S_start
R_start = 0
N = S_start + I_start + R_start  # is const
# ---------------------------------------------------------------------------------------------------------------------#
def two_diff_ode_equation(state, t, bet, gam):
    """RHS of the two-equation SIR system (R follows from N - S - I)."""
    S, I = state
    infection = bet * I * S / N
    return [-infection, infection - gam * I]
def one_diff_equation_ode(state, t, bet, gam):
    """RHS of the single-equation reduction: I is expressed through S via
    the conserved quantity C fixed by the initial conditions."""
    S = state[0]
    C = I_start - gam / bet * np.log(S_start) + S_start  # C - const
    I_of_S = gam / bet * np.log(S) - S + C
    return [-bet / N * S * I_of_S]
def calc_R(S_arr, I_arr):
    """Recovered fraction at every grid point: R = N - S - I."""
    steps = len(t)
    recovered = np.zeros(steps)
    for idx in range(steps):
        recovered[idx] = N - S_arr[idx] - I_arr[idx]
    return recovered
def calc_I(S_arr):
    """Infected fraction recovered from S through the conserved constant C."""
    C = I_start - gam / bet * np.log(S_start) + S_start  # C - const
    infected = np.zeros(len(t))
    for idx in range(len(infected)):
        infected[idx] = gam / bet * np.log(S_arr[idx]) - S_arr[idx] + C
    return infected
def two_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the ODE solution: S and I come from *sym*, R is derived."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # S and I are columns of the solver output.
    for idx, lab in enumerate(labels[:-1]):
        ax.plot(t, sym[:, idx], label=lab)
    # R is reconstructed from the conservation of N.
    ax.plot(t, calc_R(sym[:, 0], sym[:, 1]), label=labels[2])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def one_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot S (solved by the ODE), I (derived via calc_I) and R (derived
    via calc_R) against time.

    NOTE: the mutable default for *labels* is kept for interface
    compatibility; it is only read, never mutated.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # plot drawing (S)
    ax.plot(t, sym[:, 0], label=labels[0])
    # plot drawing (I)
    # BUG FIX: the I curve was labelled labels[2] ('R'); it must use
    # labels[1] ('I') so the legend matches the data.
    I_arr = calc_I(sym[:, 0])
    ax.plot(t, I_arr, label=labels[1])
    # plot drawing (R)
    ax.plot(t, calc_R(sym[:, 0], I_arr), label=labels[2])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def two_equation_ode_main():
    """Solve the two-equation SIR system with odeint and plot S, I, R."""
    sym = odeint(two_diff_ode_equation, (S_start, I_start), t, args=(bet, gam))
    two_equation_ode_plot(t, sym, labels=['S', 'I', 'R'])
def one_equation_ode_main():
    """Solve the one-equation reduction with odeint and plot S, I, R."""
    sym = odeint(one_diff_equation_ode, S_start, t, args=(bet, gam))
    one_equation_ode_plot(t, sym, labels=['S', 'I', 'R'])
# using manual
# ---------------------------------------------------------------------------------------------------------------------#
# State trajectories for the manual (explicit Euler) integration; index 0
# holds the initial condition, the remaining entries are filled in place
# by the *_manual solvers below.
S = np.zeros(len(t))
S[0] = S_start
I = np.zeros(len(t))
I[0] = I_start
R = np.zeros(len(t))
R[0] = R_start
def two_diff_equation_manual():
    """Euler-step the two-equation SIR system in place (fills S, I, R)."""
    for step in range(t.size - 1):
        S[step + 1] = S[step] + h * (-bet * I[step] * S[step] / N)
        # NOTE: the I update uses the freshly computed S[step + 1]
        # (semi-implicit flavour of the original scheme).
        I[step + 1] = I[step] + h * (bet * I[step] * S[step + 1] / N - gam * I[step])
        R[step + 1] = N - S[step + 1] - I[step + 1]
def one_diff_equation_manual():
    """Euler-step the one-equation reduction in place (fills S, I, R)."""
    C = I_start - gam / bet * np.log(S_start) + S_start  # C - const
    for k in range(t.size - 1):
        I_of_S = gam / bet * np.log(S[k]) - S[k] + C
        S[k + 1] = S[k] + h * (-bet / N * S[k] * I_of_S)
        I[k + 1] = gam / bet * np.log(S[k + 1]) - S[k + 1] + C
        R[k + 1] = N - S[k + 1] - I[k + 1]
def equation_man_plot(t, sirList, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the precomputed S/I/R trajectories in a single figure."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    for idx, curve in enumerate(sirList):
        ax.plot(t, curve, label=labels[idx])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def two_equation_man_main():
    """Integrate the two-equation system manually and plot S, I, R."""
    two_diff_equation_manual()
    equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
def one_equation_man_main():
    """Integrate the one-equation reduction manually and plot S, I, R."""
    one_diff_equation_manual()
    equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
if __name__ == "__main__":
    # Pick exactly one of the four entry points below.
    # one_equation_ode_main()
    # one_equation_man_main()
    # two_equation_ode_main()
    two_equation_man_main()
    exit(0)
| [
"scipy.integrate.odeint",
"numpy.log",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((108, 146), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(T + h)', 'step': 'h'}), '(start=0, stop=T + h, step=h)\n', (117, 146), True, 'import numpy as np\n'), ((1261, 1308), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 4)'}), '(nrows=1, ncols=1, figsize=(10, 4))\n', (1273, 1308), True, 'import matplotlib.pyplot as plt\n'), ((1628, 1638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1636, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1775), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 4)'}), '(nrows=1, ncols=1, figsize=(10, 4))\n', (1740, 1775), True, 'import matplotlib.pyplot as plt\n'), ((2139, 2149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2147, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2288), 'scipy.integrate.odeint', 'odeint', (['two_diff_ode_equation', 'start_state', 't'], {'args': '(bet, gam)'}), '(two_diff_ode_equation, start_state, t, args=(bet, gam))\n', (2232, 2288), False, 'from scipy.integrate import odeint\n'), ((2414, 2476), 'scipy.integrate.odeint', 'odeint', (['one_diff_equation_ode', 'start_state', 't'], {'args': '(bet, gam)'}), '(one_diff_equation_ode, start_state, t, args=(bet, gam))\n', (2420, 2476), False, 'from scipy.integrate import odeint\n'), ((3435, 3482), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 4)'}), '(nrows=1, ncols=1, figsize=(10, 4))\n', (3447, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3716, 3726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3724, 3726), True, 'import matplotlib.pyplot as plt\n'), ((691, 706), 'numpy.log', 'np.log', (['S_start'], {}), '(S_start)\n', (697, 706), True, 'import numpy as np\n'), ((992, 1007), 'numpy.log', 'np.log', (['S_start'], {}), '(S_start)\n', (998, 1007), True, 'import numpy as np\n'), ((3081, 3096), 'numpy.log', 'np.log', 
(['S_start'], {}), '(S_start)\n', (3087, 3096), True, 'import numpy as np\n'), ((1123, 1139), 'numpy.log', 'np.log', (['S_arr[i]'], {}), '(S_arr[i])\n', (1129, 1139), True, 'import numpy as np\n'), ((3271, 3287), 'numpy.log', 'np.log', (['S[i + 1]'], {}), '(S[i + 1])\n', (3277, 3287), True, 'import numpy as np\n'), ((771, 780), 'numpy.log', 'np.log', (['S'], {}), '(S)\n', (777, 780), True, 'import numpy as np\n'), ((3214, 3226), 'numpy.log', 'np.log', (['S[i]'], {}), '(S[i])\n', (3220, 3226), True, 'import numpy as np\n')] |
import numpy as np
from skfuzzy import cmeans
from config import NAN, FCMParam
class FCMeansEstimator:
    """Impute missing values (entries equal to NAN) in a 2-D data matrix.

    The complete rows are clustered with fuzzy c-means; each missing entry
    is then estimated as the membership-weighted combination of the
    cluster centers' values at the missing coordinate.
    """

    def __init__(self, c, m, data):
        # c: number of clusters; m: fuzzifier (m > 1); data: 2-D array-like.
        self.c = c
        self.m = m
        self.data = data
        self.complete_rows, self.incomplete_rows = self.__extract_rows()

    # Extract complete and incomplete rows
    def __extract_rows(self):
        """Split row indices into (complete, incomplete) index arrays.

        A row is incomplete when at least one of its entries equals NAN.
        """
        rows, columns = len(self.data), len(self.data[0])
        complete_rows, incomplete_rows = [], []
        for i in range(rows):
            for j in range(columns):
                if self.data[i][j] == NAN:
                    incomplete_rows.append(i)
                    break
            else:
                # BUG FIX: append only when the inner loop found no NAN.
                # Previously this append was unconditional (outside a
                # for/else), so incomplete rows also ended up in
                # complete_rows and polluted the clustering input.
                complete_rows.append(i)
        return np.array(complete_rows), np.array(incomplete_rows)

    # Estimate the missing values
    def estimate_missing_values(self):
        """Return a np.array with one estimate per incomplete row (for the
        first NAN entry found in that row)."""
        estimated_data = []
        complete_data = np.array([self.data[x] for x in self.complete_rows])
        centers, _, _, _, _, _, _ = cmeans(data=complete_data.transpose(), c=self.c, m=self.m, error=FCMParam.ERROR,
                                           maxiter=FCMParam.MAX_ITR, init=None)

        # Calculate distance between two points based on euclidean distance
        def calculate_distance(data_1, data_2):
            return np.linalg.norm(data_1 - data_2)

        # Calculate the membership value for given point
        # NOTE(review): the exponent -2 / (1 - m) equals +2 / (m - 1); the
        # textbook FCM membership uses d**(-2 / (m - 1)), i.e. the opposite
        # sign, which would weight *nearer* centers more. Kept as-is to
        # preserve existing results -- confirm the intended formula.
        def calculate_membership(dist_matrix, distance, m):
            numerator = np.power(distance, -2 / (1 - m))
            denominator = np.array([np.power(x, -2 / (1 - m)) for x in dist_matrix]).sum()
            return numerator / denominator

        for i in self.incomplete_rows:
            estimated = 0
            dist, membership_value = [], []
            # Index of the (first) missing coordinate in this row.
            miss_ind = np.where(self.data[i] == NAN)[0][0]
            for center in centers:
                # Compare only the observed coordinates: drop the missing
                # dimension from both the center and the data row.
                dist.append(calculate_distance(data_1=np.delete(np.array(center), miss_ind),
                                                data_2=np.delete(np.array(self.data[i]), miss_ind)))
            for d in dist:
                membership_value.append(calculate_membership(dist, d, self.m))
            for k in range(self.c):
                estimated += centers[k][miss_ind] * membership_value[k]
            estimated_data.append(estimated)
        return np.array(estimated_data)
| [
"numpy.where",
"numpy.array",
"numpy.power",
"numpy.linalg.norm"
] | [((873, 925), 'numpy.array', 'np.array', (['[self.data[x] for x in self.complete_rows]'], {}), '([self.data[x] for x in self.complete_rows])\n', (881, 925), True, 'import numpy as np\n'), ((2284, 2308), 'numpy.array', 'np.array', (['estimated_data'], {}), '(estimated_data)\n', (2292, 2308), True, 'import numpy as np\n'), ((696, 719), 'numpy.array', 'np.array', (['complete_rows'], {}), '(complete_rows)\n', (704, 719), True, 'import numpy as np\n'), ((721, 746), 'numpy.array', 'np.array', (['incomplete_rows'], {}), '(incomplete_rows)\n', (729, 746), True, 'import numpy as np\n'), ((1267, 1298), 'numpy.linalg.norm', 'np.linalg.norm', (['(data_1 - data_2)'], {}), '(data_1 - data_2)\n', (1281, 1298), True, 'import numpy as np\n'), ((1441, 1473), 'numpy.power', 'np.power', (['distance', '(-2 / (1 - m))'], {}), '(distance, -2 / (1 - m))\n', (1449, 1473), True, 'import numpy as np\n'), ((1741, 1770), 'numpy.where', 'np.where', (['(self.data[i] == NAN)'], {}), '(self.data[i] == NAN)\n', (1749, 1770), True, 'import numpy as np\n'), ((1510, 1535), 'numpy.power', 'np.power', (['x', '(-2 / (1 - m))'], {}), '(x, -2 / (1 - m))\n', (1518, 1535), True, 'import numpy as np\n'), ((1877, 1893), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (1885, 1893), True, 'import numpy as np\n'), ((1970, 1992), 'numpy.array', 'np.array', (['self.data[i]'], {}), '(self.data[i])\n', (1978, 1992), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Author : RoxanneB <<EMAIL>>
Date : 2021-10-07
Purpose: Rock the Casbah
"""
import argparse
import sys
import string
from collections import defaultdict
# --------------------------------------------------
def get_args():
    """Get command-line arguments"""
    p = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Two required input files, opened for reading by argparse itself.
    p.add_argument('infile1', help='Input file 1', metavar='FILE',
                   type=argparse.FileType('rt'), default=None)
    p.add_argument('infile2', help='Input file 2', metavar='FILE',
                   type=argparse.FileType('rt'), default=None)

    # Output defaults to stdout when no file is given.
    p.add_argument('-o', '--outfile', help='Optional output file',
                   metavar='FILE', type=argparse.FileType('wt'),
                   default=sys.stdout)

    # 0 disables the Hamming-distance mode.
    p.add_argument('-d', '--distance', help='Calculate Hamming distance',
                   metavar='int', type=int, default=0)

    return p.parse_args()
# --------------------------------------------------
def flatlist(nested_list):
    """Flatten one level of nesting into a single list."""
    flattened = []
    for inner in nested_list:
        flattened.extend(inner)
    return flattened
def lines_to_list(infile):
    """ put all words in a file into a list """
    # Split every line on whitespace and hand the per-line lists to
    # flatlist() so the caller gets one flat list of tokens.
    per_line = [line.split() for line in infile]
    return flatlist(per_line)
def rm_punctuation(inlist):
    """Strip all ASCII punctuation from every word in *inlist*.

    Takes the output of lines_to_list(); requires the str module for
    string.punctuation.  Returns a new list, the input is not modified.
    """
    # Build the translation table once instead of once per word --
    # str.maketrans() is loop-invariant.
    table = str.maketrans('', '', string.punctuation)
    return [item.translate(table) for item in inlist]
def get_hamming(seq1, seq2):
    """Hamming-style distance: positional mismatches over the common
    prefix plus the difference in length."""
    mismatches = sum(1 for a, b in zip(seq1, seq2) if a != b)
    return mismatches + abs(len(seq1) - len(seq2))
def main():
    """Make a jazz noise here"""
    args = get_args()
    # Lowercase and de-punctuate every word of both input files.
    words1 = rm_punctuation([w.lower() for w in lines_to_list(args.infile1)])
    words2 = rm_punctuation([w.lower() for w in lines_to_list(args.infile2)])

    if args.distance:
        # Group word pairs by their Hamming distance, then emit every
        # group at or below the requested threshold.
        by_dist = defaultdict(list)
        for w1 in words1:
            for w2 in words2:
                by_dist[get_hamming(w1, w2)].append([w1, w2])
        for dist, pairs in by_dist.items():
            if dist <= args.distance:
                print(flatlist(pairs), file=args.outfile)
    else:
        # No distance requested: print the sorted exact intersection.
        for word in sorted(set(words1).intersection(words2)):
            print(word, file=args.outfile)
# --------------------------------------------------
# Script entry point.
if __name__ == '__main__':
    main()
| [
"argparse.FileType",
"collections.defaultdict",
"argparse.ArgumentParser"
] | [((303, 418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Rock the Casbah"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Rock the Casbah', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (326, 418), False, 'import argparse\n'), ((2684, 2701), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2695, 2701), False, 'from collections import defaultdict\n'), ((581, 604), 'argparse.FileType', 'argparse.FileType', (['"""rt"""'], {}), "('rt')\n", (598, 604), False, 'import argparse\n'), ((794, 817), 'argparse.FileType', 'argparse.FileType', (['"""rt"""'], {}), "('rt')\n", (811, 817), False, 'import argparse\n'), ((1047, 1070), 'argparse.FileType', 'argparse.FileType', (['"""wt"""'], {}), "('wt')\n", (1064, 1070), False, 'import argparse\n')] |
import datetime
import os
if __name__ == '__main__':
    # Timestamped archive name, e.g. tc-rapids-2021-05-01-12-30.zip.
    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
    print(stamp)
    cmd = 'zip -r tc-rapids-{}.zip triangle-counting technical_report.pdf -x *cmake-build-debug/* -x */CMake* -x *.idea/*'.format(stamp)
    os.system(cmd)
| [
"datetime.datetime.now"
] | [((69, 92), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (90, 92), False, 'import datetime\n')] |
'''This package sets up the admin interface for the :mod:`papers` app.'''
from django.contrib import admin
from projects.models import Funding, FundingAgency
class FundingAdmin(admin.ModelAdmin):
    '''The :class:`~projects.models.Funding` model admin is the default.'''
    # Redundant `pass` removed -- the docstring already forms the class body.


admin.site.register(Funding, FundingAdmin)
class FundingAgencyAdmin(admin.ModelAdmin):
    '''The :class:`~projects.models.FundingAgency` model admin is the default.'''
    # Redundant `pass` removed -- the docstring already forms the class body.


admin.site.register(FundingAgency, FundingAgencyAdmin)
| [
"django.contrib.admin.site.register"
] | [((286, 328), 'django.contrib.admin.site.register', 'admin.site.register', (['Funding', 'FundingAdmin'], {}), '(Funding, FundingAdmin)\n', (305, 328), False, 'from django.contrib import admin\n'), ((469, 523), 'django.contrib.admin.site.register', 'admin.site.register', (['FundingAgency', 'FundingAgencyAdmin'], {}), '(FundingAgency, FundingAgencyAdmin)\n', (488, 523), False, 'from django.contrib import admin\n')] |
import sim
import utils
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
    """Parse CLI parameters, run the traffic simulation and show the plot.

    Flags: -N number of cars, -L road length, -P braking probability,
    -S number of simulation steps (required).
    """
    my_parser = argparse.ArgumentParser(description='Parameters for Simulation')
    my_parser.add_argument('-N', '--n_cars', type=int, action='store', help='Number of cars', default = 40)
    my_parser.add_argument('-L', '--length', type=int, action='store', help='Length of road', default = 250)
    my_parser.add_argument('-P', '--p_break', type=float, action='store', help='probability of stopping', default = 0.1)
    my_parser.add_argument('-S', '--steps', type=int, action='store', help='Steps of simulation', required = True)
    args = my_parser.parse_args()
    # BUG FIX: removed leftover debug output `print(dir(args))`.
    N = args.n_cars
    L = args.length
    pos = np.zeros(N)
    vel = np.zeros(N)
    sim.populate_arrays(pos, vel, N)
    pos_list = sim.run_simulation(pos, vel, N, L, MAX_STEPS=args.steps, p=args.p_break)
    # NOTE(review): `flow` is computed but never used afterwards; the call
    # is kept in case estimate_flow has side effects -- consider printing
    # it or dropping the call.
    flow = utils.estimate_flow(pos_list, N, 0, 250)
    sim_fig = utils.plot_simulation(pos_list)
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"sim.populate_arrays",
"utils.plot_simulation",
"argparse.ArgumentParser",
"utils.estimate_flow",
"sim.run_simulation",
"numpy.zeros",
"matplotlib.pyplot.show"
] | [((122, 186), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parameters for Simulation"""'}), "(description='Parameters for Simulation')\n", (145, 186), False, 'import argparse\n'), ((744, 755), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (752, 755), True, 'import numpy as np\n'), ((766, 777), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (774, 777), True, 'import numpy as np\n'), ((783, 815), 'sim.populate_arrays', 'sim.populate_arrays', (['pos', 'vel', 'N'], {}), '(pos, vel, N)\n', (802, 815), False, 'import sim\n'), ((829, 901), 'sim.run_simulation', 'sim.run_simulation', (['pos', 'vel', 'N', 'L'], {'MAX_STEPS': 'args.steps', 'p': 'args.p_break'}), '(pos, vel, N, L, MAX_STEPS=args.steps, p=args.p_break)\n', (847, 901), False, 'import sim\n'), ((912, 952), 'utils.estimate_flow', 'utils.estimate_flow', (['pos_list', 'N', '(0)', '(250)'], {}), '(pos_list, N, 0, 250)\n', (931, 952), False, 'import utils\n'), ((965, 996), 'utils.plot_simulation', 'utils.plot_simulation', (['pos_list'], {}), '(pos_list)\n', (986, 996), False, 'import utils\n'), ((1001, 1011), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1009, 1011), True, 'import matplotlib.pyplot as plt\n')] |
from PythonAPI.bam import BAM
import numpy as np
dataset_dir = '/home/beatriz/Documentos/Work/final_datasets' # For Bea
# dataset_dir = '/home/almartmen/Github/aikapi' # For Alberto
dataset_name = '181129'
bam = BAM(dataset_dir, dataset_name, image_format='png')
# bam.unroll_videos()
# print(bam.get_persons_in_frame(800))
# print(bam.get_poses_in_frame(801))
# person3d = bam.get_person_in_frame(800, 1)
# print(person3d)
# pose3d = bam.get_pose_in_frame(799, 1)
# print(pose3d)
# print(bam.get_activities_for_person(2))
# print(bam.get_images_in_frame(1141))
# bam.unroll_videos(force=True, video=1)
# camera = bam.get_camera(3,1)
# points3d = np.array([[0.498339264765202, 3.2171029078369897, 1.5828869056621102]])
# points2d = camera.project_points(points3d)
# print(points2d)
# points2d_pose = camera.project_points(pose3d)
# print(points2d_pose)
# bam.unroll_videos()
print(bam.get_total_cameras())
print(bam.get_total_frames())
print(bam.get_person_ids())
print(bam.get_static_object_ids())
# print(bam.get_activity_names())
# print(bam.get_annotations_for_person(19))
print(bam.get_persons_in_frame(1000))
# p = bam.get_annotations_for_person(1)
# print(p)
### OBJECTS
# print(bam.get_static_objects_in_frame(2))
# print(bam.get_static_object_in_frame(3, 21))
# print(bam.get_annotations_for_static_object(21))
| [
"PythonAPI.bam.BAM"
] | [((219, 269), 'PythonAPI.bam.BAM', 'BAM', (['dataset_dir', 'dataset_name'], {'image_format': '"""png"""'}), "(dataset_dir, dataset_name, image_format='png')\n", (222, 269), False, 'from PythonAPI.bam import BAM\n')] |
#!/usr/bin/env/python3
import setuptools
with open("README.md", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="Amazon DenseClus",
version="0.0.19",
author="<NAME>",
description="Dense Clustering for Mixed Data Types",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/awslabs/amazon-denseclus",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
license_files=("LICENSE",),
install_requires=[
"umap_learn>=0.5.1",
"numpy>=1.20.2",
"hdbscan>=0.8.27",
"numba>=0.51.2",
"pandas>=1.2.4",
"scikit_learn>=0.24.2",
],
)
| [
"setuptools.find_packages"
] | [((429, 455), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (453, 455), False, 'import setuptools\n')] |