hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a07e8b1935ce721bb2063bbe19f95a39a4c090e
| 3,314
|
py
|
Python
|
monailabel/utils/others/planner.py
|
hamghalam/MONAILabel
|
4f322db0a056d2757d134c9b3c4d4f35740bbf6d
|
[
"Apache-2.0"
] | null | null | null |
monailabel/utils/others/planner.py
|
hamghalam/MONAILabel
|
4f322db0a056d2757d134c9b3c4d4f35740bbf6d
|
[
"Apache-2.0"
] | null | null | null |
monailabel/utils/others/planner.py
|
hamghalam/MONAILabel
|
4f322db0a056d2757d134c9b3c4d4f35740bbf6d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import subprocess
from collections import OrderedDict
import numpy as np
from monai.transforms import LoadImage
from tqdm import tqdm
from monailabel.interfaces import MONAILabelError, MONAILabelException
logger = logging.getLogger(__name__)
class ExperimentPlanner(object):
    """Heuristic experiment planner.

    Scans every image in the given datastore once at construction time and
    derives a target voxel spacing (mean over images) and a target image
    size (element-wise max over images) for later planning decisions.
    """

    def __init__(self, datastore):
        # Ordered container for plan parameters accumulated by the planner.
        self.plans = OrderedDict()
        self.datastore = datastore
        self.get_img_info()

    def get_gpu_memory_map(self):
        """Query free GPU memory per device via ``nvidia-smi``.

        Returns
        -------
        dict
            Keys are device ids as integers; values are free memory in MB.

        Raises
        ------
        MONAILabelException
            If the ``nvidia-smi`` executable is not on the PATH.
        """
        logger.info("Using nvidia-smi command")
        if shutil.which("nvidia-smi") is None:
            raise MONAILabelException(
                MONAILabelError.APP_INIT_ERROR,
                "nvidia-smi command doesn't work!",
            )
        result = subprocess.check_output(
            ["nvidia-smi", "--query-gpu=memory.free", "--format=csv,nounits,noheader"], encoding="utf-8"
        )
        # One line of output per device; each line is the free MB as an int.
        free_per_device = [int(line) for line in result.strip().split("\n")]
        return {device_id: free_mb for device_id, free_mb in enumerate(free_per_device)}

    def get_img_info(self):
        """Read spacing/shape metadata for every datastore image and store
        ``target_spacing`` (mean) and ``target_img_size`` (max)."""
        loader = LoadImage(reader="ITKReader")
        all_spacings = []
        all_shapes = []
        logger.info("Reading datastore metadata for heuristic planner ...")
        for image_id in tqdm(self.datastore.list_images()):
            _, meta = loader(self.datastore.get_image_uri(image_id))
            all_spacings.append(meta["spacing"])
            all_shapes.append(meta["spatial_shape"])
        self.target_spacing = np.mean(np.array(all_spacings), 0)
        self.target_img_size = np.max(np.array(all_shapes), 0)

    def get_target_img_size(self):
        """Pick an ROI size matching the free memory of GPU 0.

        The (memory-in-MB, ROI-size) pairs below were measured for DynUNetV1;
        the entry whose memory figure is closest to the currently free memory
        wins.
        """
        candidates = [
            (3000, [64, 64, 32]),
            (4100, [256, 256, 16]),
            (4300, [128, 128, 64]),
            (5900, [256, 256, 32]),
            (7700, [192, 192, 96]),
            (9000, [256, 256, 64]),
            (9300, [192, 192, 128]),
            (12100, [256, 256, 96]),
            (17700, [256, 256, 128]),
        ]
        free_mem = self.get_gpu_memory_map()[0]
        nearest = np.abs(np.array([mem for mem, _ in candidates]) - free_mem).argmin()
        return candidates[nearest][1]

    def get_target_spacing(self):
        """Return the mean spacing rounded to whole units."""
        return np.around(self.target_spacing)
| 35.634409
| 104
| 0.62764
|
4a07e8bf1973879fef14791742388aec4c8c316b
| 2,153
|
py
|
Python
|
src/models/correlations/Correlations.py
|
teatauri/stats_biogeo_2021
|
9face4cdf8e8c569f927073cd1cc7ab9b45054f7
|
[
"MIT"
] | null | null | null |
src/models/correlations/Correlations.py
|
teatauri/stats_biogeo_2021
|
9face4cdf8e8c569f927073cd1cc7ab9b45054f7
|
[
"MIT"
] | null | null | null |
src/models/correlations/Correlations.py
|
teatauri/stats_biogeo_2021
|
9face4cdf8e8c569f927073cd1cc7ab9b45054f7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import dcor
COLS = ["PO4", "NO3", "Fe", "Si", "SST", "SSS", "PAR"]
INDEX = ["Pro", "Pico", "Cocco", "Diazo", "Diatom", "Dino", "Zoo"]
def get_sample_sets(*paths):
    """Load pickled sample sets and return them in argument order.

    Args:
        *paths: file paths readable by ``pd.read_pickle``.

    Returns:
        list of the unpickled objects, one per path, order preserved.
    """
    # The original built a throwaway dict keyed "s0", "s1", ... and then
    # rebuilt a list from it; reading straight into a list is equivalent
    # and removes the pointless indirection.
    return [pd.read_pickle(path) for path in paths]
def calculate_dcorrs(predictors, f_groups):
    """Distance correlation between each predictor column and each group.

    Args:
        predictors: DataFrame containing (at least) the columns in ``COLS``.
            NOTE: mutated in place — PAR/SSS/SST are cast to float64 and the
            index is reset, matching the original behaviour.
        f_groups: mapping of group name (rows of ``INDEX``) to a Series/array
            of plankton values aligned with ``predictors``.

    Returns:
        DataFrame (groups x predictors) of distance-correlation values.
    """
    dcorrs = dcorr_df()
    # dcor needs float input; these columns may arrive with another dtype.
    predictors[["PAR", "SSS", "SST"]] = np.float64(predictors[["PAR", "SSS", "SST"]])
    predictors.reset_index(drop=True, inplace=True)
    for groupname, plankton_data in f_groups.items():
        for col in COLS:
            dcorrs.loc[groupname, col] = distance_correlation(
                predictors[col].values, plankton_data.values
            )
    # Removed a stray no-op expression statement (`dcorrs` on its own line)
    # that the original had just before the return.
    return dcorrs
def dcorr_df():
    """Return an empty results frame: groups as rows, predictors as columns."""
    empty = pd.DataFrame(index=INDEX, columns=COLS)
    return empty
def distance_correlation(predictor, plankton):
    """Thin wrapper around :func:`dcor.distance_correlation` for two arrays."""
    return dcor.distance_correlation(predictor, plankton)
def calculate_pearsons(predictors, plankton):
    """Pearson correlation of each predictor with each plankton group.

    Returns a frame with plankton groups as rows and predictors as columns.
    """
    combined = get_df(predictors, plankton)
    corr = combined.corr(method="pearson")
    # Keep plankton-group columns against the first 7 (predictor) rows,
    # then transpose so groups become the row index.
    trimmed = corr[INDEX].iloc[0:7]
    return trimmed.T
def calculate_ln_pearsons(predictors, plankton):
    """Pearson correlation on natural-log-transformed data.

    Same layout as :func:`calculate_pearsons`: groups as rows.
    """
    logged = np.log(get_df(predictors, plankton))
    corr = logged.corr(method="pearson")
    return corr[INDEX].iloc[0:7].T
def calculate_spearmans(predictors, plankton):
    """Spearman (rank) correlation of predictors vs plankton groups."""
    combined = get_df(predictors, plankton)
    rank_corr = combined.corr(method="spearman")
    # Same trimming as the Pearson variants: groups as rows after transpose.
    return rank_corr[INDEX].iloc[0:7].T
def get_df(predictors, plankton):
    """Join predictor columns and plankton-group columns into one frame."""
    plankton_frame = pd.DataFrame(plankton, columns=INDEX)
    combined = pd.concat([predictors, plankton_frame], axis=1)
    return combined
# TODO check dcorrs[i][1] - dcorrs[i][0] or dcorrs[i][0] - dcorrs[i][1] (1st version)
def calculate_differences(*dcorrs):
    """For each (before, after) pair, return ``round(after - before, 2)``."""
    return [round(pair[1] - pair[0], 2) for pair in dcorrs]
| 29.493151
| 85
| 0.677659
|
4a07e8f4b84fc8083e653e0e04c4016e132e5422
| 106
|
py
|
Python
|
ACME/utility/inf.py
|
mauriziokovacic/ACME
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 3
|
2019-10-23T23:10:55.000Z
|
2021-09-01T07:30:14.000Z
|
ACME/utility/inf.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | null | null | null |
ACME/utility/inf.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 1
|
2020-07-11T11:35:43.000Z
|
2020-07-11T11:35:43.000Z
|
import sys

# Portable positive infinity.
# BUG FIX: the original tested `sys.version_info > (2, 7)`, which also
# matches Python 3.0-3.4 — but `math.inf` only exists since Python 3.5,
# so those interpreters raised AttributeError. Both branches produce the
# same IEEE-754 +inf value, so callers see no behavioural difference.
if sys.version_info >= (3, 5):
    import math
    Inf = math.inf
else:
    Inf = float('inf')
| 13.25
| 29
| 0.59434
|
4a07e91340e66ca395c41b94cfd059014b2a0366
| 2,434
|
py
|
Python
|
sample/integrationtest.py
|
akmurugan/automated-test-environment-for-aks-applications
|
693e2ace287ac388d47185f7925b646507231bdd
|
[
"MIT"
] | 8
|
2020-08-05T14:02:19.000Z
|
2021-08-10T00:35:01.000Z
|
sample/integrationtest.py
|
akmurugan/automated-test-environment-for-aks-applications
|
693e2ace287ac388d47185f7925b646507231bdd
|
[
"MIT"
] | 2
|
2020-08-10T13:07:56.000Z
|
2020-08-21T08:13:10.000Z
|
sample/integrationtest.py
|
akmurugan/automated-test-environment-for-aks-applications
|
693e2ace287ac388d47185f7925b646507231bdd
|
[
"MIT"
] | 10
|
2020-08-03T08:23:19.000Z
|
2022-03-17T08:49:38.000Z
|
import unittest
import requests
class TestRest(unittest.TestCase):
    '''
    Sample Integration Test Cases for https://github.com/Azure-Samples/helm-charts/tree/master/chart-source/azure-vote
    '''
    azure_vote_endpoint = "http://localhost:8080"

    def _get_home(self):
        # Shared helper: GET the app root, assert a 200, return the body text.
        # Extracted because all four tests repeated this sequence verbatim.
        url = self.azure_vote_endpoint + '/'
        response = requests.get(url=url)
        self.assertEqual(response.status_code, 200)
        return response.text

    def test_1_title_check(self):
        """
        [Test Case 1]: Basic Integration Test Title Check - GET - http://azure-vote-front/
        """
        print("\n[Test Case 1]: Basic Integration Test Title Check - GET - http://azure-vote-front/")
        self.assertIn("Kind Integration Test Framework Demo", self._get_home())

    def test_2_button_check_1(self):
        """
        [Test Case 2]: Basic Integration Test Button Check 1 - GET - http://azure-vote-front/
        """
        # BUG FIX: banner previously said "[Test Case 1]" (copy-paste error).
        print("\n[Test Case 2]: Basic Integration Test Button Check 1 - GET - http://azure-vote-front/")
        self.assertIn("Kubernetes", self._get_home())

    def test_3_button_check_2(self):
        """
        [Test Case 3]: Basic Integration Test Button Check 2 - GET - http://azure-vote-front/
        """
        # BUG FIX: banner previously said "[Test Case 1]" (copy-paste error).
        print("\n[Test Case 3]: Basic Integration Test Button Check 2 - GET - http://azure-vote-front/")
        self.assertIn("DockerSwarm", self._get_home())

    def test_4_default_vote_check(self):
        """
        [Test Case 4]: Basic Integration Test Default Votes - GET - http://azure-vote-front/
        """
        # BUG FIX: docstring said "[Test Case 3]" and banner "[Test Case 1]".
        print("\n[Test Case 4]: Basic Integration Test Default Votes - GET - http://azure-vote-front/")
        self.assertIn("Kubernetes - 0 | DockerSwarm - 0", self._get_home())
# Allow running this module directly: `python integrationtest.py`.
if __name__ == '__main__':
    unittest.main()
| 41.254237
| 118
| 0.638866
|
4a07ea87cbdbc78fdef56db177e449bb078a1f11
| 939
|
py
|
Python
|
backprop/utils/load.py
|
lucky7323/backprop
|
4daa756f3a46600d4dfa0631bb3607237df1fed6
|
[
"Apache-2.0"
] | 200
|
2021-03-22T17:29:46.000Z
|
2022-03-20T21:58:31.000Z
|
backprop/utils/load.py
|
lucky7323/backprop
|
4daa756f3a46600d4dfa0631bb3607237df1fed6
|
[
"Apache-2.0"
] | 6
|
2021-04-15T06:48:32.000Z
|
2021-12-21T08:07:49.000Z
|
backprop/utils/load.py
|
lucky7323/backprop
|
4daa756f3a46600d4dfa0631bb3607237df1fed6
|
[
"Apache-2.0"
] | 15
|
2021-03-25T05:25:43.000Z
|
2022-01-04T08:12:29.000Z
|
import dill
import json
import os
def load(path):
    """
    Loads a saved model and returns it.

    Args:
        path: Name of the model or full path to model.

    Raises:
        ValueError: if no ``model.bin`` exists in the cache folder or at
            the given path.

    Example::
        import backprop
        backprop.save(model_object, "my_model")
        model = backprop.load("my_model")
    """
    # Prefer the user cache folder, where models saved by name end up.
    cache_model_path = os.path.expanduser(f"~/.cache/backprop/{path}/model.bin")
    if os.path.exists(cache_model_path):
        model_path = cache_model_path
    else:
        # Otherwise treat `path` as a directory containing model.bin,
        # resolving relative paths against the current working directory.
        model_path = os.path.join(path, "model.bin")
        if not os.path.isabs(model_path):
            model_path = os.path.join(os.getcwd(), model_path)
        if not os.path.exists(model_path):
            raise ValueError("model not found!")
    # dill extends pickle to cover more model object types.
    # (Fixed: the original wrapped the final path in a pointless
    # single-argument os.path.join and reused the `path` name for it.)
    with open(model_path, "rb") as f:
        model = dill.load(f)
    return model
| 24.710526
| 64
| 0.601704
|
4a07eacbdc26771affd0626bfd362d68c283b921
| 27
|
py
|
Python
|
models/__init__.py
|
ChrisSun06/Context-Aware-Consistency
|
481b52b42ef6c2fe8180ea6417bc0b2d8fe1951a
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
ChrisSun06/Context-Aware-Consistency
|
481b52b42ef6c2fe8180ea6417bc0b2d8fe1951a
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
ChrisSun06/Context-Aware-Consistency
|
481b52b42ef6c2fe8180ea6417bc0b2d8fe1951a
|
[
"MIT"
] | null | null | null |
from .model import CAC, SGS
| 27
| 27
| 0.777778
|
4a07eaf6e8024660da0524dae6ac8aaea627f9a6
| 104
|
py
|
Python
|
python_modules/dagster/dagster/seven/compat/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
python_modules/dagster/dagster/seven/compat/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
python_modules/dagster/dagster/seven/compat/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
"""This module is intended for compatibility shims between versions of our third-party
dependencies."""
| 34.666667
| 86
| 0.798077
|
4a07eb7d9241ba4e895d29ece104f6be1ba7bd51
| 177
|
py
|
Python
|
examples/core/create_benchmark.py
|
gugarosa/opytimark
|
cad25623f23ce4b509d59381cf7bd79e41a966b6
|
[
"Apache-2.0"
] | 3
|
2020-06-11T22:58:26.000Z
|
2021-03-15T20:12:29.000Z
|
examples/core/create_benchmark.py
|
gugarosa/opytimark
|
cad25623f23ce4b509d59381cf7bd79e41a966b6
|
[
"Apache-2.0"
] | 1
|
2020-08-13T12:10:35.000Z
|
2020-08-17T14:30:45.000Z
|
examples/core/create_benchmark.py
|
gugarosa/opytimark
|
cad25623f23ce4b509d59381cf7bd79e41a966b6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from opytimark.core import Benchmark
# Example: instantiate the library's abstract base benchmark directly.
# Creating a Benchmark class instance
# Note that this class does not have __call__ method implemented
b = Benchmark()
| 22.125
| 64
| 0.79661
|
4a07eb86d985e62f8857f5d382416eeb2e4414bd
| 13,697
|
py
|
Python
|
brian2/tests/test_stateupdaters.py
|
divyashivaram/brian2
|
ac086e478efa50be772c6cee55b52b43018bc77a
|
[
"BSD-2-Clause"
] | 1
|
2019-12-25T16:33:37.000Z
|
2019-12-25T16:33:37.000Z
|
brian2/tests/test_stateupdaters.py
|
divyashivaram/brian2
|
ac086e478efa50be772c6cee55b52b43018bc77a
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/tests/test_stateupdaters.py
|
divyashivaram/brian2
|
ac086e478efa50be772c6cee55b52b43018bc77a
|
[
"BSD-2-Clause"
] | null | null | null |
import re
from collections import namedtuple
from numpy.testing.utils import assert_equal, assert_raises
from brian2 import *
from brian2.utils.logger import catch_logs
from brian2.core.variables import ArrayVariable, AttributeVariable, Variable
def test_explicit_stateupdater_parsing():
    '''
    Test the parsing of explicit state updater descriptions.

    Valid single- and multi-line descriptions (with and without stochastic
    dW terms) must parse and apply cleanly; malformed descriptions must
    raise SyntaxError/ValueError.
    '''
    # These are valid descriptions and should not raise errors
    updater = ExplicitStateUpdater('x_new = x + dt * f(x, t)')
    updater(Equations('dv/dt = -v / tau : 1'))
    updater = ExplicitStateUpdater('''x2 = x + dt * f(x, t)
                                      x_new = x2''')
    updater(Equations('dv/dt = -v / tau : 1'))
    # Description containing a stochastic term (dW)
    updater = ExplicitStateUpdater('''x1 = g(x, t) * dW
                                      x2 = x + dt * f(x, t)
                                      x_new = x1 + x2''')
    updater(Equations('dv/dt = -v / tau + v * xi * tau**-.5: 1'))
    # Multi-step description with intermediate support values
    updater = ExplicitStateUpdater('''x_support = x + dt*f(x, t) + dt**.5 * g(x, t)
                                      g_support = g(x_support, t)
                                      k = 1/(2*dt**.5)*(g_support - g(x, t))*(dW**2)
                                      x_new = x + dt*f(x,t) + g(x, t) * dW + k''')
    updater(Equations('dv/dt = -v / tau + v * xi * tau**-.5: 1'))

    # Examples of failed parsing
    # No x_new = ... statement
    assert_raises(SyntaxError, lambda: ExplicitStateUpdater('x = x + dt * f(x, t)'))
    # Not an assignment
    assert_raises(SyntaxError, lambda: ExplicitStateUpdater('''2 * x
                                                               x_new = x + dt * f(x, t)'''))
    # doesn't separate into stochastic and non-stochastic part
    updater = ExplicitStateUpdater('''x_new = x + dt * f(x, t) * g(x, t) * dW''')
    assert_raises(ValueError, lambda: updater(Equations('')))
def test_str_repr():
    '''
    Assure that __str__ and __repr__ do not raise errors
    '''
    # Merely building both string forms is the test; an empty result would
    # also be a bug, hence the len() assertions.
    for method in (linear, euler, rk2, rk4):
        for render in (str, repr):
            assert len(render(method))
def test_integrator_code():
    '''
    Check whether the returned abstract code is as expected.
    '''
    # A very simple example where the abstract code should always look the same
    eqs = Equations('dv/dt = -v / (1 * second) : 1')

    # Only test very basic stuff (expected number of lines and last line)
    for integrator, lines in zip([linear, euler, rk2, rk4], [2, 2, 3, 6]):
        code_lines = integrator(eqs).split('\n')
        err_msg = 'Returned code for integrator %s had %d lines instead of %d' % (integrator.__class__.__name__, len(code_lines), lines)
        assert len(code_lines) == lines, err_msg
        # The final statement always writes the result back into v.
        assert code_lines[-1] == 'v = _v'

    # Make sure that it isn't a problem to use 'x', 'f' and 'g' as variable
    # names, even though they are also used in state updater descriptions.
    # The resulting code should be identical when replacing x by v (and ..._x by
    # ..._v)
    for varname in ['x', 'f', 'g']:
        eqs_v = Equations('dv/dt = -v / (1 * second) : 1')
        eqs_var = Equations('d{varname}/dt = -{varname} / (1 * second) : 1'.format(varname=varname))
        for integrator in [linear, euler, rk2, rk4]:
            code_v = integrator(eqs_v)
            code_var = integrator(eqs_var)
            # Re-substitute the variable names in the output:
            # whole-word occurrences of the variable itself ...
            code_var = re.sub(r'\b{varname}\b'.format(varname=varname),
                              'v', code_var)
            # ... and suffixed helper names like `_x2` -> `_v2` equivalents.
            code_var = re.sub(r'\b(\w*)_{varname}\b'.format(varname=varname),
                              r'\1_v', code_var)
            assert code_var == code_v
def test_priority():
    '''
    Test which integration methods report being able to integrate given
    equations via ``can_integrate``, using a minimal fake clock and a small
    ``variables`` dictionary.
    '''
    updater = ExplicitStateUpdater('x_new = x + dt * f(x, t)')
    # Equations that work for the state updater
    eqs = Equations('dv/dt = -v / (10*ms) : 1')
    # Fake clock class
    MyClock = namedtuple('MyClock', ['t_', 'dt_'])
    clock = MyClock(t_=0, dt_=0.0001)
    variables = {'v': ArrayVariable('v', Unit(1), None, constant=False),
                 't': AttributeVariable(second, clock, 't_', constant=False),
                 'dt': AttributeVariable(second, clock, 'dt_', constant=True)}
    assert updater.can_integrate(eqs, variables)

    # Non-constant parameter in the coefficient, linear integration does not
    # work
    eqs = Equations('''dv/dt = -param * v / (10*ms) : 1
                       param : 1''')
    variables['param'] = ArrayVariable('param', Unit(1), None, constant=False)
    assert updater.can_integrate(eqs, variables)
    can_integrate = {linear: False, euler: True, rk2: True, rk4: True,
                     milstein: True}
    # FIX: use items() instead of the Python-2-only iteritems() — identical
    # iteration behaviour on Python 2, and works on Python 3 as well.
    for integrator, able in can_integrate.items():
        assert integrator.can_integrate(eqs, variables) == able

    # Constant parameter in the coefficient, linear integration should
    # work
    eqs = Equations('''dv/dt = -param * v / (10*ms) : 1
                       param : 1 (constant)''')
    variables['param'] = ArrayVariable('param', Unit(1), None, constant=True)
    assert updater.can_integrate(eqs, variables)
    can_integrate = {linear: True, euler: True, rk2: True, rk4: True,
                     milstein: True}
    del variables['param']
    for integrator, able in can_integrate.items():
        assert integrator.can_integrate(eqs, variables) == able

    # External parameter in the coefficient, linear integration *should* work
    # (external parameters don't change during a run)
    param = 1
    eqs = Equations('dv/dt = -param * v / (10*ms) : 1')
    assert updater.can_integrate(eqs, variables)
    can_integrate = {linear: True, euler: True, rk2: True, rk4: True,
                     milstein: True}
    for integrator, able in can_integrate.items():
        assert integrator.can_integrate(eqs, variables) == able

    # Equation with additive noise
    eqs = Equations('dv/dt = -v / (10*ms) + xi/(10*ms)**.5 : 1')
    assert not updater.can_integrate(eqs, variables)
    can_integrate = {linear: False, euler: True, rk2: False, rk4: False,
                     milstein: True}
    for integrator, able in can_integrate.items():
        assert integrator.can_integrate(eqs, variables) == able

    # Equation with multiplicative noise
    eqs = Equations('dv/dt = -v / (10*ms) + v*xi/(10*ms)**.5 : 1')
    assert not updater.can_integrate(eqs, variables)
    can_integrate = {linear: False, euler: False, rk2: False, rk4: False,
                     milstein: True}
    for integrator, able in can_integrate.items():
        assert integrator.can_integrate(eqs, variables) == able
def test_registration():
    '''
    Test state updater registration.
    '''
    # Snapshot the registry so the test leaves global state untouched.
    registered_before = list(StateUpdateMethod.stateupdaters)

    lazy_updater = ExplicitStateUpdater('x_new = x')
    StateUpdateMethod.register('lazy', lazy_updater)

    # Re-registering under an existing name must be rejected.
    assert_raises(ValueError,
                  lambda: StateUpdateMethod.register('lazy', lazy_updater))
    # Registering an object that is not a state updater must be rejected.
    assert_raises(ValueError,
                  lambda: StateUpdateMethod.register('foo', 'just a string'))
    # A non-integer index must be rejected.
    assert_raises(TypeError,
                  lambda: StateUpdateMethod.register('foo', lazy_updater,
                                                     index='not an index'))

    # Restore the registry exactly as it was before the test.
    StateUpdateMethod.stateupdaters = registered_before
def test_determination():
    '''
    Test the determination of suitable state updaters.

    Covers explicit selection (by object and by name), warning behaviour for
    ill-suited methods, and the automatic choice heuristics. Mutates and then
    restores the global StateUpdateMethod registry.
    '''
    # To save some typing
    determine_stateupdater = StateUpdateMethod.determine_stateupdater
    # Save state before tests
    before = list(StateUpdateMethod.stateupdaters)

    eqs = Equations('dv/dt = -v / (10*ms) : 1')
    # Just make sure that state updaters know about the two state variables
    variables = {'v': Variable(unit=None), 'w': Variable(unit=None)}

    # all methods should work for these equations.
    # First, specify them explicitly (using the object)
    for integrator in (linear, independent, euler, exponential_euler,
                       rk2, rk4, milstein):
        with catch_logs() as logs:
            returned = determine_stateupdater(eqs, variables,
                                              method=integrator)
            assert returned is integrator
            assert len(logs) == 0

    # Equation with multiplicative noise, only milstein should work without
    # a warning
    eqs = Equations('dv/dt = -v / (10*ms) + v*xi*second**-.5: 1')
    for integrator in (linear, independent, euler, exponential_euler, rk2, rk4):
        with catch_logs() as logs:
            returned = determine_stateupdater(eqs, variables,
                                              method=integrator)
            assert returned is integrator
            # We should get a warning here
            assert len(logs) == 1

    with catch_logs() as logs:
        returned = determine_stateupdater(eqs, variables,
                                          method=milstein)
        assert returned is milstein
        # No warning here
        assert len(logs) == 0

    # Arbitrary functions (converting equations into abstract code) should
    # always work
    my_stateupdater = lambda eqs: 'x_new = x'
    with catch_logs() as logs:
        returned = determine_stateupdater(eqs, variables,
                                          method=my_stateupdater)
        assert returned is my_stateupdater
        # No warning here
        assert len(logs) == 0

    # Specification with names
    eqs = Equations('dv/dt = -v / (10*ms) : 1')
    for name, integrator in [('linear', linear), ('euler', euler),
                             ('independent', independent),
                             ('exponential_euler', exponential_euler),
                             ('rk2', rk2), ('rk4', rk4),
                             ('milstein', milstein)]:
        with catch_logs() as logs:
            returned = determine_stateupdater(eqs, variables,
                                              method=name)
            assert returned is integrator
            # No warning here
            assert len(logs) == 0

    # Now all except milstein should refuse to work
    eqs = Equations('dv/dt = -v / (10*ms) + v*xi*second**-.5: 1')
    for name in ['linear', 'independent', 'euler', 'exponential_euler',
                 'rk2', 'rk4']:
        assert_raises(ValueError, lambda: determine_stateupdater(eqs,
                                                                 variables,
                                                                 method=name))
    # milstein should work
    with catch_logs() as logs:
        determine_stateupdater(eqs, variables, method='milstein')
        assert len(logs) == 0

    # non-existing name
    assert_raises(ValueError, lambda: determine_stateupdater(eqs,
                                                             variables,
                                                             method='does_not_exist'))

    # Automatic state updater choice should return linear for linear equations,
    # euler for non-linear, non-stochastic equations and equations with
    # additive noise, milstein for equations with multiplicative noise
    eqs = Equations('dv/dt = -v / (10*ms) : 1')
    assert determine_stateupdater(eqs, variables) is linear

    # This is conditionally linear
    eqs = Equations('''dv/dt = -(v + w**2)/ (10*ms) : 1
                       dw/dt = -w/ (10*ms) : 1''')
    assert determine_stateupdater(eqs, variables) is exponential_euler

    eqs = Equations('dv/dt = sin(t) / (10*ms) : 1')
    assert determine_stateupdater(eqs, variables) is independent

    eqs = Equations('dv/dt = -sqrt(v) / (10*ms) : 1')
    assert determine_stateupdater(eqs, variables) is euler

    eqs = Equations('dv/dt = -v / (10*ms) + 0.1*second**-.5*xi: 1')
    assert determine_stateupdater(eqs, variables) is euler

    eqs = Equations('dv/dt = -v / (10*ms) + v*0.1*second**-.5*xi: 1')
    assert determine_stateupdater(eqs, variables) is milstein

    # remove all registered state updaters --> automatic choice no longer works
    StateUpdateMethod.stateupdaters = {}
    assert_raises(ValueError, lambda: determine_stateupdater(eqs, variables))

    # reset to state before the test
    StateUpdateMethod.stateupdaters = before
def test_static_equations():
    '''
    Make sure that the integration of a (non-stochastic) differential equation
    does not depend on whether it's formulated using static equations.
    '''
    # no static equation
    eqs1 = 'dv/dt = (-v + sin(2*pi*100*Hz*t)) / (10*ms) : 1'
    # same with static equation
    eqs2 = '''dv/dt = I / (10*ms) : 1
              I = -v + sin(2*pi*100*Hz*t): 1'''
    methods = ['euler', 'exponential_euler', 'rk2', 'rk4']
    for method in methods:
        # Two groups with mathematically identical dynamics, same initial state
        G1 = NeuronGroup(1, eqs1, clock=Clock(), method=method)
        G1.v = 1
        G2 = NeuronGroup(1, eqs2, clock=Clock(), method=method)
        G2.v = 1
        mon1 = StateMonitor(G1, 'v', record=True)
        mon2 = StateMonitor(G2, 'v', record=True)
        net1 = Network(G1, mon1)
        net2 = Network(G2, mon2)
        net1.run(10*ms)
        net2.run(10*ms)
        # Both formulations must produce identical traces for every method.
        assert_equal(mon1.v, mon2.v, 'Results for method %s differed!' % method)
# Run the whole suite when executed directly (no test runner required).
if __name__ == '__main__':
    test_determination()
    test_explicit_stateupdater_parsing()
    test_str_repr()
    test_integrator_code()
    test_priority()
    test_registration()
    test_static_equations()
| 41.759146
| 136
| 0.586041
|
4a07ecda8bbc7a8268655561e8eb30c12ef76b89
| 869
|
py
|
Python
|
test/test_new_sample.py
|
apsinha-equinix/controlm-client
|
f24e0f935c82306074f4e4025cf62c217348dc3f
|
[
"MIT"
] | 1
|
2021-12-02T08:49:25.000Z
|
2021-12-02T08:49:25.000Z
|
test/test_new_sample.py
|
apsinha-equinix/controlm-client
|
f24e0f935c82306074f4e4025cf62c217348dc3f
|
[
"MIT"
] | null | null | null |
test/test_new_sample.py
|
apsinha-equinix/controlm-client
|
f24e0f935c82306074f4e4025cf62c217348dc3f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.18.3
Contact: support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_client
from controlm_client.models.new_sample import NewSample # noqa: E501
from controlm_client.rest import ApiException
class TestNewSample(unittest.TestCase):
    """NewSample unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures needed yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testNewSample(self):
        """Test NewSample"""
        # FIXME: construct object with mandatory attributes with example values
        # model = controlm_client.models.new_sample.NewSample()  # noqa: E501
        pass
# Allow running the stubs directly: `python test_new_sample.py`.
if __name__ == '__main__':
    unittest.main()
| 21.195122
| 79
| 0.693901
|
4a07edcefd70c79257800ac117ce058ecced05f7
| 1,536
|
py
|
Python
|
tests/test_clisops_subset.py
|
agstephens/daops-tester
|
e4aa873a471a72ea882b91d0cb9ecf514d1a651e
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_clisops_subset.py
|
agstephens/daops-tester
|
e4aa873a471a72ea882b91d0cb9ecf514d1a651e
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_clisops_subset.py
|
agstephens/daops-tester
|
e4aa873a471a72ea882b91d0cb9ecf514d1a651e
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
import xarray as xr
from daops_tester.clisops_loader import cops
# Module-level dataset handle; populated once by setup_module() before tests run.
ds = None

def setup_module():
    """Open the CMIP5 'tos' test dataset shared by all tests in this module."""
    global ds
    # use_cftime=True keeps non-standard model calendars intact as cftime objects.
    # NOTE(review): hard-coded BADC archive path — these tests only run on JASMIN-like hosts.
    ds = xr.open_dataset('/badc/cmip5/data/cmip5/output1/MPI-M/MPI-ESM-LR/esmControl/day/ocean/day/r1i1p1/latest/tos/tos_day_MPI-ESM-LR_esmControl_r1i1p1_18500101-18591231.nc', use_cftime=True)
def _stat(x, stat, is_time=False):
r = eval(f'x.values.{stat}()')
if is_time:
r = r.strftime()
return r
def _min(x, is_time=False):
    """Minimum of ``x.values``; strftime-formatted when ``is_time``."""
    return _stat(x, 'min', is_time)
def _max(x, is_time=False):
    """Maximum of ``x.values``; strftime-formatted when ``is_time``."""
    return _stat(x, 'max', is_time)
def test_clisops_subset_bbox_i_j_grid():
    """Subset the tos field by lon/lat box and time range; check the result
    stays within (slightly widened) bounds."""
    res = cops.subset_bbox(ds['tos'], lon_bnds=(55, 88), lat_bnds=(-10, 20), start_date='1850-01-01T12:00:00', end_date='1850-04-01T12:00:00')
    lat = res.lat
    lon = res.lon
    tm = res.time
    # Bounds below are 5 degrees wider than the request — presumably to
    # tolerate curvilinear (i/j) grid cell overshoot; confirm with clisops.
    assert(_min(lat) > -15 and _max(lat) < 25)
    assert(_min(lon) > 50 and _max(lon) < 95)
    # Time bounds must match the request exactly.
    assert(_min(tm, True) == '1850-01-01 12:00:00' and _max(tm, True) == '1850-04-01 12:00:00')
@pytest.mark.xfail(reason="Subset crossing Greenwich Meridian when data is 0-360 not implemented.")
def test_clisops_subset_bbox_i_j_grid_fail_over_0_longitude():
    # Requests a box spanning the 0-degree meridian on a 0-360 dataset;
    # marked xfail until clisops supports wrap-around subsetting. The prints
    # document the (currently wrong) longitude range for debugging.
    res = cops.subset_bbox(ds['tos'], lon_bnds=(-50, 50), lat_bnds=(-10, 20), start_date='1850-01-01T12:00:00', end_date='1850-04-01T12:00:00')
    print(res.lat.min(), res.lat.max())
    print(res.lon.min(), res.lon.max())
    print(res.time.min(), res.time.max())
    print('LON IS WRONG IN SECOND ONE!')
def teardown_module():
    # Release the file handle of the dataset opened in setup_module().
    ds.close()
| 27.428571
| 193
| 0.669922
|
4a07ee8ec987003ae5be5cc0efe0727535c33b8c
| 1,223
|
py
|
Python
|
test/test_analytics_dto.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
test/test_analytics_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
test/test_analytics_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import memsource_cli
from memsource_cli.models.analytics_dto import AnalyticsDto # noqa: E501
from memsource_cli.rest import ApiException
class TestAnalyticsDto(unittest.TestCase):
    """AnalyticsDto unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures needed yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testAnalyticsDto(self):
        """Test AnalyticsDto"""
        # FIXME: construct object with mandatory attributes with example values
        # model = memsource_cli.models.analytics_dto.AnalyticsDto()  # noqa: E501
        pass
# Allow running the stubs directly: `python test_analytics_dto.py`.
if __name__ == '__main__':
    unittest.main()
| 29.829268
| 421
| 0.722813
|
4a07eeee49b2a23d19d1d2b7103aea4fc3b48620
| 1,796
|
py
|
Python
|
tests/test_lookup.py
|
GearPlug/python-rest-api
|
f8695a5f5b6f13ebf47da2fe623da9d5ef62781e
|
[
"BSD-2-Clause"
] | 57
|
2015-04-07T21:20:12.000Z
|
2022-03-30T17:27:48.000Z
|
tests/test_lookup.py
|
GearPlug/python-rest-api
|
f8695a5f5b6f13ebf47da2fe623da9d5ef62781e
|
[
"BSD-2-Clause"
] | 33
|
2015-09-24T21:29:48.000Z
|
2022-02-11T22:17:29.000Z
|
tests/test_lookup.py
|
GearPlug/python-rest-api
|
f8695a5f5b6f13ebf47da2fe623da9d5ef62781e
|
[
"BSD-2-Clause"
] | 59
|
2015-01-08T13:00:13.000Z
|
2022-03-09T16:55:46.000Z
|
import unittest
from unittest.mock import Mock
from messagebird import Client
class TestLookup(unittest.TestCase):
    """Exercise Client.lookup* against a mocked HTTP transport."""

    def test_lookup(self):
        # The transport mock returns a canned lookup payload; the client
        # must hit the right endpoint and parse the number type.
        transport = Mock()
        transport.request.return_value = '{"href": "https://rest.messagebird.com/lookup/31612345678","countryCode": "NL","countryPrefix": 31,"phoneNumber": 31612345678,"type": "mobile","formats": {"e164": "+31612345678","international": "+31 6 12345678","national": "06 12345678","rfc3966": "tel:+31-6-12345678"},"hlr": {"id": "hlr-id","network": 20416,"reference": "reference2000","status": "active","createdDatetime": "2015-12-15T08:19:24+00:00","statusDatetime": "2015-12-15T08:19:25+00:00"}}'

        result = Client('', transport).lookup('0612345678', {'countryCode': 'NL'})

        transport.request.assert_called_once_with('lookup/0612345678', 'GET', {'countryCode': 'NL'})
        self.assertEqual('mobile', result.type)

    def test_lookup_hlr(self):
        # Canned HLR payload; verify endpoint, params and parsed status.
        transport = Mock()
        transport.request.return_value = '{"id": "hlr-id","network": 20416,"reference": "reference2000","status": "active","createdDatetime": "2015-12-15T08:19:24+00:00","statusDatetime": "2015-12-15T08:19:25+00:00"}'

        result = Client('', transport).lookup_hlr(31612345678, {'reference': 'reference2000'})

        transport.request.assert_called_once_with('lookup/31612345678/hlr', 'GET', {'reference': 'reference2000'})
        self.assertEqual(result.status, 'active')

    def test_lookup_hlr_create(self):
        # Creation returns an empty body; only the POST call itself matters.
        transport = Mock()
        transport.request.return_value = '{}'

        Client('', transport).lookup_hlr_create(31612345678, {'reference': 'MyReference'})

        transport.request.assert_called_once_with('lookup/31612345678/hlr', 'POST', {'reference': 'MyReference'})
| 49.888889
| 498
| 0.686526
|
4a07efad7652b4ea86064c0c579682f81906caa7
| 320
|
py
|
Python
|
src/django_bootstrap/widgets.py
|
zostera/django-bootstrap
|
3ccccd1a781afd2c160b97ae3d15f6f715901991
|
[
"BSD-3-Clause"
] | 2
|
2021-10-05T14:07:16.000Z
|
2021-10-08T12:56:11.000Z
|
src/django_bootstrap/widgets.py
|
zostera/django-bootstrap
|
3ccccd1a781afd2c160b97ae3d15f6f715901991
|
[
"BSD-3-Clause"
] | 84
|
2021-02-17T06:58:37.000Z
|
2022-03-31T23:07:27.000Z
|
src/django_bootstrap/widgets.py
|
zostera/django-bootstrap
|
3ccccd1a781afd2c160b97ae3d15f6f715901991
|
[
"BSD-3-Clause"
] | null | null | null |
from django.forms import RadioSelect
class RadioSelectButtonGroup(RadioSelect):
    """Radio input rendered as a horizontal Bootstrap 4 button group.

    Behaves exactly like ``RadioSelect`` but swaps in a template that
    draws the choices as a row of buttons, which is much friendlier on
    mobile screens than plain radio circles.
    """

    # Only the template changes; all widget behavior is inherited.
    template_name = "django_bootstrap/widgets/radio_select_button_group.html"
| 26.666667
| 100
| 0.740625
|
4a07efd6c47eee8be176115c6f526a77a874f5cd
| 16,169
|
py
|
Python
|
snake.py
|
JehunYoo/SnakeRL
|
3635e2e5bcd6d3147cafa90e57471dbc5587d49a
|
[
"MIT"
] | null | null | null |
snake.py
|
JehunYoo/SnakeRL
|
3635e2e5bcd6d3147cafa90e57471dbc5587d49a
|
[
"MIT"
] | null | null | null |
snake.py
|
JehunYoo/SnakeRL
|
3635e2e5bcd6d3147cafa90e57471dbc5587d49a
|
[
"MIT"
] | null | null | null |
'''
snake game module
valid coordinate : ([0, 380], [0, 380])
'''
# NOTE(review): this region contained an unresolved git merge conflict
# ("<<<<<<< HEAD" / "=======" / ">>>>>>>") that duplicated the whole
# module and made the file unparseable.  Both sides were identical except
# for which candidate direction list in Snake.where() was active; the
# incoming branch's weighted list is kept active below and the other is
# preserved as a comment.
import pygame
from pygame.locals import *
import random, time
import queue
successes, failures = pygame.init()
print("{0} successes and {1} failures".format(successes, failures))
assert not failures, "pygame initialization error"
# hyperparameter control (GAME)
## screen and objects
SCREEN_SIZE = 400
FPS = 60 # how many frames we update per second.
SIZE = 20
STEP = 20
## colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
class Snake():
    """Snake game driven either by keyboard events or by a direction
    generator (eventually a genetic neural network, ``gnn``)."""
    def __init__(self, gnn):
        # snake
        self.x = 20 * random.randint(0, 19) # x head of snake
        self.y = 20 * random.randint(0, 19) # y head of snake
        self.prey_x = None # x of prey
        self.prey_y = None # y of prey
        self.length = 1 # length of snake
        self.go = 100 # go nowhere (initial value)
        self.gone = 100 # gone nowhere (initial value)
        self.edible = False # whether space of snake's head is same as space of prey
        self.space = [(self.x, self.y)] # queue | the space that the snake takes
        self.rect = [] # list of pygame.Rect instances
        self.surf = [] # list of pygame.Surface instances
        self.wall = None # tuple of distance from the wall to the head of the snake
        self.prey = None # tuple of distance from the prey to the head of the snake
        self.__START = True # private variable | whether the game just started
        # screen
        self.screen = None
        self.clock = None
        # objects
        self.snakeRect = None # pygame.Rect | head of snake
        self.snakeSurf = None # pygame.Surface | head of snake
        self.preyRect = None # pygame.Rect | prey
        self.preySurf = None # pygame.Surface | prey
        # genetic neural network
        self.gnn = gnn
    def __str__(self, score=False, length=False, pos=False, space=False):
        info = ''
        if score or length:
            info += 'score : {0} '.format(self.length)
        if pos:
            info += '(x,y) : ({0},{1}) '.format(self.x, self.y)
        if space:
            info += 'space : {0}'
        return info.strip()
    def __call__(self):
        pass
    def update(self, delta_x=None, delta_y=None):
        """Move the head by (delta_x, delta_y) and refresh derived state
        (occupied spaces, wall distances, prey offsets)."""
        if delta_x is not None:
            self.x += delta_x
        if delta_y is not None:
            self.y += delta_y
        self.space.insert(0, (self.x, self.y))
        if not self.edible:
            # no food eaten: the tail moves forward with the head
            self.space.pop()
        elif self.edible:
            # food eaten this step: keep the tail, so the snake grows by one
            self.edible = False
        self.wall = ((self.x - 0) / STEP, (self.x - SCREEN_SIZE + SIZE) / STEP,
                     (self.y - 0) / STEP, (self.y - SCREEN_SIZE + SIZE) / STEP)
        self.prey = ((self.prey_x - self.x) / STEP, (self.prey_y - self.y) / STEP)
    def info(self):
        print(self.length, self.space, self.wall)
    def move(self):
        """Rebuild the Rect/Surface lists from the occupied spaces
        (green head, blue tail, white body)."""
        self.rect = []
        self.surf = []
        for idx, sp in enumerate(self.space):
            self.rect.append(pygame.Rect((sp[0], sp[1]), (SIZE, SIZE)))
            self.surf.append(pygame.Surface((SIZE, SIZE)))
            if idx == 0:
                self.surf[idx].fill(GREEN)
            elif idx == len(self.space) - 1:
                self.surf[idx].fill(BLUE)
            else:
                self.surf[idx].fill(WHITE)
    def eat(self):
        self.length += 1
        self.edible = True
    def where(self): # FIXME generator? or just method?
        """Yield the next direction code (1 RIGHT, -1 LEFT, 2 UP, -2 DOWN)."""
        if True:
            # a = [1, 2, -1, -2] * 100
            a = [1,1,1,1,1,2,2,2,2,2,-1,-1,-1,-1,-1,-2,-2,-2,-2,-2] * 100
            yield from a
        if False:
            # dormant NN-driven branch; fixed undefined name `gnn` -> `self.gnn`
            status = self.wall + self.prey
            return self.gnn.predict(status)
    def play(self):
        """Main game loop: handle input, advance the snake, spawn prey,
        check end conditions and redraw the screen."""
        if self.__START:
            # screen
            self.screen = pygame.display.set_mode((SCREEN_SIZE, SCREEN_SIZE))
            pygame.display.set_caption('snake game')
            self.clock = pygame.time.Clock()
            # objects
            self.snakeRect = pygame.Rect((self.x, self.y), (SIZE, SIZE))
            self.snakeSurf = pygame.Surface((SIZE, SIZE))
            self.snakeSurf.fill(GREEN)
            self.preyRect = None
            self.preySurf = pygame.Surface((SIZE, SIZE))
            self.preySurf.fill(RED)
        while True:
            self.clock.tick(FPS)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    quit()
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        quit()
                    elif event.key == pygame.K_UP:
                        self.snakeRect.move_ip(0, -STEP)
                        self.update(delta_y=-STEP)
                    elif event.key == pygame.K_DOWN:
                        self.snakeRect.move_ip(0, STEP)
                        self.update(delta_y=STEP)
                    elif event.key == pygame.K_LEFT:
                        self.snakeRect.move_ip(-STEP, 0)
                        self.update(delta_x=-STEP)
                    elif event.key == pygame.K_RIGHT:
                        self.snakeRect.move_ip(STEP, 0)
                        self.update(delta_x=STEP)
            if not self.__START:
                if self.go == 2: # go UP
                    self.snakeRect.move_ip(0, -STEP) # changes the self.snakeRect's position
                    self.update(delta_y=-STEP)
                elif self.go == -2: # go DOWN
                    self.snakeRect.move_ip(0, STEP)
                    self.update(delta_y=STEP)
                elif self.go == -1: # go LEFT
                    self.snakeRect.move_ip(-STEP, 0)
                    self.update(delta_x=-STEP)
                elif self.go == 1: # go RIGHT
                    self.snakeRect.move_ip(STEP, 0)
                    self.update(delta_x=STEP)
                else:
                    pass
            self.info()
            # game end rule
            if (not 0 <= self.x <= (SCREEN_SIZE - SIZE)) or (not 0 <= self.y <= (SCREEN_SIZE - SIZE)):
                print('out of boundary')
                quit()
            elif (self.x, self.y) in self.space[1:] or (self.length > 1 and self.go + self.gone == 0):
                print('it\'s you!')
                quit()
            elif self.length == 400:
                print('you win!')
                quit()
            # prey: (re)spawn on start or after it was eaten, avoiding the snake
            if self.__START == True or (self.prey_x, self.prey_y) == (self.x, self.y):
                self.prey_x = SIZE * random.randint(0, int((SCREEN_SIZE - SIZE) / STEP))
                self.prey_y = SIZE * random.randint(0, int((SCREEN_SIZE - SIZE) / STEP))
                while (self.prey_x, self.prey_y) in self.space:
                    self.prey_x = SIZE * random.randint(0, int((SCREEN_SIZE - SIZE) / STEP))
                    self.prey_y = SIZE * random.randint(0, int((SCREEN_SIZE - SIZE) / STEP))
                self.preyRect = pygame.Rect((self.prey_x, self.prey_y), (SIZE, SIZE))
                if self.__START:
                    self.__START = False
                    where = self.where() # generator
                elif not self.__START:
                    self.eat()
            # update direction of snake
            self.gone = self.go
            self.go = next(where)
            # time.sleep(0.25)
            # screen display
            self.screen.fill(BLACK)
            self.screen.blit(self.snakeSurf, self.snakeRect)
            self.move()
            for surf, rect in zip(self.surf, self.rect):
                self.screen.blit(surf, rect)
            self.screen.blit(self.preySurf, self.preyRect)
            pygame.display.update()
| 37.25576
| 103
| 0.491991
|
4a07f0dd85788380bc5d5b27866b98bd54dad402
| 31,901
|
py
|
Python
|
saleor/graphql/product/types/products.py
|
elwoodxblues/saleor
|
5e4e4a4259a011d24b04ebd24c77c689de843fa1
|
[
"CC-BY-4.0"
] | 8
|
2018-07-17T13:13:21.000Z
|
2022-03-01T17:02:34.000Z
|
saleor/graphql/product/types/products.py
|
elwoodxblues/saleor
|
5e4e4a4259a011d24b04ebd24c77c689de843fa1
|
[
"CC-BY-4.0"
] | 1
|
2021-03-10T07:55:59.000Z
|
2021-03-10T07:55:59.000Z
|
saleor/graphql/product/types/products.py
|
elwoodxblues/saleor
|
5e4e4a4259a011d24b04ebd24c77c689de843fa1
|
[
"CC-BY-4.0"
] | 1
|
2019-12-04T22:24:13.000Z
|
2019-12-04T22:24:13.000Z
|
from typing import List, Union
import graphene
import graphene_django_optimizer as gql_optimizer
from django.db.models import Prefetch
from graphene import relay
from graphql.error import GraphQLError
from ....product import models
from ....product.templatetags.product_images import (
get_product_image_thumbnail,
get_thumbnail,
)
from ....product.utils import calculate_revenue_for_variant
from ....product.utils.availability import (
get_product_availability,
get_variant_availability,
)
from ....product.utils.costs import get_margin_for_variant, get_product_costs_data
from ...core.connection import CountableDjangoObjectType
from ...core.enums import ReportingPeriod, TaxRateType
from ...core.fields import FilterInputConnectionField, PrefetchingConnectionField
from ...core.resolvers import resolve_meta, resolve_private_meta
from ...core.types import (
Image,
MetadataObjectType,
Money,
MoneyRange,
TaxedMoney,
TaxedMoneyRange,
TaxType,
)
from ...decorators import permission_required
from ...translations.enums import LanguageCodeEnum
from ...translations.resolvers import resolve_translation
from ...translations.types import (
CategoryTranslation,
CollectionTranslation,
ProductTranslation,
ProductVariantTranslation,
)
from ...utils import get_database_id, reporting_period_to_date
from ..enums import OrderDirection, ProductOrderField
from ..filters import AttributeFilterInput
from ..resolvers import resolve_attributes
from .attributes import Attribute, SelectedAttribute
from .digital_contents import DigitalContent
def prefetch_products(info, *_args, **_kwargs):
    """Build a permission-filtered Prefetch of the `products` relation.

    Intended for models that expose a ``products`` relationship.  The
    prefetched queryset is restricted by the requesting user's
    permissions, so non-staff users never see unpublished products.
    Results are attached as ``prefetched_products``.
    """
    visible = models.Product.objects.visible_to_user(info.context.user)
    optimized = gql_optimizer.query(visible, info)
    return Prefetch("products", queryset=optimized, to_attr="prefetched_products")
def prefetch_products_collection_sorted(info, *_args, **_kwargs):
    """Build a Prefetch of `products` in collection sort order.

    Same contract as ``prefetch_products`` (results land on
    ``prefetched_products``), but the queryset comes from
    ``collection_sorted`` so products keep their collection ordering.
    """
    ordered = models.Product.objects.collection_sorted(info.context.user)
    optimized = gql_optimizer.query(ordered, info)
    return Prefetch("products", queryset=optimized, to_attr="prefetched_products")
def resolve_attribute_list(
    instance: Union[models.Product, models.ProductVariant], *, user
) -> List[SelectedAttribute]:
    """Build the `SelectedAttribute` list for a product or a variant.

    Note: the relevant M2M relations must be prefetched by the caller:
    - product_type -> attribute[rel] -> [rel]assignments -> values
    - product_type -> attribute[rel] -> attribute
    """
    # Pick the relation names and filter matching the concrete model.
    if isinstance(instance, models.Product):
        product_type = instance.product_type
        assoc_field = "attributeproduct"
        assignment_field = "productassignments"
        instance_filters = {"product_id": instance.pk}
    elif isinstance(instance, models.ProductVariant):
        product_type = instance.product.product_type
        assoc_field = "attributevariant"
        assignment_field = "variantassignments"
        instance_filters = {"variant_id": instance.pk}
    else:
        raise AssertionError(f"{instance.__class__.__name__} is unsupported")
    # All attributes of the product type that this user is allowed to see.
    visible_attributes = getattr(product_type, assoc_field).get_visible_to_user(user)
    # Placeholder queryset for attributes with no values on this instance.
    no_values = models.AttributeValue.objects.none()
    selection = []
    for assoc in visible_attributes:
        # Find this instance's assignment for the attribute, if any.
        assignment = (
            getattr(assoc, assignment_field).filter(**instance_filters).first()
        )
        values = no_values if assignment is None else assignment.values.all()
        selection.append(
            SelectedAttribute(attribute=assoc.attribute, values=values)
        )
    return selection
class ProductOrder(graphene.InputObjectType):
    # GraphQL input type: how a product list should be sorted (which
    # field and in which direction).
    field = graphene.Argument(
        ProductOrderField,
        required=True,
        description="Sort products by the selected field.",
    )
    direction = graphene.Argument(
        OrderDirection,
        required=True,
        description="Specifies the direction in which to sort products",
    )
class Margin(graphene.ObjectType):
    # Gross-margin percentage bounds (start = lowest, stop = highest).
    start = graphene.Int()
    stop = graphene.Int()
class BasePricingInfo(graphene.ObjectType):
    # Pricing fields shared by VariantPricingInfo and ProductPricingInfo.
    available = graphene.Boolean(
        description="Whether it is in stock and visible or not.",
        deprecation_reason=(
            "DEPRECATED: Will be removed in Saleor 2.10, "
            "this has been moved to the parent type as 'isAvailable'."
        ),
    )
    on_sale = graphene.Boolean(description="Whether it is in sale or not.")
    discount = graphene.Field(
        TaxedMoney, description="The discount amount if in sale (null otherwise)."
    )
    discount_local_currency = graphene.Field(
        TaxedMoney, description="The discount amount in the local currency."
    )
class VariantPricingInfo(BasePricingInfo):
    # Storefront pricing for a single variant; populated from the
    # namedtuple returned by get_variant_availability (see ProductVariant).
    discount_local_currency = graphene.Field(
        TaxedMoney, description="The discount amount in the local currency."
    )
    price = graphene.Field(
        TaxedMoney, description="The price, with any discount subtracted."
    )
    price_undiscounted = graphene.Field(
        TaxedMoney, description="The price without any discount."
    )
    price_local_currency = graphene.Field(
        TaxedMoney, description="The discounted price in the local currency."
    )
    class Meta:
        description = "Represents availability of a variant in the storefront."
class ProductPricingInfo(BasePricingInfo):
    # Storefront pricing for a whole product: ranges spanning all its
    # variants; populated from get_product_availability (see Product).
    price_range = graphene.Field(
        TaxedMoneyRange,
        description="The discounted price range of the product variants.",
    )
    price_range_undiscounted = graphene.Field(
        TaxedMoneyRange,
        description="The undiscounted price range of the product variants.",
    )
    price_range_local_currency = graphene.Field(
        TaxedMoneyRange,
        description=(
            "The discounted price range of the product variants "
            "in the local currency."
        ),
    )
    class Meta:
        description = "Represents availability of a product in the storefront."
class ProductVariant(CountableDjangoObjectType, MetadataObjectType):
    # GraphQL type for one sellable variant of a Product.  The
    # `description` strings below are part of the public schema; the
    # resolvers follow graphene's `resolve_<field>` convention.
    stock_quantity = graphene.Int(
        required=True, description="Quantity of a product available for sale."
    )
    price_override = graphene.Field(
        Money,
        description="""
               Override the base price of a product if necessary.
               A value of `null` indicates that the default product
               price is used.""",
    )
    price = graphene.Field(
        Money,
        description="Price of the product variant.",
        deprecation_reason=(
            "DEPRECATED: Will be removed in Saleor 2.10, "
            "has been replaced by 'pricing.priceUndiscounted'"
        ),
    )
    availability = graphene.Field(
        VariantPricingInfo,
        description="""Informs about variant's availability in the
               storefront, current price and discounted price.""",
        deprecation_reason=(
            "DEPRECATED: Will be removed in Saleor 2.10, "
            "has been renamed to 'pricing'."
        ),
    )
    pricing = graphene.Field(
        VariantPricingInfo,
        description=(
            """Lists the storefront variant's pricing,
            the current price and discounts, only meant for displaying"""
        ),
    )
    is_available = graphene.Boolean(
        description="Whether the variant is in stock and visible or not."
    )
    attributes = gql_optimizer.field(
        graphene.List(
            graphene.NonNull(SelectedAttribute),
            required=True,
            description="List of attributes assigned to this variant.",
        )
    )
    cost_price = graphene.Field(Money, description="Cost price of the variant.")
    margin = graphene.Int(description="Gross margin percentage value.")
    quantity_ordered = graphene.Int(description="Total quantity ordered.")
    revenue = graphene.Field(
        TaxedMoney,
        period=graphene.Argument(ReportingPeriod),
        description="""Total revenue generated by a variant in given
        period of time. Note: this field should be queried using
        `reportProductSales` query as it uses optimizations suitable
        for such calculations.""",
    )
    images = gql_optimizer.field(
        graphene.List(
            lambda: ProductImage, description="List of images for the product variant"
        ),
        model_field="images",
    )
    translation = graphene.Field(
        ProductVariantTranslation,
        language_code=graphene.Argument(
            LanguageCodeEnum,
            description="A language code to return the translation for.",
            required=True,
        ),
        description=(
            "Returns translated Product Variant fields " "for the given language code."
        ),
        resolver=resolve_translation,
    )
    digital_content = gql_optimizer.field(
        graphene.Field(
            DigitalContent, description="Digital content for the product variant"
        ),
        model_field="digital_content",
    )
    class Meta:
        description = """Represents a version of a product such as
        different size or color."""
        only_fields = [
            "id",
            "name",
            "product",
            "quantity",
            "quantity_allocated",
            "sku",
            "track_inventory",
            "weight",
        ]
        interfaces = [relay.Node]
        model = models.ProductVariant
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_digital_content(root: models.ProductVariant, *_args):
        # Staff-only; None for variants without attached digital content.
        return getattr(root, "digital_content", None)
    @staticmethod
    def resolve_stock_quantity(root: models.ProductVariant, *_args):
        return root.quantity_available
    @staticmethod
    @gql_optimizer.resolver_hints(
        prefetch_related=["attributes__values", "attributes__assignment__attribute"]
    )
    def resolve_attributes(root: models.ProductVariant, info):
        return resolve_attribute_list(root, user=info.context.user)
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_margin(root: models.ProductVariant, *_args):
        return get_margin_for_variant(root)
    @staticmethod
    def resolve_price(root: models.ProductVariant, *_args):
        # Fall back to the parent product's price when no override is set.
        return (
            root.price_override
            if root.price_override is not None
            else root.product.price
        )
    @staticmethod
    @gql_optimizer.resolver_hints(
        prefetch_related=("product",), only=["price_override_amount", "currency"]
    )
    def resolve_pricing(root: models.ProductVariant, info):
        context = info.context
        availability = get_variant_availability(
            root,
            context.discounts,
            context.country,
            context.currency,
            extensions=context.extensions,
        )
        return VariantPricingInfo(**availability._asdict())
    # Deprecated `availability` field resolves exactly like `pricing`.
    resolve_availability = resolve_pricing
    @staticmethod
    def resolve_is_available(root: models.ProductVariant, _info):
        return root.is_available
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_price_override(root: models.ProductVariant, *_args):
        return root.price_override
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_quantity(root: models.ProductVariant, *_args):
        return root.quantity
    @staticmethod
    @permission_required(["order.manage_orders", "product.manage_products"])
    def resolve_quantity_ordered(root: models.ProductVariant, *_args):
        # This field is added through annotation when using the
        # `resolve_report_product_sales` resolver.
        return getattr(root, "quantity_ordered", None)
    @staticmethod
    @permission_required(["order.manage_orders", "product.manage_products"])
    def resolve_quantity_allocated(root: models.ProductVariant, *_args):
        return root.quantity_allocated
    @staticmethod
    @permission_required(["order.manage_orders", "product.manage_products"])
    def resolve_revenue(root: models.ProductVariant, *_args, period):
        start_date = reporting_period_to_date(period)
        return calculate_revenue_for_variant(root, start_date)
    @staticmethod
    def resolve_images(root: models.ProductVariant, *_args):
        return root.images.all()
    @classmethod
    def get_node(cls, info, id):
        # Restrict node lookups to variants of products visible to the user.
        user = info.context.user
        visible_products = models.Product.objects.visible_to_user(user).values_list(
            "pk", flat=True
        )
        qs = cls._meta.model.objects.filter(product__id__in=visible_products)
        return cls.maybe_optimize(info, qs, id)
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_private_meta(root, _info):
        return resolve_private_meta(root, _info)
    @staticmethod
    def resolve_meta(root, _info):
        return resolve_meta(root, _info)
class Product(CountableDjangoObjectType, MetadataObjectType):
url = graphene.String(
description="The storefront URL for the product.", required=True
)
thumbnail = graphene.Field(
Image,
description="The main thumbnail for a product.",
size=graphene.Argument(graphene.Int, description="Size of thumbnail"),
)
availability = graphene.Field(
ProductPricingInfo,
description="""Informs about product's availability in the
storefront, current price and discounts.""",
deprecation_reason=(
"DEPRECATED: Will be removed in Saleor 2.10, "
"Has been renamed to 'pricing'."
),
)
pricing = graphene.Field(
ProductPricingInfo,
description="""Lists the storefront product's pricing,
the current price and discounts, only meant for displaying.""",
)
is_available = graphene.Boolean(
description="Whether the product is in stock and visible or not."
)
base_price = graphene.Field(Money, description="The product's default base price.")
price = graphene.Field(
Money,
description="The product's default base price.",
deprecation_reason=(
"DEPRECATED: Will be removed in Saleor 2.10, "
"has been replaced by 'basePrice'"
),
)
minimal_variant_price = graphene.Field(
Money, description="The price of the cheapest variant (including discounts)."
)
tax_type = graphene.Field(
TaxType, description="A type of tax. Assigned by enabled tax gateway"
)
attributes = graphene.List(
graphene.NonNull(SelectedAttribute),
required=True,
description="List of attributes assigned to this product.",
)
purchase_cost = graphene.Field(MoneyRange)
margin = graphene.Field(Margin)
image_by_id = graphene.Field(
lambda: ProductImage,
id=graphene.Argument(graphene.ID, description="ID of a product image."),
description="Get a single product image by ID",
)
variants = gql_optimizer.field(
graphene.List(ProductVariant, description="List of variants for the product"),
model_field="variants",
)
images = gql_optimizer.field(
graphene.List(
lambda: ProductImage, description="List of images for the product"
),
model_field="images",
)
collections = gql_optimizer.field(
graphene.List(
lambda: Collection, description="List of collections for the product"
),
model_field="collections",
)
translation = graphene.Field(
ProductTranslation,
language_code=graphene.Argument(
LanguageCodeEnum,
description="A language code to return the translation for.",
required=True,
),
description=("Returns translated Product fields for the given language code."),
resolver=resolve_translation,
)
slug = graphene.String(required=True, description="The slug of a product.")
class Meta:
description = """Represents an individual item for sale in the
storefront."""
interfaces = [relay.Node]
model = models.Product
only_fields = [
"category",
"charge_taxes",
"description",
"description_json",
"id",
"is_published",
"name",
"product_type",
"publication_date",
"seo_description",
"seo_title",
"updated_at",
"weight",
]
@staticmethod
def resolve_tax_type(root: models.Product, info):
tax_data = info.context.extensions.get_tax_code_from_object_meta(root)
return TaxType(tax_code=tax_data.code, description=tax_data.description)
@staticmethod
@gql_optimizer.resolver_hints(prefetch_related="images")
def resolve_thumbnail(root: models.Product, info, *, size=None):
image = root.get_first_image()
if not size:
size = 255
url = get_product_image_thumbnail(image, size, method="thumbnail")
url = info.context.build_absolute_uri(url)
alt = image.alt if image else None
return Image(alt=alt, url=url)
@staticmethod
def resolve_url(root: models.Product, *_args):
return root.get_absolute_url()
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=("variants", "collections"),
only=["publication_date", "charge_taxes", "price_amount", "currency", "meta"],
)
def resolve_pricing(root: models.Product, info):
context = info.context
availability = get_product_availability(
root,
context.discounts,
context.country,
context.currency,
context.extensions,
)
return ProductPricingInfo(**availability._asdict())
resolve_availability = resolve_pricing
@staticmethod
def resolve_is_available(root: models.Product, _info):
return root.is_available
@staticmethod
@permission_required("product.manage_products")
def resolve_base_price(root: models.Product, _info):
return root.price
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=("variants", "collections"),
only=["publication_date", "charge_taxes", "price_amount", "currency", "meta"],
)
def resolve_price(root: models.Product, info):
price_range = root.get_price_range(info.context.discounts)
price = info.context.extensions.apply_taxes_to_product(
root, price_range.start, info.context.country
)
return price.net
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=[
"product_type__attributeproduct__productassignments__values",
"product_type__attributeproduct__attribute",
]
)
def resolve_attributes(root: models.Product, info):
return resolve_attribute_list(root, user=info.context.user)
@staticmethod
@permission_required("product.manage_products")
def resolve_purchase_cost(root: models.Product, *_args):
purchase_cost, _ = get_product_costs_data(root)
return purchase_cost
@staticmethod
@permission_required("product.manage_products")
def resolve_margin(root: models.Product, *_args):
_, margin = get_product_costs_data(root)
return Margin(margin[0], margin[1])
@staticmethod
def resolve_image_by_id(root: models.Product, info, id):
pk = get_database_id(info, id, ProductImage)
try:
return root.images.get(pk=pk)
except models.ProductImage.DoesNotExist:
raise GraphQLError("Product image not found.")
@staticmethod
@gql_optimizer.resolver_hints(model_field="images")
def resolve_images(root: models.Product, *_args, **_kwargs):
return root.images.all()
@staticmethod
def resolve_variants(root: models.Product, *_args, **_kwargs):
return root.variants.all()
    @staticmethod
    def resolve_collections(root: models.Product, *_args):
        # All collections the product belongs to, unfiltered.
        return root.collections.all()
    @classmethod
    def get_node(cls, info, pk):
        # Relay node lookup, restricted to objects visible to the
        # requesting user; returns None when there is no request context.
        if info.context:
            qs = cls._meta.model.objects.visible_to_user(info.context.user)
            return cls.maybe_optimize(info, qs, pk)
        return None
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_private_meta(root, _info):
        # Private metadata is staff-only.
        return resolve_private_meta(root, _info)
    @staticmethod
    def resolve_meta(root, _info):
        # Public metadata: no permission check.
        return resolve_meta(root, _info)
    @staticmethod
    def resolve_slug(root: models.Product, *_args):
        # Slug is computed by the model, not stored on this type.
        return root.get_slug()
class ProductType(CountableDjangoObjectType, MetadataObjectType):
    # GraphQL type for a product type: declares which attributes products and
    # variants of this type carry, plus its tax configuration.
    products = gql_optimizer.field(
        PrefetchingConnectionField(
            Product, description="List of products of this type."
        ),
        prefetch_related=prefetch_products,
    )
    tax_rate = TaxRateType(description="A type of tax rate.")
    tax_type = graphene.Field(
        TaxType, description="A type of tax. Assigned by enabled tax gateway"
    )
    variant_attributes = graphene.List(
        Attribute, description="Variant attributes of that product type."
    )
    product_attributes = graphene.List(
        Attribute, description="Product attributes of that product type."
    )
    available_attributes = gql_optimizer.field(
        FilterInputConnectionField(Attribute, filter=AttributeFilterInput())
    )

    class Meta:
        description = """Represents a type of product. It defines what
        attributes are available to products of this type."""
        interfaces = [relay.Node]
        model = models.ProductType
        only_fields = [
            "has_variants",
            "id",
            "is_digital",
            "is_shipping_required",
            "name",
            "weight",
            "tax_type",
        ]

    @staticmethod
    def resolve_tax_type(root: models.ProductType, info):
        # Tax code/description is read from object meta by the tax gateway.
        tax_data = info.context.extensions.get_tax_code_from_object_meta(root)
        return TaxType(tax_code=tax_data.code, description=tax_data.description)

    @staticmethod
    def resolve_tax_rate(root: models.ProductType, info, **_kwargs):
        # FIXME this resolver should be dropped after we drop tax_rate from API
        if not hasattr(root, "meta"):
            return None
        # Legacy vatlayer tax code stored in the meta JSON blob.
        tax = root.meta.get("taxes", {}).get("vatlayer", {})
        return tax.get("code")

    @staticmethod
    @gql_optimizer.resolver_hints(
        prefetch_related="product_attributes__attributeproduct"
    )
    def resolve_product_attributes(root: models.ProductType, *_args, **_kwargs):
        return root.product_attributes.product_attributes_sorted().all()

    @staticmethod
    @gql_optimizer.resolver_hints(
        prefetch_related="variant_attributes__attributevariant"
    )
    def resolve_variant_attributes(root: models.ProductType, *_args, **_kwargs):
        return root.variant_attributes.variant_attributes_sorted().all()

    @staticmethod
    def resolve_products(root: models.ProductType, info, **_kwargs):
        # Prefer data prefetched by the connection field; otherwise fall back
        # to a queryset filtered by user visibility.
        if hasattr(root, "prefetched_products"):
            return root.prefetched_products
        qs = root.products.visible_to_user(info.context.user)
        return gql_optimizer.query(qs, info)

    @staticmethod
    @permission_required("product.manage_products")
    def resolve_available_attributes(root: models.ProductType, info, **kwargs):
        # Attributes not yet assigned to this product type.
        qs = models.Attribute.objects.get_unassigned_attributes(root.pk)
        return resolve_attributes(info, qs=qs, **kwargs)

    @staticmethod
    # Fix: use the "product" app permission, consistent with every other
    # private-meta resolver in this schema. Django permission strings are
    # "app_label.codename"; product models live in the "product" app, so
    # "account.manage_products" can never be granted and always denied access.
    @permission_required("product.manage_products")
    def resolve_private_meta(root, _info):
        return resolve_private_meta(root, _info)

    @staticmethod
    def resolve_meta(root, _info):
        return resolve_meta(root, _info)
class Collection(CountableDjangoObjectType, MetadataObjectType):
    # GraphQL type for a curated collection of products.
    products = gql_optimizer.field(
        PrefetchingConnectionField(
            Product, description="List of products in this collection."
        ),
        prefetch_related=prefetch_products_collection_sorted,
    )
    background_image = graphene.Field(
        Image, size=graphene.Int(description="Size of the image")
    )
    translation = graphene.Field(
        CollectionTranslation,
        language_code=graphene.Argument(
            LanguageCodeEnum,
            description="A language code to return the translation for.",
            required=True,
        ),
        description=(
            "Returns translated Collection fields " "for the given language code."
        ),
        resolver=resolve_translation,
    )
    class Meta:
        description = "Represents a collection of products."
        only_fields = [
            "description",
            "description_json",
            "id",
            "is_published",
            "name",
            "publication_date",
            "seo_description",
            "seo_title",
            "slug",
        ]
        interfaces = [relay.Node]
        model = models.Collection
    @staticmethod
    def resolve_background_image(root: models.Collection, info, size=None, **_kwargs):
        # Returns None implicitly when no background image is set.
        if root.background_image:
            return Image.get_adjusted(
                image=root.background_image,
                alt=root.background_image_alt,
                size=size,
                rendition_key_set="background_images",
                info=info,
            )
    @staticmethod
    def resolve_products(root: models.Collection, info, **_kwargs):
        # Prefer data prefetched by the connection field; otherwise query
        # in collection sort order for the requesting user.
        if hasattr(root, "prefetched_products"):
            return root.prefetched_products
        qs = root.products.collection_sorted(info.context.user)
        return gql_optimizer.query(qs, info)
    @classmethod
    def get_node(cls, info, id):
        # Relay node lookup restricted to collections visible to the user.
        if info.context:
            user = info.context.user
            qs = cls._meta.model.objects.visible_to_user(user)
            return cls.maybe_optimize(info, qs, id)
        return None
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_private_meta(root, _info):
        # Private metadata is staff-only.
        return resolve_private_meta(root, _info)
    @staticmethod
    def resolve_meta(root, _info):
        return resolve_meta(root, _info)
class Category(CountableDjangoObjectType, MetadataObjectType):
    # GraphQL type for a node in the category tree.
    ancestors = PrefetchingConnectionField(
        lambda: Category, description="List of ancestors of the category."
    )
    products = gql_optimizer.field(
        PrefetchingConnectionField(
            Product, description="List of products in the category."
        ),
        prefetch_related=prefetch_products,
    )
    url = graphene.String(description="The storefront's URL for the category.")
    children = PrefetchingConnectionField(
        lambda: Category, description="List of children of the category."
    )
    background_image = graphene.Field(
        Image, size=graphene.Int(description="Size of the image")
    )
    translation = graphene.Field(
        CategoryTranslation,
        language_code=graphene.Argument(
            LanguageCodeEnum,
            description="A language code to return the translation for.",
            required=True,
        ),
        description=("Returns translated Category fields for the given language code."),
        resolver=resolve_translation,
    )
    class Meta:
        description = """Represents a single category of products.
        Categories allow to organize products in a tree-hierarchies which can
        be used for navigation in the storefront."""
        only_fields = [
            "description",
            "description_json",
            "id",
            "level",
            "name",
            "parent",
            "seo_description",
            "seo_title",
            "slug",
        ]
        interfaces = [relay.Node]
        model = models.Category
    @staticmethod
    def resolve_ancestors(root: models.Category, info, **_kwargs):
        qs = root.get_ancestors()
        return gql_optimizer.query(qs, info)
    @staticmethod
    def resolve_background_image(root: models.Category, info, size=None, **_kwargs):
        # Returns None implicitly when no background image is set.
        if root.background_image:
            return Image.get_adjusted(
                image=root.background_image,
                alt=root.background_image_alt,
                size=size,
                rendition_key_set="background_images",
                info=info,
            )
    @staticmethod
    def resolve_children(root: models.Category, info, **_kwargs):
        qs = root.children.all()
        return gql_optimizer.query(qs, info)
    @staticmethod
    def resolve_url(root: models.Category, _info):
        return root.get_absolute_url()
    @staticmethod
    def resolve_products(root: models.Category, info, **_kwargs):
        # If the category has no children, we use the prefetched data.
        children = root.children.all()
        if not children and hasattr(root, "prefetched_products"):
            return root.prefetched_products
        # Otherwise we want to include products from child categories which
        # requires performing additional logic.
        # Note: only published products are returned on this path.
        tree = root.get_descendants(include_self=True)
        qs = models.Product.objects.published()
        qs = qs.filter(category__in=tree)
        return gql_optimizer.query(qs, info)
    @staticmethod
    @permission_required("product.manage_products")
    def resolve_private_meta(root, _info):
        # Private metadata is staff-only.
        return resolve_private_meta(root, _info)
    @staticmethod
    def resolve_meta(root, _info):
        return resolve_meta(root, _info)
class ProductImage(CountableDjangoObjectType):
    # GraphQL type for a single product image; `url` supports an optional
    # thumbnail size argument.
    url = graphene.String(
        required=True,
        description="The URL of the image.",
        size=graphene.Int(description="Size of the image"),
    )
    class Meta:
        description = "Represents a product image."
        only_fields = ["alt", "id", "sort_order"]
        interfaces = [relay.Node]
        model = models.ProductImage
    @staticmethod
    def resolve_url(root: models.ProductImage, info, *, size=None):
        # With a size, serve a generated thumbnail; otherwise the original.
        if size:
            url = get_thumbnail(root.image, size, method="thumbnail")
        else:
            url = root.image.url
        # Always return an absolute URI for the current request host.
        return info.context.build_absolute_uri(url)
class MoveProductInput(graphene.InputObjectType):
    # Input payload for reordering a product within a listing.
    product_id = graphene.ID(
        description="The ID of the product to move.", required=True
    )
    sort_order = graphene.Int(
        description=(
            "The relative sorting position of the product (from -inf to +inf) "
            "starting from the first given product's actual position."
        )
    )
| 34.902626
| 88
| 0.665904
|
4a07f156cd865e535a2742845b368c1177467176
| 4,206
|
py
|
Python
|
rnn_portfolio/quantiacs_code.py
|
kjchalup/recurrent_portfolio
|
09f2708e0355c733d980703113ce8a6d8a84d464
|
[
"MIT"
] | 3
|
2017-03-24T20:26:12.000Z
|
2021-12-18T15:38:50.000Z
|
rnn_portfolio/quantiacs_code.py
|
kjchalup/recurrent_portfolio
|
09f2708e0355c733d980703113ce8a6d8a84d464
|
[
"MIT"
] | null | null | null |
rnn_portfolio/quantiacs_code.py
|
kjchalup/recurrent_portfolio
|
09f2708e0355c733d980703113ce8a6d8a84d464
|
[
"MIT"
] | null | null | null |
""" Convenience functions to call various Quantiacs functions. """
import numpy as np
from quantiacsToolbox import stats
def fillnans(data):
    """Forward-fill NaN gaps down each column with the last non-NaN value.

    Leading NaNs in a column are left untouched; filling only begins after
    the first non-NaN entry.

    Args:
        data: 2-D array-like of numbers (converted to a float ndarray).

    Returns:
        A new float ndarray of the same shape with interior/trailing NaN
        runs replaced by the most recent preceding value in the column.
    """
    filled = np.array(data, dtype=float)
    # np.where yields indices in row-major (row-ascending) order, so filling
    # earlier rows first lets a value propagate through a multi-row NaN run.
    for row_idx, col_idx in zip(*np.where(np.isnan(filled))):
        if row_idx > 0:
            filled[row_idx, col_idx] = filled[row_idx - 1, col_idx]
    return filled
def quantiacs_calculation(dataDict, positions, settings):
    """Evaluate trading returns using quantiacsToolbox-style accounting.

    Args:
        dataDict: Dict as produced by Quantiacs' loadData (keys 'DATE',
            'OPEN', 'HIGH', 'LOW', 'CLOSE', optionally 'RINFO'). Mutated in
            place: 'exposure', 'equity', 'fundEquity' (and a zero 'RINFO'
            if absent) are added.
        positions (n_timesteps, n_markets): Position vectors; unnormalized
            is fine (normalized in place below).
        settings: Dict with 'markets', 'lookback' and 'slippage'.
            Minimum lookback is 2.

    Returns:
        Dict with 'returns' (n_timesteps, n_markets), 'fundEquity'
        (n_timesteps, 1), per-market 'marketEquity'/'marketExposure',
        'fundDate', 'settings', 'evalDate' and Quantiacs 'stats'.
    """
    nMarkets=len(settings['markets'])
    endLoop=len(dataDict['DATE'])
    # Roll adjustments: default to zeros when the data has no 'RINFO'.
    # NOTE(review): Rix is computed but never used afterwards.
    if 'RINFO' in dataDict:
        Rix = dataDict['RINFO'] != 0
    else:
        dataDict['RINFO'] = np.zeros(np.shape(dataDict['CLOSE']))
        Rix = np.zeros(np.shape(dataDict['CLOSE']))
    #%dataDict['exposure']=np.zeros((endLoop,nMarkets))
    dataDict['exposure']=positions
    # `position` aliases `positions`; the normalization below mutates the
    # caller's array in place.
    position = positions
    dataDict['equity']=np.ones((endLoop,nMarkets))
    dataDict['fundEquity'] = np.ones((endLoop,1))
    realizedP = np.zeros((endLoop, nMarkets))
    returns = np.zeros((endLoop, nMarkets))
    # Intraday (open->close) session returns, first row padded with NaN.
    sessionReturnTemp = np.append( np.empty((1,nMarkets))*np.nan,(( dataDict['CLOSE'][1:,:]- dataDict['OPEN'][1:,:]) / dataDict['CLOSE'][0:-1,:] ), axis =0 ).copy()
    sessionReturn=np.nan_to_num( fillnans(sessionReturnTemp) )
    # Overnight gaps (prev close -> open), roll-adjusted via RINFO.
    gapsTemp=np.append(np.empty((1,nMarkets))*np.nan, (dataDict['OPEN'][1:,:]- dataDict['CLOSE'][:-1,:]-dataDict['RINFO'][1:,:].astype(float)) / dataDict['CLOSE'][:-1:],axis=0)
    gaps=np.nan_to_num(fillnans(gapsTemp))
    # Slippage cost proxy: scaled daily high-low range.
    slippageTemp = np.append(np.empty((1,nMarkets))*np.nan, ((dataDict['HIGH'][1:,:] - dataDict['LOW'][1:,:]) / dataDict['CLOSE'][:-1,:] ), axis=0) * settings['slippage']
    SLIPPAGE = np.nan_to_num(fillnans(slippageTemp))
    startLoop = settings['lookback'] - 1
    # Loop through trading days
    for t in range(startLoop,endLoop):
        # Position entered yesterday, realized position from the day before.
        todaysP= dataDict['exposure'][t-1,:]
        yesterdaysP = realizedP[t-2,:]
        deltaP=todaysP-yesterdaysP
        # Overnight gap P&L on the carried position.
        newGap=yesterdaysP * gaps[t,:]
        newGap[np.isnan(newGap)]= 0
        # Session P&L minus slippage charged on the position change.
        newRet = todaysP * sessionReturn[t,:] - abs(deltaP * SLIPPAGE[t,:])
        newRet[np.isnan(newRet)] = 0
        returns[t,:] = newRet + newGap
        # Compound per-market and fund equity.
        dataDict['equity'][t,:] = dataDict['equity'][t-1,:] * (1+returns[t,:])
        dataDict['fundEquity'][t] = (dataDict['fundEquity'][t-1] * (1+np.sum(returns[t,:])))
        # Drift of yesterday's position due to price and equity changes.
        realizedP[t-1,:] = dataDict['CLOSE'][t,:] / dataDict['CLOSE'][t-1,:] * dataDict['fundEquity'][t-1] / dataDict['fundEquity'][t] * todaysP
    # Normalize positions to unit gross exposure (L1 norm), in place.
    position[np.isnan(position)] = 0
    position = np.real(position)
    position = position/np.sum(abs(position))
    position[np.isnan(position)] = 0 # extra nan check in case the positions sum to zero
    # Per-market close-to-close returns (computed but only stored locally).
    marketRets = np.float64(dataDict['CLOSE'][1:,:] - dataDict['CLOSE'][:-1,:] - dataDict['RINFO'][1:,:])/dataDict['CLOSE'][:-1,:]
    marketRets = fillnans(marketRets)
    marketRets[np.isnan(marketRets)] = 0
    marketRets = marketRets.tolist()
    a = np.zeros((1,nMarkets))
    a = a.tolist()
    marketRets = a + marketRets
    ret={}
    ret['returns'] = np.nan_to_num(returns)
    #ret['tsName']=tsName
    ret['fundDate']=dataDict['DATE']
    ret['fundEquity']=dataDict['fundEquity']
    ret['marketEquity']= dataDict['equity']
    ret['marketExposure'] = dataDict['exposure']
    ret['settings']=settings
    # NOTE(review): relies on the final loop index `t`; raises NameError if
    # startLoop >= endLoop (i.e. lookback exceeds the data length).
    ret['evalDate']=dataDict['DATE'][t]
    ret['stats']=stats(dataDict['fundEquity'])
    return ret
| 38.944444
| 176
| 0.63243
|
4a07f426c19cd65cea892e340a3022ea0d25189b
| 3,567
|
py
|
Python
|
toontown/coghq/DistributedCountryClubBattleAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 1
|
2021-02-25T06:22:49.000Z
|
2021-02-25T06:22:49.000Z
|
toontown/coghq/DistributedCountryClubBattleAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | null | null | null |
toontown/coghq/DistributedCountryClubBattleAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 2
|
2020-11-08T03:38:35.000Z
|
2021-09-02T07:03:47.000Z
|
import CogDisguiseGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.showbase.PythonUtil import addListsByValue
from toontown.battle.BattleBase import *
from toontown.coghq import DistributedLevelBattleAI
from toontown.toonbase import ToontownGlobals
from toontown.toonbase.ToontownBattleGlobals import getCountryClubCreditMultiplier
class DistributedCountryClubBattleAI(DistributedLevelBattleAI.DistributedLevelBattleAI):
    # Server-side battle object for Country Club levels: adds the
    # 'CountryClubReward' FSM state and club-specific reward handling.
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCountryClubBattleAI')
    def __init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, roundCallback = None, finishCallback = None, maxSuits = 4):
        # Base class is initialized with 'CountryClubReward' as the finish
        # state name for this battle type.
        DistributedLevelBattleAI.DistributedLevelBattleAI.__init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, 'CountryClubReward', roundCallback, finishCallback, maxSuits)
        # Country Club battles use a flat skill-credit multiplier of 1.
        self.battleCalc.setSkillCreditMultiplier(1)
        if self.bossBattle:
            self.level.d_setBossConfronted(toonId)
        # Register the reward state and allow PlayMovie -> CountryClubReward.
        self.fsm.addState(State.State('CountryClubReward', self.enterCountryClubReward, self.exitCountryClubReward, ['Resume']))
        playMovieState = self.fsm.getStateNamed('PlayMovie')
        playMovieState.addTransition('CountryClubReward')
    def getTaskZoneId(self):
        # Quests/merits are credited against the country club's id.
        return self.level.countryClubId
    def handleToonsWon(self, toons):
        # Award quest items, merits and (for boss battles) suit parts.
        extraMerits = [0, 0, 0, 0]
        # Bonus merits for the department this country club belongs to.
        amount = ToontownGlobals.CountryClubCogBuckRewards[self.level.countryClubId]
        index = ToontownGlobals.cogHQZoneId2deptIndex(self.level.countryClubId)
        extraMerits[index] = amount
        for toon in toons:
            recovered, notRecovered = self.air.questManager.recoverItems(toon, self.suitsKilled, self.getTaskZoneId())
            self.toonItems[toon.doId][0].extend(recovered)
            self.toonItems[toon.doId][1].extend(notRecovered)
            meritArray = self.air.promotionMgr.recoverMerits(toon, self.suitsKilled, self.getTaskZoneId(), getCountryClubCreditMultiplier(self.getTaskZoneId()), extraMerits=extraMerits)
            # Only toons flagged as helpful receive merits.
            if toon.doId in self.helpfulToons:
                self.toonMerits[toon.doId] = addListsByValue(self.toonMerits[toon.doId], meritArray)
            else:
                self.notify.debug('toon %d not helpful list, skipping merits' % toon.doId)
            if self.bossBattle:
                self.toonParts[toon.doId] = self.air.cogSuitMgr.recoverPart(
                    toon, 'fullSuit', self.suitTrack,
                    self.getTaskZoneId(), toons)
                self.notify.debug('toonParts = %s' % self.toonParts)
    def enterCountryClubReward(self):
        # Lock the battle, hand out rewards, and start the reward timer.
        self.joinableFsm.request('Unjoinable')
        self.runableFsm.request('Unrunable')
        self.resetResponses()
        self.assignRewards()
        self.bossDefeated = 1
        self.level.setVictors(self.activeToons[:])
        self.timer.startCallback(BUILDING_REWARD_TIMEOUT, self.serverRewardDone)
    def exitCountryClubReward(self):
        pass
    def enterResume(self):
        DistributedLevelBattleAI.DistributedLevelBattleAI.enterResume(self)
        # Propagate the boss defeat to the level once the battle resumes.
        if self.bossBattle and self.bossDefeated:
            self.battleMgr.level.b_setDefeated()
    def enterReward(self):
        DistributedLevelBattleAI.DistributedLevelBattleAI.enterReward(self)
        # Notify the enclosing room that its challenge has been cleared.
        roomDoId = self.getLevelDoId()
        room = simbase.air.doId2do.get(roomDoId)
        if room:
            room.challengeDefeated()
| 50.957143
| 198
| 0.711522
|
4a07f49e88c8ca9024b4d678507403e33249e817
| 3,899
|
py
|
Python
|
stackdio/api/users/urls.py
|
hdmillerdr/stackdio
|
84be621705031d147e104369399b872d5093ef64
|
[
"Apache-2.0"
] | 9
|
2015-12-18T22:44:55.000Z
|
2022-02-07T19:34:44.000Z
|
stackdio/api/users/urls.py
|
hdmillerdr/stackdio
|
84be621705031d147e104369399b872d5093ef64
|
[
"Apache-2.0"
] | 77
|
2015-01-12T17:49:38.000Z
|
2017-02-24T17:57:46.000Z
|
stackdio/api/users/urls.py
|
hdmillerdr/stackdio
|
84be621705031d147e104369399b872d5093ef64
|
[
"Apache-2.0"
] | 11
|
2015-01-23T15:50:19.000Z
|
2022-02-07T19:34:45.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from django.conf.urls import include, url
from stackdio.api.users import api
from stackdio.core import routers
# Routers exposing model-level permission endpoints for the User model.
user_model_router = routers.SimpleBulkRouter()
user_model_router.register(r'users',
                           api.UserModelUserPermissionsViewSet,
                           'user-model-user-permissions')
user_model_router.register(r'groups',
                           api.UserModelGroupPermissionsViewSet,
                           'user-model-group-permissions')
# Routers exposing model-level permission endpoints for the Group model.
model_router = routers.SimpleBulkRouter()
model_router.register(r'users',
                      api.GroupModelUserPermissionsViewSet,
                      'group-model-user-permissions')
model_router.register(r'groups',
                      api.GroupModelGroupPermissionsViewSet,
                      'group-model-group-permissions')
# Routers exposing per-object permission endpoints for a single group.
object_router = routers.SimpleBulkRouter()
object_router.register(r'users',
                       api.GroupObjectUserPermissionsViewSet,
                       'group-object-user-permissions')
object_router.register(r'groups',
                       api.GroupObjectGroupPermissionsViewSet,
                       'group-object-group-permissions')
# URL table: /users/... (user management), /groups/... (group management),
# /user/... (the authenticated user's own resources).
# NOTE(review): declared as a tuple; Django convention is a list, though
# tuples have historically been accepted.
urlpatterns = (
    url(r'^users/$',
        api.UserListAPIView.as_view(),
        name='user-list'),
    url(r'^users/permissions/',
        include(user_model_router.urls)),
    url(r'^users/(?P<username>[\w.@+-]+)/$',
        api.UserDetailAPIView.as_view(),
        name='user-detail'),
    url(r'^users/(?P<parent_username>[\w.@+-]+)/groups/$',
        api.UserGroupListAPIView.as_view(),
        name='user-grouplist'),
    url(r'^groups/$',
        api.GroupListAPIView.as_view(),
        name='group-list'),
    url(r'^groups/permissions/',
        include(model_router.urls)),
    url(r'^groups/(?P<name>[\w.@+-]+)/$',
        api.GroupDetailAPIView.as_view(),
        name='group-detail'),
    url(r'^groups/(?P<parent_name>[\w.@+-]+)/permissions/',
        include(object_router.urls)),
    url(r'^groups/(?P<parent_name>[\w.@+-]+)/users/$',
        api.GroupUserListAPIView.as_view(),
        name='group-userlist'),
    url(r'^groups/(?P<parent_name>[\w.@+-]+)/action/$',
        api.GroupActionAPIView.as_view(),
        name='group-action'),
    url(r'^groups/(?P<parent_name>[\w.@+-]+)/channels/$',
        api.GroupChannelListAPIView.as_view(),
        name='group-channel-list'),
    url(r'^groups/(?P<parent_name>[\w.@+-]+)/channels/(?P<name>[\w.@+-]+)/$',
        api.GroupChannelDetailAPIView.as_view(),
        name='group-channel-detail'),
    url(r'^user/$',
        api.CurrentUserDetailAPIView.as_view(),
        name='currentuser-detail'),
    url(r'^user/token/$',
        api.AuthToken.as_view(),
        name='currentuser-token'),
    url(r'^user/token/reset/$',
        api.ResetAuthToken.as_view(),
        name='currentuser-token-reset'),
    url(r'^user/channels/$',
        api.CurrentUserChannelListAPIView.as_view(),
        name='currentuser-channel-list'),
    url(r'^user/channels/(?P<name>[\w.@+-]+)/$',
        api.CurrentUserChannelDetailAPIView.as_view(),
        name='currentuser-channel-detail'),
    url(r'^user/password/$',
        api.ChangePasswordAPIView.as_view(),
        name='currentuser-password'),
)
| 32.22314
| 77
| 0.624263
|
4a07f4c8ab494ade9906a02ebf70b25ec12ed51d
| 3,645
|
py
|
Python
|
antelope/interfaces/abstract_query.py
|
AntelopeLCA/antelope_lca
|
18ae7621713c8cc92c7d8fcc7c0810c1ca3fea32
|
[
"BSD-3-Clause"
] | 1
|
2021-10-01T01:07:45.000Z
|
2021-10-01T01:07:45.000Z
|
antelope/interfaces/abstract_query.py
|
AntelopeLCA/antelope
|
3a689b40bb4b53d27d8a2750b3bcc37e8d571377
|
[
"BSD-3-Clause"
] | null | null | null |
antelope/interfaces/abstract_query.py
|
AntelopeLCA/antelope
|
3a689b40bb4b53d27d8a2750b3bcc37e8d571377
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Root-level catalog interface
"""
class ValidationError(Exception):
    """Raised when a query implementation fails its validation check."""
    pass
class PrivateArchive(Exception):
    """Raised when the queried archive is not publicly accessible."""
    pass
class EntityNotFound(Exception):
    """Raised when a lookup by external reference finds no entity."""
    pass
class NoAccessToEntity(Exception):
    """
    Used when the actual entity is not accessible, i.e. when a ref cannot dereference itself
    """
    pass
class AbstractQuery(object):
    """
    Not-quite-abstract base class for executing queries.

    A concrete query implementation must provide:
     - origin (property)
     - _iface (generator: itype)
     - _tm (property) a TermManager
    and override make_ref() and _perform_query().
    """
    # Cached result of validate(); None means "not yet checked".
    _validated = None
    '''
    Overridde these methods
    '''
    @property
    def origin(self):
        # Sentinel; concrete implementations return their origin identifier.
        return NotImplemented
    def make_ref(self, entity):
        raise NotImplementedError
    def _perform_query(self, itype, attrname, exc, *args, strict=False, **kwargs):
        # Central dispatch: run `attrname` on interface `itype`, raising
        # `exc` on failure. All public methods below funnel through this.
        raise NotImplementedError
    '''
    Internal workings
    '''
    '''
    Can be overridden
    '''
    def _grounded_query(self, origin):
        """
        Pseudo-abstract method used to construct entity references from a query that is anchored to a metaresource.
        must be overriden by user-facing subclasses if resources beyond self are required to answer
        the queries (e.g. a catalog).
        :param origin:
        :return:
        """
        return self
    """
    Basic "Documentary" interface implementation
    From JIE submitted:
    - get(id)
    - properties(id)
    - get item(id, item)
    - get reference(id)
    - synonyms(id-or-string)
    provided but not spec'd:
    - validate
    - get_uuid
    """
    def validate(self):
        # Result is computed once and cached on the instance.
        if self._validated is None:
            try:
                self._perform_query('basic', 'validate', ValidationError)
                self._validated = True
            except ValidationError:
                self._validated = False
        return self._validated
    def get(self, eid, **kwargs):
        """
        Basic entity retrieval-- should be supported by all implementations
        :param eid: external reference of the entity
        :param kwargs:
        :return: the entity; raises EntityNotFound otherwise
        """
        return self._perform_query('basic', 'get', EntityNotFound, eid,
                                   **kwargs)
    def properties(self, external_ref, **kwargs):
        """
        Get an entity's list of properties
        :param external_ref:
        :param kwargs:
        :return:
        """
        return self._perform_query('basic', 'properties', EntityNotFound, external_ref, **kwargs)
    def get_item(self, external_ref, item):
        """
        access an entity's dictionary items
        :param external_ref:
        :param item: the dictionary key to read
        :return:
        """
        '''
        if hasattr(external_ref, 'external_ref'):  # debounce
            err_str = external_ref.external_ref
        else:
            err_str = external_ref
        '''
        return self._perform_query('basic', 'get_item', EntityNotFound,
                                   external_ref, item)
    def get_uuid(self, external_ref):
        # Resolve an external reference to its UUID.
        return self._perform_query('basic', 'get_uuid', EntityNotFound,
                                   external_ref)
    def get_reference(self, external_ref):
        # Retrieve the entity's reference exchange/value.
        return self._perform_query('basic', 'get_reference', EntityNotFound,
                                   external_ref)
    def synonyms(self, item, **kwargs):
        """
        Return a list of synonyms for the object -- quantity, flowable, or compartment
        :param item:
        :return: list of strings
        """
        return self._perform_query('basic', 'synonyms', EntityNotFound, item,
                                   **kwargs)
| 26.605839
| 115
| 0.58107
|
4a07f54288a6ba531e9a99aa573537f713e04bc6
| 2,840
|
py
|
Python
|
gen/pb_python/flyteidl/service/flyteadmin/flyteadmin/models/admin_matchable_resource.py
|
EngHabu/flyteidl
|
d7970314fc2bcbd2840610b4d42ea2886f0837b9
|
[
"Apache-2.0"
] | null | null | null |
gen/pb_python/flyteidl/service/flyteadmin/flyteadmin/models/admin_matchable_resource.py
|
EngHabu/flyteidl
|
d7970314fc2bcbd2840610b4d42ea2886f0837b9
|
[
"Apache-2.0"
] | 3
|
2021-03-27T12:20:58.000Z
|
2021-03-29T09:45:45.000Z
|
gen/pb_python/flyteidl/service/flyteadmin/flyteadmin/models/admin_matchable_resource.py
|
EngHabu/flyteidl
|
d7970314fc2bcbd2840610b4d42ea2886f0837b9
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
flyteidl/service/admin.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AdminMatchableResource(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    allowed enum values
    """
    # Enum values mirroring the protobuf MatchableResource enum.
    TASK_RESOURCE = "TASK_RESOURCE"
    CLUSTER_RESOURCE = "CLUSTER_RESOURCE"
    EXECUTION_QUEUE = "EXECUTION_QUEUE"
    EXECUTION_CLUSTER_LABEL = "EXECUTION_CLUSTER_LABEL"
    QUALITY_OF_SERVICE_SPECIFICATION = "QUALITY_OF_SERVICE_SPECIFICATION"
    PLUGIN_OVERRIDE = "PLUGIN_OVERRIDE"
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Empty: an enum model carries no serializable attributes.
    swagger_types = {
    }
    attribute_map = {
    }
    def __init__(self):  # noqa: E501
        """AdminMatchableResource - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize each declared attribute (none for an enum).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AdminMatchableResource, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AdminMatchableResource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.979592
| 119
| 0.584155
|
4a07f5e14cb6464e1b9f6c3dd3c6b5843fa52956
| 3,282
|
py
|
Python
|
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/tests/checks_generation.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/tests/checks_generation.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/tests/checks_generation.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | null | null | null |
# coding=utf-8
"""
These are very "meta" utils for creating nose tests on the fly.
Here is an example use: ::
thinghies = {'banana': 'yellow', 'apple': 'red', 'sky': 'blue'}
def thinghies_list():
return thinghies.keys()
def thinghies_args(x):
return (x, thinghies[x])
def thinghies_attrs(x):
return dict(thinghy_name='%s' % x, flavor=thinghies[x])
for_all_thinghies = fancy_test_decorator(lister=thinghies_list,
arguments=thinghies_args,
attributes=thinghies_attrs)
And this is the proper test: ::
@for_all_thinghies
def check_good_flavor(id_thinghy, flavor):
print('test for %s %s' % (id_thinghy, flavor))
"""
import sys
from nose.tools import istest, nottest
from geometry import logger
__all__ = ['fancy_test_decorator']
def add_to_module(function, module_name):
    """Install *function* as an attribute of the module named *module_name*.

    Both the function's name and the module name must contain the substring
    "test" so that nose's collector will discover the generated test.

    :param function: the test function to install.
    :param module_name: key into ``sys.modules`` of the target module.
    :raises Exception: if either name lacks "test", or if the module already
        has an attribute with the function's name.
    """
    module = sys.modules[module_name]
    name = function.__name__
    if 'test' not in name:
        raise Exception('No "test" in function name %r' % name)
    if 'test' not in module_name:
        raise Exception('While adding %r in %r: module does not have "test"'
                        ' in it, so nose will not find the test.' %
                        (name, module_name))
    if name in module.__dict__:
        raise Exception('Already created test %r.' % name)
    module.__dict__[name] = function
def add_checker_f(f, x, arguments, attributes, naming):
    # Wrap check `f` applied to object `x` in a nose-discoverable test
    # function and install it into f's module.
    name = 'test_%s_%s' % (f.__name__, naming(x))
    @istest
    def caller():
        args = None
        try:
            # Map the object to its argument tuple, then run the check.
            args = arguments(x)
            f(*args)
        except (Exception, AssertionError):
            # Log full context before re-raising so nose reports the failure.
            msg = 'Error while executing test %r.\n' % name
            msg += ' f = %s\n' % f
            msg += ' f.__module__ = %s\n' % f.__module__
            msg += ' x = %s\n' % str(x)
            msg += ' arguments() = %s\n' % str(arguments)
            msg += ' arguments(x) = %s\n' % str(args)
            msg += ' attributes = %s\n' % str(attributes)
            logger.error(msg)
            raise
    caller.__name__ = name
    # Copy the object's nose attributes onto the generated test and record
    # which check produced it.
    for k, v in attributes(x).items():
        caller.__dict__[k] = v
    caller.__dict__['test'] = f.__name__
    add_to_module(caller, f.__module__)
# TODO: add debug info function
@nottest
def fancy_test_decorator(lister,
                         arguments=lambda x: x,
                         attributes=lambda x: {'id': str(x)},
                         naming=lambda x: str(x),
                         debug=False):
    '''
    Creates a fancy decorator for adding checks.

    :param lister: a function that should give a list of objects
    :param arguments: from object to arguments
    :param attributes: (optional) set of attributes for the test
    :param naming: (optional) maps an object to a name suffix
    :param debug: (optional) log each generated test
    Returns a function that can be used as a decorator.
    '''
    def _decorate(check):
        # One generated test per object produced by lister().
        for item in lister():
            if debug:
                logger.info('add test %s / %s ' % (check, item))
            add_checker_f(check, item, arguments, attributes, naming)
        return check
    return _decorate
| 28.051282
| 76
| 0.556368
|
4a07f6337482594c038e52341ed4e1119434f41a
| 556
|
py
|
Python
|
liquepy/motion/measures.py
|
geosharma/liquepy
|
d567f6a7eb93eb76218f718375f2d34355d7edd4
|
[
"MIT"
] | 4
|
2020-04-12T14:49:24.000Z
|
2020-09-22T09:28:10.000Z
|
liquepy/motion/measures.py
|
geosharma/liquepy
|
d567f6a7eb93eb76218f718375f2d34355d7edd4
|
[
"MIT"
] | null | null | null |
liquepy/motion/measures.py
|
geosharma/liquepy
|
d567f6a7eb93eb76218f718375f2d34355d7edd4
|
[
"MIT"
] | 4
|
2019-11-02T02:11:34.000Z
|
2021-04-30T06:06:41.000Z
|
import eqsig
import numpy as np
from numpy import trapz
from liquepy.exceptions import deprecation
def calculate_cav_dp_time_series(acc, dt):
    # Build an eqsig AccSignal from raw values and delegate.
    # NOTE(review): unlike calculate_cav_dp_series below, this legacy name
    # does not emit a deprecation warning -- confirm whether it should.
    asig = eqsig.AccSignal(acc, dt)
    return calc_cav_dp_series(asig)
def calculate_cav_dp(acc, dt):
    # Final (cumulative) CAV_dp value: last element of the series.
    asig = eqsig.AccSignal(acc, dt)
    return calc_cav_dp_series(asig)[-1]
def calc_cav_dp_series(asig):
    # Thin wrapper over eqsig's standardized-CAV implementation.
    return eqsig.measures.calc_cav_dp(asig)
def calculate_cav_dp_series(asig):
    # Deprecated alias kept for backward compatibility.
    deprecation("calculate_cav_dp_series is deprecated - use calc_cav_dp_series")
    return calc_cav_dp_series(asig)
| 23.166667
| 81
| 0.77518
|
4a07f85235a2b6978879ef36ac3c5a7f6e43fdd5
| 516
|
py
|
Python
|
toolchain/riscv/MSYS/python/Lib/test/test_future3.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
toolchain/riscv/MSYS/python/Lib/test/test_future3.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 8
|
2019-06-29T14:18:51.000Z
|
2022-02-19T07:30:27.000Z
|
toolchain/riscv/MSYS/python/Lib/test/test_future3.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 76
|
2020-03-16T01:47:46.000Z
|
2022-03-21T16:37:07.000Z
|
from __future__ import nested_scopes
from __future__ import division
import unittest
x = 2
def nester():
x = 3
def inner():
return x
return inner()
class TestFuture(unittest.TestCase):
def test_floor_div_operator(self):
self.assertEqual(7 // 2, 3)
def test_true_div_as_default(self):
self.assertAlmostEqual(7 / 2, 3.5)
def test_nested_scopes(self):
self.assertEqual(nester(), 3)
if __name__ == "__main__":
unittest.main()
| 19.111111
| 43
| 0.631783
|
4a07f943a0e2d9e9acf43af723293ff8f8829986
| 336
|
py
|
Python
|
projectapp/migrations/0003_auto_20191106_0857.py
|
imran1234567/fullthrottlelabsproj
|
530f2b866bcfa2a2170225a1e7b4591500e5ffe7
|
[
"MIT"
] | null | null | null |
projectapp/migrations/0003_auto_20191106_0857.py
|
imran1234567/fullthrottlelabsproj
|
530f2b866bcfa2a2170225a1e7b4591500e5ffe7
|
[
"MIT"
] | 6
|
2019-11-07T13:12:40.000Z
|
2022-02-10T09:37:57.000Z
|
projectapp/migrations/0003_auto_20191106_0857.py
|
imran1234567/fullthrottlelabsproj
|
530f2b866bcfa2a2170225a1e7b4591500e5ffe7
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-11-06 08:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projectapp', '0002_auto_20191106_0853'),
]
operations = [
migrations.RenameModel(
old_name='Searchapp',
new_name='Search',
),
]
| 18.666667
| 50
| 0.60119
|
4a07fc073d5cc5055197e490348e7bc8d6b445db
| 1,208
|
py
|
Python
|
authors/apps/bookmarks/migrations/0001_initial.py
|
andela/ah-infinity-stones
|
e57d4b007f0df4a5db9a1841fd88898adecf1fcd
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/bookmarks/migrations/0001_initial.py
|
andela/ah-infinity-stones
|
e57d4b007f0df4a5db9a1841fd88898adecf1fcd
|
[
"BSD-3-Clause"
] | 64
|
2018-11-27T11:20:55.000Z
|
2021-06-10T20:58:34.000Z
|
authors/apps/bookmarks/migrations/0001_initial.py
|
andela/ah-infinity-stones
|
e57d4b007f0df4a5db9a1841fd88898adecf1fcd
|
[
"BSD-3-Clause"
] | 4
|
2018-12-19T19:58:18.000Z
|
2019-10-03T08:28:00.000Z
|
# Generated by Django 2.1.3 on 2019-01-14 17:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BookmarkArticle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookmark_url', to='articles.Article', verbose_name='Article')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'ordering': ['-created_at'],
},
),
migrations.AlterUniqueTogether(
name='bookmarkarticle',
unique_together={('user', 'article')},
),
]
| 34.514286
| 168
| 0.620033
|
4a07fd2f78127bd0611e9743ba8faf99fc2955db
| 14,107
|
py
|
Python
|
Snakes/snakes.py
|
ianramzy/high-school-assignments
|
6d3ca52de5c6b80a1f0678ca73b78a3024e95f05
|
[
"MIT"
] | null | null | null |
Snakes/snakes.py
|
ianramzy/high-school-assignments
|
6d3ca52de5c6b80a1f0678ca73b78a3024e95f05
|
[
"MIT"
] | null | null | null |
Snakes/snakes.py
|
ianramzy/high-school-assignments
|
6d3ca52de5c6b80a1f0678ca73b78a3024e95f05
|
[
"MIT"
] | null | null | null |
import pygame, sys, random
# pygame 1.9.4
pygame.init()
size = width, height = 1320, 720
screen = pygame.display.set_mode(size)
# pygame.mixer.pre_init()
# white = [0, 0, 0]
white = [255, 255, 255]
orange = [255, 140, 0]
red = [255, 0, 0]
gray = [35, 35, 35]
# white = [0, 0, 255]
font = pygame.font.SysFont("BankGothic", 45)
font2 = pygame.font.SysFont("BankGothic", 80)
font3 = pygame.font.SysFont("BankGothic", 222)
font4 = pygame.font.SysFont("BankGothic", 15)
font5 = pygame.font.SysFont("BankGothic", 95)
font6 = pygame.font.SysFont("BankGothic", 65)
# snakeyum = pygame.mixer.Sound('snakeyum.wav')
# alarm = pygame.mixer.Sound('alarm.wav')
# ticktick = pygame.mixer.Sound('ticktock.wav')
backround = pygame.image.load("snakeback.jpg")
timebon = pygame.image.load("clock.png")
# pygame.mixer.music.load('tronmusic.wav')
def gamep1():
isticking = False
# pygame.mixer.music.play(-1)
# starting speed is going to the right:
speed = [0, 30]
# head is where the new snake segment will be created:
head = [90, 90]
# snake is a list of Rectangles, representing segments of the snake:
snake = [pygame.Rect(head, [30, 30])]
# starting length is 5:
length = 5
# set random position for food:
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = 11
counter = eleven
wt = 0
score = 0
backrect = pygame.Rect(0, 0, 0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
if speed[1] != 30:
speed = [0, -30]
elif event.key == pygame.K_DOWN:
if speed[1] != -30:
speed = [0, 30]
if event.key == pygame.K_LEFT:
if speed[0] != 30:
speed = [-30, 0]
elif event.key == pygame.K_RIGHT:
if speed[0] != -30:
speed = [30, 0]
# move the head:
head[0] = head[0] + speed[0]
head[1] = head[1] + speed[1]
# check for collision with self:
for segment in snake:
if segment == pygame.Rect(head, [30, 30]):
losequit(score)
# check for collision with walls:
if head[0] >= width or head[0] < 0 or head[1] >= height or head[1] < 0:
losequit(score)
# check for collision with food:
if head == food:
length = length + 1
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
wt = wt - 3
eleven = eleven - .5
counter = eleven
score = score + 1
# snakeyum.play()
# check for collision with time bonus:
if head == food2:
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = eleven + 1
counter = eleven
# snakeyum.play()
# add new segment to snake at head:
snake.append(pygame.Rect(head, [30, 30]))
# remove tail segments if necessary:
while len(snake) > length:
snake.pop(0)
# draw your game elements here:
screen.blit(backround, backrect)
# draw all the snake segments:
for segment in snake:
pygame.draw.rect(screen, red, segment, 0)
## timer
counter = counter - 0.1
## render title
renderedText = font5.render("SNAKE TRIALS", 1, white)
screen.blit(renderedText, (300, 10))
## render timer
renderedText = font.render("Time Remaining: " + str(int(counter)), 1, white)
screen.blit(renderedText, (5, height - 155))
## render score
renderedText = font.render("Score: " + str(int(score)), 1, white)
screen.blit(renderedText, (5, height - 195))
if counter <= 4:
if not isticking:
# ticktick.play(0)
isticking = True
# running out of time:
if counter <= 0:
losequit(score)
# draw the food:
pygame.draw.rect(screen, orange, pygame.Rect(food, [30, 30]), 0)
screen.blit(timebon, food2)
pygame.display.flip()
pygame.time.wait(wt)
def gamep2():
isticking = False
# backround = pygame.image.load("snakeback.jpg")
# pygame.mixer.music.load('tronmusic.wav')
# pygame.mixer.music.play(-1)
# starting speed is going to the right:
speed = [0, 30]
speed2 = [30, 0]
# head is where the new snake segment will be created:
head = [90, 90]
head2 = [270, 270]
# snake is a list of Rectangles, representing segments of the snake:
snake = [pygame.Rect(head, [30, 30])]
snake2 = [pygame.Rect(head2, [30, 30])]
# starting length is 5:
length = 5
length2 = 5
# set random position for food:
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = 11
counter = eleven
wt = 100
score = 0
backrect = pygame.Rect(0, 0, 0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
if speed[1] != 30:
speed = [0, -30]
elif event.key == pygame.K_s:
if speed[1] != -30:
speed = [0, 30]
if event.key == pygame.K_a:
if speed[0] != 30:
speed = [-30, 0]
elif event.key == pygame.K_d:
if speed[0] != -30:
speed = [30, 0]
# snake2 controls
if event.key == pygame.K_UP:
if speed2[1] != 30:
speed2 = [0, -30]
elif event.key == pygame.K_DOWN:
if speed2[1] != -30:
speed2 = [0, 30]
if event.key == pygame.K_LEFT:
if speed2[0] != 30:
speed2 = [-30, 0]
elif event.key == pygame.K_RIGHT:
if speed2[0] != -30:
speed2 = [30, 0]
# move the head:
head[0] = head[0] + speed[0]
head[1] = head[1] + speed[1]
head2[0] = head2[0] + speed2[0]
head2[1] = head2[1] + speed2[1]
# check for collision with self:
for segment in snake:
if segment == pygame.Rect(head, [30, 30]):
losequit(score)
for segment in snake2:
if segment == pygame.Rect(head2, [30, 30]):
losequit(score)
for segment in snake:
if segment == pygame.Rect(head2, [30, 30]):
losequit(score)
for segment in snake2:
if segment == pygame.Rect(head, [30, 30]):
losequit(score)
# check for collision with walls:
if head[0] >= width or head[0] < 0 or head[1] >= height or head[1] < 0:
losequit(score)
if head2[0] >= width or head2[0] < 0 or head2[1] >= height or head2[1] < 0:
losequit(score)
# check for collision with food:
if head == food:
length = length + 1
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
wt = wt - 3
eleven = eleven - .5
counter = eleven
score = score + 1
# snakeyum.play()
if head2 == food:
length2 = length2 + 1
food = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
wt = wt - 3
eleven = eleven - .5
counter = eleven
score = score + 1
# snakeyum.play()
# check for collision with time bonus:
if head == food2:
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = eleven + .5
counter = eleven
# snakeyum.play()
if head2 == food2:
food2 = [30 * random.randint(0, width / 30 - 1), 30 * random.randint(0, height / 30 - 1)]
eleven = eleven + .5
counter = eleven
# snakeyum.play()
# add new segment to snake at head:
snake.append(pygame.Rect(head, [30, 30]))
snake2.append(pygame.Rect(head2, [30, 30]))
# remove tail segments if necessary:
while len(snake) > length:
snake.pop(0)
while len(snake2) > length:
snake2.pop(0)
# draw your game elements here:
screen.blit(backround, backrect)
# draw all the snake segments:
for segment in snake:
pygame.draw.rect(screen, white, segment, 0)
for segment in snake2:
pygame.draw.rect(screen, red, segment, 0)
## timer
counter = counter - 0.1
if counter <= 4:
if not isticking:
# ticktick.play(0)
isticking = True
## render title
renderedText = font5.render("P2 SNAKE TRIALS ", 1, white)
screen.blit(renderedText, (233, 5))
## render timer
renderedText = font.render("Time Remaining: " + str(int(counter)), 1, white)
screen.blit(renderedText, (5, height - 55))
## render score
renderedText = font.render("Score: " + str(int(score)), 1, white)
screen.blit(renderedText, (5, height - 95))
# running out of time:
if counter <= 0:
losequit(score)
# draw the food:
pygame.draw.rect(screen, orange, pygame.Rect(food, [30, 30]), 0)
screen.blit(timebon, food2)
pygame.display.flip()
pygame.time.wait(wt)
def startscreen():
backround = pygame.image.load("snakeback.jpg")
backrect = pygame.Rect(0, 0, 0, 0)
screen.blit(backround, backrect)
renderedText = font5.render('Welcome to Snake Trials', 1, white)
screen.blit(renderedText, (11, 1))
renderedText = font6.render("Press Space to Start", 1, white)
screen.blit(renderedText, (11, height - 195))
renderedText = font6.render("Press '2' for Two Player Co-Op", 1, white)
screen.blit(renderedText, (11, height - 95))
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gamep1()
if event.key == pygame.K_2:
gamep2()
def prestart():
time = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
startscreen()
if time == 10:
white = [0, 0, 0]
renderedText = font4.render(
'Traceback most recent call last:File C:Users Wood Word is2o snakes snakes.py, line 307, in <module>',
0, white)
screen.blit(renderedText, (0, 0))
renderedText = font4.render('Press space to initiate the new world order and virus.exe', 0, white)
screen.blit(renderedText, (0, 15))
if time == 20:
white = [255, 255, 255]
renderedText = font4.render(
'Traceback most recent call last:File C:Users Wood Word is2o snakes snakes.py, line 307, in <module>',
0, white)
screen.blit(renderedText, (0, 0))
renderedText = font4.render('Press space to initiate the new world order and virus.exe', 0, white)
screen.blit(renderedText, (0, 15))
time = 0
time = time + 1
pygame.display.flip()
pygame.time.wait(100)
def losequit(score):
# pygame.mixer.music.load('tronmusic.wav')
# pygame.mixer.music.stop
# alarm.play()
fixme = 69420
backround = pygame.image.load("snakeback.jpg")
backrect = pygame.Rect(0, 0, 0, 0)
screen.blit(backround, backrect)
renderedText = font3.render('You Lose!', 1, white)
screen.blit(renderedText, (85, 100))
renderedText = font.render("You scored: " + str(int(score)), 1, white)
screen.blit(renderedText, (4, height - 95))
renderedText = font.render("Press Space to Play Single Player Again", 1, white)
screen.blit(renderedText, (4, height - 195))
renderedText = font.render("Press '2' to Play Two Player", 1, white)
screen.blit(renderedText, (4, height - 155))
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gamep1()
if event.key == pygame.K_2:
gamep2()
prestart()
| 34.746305
| 119
| 0.505281
|
4a07fd3b94ee7e8eba96a8648acd153bf197dfdb
| 193
|
py
|
Python
|
v1-v2/test-programs/two_funcs.py
|
ambadhan/OnlinePythonTutor
|
857bab941fbde20f1f020b05b7723094ddead62a
|
[
"MIT"
] | 17
|
2021-12-09T11:31:44.000Z
|
2021-12-29T03:07:14.000Z
|
v1-v2/test-programs/two_funcs.py
|
heysachin/OnlinePythonTutor
|
0dcdacc7ff5be504dd6a47236ebc69dc0069f991
|
[
"MIT"
] | 22
|
2017-09-17T03:59:16.000Z
|
2017-11-14T17:33:57.000Z
|
v1-v2/test-programs/two_funcs.py
|
heysachin/OnlinePythonTutor
|
0dcdacc7ff5be504dd6a47236ebc69dc0069f991
|
[
"MIT"
] | 12
|
2021-12-09T11:31:46.000Z
|
2022-01-07T03:14:46.000Z
|
def add(a, b, c):
d = a + b
return c + d
def double_add(a, b, c):
x = add(a, b, c)
y = add(a, b, c)
return x + y
x = 5
y = 10
z = x * y
print add(x, y, z)
print double_add(x, y, z)
| 12.866667
| 25
| 0.492228
|
4a07fd7c282bfbea9d23b1d3bed07624942e62c3
| 6,815
|
py
|
Python
|
QuatPlotTypes_QtDemo.py
|
johnmgregoire/PythonCompositionPlots
|
e105c575463b7d4512d9aac18c7330d1a0dc2c14
|
[
"BSD-3-Clause"
] | 4
|
2018-03-05T09:34:49.000Z
|
2022-02-01T15:33:54.000Z
|
QuatPlotTypes_QtDemo.py
|
johnmgregoire/PythonCompositionPlots
|
e105c575463b7d4512d9aac18c7330d1a0dc2c14
|
[
"BSD-3-Clause"
] | null | null | null |
QuatPlotTypes_QtDemo.py
|
johnmgregoire/PythonCompositionPlots
|
e105c575463b7d4512d9aac18c7330d1a0dc2c14
|
[
"BSD-3-Clause"
] | 2
|
2016-01-24T19:09:21.000Z
|
2019-10-11T12:43:07.000Z
|
import time
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import operator
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy.ma as ma
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import pylab
import pickle
#from fcns_math import *
#from fcns_io import *
#from fcns_ui import *
#PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
wd=os.getcwd()
#sys.path.append(os.path.join(PyCodePath,'PythonCompositionPlots'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern2 import *
from quaternary_FOM_stackedtern5 import *
from quaternary_FOM_stackedtern20 import *
from quaternary_FOM_stackedtern30 import *
from quaternary_FOM_stackedtern9of100 import *
from quaternary_ternary_faces import *
from quaternary_faces_shells import *
from quaternary_folded_ternaries import *
# self.plotw_stack.fig.clf()
# self.plotw_stack_axl, self.plotw_stack_stpl=makefcn(fig=self.plotw_stack.fig, ellabels=self.ellabels)
# self.cbax_stack=self.plotw_stack.fig.add_axes(self.plotw_stack_cbaxrect)
class plottypeDialog(QDialog):
def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
super(plottypeDialog, self).__init__(parent)
mainlayout=QVBoxLayout()
self.ellabels=ellabels
self.plotw=plotwidget(self)
self.plotw.fig.clf()
mainlayout.addWidget(self.plotw)
self.plottypeComboBox = QComboBox(self)
mainlayout.addWidget(self.plottypeComboBox)
QObject.connect(self.plottypeComboBox,SIGNAL("activated(QString)"),self.plot)
self.setLayout(mainlayout)
self.ternaryfaceoptions=[\
('tern. layered shells', ternaryfaces_shells), \
('folded tern. layers', ternaryfaces_folded), \
('only tern. faces', ternaryfaces), \
]
self.ternaryface_uiinds=[1, 2, 3]
self.stackedternoptions=[\
('20% interv ternaries', (make5ternaxes, scatter_5axes)), \
('10% interv ternaries', (make10ternaxes, scatter_10axes)), \
('5% interv ternaries', (make20ternaxes, scatter_20axes)), \
('3.3% interv ternaries', (make30ternaxes, scatter_30axes)), \
('9 plots at 1% interv', (make9of100ternaxes, scatter_9of100axes)), \
]
self.stackedtern_uiinds=[4, 5, 6, 7, 8]
self.fillplotoptions()
def fillplotoptions(self):
self.plottypeComboBox.clear()
self.plottypeComboBox.insertItem(0, 'none')
for count, tup in enumerate(self.ternaryfaceoptions):
self.plottypeComboBox.insertItem(999, tup[0])
for count, tup in enumerate(self.stackedternoptions):
self.plottypeComboBox.insertItem(999, tup[0])
self.plottypeComboBox.setCurrentIndex(1)
def loadplotdata(self, quatcomps, cols, nintervals=None):
self.cols=cols
self.quatcomps=quatcomps
if nintervals is None:
pairwisediffs=(((quatcomps[1:]-quatcomps[:-1])**2).sum(axis=1))**.5/2.**.5
mindiff=(pairwisediffs[pairwisediffs>0.005]).min()
self.nintervals=round(1./mindiff)
else:
self.nintervals=nintervals
def plot(self, **kwargs):
self.plotw.fig.clf()
i=self.plottypeComboBox.currentIndex()
if i in self.ternaryface_uiinds:
selclass=self.ternaryfaceoptions[self.ternaryface_uiinds.index(i)][1]
self.ternaryfaceplot(selclass, **kwargs)
if i in self.stackedtern_uiinds:
makefcn, scatterfcn=self.stackedternoptions[self.stackedtern_uiinds.index(i)][1]
self.stackedternplot(makefcn, scatterfcn, **kwargs)
self.plotw.fig.canvas.draw()
def ternaryfaceplot(self, plotclass, **kwargs):
ax=self.plotw.fig.add_axes((0, 0, 1, 1))
tf=plotclass(ax, nintervals=self.nintervals)
tf.label()
tf.scatter(self.quatcomps, self.cols, **kwargs)
def stackedternplot(self, makefcn, scatterfcn, **kwargs):
self.axl, self.stpl=makefcn(fig=self.plotw.fig, ellabels=self.ellabels)
scatterfcn(self.quatcomps, self.cols, self.stpl, edgecolor='none', **kwargs)
class plotwidget(FigureCanvas):
def __init__(self, parent, width=12, height=6, dpi=72, projection3d=False):
#plotdata can be 2d array for image plot or list of 2 1d arrays for x-y plot or 2d array for image plot or list of lists of 2 1D arrays
self.fig=Figure(figsize=(width, height), dpi=dpi)
if projection3d:
self.axes=self.fig.add_subplot(111, navigate=True, projection='3d')
else:
self.axes=self.fig.add_subplot(111, navigate=True)
self.axes.hold(True)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
#self.parent=parent
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
#NavigationToolbar(self, parent)
self.toolbar=NavigationToolbar(self.figure.canvas, self)
self.toolbar.setMovable(True)#DOESNT DO ANYTHING
self.mpl_connect('button_press_event', self.myclick)
self.clicklist=[]
def myclick(self, event):
if not (event.xdata is None or event.ydata is None):
arrayxy=[event.xdata, event.ydata]
print 'clicked on image: array indeces ', arrayxy, ' using button', event.button
self.clicklist+=[arrayxy]
self.emit(SIGNAL("genericclickonplot"), [event.xdata, event.ydata, event.button])
if __name__ == "__main__":
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):
super(MainMenu, self).__init__(None)
self.ui=plottypeDialog(self, **kwargs)
intervs=20
compsint=[[b, c, (intervs-a-b-c), a] for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0,intervs+1-a-b)][::-1]
print len(compsint)
comps=numpy.float32(compsint)/intervs
pylab.figure()
stpquat=QuaternaryPlot(111)
cols=stpquat.rgb_comp(comps)
self.ui.loadplotdata(comps, cols)
if execute:
self.ui.exec_()
mainapp=QApplication(sys.argv)
form=MainMenu(None)
form.show()
form.setFocus()
mainapp.exec_()
| 37.651934
| 172
| 0.667351
|
4a07fddf3e8b68abefb82f4a9ccebd399259bd66
| 634
|
py
|
Python
|
tests/garage/envs/mujoco/maze/test_point_maze_env.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | 3
|
2019-08-11T22:26:55.000Z
|
2020-11-28T10:23:50.000Z
|
tests/garage/envs/mujoco/maze/test_point_maze_env.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | null | null | null |
tests/garage/envs/mujoco/maze/test_point_maze_env.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | 2
|
2019-08-11T22:30:14.000Z
|
2021-03-25T02:57:50.000Z
|
import pickle
import unittest
from garage.envs.mujoco.maze.point_maze_env import PointMazeEnv
from tests.helpers import step_env
class TestPointMazeEnv(unittest.TestCase):
def test_pickleable(self):
env = PointMazeEnv(n_bins=2)
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
assert round_trip._n_bins == env._n_bins
step_env(round_trip)
def test_does_not_modify_action(self):
env = PointMazeEnv(n_bins=2)
a = env.action_space.sample()
a_copy = a.copy()
env.reset()
env.step(a)
self.assertEquals(a.all(), a_copy.all())
| 27.565217
| 63
| 0.676656
|
4a07fdf02a8cb79d4cac9e1721636ce46900f270
| 1,166
|
py
|
Python
|
FIM_CoronaBot/Old_version/mierdat.py
|
HeNeos/Mechanical
|
b64e9ea8610b4d8d8ef02d01d3ca7fea3ea7b857
|
[
"MIT"
] | 1
|
2020-03-31T11:54:28.000Z
|
2020-03-31T11:54:28.000Z
|
FIM_CoronaBot/Old_version/mierdat.py
|
HeNeos/Mechanical
|
b64e9ea8610b4d8d8ef02d01d3ca7fea3ea7b857
|
[
"MIT"
] | null | null | null |
FIM_CoronaBot/Old_version/mierdat.py
|
HeNeos/Mechanical
|
b64e9ea8610b4d8d8ef02d01d3ca7fea3ea7b857
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from webbot import Browser
web = Browser()
web.go_to('https://unidemo.webex.com/join/echaucagiron')
#web.go_to('https://unidemo.webex.com/webappng/sites/unidemo/dashboard/pmr/huaman1942?siteurl=unidemo')
web.click("Entrar a la reunión")
#web.go_to('https://unidemo.webex.com/webappng/sites/unidemo/meeting/download/ddc993d54459e2515c68cceb6856bfec?launchApp=true')
for i in range(0,2):
if(web.exists("IDONTKNOW")):
web.click("IDK")
web.press(web.Key.ENTER)
for i in range(0,2):
print(i)
if(web.exists("IDONTKNOW")):
web.click("IDK")
web.type('Josue Huaroto\tjosue.huaroto.v@uni.pe')
#for i in range(0,1):
# if(web.exists("IDONTKNOW")):
# web.click("IDK")
web.press(web.Key.ENTER)
for i in range(0,7):
print(i)
if(web.exists('Saltear')):
web.click('Saltear')
else:
web.press(web.Key.ESCAPE)
for i in range(0,6):
if(web.exists('Saltear')):
web.click('Saltear')
web.press(web.Key.ENTER)
for i in range(0,7):
print(i)
if(web.exists('Entrar a reunión')):
web.click('Entrar a reunión')
else:
web.press(web.Key.TAB)
web.press(web.Key.ENTER)
| 28.439024
| 127
| 0.657804
|
4a07fee3b9fd844b12e1d8383a9026aed25357b6
| 376
|
py
|
Python
|
bradfield/test_x4_01_pathsum.py
|
savarin/algorithms
|
4d1f8f2361de12a02f376883f648697562d177ae
|
[
"MIT"
] | 1
|
2020-06-16T23:22:54.000Z
|
2020-06-16T23:22:54.000Z
|
bradfield/test_x4_01_pathsum.py
|
savarin/algorithms
|
4d1f8f2361de12a02f376883f648697562d177ae
|
[
"MIT"
] | null | null | null |
bradfield/test_x4_01_pathsum.py
|
savarin/algorithms
|
4d1f8f2361de12a02f376883f648697562d177ae
|
[
"MIT"
] | null | null | null |
from x4_01_pathsum import Node, path_sum
def test_path_sum():
tree = Node(5, \
Node(4, \
Node(11,
Node(7), Node(2))), \
Node(8,
Node(13), Node(4, \
None, Node(1))))
assert path_sum(tree, 22) == True
assert path_sum(tree, 100) == False
| 25.066667
| 50
| 0.414894
|
4a07fee4adf3c526348fa9c7d739f7ca7eae62c6
| 6,387
|
py
|
Python
|
w_queues/howtorep_exp.py
|
mfatihaktas/deep-scheduler
|
ad567465399620ec379cfdaa67fbcd94ded03c75
|
[
"MIT"
] | 11
|
2018-03-28T02:55:12.000Z
|
2021-07-12T15:21:38.000Z
|
w_queues/howtorep_exp.py
|
mfatihaktas/deep-scheduler
|
ad567465399620ec379cfdaa67fbcd94ded03c75
|
[
"MIT"
] | null | null | null |
w_queues/howtorep_exp.py
|
mfatihaktas/deep-scheduler
|
ad567465399620ec379cfdaa67fbcd94ded03c75
|
[
"MIT"
] | 5
|
2018-03-16T01:36:46.000Z
|
2019-10-17T03:23:20.000Z
|
import numpy as np
from patch import *
from rvs import *
from sim import *
class MultiQ_wRep(object):
def __init__(self, env, n, max_numj, sching_m, sl_dist, scher=None, act_max=False):
self.env = env
self.n = n
self.max_numj = max_numj
self.sching_m = sching_m
self.scher = scher
self.act_max = act_max
self.d = self.sching_m['d']
self.jq = JQ(env, list(range(self.n) ), self)
L = sching_m['L'] if 'L' in sching_m else None
self.q_l = [FCFS(i, env, sl_dist, out=self.jq, out_c=self.jq, L=L) for i in range(self.n) ]
self.store = simpy.Store(env)
self.action = env.process(self.run() )
self.jid_info_m = {}
self.num_jcompleted = 0
self.jsl_l = []
self.d_numj_l = [0 for _ in range(self.d) ]
def __repr__(self):
return "MultiQ_wRep[n= {}]".format(self.n)
def get_rand_d(self):
qi_l = random.sample(range(self.n), self.d)
ql_l = [self.q_l[i].length() for i in qi_l]
return qi_l, ql_l
def get_sortedrand_d(self):
qi_l = random.sample(range(self.n), self.d)
ql_i_l = sorted([(self.q_l[i].length(), i) for i in qi_l] )
qi_l = [ql_i[1] for ql_i in ql_i_l]
ql_l = [ql_i[0] for ql_i in ql_i_l]
# ql_l = [self.q_l[i].length() for i in qi_l]
return qi_l, ql_l
def run(self):
while True:
j = (yield self.store.get() )
qi_l, ql_l = self.get_sortedrand_d()
s = ql_l
sname = self.sching_m['name']
if sname == 'reptod':
toi_l = qi_l
elif sname == 'reptod-ifidle' or sname == 'reptod-ifidle-wcancel':
i = 0
while i < len(ql_l) and ql_l[i] == 0: i += 1
if i > 0:
toi_l = qi_l[:i]
else:
toi_l = random.sample(qi_l, 1)
# qi_l, ql_l = self.get_rand_d()
# toi_l = [qi_l[0] ]
# toi_l += [qi_l[i+1] for i, l in enumerate(ql_l[1:] ) if l == 0]
elif 'reptod-wcancel' in self.sching_m:
qi_l, ql_l = self.get_rand_d()
a = (len(toi_l) > 1)
self.d_numj_l[a] += 1
if sname == 'reptod-wcancel' or sname == 'reptod-ifidle-wcancel':
self.q_l[toi_l[0]].put(Task(j._id, j.k, j.size) )
for i in toi_l[1:]:
self.q_l[i].put(Task(j._id, j.k, j.size, type_='r', L=self.sching_m['L'] ) )
else:
for i in toi_l:
self.q_l[i].put(Task(j._id, j.k, j.size) )
self.jid_info_m[j._id] = {'ent': self.env.now, 'ts': j.size, 'qid_l': toi_l, 's': s, 'a': a}
def put(self, j):
sim_log(DEBUG, self.env, self, "recved", j)
return self.store.put(j)
def put_c(self, m):
sim_log(DEBUG, self.env, self, "recved", m)
jid = m['jid']
jinfo = self.jid_info_m[jid]
self.jid_info_m[jid]['T'] = self.env.now - jinfo['ent']
self.jid_info_m[jid]['sl'] = (self.env.now - jinfo['ent'] )/jinfo['ts']
for i in jinfo['qid_l']:
if i not in m['deped_from']:
self.q_l[i].put_c({'m': 'cancel', 'jid': jid} )
self.jsl_l.append(t)
self.num_jcompleted += 1
if self.num_jcompleted > self.max_numj:
self.env.exit()
def sim_sl(nf, ar, ns, T, sching_m, ts_dist, sl_dist):
sl, sl2 = 0, 0
for _ in range(nf):
env = simpy.Environment()
jg = JG(env, ar, k_dist=DUniform(1, 1), size_dist=ts_dist, max_sent=T)
mq = MultiQ_wRep(env, ns, T, sching_m, sl_dist)
jg.out = mq
jg.init()
env.run()
sl += np.mean([mq.jid_info_m[t+1]['sl'] for t in range(T) ] )
# print("jid_r_m= \n{}".format(mq.jq.jid_r_m) )
if 'd' in sching_m:
r_numj_l = list(range(sching_m['d'] ) )
for jid, r in mq.jq.jid_r_m.items():
r_numj_l[r] += 1
r_numj_l[0] = T - sum(r_numj_l)
r_freqj_l = [nj/T for nj in r_numj_l]
print("r_freqj_l= {}".format(r_freqj_l) )
d_freqj_l = [nj/T for nj in mq.d_numj_l]
print("d_freqj_l= {}".format(d_freqj_l) )
sl2 += sl**2
return sl/nf, sl2/nf
def plot_reptod_ifidle_vs_withdraw():
ns, d = 12, 4
T = 30000
ts_dist = TPareto(1, 10**10, 1.1) # TPareto(1, 10, 1), DUniform(1, 1)
sl_dist = Dolly() # TPareto(1, 20, 1) # Exp(1) # Exp(1, D=1) # DUniform(1, 1)
alog("ns= {}, d= {}, T= {}, ts_dist= {}, sl_dist= {}".format(ns, d, T, ts_dist, sl_dist) )
ar_l = []
Esl_ifidle_l, Vsl_ifidle_l = [], []
Esl_withdraw_l, Vsl_withdraw_l = [], []
# sl_god_withdraw_l = []
nf = 1
# for ar in np.linspace(0.02, 0.5, 8):
for ar in np.linspace(0.01, 0.17, 8):
print("\n ar= {}".format(ar) )
ar_l.append(ar)
sching_m = {'reptod-ifidle': 0, 'd': d}
sl, sl2 = sim_sl(nf, ar, ns, T, sching_m, ts_dist, sl_dist)
print("sching_m= {} \n sl= {}, sl2= {}".format(sching_m, sl, sl2) )
Esl_ifidle_l.append(sl)
Vsl_ifidle_l.append(sl2 - sl**2)
sching_m = {'reptod-withdraw': 0, 'd': d, 'L': 0}
sl, sl2 = sim_sl(nf, ar, ns, T, sching_m, ts_dist, sl_dist)
print("sching_m= {} \n sl= {}, sl2= {}".format(sching_m, sl, sl2) )
Esl_withdraw_l.append(sl)
Vsl_withdraw_l.append(sl2 - sl**2)
# sching_m = {'reptogod-withdraw': 0, 'd': d, 'L': 0}
# sl = sim_sl(nf, ar, ns, T, sching_m)
# print("sching_m= {} \n sl= {}".format(sching_m, sl) )
# sl_god_withdraw_l.append(sl)
plot.plot(ar_l, Esl_ifidle_l, label='reptod-ifidle', color=next(dark_color), marker=next(marker), linestyle=':', mew=2)
plot.plot(ar_l, Esl_withdraw_l, label='reptod-withdraw', color=next(dark_color), marker=next(marker), linestyle=':', mew=2)
# plot.plot(ar_l, sl_god_withdraw_l, label='reptod-god-withdraw', color=next(dark_color), marker=next(marker), linestyle=':', mew=2)
plot.title(r'$n= {}$, $d= {}$'.format(ns, d) )
plot.xlabel(r'$\lambda$', fonsize=14)
plot.ylabel(r'E[Sl]', fonsize=14)
plot.legend()
plot.savefig("Esl_reptod_ifidle_vs_withdraw.pdf")
plot.gcf().clear()
plot.plot(ar_l, Vsl_ifidle_l, label='reptod-ifidle', color=next(dark_color), marker=next(marker), linestyle=':', mew=2)
plot.plot(ar_l, Vsl_withdraw_l, label='reptod-withdraw', color=next(dark_color), marker=next(marker), linestyle=':', mew=2)
plot.title(r'$n= {}$, $d= {}$'.format(ns, d) )
plot.xlabel(r'$\lambda$', fonsize=14)
plot.ylabel(r'Var[Sl]', fonsize=14)
plot.legend()
plot.savefig("Vsl_reptod_ifidle_vs_withdraw.pdf")
plot.gcf().clear()
log(WARNING, "done.")
if __name__ == "__main__":
plot_reptod_ifidle_vs_withdraw()
| 35.093407
| 134
| 0.588539
|
4a07ff468d92e64b4fdbba2b6f540344d1251699
| 858
|
py
|
Python
|
ebarber/migrations/0007_auto_20200227_1300.py
|
LoukasPap/university_assignment-django
|
c0ccdcb5f12e536037b211c564eab049847175bb
|
[
"MIT"
] | 2
|
2020-08-02T21:23:43.000Z
|
2020-08-05T22:14:57.000Z
|
ebarber/migrations/0007_auto_20200227_1300.py
|
LoukasPap/university_assignment-django
|
c0ccdcb5f12e536037b211c564eab049847175bb
|
[
"MIT"
] | null | null | null |
ebarber/migrations/0007_auto_20200227_1300.py
|
LoukasPap/university_assignment-django
|
c0ccdcb5f12e536037b211c564eab049847175bb
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-02-27 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 3.0.3 `makemigrations` (2020-02-27).

    Shrinks `username` on `Barbershop`, and `name`/`surname`/`username`
    on `Customer`, to `max_length=25`.
    """
    # Must be applied after the previous auto migration for this app.
    dependencies = [
        ('ebarber', '0006_auto_20200226_1734'),
    ]
    operations = [
        migrations.AlterField(
            model_name='barbershop',
            name='username',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='customer',
            name='name',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='customer',
            name='surname',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='customer',
            name='username',
            field=models.CharField(max_length=25),
        ),
    ]
| 25.235294
| 50
| 0.55711
|
4a07ffb7bb7a2f967199760a3c73832c370086ff
| 162
|
py
|
Python
|
src/reader/compressed/gzipped.py
|
misssoft/Fan.Python
|
c9ffffefa44bc9a67dbf39089acc93b14c1d761c
|
[
"MIT"
] | null | null | null |
src/reader/compressed/gzipped.py
|
misssoft/Fan.Python
|
c9ffffefa44bc9a67dbf39089acc93b14c1d761c
|
[
"MIT"
] | null | null | null |
src/reader/compressed/gzipped.py
|
misssoft/Fan.Python
|
c9ffffefa44bc9a67dbf39089acc93b14c1d761c
|
[
"MIT"
] | null | null | null |
import gzip
import sys
# Alias used by the compressed-reader framework to open gzip files.
opener = gzip.open

if __name__ == '__main__':
    # CLI helper: write the concatenated arguments to a gzipped text file.
    # Usage: python gzipped.py <output.gz> [text ...]
    # A context manager guarantees the stream is flushed and closed even if
    # write() raises (the original used a manual open/close pair).
    with gzip.open(sys.argv[1], mode='wt') as f:
        f.write(''.join(sys.argv[2:]))
| 16.2
| 41
| 0.604938
|
4a07ffd01c29f1bcec79eb6d61d995d22d041724
| 2,489
|
py
|
Python
|
internal/twirptest/multiple/multiple1_pb2_twirp.py
|
joshi4/twirp
|
677ba1a47ae278934e5911498e65f917c845dc69
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-06-19T15:57:43.000Z
|
2021-06-19T15:57:43.000Z
|
internal/twirptest/multiple/multiple1_pb2_twirp.py
|
joshi4/twirp
|
677ba1a47ae278934e5911498e65f917c845dc69
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
internal/twirptest/multiple/multiple1_pb2_twirp.py
|
joshi4/twirp
|
677ba1a47ae278934e5911498e65f917c845dc69
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Code generated by protoc-gen-twirp_python v5.8.0, DO NOT EDIT.
# source: multiple1.proto
try:
import httplib
from urllib2 import Request, HTTPError, urlopen
except ImportError:
import http.client as httplib
from urllib.request import Request, urlopen
from urllib.error import HTTPError
import json
from google.protobuf import symbol_database as _symbol_database
import sys
_sym_db = _symbol_database.Default()
class TwirpException(httplib.HTTPException):
    """Error raised when a Twirp RPC fails.

    Attributes:
        code: Twirp error code string (e.g. "internal", "not_found").
        message: Human-readable error description.
        meta: Dict of additional error metadata (possibly empty).
    """

    def __init__(self, code, message, meta):
        self.code = code
        self.message = message
        self.meta = meta
        super(TwirpException, self).__init__(message)

    @classmethod
    def from_http_err(cls, err):
        """Build a TwirpException from an HTTPError's JSON error body.

        Twirp servers encode errors as JSON objects with "code", "msg" and
        optional "meta" keys. If the body is not valid Twirp JSON (e.g. it
        was produced by a proxy or load balancer), fall back to a generic
        "internal" error derived from the HTTP status code.
        """
        try:
            jsonerr = json.load(err)
            code = jsonerr["code"]
            msg = jsonerr["msg"]
            meta = jsonerr.get("meta")
            if meta is None:
                meta = {}
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` still covers the JSON decode and
        # missing-key failures from non-Twirp intermediaries.
        except Exception:
            code = "internal"
            msg = "Error from intermediary with HTTP status code {} {}".format(
                err.code, httplib.responses[err.code],
            )
            meta = {}
        return cls(code, msg, meta)
class Svc1Client(object):
    """Generated Twirp client for the Svc1 service.

    Serializes requests as protobuf and POSTs them to
    ``<server_address>/twirp/<service>/<method>``.
    """

    def __init__(self, server_address):
        """Creates a new client for the Svc1 service.

        Args:
            server_address: The address of the server to send requests to, in
                the full protocol://host:port form.
        """
        # Python 2's urllib required a byte string target; Python 3 takes str.
        if sys.version_info[0] > 2:
            self.__target = server_address
        else:
            self.__target = server_address.encode('ascii')
        self.__service_name = "twirp.internal.twirptest.multiple.Svc1"

    def __make_request(self, body, full_method):
        # POST the serialized protobuf body to /twirp<full_method> and return
        # the raw response bytes; HTTP errors carry a Twirp JSON error body
        # and are re-raised as TwirpException.
        req = Request(
            url=self.__target + "/twirp" + full_method,
            data=body,
            headers={"Content-Type": "application/protobuf"},
        )
        try:
            resp = urlopen(req)
        except HTTPError as err:
            raise TwirpException.from_http_err(err)
        return resp.read()

    def send(self, msg1):
        """Call the Send RPC with a Msg1 request; returns a Msg1 response."""
        # (De)serializers are looked up in protobuf's default symbol database,
        # so the generated *_pb2 module must have been imported beforehand to
        # register the Msg1 symbol.
        serialize = _sym_db.GetSymbol("twirp.internal.twirptest.multiple.Msg1").SerializeToString
        deserialize = _sym_db.GetSymbol("twirp.internal.twirptest.multiple.Msg1").FromString
        full_method = "/{}/{}".format(self.__service_name, "Send")
        body = serialize(msg1)
        resp_str = self.__make_request(body=body, full_method=full_method)
        return deserialize(resp_str)
| 32.324675
| 97
| 0.617919
|
4a08024c9fe78d52e19713e64df9464965e98910
| 633
|
py
|
Python
|
setup.py
|
callat-qcd/hpss
|
54d0a4172015639e339b6e93fb2e18c90dc4d287
|
[
"MIT"
] | null | null | null |
setup.py
|
callat-qcd/hpss
|
54d0a4172015639e339b6e93fb2e18c90dc4d287
|
[
"MIT"
] | 1
|
2020-03-15T14:16:52.000Z
|
2020-03-15T17:36:08.000Z
|
setup.py
|
callat-qcd/hpss
|
54d0a4172015639e339b6e93fb2e18c90dc4d287
|
[
"MIT"
] | 1
|
2020-03-14T23:02:32.000Z
|
2020-03-14T23:02:32.000Z
|
import setuptools
# Read the long description from the README so PyPI renders it verbatim.
# Specify UTF-8 explicitly: without `encoding=`, open() falls back to the
# locale's preferred encoding and can fail on non-ASCII README content
# (e.g. on Windows or in a C-locale CI container).
with open("README.md", "r", encoding="utf-8") as read_me:
    long_description = read_me.read()

setuptools.setup(
    name="hpss",
    version="0.0.1",
    author="Evan Berkowitz",
    author_email="e.berkowitz@fz-juelich.de",
    description="Interface with an HPSS tape archive.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/evanberkowitz/hpss",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ],
)
| 30.142857
| 55
| 0.672986
|
4a0802813e33295569796dd17293d8867df638ac
| 1,479
|
py
|
Python
|
netmiko_2_host.py
|
iks0x1b/netmiko_multidevice_example
|
8cd60652fbfcc0fa2ce9a106d511ff23cdd8a8b5
|
[
"BSD-2-Clause"
] | null | null | null |
netmiko_2_host.py
|
iks0x1b/netmiko_multidevice_example
|
8cd60652fbfcc0fa2ce9a106d511ff23cdd8a8b5
|
[
"BSD-2-Clause"
] | null | null | null |
netmiko_2_host.py
|
iks0x1b/netmiko_multidevice_example
|
8cd60652fbfcc0fa2ce9a106d511ff23cdd8a8b5
|
[
"BSD-2-Clause"
] | null | null | null |
import json
import os
import getpass
from netmiko import ConnectHandler
from datetime import datetime
def main():
    """Prompt for credentials, then run show commands on every host.

    Reads connection parameters from ``hosts.json`` (top-level key
    ``hosts``) and, for each device, sends a device-type-specific list of
    commands over SSH via netmiko, printing each command's output.
    """
    # auser = getpass.getuser()
    auser = input('Username:')
    apass = getpass.getpass()
    secret = apass  # enable/secret password assumed identical to login
    # Commands to run, keyed by netmiko device_type; a single mapping
    # replaces the original duplicated if/elif dispatch per platform.
    commands_by_type = {
        "vyos_ssh": [
            "set terminal length 0",
            "show config",
            "show interfaces"
        ],
        "paloalto_panos": [
            "set cli pager off",
            "show config running",
            "show arp all"
        ],
    }
    with open("hosts.json") as connectionargs:
        adata = json.load(connectionargs)
    for host in adata['hosts']:
        target = {
            'device_type': host['device_type'],
            'ip': host['address'],
            'host': host['hostname'],
            'username': auser,
            'password': apass,
            'secret': secret
        }
        net_connect = ConnectHandler(**target)
        # Unknown device types are skipped rather than crashing the run.
        for command in commands_by_type.get(target['device_type'], []):
            output = net_connect.send_command(command)
            print(output)


if __name__ == "__main__":
    main()
| 29.58
| 63
| 0.494929
|
4a08028e96126d0d84d79a36ff0b232715a4bb95
| 17,638
|
py
|
Python
|
sample/1_xml2conll_offset.py
|
Huiweizhou/keras_bc6_track1
|
0250b93952777cbd627f8b98beb1a1585b749664
|
[
"BSD-2-Clause"
] | null | null | null |
sample/1_xml2conll_offset.py
|
Huiweizhou/keras_bc6_track1
|
0250b93952777cbd627f8b98beb1a1585b749664
|
[
"BSD-2-Clause"
] | 5
|
2020-09-25T22:43:11.000Z
|
2022-02-09T23:40:30.000Z
|
sample/1_xml2conll_offset.py
|
Huiweizhou/keras_bc6_track1
|
0250b93952777cbd627f8b98beb1a1585b749664
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
BioC(XML)格式ConNLL格式
从 .XML 原始document文件中解析获取训练数据和标签文件
1、通过offset,将句子中所有的实体用一对标签 <B^>entity<^I> 进行标记
注意:offset 是在二进制编码下索引的,要对句子进行编码 s=s.encode(‘utf-8’)
2、对于嵌套实体(offset 相同),仅保留其中长度较长的实体
3、对句子中的标点符号 !\”#$%‘()*+,-./:;<=>?@[\\]_`{|}~ 进行切分;^ 保留用于实体标签!!
4、利用GENIA tagger工具对标记过的语料进行分词和词性标注
5、根据预处理语料的标记 <B^><^I> 获取BIO标签
'''
from xml.dom.minidom import parse
import re
import codecs
import os
from tqdm import tqdm
# def entityReplace(splited_sen, splited_tagged, i, item, sen_length):
# '''
# 将句子中的实体用<></>标签包裹
# '''
# # 1、先处理嵌套实体的问题
# if B_tag in splited_sen[i] and I_tag in splited_sen[i]:
# k1 = splited_sen[i].index(B_tag)+4
# k2 = splited_sen[i].index(I_tag)
# splited_sen[i] = splited_sen[i][:k1] + item + splited_sen[i][k2:]
# elif B_tag in splited_sen[i]:
# k1 = splited_sen[i].index(B_tag)+4
# splited_sen[i] = splited_sen[i][:k1] + item
# elif I_tag in splited_sen[i]:
# k2 = splited_sen[i].index(I_tag)
# splited_sen[i] = item + splited_sen[i][k2:]
# else:
# splited_sen[i] = item
# # 2、对于嵌入在单词内部的实体,包裹标签后 需要重新调整句子的长度
# gap = i+1
# diff = len(splited_tagged) - sen_length # 标记后的句子与原始句子的长度差
# while diff:
# splited_sen.insert(gap, splited_tagged[gap])
# diff-=1
# gap+=1
def xx(entity):
    """Return True if *entity* is a gene/protein annotation ID.

    Matches IDs carrying a database accession prefix ("NCBI gene:",
    "Uniprot:") or a bare entity-type prefix ("gene:", "protein:").
    """
    # str.startswith accepts a tuple of prefixes, so one call replaces the
    # original chained `or` of four startswith tests; the True/False
    # return values are preserved.
    return entity.startswith(('NCBI gene:', 'Uniprot:', 'gene:', 'protein:'))
def readXML(files, BioC_PATH):
    """Parse BioC XML documents and emit marker-tagged training text.

    For every <passage> in every document, each gene/protein annotation is
    located via its byte offset/length (offsets index into the UTF-8 byte
    encoding of the passage text) and wrapped in the global marker pairs
    B_tag/I_tag. Three parallel files are written under the global
    ``train_path``: ``train.txt`` (tagged sentences),
    ``train_goldenID.txt`` (tab-separated gold IDs per sentence) and
    ``train_raw.txt`` (the untagged sentences).

    Args:
        files: entry names inside ``BioC_PATH``; non-directories are parsed.
        BioC_PATH: directory holding the BioC ``*.xml`` documents.
    """
    num_sentence = 0
    num_file = 0
    num_annotations_pro = 0
    num_entitytype_pro = 0
    num_annotations_gene = 0
    num_entitytype_gene = 0
    passages_list = []
    raw_sentence_list = []
    id_list_list = []
    for file in files:  # walk every entry in the folder
        if not os.path.isdir(file):  # only open entries that are not directories
            f = BioC_PATH + "/" + file
            DOMTree = parse(f)  # parse the XML document with the minidom parser
            collection = DOMTree.documentElement  # root element
            # fetch all <document> elements in the collection
            documents = collection.getElementsByTagName("document")
            for document in documents:
                doc_id = document.getElementsByTagName("id")[0].childNodes[0].data
                passages = document.getElementsByTagName("passage")
                # print("*****passage*****")
                for passage in passages:
                    text = passage.getElementsByTagName('text')[0]
                    sentence_byte = text.childNodes[0].data.encode("utf-8")  # byte
                    sentence_str = sentence_byte.decode("utf-8")  # str
                    raw_sentence_list.append(sentence_str)
                    num_sentence += 1
                    id_list = []
                    offset_list = []
                    length_list = []
                    entity_list = []
                    annotations = passage.getElementsByTagName('annotation')
                    for annotation in annotations:
                        info = annotation.getElementsByTagName("infon")[0]
                        ID = info.childNodes[0].data
                        location = annotation.getElementsByTagName("location")[0]
                        offset = int(location.getAttribute("offset"))
                        length = int(location.getAttribute("length"))
                        txt = annotation.getElementsByTagName("text")[0]
                        entity = txt.childNodes[0].data
                        # offsets are byte-based: the decoded slice must equal the entity text
                        assert len(sentence_byte[offset:offset+length].decode('utf-8'))==len(entity)
                        id_list.append(ID)
                        offset_list.append(offset)
                        length_list.append(length)
                        entity_list.append(entity)
                    # sort all annotation arrays by offset, descending, so later
                    # tag insertions never shift earlier offsets
                    offset_sorted = sorted(enumerate(offset_list), key=lambda x:x[1], reverse=True)
                    offset_list = [x[1] for x in offset_sorted]  # sorted offsets
                    offset_idx = [x[0] for x in offset_sorted]  # original indices
                    length_list = [length_list[idx] for idx in offset_idx]
                    id_list = [id_list[idx] for idx in offset_idx]
                    entity_list = [entity_list[idx] for idx in offset_idx]
                    # if num_sentence in [101, 5268, 8327, 4628]:
                    if num_sentence in [5268]:
                        continue # the golden label here appears to be wrong
                    print(doc_id)
                    print(offset_list)
                    print(id_list)
                    # Nested entities: when two annotations share a start or an
                    # end offset, only one of them is kept (see below).
                    offset_temp = []
                    offset_remove = []
                    for i in range(len(offset_list)):
                        offset1 = offset_list[i]
                        length1 = length_list[i]
                        for j in range(i+1, len(offset_list)):
                            offset2 = offset_list[j]
                            length2 = length_list[j]
                            if offset1==offset2:  # entities share the same start
                                offset_temp.append([i, j])
                            elif offset1+length1==offset2+length2:  # entities share the same end
                                offset_temp.append([i, j])
                    while 1:
                        if offset_temp:
                            idx1 = offset_temp[0][0]
                            idx2 = offset_temp[0][1]
                            ID1 = id_list[idx1]
                            ID2 = id_list[idx2]
                            # prefer the protein/gene annotation; otherwise keep the longer one
                            if xx(ID1) and not xx(ID2):
                                offset_remove.append(idx2)
                            elif not xx(ID1) and xx(ID2):
                                offset_remove.append(idx1)
                            else:
                                idx3 = idx2 if length_list[idx1]>length_list[idx2] else idx1
                                offset_remove.append(idx3)
                            offset_temp = offset_temp[1:]
                        else:
                            break
                    # drop the entities collected in offset_remove
                    if offset_remove:
                        # print('{}: {}'.format(doc_id, offset_list))
                        offset_remove = sorted(offset_remove, reverse=True)
                        for idxidx in offset_remove:
                            offset_list.pop(idxidx)
                            length_list.pop(idxidx)
                            id_list.pop(idxidx)
                            entity_list.pop(idxidx)
                        # print('{}: {}'.format(doc_id, offset_list))
                        # print(entity_list)
                    # wrap every remaining entity in a <B^>entity<^I> marker pair
                    tmp = sentence_byte
                    id_list_only = []  # keep only gene/protein IDs
                    for i in range(len(offset_list)):
                        offset = offset_list[i]
                        length = length_list[i]
                        ID = id_list[i]
                        entity = entity_list[i]
                        # tmp alternates bytes/str across iterations; offsets need bytes
                        if isinstance(tmp, str):
                            tmp = tmp.encode("utf-8")
                        if ID.startswith('Uniprot:') or ID.startswith('protein:'):
                            if ID.startswith('Uniprot:'):
                                num_annotations_pro+=1
                            elif ID.startswith('protein:'):
                                num_entitytype_pro+=1
                            id_list_only.append(ID.strip('\n').strip())
                            # # This solution will strip out (ignore) the characters in
                            # # question returning the string without them.
                            left = tmp[:offset].decode("utf-8", errors='ignore')
                            mid = tmp[offset:offset + length].decode("utf-8", errors='ignore')
                            right = tmp[offset + length:].decode("utf-8", errors='ignore')
                            tmp = left + ' ' + B_tag[0] + mid + I_tag[0] + ' ' + right
                            tmp = tmp.replace(' ', ' ').replace(' ', ' ')
                        elif ID.startswith('NCBI gene:') or ID.startswith('gene:'):
                            if ID.startswith('NCBI gene:'):
                                num_annotations_gene+=1
                            elif ID.startswith('gene:'):
                                num_entitytype_gene+=1
                            id_list_only.append(ID.strip('\n').strip())
                            # # This solution will strip out (ignore) the characters in
                            # # question returning the string without them.
                            left = tmp[:offset].decode("utf-8", errors='ignore')
                            mid = tmp[offset:offset + length].decode("utf-8", errors='ignore')
                            right = tmp[offset + length:].decode("utf-8", errors='ignore')
                            tmp = left + ' ' + B_tag[1] + mid + I_tag[1] + ' ' + right
                            tmp = tmp.replace(' ', ' ').replace(' ', ' ')
                        else:
                            # other entity categories are ignored for now
                            continue
                    if not id_list_only:
                        id_list_only.append('')  # placeholder even when no entity
                    if isinstance(tmp, bytes):
                        tmp = tmp.decode("utf-8")
                    tmp = ' '.join(tmp.split())  # rebuild with single spaces
                    # split on punctuation, but keep ^ for the marker tags
                    for special in "!\"#$%'()*+,-./:;<=>?@[\\]_`{|}~":
                        tmp = tmp.replace(special, ' '+special+' ')
                    tmp = tmp.replace('°C', ' °C ')
                    tmp = tmp.replace(' ', ' ').replace(' ', ' ')
                    if '' in tmp.split():
                        print('tmp中存在空字符error\n')
                    passages_list.append(tmp)
                    id_list_list.append(id_list_only)
    with codecs.open(train_path + "/" + 'train.txt', 'w', encoding='utf-8') as f:
        for sentence in passages_list:
            f.write(sentence)
            f.write('\n')
    with codecs.open(train_path + "/" + 'train_goldenID.txt', 'w', encoding='utf-8') as f:
        for sentence in id_list_list:
            f.write('\t'.join(sentence))
            f.write('\n')
    with codecs.open(train_path + "/" + 'train_raw.txt', 'w', encoding='utf-8') as f:
        for sentence in raw_sentence_list:
            f.write(sentence)
            f.write('\n')
    passages_list = []
    del passages_list
    print('标注proID的实体的个数:{}'.format((num_annotations_pro)))
    print('标注pro类型的实体的个数:{}'.format((num_entitytype_pro)))
    print('标注geneID的实体的个数:{}'.format((num_annotations_gene)))
    print('标注gene类型的实体的个数:{}'.format((num_entitytype_gene)))
    print('passage 总数: {}'.format(num_sentence)) # 13697 in the full corpus
# 利用GENIA tagger工具对标记过的语料进行预处理(分词+POS+CHUNK+NER)
# 得到 train.genia 文件
def judge(word, label_sen, flag):
    '''
    Decide the BIO label for one GENIA-tagger token and strip marker tags.

    Appends the chosen label to *label_sen* (in place) and returns the
    token with all B_tag/I_tag markers removed, plus the updated *flag*:
    0 = entity ended / outside, 1 = entity start or inside,
    2 = special bracketed protein entity, 21 = special bracketed gene entity.

    Case 1: nested entities, e.g. pHA annotated as: B-p B-ha-I-I.
        The latter annotation form is hard to resolve:
        (a) one token holds several B- tags, e.g. B-PROTEIN-B-GENE-Spi-I-GENE
        (b) one token holds several I- tags, e.g. B-GENE/RARα/I-GENE/I-PROTEIN
        Fix 1: compare the counts of B- and I- markers and follow the majority.
    Case 2: the entity word itself ends in "B", e.g. B-GENE-SipB-I-GENE.
        Fix 2: change the marker form from <B-XXX-><-I-XXX> to <B-*/></I-*>;
        golden entities of length 1 are dropped.
    Case 3: special entities such as [14C], annotated as: B-[ 14C ]-I.
        GENIA tagger tokenization turns this into "B- [ 14C ] -I", i.e. the
        markers are separated from the entity.
        Fix 3: handled here while extracting the BIO labels.
    '''
    previous = None
    changable = False
    if B_tag[0] in word or I_tag[0] in word or B_tag[1] in word or I_tag[1] in word:
        if word==B_tag[0]:
            # bare protein B-marker whose tags were split off by tokenization
            flag=2
            changable = 1
            print('B_protein')
        elif word==I_tag[0] or word==I_tag[1]:
            # bare closing marker separated from its entity by tokenization
            flag=0
            changable = 1
            print(word)
        elif word==B_tag[1]:
            flag=21
            changable = 1
            print('B_gene')
        if not changable:
            if word.startswith(B_tag[1]):
                if word.count(B_tag[1]) > word.count(I_tag[1]):
                    # nested entity, case (a)
                    label_sen.append('B-gene')
                    flag=1
                    changable = 1
                elif word.count(B_tag[1]) < word.count(I_tag[1]): # entity end
                    # nested entity, case (b)
                    label_sen.append('I-gene')
                    flag=0
                    changable = 1
                else: # single entity
                    if flag:
                        label_sen.append('I-gene')
                        flag=1
                    else:
                        label_sen.append('B-gene')
                        flag=0
                    changable = 1
            elif word.startswith(B_tag[0]):
                if word.count(B_tag[0]) > word.count(I_tag[0]):
                    # nested entity, case (a)
                    label_sen.append('B-protein')
                    flag=1
                    changable = 1
                elif word.count(B_tag[0]) < word.count(I_tag[0]): # entity end
                    # nested entity, case (b)
                    label_sen.append('I-protein')
                    flag=0
                    changable = 1
                else: # single entity
                    if flag:
                        label_sen.append('I-protein')
                        flag=1
                    else:
                        label_sen.append('B-protein')
                        flag=0
                    changable = 1
            elif word.endswith(I_tag[1]):
                # covers both ending forms: (a) /I-XXX  (b) /I-XXX/I-XXX
                label_sen.append('I-gene')
                flag=0
                changable = 1
            elif word.endswith(I_tag[0]):
                # covers both ending forms: (a) /I-XXX  (b) /I-XXX/I-XXX
                label_sen.append('I-protein')
                flag=0
                changable = 1
            else:
                # token contains a marker substring but matches no known pattern
                pass
        if changable:
            word = word.replace(B_tag[1], '').replace(I_tag[1], '')
            word = word.replace(B_tag[0], '').replace(I_tag[0], '')
    else:
        if flag:
            if flag==2: # inside the bracketed '[entity]' protein form
                # print(word, flag)
                label_sen.append('B-protein')
                flag=1
            elif flag==21:
                label_sen.append('B-gene')
                flag=1
            else: # flag=1
                # NOTE(review): only 'B-*' previous labels are extended here;
                # a token following an 'I-*' label gets no label appended —
                # confirm this is intended.
                if label_sen[-1]=='B-protein':
                    label_sen.append('I-protein')
                elif label_sen[-1]=='B-gene':
                    label_sen.append('I-gene')
                flag=1
        else:
            label_sen.append('O')
            flag=0
    return word, flag
# 根据预处理语料的标记 <B-xxx-></-I-xxx> 获取BIO标签
def getLabel(dataPath):
    """Derive BIO labels from the <B-xxx-></-I-xxx> markers.

    Reads ``train.genia.txt`` (GENIA tagger output, one token per line,
    tab-separated columns, blank line between sentences), writes
    ``train.out.txt`` (token, tagger features, BIO label per line) and
    ``label.txt`` (one condensed B/I/O character string per sentence).
    """
    flag = 0 # 0: entity ended  1: inside an entity  2: special "[...]" entity
    label_sen = []
    sent = []
    geniaPath = dataPath+ '/' + 'train.genia.txt'
    outputPath = dataPath+ '/' + 'train.out.txt'
    with codecs.open(geniaPath, 'r', encoding='utf-8') as data:
        for line in data:
            if not line=='\n':
                words = line.split('\t')[0]
                # judge() appends the BIO label to label_sen and strips markers
                word, flag = judge(words, label_sen, flag)
                if not word:
                    # skip tokens that were nothing but a bare B^ / ^I marker
                    continue
                sent.append(word + '\t' + '\t'.join(line.split('\t')[2:-1]) + '\t' + label_sen[-1] + '\n')
            else:
                # label.append(label_sen)
                # blank line = sentence boundary; reset per-sentence state
                flag = 0
                label_sen = []
                sent.append('\n')
    with codecs.open(outputPath, 'w', encoding='utf-8') as f:
        for line in sent:
            f.write(line)
    # generate the standalone BIO label file (first char of each label only)
    label_sen = []
    ff = open(dataPath + '/' +'label.txt', 'w')
    with codecs.open(dataPath + '/' +'train.out.txt', encoding='utf-8') as f:
        lines = f.readlines()
        for line in tqdm(lines):
            if line=='\n':
                ff.write(''.join(label_sen))
                ff.write('\n')
                label_sen = []
            else:
                label = line.split('\t')[-1]
                label_sen.append(label.strip('\n')[0])
    ff.close()
if __name__ == '__main__':
    # Entity boundary markers: index 0 = protein, index 1 = gene.
    # NOTE: the hyphen in these strings is U+2010 ('‐'), deliberately
    # different from ASCII '-' so the markers survive punctuation splitting.
    B_tag = ['B‐^', 'B‐^^'] # '‐' != '-'
    I_tag = ['^‐I', '^^‐I']
    train_path = r'/Users/ningshixian/Desktop/BC6_Track1/BioIDtraining_2/train'
    BioC_PATH = r'/Users/ningshixian/Desktop/BC6_Track1/BioIDtraining_2/caption_bioc'
    files = os.listdir(BioC_PATH) # all file names under the BioC folder
    files.sort()
    readXML(files, BioC_PATH)
    print("完结撒花====")
    '''
    % cd geniatagger-3.0.2
    % ./geniatagger /Users/ningshixian/Desktop/'BC6_Track1'/BioIDtraining_2/train/train.txt \
    > /Users/ningshixian/Desktop/'BC6_Track1'/BioIDtraining_2/train/train.genia.txt
    '''
    # getLabel(train_path)
    # print("完结撒花====")
    # with codecs.open(train_path + "/" + 'train_goldenID.txt', encoding='utf-8') as f:
    #     lines1 = f.readlines()
    # with codecs.open(train_path + '/' + 'label.txt', encoding='utf-8') as f:
    #     lines2 = f.readlines()
    # for i in range(len(lines1)):
    #     sentence1 = lines1[i].strip('\n')
    #     sentence2 = lines2[i].strip('\n')
    #     count1 = len(sentence1.split('\t')) if sentence1 else 0
    #     count2 = sentence2.count('B')
    #     if not count1 == count2:
    #         print(i)
    #         print(count1, count2)
    #         print(sentence1)
    #         print(sentence2)
| 39.995465
| 106
| 0.468931
|
4a0802aca0f7bd10c36f2401d23beac59b054903
| 21,185
|
py
|
Python
|
mindspore/python/mindspore/run_check/_check_version.py
|
httpsgithu/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | 1
|
2022-02-23T09:13:43.000Z
|
2022-02-23T09:13:43.000Z
|
mindspore/python/mindspore/run_check/_check_version.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
mindspore/python/mindspore/run_check/_check_version.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""version and config check"""
import os
import sys
import time
import subprocess
import glob
from pathlib import Path
from abc import abstractmethod, ABCMeta
from packaging import version
import numpy as np
from mindspore import log as logger
from ..version import __version__
from ..default_config import __package_name__
class EnvChecker(metaclass=ABCMeta):
    """basic class for environment check"""

    @abstractmethod
    def check_env(self, e):
        """Emit backend-specific diagnostics for exception *e*, then re-raise it."""
        pass

    @abstractmethod
    def set_env(self):
        """Prepare the process environment (PATH, LD_LIBRARY_PATH, ...) for the backend."""
        pass

    @abstractmethod
    def check_version(self):
        """Warn when the installed backend version does not match this MindSpore build."""
        pass
class GPUEnvChecker(EnvChecker):
    """GPU environment check.

    Locates the CUDA/cuDNN libraries actually linked by _c_expression.so
    (via ldd) and warns when their versions do not match this build.
    """

    def __init__(self):
        # CUDA versions this MindSpore build supports.
        self.version = ["10.1", "11.1"]
        self.lib_key_to_lib_name = {'libcu': 'libcuda.so'}
        # environment variables consulted during the check
        self.path = os.getenv("PATH")
        self.ld_lib_path = os.getenv("LD_LIBRARY_PATH")
        # state discovered by probing the installed libraries
        self.v = "0"
        self.cuda_lib_path = self._get_lib_path("libcu")
        self.cuda_bin_path = self._get_bin_path("cuda")
        self.cudnn_lib_path = self._get_lib_path("libcudnn")

    def check_env(self, e):
        # GPU backend has no extra diagnostics; just propagate the error.
        raise e

    def set_env(self):
        # No environment variables need to be set for the GPU backend.
        return

    def _get_bin_path(self, bin_name):
        """Get bin path by bin name."""
        if bin_name == "cuda":
            return self._get_cuda_bin_path()
        return []

    def _get_cuda_bin_path(self):
        """Get cuda bin path by lib path."""
        path_list = []
        for path in self.cuda_lib_path:
            path = os.path.abspath(path.strip()+"/bin/")
            if Path(path).is_dir():
                path_list.append(path)
        return np.unique(path_list)

    def _get_nvcc_version(self, is_set_env):
        """Get cuda version by nvcc command."""
        try:
            # NOTE(review): "--version | grep release" is passed as a single
            # argv element, so no shell pipeline runs — nvcc receives it
            # verbatim. Confirm this is intended.
            nvcc_result = subprocess.run(["nvcc", "--version | grep release"],
                                         timeout=3, text=True, capture_output=True, check=False)
        except OSError:
            # nvcc not on PATH: retry once after prepending the discovered
            # CUDA bin directory.
            if not is_set_env:
                for path in self.cuda_bin_path:
                    if Path(path + "/nvcc").is_file():
                        os.environ['PATH'] = path + ":" + os.environ['PATH']
                        return self._get_nvcc_version(True)
            return ""
        result = nvcc_result.stdout
        # NOTE(review): split("release")[1] raises IndexError if the first
        # non-empty stdout line lacks "release" — confirm acceptable.
        for line in result.split('\n'):
            if line:
                return line.strip().split("release")[1].split(",")[0].strip()
        return ""

    def _get_cudnn_version(self):
        """Get cudnn version by libcudnn.so."""
        cudnn_version = []
        for path in self.cudnn_lib_path:
            real_path = glob.glob(path + "/lib*/libcudnn.so.*.*")
            if real_path == []:
                continue
            ls_cudnn = subprocess.run(["ls", real_path[0]], timeout=10, text=True,
                                      capture_output=True, check=False)
            if ls_cudnn.returncode == 0:
                cudnn_version = ls_cudnn.stdout.split('/')[-1].strip('libcudnn.so.').strip().split('.')
                if len(cudnn_version) == 2:
                    cudnn_version.append('0')
                break
        # e.g. ['7', '6', '5'] -> "765"; only the first three digits are kept
        # so the caller can compare against thresholds like 760 / 800.
        version_str = ''.join([n for n in cudnn_version])
        return version_str[0:3]

    def _get_cudart_version(self):
        """Get cuda runtime version by libcudart.so."""
        for path in self.cuda_lib_path:
            real_path = glob.glob(path + "/lib*/libcudart.so.*.*.*")
            if real_path == []:
                continue
            ls_cudart = subprocess.run(["ls", real_path[0]], timeout=10, text=True,
                                       capture_output=True, check=False)
            if ls_cudart.returncode == 0:
                self.v = ls_cudart.stdout.split('/')[-1].strip('libcudart.so.').strip()
                break
        return self.v

    def check_version(self):
        """Check cuda version."""
        version_match = False
        if self._check_version():
            version_match = True
        if not version_match:
            if self.v == "0":
                # "0" is the sentinel set in __init__: no libcudart found at all.
                logger.warning("Can not found cuda libs, please confirm that the correct "
                               "cuda version has been installed, you can refer to the "
                               "installation guidelines: https://www.mindspore.cn/install")
            else:
                logger.warning(f"MindSpore version {__version__} and cuda version {self.v} does not match, "
                               "please refer to the installation guide for version matching "
                               "information: https://www.mindspore.cn/install")
        nvcc_version = self._get_nvcc_version(False)
        if nvcc_version and (nvcc_version not in self.version):
            logger.warning(f"MindSpore version {__version__} and nvcc(cuda bin) version {nvcc_version} "
                           "does not match, please refer to the installation guide for version matching "
                           "information: https://www.mindspore.cn/install")
        cudnn_version = self._get_cudnn_version()
        if cudnn_version and int(cudnn_version) < 760:
            logger.warning(f"MindSpore version {__version__} and cudDNN version {cudnn_version} "
                           "does not match, please refer to the installation guide for version matching "
                           "information: https://www.mindspore.cn/install. The recommended version is "
                           "CUDA10.1 with cuDNN7.6.x and CUDA11.1 with cuDNN8.0.x")
        # CUDA 11+ requires cuDNN 8.x
        if cudnn_version and int(cudnn_version) < 800 and int(str(self.v).split('.')[0]) > 10:
            logger.warning(f"CUDA version {self.v} and cuDNN version {cudnn_version} "
                           "does not match, please refer to the installation guide for version matching "
                           "information: https://www.mindspore.cn/install. The recommended version is "
                           "CUDA11.1 with cuDNN8.0.x")

    def _check_version(self):
        """Check cuda version"""
        v = self._get_cudart_version()
        v = version.parse(v)
        # compare only major.minor against the supported list
        v_str = str(v.major) + "." + str(v.minor)
        if v_str not in self.version:
            return False
        return True

    def _get_lib_path(self, lib_name):
        """Get gpu lib path by ldd command."""
        path_list = []
        current_path = os.path.split(os.path.realpath(__file__))[0]
        mindspore_path = os.path.join(current_path, "../")
        try:
            real_path = glob.glob(mindspore_path + "/_c_expression*.so*")
            if real_path == []:
                logger.error(f"{self.lib_key_to_lib_name[lib_name]} (need by mindspore-gpu) is not found, please "
                             f"confirm that _c_expression.so is in directory:{mindspore_path} and the correct cuda "
                             "version has been installed, you can refer to the installation "
                             "guidelines: https://www.mindspore.cn/install")
                return path_list
            # ldd the extension module and grep for the library of interest
            ldd_r = subprocess.Popen(['ldd', real_path[0]], stdout=subprocess.PIPE)
            ldd_result = subprocess.Popen(['grep', lib_name], stdin=ldd_r.stdout, stdout=subprocess.PIPE)
            result = ldd_result.communicate()[0].decode()
            for i in result.split('\n'):
                path = i.partition("=>")[2]
                if path.lower().find("not found") > 0:
                    logger.warning(f"Cuda {self.version} version(need by mindspore-gpu) is not found, please confirm "
                                   "that the path of cuda is set to the env LD_LIBRARY_PATH, please refer to the "
                                   "installation guidelines: https://www.mindspore.cn/install")
                    continue
                path = path.partition(lib_name)[0]
                if path:
                    path_list.append(os.path.abspath(path.strip() + "../"))
            return np.unique(path_list)
        except subprocess.TimeoutExpired:
            logger.warning("Failed to check cuda version due to the ldd command timeout, please confirm that "
                           "the correct cuda version has been installed, you can refer to the "
                           "installation guidelines: https://www.mindspore.cn/install")
            return path_list

    def _read_version(self, file_path):
        """Get gpu version info in version.txt."""
        with open(file_path, 'r') as f:
            all_info = f.readlines()
            for line in all_info:
                if line.startswith("CUDA Version"):
                    self.v = line.strip().split("CUDA Version")[1]
                    return self.v
        return self.v
class AscendEnvChecker(EnvChecker):
    """ascend environment check

    Probes the three standard Ascend install locations (nnae, ascend-toolkit,
    hisi) and configures PATH/PYTHONPATH/LD_LIBRARY_PATH accordingly.
    """

    def __init__(self):
        # Ascend software package versions this MindSpore build supports.
        self.version = ["1.81"]
        atlas_nnae_version = "/usr/local/Ascend/nnae/latest/fwkacllib/version.info"
        atlas_toolkit_version = "/usr/local/Ascend/ascend-toolkit/latest/fwkacllib/version.info"
        hisi_fwk_version = "/usr/local/Ascend/latest/fwkacllib/version.info"
        if os.path.exists(atlas_nnae_version):
            # atlas default path
            self.fwk_path = "/usr/local/Ascend/nnae/latest/fwkacllib"
            self.op_impl_path = "/usr/local/Ascend/nnae/latest/opp/op_impl/built-in/ai_core/tbe"
            self.tbe_path = self.fwk_path + "/lib64"
            self.cce_path = self.fwk_path + "/ccec_compiler/bin"
            self.fwk_version = atlas_nnae_version
            self.op_path = "/usr/local/Ascend/nnae/latest/opp"
            self.aicpu_path = "/usr/local/Ascend/nnae/latest"
        elif os.path.exists(atlas_toolkit_version):
            # atlas default path
            self.fwk_path = "/usr/local/Ascend/ascend-toolkit/latest/fwkacllib"
            self.op_impl_path = "/usr/local/Ascend/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe"
            self.tbe_path = self.fwk_path + "/lib64"
            self.cce_path = self.fwk_path + "/ccec_compiler/bin"
            self.fwk_version = atlas_toolkit_version
            self.op_path = "/usr/local/Ascend/ascend-toolkit/latest/opp"
            self.aicpu_path = "/usr/local/Ascend/ascend-toolkit/latest"
        elif os.path.exists(hisi_fwk_version):
            # hisi default path
            self.fwk_path = "/usr/local/Ascend/latest/fwkacllib"
            self.op_impl_path = "/usr/local/Ascend/latest/opp/op_impl/built-in/ai_core/tbe"
            self.tbe_path = self.fwk_path + "/lib64"
            self.cce_path = self.fwk_path + "/ccec_compiler/bin"
            self.fwk_version = hisi_fwk_version
            self.op_path = "/usr/local/Ascend/latest/opp"
            self.aicpu_path = "/usr/local/Ascend/latest"
        else:
            # custom or unknown environment: empty paths make set_env() fall
            # back to _check_env() diagnostics only.
            self.fwk_path = ""
            self.op_impl_path = ""
            self.tbe_path = ""
            self.cce_path = ""
            self.fwk_version = ""
            self.op_path = ""
            self.aicpu_path = ""
        # environment variables consulted during the check
        self.path = os.getenv("PATH")
        self.python_path = os.getenv("PYTHONPATH")
        self.ld_lib_path = os.getenv("LD_LIBRARY_PATH")
        self.ascend_opp_path = os.getenv("ASCEND_OPP_PATH")
        self.ascend_aicpu_path = os.getenv("ASCEND_AICPU_PATH")
        # substrings each env var must contain for a correct setup
        self.path_check = "/fwkacllib/ccec_compiler/bin"
        self.python_path_check = "opp/op_impl/built-in/ai_core/tbe"
        self.ld_lib_path_check_fwk = "/fwkacllib/lib64"
        self.ld_lib_path_check_addons = "/add-ons"
        self.ascend_opp_path_check = "/op"
        self.v = ""

    def check_env(self, e):
        # Emit per-variable diagnostics before propagating the error.
        self._check_env()
        raise e

    def check_version(self):
        """Warn when the installed Ascend package version is unsupported."""
        if not Path(self.fwk_version).is_file():
            # custom install location: version.info not found, skip the check
            logger.warning("Using custom Ascend AI software package (Ascend Data Center Solution) path, package "
                           "version checking is skipped, please make sure Ascend AI software package (Ascend Data "
                           "Center Solution) version is supported, you can reference to the installation guidelines "
                           "https://www.mindspore.cn/install")
            return
        v = self._read_version(self.fwk_version)
        if v not in self.version:
            v_list = str([x for x in self.version])
            logger.warning(f"MindSpore version {__version__} and Ascend AI software package (Ascend Data Center "
                           f"Solution)version {v} does not match, the version of software package expect one of "
                           f"{v_list}, please reference to the match info on: https://www.mindspore.cn/install")

    def check_deps_version(self):
        """
        te, topi, hccl wheel package version check
        in order to update the change of 'LD_LIBRARY_PATH' env, run a sub process
        """
        input_args = ["--mindspore_version=" + __version__]
        for v in self.version:
            input_args.append("--supported_version=" + v)
        deps_version_checker = os.path.join(os.path.split(os.path.realpath(__file__))[0],
                                            "_check_deps_version.py")
        call_cmd = [sys.executable, deps_version_checker] + input_args
        try:
            process = subprocess.run(call_cmd, timeout=3, text=True, capture_output=True, check=False)
            if process.stdout.strip() != "":
                logger.warning(process.stdout.strip())
                # give the user a moment to notice the mismatch warning
                warning_countdown = 3
                for i in range(warning_countdown, 0, -1):
                    logger.warning(f"Please pay attention to the above warning, countdown: {i}")
                    time.sleep(1)
        except subprocess.TimeoutExpired:
            logger.info("Package te, topi, hccl version check timed out, skip.")

    def set_env(self):
        """Export the env vars the Ascend backend needs; raise if paths are missing."""
        if not self.tbe_path:
            self._check_env()
            return
        try:
            import te  # pylint: disable=unused-import
        # pylint: disable=broad-except
        except Exception:
            # te not importable yet: expose the fwkacllib libs and retry later
            if Path(self.tbe_path).is_dir():
                if os.getenv('LD_LIBRARY_PATH'):
                    os.environ['LD_LIBRARY_PATH'] = self.tbe_path + ":" + os.environ['LD_LIBRARY_PATH']
                else:
                    os.environ['LD_LIBRARY_PATH'] = self.tbe_path
            else:
                raise EnvironmentError(
                    f"No such directory: {self.tbe_path}, Please check if Ascend AI software package (Ascend Data "
                    "Center Solution) is installed correctly.")
        # check te version after set te env
        self.check_deps_version()
        if Path(self.op_impl_path).is_dir():
            # python path for sub process
            if os.getenv('PYTHONPATH'):
                os.environ['PYTHONPATH'] = self.op_impl_path + ":" + os.environ['PYTHONPATH']
            else:
                os.environ['PYTHONPATH'] = self.op_impl_path
            # sys path for this process
            sys.path.append(self.op_impl_path)
            os.environ['TBE_IMPL_PATH'] = self.op_impl_path
        else:
            raise EnvironmentError(
                f"No such directory: {self.op_impl_path}, Please check if Ascend AI software package (Ascend Data "
                "Center Solution) is installed correctly.")
        if Path(self.cce_path).is_dir():
            os.environ['PATH'] = self.cce_path + ":" + os.environ['PATH']
        else:
            raise EnvironmentError(
                f"No such directory: {self.cce_path}, Please check if Ascend AI software package (Ascend Data Center "
                "Solution) is installed correctly.")
        if self.op_path is None:
            pass
        elif Path(self.op_path).is_dir():
            os.environ['ASCEND_OPP_PATH'] = self.op_path
        else:
            raise EnvironmentError(
                f"No such directory: {self.op_path}, Please check if Ascend AI software package (Ascend Data Center "
                "Solution) is installed correctly.")
        if self.aicpu_path is None:
            pass
        elif Path(self.aicpu_path).is_dir():
            os.environ['ASCEND_AICPU_PATH'] = self.aicpu_path
        else:
            raise EnvironmentError(
                f"No such directory: {self.aicpu_path}, Please check if Ascend AI software package (Ascend Data Center"
                " Solution) is installed correctly.")

    def _check_env(self):
        """ascend dependence path check"""
        if self.path is None or self.path_check not in self.path:
            logger.warning("Can not find ccec_compiler(need by mindspore-ascend), please check if you have set env "
                           "PATH, you can reference to the installation guidelines https://www.mindspore.cn/install")
        if self.python_path is None or self.python_path_check not in self.python_path:
            logger.warning(
                "Can not find tbe op implement(need by mindspore-ascend), please check if you have set env "
                "PYTHONPATH, you can reference to the installation guidelines "
                "https://www.mindspore.cn/install")
        if self.ld_lib_path is None or not (self.ld_lib_path_check_fwk in self.ld_lib_path and
                                            self.ld_lib_path_check_addons in self.ld_lib_path):
            logger.warning("Can not find driver so(need by mindspore-ascend), please check if you have set env "
                           "LD_LIBRARY_PATH, you can reference to the installation guidelines "
                           "https://www.mindspore.cn/install")
        if self.ascend_opp_path is None or self.ascend_opp_path_check not in self.ascend_opp_path:
            logger.warning(
                "Can not find opp path (need by mindspore-ascend), please check if you have set env ASCEND_OPP_PATH, "
                "you can reference to the installation guidelines https://www.mindspore.cn/install")

    def _read_version(self, file_path):
        """get ascend version info"""
        with open(file_path, 'r') as f:
            all_info = f.readlines()
            for line in all_info:
                if line.startswith("Version="):
                    full_version = line.strip().split("=")[1]
                    # keep only major.minor for comparison against self.version
                    self.v = '.'.join(full_version.split('.')[0:2])
                    return self.v
        return self.v
def check_version_and_env_config():
    """Run the platform-specific version check and environment setup.

    Chooses the checker from ``__package_name__`` (ascend / gpu / other).
    The check runs at most once per process tree: the
    ``MS_DEV_CLOSE_VERSION_CHECK`` env var short-circuits repeat runs.
    If importing the native ``_c_expression`` module fails, the checker's
    ``check_env`` is given the ImportError to diagnose.
    """
    if __package_name__.lower() == "mindspore-ascend":
        env_checker = AscendEnvChecker()
        # Note: pre-load libgomp.so to solve error like "cannot allocate memory in statis TLS block"
        try:
            import ctypes
            ctypes.cdll.LoadLibrary("libgomp.so.1")
        except OSError:
            logger.warning(
                "Pre-Load Lirary libgomp.so.1 failed, this might cause cannot allocate TLS memory problem, "
                "if so find solution in FAQ in https://www.mindspore.cn/docs/faq/en/master/index.html.")
    elif __package_name__.lower() == "mindspore-gpu":
        env_checker = GPUEnvChecker()
    else:
        # CPU-only (or unknown) packages need no environment validation.
        logger.info(f"Package version {__package_name__} does not need to check any environment variable, skipping.")
        return
    if os.getenv("MS_DEV_CLOSE_VERSION_CHECK") == "ON":
        return
    # Set the flag BEFORE checking - presumably so that child processes
    # inherit it and skip the duplicate check; confirm.
    os.environ["MS_DEV_CLOSE_VERSION_CHECK"] = "ON"
    try:
        # check version of ascend site or cuda
        env_checker.check_version()
        from .. import _c_expression  # pylint: disable=unused-import
        env_checker.set_env()
    except ImportError as e:
        env_checker.check_env(e)
def _set_pb_env():
    """Set env variable `PROTOCOL_BUFFERS` to prevent memory overflow."""
    current_impl = os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION")
    if current_impl == "cpp":
        # The user explicitly chose the C++ implementation: keep it, but warn
        # about the known large-checkpoint memory limitation.
        logger.info("Current env variable `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`. "
                    "When the checkpoint file is too large, "
                    "it may cause memory limit error during load checkpoint file. "
                    "This can be solved by set env `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python`.")
        return
    if current_impl is None:
        # Unset: default to the pure-Python implementation.
        logger.info("Setting the env `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python` to prevent memory overflow "
                    "during save or load checkpoint file.")
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
# Executed at import time: run the platform version/env checks once, then
# select the pure-Python protobuf implementation unless the user set one.
check_version_and_env_config()
_set_pb_env()
| 46.356674
| 119
| 0.5941
|
4a0802afb3f7134316d0dcb3e514d0fe27842376
| 4,852
|
py
|
Python
|
hti/server/guiding/periodic_error.py
|
simonvoelcker/astrotools
|
89a2ecaee44a5764931422cae6cc46bad595b766
|
[
"CC0-1.0"
] | 12
|
2020-03-10T15:24:55.000Z
|
2022-03-10T09:14:02.000Z
|
hti/server/guiding/periodic_error.py
|
simonvoelcker/astrotools
|
89a2ecaee44a5764931422cae6cc46bad595b766
|
[
"CC0-1.0"
] | 9
|
2021-03-19T15:47:11.000Z
|
2022-02-10T04:32:58.000Z
|
hti/server/guiding/periodic_error.py
|
simonvoelcker/astrotools
|
89a2ecaee44a5764931422cae6cc46bad595b766
|
[
"CC0-1.0"
] | null | null | null |
import csv
import datetime
from dataclasses import dataclass
from itertools import tee
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0,s1), (s1,s2), (s2,s3), ..."""
    iterator = iter(iterable)
    previous = next(iterator, None)
    for current in iterator:
        yield (previous, current)
        previous = current
def fraction(low, middle, high):
    """Return where *middle* sits between *low* and *high* as a 0..1 ratio."""
    span = high - low
    offset = middle - low
    return offset / span
def lerp(low_value, high_value, f):
    """Linearly interpolate between *low_value* and *high_value* at parameter *f*."""
    weight_high = f
    weight_low = 1.0 - f
    return low_value * weight_low + high_value * weight_high
def fract_part(value):
    """Return the fractional part of *value*, normalized into [0, 1).

    Fix: the previous implementation added ``abs(value) + 1`` (a
    non-integer shift) for negative inputs, which collapsed every value in
    (-1, 0) to exactly 1.0 and corrupted other negatives - the fractional
    part was lost.  Python's modulo with a positive divisor already maps
    any real onto [0, 1) while preserving the fractional part, e.g.
    -2.25 % 1.0 == 0.75.  (Boundary note: exact integers now map to 0.0;
    the old code returned 1.0 for value == 1.0.)
    """
    return value % 1.0
@dataclass
class ErrorSample:
    # One periodic-error measurement tying a worm-wheel position to the
    # guiding error observed there.
    # NOTE(review): defaults are None despite the `float` annotation -
    # consider Optional[float] if typing is tightened later.
    # Wheel position in revolutions (fractional part = phase within a turn).
    wheel_position: float = None
    # Measured guiding error in pixels at that wheel position.
    pixel_error: float = None
class Recording:
    """One full worm-wheel revolution of periodic-error samples.

    Samples accumulate until the wheel has turned more than one full
    revolution; the recording is then finalized: linear drift is removed,
    wheel positions are reduced to their fractional part, the sample list
    is rotated to start at wheel position 0, and the data is saved to CSV.
    """

    def __init__(self):
        # Ordered ErrorSample instances; empty until sampling starts.
        self.samples = []
        # Set once _finalize() has run; no further samples are accepted.
        self.finalized = False

    def add_sample(self, sample):
        """Append *sample*, finalizing once a full revolution is covered."""
        if len(self.samples) > 0:
            # Revolutions covered so far, relative to the first sample.
            rounds = sample.wheel_position - self.samples[0].wheel_position
            print(f'PEC recorder: {int(100.0*rounds)}% completed')
            if rounds > 1 and not self.finalized:
                # Full period recorded; the triggering sample is discarded.
                print('PEC recorder: finalizing')
                self._finalize()
                return
        self.samples.append(sample)

    def _finalize(self):
        """Remove drift, normalize wheel positions, rotate, and persist."""
        # Total error accrued over the recording; treated as linear in wheel
        # rotation and subtracted so only the periodic component remains.
        drift = self.samples[-1].pixel_error - self.samples[0].pixel_error
        # NOTE(review): 640.0 appears to be the worm period in seconds
        # (also implied by sample_slope's 5.0/640.0 window) - confirm.
        print(f'Drift: {drift}px ({drift/640.0}px/s)')
        # subtract increasing portions of the drift from samples' pixel errors
        for sample in self.samples:
            rounds = sample.wheel_position - self.samples[0].wheel_position
            sample.pixel_error -= drift * rounds
        # normalize wheel positions by subtracting integer part
        for sample in self.samples:
            sample.wheel_position -= int(sample.wheel_position)
        # offset samples to have sampling period start at 0 wheel position
        # (rotate the list until positions are monotonically wrapped)
        while self.samples[0].wheel_position > self.samples[-1].wheel_position:
            self.samples = self.samples[1:] + self.samples[:1]
        # persist the recording for analysis on a cloudy day
        self._save()
        self.finalized = True

    def _save(self):
        """Write the finalized samples to a timestamped CSV file."""
        now = datetime.datetime.now().isoformat()
        with open(f'pec_recording_{now}.csv', 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=';')
            writer.writerow(['wheel_position', 'pixel_error'])
            for sample in self.samples:
                writer.writerow([sample.wheel_position, sample.pixel_error])

    def sample_pixel_error(self, wheel_position):
        """Linearly interpolate the recorded pixel error at *wheel_position*."""
        wheel_position = fract_part(wheel_position)
        # copy first+last sample to the end to allow for safe interpolation
        # (wrap-around: the recording is treated as a ring over one period)
        first_sample_wrapped = ErrorSample(
            wheel_position=self.samples[0].wheel_position + 1.0,
            pixel_error=self.samples[0].pixel_error,
        )
        last_sample_wrapped = ErrorSample(
            wheel_position=self.samples[-1].wheel_position - 1.0,
            pixel_error=self.samples[-1].pixel_error,
        )
        samples_ring = [last_sample_wrapped] + self.samples + [first_sample_wrapped]
        for s1, s2 in pairwise(samples_ring):
            if s1.wheel_position <= wheel_position < s2.wheel_position:
                f = fraction(s1.wheel_position, wheel_position, s2.wheel_position)
                return lerp(s1.pixel_error, s2.pixel_error, f)
        print('WARN: Sampling pixel error from PEC recording failed')
        return 0.0

    def sample_slope(self, wheel_position):
        """Return the local error slope (~arcsec/s) around *wheel_position*."""
        # sampling radius for slope (=speed) computation
        # this should roughly equal a 10-second window
        epsilon = 5.0 / 640.0
        s1 = self.sample_pixel_error(wheel_position - epsilon)
        s2 = self.sample_pixel_error(wheel_position + epsilon)
        # unit is pixels/second, roughly equal to arcsecs/second
        return (s2 - s1) / 10.0
class PeriodicErrorManager:
    """Collects PEC recordings and derives speed corrections from them."""

    def __init__(self, pec_state):
        # Completed and in-progress Recording objects, oldest first.
        self.recordings = []
        # Shared state object exposing `ready` and the correction `factor`.
        self.pec_state = pec_state

    def add_sample(self, sample):
        """Route a new error sample into the current (unfinalized) recording."""
        needs_new_recording = not self.recordings or self.recordings[-1].finalized
        if needs_new_recording:
            self.recordings.append(Recording())
        self.recordings[-1].add_sample(sample)
        # Ready as soon as the very first recording is complete.
        if self.recordings[0].finalized:
            self.pec_state.ready = True

    def get_speed_correction(self, wheel_position, range_dps):
        """Return the speed correction at *wheel_position*, capped to +/-range_dps."""
        have_reference = self.recordings and self.recordings[0].finalized
        if not have_reference:
            print('WARN: Not ready to sample PEC recording')
            return 0.0
        slope = self.recordings[0].sample_slope(wheel_position)
        correction = slope * self.pec_state.factor
        correction = min(range_dps, max(-range_dps, correction))
        print(f'Slope: {slope} Factor: {self.pec_state.factor} => Correction: {correction} (capped to {range_dps})')
        return correction
| 35.15942
| 116
| 0.636645
|
4a08037feaf95e1f12d2edb58bf96efaa9dd747c
| 1,019
|
py
|
Python
|
eran/ELINA/python_interface/tests/oct_test.py
|
pauls658/ReluDiff-ICSE2020-Artifact
|
212854fe04f482183c239e5dfec70106a9a83df8
|
[
"Apache-2.0"
] | 7
|
2020-01-27T21:25:49.000Z
|
2022-01-07T04:37:37.000Z
|
eran/ELINA/python_interface/tests/oct_test.py
|
yqtianust/ReluDiff-ICSE2020-Artifact
|
149f6efe4799602db749faa576980c36921a07c7
|
[
"Apache-2.0"
] | 1
|
2022-01-25T17:41:54.000Z
|
2022-01-26T02:27:51.000Z
|
eran/ELINA/python_interface/tests/oct_test.py
|
yqtianust/ReluDiff-ICSE2020-Artifact
|
149f6efe4799602db749faa576980c36921a07c7
|
[
"Apache-2.0"
] | 3
|
2020-03-14T17:12:17.000Z
|
2022-03-16T09:50:46.000Z
|
#
#
# This source file is part of ELINA (ETH LIbrary for Numerical Analysis).
# ELINA is Copyright © 2019 Department of Computer Science, ETH Zurich
# This software is distributed under GNU Lesser General Public License Version 3.0.
# For more information, see the ELINA project website at:
# http://elina.ethz.ch
#
# THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT ANY WARRANTY OF ANY KIND, EITHER
# EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO ANY WARRANTY
# THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS OR BE ERROR-FREE AND ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# TITLE, OR NON-INFRINGEMENT. IN NO EVENT SHALL ETH ZURICH BE LIABLE FOR ANY
# DAMAGES, INCLUDING BUT NOT LIMITED TO DIRECT, INDIRECT,
# SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN
# ANY WAY CONNECTED WITH THIS SOFTWARE (WHETHER OR NOT BASED UPON WARRANTY,
# CONTRACT, TORT OR OTHERWISE).
#
#
from opt_oct import *
# Smoke test: allocating an ELINA optimized-octagon manager must succeed.
manager1 = opt_oct_manager_alloc()
| 40.76
| 84
| 0.758587
|
4a080467a3cc4354be9a51dd971004d4b3b56ae4
| 1,820
|
py
|
Python
|
sympy/abc.py
|
eriknw/sympy
|
b7544e2bb74c011f6098a7e886fd77f41776c2c4
|
[
"BSD-3-Clause"
] | 7
|
2015-01-14T06:55:33.000Z
|
2018-08-11T14:43:52.000Z
|
sympy/abc.py
|
pbeltran/sympy-1
|
94f92b36731c2bebe6de1037c063c2a258a8a399
|
[
"BSD-3-Clause"
] | 1
|
2018-02-19T04:56:04.000Z
|
2018-02-19T04:56:04.000Z
|
sympy/abc.py
|
pbeltran/sympy-1
|
94f92b36731c2bebe6de1037c063c2a258a8a399
|
[
"BSD-3-Clause"
] | 1
|
2016-04-24T14:39:22.000Z
|
2016-04-24T14:39:22.000Z
|
from core import Symbol

# All single ASCII letters (a..z, A..Z) become module-level Symbols.
_latin = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
# COSINEQ should not be imported as they clash; gamma, pi and zeta clash, too
_greek = 'alpha beta gamma delta epsilon zeta eta theta iota kappa lamda '\
'mu nu xi omicron pi rho sigma tau upsilon phi chi psi omega'.split(' ')
# Note: We import lamda since lambda is a reserved keyword in Python
# Bind a Symbol for every latin letter and greek name in this module's
# namespace.  (Python 2 `exec` statement - this file predates Python 3.)
for _s in _latin + _greek:
    exec "%s = Symbol('%s')" % (_s, _s)
def clashing():
    """Return the clashing-symbols dictionaries.
    ``clash1`` defines all the single letter variables that clash with
    SymPy objects; ``clash2`` defines the multi-letter clashing symbols;
    and ``clash`` is the union of both. These can be passed for ``locals``
    during sympification if one desires Symbols rather than the non-Symbol
    objects for those names.
    Examples
    ========
    >>> from sympy import S
    >>> from sympy.abc import _clash1, _clash2, _clash
    >>> S("Q & C", locals=_clash1)
    And(C, Q)
    >>> S('pi(x)', locals=_clash2)
    pi(x)
    >>> S('pi(C, Q)', locals=_clash)
    pi(C, Q)
    Note: if changes are made to the docstring examples they can only
    be tested after removing "clashing" from the list of deleted items
    at the bottom of this file which removes this function from the
    namespace.
    """
    # Collect every top-level name that `from sympy import *` binds.
    # (Python 2 `exec ... in ns` statement.)
    ns = {}
    exec 'from sympy import *' in ns
    clash1 = {}
    clash2 = {}
    while ns:
        k, _ = ns.popitem()
        if k in _greek:
            # Greek name also defined by sympy (e.g. pi, gamma, zeta).
            clash2[k] = Symbol(k)
            # Side effect: shrink the module list so only non-clashing
            # names remain (see the `del` cleanup at module bottom).
            _greek.remove(k)
        elif k in _latin:
            # Single-letter name also defined by sympy (e.g. S, Q, C).
            clash1[k] = Symbol(k)
            _latin.remove(k)
    clash = {}
    clash.update(clash1)
    clash.update(clash2)
    return clash1, clash2, clash
# Build the clash dictionaries once at import time, then scrub helper names
# from the module so only the generated Symbols and _clash* are exported.
_clash1, _clash2, _clash = clashing()
del _latin, _greek, _s, clashing, Symbol
| 30.847458
| 77
| 0.642308
|
4a0804a7b931f09c6851d755d3b10fafce00e4c5
| 4,932
|
py
|
Python
|
admin/c2cgeoportal_admin/views/layers_vectortiles.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
admin/c2cgeoportal_admin/views/layers_vectortiles.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
admin/c2cgeoportal_admin/views/layers_vectortiles.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# Copyright (c) 2017-2021, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
from functools import partial
from typing import Optional
import sqlalchemy
from c2cgeoform.schema import GeoFormSchemaNode
from c2cgeoform.views.abstract_views import ListField
from deform.widget import FormWidget
from pyramid.httpexceptions import HTTPNotFound
from pyramid.view import view_config, view_defaults
from c2cgeoportal_admin import _
from c2cgeoportal_admin.schemas.interfaces import interfaces_schema_node
from c2cgeoportal_admin.schemas.metadata import metadata_schema_node
from c2cgeoportal_admin.schemas.restriction_areas import restrictionareas_schema_node
from c2cgeoportal_admin.schemas.treeitem import parent_id_node
from c2cgeoportal_admin.views.dimension_layers import DimensionLayerViews
from c2cgeoportal_commons.lib.literal import Literal
from c2cgeoportal_commons.models.main import LayerGroup, LayerVectorTiles
# Column factory bound to the LayerVectorTiles model for the admin grid.
_list_field = partial(ListField, LayerVectorTiles)
# Edit-form schema: model fields plus metadata, interface, restriction-area
# and parent-group sub-schemas; layer (name, id) pairs must be unique.
base_schema = GeoFormSchemaNode(LayerVectorTiles, widget=FormWidget(fields_template="layer_fields"))
base_schema.add(metadata_schema_node(LayerVectorTiles.metadatas, LayerVectorTiles))
base_schema.add(interfaces_schema_node(LayerVectorTiles.interfaces))
base_schema.add(restrictionareas_schema_node(LayerVectorTiles.restrictionareas))
base_schema.add_unique_validator(LayerVectorTiles.name, LayerVectorTiles.id)
base_schema.add(parent_id_node(LayerGroup))  # type: ignore
@view_defaults(match_param="table=layers_vectortiles")
class LayerVectorTilesViews(DimensionLayerViews):
    """The vector tiles administration view."""

    # Grid columns: inherited dimension-layer fields + style/xyz + extras.
    _list_fields = (
        DimensionLayerViews._list_fields
        + [_list_field("style"), _list_field("xyz")]
        + DimensionLayerViews._extra_list_fields
    )
    _id_field = "id"
    _model = LayerVectorTiles
    _base_schema = base_schema

    def _base_query(self, query: Optional[sqlalchemy.orm.query.Query] = None) -> sqlalchemy.orm.query.Query:
        # Ignore any caller-supplied query; always start from a distinct
        # LayerVectorTiles query on the request's session.
        del query
        return super()._base_query(self._request.dbsession.query(LayerVectorTiles).distinct())

    @view_config(route_name="c2cgeoform_index", renderer="../templates/index.jinja2")
    def index(self):
        """Render the vector-tiles layer list page."""
        return super().index()

    @view_config(route_name="c2cgeoform_grid", renderer="fast_json")
    def grid(self):
        """Return the JSON payload backing the list grid."""
        return super().grid()

    def schema(self) -> GeoFormSchemaNode:
        """Clone the base schema, appending the evaluated runtime style value
        to the ``style`` field's description."""
        try:
            obj = self._get_object()
        except HTTPNotFound:
            obj = None
        schema = self._base_schema.clone()
        # NOTE(review): when obj is None (new object / not found) the call
        # below would raise AttributeError - confirm this path is reachable.
        schema["style"].description = Literal(
            _("{}<br>Current runtime value is: {}").format(
                schema["style"].description,
                obj.style_description(self._request),
            )
        )
        return schema

    @view_config(route_name="c2cgeoform_item", request_method="GET", renderer="../templates/edit.jinja2")
    def view(self):
        """Render the edit form for one layer."""
        return super().edit(self.schema())

    @view_config(route_name="c2cgeoform_item", request_method="POST", renderer="../templates/edit.jinja2")
    def save(self):
        """Persist a submitted edit form."""
        return super().save()

    @view_config(route_name="c2cgeoform_item", request_method="DELETE", renderer="fast_json")
    def delete(self):
        """Delete one layer."""
        return super().delete()

    @view_config(
        route_name="c2cgeoform_item_duplicate", request_method="GET", renderer="../templates/edit.jinja2"
    )
    def duplicate(self):
        """Open the edit form pre-filled from an existing layer."""
        return super().duplicate()
| 42.517241
| 108
| 0.760543
|
4a080522eafcd8027e639548ef79f0963fd19712
| 9,193
|
py
|
Python
|
learningPygame/Tyler/03-Raindrops/MikesRainyDay.py
|
Rosebotics/catapult2019
|
4f125632f4d144b97ee3ecaf00a517780d510a70
|
[
"MIT"
] | null | null | null |
learningPygame/Tyler/03-Raindrops/MikesRainyDay.py
|
Rosebotics/catapult2019
|
4f125632f4d144b97ee3ecaf00a517780d510a70
|
[
"MIT"
] | null | null | null |
learningPygame/Tyler/03-Raindrops/MikesRainyDay.py
|
Rosebotics/catapult2019
|
4f125632f4d144b97ee3ecaf00a517780d510a70
|
[
"MIT"
] | null | null | null |
import pygame
import sys
import time # Note this!
import random # Note this!
class Raindrop:
    """A single falling raindrop, drawn as a short vertical blue line."""

    def __init__(self, screen, x, y):
        """Create a raindrop at (x, y) with a random fall speed of 5-15 px/frame."""
        self.screen = screen
        self.x = x
        self.y = y
        self.speed = random.randint(5, 15)

    def move(self):
        """Advance the drop down the screen (y grows) by its speed."""
        self.y += self.speed

    def off_screen(self):
        """Return True once the drop has fallen below the visible area."""
        return self.y > self.screen.get_height()

    def draw(self):
        """Render the drop as a 5-pixel-long, 2-pixel-thick line."""
        top = (self.x, self.y)
        bottom = (self.x, self.y + 5)
        pygame.draw.line(self.screen, (0, 0, 200), top, bottom, 2)
class Hero:
    """The hero (Mike), who raises his umbrella for one second after a hit."""

    def __init__(self, screen, x, y, with_umbrella_filename, without_umbrella_filename):
        """Create a stationary Hero at (x, y) with both sprite images loaded."""
        self.screen = screen
        self.x = x
        self.y = y
        self.image_umbrella = pygame.image.load(with_umbrella_filename)
        self.image_no_umbrella = pygame.image.load(without_umbrella_filename)
        # time.time() of the most recent raindrop hit; 0 means never hit.
        self.last_hit_time = 0

    def draw(self):
        """Draw Mike: umbrella up for 1 second after the last hit, plain otherwise.

        Fix: the original code unconditionally blitted the no-umbrella image
        before the conditional, drawing the sprite twice every frame.
        """
        if time.time() > self.last_hit_time + 1:
            self.screen.blit(self.image_no_umbrella, (self.x, self.y))
        else:
            self.screen.blit(self.image_umbrella, (self.x, self.y))

    def hit_by(self, raindrop):
        """Return True if *raindrop* is inside Mike's 170x192 bounding box."""
        return pygame.Rect(self.x, self.y, 170, 192).collidepoint(raindrop.x, raindrop.y)
class Cloud:
    """A movable cloud sprite that spawns Raindrop objects beneath itself."""

    def __init__(self, screen, x, y, image_filename):
        """Create a cloud at (x, y) from the given image file."""
        self.screen = screen
        self.x = x
        self.y = y
        self.image = pygame.image.load(image_filename)
        # Raindrops currently falling from this cloud.
        self.raindrops = []

    def draw(self):
        """Blit the cloud image at its current position."""
        self.screen.blit(self.image, (self.x, self.y))

    def rain(self):
        """Spawn one new Raindrop along the cloud's 300-pixel-wide base."""
        drop_x = random.randint(self.x, self.x + 300)
        drop_y = self.y + 100
        self.raindrops.append(Raindrop(self.screen, drop_x, drop_y))
def main():
    """ Main game loop that creates the sprite objects, controls interactions, and draw the screen. """
    # Window setup: 1000x600 screen and a frame-rate clock.
    pygame.init()
    screen = pygame.display.set_mode((1000, 600))
    clock = pygame.time.Clock()
    # Sprites: the hero at a fixed spot, the cloud starts top-center-left.
    mike = Hero(screen, 300, 400, "Mike_umbrella.png", "Mike.png")
    cloud = Cloud(screen, 300, 50, 'cloud.png')
    while True:
        # Cap the loop at ~60 frames per second.
        clock.tick(60)
        # Window close button exits the program.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        # Held arrow keys move the cloud 5 px per frame (continuous motion,
        # hence polled state here rather than the event loop above).
        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[pygame.K_UP]:
            cloud.y -= 5
        if pressed_keys[pygame.K_DOWN]:
            cloud.y += 5
        if pressed_keys[pygame.K_LEFT]:
            cloud.x -= 5
        if pressed_keys[pygame.K_RIGHT]:
            cloud.x += 5
        # Repaint from scratch: white background, then the cloud.
        screen.fill((255, 255, 255))
        cloud.draw()
        # Spawn one drop per frame, then advance/draw every active drop.
        cloud.rain()
        for raindrop in cloud.raindrops:
            raindrop.move()
            raindrop.draw()
            # A hit starts (or restarts) the 1-second umbrella timer.
            if mike.hit_by(raindrop):
                mike.last_hit_time = time.time()
            # NOTE(review): removing from the list while iterating it skips
            # the element after each removal - iterate a copy, or rebuild
            # the list, to cull reliably.
            if raindrop.y > 650:
                cloud.raindrops.remove(raindrop)
        mike.draw()
        pygame.display.update()


# Start the game when the module is executed.
main()
| 43.56872
| 120
| 0.620037
|
4a0805c6b82ed7e4a30f5015af7b3f4e41162f7b
| 2,279
|
py
|
Python
|
Fig1_singlecell/example_1comp/plot_activity.py
|
TatsuyaHaga/preplaymodel_codes
|
548df9ef5f85358fb03ed5f7f4cdc9e03a04bbf3
|
[
"MIT"
] | 1
|
2019-02-26T04:15:16.000Z
|
2019-02-26T04:15:16.000Z
|
Fig1_singlecell/example_2comp_uncorrelate/plot_activity.py
|
TatsuyaHaga/preplaymodel_codes
|
548df9ef5f85358fb03ed5f7f4cdc9e03a04bbf3
|
[
"MIT"
] | null | null | null |
Fig1_singlecell/example_2comp_uncorrelate/plot_activity.py
|
TatsuyaHaga/preplaymodel_codes
|
548df9ef5f85358fb03ed5f7f4cdc9e03a04bbf3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Plot single-cell activity traces and minute-averaged rates from
# activity.csv (columns: time [s], then three activity/output traces).
import numpy
import pylab
# Compact styling for small paper-panel figures.
pylab.rcParams["font.size"]=8
pylab.rcParams["legend.fontsize"]=6
pylab.rcParams["xtick.major.size"]=0
pylab.rcParams["xtick.minor.size"]=0
pylab.rcParams["ytick.major.size"]=0
pylab.rcParams["ytick.minor.size"]=0
pylab.rcParams["figure.figsize"]=(2, 2.5)
# --- Raw traces (last 499 samples) -> activity.pdf ---
activity=numpy.loadtxt("activity.csv", delimiter=",")
plot_start=-499
pylab.clf()
pylab.subplot(3,1,1)
pylab.plot(activity[plot_start:, 0], activity[plot_start:, 1])
pylab.ylabel("Soma")
pylab.xticks([])
pylab.ylim([0.0, 1.0])
pylab.yticks([0.0, 1.0])
pylab.subplot(3,1,2)
pylab.plot(activity[plot_start:, 0], activity[plot_start:, 2])
pylab.ylabel("Dendrite")
pylab.xticks([])
pylab.ylim([0.0, 1.0])
pylab.yticks([0.0, 1.0])
pylab.subplot(3,1,3)
pylab.plot(activity[plot_start:, 0], activity[plot_start:, 3])
pylab.xticks([round(activity[plot_start, 0]), round(activity[-1, 0])])
pylab.ylim([0.0, 0.16])
pylab.yticks([0.0, 0.16])
pylab.ylabel("Output [kHz]")
pylab.xlabel("Time [s]")
pylab.tight_layout()
pylab.savefig("activity.pdf")
# --- 60-second window averages -> mean.pdf ---
mean_win=60.0 #[s]
mean_len=int(numpy.ceil(activity[-1,0]/mean_win))
# x axis in minutes, one point per averaging window
time_mean=numpy.arange(0.0, mean_len*mean_win/60.0, mean_win/60.0)
# convert the window length from seconds to number of samples
mean_win=int(mean_win/(activity[1,0]-activity[0,0]))
pylab.clf()
# NOTE(review): column 1 is labeled "Soma" in the trace figure above but
# "Distal" here (and column 2 "Dendrite" vs "Soma") - confirm which
# column/label pairing is intended.
mean=numpy.zeros(mean_len)
for i in range(mean_len):
    mean[i]=numpy.mean(activity[i*mean_win:(i+1)*mean_win,1])
pylab.subplot(2,1,1)
pylab.plot(time_mean, mean, color="black")
pylab.xticks([])
pylab.ylabel("Distal [kHz]")
pylab.ylim([0.0, 1.0])
mean=numpy.zeros(mean_len)
for i in range(mean_len):
    mean[i]=numpy.mean(activity[i*mean_win:(i+1)*mean_win,2])
pylab.subplot(2,1,2)
pylab.plot(time_mean, mean, color="black")
pylab.ylabel("Soma [kHz]")
pylab.ylim([0.0, 1.0])
pylab.tight_layout()
pylab.savefig("mean.pdf")
| 27.130952
| 70
| 0.704695
|
4a0806fb04d1a03ec91df3ab0aadc49422f5a589
| 395
|
py
|
Python
|
Markdown-Templates/repoutils/python-scripts/scripts/02_find_all_links.py
|
Uvacoder/html-demo-code-and-experiments
|
1bd2ab50afe8f331396c37822301afa8e4903bcd
|
[
"Apache-2.0"
] | 14
|
2021-02-18T04:54:43.000Z
|
2022-03-01T13:36:22.000Z
|
Markdown-Templates/repoutils/python-scripts/scripts/02_find_all_links.py
|
Uvacoder/html-demo-code-and-experiments
|
1bd2ab50afe8f331396c37822301afa8e4903bcd
|
[
"Apache-2.0"
] | 51
|
2021-05-03T05:36:48.000Z
|
2022-03-30T06:10:23.000Z
|
Markdown-Templates/repoutils/python-scripts/scripts/02_find_all_links.py
|
Uvacoder/html-demo-code-and-experiments
|
1bd2ab50afe8f331396c37822301afa8e4903bcd
|
[
"Apache-2.0"
] | 7
|
2021-06-02T23:42:41.000Z
|
2022-03-19T17:08:42.000Z
|
import requests
import re

# Page to scan for links.
url = 'https://lambda-w-1-notes.netlify.app/'

# Fetch the page and read its HTML body.
website = requests.get(url)
html = website.text

# Every http/https/ftp/ftps URL that appears inside double quotes;
# each match is a (full_url, scheme_prefix) tuple.
links = re.findall('"((http|ftp)s?://.*?)"', html)

# Print only the full URL from each match tuple.
for link in links:
    print(link[0])
| 19.75
| 80
| 0.658228
|
4a08071e187548ef9f392f1eeef73d5613496070
| 7,231
|
py
|
Python
|
onmt/modules/GlobalAttention.py
|
yueyongjiao/openNMT-10-baiduyun
|
5f7af2f995cbb6816b52029173e6219d555ddeed
|
[
"MIT"
] | null | null | null |
onmt/modules/GlobalAttention.py
|
yueyongjiao/openNMT-10-baiduyun
|
5f7af2f995cbb6816b52029173e6219d555ddeed
|
[
"MIT"
] | null | null | null |
onmt/modules/GlobalAttention.py
|
yueyongjiao/openNMT-10-baiduyun
|
5f7af2f995cbb6816b52029173e6219d555ddeed
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from onmt.Utils import aeq, sequence_mask
class GlobalAttention(nn.Module):
    """
    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

    Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`, to an output
    of size `dim`.

    All models compute the output as
    :math:`c = \\sum_{j=1}^{SeqLength} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    Then then apply a projection layer to [q, c].

    However they differ on how they compute the attention score.

    * Luong Attention (dot, general):
       * dot: :math:`score(H_j,q) = H_j^T q`
       * general: :math:`score(H_j, q) = H_j^T W_a q`

    * Bahdanau Attention (mlp):
       * :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`

    Args:
        dim (int): dimensionality of query and key
        coverage (bool): use coverage term
        attn_type (str): type of attention to use, options [dot,general,mlp]
    """
    def __init__(self, dim, coverage=False, attn_type="dot"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        self.attn_type = attn_type
        assert (self.attn_type in ["dot", "general", "mlp"]), (
            "Please select a valid attention type.")

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)
        # FIX: nn.Softmax() without an explicit `dim` is deprecated and relied
        # on the legacy implicit-dim rule.  The softmax is always applied to a
        # 2D view `(batch*tgt_len, src_len)` and must normalize over the
        # source-length axis, so dim=-1 is behavior-identical.
        self.sm = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """
        Compute raw (unnormalized) attention scores.

        Args:
          h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
          h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`

        Returns:
          :obj:`FloatTensor`:
           raw attention scores (unnormalized) for each src index
          `[batch x tgt_len x src_len]`
        """
        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)

        if self.attn_type in ["general", "dot"]:
            if self.attn_type == "general":
                h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
            return torch.bmm(h_t, h_s_)
        else:
            # MLP (Bahdanau) attention: v^T tanh(W q + U h)
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)

            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)

            # (batch, t_len, s_len, d)
            wquh = self.tanh(wq + uh)

            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, input, memory_bank, memory_lengths=None, coverage=None):
        """
        Args:
          input (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
          memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
          memory_lengths (`LongTensor`): the source context lengths `[batch]`
          coverage (`FloatTensor`): previous attention sums `[batch x src_len]`
            used to down-weight already-attended positions (or None)

        Returns:
          (`FloatTensor`, `FloatTensor`):

          * Computed vector `[tgt_len x batch x dim]`
          * Attention distribtutions for each query
             `[tgt_len x batch x src_len]`
        """
        # one step input: a single decoder step comes in as 2D and is
        # temporarily expanded to a length-1 target sequence
        if input.dim() == 2:
            one_step = True
            input = input.unsqueeze(1)
        else:
            one_step = False

        batch, sourceL, dim = memory_bank.size()
        batch_, targetL, dim_ = input.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, sourceL_ = coverage.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)

        if coverage is not None:
            # Exponentially decay memory-bank entries that have already been
            # attended to (higher accumulated coverage -> smaller weight).
            cover_score = torch.exp(-1 * coverage)
            cover_score = cover_score.unsqueeze(2).expand(-1, -1, dim)
            memory_bank = memory_bank * cover_score

        # compute attention scores, as in Luong et al.
        align = self.score(input, memory_bank)

        if memory_lengths is not None:
            mask = sequence_mask(memory_lengths)
            mask = mask.unsqueeze(1)  # Make it broadcastable.
            # Padding positions get -inf so they receive zero attention.
            # NOTE(review): `1 - mask` assumes a Byte/long mask; on newer
            # torch with bool masks this would need `~mask` -- verify the
            # torch version this runs against.
            align.data.masked_fill_(1 - mask, -float('inf'))

        # Softmax over the source-length axis to normalize attention weights
        align_vectors = self.sm(align.view(batch*targetL, sourceL))
        align_vectors = align_vectors.view(batch, targetL, sourceL)

        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, memory_bank)

        # concatenate context and query, then project back to `dim`
        concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)
        attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
        if self.attn_type in ["general", "dot"]:
            attn_h = self.tanh(attn_h)

        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)

            # Check output sizes
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, sourceL_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        else:
            # Return time-major tensors for multi-step decoding
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()

            # Check output sizes
            targetL_, batch_, dim_ = attn_h.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            targetL_, batch_, sourceL_ = align_vectors.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)

        return attn_h, align_vectors
| 32.868182
| 85
| 0.557599
|
4a0808a2760bb6b499388d249ef0fac0acc9debb
| 30,551
|
py
|
Python
|
pypdn/nrbf.py
|
addisonElliott/pypaintdotnet
|
ebffd52f9b8a56f2e885e1a9c51a58fc4b30e3bb
|
[
"MIT"
] | 15
|
2018-04-29T18:29:14.000Z
|
2022-03-29T10:12:22.000Z
|
pypdn/nrbf.py
|
addisonElliott/pypaintdotnet
|
ebffd52f9b8a56f2e885e1a9c51a58fc4b30e3bb
|
[
"MIT"
] | 10
|
2019-09-25T00:03:20.000Z
|
2022-03-29T12:25:12.000Z
|
pypdn/nrbf.py
|
addisonElliott/pypaintdotnet
|
ebffd52f9b8a56f2e885e1a9c51a58fc4b30e3bb
|
[
"MIT"
] | 3
|
2019-09-24T21:21:31.000Z
|
2020-10-29T14:00:59.000Z
|
import struct
from datetime import timezone
from functools import reduce
from pypdn.util import *
# This library was inspired from the following code:
# https://github.com/gurnec/Undo_FFG/blob/master/nrbf.py
# Thanks to Christopher Gurnee!
# TODO Do documentation and docstrings for this
# TODO Issue with reporting _id in JSON or something. classID vs _id is issue
class NRBFError(Exception):
    """Raised for malformed or unsupported content in an NRBF stream."""
# Decorator which adds an enum value (an int or a length-one bytes object) and its associated reader function to a
# registration dict (either PrimitiveTypeReaders or RecordTypeReaders)
# The last two arguments, primitiveStructs and primitiveFormat are only specified for primitive types to create the
# Struct object. The Struct object parses the format and makes it quicker to parse the numbers by calling Struct.unpack
def _registerReader(typeDict, typeValue, primitiveStructs=None, primitiveFormat=None):
assert isinstance(typeValue, int)
def decorator(readFunction):
if primitiveStructs is not None:
primitiveStructs[typeValue] = struct.Struct(primitiveFormat)
typeDict[typeValue] = readFunction
return readFunction
return decorator
class NRBF:
    """Reader for .NET Remoting Binary Format (MS-NRBF) streams.

    Parsing is table-driven: reader methods register themselves (via the
    module-level ``_registerReader`` decorator) into class-level dictionaries
    keyed by record/primitive/binary-type enum values, and ``read`` walks the
    stream dispatching through those tables.  Object references encountered
    during reading are resolved in a second pass (``resolveReferences``).
    """

    # Dictionary that contains functions to call when reading records and primitives from NRBF file
    _RecordTypeReaders = {}
    _PrimitiveTypeReaders = {}
    # Dictionary that contains Struct objects for each of the primitive types. The Struct objects are precompiled to
    # speed up parsing the number
    _PrimitiveTypeStructs = {}
    # Dictionary contains stuff for reading primitive arrays
    _PrimitiveTypeArrayReaders = {}
    # Dictionary containing readers for additional info in classes
    _AdditionalInfoReaders = {}

    def __init__(self, stream=None, filename=None):
        # Stream currently being read; non-None only while read() is active.
        self.stream = None
        # Object ID of the root object (from the serialization header).
        self.rootID = None
        self.headerID = None
        # Whether Reference placeholders have been replaced by real objects.
        self.referencesResolved = None

        # Dictionary of binary libraries with their linked objects
        self.binaryLibraries = {}

        # Keep track of class and objects by their ID
        self.classByID = {}
        self.objectsByID = {}

        # Keeps track of references so that after reading is done, the references can be resolved
        # collection references replace the system collections with Python equivalent types such as dict or list
        self._collectionReferences = []

        # If a stream or filename to be loaded is given, then call the read function
        # This makes the syntax cleaner to allow this creation of class and loading of data all in one line
        if stream is not None or filename is not None:
            self.read(stream, filename)

    def read(self, stream=None, filename=None):
        """Parse an entire NRBF stream (or the file at *filename*).

        Reads records until a MessageEnd record is seen, then resolves all
        object references.  Raises NRBFError on a malformed header.
        """
        if stream is None and filename is not None:
            stream = open(filename, 'rb')

        assert stream.readable()

        if self.stream is not None:
            # Means we are in the middle of reading or writing data, throw exception probably
            raise NRBFError('Class is already reading from a stream! Please close the stream before trying again')

        if self.rootID is not None:
            # File has already been loaded so we must reset everything
            self.rootID = None
            self.headerID = None
            self.binaryLibraries = {}
            self.classByID = {}
            self.objectsByID = {}
            self._collectionReferences = []

        self.stream = stream

        # Read header
        self._readRecord()
        if self.rootID is None:
            raise NRBFError('Invalid stream, unable to read header. File may be corrupted')

        # Keep reading records until we receive a MessageEnd record
        while not isinstance(self._readRecord(), MessageEnd):
            pass

        # Resolve all the collection references
        self.resolveReferences()

        # Once we are done reading, we set the stream to None because it will not be used
        self.stream = None

    def write(self, stream=None, filename=None):
        """Serialize back to NRBF. Not implemented yet."""
        if stream is None and filename is not None:
            stream = open(filename, 'wb')

        assert stream.writable()
        assert stream.seekable()

        # TODO Write NRBF files
        raise NotImplementedError('Writing a NRBF file is not supported yet')

    def getRoot(self):
        """Return the root object identified by the serialization header."""
        assert self.rootID is not None

        return self.objectsByID[self.rootID]

    def resolveReferences(self):
        """Replace Reference placeholders with the objects they point to.

        Collection references (e.g. .NET Generic Dictionary/List) are
        replaced by native Python dict/list equivalents via their resolver.
        """
        # Resolve all the collection references
        for reference in self._collectionReferences:
            # Calls one of the resolvers
            replacement = reference.collectionResolver(self, reference)

            # The final steps common to all collection resolvers are completed below
            # NOTE(review): this reads 'index_in_parent' while
            # _readClassMembers sets 'indexInParent' -- verify the Reference
            # type in pypdn.util defines both, otherwise this branch breaks.
            if reference.parent:
                reference.parent[reference.index_in_parent] = replacement
            self.objectsByID[reference._id] = replacement

        # Note: Collection references must be saved so that it can be converted back
        # when saving the file again.

        # Loop through all of the objects
        for _, object in self.objectsByID.items():
            # Attempt to iterate through the object
            # If it doesnt work, then move on to the next item because there wont need to be any
            # references.
            # Otherwise, if any of the items have an object ID, create a reference
            try:
                for index, item in enumerate(object):
                    if isinstance(item, Reference):
                        self._resolveSimpleReference(item)
            except TypeError:
                pass

        self.referencesResolved = True

    def unresolveReferences(self):
        """Inverse of resolveReferences: re-insert Reference placeholders."""
        # Loop through all of the objects
        for _, object in self.objectsByID.items():
            # Attempt to iterate through the object
            # If it doesnt work, then move on to the next item because there wont need to be any
            # references.
            # Otherwise, if any of the items have an object ID, create a reference
            try:
                for index, item in enumerate(object):
                    if hasattr(item, '_id'):
                        object[index] = Reference(item._id, object, index)
            except TypeError:
                pass

        self.referencesResolved = False

    def toJSON(self, resolveReferences=True, **kwargs):
        """Encode the parsed data as JSON text using pypdn's JSONEncoder."""
        # Resolve or unresolve the references based on what the user desires
        # and the current state
        if resolveReferences and not self.referencesResolved:
            self.resolveReferences()
        elif not resolveReferences and self.referencesResolved:
            self.unresolveReferences()

        jsonEncoder = JSONEncoder(**kwargs)
        return jsonEncoder.encode(self)

    # region Primitive reader functions

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Boolean, _PrimitiveTypeStructs, '?')
    def _readBool(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Boolean].unpack(self.stream.read(1))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Byte, _PrimitiveTypeStructs, 'B')
    @_registerReader(_AdditionalInfoReaders, BinaryType.Primitive)
    @_registerReader(_AdditionalInfoReaders, BinaryType.PrimitiveArray)
    def _readByte(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Byte].unpack(self.stream.read(1))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Char)
    def _readChar(self):
        # A char is a single UTF-8 code point: keep appending bytes until the
        # accumulated sequence decodes (max 4 bytes for valid UTF-8).
        utf8Bytes = bytearray()

        while True:
            utf8Bytes += self.stream.read(1)

            try:
                return utf8Bytes.decode('utf-8')
            except UnicodeDecodeError:
                if len(utf8Bytes) > 4:
                    raise NRBFError('Invalid char read from NRBF file, longer than 4 bytes: {0}'.format(utf8Bytes))

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Decimal)
    def _read_Decimal(self):
        # Decimals are serialized as length-prefixed strings.
        return Decimal(self._readString())

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Double, _PrimitiveTypeStructs, '<d')
    def _readDouble(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Double].unpack(self.stream.read(8))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Int16, _PrimitiveTypeStructs, '<h')
    def _readInt16(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Int16].unpack(self.stream.read(2))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Int32, _PrimitiveTypeStructs, '<i')
    def _readInt32(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Int32].unpack(self.stream.read(4))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Int64, _PrimitiveTypeStructs, '<q')
    def _readInt64(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Int64].unpack(self.stream.read(8))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.SByte, _PrimitiveTypeStructs, 'b')
    def _readSByte(self):
        return self._PrimitiveTypeStructs[PrimitiveType.SByte].unpack(self.stream.read(1))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Single, _PrimitiveTypeStructs, '<f')
    def _readSingle(self):
        return self._PrimitiveTypeStructs[PrimitiveType.Single].unpack(self.stream.read(4))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.TimeSpan)
    def _readTimeSpan(self):
        # 64-bit integer that represents time span in increments of 100 nanoseconds
        # Divide by 10 to get into microseconds
        return timedelta(microseconds=self._readInt64() / 10)

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.DateTime)
    def _readDateTime(self):
        ticks = self._readUInt64()

        # Two MSB store kind, 0 = no timezone, 1 = UTC, 2 = local timezone
        kind = ticks >> 62

        # Remaining 62-bits are the number of 100ns increments from 12:00:00 January 1, 0001
        ticks &= (1 << 62) - 1
        # If negative, then reinterpret as 62-bit two's complement
        if ticks >= 1 << 61:
            ticks -= 1 << 62

        # Create a datetime that starts at the beginning and then increment it by the number of microseconds
        time = datetime(1, 1, 1)
        try:
            time += timedelta(microseconds=ticks / 10)
        except OverflowError:
            # Out-of-range ticks leave the epoch value unchanged.
            pass

        # Update datetime object to have the appropriate timezone
        # If kind is 1, then this is UTC and if kind is 2, then this is local timezone
        if kind == 1:
            time = time.replace(tzinfo=timezone.utc)
        elif kind == 2:
            LOCAL_TIMEZONE = datetime.now(timezone.utc).astimezone().tzinfo
            time = time.replace(tzinfo=LOCAL_TIMEZONE)  # kind 2 is the local time zone

        return time

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.UInt16, _PrimitiveTypeStructs, '<H')
    def _readUInt16(self):
        return self._PrimitiveTypeStructs[PrimitiveType.UInt16].unpack(self.stream.read(2))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.UInt32, _PrimitiveTypeStructs, '<I')
    def _readUInt32(self):
        return self._PrimitiveTypeStructs[PrimitiveType.UInt32].unpack(self.stream.read(4))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.UInt64, _PrimitiveTypeStructs, '<Q')
    def _readUInt64(self):
        return self._PrimitiveTypeStructs[PrimitiveType.UInt64].unpack(self.stream.read(8))[0]

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.Null)
    @_registerReader(_AdditionalInfoReaders, BinaryType.String)
    @_registerReader(_AdditionalInfoReaders, BinaryType.Object)
    @_registerReader(_AdditionalInfoReaders, BinaryType.ObjectArray)
    @_registerReader(_AdditionalInfoReaders, BinaryType.StringArray)
    @_registerReader(_RecordTypeReaders, RecordType.ObjectNull)
    def _readNull(self):
        # Shared no-op reader: these types carry no additional payload.
        return None

    @_registerReader(_PrimitiveTypeReaders, PrimitiveType.String)
    @_registerReader(_AdditionalInfoReaders, BinaryType.SystemClass)
    def _readString(self):
        # LengthPrefixedString: 7-bit variable-length length, then UTF-8 data.
        length = 0

        # Each bit range is 7 bits long with a maximum of 5 bytes
        for bit_range in range(0, 5 * 7, 7):
            byte = self._readByte()

            # Remove the last bit from the length (used to indicate if there is another byte to come)
            # Then shift the number to the appropiate bit range and add it
            length += (byte & ((1 << 7) - 1)) << bit_range

            # Check MSB and if it is zero, this is the last length byte and we are ready to read string
            if byte & (1 << 7) == 0:
                break
        else:
            # For-else statements in Python are useful! This will be only happen if the for successfully completes
            raise NRBFError('NRBF LengthPrefixedString overflow')

        # Read the string
        return self.stream.read(length).decode('utf-8')

    # endregion

    # region Primitive Array reader functions

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Boolean)
    def _readBoolArray(self, length):
        return struct.unpack('<{0}?'.format(length), self.stream.read(length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Byte)
    def _readByteArray(self, length):
        return struct.unpack('<{0}B'.format(length), self.stream.read(length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Double)
    def _readDoubleArray(self, length):
        return struct.unpack('<{0}d'.format(length), self.stream.read(8 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Int16)
    def _readInt16Array(self, length):
        return struct.unpack('<{0}h'.format(length), self.stream.read(2 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Int32)
    def _readInt32Array(self, length):
        return struct.unpack('<{0}i'.format(length), self.stream.read(4 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Int64)
    def _readInt64Array(self, length):
        return struct.unpack('<{0}q'.format(length), self.stream.read(8 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.SByte)
    def _readSByteArray(self, length):
        return struct.unpack('<{0}b'.format(length), self.stream.read(length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.Single)
    def _readSingleArray(self, length):
        return struct.unpack('<{0}f'.format(length), self.stream.read(4 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.UInt16)
    def _readUInt16Array(self, length):
        return struct.unpack('<{0}H'.format(length), self.stream.read(2 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.UInt32)
    def _readUInt32Array(self, length):
        return struct.unpack('<{0}I'.format(length), self.stream.read(4 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.UInt64)
    def _readUInt64Array(self, length):
        return struct.unpack('<{0}Q'.format(length), self.stream.read(8 * length))

    @_registerReader(_PrimitiveTypeArrayReaders, PrimitiveType.String)
    def _readStringArray(self, length):
        return [self._readString() for i in range(length)]

    # endregion

    # region AdditionalInfo reader functions
    # Note: Remaining reader functions are attached in other sections to existing functions

    # The ClassTypeInfo structure is read and ignored
    @_registerReader(_AdditionalInfoReaders, BinaryType.Class)
    def _readClassTypeInfo(self):
        name = self._readString()
        libraryID = self._readInt32()
        return (name, libraryID)

    # endregion

    # region Record reader functions

    def _readRecord(self):
        # Dispatch on the one-byte record type tag.
        recordType = self._readByte()
        return self._RecordTypeReaders[recordType](self)

    def _readPrimitive(self, primitiveType):
        return self._PrimitiveTypeReaders[primitiveType](self)

    @_registerReader(_RecordTypeReaders, RecordType.SerializedStreamHeader)
    def _readSerializationHeaderRecord(self):
        self.rootID = self._readInt32()
        self.headerID = self._readInt32()
        majorVersion, minorVersion = self._readInt32(), self._readInt32()

        if majorVersion != 1 or minorVersion != 0:
            raise NRBFError('Major and minor version for serialization header is incorrect: {0} {1}'.format(
                majorVersion, minorVersion))

        if self.rootID == 0:
            raise NotImplementedError(
                'Root ID is zero indicating that a BinaryMethodCall is available. Not implemented yet')

    @_registerReader(_RecordTypeReaders, RecordType.ClassWithId)
    def _readClassWithId(self):
        objectID = self._readInt32()
        metadataID = self._readInt32()
        cls = self.classByID[metadataID]

        # Only instance where the objectID does NOT equal the class ID is here!
        return self._readClassMembers(cls(), objectID)

    @_registerReader(_RecordTypeReaders, RecordType.SystemClassWithMembers)
    def _readSystemClassWithMembers(self):
        cls = self._readClassInfo(isSystemClass=True)
        # No member type info in this record variant.
        cls._typeInfo = None
        return self._readClassMembers(cls(), cls._id)

    @_registerReader(_RecordTypeReaders, RecordType.ClassWithMembers)
    def _readClassWithMembers(self):
        cls = self._readClassInfo(isSystemClass=False)
        cls._typeInfo = None
        libraryID = self._readInt32()
        return self._readClassMembers(cls(), cls._id, libraryID)

    @_registerReader(_RecordTypeReaders, RecordType.SystemClassWithMembersAndTypes)
    def _readSystemClassWithMembersAndTypes(self):
        cls = self._readClassInfo(isSystemClass=True)
        self._readMemberTypeInfo(cls)
        return self._readClassMembers(cls(), cls._id)

    @_registerReader(_RecordTypeReaders, RecordType.ClassWithMembersAndTypes)
    def _readClassWithMembersAndTypes(self):
        cls = self._readClassInfo(isSystemClass=False)
        self._readMemberTypeInfo(cls)
        libraryID = self._readInt32()
        return self._readClassMembers(cls(), cls._id, libraryID)

    @_registerReader(_RecordTypeReaders, RecordType.BinaryObjectString)
    def _readBinaryObjectString(self):
        objectID = self._readInt32()
        string = self._readString()
        self.objectsByID[objectID] = string
        return string

    @_registerReader(_RecordTypeReaders, RecordType.MemberPrimitiveTyped)
    def _readMemberPrimitiveTyped(self):
        primitiveType = self._readByte()
        value = self._PrimitiveTypeReaders[primitiveType](self)
        return value

    @_registerReader(_RecordTypeReaders, RecordType.BinaryArray)
    def _readBinaryArray(self):
        objectID = self._readInt32()
        arrayType = self._readByte()
        rank = self._readInt32()
        lengths = [self._readInt32() for i in range(rank)]

        # The lower bounds are ignored currently
        # Not sure of the implications or purpose of this
        if arrayType in [BinaryArrayType.SingleOffset, BinaryArrayType.JaggedOffset, BinaryArrayType.RectangularOffset]:
            lowerBounds = [self._readInt32() for i in range(rank)]

        binaryType = self._readByte()
        additionalInfo = self._AdditionalInfoReaders[binaryType](self)

        # Get total length of items that we need to read
        # This is just the product of all the elements in the lengths array
        length = reduce(lambda x, y: x * y, lengths)

        # If the items are primitives, use primitive array readers
        # Otherwise, the items will be objects and should be read by reading records
        if binaryType == BinaryType.Primitive:
            array = self._PrimitiveTypeArrayReaders[additionalInfo](self, length)
        else:
            array = self._readObjectArray(length, objectID)

        # For a multidimensional array, take the 1D array that was read and convert it to ND
        if arrayType in [BinaryArrayType.Rectangular, BinaryArrayType.RectangularOffset]:
            array = convert1DArrayND(array, lengths)

        # Save the object by ID
        # Only required for primitive because _readObjectArray saves the ID for you
        # But we just overwrite it regardless
        self.objectsByID[objectID] = array

        return array

    # When object's with an object ID are encountered above, they are added to the _objectsByID dictionary.
    # A MemberReference object contains the object ID that the reference refers to. These references are
    # resolved at the end.
    @_registerReader(_RecordTypeReaders, RecordType.MemberReference)
    def _readMemberReference(self):
        # objectID
        ref = Reference(self._readInt32())
        return ref

    @_registerReader(_RecordTypeReaders, RecordType.MessageEnd)
    def _readMessageEnd(self):
        # Sentinel record: read() stops when it sees this.
        return MessageEnd()

    @_registerReader(_RecordTypeReaders, RecordType.BinaryLibrary)
    def _readBinaryLibrary(self):
        libraryID = self._readInt32()
        libraryName = self._readString()
        library = BinaryLibrary(libraryID, libraryName, {})
        self.binaryLibraries[libraryID] = library
        return library

    @_registerReader(_RecordTypeReaders, RecordType.ObjectNullMultiple256)
    def _readObjectNullMultiple256(self):
        # Count
        return ObjectNullMultiple(self._readByte())

    @_registerReader(_RecordTypeReaders, RecordType.ObjectNullMultiple)
    def _readObjectNullMultiple(self):
        # Count
        return ObjectNullMultiple(self._readInt32())

    @_registerReader(_RecordTypeReaders, RecordType.ArraySinglePrimitive)
    def _readArraySinglePrimitive(self):
        objectID, length = self._readArrayInfo()
        primitiveType = self._readByte()
        array = self._PrimitiveTypeArrayReaders[primitiveType](self, length)
        self.objectsByID[objectID] = array
        return array

    @_registerReader(_RecordTypeReaders, RecordType.ArraySingleObject)
    @_registerReader(_RecordTypeReaders, RecordType.ArraySingleString)
    def _read_ArraySingleObject(self):
        objectID, length = self._readArrayInfo()
        array = self._readObjectArray(length, objectID)
        self.objectsByID[objectID] = array
        return array

    @_registerReader(_RecordTypeReaders, RecordType.MethodCall)
    def _read_MethodCall(self):
        raise NotImplementedError('MethodCall')

    @_registerReader(_RecordTypeReaders, RecordType.MethodReturn)
    def _read_MethodReturn(self):
        raise NotImplementedError('MethodReturn')

    # endregion

    # region Read helper classes

    def _readArrayInfo(self):
        # objectID and array length
        return (self._readInt32(), self._readInt32())

    # Reads a ClassInfo structure and creates and saves a new namedlist object with the same member and ClassInfo
    # specifics
    def _readClassInfo(self, isSystemClass=False):
        objectID = self._readInt32()
        className = self._readString()
        memberCount = self._readInt32()
        memberNames = [sanitizeIdentifier(self._readString()) for i in range(memberCount)]

        # Create namedlist that will act as the class
        # Set object ID for the class and save it by the object ID for later
        cls = namedlist(sanitizeIdentifier(className), memberNames, default=None)
        # Class ID is a special identifier to represent the class itself but not the data within of it
        # For instance, you can declare the class once but then instantiate it multiple times
        # The object ID is the unique identifier for the object itself and cannot be repeated
        # In this instance, the classID and objectID are the same for the first instantiation
        # The only way to get these different is to have a ClassWithID record and then this is
        # set manually
        cls._classID = cls._id = objectID
        cls._isSystemClass = isSystemClass
        self.classByID[objectID] = cls

        return cls

    def _readMemberTypeInfo(self, cls):
        binaryTypes = [self._readByte() for member in cls._fields]
        additionalInfo = [self._AdditionalInfoReaders[type](self) for type in binaryTypes]

        # Combine the binary types and additional info into one tuple
        # This gets saved to the class object because it will be used to set the members
        # Also, if there is a ClassWithId object then it won't have type information but only
        # the class ID so we will need to retain this information.
        cls._typeInfo = tuple(zip(binaryTypes, additionalInfo))

    # Reads members or array elements into the 'obj' pre-allocated list or class instance
    def _readClassMembers(self, obj, objectID, libraryID=None):
        index = 0
        while index < len(obj._fields):
            # If typeinfo is not defined, as is the case for ClassWithMembers and SystemClassWithMembers,
            # then assume it is an object that can be read
            # Not sure if this is a safe assumption because Microsoft isn't entirely clear when the member type
            # information is 'unnecessary'
            if obj._typeInfo is None:
                binaryType, additionalInfo = BinaryType.Object, None
            else:
                binaryType, additionalInfo = obj._typeInfo[index]

            if binaryType == BinaryType.Primitive:
                value = self._readPrimitive(additionalInfo)
            else:
                value = self._readRecord()

                if isinstance(value, BinaryLibrary):
                    # BinaryLibrary can precede the actual member
                    # Continue on to the actual member
                    continue
                elif isinstance(value, ObjectNullMultiple):
                    # Skip a given number of indices
                    index += value.count
                    continue
                elif isinstance(value, Reference):
                    # Remember where the reference lives so it can be patched
                    # in place during resolveReferences().
                    value.parent = obj
                    value.indexInParent = index

            obj[index] = value
            index += 1

        # If this object is a .NET collection (e.g. a Generic dict or list) which can be
        # replaced by a native Python type, insert a collection _Reference instead of the raw
        # object which will be resolved later in read() using a "collection resolver"
        if getattr(obj.__class__, '_isSystemClass', False):
            for name, resolver in self._collectionResolvers:
                if obj.__class__.__name__.startswith('System_Collections_Generic_%s_' % name):
                    obj = Reference(objectID, collectionResolver=resolver, originalObj=obj)
                    self._collectionReferences.append(obj)
                    break

        self.objectsByID[objectID] = obj

        # Use libraryID to append the class to that binary library
        # This is particularly useful when saving the items again so that you save all of a binary library at once
        if libraryID:
            self.binaryLibraries[libraryID].objects[objectID] = obj

        return obj

    def _readObjectArray(self, length, objectID):
        # Elements are whole records; null-run and reference records get
        # special handling just like class members.
        array = [None] * length

        index = 0
        while index < length:
            value = self._readRecord()

            if isinstance(value, BinaryLibrary):
                # BinaryLibrary can precede the actual member
                # Continue on to the actual member
                continue
            elif isinstance(value, ObjectNullMultiple):
                # Skip a given number of indices
                index += value.count
                continue
            elif isinstance(value, Reference):
                value.parent = array
                value.indexInParent = index

            array[index] = value
            index += 1

        return array

    # endregion

    # region Resolve references functions

    # Convert a _Reference representing a MemberReference into its referenced object
    def _resolveSimpleReference(self, reference):
        if reference.resolved:
            return
        replacement = self.objectsByID[reference._id]
        reference.parent[reference.indexInParent] = replacement

        # Not sure when this would happen, doesn't hurt though!
        reference.resolved = True

    # Convert a _Reference representing a .NET dictionary collection into a Python dict
    def _resolveDictReference(self, reference):
        originalObj = reference.originalObj

        # If the key-value pairs of the dict are itself a Reference, then resolve those first
        if isinstance(originalObj.KeyValuePairs, Reference):
            self._resolveSimpleReference(originalObj.KeyValuePairs)

        replacement = {}
        for item in originalObj.KeyValuePairs:
            try:
                # If any key is a _Reference, it must be resolved first
                # (value _References will be resolved later)
                if isinstance(item.key, Reference):
                    self._resolveSimpleReference(item.key)

                assert item.key not in replacement
                replacement[item.key] = item.value
            except (AssertionError, TypeError):
                # Not all .NET dictionaries can be converted to Python dicts
                # If the conversion fails, just proceed w/ the original object
                replacement = originalObj
                break
        else:
            # Assuming that the for loop completed successfully, indicating that all
            # of the dictionary objects were converted,
            # Then we need to fix the dictionary values for References
            for key, value in replacement.items():
                if isinstance(value, Reference):
                    value.parent = replacement
                    # NOTE(review): attribute name differs from the
                    # 'indexInParent' spelling used everywhere else -- verify
                    # _resolveSimpleReference can still patch these values.
                    value.index_in_parent = key

        return replacement

    # Convert a Reference representing a .NET list collection into a Python list
    def _resolveListReference(self, reference):
        originalObj = reference.originalObj

        # If the components of the list are itself a Reference, then resolve those first
        if isinstance(originalObj.items, Reference):
            self._resolveSimpleReference(originalObj.items)

        replacement = originalObj.items

        # Update parent for all replacement elements if they are references
        for element in replacement:
            if isinstance(element, Reference):
                element.parent = replacement

        return replacement

    # Maps .NET generic-collection name prefixes to their resolver methods.
    _collectionResolvers = (
        ('Dictionary', _resolveDictReference),
        ('List', _resolveListReference)
    )

    # endregion
# endregion
| 40.89826
| 120
| 0.677686
|
4a0808cedd53097ce276f19da6b613add05b71aa
| 520
|
py
|
Python
|
code-implementation/codetest.py
|
GamjaPower/kaggle
|
8ebedc73769c3c04d9c80f9e8877a10164c9c5e8
|
[
"MIT"
] | null | null | null |
code-implementation/codetest.py
|
GamjaPower/kaggle
|
8ebedc73769c3c04d9c80f9e8877a10164c9c5e8
|
[
"MIT"
] | null | null | null |
code-implementation/codetest.py
|
GamjaPower/kaggle
|
8ebedc73769c3c04d9c80f9e8877a10164c9c5e8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 2018. 9. 29.
@author: jason96
'''
import pandas as pd
def codetest():
    """Build a small demo DataFrame and return the rows where '짱절미' is True.

    Returns:
        pandas.DataFrame: the filtered frame, indexed by name.
        (Previously the result was computed and discarded; returning it is
        backward-compatible since callers ignored the old ``None``.)
    """
    data = {
        'name': ["Kang", "Kim", "Choi", "Park", "Yoon"],
        '짱절미': [True, False, False, False, False],
        '셀스타그램': [False, False, True, False, False],
        'like': [True, False, True, True, False]
    }
    frame = pd.DataFrame(data).set_index("name")
    # Boolean-mask indexing directly on the bool column; comparing `== True`
    # was redundant (and flagged by linters).
    left_data = frame[frame['짱절미']]
    return left_data
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    codetest()
| 17.931034
| 65
| 0.544231
|
4a0809747d2ecbd004bcede7455d3c6fda2629ec
| 7,691
|
py
|
Python
|
multi_arm_bandit/mvn_arm4/mvn_test_arm4_sample15_last.py
|
zxsted/meta-critic-networks
|
1768751f84845bd6fe98a13d5b57dfaca154c1f8
|
[
"MIT"
] | 59
|
2018-04-03T05:59:37.000Z
|
2022-03-14T22:41:38.000Z
|
multi_arm_bandit/mvn_arm4/mvn_test_arm4_sample15_last.py
|
floodsung/meta-critic-networks
|
1768751f84845bd6fe98a13d5b57dfaca154c1f8
|
[
"MIT"
] | null | null | null |
multi_arm_bandit/mvn_arm4/mvn_test_arm4_sample15_last.py
|
floodsung/meta-critic-networks
|
1768751f84845bd6fe98a13d5b57dfaca154c1f8
|
[
"MIT"
] | 20
|
2018-04-03T04:30:55.000Z
|
2021-12-06T06:57:59.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
import math
import random
import os
import json
def save_to_json(fname, data):
    """Serialize *data* as JSON into the file at *fname* (overwrites)."""
    with open(fname, 'w') as fp:
        json.dump(data, fp)
# Hyper Parameters
TASK_NUMS = 100           # number of bandit tasks evaluated
TEST_NUMS_PER_TASK = 10   # evaluation episodes per task
ARM_NUMS = 4              # arms per bandit
STEP = 300                # steps per episode
SAMPLE_NUMS = 15          # rollout samples fed to the task-config network
class MultiArmBandit():
    """A stationary multi-armed Bernoulli bandit.

    Pulling an arm pays 1 with that arm's fixed probability, else 0.
    """

    def __init__(self, arm_nums, probs):
        self.arm_nums = arm_nums  # number of arms
        self.probs = probs        # per-arm success probabilities (array-like)

    def step(self, action):
        """Pull the arm selected by the one-hot *action*; return 1 on win, else 0."""
        hit_prob = np.sum(self.probs * action)
        return 1 if random.random() < hit_prob else 0
class ActorNetwork(nn.Module):
    """Three-layer MLP policy head producing log-probabilities over actions."""

    def __init__(self, input_size, hidden_size, action_size):
        super(ActorNetwork, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, action_size)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # log-softmax so the policy-gradient loss can consume log-probabilities.
        return F.log_softmax(self.fc3(hidden))
class MetaValueNetwork(nn.Module):
    """Three-layer MLP value head; final layer is linear (no activation)."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MetaValueNetwork, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        h = F.relu(self.fc1(x))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
class TaskConfigNetwork(nn.Module):
    """LSTM encoder mapping an (action, reward) trajectory to a task embedding.

    Only the hidden output of the final time step is projected to the output
    size. NOTE: forward() allocates its initial states on CUDA, so it
    requires a GPU.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(TaskConfigNetwork, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Fresh zero-initialized hidden and cell states for every call.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = Variable(torch.zeros(*state_shape)).cuda()
        c0 = Variable(torch.zeros(*state_shape)).cuda()
        out, _ = self.lstm(x, (h0, c0))
        # Keep only the last time step, then apply the linear projection.
        return self.fc(out[:, -1, :])
def roll_out(actor_network, task, sample_nums):
    """Collect a fixed exploration rollout: arm 0 pulled 3 times, arms 1-3
    pulled 4 times each (15 pulls total).

    *actor_network* and *sample_nums* are accepted for interface
    compatibility with other rollout functions but are not used; the pull
    schedule is hard-coded. Returns (actions, rewards) tensors shaped
    [1, 15, ARM_NUMS] and [1, 15, 1].
    """
    actions = []
    rewards = []
    # (arm index, pull count) schedule: 3 + 4 + 4 + 4 = 15 samples.
    for arm, pulls in ((0, 3), (1, 4), (2, 4), (3, 4)):
        for _ in range(pulls):
            one_hot = [int(i == arm) for i in range(ARM_NUMS)]
            reward = task.step(one_hot)
            actions.append(one_hot)
            rewards.append([reward])
    return torch.Tensor([actions]), torch.Tensor([rewards])
def roll_out_actions(actor_network, sample_nums):
    """Sample *sample_nums* one-hot actions from the current policy.

    The policy is queried once on the constant input [[1]] (CUDA required),
    and its softmax probabilities are reused for every draw. Returns a
    tensor of shape [1, sample_nums, ARM_NUMS].
    """
    policy = torch.exp(actor_network(Variable(torch.Tensor([[1]])).cuda()))
    probs = policy.cpu().data.numpy()[0]
    actions = []
    for _ in range(sample_nums):
        chosen = np.random.choice(ARM_NUMS, p=probs)
        actions.append([int(i == chosen) for i in range(ARM_NUMS)])
    return torch.Tensor([actions])
def main():
    """Evaluate a pre-trained meta value / task-config network pair on 100
    bandit tasks, training a fresh actor per (task, repetition) and logging
    average reward and probability of choosing the optimal arm.

    Requires CUDA, "tasks_arm4.json", and (optionally) pre-trained weight
    files in the working directory. Results are written to
    'mvn_arm_4_sample_15_last.json'.
    """
    # Input widths: value net sees (one-hot action, 3-dim task embedding);
    # task-config LSTM sees (one-hot action, reward) per time step.
    mvn_input_dim = ARM_NUMS + 3
    task_config_input_dim = ARM_NUMS + 1
    # init meta value network with a task config network
    meta_value_network = MetaValueNetwork(input_size = mvn_input_dim,hidden_size = 80,output_size = 1)
    task_config_network = TaskConfigNetwork(input_size = task_config_input_dim,hidden_size = 30,num_layers = 1,output_size = 3)
    meta_value_network.cuda()
    task_config_network.cuda()
    # Resume from pre-trained weights when checkpoint files are present.
    if os.path.exists("meta_value_network_arm4.pkl"):
        meta_value_network.load_state_dict(torch.load("meta_value_network_arm4.pkl"))
        print("load meta value network success")
    if os.path.exists("task_config_network_arm4.pkl"):
        task_config_network.load_state_dict(torch.load("task_config_network_arm4.pkl"))
        print("load task config network success")
    # init a task generator for data fetching
    results = []
    total_rewards = 0
    task_probs = json.load(open("tasks_arm4.json"))
    for episode in range(TASK_NUMS):
        res_i = {}
        task_prob = task_probs[episode]["task_probs"]
        task = MultiArmBandit(ARM_NUMS,np.array(task_prob))
        res_i["arm_nums"] = ARM_NUMS
        res_i["task_probs"] = task.probs.tolist()
        res_i["sample_nums"] = SAMPLE_NUMS
        aver_rewards = []
        correct_probs = []
        for test_nums in range(TEST_NUMS_PER_TASK):
            # Fresh actor per repetition; only the actor is trained here.
            actor_network = ActorNetwork(1,40,ARM_NUMS)
            actor_network_optim = torch.optim.Adam(actor_network.parameters(),lr=0.001)
            actor_network.cuda()
            # Fixed exploration rollout -> task embedding (repeated per sample).
            pre_actions,pre_rewards = roll_out(actor_network,task,SAMPLE_NUMS)
            pre_data_samples = torch.cat((pre_actions,pre_rewards),2)
            task_configs = task_config_network(Variable(pre_data_samples).cuda()).repeat(1,SAMPLE_NUMS).view(-1,3)
            for step in range(STEP):
                inputs = Variable(torch.Tensor([[1]])).cuda() #[1,1]
                actions = roll_out_actions(actor_network,SAMPLE_NUMS)
                actions_var = Variable(actions.view(-1,ARM_NUMS)).cuda()
                # detach(): the task embedding is treated as a constant here.
                actor_data_samples = torch.cat((actions_var,task_configs.detach()),1) #[task_nums,5]
                log_softmax_actions = actor_network(inputs) # [1,2]
                log_softmax_actions = log_softmax_actions.repeat(1,SAMPLE_NUMS).view(-1,ARM_NUMS)
                # train actor network
                actor_network_optim.zero_grad()
                qs = meta_value_network(actor_data_samples)
                # Policy gradient weighted by the meta-critic's Q estimates.
                actor_network_loss = - torch.mean(torch.sum(log_softmax_actions*actions_var,1)* qs) #+ actor_criterion(actor_y_samples,target_y)
                actor_network_loss.backward()
                actor_network_optim.step()
            # Evaluate the trained policy analytically against the true probs.
            choice = torch.exp(actor_network(inputs)).cpu().data[0].numpy()
            aver_reward = np.sum(choice * task.probs)
            optimal_action = np.argmax(task.probs)
            optimal_choice = [int(i == optimal_action) for i in range(ARM_NUMS)]
            correct_prob = np.sum(choice*optimal_choice)
            aver_rewards.append(float(aver_reward))
            correct_probs.append(float(correct_prob))
            total_rewards += aver_reward
        res_i["aver_rewards"] = aver_rewards
        res_i["correct_probs"] = correct_probs
        results.append(res_i)
        print("episode:",episode,"aver_reward",np.mean(aver_rewards),"correct prob:",np.mean(correct_probs),"task:",task.probs)
    # NOTE(review): placement of the save call relative to the episode loop is
    # inferred from the flattened source — confirm against the original repo.
    save_to_json('mvn_arm_4_sample_15_last.json', results)
    print("total aver reward:",total_rewards/TASK_NUMS/TEST_NUMS_PER_TASK)

if __name__ == '__main__':
    main()
| 35.606481
| 144
| 0.64738
|
4a080975012e0ad092b9f531d5df9b5ce421b24f
| 6,075
|
py
|
Python
|
pipasic_src/inspect/ResultsParser.py
|
duocang/pipasic
|
28baf6f56cc33ed8663d238cb5bd9c9fb4f8e142
|
[
"BSD-3-Clause"
] | null | null | null |
pipasic_src/inspect/ResultsParser.py
|
duocang/pipasic
|
28baf6f56cc33ed8663d238cb5bd9c9fb4f8e142
|
[
"BSD-3-Clause"
] | null | null | null |
pipasic_src/inspect/ResultsParser.py
|
duocang/pipasic
|
28baf6f56cc33ed8663d238cb5bd9c9fb4f8e142
|
[
"BSD-3-Clause"
] | null | null | null |
#Title: ResultsParser.py
#Author: Stephen Tanner, Samuel Payne, Natalie Castellana, Pavel Pevzner, Vineet Bafna
#Created: 2005
# Copyright 2007,2008,2009 The Regents of the University of California
# All Rights Reserved
#
# Permission to use, copy, modify and distribute any part of this
# program for educational, research and non-profit purposes, by non-profit
# institutions only, without fee, and without a written agreement is hereby
# granted, provided that the above copyright notice, this paragraph and
# the following three paragraphs appear in all copies.
#
# Those desiring to incorporate this work into commercial
# products or use for commercial purposes should contact the Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE SOFTWARE PROVIDED HEREIN IS ON AN "AS IS" BASIS, AND THE UNIVERSITY
# OF CALIFORNIA HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
# ENHANCEMENTS, OR MODIFICATIONS. THE UNIVERSITY OF CALIFORNIA MAKES NO
# REPRESENTATIONS AND EXTENDS NO WARRANTIES OF ANY KIND, EITHER IMPLIED OR
# EXPRESS, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF
# THE SOFTWARE WILL NOT INFRINGE ANY PATENT, TRADEMARK OR OTHER RIGHTS.
"""
Constants and methods for parsing (Inspect) search results
"""
import os
import random
class Columns:
    """Maps Inspect result-file column header names to their column indices."""

    # Default tab-separated header line for Inspect search results.
    DefaultInspectHeader = "#SpectrumFile\tScan#\tAnnotation\tProtein\tCharge\tMQScore\tLength\tTotalPRMScore\tMedianPRMScore\tFractionY\tFractionB\tIntensity\tNTT\tInspectFDR\tF-Score\tDeltaScore\tDeltaScoreOther\tRecordNumber\tDBFilePos\tSpecFilePos\tPrecursorMZ\tPrecursorMZError\tSpecIndex"

    def __init__(self):
        # initializeHeaders returns None; `header` is kept for backward
        # compatibility with the original attribute layout.
        self.header = self.initializeHeaders(self.DefaultInspectHeader)

    def initializeHeaders(self, Header):
        """Parse a tab-separated header line into self.headers (lower-cased)."""
        if Header[0] == '#':
            Header = Header[1:]
        self.headers = Header.lower().split("\t")

    def getIndex(self, headerVal):
        """Return the column index of *headerVal* (case-insensitive), or -1."""
        wanted = headerVal.lower()
        for idx, name in enumerate(self.headers):
            if name == wanted:
                return idx
        return -1
# "Constants for which columns contain which data"
# SpectrumFile = 0
# ScanNumber = 1
# Annotation = 2
# ProteinName = 3
# Charge = 4
# MQScore = 5
# Length = 6
# NTT = 12
# PValue = 13
# FScore = 14
# DeltaScoreAny = 15
# DeltaScore = 16
# ProteinID = 17
# DBPos = 18
# FileOffset = 19 #Spectrum File pos
# ParentMZ = 20 #Corrected, associated with tweak
# MZError = 21
# #More columns for splicing
# Chromosome = 22
# Strand = 23
# GenomicPost = 24
# SplicedSequence = 25
# Splices = 26
# SearchDB = 27
class SpectrumOracleMixin:
    """Mixin resolving spectrum-file stems to their full on-disk paths.

    Call PopulateSpectrumOracle() once over a root directory, then
    FixSpectrumPath() to remap a (possibly stale) spectrum path.
    Python 2 source (print statement).
    """

    def __init__(self):
        # Maps file stem (basename without extension) -> absolute file path.
        self.SpectrumOracle = {}

    def FixSpectrumPath(self, Path):
        # Return the oracle's path for this stem; fall back to Path unchanged
        # when the stem was never registered.
        FileName = os.path.split(Path)[-1]
        Stub = os.path.splitext(FileName)[0]
        return self.SpectrumOracle.get(Stub, Path)

    def PopulateSpectrumOracle(self, RootDirectory):
        """
        Used when mzxml files are spread over multiple subdirectories.
        MZXMLOracle[Stub] = full path to the corresponding MZXML file
        Used with -M option (not with -s option)
        """
        if not RootDirectory or not os.path.exists(RootDirectory):
            return
        print "Populate oracle from %s..."%RootDirectory
        for SubFileName in os.listdir(RootDirectory):
            # Avoid expensive iteration through results directories:
            if SubFileName[:7] == "Results":
                continue
            SubFilePath = os.path.join(RootDirectory, SubFileName)
            if os.path.isdir(SubFilePath):
                # Recurse into subdirectories.
                self.PopulateSpectrumOracle(SubFilePath)
                continue
            (Stub, Extension) = os.path.splitext(SubFileName)
            Extension = Extension.lower()
            # Register every recognized spectrum container format.
            if Extension == ".mzxml":
                self.SpectrumOracle[Stub] = os.path.join(RootDirectory, SubFileName)
            elif Extension == ".mgf":
                self.SpectrumOracle[Stub] = os.path.join(RootDirectory, SubFileName)
            elif Extension == ".ms2":
                self.SpectrumOracle[Stub] = os.path.join(RootDirectory, SubFileName)
class ResultsParser:
    """Base class that walks search-results files and applies a callback to
    each. Python 2 source (print statement, apply())."""

    def __init__(self, *args, **kw):
        #self.Columns = Columns
        self.Running = 1  # flag subclasses may clear to stop processing

    def ProcessResultsFiles(self, FilePath, Callback, MaxFilesToParse = None, QuietFlag = 0):
        """
        Function for applying a Callback function to one search-results file, or to every
        search-results file in a directory.
        """
        print "ResultsParser:%s"%FilePath
        FileCount = 0
        if os.path.isdir(FilePath):
            FileNames = os.listdir(FilePath)
            # Shuffle so a capped parse (MaxFilesToParse) samples at random.
            random.shuffle(FileNames)
            for FileNameIndex in range(len(FileNames)):
                FileName = FileNames[FileNameIndex]
                if not QuietFlag:
                    print "(%s/%s) %s"%(FileNameIndex, len(FileNames), FileName)
                (Stub, Extension) = os.path.splitext(FileName)
                # Only parse recognized results-file extensions.
                if Extension.lower() not in (".txt", ".filtered", ".res", ".csv", ".out"):
                    continue
                FileCount += 1
                SubFilePath = os.path.join(FilePath, FileName)
                apply(Callback, (SubFilePath,))
                # Don't parse every single file, that will take too long!
                if MaxFilesToParse != None and FileCount > MaxFilesToParse:
                    break
        else:
            # Single file: invoke the callback on it directly.
            apply(Callback, (FilePath,))
| 39.705882
| 294
| 0.651358
|
4a0809ecd080a5182b0d4ea607a70d399cb0b9ba
| 39
|
py
|
Python
|
catulator_app/webapp.py
|
LilacRapture/catulator_bot
|
a701aa657236f19e124121bc160d6e39b0ba9321
|
[
"MIT"
] | null | null | null |
catulator_app/webapp.py
|
LilacRapture/catulator_bot
|
a701aa657236f19e124121bc160d6e39b0ba9321
|
[
"MIT"
] | null | null | null |
catulator_app/webapp.py
|
LilacRapture/catulator_bot
|
a701aa657236f19e124121bc160d6e39b0ba9321
|
[
"MIT"
] | null | null | null |
from . import app
from . import routes
| 13
| 20
| 0.74359
|
4a080a255a3bdde6ea6404ab337b69a5d62e4bac
| 1,544
|
py
|
Python
|
scraper.py
|
mycelium-ethereum/punk-offerbook
|
5804a27fe26af0d613fd5281f0f17b9c207f1822
|
[
"MIT"
] | null | null | null |
scraper.py
|
mycelium-ethereum/punk-offerbook
|
5804a27fe26af0d613fd5281f0f17b9c207f1822
|
[
"MIT"
] | null | null | null |
scraper.py
|
mycelium-ethereum/punk-offerbook
|
5804a27fe26af0d613fd5281f0f17b9c207f1822
|
[
"MIT"
] | null | null | null |
import re
import logging
import settings
import numpy as np
import pandas as pd
from typing import List
from utils import mongo
from time import time, sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from datetime import datetime
from webdriver_manager.utils import ChromeType
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
class CryptopunksScraper:
    """Scrapes the CryptoPunks marketplace page for the current floor price
    using a (headless) Selenium-driven Chrome browser."""

    BASE_URL = "https://www.larvalabs.com/cryptopunks"

    def __init__(self, headless: bool = True):
        self.logger = logging.getLogger('root')
        self.headless_toggle = headless
        self.create_driver()

    def create_driver(self):
        """Start a Chrome driver (headless per self.headless_toggle)."""
        options = Options()
        options.headless = self.headless_toggle
        self.driver = webdriver.Chrome(ChromeDriverManager(chrome_type=ChromeType.GOOGLE).install(), options=options)

    def kill_driver(self):
        """Shut down the browser and release its resources."""
        self.logger.debug('Killing driver now...')
        self.driver.quit()

    def get_floor_price(self):
        """Return the floor price parsed from the page, or NaN after ~5s.

        Polls the rendered DOM because the value is injected asynchronously;
        the first <b> element is expected to contain e.g. "42.5 ETH".
        """
        start_time = time()
        self.driver.get(self.BASE_URL)
        while True:
            try:
                html = self.driver.page_source
                soup = BeautifulSoup(html, features="html.parser")
                all_rows = soup.find_all('b')
                return float(all_rows[0].text.split(' ')[0])
            except Exception:
                # Page not ready yet (element missing or unparsable). The
                # original bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit; Exception keeps retry behavior without that.
                if time() - start_time > 5:
                    return np.nan
| 32.166667
| 117
| 0.641192
|
4a080a4bc3c7faabf53e77a5f71111362b3f98a6
| 3,098
|
py
|
Python
|
tests/functions/test_distribution.py
|
jvesely/PsyNeuLink
|
40d92fdfd61c4ec992ae2564a0bd8496dfd7fa31
|
[
"Apache-2.0"
] | null | null | null |
tests/functions/test_distribution.py
|
jvesely/PsyNeuLink
|
40d92fdfd61c4ec992ae2564a0bd8496dfd7fa31
|
[
"Apache-2.0"
] | 208
|
2020-09-11T04:27:32.000Z
|
2022-03-31T02:15:36.000Z
|
tests/functions/test_distribution.py
|
jvesely/PsyNeuLink
|
40d92fdfd61c4ec992ae2564a0bd8496dfd7fa31
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
import psyneulink.core.llvm as pnlvm
import psyneulink.core.components.functions.nonstateful.distributionfunctions as Functions
# Seed the global RNG once so the module-level RAND* constants below are
# reproducible; the expected values in test_data depend on these exact draws.
np.random.seed(0)
test_var = np.random.rand()
RAND1 = np.random.rand()
RAND2 = np.random.rand()
RAND3 = np.random.rand()
RAND4 = np.random.rand()
RAND5 = np.random.rand()

# Each entry: (function class, input variable, constructor params,
# skip-reason for compiled (LLVM) modes or None, expected output).
test_data = [
    (Functions.DriftDiffusionAnalytical, test_var, {}, None,
     (1.9774974807292212, 0.012242689689501842, 1.9774974807292207, 1.3147677945132479, 1.7929299891370192, 1.9774974807292207, 1.3147677945132479, 1.7929299891370192)),
    (Functions.DriftDiffusionAnalytical, test_var, {"drift_rate": RAND1, "threshold": RAND2, "starting_point": RAND3, "t0":RAND4, "noise": RAND5}, None,
     (0.4236547993389047, -2.7755575615628914e-17, 0.5173675420165031, 0.06942854144616283, 6.302631815990666, 1.4934079600147951, 0.4288991185241868, 1.7740760781361433)),
    (Functions.DriftDiffusionAnalytical, -test_var, {"drift_rate": RAND1, "threshold": RAND2, "starting_point": RAND3, "t0":RAND4, "noise": RAND5}, None,
     (0.42365479933890504, 0.0, 0.5173675420165031, 0.06942854144616283, 6.302631815990666, 1.4934079600147951, 0.4288991185241868, 1.7740760781361433)),
    # FIXME: Rounding errors result in different behaviour on different platforms
    # (Functions.DriftDiffusionAnalytical, 1e-4, {"drift_rate": 1e-5, "threshold": RAND2, "starting_point": RAND3, "t0":RAND4, "noise": RAND5}, "Rounding errors",
    # (0.5828813465336954, 0.04801236718458773, 0.532471083815943, 0.09633801362499317, 6.111833139205608, 1.5821207676710864, 0.5392724012504414, 1.8065252817609618)),
    # Two tests with different inputs to show that input is ignored.
    (Functions.NormalDist, 1e14, {"mean": RAND1, "standard_deviation": RAND2}, None, (1.0890232855122397)),
    (Functions.NormalDist, 1e-4, {"mean": RAND1, "standard_deviation": RAND2}, None, (1.0890232855122397)),
    (Functions.UniformDist, 1e14, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6879771504250405)),
    (Functions.UniformDist, 1e-4, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6879771504250405)),
]

# use list, naming function produces ugly names
names = [
    "DriftDiffusionAnalytical-DefaultParameters",
    "DriftDiffusionAnalytical-RandomParameters",
    "DriftDiffusionAnalytical-NegInput",
    # "DriftDiffusionAnalytical-SmallDriftRate",
    "NormalDist1",
    "NormalDist2",
    "UniformDist1",
    "UniformDist2",
]
@pytest.mark.function
@pytest.mark.transfer_function
@pytest.mark.benchmark
@pytest.mark.parametrize("func, variable, params, llvm_skip, expected", test_data, ids=names)
def test_execute(func, variable, params, llvm_skip, expected, benchmark, func_mode):
    """Instantiate *func* with *params*, execute it on *variable* in the given
    execution mode (Python or compiled), and compare against *expected*."""
    benchmark.group = "TransferFunction " + func.componentName
    # Some parametrizations are known-broken under non-Python (LLVM) modes.
    if func_mode != 'Python' and llvm_skip:
        pytest.skip(llvm_skip)
    f = func(default_variable=variable, **params)
    ex = pytest.helpers.get_func_execution(f, func_mode)
    res = ex(variable)
    assert np.allclose(res, expected)
    # Benchmark only when explicitly enabled, to keep normal runs fast.
    if benchmark.enabled:
        benchmark(ex, variable)
| 51.633333
| 172
| 0.741446
|
4a080ae13c876589897cf230e6a792ce74c2dcb1
| 24,987
|
py
|
Python
|
NetworkControllability/TmpEdgeAttackExperiments.py
|
xinfeng1i/NetworkControllability
|
8a22ad0498ea12438c132556814dc255e709dc01
|
[
"BSD-2-Clause"
] | 1
|
2019-02-06T13:39:49.000Z
|
2019-02-06T13:39:49.000Z
|
NetworkControllability/TmpEdgeAttackExperiments.py
|
python27/NetworkControllability
|
8a22ad0498ea12438c132556814dc255e709dc01
|
[
"BSD-2-Clause"
] | 1
|
2020-11-03T22:51:32.000Z
|
2020-11-06T11:48:28.000Z
|
NetworkControllability/TmpEdgeAttackExperiments.py
|
xinfeng1i/NetworkControllability
|
8a22ad0498ea12438c132556814dc255e709dc01
|
[
"BSD-2-Clause"
] | null | null | null |
import networkx as nx
import matplotlib.pyplot as plt
import exact_controllability as ECT
from networkx.utils import powerlaw_sequence
import operator
import random
import csv
import copy
import subprocess, os
import time
import numpy as np
from ControllabilityRobustnessBasedOnEdgeAttack import RandomEdgeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeDegreeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeDegreeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeBetweennessAttack
from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeBetweennessAttack
import strutral_controllability as SC
def EdgeAttackBA():
    """Run five edge-attack strategies on Barabasi-Albert graphs (n=200, m=3).

    Each strategy removes 20% of the edges; the driver-node counts (ND) are
    averaged over run_cnt graph realizations, ND is normalized by n and the
    removal counter by E, and (fraction_removed, ND) rows are written to
    results2/edge_attack{1..5}_BA.csv. Python 2 source (print statement).
    """
    start_time = time.time()
    n = 200
    m = 3
    fraction = 0.2
    E = 591        # edge count of BA(n=200, m=3): m*(n-m) = 591
    E_rm = 118     # number of edges removed (~20% of E)
    run_cnt = 100
    #******** Run Node Attack 1 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 0
    for i in range(run_cnt):
        # New BA realization per run, seeded for reproducibility.
        G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
        print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1
    # Average ND over runs, then normalize ND by n and removal count by E.
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack1_BA.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    #******** Run Node Attack 2 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 0
    for i in range(run_cnt):
        G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
        print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack2_BA.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    #******** Run Node Attack 3 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 0
    for i in range(run_cnt):
        G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
        print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack3_BA.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    #******** Run Node Attack 4 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 0
    for i in range(run_cnt):
        G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
        print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack4_BA.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    #******** Run Node Attack 5 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 0
    for i in range(run_cnt):
        G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
        print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack5_BA.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    print "--- cost time %s seconds ---" %(time.time() - start_time)
def EdgeAttackUSAir():
    """Run the five edge-attack strategies on the US Air transportation
    network (dataset/USAir97.net, n=332, E=2126).

    The random attack is averaged over 100 runs; the deterministic attacks
    use run_cnt = 3. Results go to results2/edge_attack{1..5}_USAir.csv.
    Python 2 source (print statement).
    """
    start_time = time.time()
    n = 332
    fraction = 0.2
    E = 2126
    E_rm = int(0.2 * E)   # number of edges removed
    run_cnt = 100
    #******** Run Edge Attack 1 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 1;
    for i in range(run_cnt):
        G1 = nx.read_pajek("dataset/USAir97.net")
        print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        # Seed the module RNG so the random removals are reproducible per run.
        random.seed(rndseed)
        ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1;
    # Average ND over runs, then normalize ND by n and removal count by E.
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack1_USAir.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 3
    #******** Run Edge Attack 2 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G1 = nx.read_pajek("dataset/USAir97.net")
        print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack2_USAir.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 3
    #******** Run Edge Attack 3 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G1 = nx.read_pajek("dataset/USAir97.net")
        print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack3_USAir.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 3
    #******** Run Edge Attack 4 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G1 = nx.read_pajek("dataset/USAir97.net")
        print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack4_USAir.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 3
    #******** Run Edge Attack 5 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G1 = nx.read_pajek("dataset/USAir97.net")
        print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack5_USAir.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    print "--- cost time %s seconds ---" %(time.time() - start_time)
def EdgeAttackErdosNetwork():
    """Run the five edge-attack strategies on the Erdos collaboration network
    (dataset/Erdos971_revised.net), restricted to its largest connected
    component (n=429, E=1312).

    The random attack is averaged over 30 runs; the deterministic attacks
    use a single run. Results go to results2/edge_attack{1..5}_ErdosNetwork.csv.
    Python 2 source (print statement).
    """
    start_time = time.time()
    n = 429
    fraction = 0.2
    E = 1312
    E_rm = int(0.2 * E)   # number of edges removed
    run_cnt = 30
    #******** Run Node Attack 1 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    rndseed = 1
    for i in range(run_cnt):
        G = nx.read_pajek("dataset/Erdos971_revised.net")
        # Analyze only the giant connected component.
        G1 = max(nx.connected_component_subgraphs(G),key=len)
        print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        # Seed the module RNG so the random removals are reproducible per run.
        random.seed(rndseed)
        ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
        rndseed += 1
    # Average ND over runs, then normalize ND by n and removal count by E.
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack1_ErdosNetwork.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 1
    # Re-seed from system entropy for the deterministic attacks below.
    random.seed()
    #******** Run Node Attack 2 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G = nx.read_pajek("dataset/Erdos971_revised.net")
        G1 = max(nx.connected_component_subgraphs(G),key=len)
        print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack2_ErdosNetwork.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 1
    random.seed()
    #******** Run Node Attack 3 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G = nx.read_pajek("dataset/Erdos971_revised.net")
        G1 = max(nx.connected_component_subgraphs(G),key=len)
        print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack3_ErdosNetwork.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 1
    random.seed()
    #******** Run Node Attack 4 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G = nx.read_pajek("dataset/Erdos971_revised.net")
        G1 = max(nx.connected_component_subgraphs(G),key=len)
        print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack4_ErdosNetwork.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    run_cnt = 1
    random.seed()
    #******** Run Node Attack 5 ********#
    tot_ND1 = [0] * (E_rm + 1)
    tot_T1 = [0] * (E_rm + 1)
    for i in range(run_cnt):
        G = nx.read_pajek("dataset/Erdos971_revised.net")
        G1 = max(nx.connected_component_subgraphs(G),key=len)
        print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
        print "graph info", nx.info(G1)
        ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
        tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
    tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
    tot_T1 = T1
    tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
    tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
    with open("results2/edge_attack5_ErdosNetwork.csv", "w") as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerows(zip(tot_T1, tot_ND1))
    print "--- cost time %s seconds ---" %(time.time() - start_time)
def ReadPajek(filename):
    """Read a Pajek (.net) file and construct a networkx DiGraph.

    Only the ``*Vertices`` and ``*Arcs`` sections are honored; vertex labels
    and arc weights are parsed but discarded. Lines under unrecognized
    section markers are skipped.
    """
    G = nx.DiGraph()
    fp = open(filename, 'r')
    line = fp.readline()
    while line:
        if line[0] == '*':
            tokens = line.strip().split()
            section = tokens[0]
            count = int(tokens[1])
            if section in ('*Vertices', '*vertices'):
                for _ in range(count):
                    node_tokens = fp.readline().strip().split()
                    node_id = int(node_tokens[0])
                    node_label = node_tokens[1]  # read (as original) but unused
                    G.add_node(node_id)
            elif section in ('*Arcs', '*arcs'):
                for _ in range(count):
                    edge_tokens = fp.readline().strip().split()
                    source = int(edge_tokens[0])
                    target = int(edge_tokens[1])
                    #w = float(edge_tokens[2])
                    G.add_edge(source, target)
        line = fp.readline()
    fp.close()
    return G
def EdgeAttack(G):
    """Run five edge-attack strategies on a network and print controllability.

    For each strategy, edges are removed cumulatively from a deep copy of G
    and the driver-node fraction n_D = |driver nodes| / |nodes| is printed
    after 5%, 10%, 15% and 20% of the edges have been removed.  Strategies:
      RA  -- random edge removal
      IDA -- initial edge-degree attack (degree products computed once)
      RDA -- recalculated edge-degree attack (recomputed after each removal)
      IBA -- initial edge-betweenness attack (computed once)
      RBA -- recalculated edge-betweenness attack (recomputed each removal)

    NOTE: uses the networkx 1.x API (`edges_iter`) and Python 2 syntax
    (`print` statement, `dict.iteritems`).  `SC.control_nodes` is an
    external helper that presumably returns the set of driver nodes of a
    structural-controllability analysis -- confirm against the SC module.

    Params:
        G: A directed network of networkx
    Returns:
        None. Print the network controllability n_D after
        5% 10% 15% 20% edges removed
    """
    NodesNum = G.number_of_nodes()
    EdgesNum = G.number_of_edges()
    # Edge remove fractions; each attack reports n_D at these four points.
    F1 = 0.05
    F2 = 0.10
    F3 = 0.15
    F4 = 0.20
    # One result list per strategy, each collecting four n_D values.
    LRA = []
    LID = []
    LRD = []
    LIB = []
    LRB = []
    # Following is Edge Random Attack (RA): remove edges in a random order.
    print '########## Edge RA ##########'
    G1 = copy.deepcopy(G)
    RandomEdges = copy.deepcopy(G1.edges())
    random.shuffle(RandomEdges)
    # The cursor `i` carries over between the four loops, so removals
    # accumulate: each loop only removes the edges needed to reach the
    # next fraction.
    i = 0
    while i < int(F1 * EdgesNum):
        u, v = RandomEdges[i]
        G1.remove_edge(u, v)
        i += 1
    # `+ 0.0` forces float division under Python 2.
    nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
    print F1, nD
    LRA.append(nD)
    while i < int(F2 * EdgesNum):
        u, v = RandomEdges[i]
        G1.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
    print F2, nD
    LRA.append(nD)
    while i < int(F3 * EdgesNum):
        u, v = RandomEdges[i]
        G1.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
    print F3, nD
    LRA.append(nD)
    while i < int(F4 * EdgesNum):
        u, v = RandomEdges[i]
        G1.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
    print F4, nD
    LRA.append(nD)
    G1.clear()
    RandomEdges = []
    # Following is Initial Edge Degree Attack (IDA): rank edges once by
    # the product of their endpoint degrees, then remove in that order.
    print '########## Edge IDA ##########'
    G2 = copy.deepcopy(G)
    NodeDegrees = nx.degree(G2)
    EdgeDegrees = {}
    for u, v in G2.edges_iter():  # Calculate the edge degrees
        EdgeDegrees[(u, v)] = NodeDegrees[u] * NodeDegrees[v]
    # Sort the edges descendingly according to edge degree
    SortedEdges = sorted(EdgeDegrees, key=EdgeDegrees.get, reverse=True)
    i = 0
    while i < int(F1 * EdgesNum):
        u, v = SortedEdges[i]
        G2.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
    print F1, nD
    LID.append(nD)
    while i < int(F2 * EdgesNum):
        u, v = SortedEdges[i]
        G2.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
    print F2, nD
    LID.append(nD)
    while i < int(F3 * EdgesNum):
        u, v = SortedEdges[i]
        G2.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
    print F3, nD
    LID.append(nD)
    while i < int(F4 * EdgesNum):
        u, v = SortedEdges[i]
        G2.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
    print F4, nD
    LID.append(nD)
    G2.clear()
    NodeDegrees = {}
    EdgeDegrees = {}
    SortedEdges = []
    # Following is Recalculated Edge Degree Attack (RDA): after every
    # removal, rescan all remaining edges for the current max degree product.
    print '########## Edge RDA ##########'
    G3 = copy.deepcopy(G)
    i = 0
    while i < int(F1 * EdgesNum):
        # Find the edge with max edge degree at present
        MaxU = -1; MaxV = -1; MaxDegree = -1;
        NodeDegrees = nx.degree(G3)
        for (u, v) in G3.edges_iter():
            CurDegree = NodeDegrees[u] * NodeDegrees[v]
            if CurDegree > MaxDegree:
                MaxDegree = CurDegree
                MaxU = u
                MaxV = v
        G3.remove_edge(MaxU, MaxV)
        i += 1
    nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
    print F1, nD
    LRD.append(nD)
    while i < int(F2 * EdgesNum):
        # Find the edge with max edge degree at present
        MaxU = -1; MaxV = -1; MaxDegree = -1;
        NodeDegrees = nx.degree(G3)
        for (u, v) in G3.edges_iter():
            CurDegree = NodeDegrees[u] * NodeDegrees[v]
            if CurDegree > MaxDegree:
                MaxDegree = CurDegree
                MaxU = u
                MaxV = v
        G3.remove_edge(MaxU, MaxV)
        i += 1
    nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
    print F2, nD
    LRD.append(nD)
    while i < int(F3 * EdgesNum):
        # Find the edge with max edge degree at present
        MaxU = -1; MaxV = -1; MaxDegree = -1;
        NodeDegrees = nx.degree(G3)
        for (u, v) in G3.edges_iter():
            CurDegree = NodeDegrees[u] * NodeDegrees[v]
            if CurDegree > MaxDegree:
                MaxDegree = CurDegree
                MaxU = u
                MaxV = v
        G3.remove_edge(MaxU, MaxV)
        i += 1
    nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
    print F3, nD
    LRD.append(nD)
    while i < int(F4 * EdgesNum):
        # Find the edge with max edge degree at present
        MaxU = -1; MaxV = -1; MaxDegree = -1;
        NodeDegrees = nx.degree(G3)
        for (u, v) in G3.edges_iter():
            CurDegree = NodeDegrees[u] * NodeDegrees[v]
            if CurDegree > MaxDegree:
                MaxDegree = CurDegree
                MaxU = u
                MaxV = v
        G3.remove_edge(MaxU, MaxV)
        i += 1
    nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
    print F4, nD
    LRD.append(nD)
    G3.clear()
    # Following is Initial Edge Betweenness Attack (IBA): rank edges once
    # by betweenness centrality, then remove in that order.
    print '########## Edge IBA ##########'
    G4 = copy.deepcopy(G)
    EdgeBetweenness = nx.edge_betweenness_centrality(G4)
    SortedBetEdges = sorted(EdgeBetweenness,
                            key=EdgeBetweenness.get, reverse=True)
    i = 0
    while i < int(F1 * EdgesNum):
        u, v = SortedBetEdges[i]
        G4.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
    print F1, nD
    LIB.append(nD)
    while i < int(F2 * EdgesNum):
        u, v = SortedBetEdges[i]
        G4.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
    print F2, nD
    LIB.append(nD)
    while i < int(F3 * EdgesNum):
        u, v = SortedBetEdges[i]
        G4.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
    print F3, nD
    LIB.append(nD)
    while i < int(F4 * EdgesNum):
        u, v = SortedBetEdges[i]
        G4.remove_edge(u, v)
        i += 1
    nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
    print F4, nD
    LIB.append(nD)
    G4.clear()
    EdgeBetweenness = {}
    SortedBetEdges = []
    # Following is Recalculated Edge Betweenness Attack (RBA): recompute
    # the full betweenness centrality after every single removal (slow).
    print '########## Edge RBA ##########'
    G5 = copy.deepcopy(G)
    i = 0
    while i < int(F1 * EdgesNum):
        EdgeBets = nx.edge_betweenness_centrality(G5)
        # Find the edge with Max edge betweenness
        uMax = -1; vMax = -1; betMax = -1.0;
        for ((u, v), bet) in EdgeBets.iteritems():
            if bet > betMax:
                betMax = bet
                uMax = u
                vMax = v
        G5.remove_edge(uMax, vMax)
        i += 1
    nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
    print F1, nD
    LRB.append(nD)
    while i < int(F2 * EdgesNum):
        EdgeBets = nx.edge_betweenness_centrality(G5)
        # Find the edge with Max edge betweenness
        uMax = -1; vMax = -1; betMax = -1.0;
        for ((u, v), bet) in EdgeBets.iteritems():
            if bet > betMax:
                betMax = bet
                uMax = u
                vMax = v
        G5.remove_edge(uMax, vMax)
        i += 1
    nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
    print F2, nD
    LRB.append(nD)
    while i < int(F3 * EdgesNum):
        EdgeBets = nx.edge_betweenness_centrality(G5)
        # Find the edge with Max edge betweenness
        uMax = -1; vMax = -1; betMax = -1.0;
        for ((u, v), bet) in EdgeBets.iteritems():
            if bet > betMax:
                betMax = bet
                uMax = u
                vMax = v
        G5.remove_edge(uMax, vMax)
        i += 1
    nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
    print F3, nD
    LRB.append(nD)
    while i < int(F4 * EdgesNum):
        EdgeBets = nx.edge_betweenness_centrality(G5)
        # Find the edge with Max edge betweenness
        uMax = -1; vMax = -1; betMax = -1.0;
        for ((u, v), bet) in EdgeBets.iteritems():
            if bet > betMax:
                betMax = bet
                uMax = u
                vMax = v
        G5.remove_edge(uMax, vMax)
        i += 1
    nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
    print F4, nD
    LRB.append(nD)
    G5.clear()
    # Summary: the four n_D values (at 5/10/15/20% removed) per strategy.
    print 'RA: ', LRA[0], LRA[1], LRA[2], LRA[3]
    print 'ID: ', LID[0], LID[1], LID[2], LID[3]
    print 'RD: ', LRD[0], LRD[1], LRD[2], LRD[3]
    print 'IB: ', LIB[0], LIB[1], LIB[2], LIB[3]
    print 'RB: ', LRB[0], LRB[1], LRB[2], LRB[3]
if __name__ == "__main__":
    # Earlier experiments, kept for reference:
    #EdgeAttackBA()
    #EdgeAttackUSAir()
    # Edge Attack Erdos971 Network
    # for random attack, we set the random seed to from 1 to 100 for the
    # independent 100 runs. For other deliberate attacks, as the attack order
    # is fixed, we reset the seed of random to the initial state, i.e. seed(None)
    #EdgeAttackErdosNetwork()
    # Regulatory
    #G = ReadPajek('./dataset/Regulatory/TRN-Yeast-1.net')
    #G = ReadPajek('./dataset/Regulatory/TRN-Yeast-2.net')
    #G = ReadPajek('./dataset/Regulatory/TRN-EC-2.net')
    #G = ReadPajek('./dataset/Regulatory/Ownership.net')
    # World Wide Web (WWW)
    G = ReadPajek('./dataset/WWW/PoliticalBlogs.net')
    print 'Edge Attack From Temp Files ... '
    print 'WWW --- PoliticalBlogs'
    # Report the intact network's size and baseline controllability n_D
    # before any edges are removed.
    NodesNum = G.number_of_nodes()
    EdgesNum = G.number_of_edges()
    DriverNodes = SC.control_nodes(G)
    nD = len(DriverNodes) / (NodesNum + 0.0)  # float division under py2
    print 'Nodes Num: ', NodesNum
    print 'Edges Num: ', EdgesNum
    print 'nD = ', nD
    # Run all five edge-attack strategies and print their results.
    EdgeAttack(G)
| 34.370014
| 109
| 0.540521
|
4a080c65ffff104387658dcc924a4d2227e150ef
| 3,808
|
py
|
Python
|
experiments/state_distance/optimal_control/compare_oc_variants.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/state_distance/optimal_control/compare_oc_variants.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/state_distance/optimal_control/compare_oc_variants.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
"""
Script to compare different optimal control variants for a given QF.
"""
import argparse
from collections import defaultdict
import joblib
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
from rlkit.misc.rllab_util import get_logger_table_dict
from rlkit.state_distance.policies import (
SoftOcOneStepRewardPolicy,
ArgmaxQFPolicy,
StateOnlySdqBasedSqpOcPolicy,
SamplePolicyPartialOptimizer,
)
from state_distance.rollout_util import multitask_rollout
from rlkit.core import logger
def get_class_params_to_try(policy_class):
    """Return the list of constructor-kwarg dicts to evaluate for the
    given optimal-control policy class.

    Returns None for a class this script does not know about.
    """
    def _sweep(space):
        # Enumerate every combination in the search space deterministically.
        return hyp.DeterministicHyperparameterSweeper(space).iterate_hyperparameters()

    if policy_class == SoftOcOneStepRewardPolicy:
        return _sweep({
            'sample_size': [100, 1000],
            'constraint_weight': [1, 10],
        })
    if policy_class == ArgmaxQFPolicy:
        # Two hand-picked settings: pure gradient refinement vs pure sampling.
        return [
            {
                'sample_size': 1,
                'num_gradient_steps': 100,
            },
            {
                'sample_size': 1000,
                'num_gradient_steps': 0,
            },
        ]
    if policy_class == SamplePolicyPartialOptimizer:
        return _sweep({
            'sample_size': [100, 1000],
        })
    if policy_class == StateOnlySdqBasedSqpOcPolicy:
        return _sweep({
            'solver_params': [dict(
                maxiter=5,
                ftol=0.1,
            )],
            'planning_horizon': [1, 5],
        })
def experiment(variant):
    """Load a snapshot, roll out the requested OC policy against sampled
    goals, log diagnostics, and return the chosen evaluation statistic."""
    stat_name = variant['stat_name']

    # The snapshot provides the environment, the Q-function, and the
    # argmax policy used inside the OC policy.
    snapshot = joblib.load(variant['path'])
    env = snapshot['env']

    policy = variant['policy_class'](
        snapshot['qf'],
        env,
        snapshot['policy'],
        **variant['policy_params']
    )

    # One rollout per sampled goal; goals are drawn fresh each time.
    paths = [
        multitask_rollout(
            env,
            policy,
            env.sample_goal_for_rollout(),
            variant['discount'],
            max_path_length=variant['horizon'],
            animated=False,
            decrement_discount=False,
        )
        for _ in range(variant['num_rollouts'])
    ]

    env.log_diagnostics(paths)
    table = logger.get_table_dict()
    logger.dump_tabular()
    return table[stat_name]
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file with a QF')
    args = parser.parse_args()

    # exp_prefix = 'dev-sdql-compare-ocs'
    exp_prefix = 'sdql-reacher2d-long-compare-ocs'
    # Shared experiment settings; policy_class / policy_params are filled
    # in per launch below.
    variant = dict(
        path=args.file,
        stat_name='Final Euclidean distance to goal Mean',
        horizon=50,
        num_rollouts=100,
        discount=5,
    )
    # (Removed the unused `policy_to_scores = defaultdict(list)` — it was
    # never written to or read.)
    exp_id = 0
    for policy_class in [
        SoftOcOneStepRewardPolicy,
        SamplePolicyPartialOptimizer,
        ArgmaxQFPolicy,
        StateOnlySdqBasedSqpOcPolicy,
    ]:
        variant['policy_class'] = policy_class
        # Launch one experiment per hyperparameter setting of this class.
        for policy_params in get_class_params_to_try(policy_class):
            variant['policy_params'] = policy_params
            run_experiment(
                experiment,
                exp_prefix=exp_prefix,
                variant=variant,
                exp_id=exp_id,
            )
            exp_id += 1
| 28.631579
| 70
| 0.616071
|
4a080c6fa78d4383267e8e46011f1657efc4ed34
| 786
|
py
|
Python
|
src/binarization.py
|
ColoredInsaneAsylums/PageSegmentation-PrivacySensitiveTranscription
|
71ab8df1baf0a9b8cfb196115ed7c88880799fcb
|
[
"BSD-3-Clause"
] | null | null | null |
src/binarization.py
|
ColoredInsaneAsylums/PageSegmentation-PrivacySensitiveTranscription
|
71ab8df1baf0a9b8cfb196115ed7c88880799fcb
|
[
"BSD-3-Clause"
] | null | null | null |
src/binarization.py
|
ColoredInsaneAsylums/PageSegmentation-PrivacySensitiveTranscription
|
71ab8df1baf0a9b8cfb196115ed7c88880799fcb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
"""Binarize a scanned page image with Gamera's DjVu thresholding and save
it as a 1-bit image.

Usage: binarization.py <input-image> <output-image>
"""
##########IMPORTS#################
import sys
import time
from PIL import Image as Im
from gamera.core import *
from pil_io import *
init_gamera()
################################
########## Parse Arguments ############
infile = sys.argv[1]
outfile = sys.argv[2]
#######################################
########### Load Image ############
# Use the parsed `infile` variable (the old code re-read sys.argv[1],
# bypassing the variable it had just defined).
pilImg = Im.open(infile)
img = from_pil(pilImg).image_copy()
###################################
########## Binarize Image ############
binImg = img.djvu_threshold(0.2, 512, 64, 2)
######################################
########## Save Image as 1-bit BMP ############
rgbImg = binImg.to_rgb()
pilImg = rgbImg.to_pil()
pilImg = pilImg.convert('1')  # PIL mode '1' = 1 bit per pixel
pilImg.save(outfile)
##############################################
| 21.833333
| 47
| 0.433842
|
4a080c73c339fcd199c70d875aed961a36ad0392
| 44,435
|
py
|
Python
|
helpers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/pdb.py
|
vnnguyen/appbasic
|
c1d0b85c42a09f8c2d75807774f550b2b9eae33e
|
[
"BSD-3-Clause"
] | 550
|
2015-01-05T16:59:00.000Z
|
2022-03-20T16:55:25.000Z
|
helpers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/pdb.py
|
vnnguyen/appbasic
|
c1d0b85c42a09f8c2d75807774f550b2b9eae33e
|
[
"BSD-3-Clause"
] | 15
|
2015-02-05T06:00:47.000Z
|
2018-07-07T14:34:04.000Z
|
helpers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/pdb.py
|
vnnguyen/appbasic
|
c1d0b85c42a09f8c2d75807774f550b2b9eae33e
|
[
"BSD-3-Clause"
] | 119
|
2015-01-08T00:48:24.000Z
|
2022-01-27T14:13:15.000Z
|
#! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
    """Causes a debugger to be restarted for the debugged python program.

    Raised by Pdb.do_run()/do_restart; the top-level driver is expected
    to catch it and re-run the program under the debugger.
    """
    pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
# Shorthand used when printing exception values and similar output.
_saferepr = _repr.repr

# Names exported by `from pdb import *`.
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
           "post_mortem", "help"]
def find_function(funcname, filename):
    """Locate the definition of *funcname* in *filename*.

    Returns a ``(funcname, filename, lineno)`` tuple for the first line
    that matches ``def funcname(``, or None if the file cannot be opened
    or no definition is found.

    Line numbers are 1-based, as the consumers of this info expect.
    """
    cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
    try:
        fp = open(filename)
    except IOError:
        return None
    try:
        # enumerate(..., 1): first line is line 1.
        for lineno, line in enumerate(fp, 1):
            if cre.match(line):
                return funcname, filename, lineno
        return None
    finally:
        # Close the handle even if reading raises; the original leaked
        # it on any error between open() and close().
        fp.close()
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': '    # Use this to get the old situation back
# Prefix inserted between the frame header and the source text when a
# stack entry is printed.
line_prefix = '\n-> '   # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
bdb.Bdb.__init__(self)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining a command list
self.commands_bnum = None # The breakpoint number for which we are defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self,frame):
""" Call every command that was set for the current active breakpoint (if there is one)
Returns True if the normal interaction function must be called, False otherwise """
#self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
if getattr(self,"currentbp",False) and self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
exc_type, exc_value, exc_traceback = exc_info
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
exec code in globals, locals
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
""" Handles one command line during command list definition. """
cmd, arg, line = self.parseline(line)
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if (arg):
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
if func.func_name in self.commands_resuming : # one of the resuming commands.
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
self.cmdloop()
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
line = linecache.getline(filename, lineno)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main debugger
loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print >>self.stdout, self.curframe.f_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
    def do_list(self, arg):
        # List source around the current line, at a given line, or over a
        # range.  self.lineno remembers where the previous listing stopped
        # so a bare 'list' continues from there.
        self.lastcmd = 'list'
        last = None
        if arg:
            try:
                x = eval(arg, {}, {})
                if type(x) == type(()):
                    first, last = x
                    first = int(first)
                    last = int(last)
                    if last < first:
                        # Assume it's a count
                        last = first + last
                else:
                    first = max(1, int(x) - 5)
            except:
                print >>self.stdout, '*** Error in argument:', repr(arg)
                return
        elif self.lineno is None:
            first = max(1, self.curframe.f_lineno - 5)
        else:
            first = self.lineno + 1
        if last is None:
            last = first + 10
        filename = self.curframe.f_code.co_filename
        breaklist = self.get_file_breaks(filename)
        try:
            for lineno in range(first, last+1):
                line = linecache.getline(filename, lineno)
                if not line:
                    print >>self.stdout, '[EOF]'
                    break
                else:
                    s = repr(lineno).rjust(3)
                    if len(s) < 4: s = s + ' '
                    # Mark breakpoint lines with 'B' and the current
                    # execution line with '->'.
                    if lineno in breaklist: s = s + 'B'
                    else: s = s + ' '
                    if lineno == self.curframe.f_lineno:
                        s = s + '->'
                    print >>self.stdout, s + '\t' + line,
                    self.lineno = lineno
        except KeyboardInterrupt:
            pass
    do_l = do_list
    def do_whatis(self, arg):
        # Report whether 'arg' evaluates to a function, a method, or
        # (failing both) just print its type.
        try:
            value = eval(arg, self.curframe.f_globals,
                            self.curframe.f_locals)
        except:
            t, v = sys.exc_info()[:2]
            if type(t) == type(''):
                exc_type_name = t
            else: exc_type_name = t.__name__
            print >>self.stdout, '***', exc_type_name + ':', repr(v)
            return
        code = None
        # Is it a function?
        try: code = value.func_code
        except: pass
        if code:
            print >>self.stdout, 'Function', code.co_name
            return
        # Is it an instance method?
        try: code = value.im_func.func_code
        except: pass
        if code:
            print >>self.stdout, 'Method', code.co_name
            return
        # None of the above...
        print >>self.stdout, type(value)
    def do_alias(self, arg):
        # With no args list all aliases; with one existing-alias arg show
        # that alias; otherwise (re)define args[0] as the rest of the line.
        args = arg.split()
        if len(args) == 0:
            keys = self.aliases.keys()
            keys.sort()
            for alias in keys:
                print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
            return
        if args[0] in self.aliases and len(args) == 1:
            print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
        else:
            self.aliases[args[0]] = ' '.join(args[1:])
    def do_unalias(self, arg):
        # Remove an alias if it exists; unknown names are silently ignored.
        args = arg.split()
        if len(args) == 0: return
        if args[0] in self.aliases:
            del self.aliases[args[0]]
    #list of all the commands making the program resume execution.
    # (The 'commands' machinery uses this to end a breakpoint command list
    # when one of these is entered.)
    commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
                         'do_quit', 'do_jump']
    # Print a traceback starting at the top stack frame.
    # The most recently entered frame is printed last;
    # this is different from dbx and gdb, but consistent with
    # the Python interpreter's stack trace.
    # It is also consistent with the up/down commands (which are
    # compatible with dbx and gdb: up moves towards 'main()'
    # and down moves towards the most recent stack frame).
    def print_stack_trace(self):
        try:
            for frame_lineno in self.stack:
                self.print_stack_entry(frame_lineno)
        except KeyboardInterrupt:
            pass
    def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
        # Flag the current frame with '>' so the user can see which frame
        # provides the evaluation context for other commands.
        frame, lineno = frame_lineno
        if frame is self.curframe:
            print >>self.stdout, '>',
        else:
            print >>self.stdout, ' ',
        print >>self.stdout, self.format_stack_entry(frame_lineno,
                                                     prompt_prefix)
    # Help methods (derived from pdb.doc)
    # Each help_* method writes the usage text for the corresponding
    # command to self.stdout; cmd.Cmd dispatches 'help <cmd>' to them.
    def help_help(self):
        self.help_h()
    def help_h(self):
        print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
    def help_where(self):
        self.help_w()
    def help_w(self):
        print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
    help_bt = help_w
    def help_down(self):
        self.help_d()
    def help_d(self):
        print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
    def help_up(self):
        self.help_u()
    def help_u(self):
        print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
    def help_break(self):
        self.help_b()
    def help_b(self):
        print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
    def help_clear(self):
        self.help_cl()
    def help_cl(self):
        print >>self.stdout, "cl(ear) filename:lineno"
        print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
    def help_tbreak(self):
        print >>self.stdout, """tbreak same arguments as break, but breakpoint is
removed when first hit."""
    def help_enable(self):
        print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
    def help_disable(self):
        print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
    def help_ignore(self):
        print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
    def help_condition(self):
        print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
    def help_step(self):
        self.help_s()
    def help_s(self):
        print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
    def help_until(self):
        self.help_unt()
    def help_unt(self):
        # NOTE(review): unlike the other help methods this writes to
        # sys.stdout rather than self.stdout -- looks like an oversight;
        # confirm before changing.
        print """unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns"""
    def help_next(self):
        self.help_n()
    def help_n(self):
        print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
    def help_return(self):
        self.help_r()
    def help_r(self):
        print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
    def help_continue(self):
        self.help_c()
    def help_cont(self):
        self.help_c()
    def help_c(self):
        print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
    def help_jump(self):
        self.help_j()
    def help_j(self):
        print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
    def help_debug(self):
        print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
    def help_list(self):
        self.help_l()
    def help_l(self):
        print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
    def help_args(self):
        self.help_a()
    def help_a(self):
        print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
    def help_p(self):
        print >>self.stdout, """p expression
Print the value of the expression."""
    def help_pp(self):
        print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
    def help_exec(self):
        print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
    def help_run(self):
        # NOTE(review): writes to sys.stdout instead of self.stdout,
        # inconsistent with the other help methods -- confirm.
        print """run [args...]
Restart the debugged python program. If a string is supplied, it is
splitted with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
    help_restart = help_run
    def help_quit(self):
        self.help_q()
    def help_q(self):
        print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
    help_exit = help_q
    def help_whatis(self):
        print >>self.stdout, """whatis arg
Prints the type of the argument."""
    def help_EOF(self):
        print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
    def help_alias(self):
        print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' the executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
    def help_unalias(self):
        print >>self.stdout, """unalias name
Deletes the specified alias."""
    def help_commands(self):
        print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
    def help_pdb(self):
        # Pipes the full pdb.doc file through $PAGER (module-level help()).
        help()
    def lookupmodule(self, filename):
        """Helper function for break/clear parsing -- may be overridden.
        lookupmodule() translates (possibly incomplete) file or module name
        into an absolute file name.
        """
        # An absolute, existing path needs no translation.
        if os.path.isabs(filename) and os.path.exists(filename):
            return filename
        # A path relative to the directory of the main script.
        f = os.path.join(sys.path[0], filename)
        if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
            return f
        # Otherwise treat it as a module name: append '.py' and search
        # every sys.path entry (resolving symlinked entries first).
        root, ext = os.path.splitext(filename)
        if ext == '':
            filename = filename + '.py'
        if os.path.isabs(filename):
            return filename
        for dirname in sys.path:
            while os.path.islink(dirname):
                dirname = os.readlink(dirname)
            fullname = os.path.join(dirname, filename)
            if os.path.exists(fullname):
                return fullname
        return None
    def _runscript(self, filename):
        # The script has to run in __main__ namespace (or imports from
        # __main__ will break).
        #
        # So we clear up the __main__ and set several special variables
        # (this gets rid of pdb's globals and cleans old variables on restarts).
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({"__name__" : "__main__",
                                  "__file__" : filename,
                                  "__builtins__": __builtins__,
                                 })
        # When bdb sets tracing, a number of call and line events happens
        # BEFORE debugger even reaches user's code (and the exact sequence of
        # events depends on python version). So we take special measures to
        # avoid stopping before we reach the main script (see user_line and
        # user_call for details).
        self._wait_for_mainpyfile = 1
        self.mainpyfile = self.canonic(filename)
        self._user_requested_quit = 0
        statement = 'execfile( "%s")' % filename
        self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
    # Debug a statement string in the given (or caller's) namespaces.
    Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
    # Debug an expression string and return its value.
    return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
    # B/W compatibility
    run(statement, globals, locals)
def runcall(*args, **kwds):
    # Debug a function call: runcall(func, *args, **kwds).
    return Pdb().runcall(*args, **kwds)
def set_trace():
    # Enter the debugger at the caller's frame.
    Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t=None):
    # handling the default
    if t is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns None
        t = sys.exc_info()[2]
        if t is None:
            raise ValueError("A valid traceback must be passed if no "
                             "exception is being handled")
    p = Pdb()
    p.reset()
    p.interaction(None, t)
def pm():
    # Post-mortem debug the most recent uncaught traceback.
    post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
    run(TESTCMD)
# print help
def help():
    # Page the 'pdb.doc' file found on sys.path through $PAGER (falling
    # back to 'more'); report if it cannot be located.
    for dirname in sys.path:
        fullname = os.path.join(dirname, 'pdb.doc')
        if os.path.exists(fullname):
            sts = os.system('${PAGER-more} '+fullname)
            if sts: print '*** Pager exit status:', sts
            break
    else:
        print 'Sorry, can\'t find the help file "pdb.doc"',
        print 'along the Python search path'
def main():
    # Entry point: debug the script named on the command line, restarting
    # it after a normal exit or a post-mortem session until the user quits.
    if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
        print "usage: pdb.py scriptfile [arg] ..."
        sys.exit(2)
    mainpyfile = sys.argv[1]     # Get script filename
    if not os.path.exists(mainpyfile):
        print 'Error:', mainpyfile, 'does not exist'
        sys.exit(1)
    del sys.argv[0]         # Hide "pdb.py" from argument list
    # Replace pdb's dir with script's dir in front of module search path.
    sys.path[0] = os.path.dirname(mainpyfile)
    # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
    # modified by the script being debugged. It's a bad idea when it was
    # changed by the user from the command line. There is a "restart" command which
    # allows explicit specification of command line arguments.
    pdb = Pdb()
    while 1:
        try:
            pdb._runscript(mainpyfile)
            if pdb._user_requested_quit:
                break
            print "The program finished and will be restarted"
        except Restart:
            # Raised by the 'run'/'restart' command, possibly with new argv.
            print "Restarting", mainpyfile, "with arguments:"
            print "\t" + " ".join(sys.argv[1:])
        except SystemExit:
            # In most cases SystemExit does not warrant a post-mortem session.
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            traceback.print_exc()
            print "Uncaught exception. Entering post mortem debugging"
            print "Running 'cont' or 'step' will restart the program"
            t = sys.exc_info()[2]
            pdb.interaction(None, t)
            print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
    import pdb
    pdb.main()
| 34.233436
| 118
| 0.570361
|
4a080cef87e36fee7a0ea96c8512f82bddcb160f
| 5,676
|
py
|
Python
|
isi_sdk_7_2/isi_sdk_7_2/models/settings_character_encodings_character_encoding.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_7_2/isi_sdk_7_2/models/settings_character_encodings_character_encoding.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_7_2/isi_sdk_7_2/models/settings_character_encodings_character_encoding.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SettingsCharacterEncodingsCharacterEncoding(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared Swagger type.
    swagger_types = {
        'current_encoding': 'str',
        'default_encoding': 'str',
        'encodings': 'list[str]'
    }
    # Maps attribute name -> JSON key used in the API payload.
    attribute_map = {
        'current_encoding': 'current-encoding',
        'default_encoding': 'default-encoding',
        'encodings': 'encodings'
    }
    def __init__(self, current_encoding=None, default_encoding=None, encodings=None):  # noqa: E501
        """SettingsCharacterEncodingsCharacterEncoding - a model defined in Swagger"""  # noqa: E501
        self._current_encoding = None
        self._default_encoding = None
        self._encodings = None
        self.discriminator = None
        # Assigning through the property setters below enforces the
        # not-None validation on all three fields.
        self.current_encoding = current_encoding
        self.default_encoding = default_encoding
        self.encodings = encodings
    @property
    def current_encoding(self):
        """Gets the current_encoding of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        Current character encoding.  # noqa: E501
        :return: The current_encoding of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        :rtype: str
        """
        return self._current_encoding
    @current_encoding.setter
    def current_encoding(self, current_encoding):
        """Sets the current_encoding of this SettingsCharacterEncodingsCharacterEncoding.
        Current character encoding.  # noqa: E501
        :param current_encoding: The current_encoding of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        :type: str
        """
        if current_encoding is None:
            raise ValueError("Invalid value for `current_encoding`, must not be `None`")  # noqa: E501
        self._current_encoding = current_encoding
    @property
    def default_encoding(self):
        """Gets the default_encoding of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        Default character encoding.  # noqa: E501
        :return: The default_encoding of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        :rtype: str
        """
        return self._default_encoding
    @default_encoding.setter
    def default_encoding(self, default_encoding):
        """Sets the default_encoding of this SettingsCharacterEncodingsCharacterEncoding.
        Default character encoding.  # noqa: E501
        :param default_encoding: The default_encoding of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        :type: str
        """
        if default_encoding is None:
            raise ValueError("Invalid value for `default_encoding`, must not be `None`")  # noqa: E501
        self._default_encoding = default_encoding
    @property
    def encodings(self):
        """Gets the encodings of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        A list of supported encoding values.  # noqa: E501
        :return: The encodings of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        :rtype: list[str]
        """
        return self._encodings
    @encodings.setter
    def encodings(self, encodings):
        """Sets the encodings of this SettingsCharacterEncodingsCharacterEncoding.
        A list of supported encoding values.  # noqa: E501
        :param encodings: The encodings of this SettingsCharacterEncodingsCharacterEncoding.  # noqa: E501
        :type: list[str]
        """
        if encodings is None:
            raise ValueError("Invalid value for `encodings`, must not be `None`")  # noqa: E501
        self._encodings = encodings
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any models contained in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize any models stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SettingsCharacterEncodingsCharacterEncoding):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.62069
| 120
| 0.632664
|
4a080d5bd5e2bd13b306cc334510baa268065d88
| 387
|
py
|
Python
|
class5/exercises/my_devices.py
|
ksannedhi/pyplus_course
|
fc3499f2dfef472dc49fe6caddf2e6e2be160f4b
|
[
"Apache-2.0"
] | 39
|
2019-03-03T18:16:55.000Z
|
2022-02-17T17:05:18.000Z
|
class5/exercises/my_devices.py
|
ksannedhi/pyplus_course
|
fc3499f2dfef472dc49fe6caddf2e6e2be160f4b
|
[
"Apache-2.0"
] | 1
|
2020-06-17T22:39:28.000Z
|
2020-06-17T22:39:28.000Z
|
class5/exercises/my_devices.py
|
ksannedhi/pyplus_course
|
fc3499f2dfef472dc49fe6caddf2e6e2be160f4b
|
[
"Apache-2.0"
] | 77
|
2019-01-25T10:41:23.000Z
|
2022-03-14T21:35:59.000Z
|
import os
from getpass import getpass
password = os.getenv("PYNET_PASSWORD") if os.getenv("PYNET_PASSWORD") else getpass()
nxos1 = {
"device_type": "cisco_nxos",
"host": "nxos1.lasthop.io",
"username": "pyclass",
"password": password,
}
nxos2 = {
"device_type": "cisco_nxos",
"host": "nxos2.lasthop.io",
"username": "pyclass",
"password": password,
}
| 19.35
| 84
| 0.638243
|
4a080e14ce239fee959116c9a853df63310b4bae
| 10,090
|
py
|
Python
|
plugins/module_utils/zpa_client.py
|
willguibr/zpacloud_ansible
|
1d95e004ffccbcef787640ccea625bb051083414
|
[
"Apache-2.0"
] | 1
|
2022-02-23T08:26:45.000Z
|
2022-02-23T08:26:45.000Z
|
plugins/module_utils/zpa_client.py
|
willguibr/zpacloud_ansible
|
1d95e004ffccbcef787640ccea625bb051083414
|
[
"Apache-2.0"
] | 2
|
2022-02-23T21:08:09.000Z
|
2022-03-01T16:45:29.000Z
|
plugins/module_utils/zpa_client.py
|
willguibr/zpacloud-ansible
|
1d95e004ffccbcef787640ccea625bb051083414
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ansible Project 2017
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import re
from ansible.module_utils.common.text.converters import jsonify
__metaclass__ = type
import json
import random
import time
import urllib.parse
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
def retry_with_backoff(retries=5, backoff_in_seconds=1):
    """
    Decorator for functions that make HTTP calls and return a Response.

    The wrapped call is retried with exponential backoff (plus up to one
    second of jitter) until the response is a success (< 299) or a client
    error (== 400, returned as-is so callers can inspect the payload), or
    until ``retries`` retry attempts are exhausted, at which point an
    Exception carrying the last response body is raised.
    """
    import functools

    def decorator(f):
        @functools.wraps(f)  # keep the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            # Fix: forward **kwargs too -- the original only accepted
            # positional arguments, breaking keyword calls.
            attempt = 0
            while True:
                resp = f(*args, **kwargs)
                if resp.status_code < 299 or resp.status_code == 400:
                    return resp
                if attempt == retries:
                    raise Exception("Reached max retries: %s" % (resp.json))
                else:
                    # Exponential backoff with jitter.
                    sleep = backoff_in_seconds * 2 ** attempt + random.uniform(0, 1)
                    # args[0] is the helper instance carrying the
                    # AnsibleModule used for logging.
                    args[0].module.log(
                        "\n[INFO] args: %s\nretrying after %d seconds...\n"
                        % (str(args), sleep)
                    )
                    time.sleep(sleep)
                    attempt += 1
        return wrapper
    return decorator
def delete_none(f):
    """
    Decorator for functions that return a (possibly nested) container;
    strips None-valued entries from the result via ``deleteNone`` before
    returning it. A None result is passed through untouched.
    """
    import functools

    @functools.wraps(f)  # keep the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        # Fix: forward **kwargs too -- the original only accepted
        # positional arguments, breaking keyword calls.
        result = f(*args, **kwargs)
        if result is not None:
            return deleteNone(result)
        return result
    return wrapper
def deleteNone(_dict):
    """Recursively drop None values from dicts, lists, sets and tuples.

    Dicts are cleaned in place (and entries with a None key are removed);
    sequences/sets are rebuilt as the same type with None items filtered
    out. The cleaned container is returned.
    """
    if isinstance(_dict, dict):
        # Snapshot the keys so entries can be deleted while scanning.
        for key in list(_dict):
            value = _dict[key]
            if isinstance(value, (dict, list, set, tuple)):
                _dict[key] = deleteNone(value)
            elif value is None or key is None:
                del _dict[key]
    elif isinstance(_dict, (list, set, tuple)):
        cleaned = (deleteNone(item) for item in _dict if item is not None)
        _dict = type(_dict)(cleaned)
    return _dict
def camelcaseToSnakeCase(obj):
    """Return a copy of *obj* with camelCase keys rewritten as snake_case.

    Entries whose value is None are dropped from the result.
    """
    # Insert '_' before every interior capital letter, then lowercase.
    return {
        re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower(): value
        for key, value in obj.items()
        if value is not None
    }
def snakecaseToCamelcase(obj):
    """Return a copy of *obj* with snake_case keys rewritten as camelCase.

    Entries whose value is None are dropped from the result.
    """
    result = {}
    for key, value in obj.items():
        if value is None:
            continue
        # Capitalize each underscore-separated part (empty parts become
        # a literal '_'), then lowercase the leading character.
        camel = "".join(part.capitalize() or "_" for part in key.split("_"))
        result[camel[:1].lower() + camel[1:]] = value
    return result
class Response(object):
    """Lightweight wrapper around the (resp, info) pair from fetch_url."""
    def __init__(self, resp, info):
        # Read the payload eagerly; fetch_url returns resp=None on HTTP
        # errors, in which case any body lives in info["body"] instead.
        self.body = resp.read() if resp else None
        self.info = info
    @property
    def json(self):
        """Parsed JSON payload, or None when nothing parseable exists."""
        if self.body:
            try:
                return json.loads(to_text(self.body))
            except ValueError:
                return None
        if "body" in self.info:
            return json.loads(to_text(self.info.get("body")))
        return None
    @property
    def status_code(self):
        """HTTP status code as reported by fetch_url's info dict."""
        return self.info.get("status")
class ZPAClientHelper:
    """HTTP client helper for the Zscaler Private Access (ZPA) API.

    Logs in during construction (fail_json on failure) and then exposes
    get/put/post/delete plus a pagination helper, all built on Ansible's
    fetch_url.
    """
    def __init__(self, module):
        # 'module' is the AnsibleModule; its params supply the credentials.
        self.baseurl = "https://config.private.zscaler.com"
        # self.private_baseurl = "https://api.private.zscaler.com"
        self.timeout = 240
        self.module = module
        self.client_id = module.params.get("client_id")
        self.client_secret = module.params.get("client_secret")
        self.customer_id = module.params.get("customer_id")
        self.tries = 0
        # login
        response = self.login()
        if response is None or response.status_code > 299 or response.json is None:
            self.module.fail_json(
                msg="Failed to login using provided credentials, please verify validity of API ZPA_CLIENT_ID & ZPA_CLIENT_SECRET. response: %s"
                % (response)
            )
        resp_json = response.json
        self.access_token = resp_json.get("access_token")
        self.module.log("[INFO] access_token: '%s'" % (self.access_token))
        # Bearer token is sent on every subsequent request.
        self.headers = {  # 'referer': self.baseurl,
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": "Bearer %s" % (self.access_token),
        }
    @retry_with_backoff(retries=5)
    def login(self):
        """get jwt token"""
        # Credentials go form-encoded to /signin; the JSON reply carries
        # the JWT in 'access_token'.
        data = urllib.parse.urlencode(
            {"client_id": self.client_id, "client_secret": self.client_secret}
        )
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Accept": "application/json",
        }
        try:
            url = "%s/signin" % self.baseurl
            resp, info = fetch_url(
                module=self.module, url=url, data=data, method="POST", headers=headers
            )
            resp = Response(resp, info)
            self.module.log(
                "[INFO] calling: %s %s %s\n response: %s"
                % ("POST", url, str(data), str("" if resp is None else resp.json))
            )
            # self.module.log("[INFO] %s\n" % (to_text(resp.read())))
            return resp
        except Exception as e:
            self._fail("login", str(e))
    def jsonify(self, data):
        # Serialize 'data' to JSON, aborting the module run on encoding
        # errors.
        # NOTE(review): self.fail_json is not defined on this class --
        # presumably self.module.fail_json was intended; confirm.
        try:
            return jsonify(data)
        except UnicodeError as e:
            self.fail_json(msg=to_text(e))
    def _fail(self, msg, e):
        # Abort the module run with "<msg>: <error>".
        # NOTE(review): '"message" in e' is a substring test when e is a
        # str, and e.get() would then raise -- looks like e is expected
        # to sometimes be a dict; confirm intended input types.
        if "message" in e:
            err_string = e.get("message")
        else:
            err_string = e
        self.module.fail_json(msg="%s: %s" % (msg, err_string))
    def _url_builder(self, path):
        # Join 'path' onto the base URL, tolerating a leading '/'.
        if path[0] == "/":
            path = path[1:]
        return "%s/%s" % (self.baseurl, path)
    @retry_with_backoff(retries=5)
    def send(self, method, path, data=None, fail_safe=False):
        """Issue one HTTP request and return the wrapped Response.

        With fail_safe=True a 400 reply aborts the module run instead of
        being returned to the caller.
        """
        url = self._url_builder(path)
        data = self.module.jsonify(data)
        if method == "DELETE":
            # jsonify(None) yields the string "null"; send no body instead.
            if data == "null":
                data = None
        resp, info = fetch_url(
            self.module,
            url,
            data=data,
            headers=self.headers,
            method=method,
            timeout=self.timeout,
        )
        resp = Response(resp, info)
        self.module.log(
            "[INFO] calling: %s %s %s\n response: %s"
            % (method, url, str(data), str("" if resp is None else resp.json))
        )
        if resp.status_code == 400 and fail_safe:
            self.module.fail_json(
                msg="Operation failed. API response: %s\n" % (resp.json)
            )
        return resp
    def get(self, path, data=None, fail_safe=False):
        return self.send("GET", path, data, fail_safe)
    def put(self, path, data=None):
        return self.send("PUT", path, data)
    def post(self, path, data=None):
        return self.send("POST", path, data)
    def delete(self, path, data=None):
        return self.send("DELETE", path, data)
    @staticmethod
    def zpa_argument_spec():
        # Shared Ansible argument spec: all three credentials are no_log
        # and fall back to environment variables when not set explicitly.
        return dict(
            client_id=dict(
                no_log=True,
                fallback=(
                    env_fallback,
                    ["ZPA_CLIENT_ID"],
                ),
            ),
            client_secret=dict(
                no_log=True,
                fallback=(
                    env_fallback,
                    ["ZPA_CLIENT_SECRET"],
                ),
            ),
            customer_id=dict(
                no_log=True,
                fallback=(
                    env_fallback,
                    ["ZPA_CUSTOMER_ID"],
                ),
            ),
        )
    def get_paginated_data(
        self,
        base_url=None,
        data_key_name=None,
        data_per_page=500,
        expected_status_code=200,
    ):
        """
        Function to get all paginated data from given URL
        Args:
            base_url: Base URL to get data from
            data_key_name: Name of data key value
            data_per_page: Number results per page (Default: 40)
            expected_status_code: Expected returned code from DigitalOcean (Default: 200)
        Returns: List of data
        """
        # NOTE(review): the docstring looks stale (default says 40 vs the
        # actual 500, and mentions DigitalOcean) -- presumably copied from
        # another collection; confirm and update.
        page = 0
        has_next = True
        ret_data = []
        status_code = None
        response = None
        while has_next or status_code != expected_status_code:
            required_url = "{0}?page={1}&pagesize={2}".format(
                base_url, page, data_per_page
            )
            response = self.get(required_url)
            status_code = response.status_code
            # stop if any error during pagination
            if status_code != expected_status_code:
                break
            page += 1
            if (
                response is None
                or response.json is None
                or response.json.get(data_key_name) is None
            ):
                has_next = False
                continue
            ret_data.extend(response.json[data_key_name])
            try:
                # NOTE(review): 'totalPages < page' looks inverted (it
                # stops as soon as page catches up from below); presumably
                # '> page' was intended -- confirm against the ZPA
                # pagination contract.
                has_next = (
                    response.json.get("totalPages") is not None
                    and int(response.json["totalPages"]) != 0
                    and int(response.json["totalPages"]) < page
                )
            except KeyError:
                has_next = False
        if status_code != expected_status_code:
            msg = "Failed to fetch %s from %s" % (data_key_name, base_url)
            if response:
                msg += " due to error : %s" % response.json.get("message")
            self.module.fail_json(msg=msg)
        return ret_data
| 32.031746
| 143
| 0.546085
|
4a080eec3a01024a43b8200d0d1a55b716709bcd
| 1,181
|
py
|
Python
|
setup.py
|
otov4its/pysnake
|
dbfe36e464b660cddb4193a493fc0e37e55a251f
|
[
"MIT"
] | null | null | null |
setup.py
|
otov4its/pysnake
|
dbfe36e464b660cddb4193a493fc0e37e55a251f
|
[
"MIT"
] | null | null | null |
setup.py
|
otov4its/pysnake
|
dbfe36e464b660cddb4193a493fc0e37e55a251f
|
[
"MIT"
] | null | null | null |
import os.path
from setuptools import setup, find_packages
import pysnake
def long_description():
    """Read and return the contents of README.rst located next to this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle open until garbage collection).
    with open(os.path.join(here, 'README.rst')) as readme:
        return readme.read()
# Single-source the version from the package itself.
version = pysnake.__version__
# Package metadata for PyPI; console_scripts entry point exposes the
# `pysnake` command mapped to pysnake.__main__:main.
setup(
    name="pysnake",
    version=version,
    description='A curses-based cross-python version of Snake with zoom and rewind modes',
    long_description=long_description(),
    url='https://github.com/otov4its/pysnake',
    license='MIT',
    author='Stanislav Otovchits',
    author_email='otov4its@gmail.com',
    packages=find_packages(),
    entry_points={
        'console_scripts': ['pysnake = pysnake.__main__:main']
    },
    keywords=['snake', 'game', 'zoom', 'rewind'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console :: Curses',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Games/Entertainment',
    ],
)
| 30.282051
| 90
| 0.646063
|
4a080f47e1fcff647eded73c8521da1b4c32f317
| 457
|
py
|
Python
|
utils/cli/print_video_names.py
|
acetrace/cvat
|
dae1e0676329baea4568edd11005581c59dcb204
|
[
"Intel",
"MIT"
] | null | null | null |
utils/cli/print_video_names.py
|
acetrace/cvat
|
dae1e0676329baea4568edd11005581c59dcb204
|
[
"Intel",
"MIT"
] | 1
|
2022-02-09T23:26:23.000Z
|
2022-02-09T23:26:23.000Z
|
utils/cli/print_video_names.py
|
acetrace/cvat
|
dae1e0676329baea4568edd11005581c59dcb204
|
[
"Intel",
"MIT"
] | 1
|
2022-03-31T17:39:09.000Z
|
2022-03-31T17:39:09.000Z
|
#!/usr/local/bin/python3
import os
import sys
import re
#
# dir
# - video_1
# - ...
# - video_2
# - ...
#
# Directory whose entries are named after task exports,
# e.g. "task_<video name>-<start>_<stop>...".
source_dir = sys.argv[1]  # renamed: `dir` shadowed the builtin

# NOTE(review): `labels` is never used in this script — presumably kept as a
# template for task creation; confirm before removing.
labels = [{"name": "disc", "color": "#f68e83", "attributes": []}]

# Compile the pattern once instead of recompiling on every iteration.
task_name_re = re.compile('task_(.*)-\\d+_\\d+.*', re.IGNORECASE)

for filename in sorted(os.listdir(source_dir)):
    video_name = filename
    match = task_name_re.search(filename)
    if match:
        video_name = match.group(1)
    print(video_name)
| 19.869565
| 82
| 0.595186
|
4a0810148504cd76ab7ebf074b42b69a5caab8bf
| 3,846
|
py
|
Python
|
keymint_keymake/pki/utils.py
|
keymint/keymint_keymake
|
adc38e07ce5f16d6ba4b36294d7d2e8a361153f0
|
[
"Apache-2.0"
] | null | null | null |
keymint_keymake/pki/utils.py
|
keymint/keymint_keymake
|
adc38e07ce5f16d6ba4b36294d7d2e8a361153f0
|
[
"Apache-2.0"
] | null | null | null |
keymint_keymake/pki/utils.py
|
keymint/keymint_keymake
|
adc38e07ce5f16d6ba4b36294d7d2e8a361153f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cryptography import x509 # hazmat
# from cryptography.x509.oid import NameOID
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization # hashes
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat
# from cryptography.hazmat.primitives.asymmetric import rsa, dsa, ec
def _dump_cert(cert, encoding=None):
    """Serialize an x509 certificate to bytes, defaulting to PEM encoding."""
    chosen = Encoding.PEM if encoding is None else encoding
    return cert.public_bytes(encoding=chosen)
def save_cert(cert, cert_path, encoding=None):
    """Serialize *cert* and write the bytes to *cert_path*."""
    serialized = _dump_cert(cert=cert, encoding=encoding)
    with open(cert_path, 'wb') as out:
        out.write(serialized)
def _dump_key(key, encoding=None, format=None, encryption_algorithm=None):  # noqa
    """Serialize a private key to bytes.

    Defaults: PEM encoding, PKCS8 format, no encryption.
    """
    chosen_encoding = Encoding.PEM if encoding is None else encoding
    chosen_format = PrivateFormat.PKCS8 if format is None else format  # noqa
    if encryption_algorithm is None:
        encryption_algorithm = serialization.NoEncryption()
    return key.private_bytes(
        encoding=chosen_encoding,
        format=chosen_format,
        encryption_algorithm=encryption_algorithm,
    )
def save_key(key, key_path, encoding=None, format=None, encryption_algorithm=None):  # noqa
    """Serialize *key* and write the bytes to *key_path*."""
    serialized = _dump_key(key=key, encoding=encoding, format=format,
                           encryption_algorithm=encryption_algorithm)
    with open(key_path, 'wb') as out:
        out.write(serialized)
def load_cert(cert_path, encoding=None, backend=None):
    """Load an x509 certificate from a file.

    Args:
        cert_path: Path of the certificate file.
        encoding: ``Encoding.PEM`` (default) or ``Encoding.DER``.
        backend: cryptography backend; defaults to ``default_backend()``.

    Returns:
        The parsed certificate object.

    Raises:
        ValueError: If *encoding* is neither PEM nor DER. (The original fell
            through and raised an obscure ``UnboundLocalError`` instead.)
    """
    if encoding is None:
        encoding = Encoding.PEM
    if backend is None:
        backend = default_backend()
    with open(cert_path, 'rb') as f:
        data = f.read()
    if encoding is Encoding.PEM:
        return x509.load_pem_x509_certificate(data=data, backend=backend)
    if encoding is Encoding.DER:
        return x509.load_der_x509_certificate(data=data, backend=backend)
    raise ValueError('Unsupported certificate encoding: {!r}'.format(encoding))
def load_cert_data(cert_data, encoding=None, backend=None):
    """Parse an x509 certificate from in-memory bytes.

    Args:
        cert_data: Raw certificate bytes.
        encoding: ``Encoding.PEM`` (default) or ``Encoding.DER``.
        backend: cryptography backend; defaults to ``default_backend()``.

    Returns:
        The parsed certificate object.

    Raises:
        ValueError: If *encoding* is neither PEM nor DER. (The original fell
            through and raised an obscure ``UnboundLocalError`` instead.)
    """
    if encoding is None:
        encoding = Encoding.PEM
    if backend is None:
        backend = default_backend()
    if encoding is Encoding.PEM:
        return x509.load_pem_x509_certificate(cert_data, backend=backend)
    if encoding is Encoding.DER:
        return x509.load_der_x509_certificate(cert_data, backend=backend)
    raise ValueError('Unsupported certificate encoding: {!r}'.format(encoding))
def load_key(key_path, password=None, encoding=None, backend=None):
    """Load a private key from a file.

    Args:
        key_path: Path of the key file.
        password: Optional passphrase bytes for encrypted keys.
        encoding: ``Encoding.PEM`` (default) or ``Encoding.DER``.
        backend: cryptography backend; defaults to ``default_backend()``.

    Returns:
        The parsed private key object.

    Raises:
        ValueError: If *encoding* is neither PEM nor DER.
    """
    if encoding is None:
        encoding = Encoding.PEM
    if backend is None:
        backend = default_backend()
    with open(key_path, 'rb') as f:
        data = f.read()
    # BUG FIX: the original computed ``backend`` above but then passed a fresh
    # default_backend() to the loaders, silently ignoring a caller-supplied
    # backend.
    if encoding is Encoding.PEM:
        return serialization.load_pem_private_key(data=data,
                                                  password=password,
                                                  backend=backend)
    if encoding is Encoding.DER:
        return serialization.load_der_private_key(data=data,
                                                  password=password,
                                                  backend=backend)
    raise ValueError('Unsupported key encoding: {!r}'.format(encoding))
def check_public_keys_match(keys):
    """Return True when every key in *keys* shares the same public numbers."""
    reference = keys[0].public_key().public_numbers()
    for candidate in keys[1:]:
        if candidate.public_key().public_numbers() != reference:
            return False
    return True
| 38.079208
| 90
| 0.667187
|
4a08109db3ebf016eee2d8bda911e90ea1aa3cec
| 5,429
|
py
|
Python
|
covid scrape/covid_scrape.py
|
hrishipoola/berlin_covid_dashboard
|
d0eedabae28d11b685ea69b4ad511878b0fd8cf2
|
[
"MIT"
] | null | null | null |
covid scrape/covid_scrape.py
|
hrishipoola/berlin_covid_dashboard
|
d0eedabae28d11b685ea69b4ad511878b0fd8cf2
|
[
"MIT"
] | null | null | null |
covid scrape/covid_scrape.py
|
hrishipoola/berlin_covid_dashboard
|
d0eedabae28d11b685ea69b4ad511878b0fd8cf2
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from datetime import datetime
# Web scraping using BeautifulSoup and converting to pandas dataframe
import requests
import urllib.request
import json # library to handle JSON files
from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Show all rows and columns when printing dataframes during development.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
def covid_scrape():
    """Scrape the LaGeSo new-cases-by-district table into the module-level
    ``covid_raw`` dataframe and return it."""
    global covid_raw
    page_url = ('https://www.berlin.de/lageso/gesundheit/infektionsepidemiologie-infektionsschutz/corona/tabelle-bezirke-gesamtuebersicht/')
    parsed = BeautifulSoup(urlopen(page_url), 'lxml')
    tables = parsed.find_all('table')
    # The first <table> on the page carries the per-district daily counts.
    covid_raw = pd.read_html(str(tables[0]), index_col=None, header=0)[0]
    return covid_raw
def covid_wide_dataframe():
    """Clean ``covid_raw`` into the module-level wide ``covid`` dataframe:
    English column names, integer case counts, ISO-formatted dates."""
    global covid
    column_names = {
        'Datum': 'Date',
        'MI': 'Mitte',
        'FK': 'Friedrichshain-Kreuzberg',
        'PA': 'Pankow',
        'CW': 'Charlottenburg-Wilmersdorf',
        'SP': 'Spandau',
        'SZ': 'Steglitz-Zehlendorf',
        'TS': 'Tempelhof-Schöneberg',
        'NK': 'Neukölln',
        'TK': 'Treptow-Köpenick',
        'MH': 'Marzahn-Hellersdorf',
        'LI': 'Lichtenberg',
        'RD': 'Reinickendorf',
    }
    # Rename and drop the trailing all-null row.
    covid = covid_raw.rename(columns=column_names).dropna()
    # Counts arrive as floats; cast every district column to int.
    district_columns = [name for name in column_names.values() if name != 'Date']
    covid = covid.astype({column: int for column in district_columns})
    covid['Date'] = pd.to_datetime(
        covid['Date'].str.strip(), infer_datetime_format=True, dayfirst=True
    ).dt.strftime('%Y-%m-%d')
    return covid
def covid_long_dataframe():
    """Melt the wide ``covid`` dataframe into long (Date-indexed) format and
    store it in the module-level ``covid_long``."""
    global covid_long
    covid_long = (
        covid.melt(id_vars=['Date'], var_name='District', value_name='Cases')
        .set_index('Date')
    )
    covid_long.index = pd.to_datetime(covid_long.index)
    return covid_long
def rolling_7_dataframe():
    """Build ``rolling_7_long``: the 7-day rolling average of cases per
    district, in long format indexed by Date.

    Side effect: re-indexes the shared wide ``covid`` dataframe by datetime
    Date (matches the original implementation).
    """
    global rolling_7_long
    # NOTE: mutates the module-level ``covid`` dataframe in place.
    covid.set_index('Date', inplace=True)
    covid.index = pd.to_datetime(covid.index)
    smoothed = covid.rolling(7).mean()
    # Back to long format; drop the NaNs from the rolling-window warm-up.
    rolling_7_long = (
        smoothed.reset_index()
        .melt(id_vars=['Date'], var_name='District', value_name='Cases')
        .dropna()
        .set_index('Date')
    )
    return rolling_7_long
def population_scrape():
    """Scrape Berlin's population-by-borough table from Wikipedia into the
    module-level ``population_raw`` dataframe and return it."""
    global population_raw
    parsed = BeautifulSoup(urlopen('https://en.wikipedia.org/wiki/Demographics_of_Berlin'), 'lxml')
    tables = parsed.find_all('table')
    # Table index 4 holds the borough population figures.
    population_raw = pd.read_html(str(tables[4]), index_col=None, header=0)[0]
    return population_raw
def population_dataframe():
    """Trim ``population_raw`` to District and 2010 population, dropping the
    city-wide total row; stores the result in module-level ``population``."""
    global population
    population = population_raw.rename(columns={'Borough': 'District'})
    population = population[['District', 'Population 2010']]
    # Last row is the Berlin-wide total; keep districts only.
    population.drop(population.tail(1).index, inplace=True)
    return population
def incidence_dataframe():
    """Compute ``incidence``: daily cases per 100,000 residents per district,
    by mapping each row's District to its 2010 population."""
    global incidence
    district_population = population.set_index('District')['Population 2010']
    per_capita = covid_long.Cases / covid_long.District.map(district_population)
    incidence = covid_long.assign(Incidence=per_capita * 100000).dropna()
    return incidence
# Build all dataframes in dependency order: each step reads the module-level
# globals produced by the previous one.
covid_scrape()
covid_wide_dataframe()
covid_long_dataframe()
rolling_7_dataframe()
population_scrape()
population_dataframe()
incidence_dataframe()
# Export needed dataframes as csv
# NOTE(review): absolute, user-specific output paths — parameterize before
# reusing this script on another machine.
#covid_raw.to_csv(r'/users/hpoola/Desktop/covid_raw.csv')
#covid.to_csv(r'/users/hpoola/Desktop/covid.csv')
#covid_long.to_csv(r'/users/hpoola/Desktop/covid_long.csv')
rolling_7_long.to_csv(r'/users/hpoola/Desktop/berlin_covid_dash/rolling_7_long.csv')
#population_raw.to_csv(r'/users/hpoola/Desktop/population_raw.csv')
#population.to_csv(r'/users/hpoola/Desktop/population.csv')
incidence.to_csv(r'/users/hpoola/Desktop/berlin_covid_dash/incidence.csv')
| 38.503546
| 342
| 0.732179
|
4a0810a3d08b2ac467f9ce4ac0288a71b9161705
| 6,915
|
py
|
Python
|
helper/model_helper.py
|
yangchihyuan/shufflenet-v2-tensorflow
|
8eb37ab5f0c78d7a03636a68f9c74cb3e2f301bf
|
[
"MIT"
] | null | null | null |
helper/model_helper.py
|
yangchihyuan/shufflenet-v2-tensorflow
|
8eb37ab5f0c78d7a03636a68f9c74cb3e2f301bf
|
[
"MIT"
] | null | null | null |
helper/model_helper.py
|
yangchihyuan/shufflenet-v2-tensorflow
|
8eb37ab5f0c78d7a03636a68f9c74cb3e2f301bf
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from helper import variables_helper
from helper.checkpoint_helper import check_init_from_initial_checkpoint
#from nets import nets_factory
ModeKeys = tf.estimator.ModeKeys
def get_input_function(dataset, batch_size, batch_threads, is_training, image_size):
    """Build an Estimator-style input_fn over ``dataset``.

    The returned closure slices the dataset's input tensors, batches them,
    and yields (features, targets); the target tensors are merged into the
    features dict as well, matching the original behavior.
    """
    # Tuple of tensors: image_path, file_name, actual_label, label, camera.
    input_data = dataset.get_input_data(is_training)

    def input_fn():
        sliced = tf.train.slice_input_producer(input_data, num_epochs=1, shuffle=is_training, capacity=4096)
        prepared = dataset.prepare_sliced_data_for_batching(sliced, image_size)
        batched = tf.train.batch(tensors=prepared,
                                 batch_size=batch_size,
                                 num_threads=batch_threads,
                                 capacity=batch_threads * batch_size * 2,
                                 allow_smaller_final_batch=not is_training)
        features, targets = dataset.get_input_function_dictionaries(batched)
        features.update(targets)
        return features, targets

    return input_fn
'''
#This get_model_function() uses a function object to enable adding undetermined arguments.
def get_model_function(output_directory, network_name, num_classes, initial_checkpoint=None, checkpoint_exclude_scopes=None, ignore_missing_variables=False, trainable_scopes=None,
not_trainable_scopes=None):
def model_fn(features, labels, mode, params): # this is a function object. the get_model_function() doesn't have the features argument
if labels is None: # when predicting, labels is None
labels = {}
images = features['images']
file_names = features['file_names']
labels_tensor = labels['labels'] if 'labels' in labels else None
mse_labels = labels['mse_labels'] if 'mse_labels' in labels else None
#the network_function is a function resnet_v1_50 from net.resnet_v1.resnet_v1_50
network_function = nets_factory.get_network_fn(network_name, num_classes, weight_decay=0.00004, is_training=mode == ModeKeys.TRAIN)
#by the definition of resnet_v1, it returns two results, logits, and end_points
logits, end_points = network_function(images)
aux_logits = end_points['AuxLogits'] if 'AuxLogits' in end_points else None
views_labels = labels['views'] if 'views' in labels else None
views_logits = end_points['PoseLogits'] if 'PoseLogits' in end_points else None
check_init_from_initial_checkpoint(output_directory, initial_checkpoint, checkpoint_exclude_scopes, ignore_missing_variables)
predictions_dict = {}
train_op = tf.no_op()
eval_metric_ops = {}
if mode == ModeKeys.EVAL or mode == ModeKeys.TRAIN:
with tf.name_scope('losses'):
tf.summary.scalar(name='regularization', tensor=tf.losses.get_regularization_loss())
with tf.name_scope('softmax_cross_entropy'):
if labels_tensor is not None:
tf.summary.scalar(name='logits', tensor=tf.losses.sparse_softmax_cross_entropy(labels=labels_tensor, logits=logits, scope='logits'))
tf.summary.scalar(name='training-top-1', tensor=tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=logits, targets=labels_tensor, k=1), tf.float32)))
tf.summary.scalar(name='training-top-5', tensor=tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=logits, targets=labels_tensor, k=5), tf.float32)))
if aux_logits is not None:
tf.summary.scalar(name='auxLogits', tensor=tf.losses.sparse_softmax_cross_entropy(labels=labels_tensor, logits=aux_logits, scope='aux_logits'))
tf.summary.scalar(name='training-aux-top-1', tensor=tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=aux_logits, targets=labels_tensor, k=1), tf.float32)))
tf.summary.scalar(name='training-aux-top-5', tensor=tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=aux_logits, targets=labels_tensor, k=5), tf.float32)))
if views_logits is not None and views_labels is not None:
tf.summary.scalar(name='3_views', tensor=tf.losses.sparse_softmax_cross_entropy(labels=views_labels, logits=views_logits, scope='3_views'))
with tf.name_scope('mean_squared_error'):
if mse_labels is not None:
tf.summary.scalar(name='logits', tensor=tf.losses.mean_squared_error(labels=mse_labels, predictions=logits, scope='logits'))
if aux_logits is not None:
tf.summary.scalar(name='auxLogits', tensor=tf.losses.mean_squared_error(labels=mse_labels, predictions=aux_logits, scope='aux_logits'))
if mode == ModeKeys.TRAIN:
def learning_rate_decay_function(learning_rate, global_step):
if not params['fixed_learning_rate']:
return tf.train.exponential_decay(learning_rate=learning_rate,
global_step=global_step,
decay_steps=params['learning_rate_decay_steps'],
decay_rate=params['learning_rate_decay_rate'],
staircase=True,
name='learning-rate-decay')
else:
return learning_rate
variables_to_train = variables_helper.get_training_variables(tf.GraphKeys.TRAINABLE_VARIABLES, trainable_scopes, not_trainable_scopes)
train_op = tf.contrib.layers.optimize_loss(loss=tf.losses.get_total_loss(),
global_step=tf.train.get_or_create_global_step(),
learning_rate=params['learning_rate'],
optimizer=lambda learning_rate: tf.train.AdamOptimizer(learning_rate),
variables=variables_to_train,
learning_rate_decay_fn=learning_rate_decay_function)
if mode == ModeKeys.PREDICT or mode == ModeKeys.EVAL:
predictions_dict = {'logits': logits,
'classifications': tf.argmax(logits, axis=-1),
'file_names': file_names}
predictions_dict.update(features)
predictions_dict.update(labels)
if aux_logits is not None:
predictions_dict['aux_classifications'] = tf.argmax(aux_logits, axis=-1)
if views_logits is not None:
predictions_dict['views_classifications'] = tf.argmax(views_logits, axis=-1)
predictions_dict['views_softmax'] = tf.nn.softmax(views_logits)
if 'PreLogits' in end_points:
predictions_dict['pre_logits'] = end_points['PreLogits']
if mode == ModeKeys.EVAL:
if labels_tensor is not None:
eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels_tensor, predictions_dict['classifications'])}
if aux_logits is not None:
eval_metric_ops['aux_accuracy'] = tf.metrics.accuracy(labels_tensor, predictions_dict['aux_classifications'])
if views_logits is not None and views_labels is not None:
eval_metric_ops['views_accuracy'] = tf.metrics.accuracy(views_labels, predictions_dict['views_classifications'])
total_loss = tf.losses.get_total_loss() if mode == ModeKeys.TRAIN or mode == ModeKeys.EVAL else None
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions_dict, loss=total_loss, train_op=train_op,
eval_metric_ops=eval_metric_ops)
return model_fn
'''
| 51.222222
| 179
| 0.757918
|
4a0810c00f6a3df20f54aec66a79179f1aa8c476
| 11,084
|
py
|
Python
|
mathgenerator/mathgen.py
|
Gourav2000/mathgenerator
|
670f8b29537c03c38a9489b474e27b2599852a09
|
[
"MIT"
] | null | null | null |
mathgenerator/mathgen.py
|
Gourav2000/mathgenerator
|
670f8b29537c03c38a9489b474e27b2599852a09
|
[
"MIT"
] | null | null | null |
mathgenerator/mathgen.py
|
Gourav2000/mathgenerator
|
670f8b29537c03c38a9489b474e27b2599852a09
|
[
"MIT"
] | null | null | null |
import random
import math
import fractions
import sympy
from .funcs import *
genList = []


# || Generator class
class Generator:
    """A named math-problem generator.

    Side effect: every instantiation appends ``[id, title, self]`` to the
    module-level ``genList`` registry, so creation order determines registry
    order (and ``genById`` relies on id matching list position).
    """

    def __init__(self, title, id, generalProb, generalSol, func):
        self.title = title
        self.id = id
        self.generalProb = generalProb
        self.generalSol = generalSol
        self.func = func
        genList.append([id, title, self])

    def __str__(self):
        # f-string instead of chained "+" concatenation; same output.
        return f"{self.id} {self.title} {self.generalProb} {self.generalSol}"

    def __call__(self, **kwargs):
        # Delegate to the wrapped problem-generating function.
        return self.func(**kwargs)
# || Non-generator Functions
def genById(id):
    """Look up the generator stored at position *id* in genList and run it."""
    return genList[id][2]()
#
def getGenList():
    """Return the registry of [id, title, Generator] entries."""
    return genList
# Format is:
# <title> = Generator("<Title>", <id>, <generalized problem>, <generalized solution>, <function name>)
# Registry of all generators. Instantiating Generator appends to genList, so
# the order below fixes each generator's registry position; each id is kept
# equal to that position so genById(id) resolves correctly.
# Format: <name> = Generator("<Title>", <id>, <general problem>, <general solution>, <function>)
addition = Generator("Addition", 0, "a+b=", "c", additionFunc)
subtraction = Generator("Subtraction", 1, "a-b=", "c", subtractionFunc)
multiplication = Generator("Multiplication", 2, "a*b=", "c", multiplicationFunc)
division = Generator("Division", 3, "a/b=", "c", divisionFunc)
binaryComplement1s = Generator("Binary Complement 1s", 4, "1010=", "0101", binaryComplement1sFunc)
moduloDivision = Generator("Modulo Division", 5, "a%b=", "c", moduloFunc)
squareRoot = Generator("Square Root", 6, "sqrt(a)=", "b", squareRootFunc)
powerRuleDifferentiation = Generator("Power Rule Differentiation", 7, "nx^m=", "(n*m)x^(m-1)", powerRuleDifferentiationFunc)
square = Generator("Square", 8, "a^2", "b", squareFunc)
lcm = Generator("LCM (Least Common Multiple)", 9, "LCM of a and b = ", "c", lcmFunc)
gcd = Generator("GCD (Greatest Common Denominator)", 10, "GCD of a and b = ", "c", gcdFunc)
basicAlgebra = Generator("Basic Algebra", 11, "ax + b = c", "d", basicAlgebraFunc)
log = Generator("Logarithm", 12, "log2(8)", "3", logFunc)
intDivision = Generator("Easy Division", 13, "a/b=", "c", divisionToIntFunc)
decimalToBinary = Generator("Decimal to Binary", 14, "Binary of a=", "b", DecimalToBinaryFunc)
binaryToDecimal = Generator("Binary to Decimal", 15, "Decimal of a=", "b", BinaryToDecimalFunc)
fractionDivision = Generator("Fraction Division", 16, "(a/b)/(c/d)=", "x/y", divideFractionsFunc)
intMatrix22Multiplication = Generator("Integer Multiplication with 2x2 Matrix", 17, "k * [[a,b],[c,d]]=", "[[k*a,k*b],[k*c,k*d]]", multiplyIntToMatrix22)
areaOfTriangle = Generator("Area of Triangle", 18, "Area of Triangle with side lengths a, b, c = ", "area", areaOfTriangleFunc)
doesTriangleExist = Generator("Triangle exists check", 19, "Does triangle with sides a, b and c exist?", "Yes/No", isTriangleValidFunc)
midPointOfTwoPoint = Generator("Midpoint of the two point", 20, "((X1,Y1),(X2,Y2))=", "((X1+X2)/2,(Y1+Y2)/2)", MidPointOfTwoPointFunc)
factoring = Generator("Factoring Quadratic", 21, "x^2+(x1+x2)+x1*x2", "(x-x1)(x-x2)", factoringFunc)
thirdAngleOfTriangle = Generator("Third Angle of Triangle", 22, "Third Angle of the triangle = ", "angle3", thirdAngleOfTriangleFunc)
systemOfEquations = Generator("Solve a System of Equations in R^2", 23, "2x + 5y = 13, -3x - 3y = -6", "x = -1, y = 3", systemOfEquationsFunc)
distance2Point = Generator("Distance between 2 points", 24, "Find the distance between (x1,y1) and (x2,y2)", "sqrt(distanceSquared)", distanceTwoPointsFunc)
pythagoreanTheorem = Generator("Pythagorean Theorem", 25, "The hypotenuse of a right triangle given the other two lengths a and b = ", "hypotenuse", pythagoreanTheoremFunc)
# This has multiple variables whereas #23 has only x and y
linearEquations = Generator("Linear Equations", 26, "2x+5y=20 & 3x+6y=12", "x=-20 & y=12", linearEquationsFunc)
primeFactors = Generator("Prime Factorisation", 27, "Prime Factors of a =", "[b, c, d, ...]", primeFactorsFunc)
fractionMultiplication = Generator("Fraction Multiplication", 28, "(a/b)*(c/d)=", "x/y", multiplyFractionsFunc)
angleRegularPolygon = Generator("Angle of a Regular Polygon", 29, "Find the angle of a regular polygon with 6 sides", "120", regularPolygonAngleFunc)
combinations = Generator("Combinations of Objects", 30, "Combinations available for picking 4 objects at a time from 6 distinct objects =", " 15", combinationsFunc)
factorial = Generator("Factorial", 31, "a! = ", "b", factorialFunc)
surfaceAreaCubeGen = Generator("Surface Area of Cube", 32, "Surface area of cube with side a units is", "b units^2", surfaceAreaCube)
surfaceAreaCuboidGen = Generator("Surface Area of Cuboid", 33, "Surface area of cuboid with sides = a units, b units, c units is", "d units^2", surfaceAreaCuboid)
surfaceAreaCylinderGen = Generator("Surface Area of Cylinder", 34, "Surface area of cylinder with height = a units and radius = b units is", "c units^2", surfaceAreaCylinder)
# FIX: user-facing title typo "Volum of Cube" -> "Volume of Cube".
volumeCubeGen = Generator("Volume of Cube", 35, "Volume of cube with side a units is", "b units^3", volumeCube)
volumeCuboidGen = Generator("Volume of Cuboid", 36, "Volume of cuboid with sides = a units, b units, c units is", "d units^3", volumeCuboid)
volumeCylinderGen = Generator("Volume of cylinder", 37, "Volume of cylinder with height = a units and radius = b units is", "c units^3", volumeCylinder)
surfaceAreaConeGen = Generator("Surface Area of cone", 38, "Surface area of cone with height = a units and radius = b units is", "c units^2", surfaceAreaCone)
volumeConeGen = Generator("Volume of cone", 39, "Volume of cone with height = a units and radius = b units is", "c units^3", volumeCone)
commonFactors = Generator("Common Factors", 40, "Common Factors of {a} and {b} = ", "[c, d, ...]", commonFactorsFunc)
intersectionOfTwoLines = Generator("Intersection of Two Lines", 41, "Find the point of intersection of the two lines: y = m1*x + b1 and y = m2*x + b2", "(x, y)", intersectionOfTwoLinesFunc)
permutations = Generator("Permutations", 42, "Total permutations of 4 objects at a time from 10 objects is", "5040", permutationFunc)
vectorCross = Generator("Cross Product of 2 Vectors", 43, "a X b = ", "c", vectorCrossFunc)
compareFractions = Generator("Compare Fractions", 44, "Which symbol represents the comparison between a/b and c/d?", ">/</=", compareFractionsFunc)
simpleInterest = Generator("Simple Interest", 45, "Simple interest for a principle amount of a dollars, b% rate of interest and for a time period of c years is = ", "d dollars", simpleInterestFunc)
matrixMultiplication = Generator("Multiplication of two matrices", 46, "Multiply two matrices A and B", "C", matrixMultiplicationFunc)
CubeRoot = Generator("Cube Root", 47, "Cuberoot of a upto 2 decimal places is", "b", cubeRootFunc)
powerRuleIntegration = Generator("Power Rule Integration", 48, "nx^m=", "(n/m)x^(m+1)", powerRuleIntegrationFunc)
fourthAngleOfQuadrilateral = Generator("Fourth Angle of Quadrilateral", 49, "Fourth angle of Quadrilateral with angles a,b,c =", "angle4", fourthAngleOfQuadriFunc)
quadraticEquationSolve = Generator("Quadratic Equation", 50, "Find the zeros {x1,x2} of the quadratic equation ax^2+bx+c=0", "x1,x2", quadraticEquation)
hcf = Generator("HCF (Highest Common Factor)", 51, "HCF of a and b = ", "c", hcfFunc)
diceSumProbability = Generator("Probability of a certain sum appearing on faces of dice", 52, "If n dices are rolled then probabilty of getting sum of x is =", "z", DiceSumProbFunc)
exponentiation = Generator("Exponentiation", 53, "a^b = ", "c", exponentiationFunc)
confidenceInterval = Generator("Confidence interval For sample S", 54, "With X% confidence", "is (A,B)", confidenceIntervalFunc)
surdsComparison = Generator("Comparing surds", 55, "Fill in the blanks a^(1/b) _ c^(1/d)", "</>/=", surdsComparisonFunc)
fibonacciSeries = Generator("Fibonacci Series", 56, "fibonacci series of first a numbers", "prints the fibonacci series starting from 0 to a", fibonacciSeriesFunc)
basicTrigonometry = Generator("Trigonometric Values", 57, "What is sin(X)?", "ans", basicTrigonometryFunc)
sumOfAnglesOfPolygon = Generator("Sum of Angles of Polygon", 58, "Sum of angles of polygon with n sides = ", "sum", sumOfAnglesOfPolygonFunc)
dataSummary = Generator("Mean,Standard Deviation,Variance", 59, "a,b,c", "Mean:a+b+c/3,Std,Var", dataSummaryFunc)
surfaceAreaSphereGen = Generator("Surface Area of Sphere", 60, "Surface area of sphere with radius = a units is", "d units^2", surfaceAreaSphere)
volumeSphere = Generator("Volume of Sphere", 61, "Volume of sphere with radius r m = ", "(4*pi/3)*r*r*r", volumeSphereFunc)
nthFibonacciNumberGen = Generator("nth Fibonacci number", 62, "What is the nth Fibonacci number", "Fn", nthFibonacciNumberFunc)
profitLossPercent = Generator("Profit or Loss Percent", 63, "Profit/ Loss percent when CP = cp and SP = sp is: ", "percent", profitLossPercentFunc)
binaryToHex = Generator("Binary to Hexidecimal", 64, "Hexidecimal of a=", "b", binaryToHexFunc)
complexNumMultiply = Generator("Multiplication of 2 complex numbers", 65, "(x + j) (y + j) = ", "xy + xj + yj -1", multiplyComplexNumbersFunc)
geometricprogression = Generator("Geometric Progression", 66, "Initial value,Common Ratio,nth Term,Sum till nth term =", "a,r,ar^n-1,sum(ar^n-1", geomProgrFunc)
geometricMean = Generator("Geometric Mean of N Numbers", 67, "Geometric mean of n numbers A1 , A2 , ... , An = ", "(A1*A2*...An)^(1/n) = ans", geometricMeanFunc)
harmonicMean = Generator("Harmonic Mean of N Numbers", 68, "Harmonic mean of n numbers A1 , A2 , ... , An = ", " n/((1/A1) + (1/A2) + ... + (1/An)) = ans", harmonicMeanFunc)
eucldianNorm = Generator("Euclidian norm or L2 norm of a vector", 69, "Euclidian Norm of a vector V:[v1, v2, ......., vn]", "sqrt(v1^2 + v2^2 ........ +vn^2)", euclidianNormFunc)
angleBtwVectors = Generator("Angle between 2 vectors", 70, "Angle Between 2 vectors V1=[v11, v12, ..., v1n] and V2=[v21, v22, ....., v2n]", "V1.V2 / (euclidNorm(V1)*euclidNorm(V2))", angleBtwVectorsFunc)
absoluteDifference = Generator("Absolute difference between two numbers", 71, "Absolute difference betweeen two numbers a and b =", "|a-b|", absoluteDifferenceFunc)
vectorDot = Generator("Dot Product of 2 Vectors", 72, "a . b = ", "c", vectorDotFunc)
binary2sComplement = Generator("Binary 2's Complement", 73, "2's complement of 11010110 =", "101010", binary2sComplementFunc)
invertmatrix = Generator("Inverse of a Matrix", 74, "Inverse of a matrix A is", "A^(-1)", matrixInversion)
| 66.771084
| 201
| 0.661674
|
4a081387f471ecc2e1b47e4f0810e3a28b9f6be6
| 4,009
|
py
|
Python
|
scripts/artifacts/LineCallLogs.py
|
JamieSharpe/ALEAPP
|
acb06736d772d75c9dc0fd58b9f2a1726e795fb4
|
[
"MIT"
] | null | null | null |
scripts/artifacts/LineCallLogs.py
|
JamieSharpe/ALEAPP
|
acb06736d772d75c9dc0fd58b9f2a1726e795fb4
|
[
"MIT"
] | null | null | null |
scripts/artifacts/LineCallLogs.py
|
JamieSharpe/ALEAPP
|
acb06736d772d75c9dc0fd58b9f2a1726e795fb4
|
[
"MIT"
] | null | null | null |
import datetime
from scripts.ilapfuncs import open_sqlite_db_readonly, logfunc, tsv, timeline
from scripts.plugin_base import ArtefactPlugin
from scripts import artifact_report
class LineCallLogsPlugin(ArtefactPlugin):
    """Extracts call logs from the Line messenger app's SQLite databases.

    Joins ``call_history`` against the contacts / group-membership tables of
    the attached ``naver_line`` database so that outgoing group calls can be
    resolved to their member lists.
    """

    def __init__(self):
        super().__init__()

        self.author = 'Unknown'
        self.author_email = ''
        self.author_url = ''

        self.category = 'Line'
        self.name = 'Call Logs'
        self.description = ''

        self.artefact_reference = ''  # Description on what the artefact is.
        self.path_filters = ['**/jp.naver.line.android/databases/**']  # Collection of regex search filters to locate an artefact.
        self.icon = 'phone'  # feathricon for report.

    def _processor(self) -> bool:
        """Locate the Line databases, query the call history and emit reports.

        Returns True unconditionally; writes HTML/TSV/timeline reports when
        call rows are found, otherwise just logs a message.
        """
        source_file_call = ''
        line_call_db = ''
        line_msg_db = ''

        # NOTE(review): if no matching file is found, line_call_db stays ''
        # and the open below will fail — confirm the seeker guarantees a hit.
        for file_found in self.files_found:
            file_name = str(file_found)
            if file_name.lower().endswith('naver_line'):
                line_msg_db = str(file_found)
            if file_name.lower().endswith('call_history'):
                line_call_db = str(file_found)
                source_file_call = file_found.replace(self.seeker.directory, '')

        db = open_sqlite_db_readonly(line_call_db)
        cursor = db.cursor()
        # Attach the message DB so contacts can be joined with call history.
        cursor.execute('''attach database "''' + line_msg_db + '''" as naver_line ''')

        try:
            cursor.execute('''
                    SELECT case Substr(calls.call_type, -1) when "O" then "Outgoing"
                    else "Incoming" end AS direction,
                    calls.start_time/1000 AS start_time,
                    calls.end_time/1000 AS end_time,
                    case when Substr(calls.call_type, -1) = "O" then contact_book_w_groups.members
                    else null end AS group_members,
                    calls.caller_mid,
                    case calls.voip_type when "V" then "Video"
                    when "A" then "Audio"
                    when "G" then calls.voip_gc_media_type
                    end AS call_type
                    FROM (SELECT id,
                    Group_concat(M.m_id) AS members
                    FROM membership AS M
                    GROUP BY id
                    UNION
                    SELECT m_id,
                    NULL
                    FROM naver_line.contacts) AS contact_book_w_groups
                    JOIN call_history AS calls
                    ON calls.caller_mid = contact_book_w_groups.id
                    ''')

            all_rows = cursor.fetchall()
            usageentries = len(all_rows)
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; any query failure is treated as "no rows".
            usageentries = 0

        if usageentries > 0:
            data_headers = ('Start Time', 'End Time', 'To ID', 'From ID', 'Direction', 'Call Type')
            data_list = []
            for row in all_rows:
                # NOTE(review): fromtimestamp renders local time; the stored
                # values look like milliseconds (divided by 1000 in the SQL) —
                # confirm against the app's schema.
                start_time = datetime.datetime.fromtimestamp(int(row[1])).strftime('%Y-%m-%d %H:%M:%S')
                end_time = datetime.datetime.fromtimestamp(int(row[2])).strftime('%Y-%m-%d %H:%M:%S')
                data_list.append((start_time, end_time, row[3], row[4], row[0], row[5]))

            artifact_report.GenerateHtmlReport(self, file_found, data_headers, data_list)
            tsv(self.report_folder, data_headers, data_list, self.full_name(), source_file_call)
            timeline(self.report_folder, self.full_name(), data_list, data_headers)
        else:
            logfunc('No Line Call Logs found')

        db.close()
        return True
| 41.329897
| 130
| 0.49314
|
4a0813b4da8652d4c8c77e424e6d70cf3dff0c0a
| 3,479
|
py
|
Python
|
xero_python/accounting/models/report_rows.py
|
sidtrengove/xero-python
|
52f1ec2232def4c8e773e8e5fd6f766c059517b2
|
[
"MIT"
] | 1
|
2020-06-05T15:03:15.000Z
|
2020-06-05T15:03:15.000Z
|
xero_python/accounting/models/report_rows.py
|
sidtrengove/xero-python
|
52f1ec2232def4c8e773e8e5fd6f766c059517b2
|
[
"MIT"
] | null | null | null |
xero_python/accounting/models/report_rows.py
|
sidtrengove/xero-python
|
52f1ec2232def4c8e773e8e5fd6f766c059517b2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.1.6
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ReportRows(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"row_type": "RowType",
"title": "str",
"cells": "list[ReportCell]",
"rows": "list[ReportRow]",
}
attribute_map = {
"row_type": "RowType",
"title": "Title",
"cells": "Cells",
"rows": "Rows",
}
def __init__(self, row_type=None, title=None, cells=None, rows=None): # noqa: E501
"""ReportRows - a model defined in OpenAPI""" # noqa: E501
self._row_type = None
self._title = None
self._cells = None
self._rows = None
self.discriminator = None
if row_type is not None:
self.row_type = row_type
if title is not None:
self.title = title
if cells is not None:
self.cells = cells
if rows is not None:
self.rows = rows
@property
def row_type(self):
"""Gets the row_type of this ReportRows. # noqa: E501
:return: The row_type of this ReportRows. # noqa: E501
:rtype: RowType
"""
return self._row_type
@row_type.setter
def row_type(self, row_type):
"""Sets the row_type of this ReportRows.
:param row_type: The row_type of this ReportRows. # noqa: E501
:type: RowType
"""
self._row_type = row_type
@property
def title(self):
"""Gets the title of this ReportRows. # noqa: E501
:return: The title of this ReportRows. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ReportRows.
:param title: The title of this ReportRows. # noqa: E501
:type: str
"""
self._title = title
@property
def cells(self):
"""Gets the cells of this ReportRows. # noqa: E501
:return: The cells of this ReportRows. # noqa: E501
:rtype: list[ReportCell]
"""
return self._cells
@cells.setter
def cells(self, cells):
"""Sets the cells of this ReportRows.
:param cells: The cells of this ReportRows. # noqa: E501
:type: list[ReportCell]
"""
self._cells = cells
@property
def rows(self):
"""Gets the rows of this ReportRows. # noqa: E501
:return: The rows of this ReportRows. # noqa: E501
:rtype: list[ReportRow]
"""
return self._rows
@rows.setter
def rows(self, rows):
"""Sets the rows of this ReportRows.
:param rows: The rows of this ReportRows. # noqa: E501
:type: list[ReportRow]
"""
self._rows = rows
| 23.506757
| 124
| 0.567404
|
4a0814fb7f5db8496915c3761f87cebfc3ffc81f
| 4,047
|
bzl
|
Python
|
third_party/llvm/llvm.bzl
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 384
|
2017-02-21T18:38:04.000Z
|
2022-02-22T07:30:25.000Z
|
third_party/llvm/llvm.bzl
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 15
|
2017-03-01T20:18:43.000Z
|
2020-05-07T10:33:51.000Z
|
third_party/llvm/llvm.bzl
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 81
|
2017-02-21T19:31:19.000Z
|
2022-02-22T07:30:24.000Z
|
"""This file contains BUILD extensions for generating source code from LLVM's table definition files using the TableGen tool.
See http://llvm.org/cmds/tblgen.html for more information on the TableGen
tool.
TODO(chandlerc): Currently this expresses include-based dependencies as
"sources", and has no transitive understanding due to these files not being
correctly understood by the build system.
"""
def gentbl(name, tblgen, td_file, td_srcs, tbl_outs, library = True, **kwargs):
"""gentbl() generates tabular code from a table definition file.
Args:
name: The name of the build rule for use in dependencies.
tblgen: The binary used to produce the output.
td_file: The primary table definitions file.
td_srcs: A list of table definition files included transitively.
tbl_outs: A list of tuples (opts, out), where each opts is a string of
options passed to tblgen, and the out is the corresponding output file
produced.
library: Whether to bundle the generated files into a library.
**kwargs: Keyword arguments to pass to subsidiary cc_library() rule.
"""
if td_file not in td_srcs:
td_srcs += [td_file]
includes = []
for (opts, out) in tbl_outs:
outdir = out[:out.rindex("/")]
if outdir not in includes:
includes.append(outdir)
rule_suffix = "_".join(opts.replace("-", "_").replace("=", "_").split(" "))
native.genrule(
name="%s_%s_genrule" % (name, rule_suffix),
srcs=td_srcs,
outs=[out],
tools=[tblgen],
message="Generating code from table: %s" % td_file,
cmd=(("$(location %s) " + "-I external/llvm/include " +
"-I external/llvm/tools/clang/include " +
"-I $$(dirname $(location %s)) " + "%s $(location %s) -o $@") % (
tblgen, td_file, opts, td_file)))
# For now, all generated files can be assumed to comprise public interfaces.
# If this is not true, you should specify library = False
# and list the generated '.inc' files in "srcs".
if library:
native.cc_library(name=name, textual_hdrs=[f for (_, f) in tbl_outs],
includes=includes, **kwargs)
def llvm_target_cmake_vars(native_arch, target_triple):
return {
"LLVM_HOST_TRIPLE": target_triple,
"LLVM_DEFAULT_TARGET_TRIPLE": target_triple,
"LLVM_NATIVE_ARCH": native_arch,
}
def _quote(s):
"""Quotes the given string for use in a shell command.
This function double-quotes the given string (in case it contains spaces or
other special characters) and escapes any special characters (dollar signs,
double-quotes, and backslashes) that may be present.
Args:
s: The string to quote.
Returns:
An escaped and quoted version of the string that can be passed to a shell
command.
"""
return ('"' +
s.replace("\\", "\\\\").replace("$", "\\$").replace('"', '\\"') +
'"')
def cmake_var_string(cmake_vars):
"""Converts a dictionary to an input suitable for expand_cmake_vars.
Ideally we would jist stringify in the expand_cmake_vars() rule, but select()
interacts badly with genrules.
TODO(phawkins): replace the genrule() with native rule and delete this rule.
Args:
cmake_vars: a dictionary with string keys and values that are convertable to
strings.
"""
return " ".join([_quote("{}={}".format(k, str(v)))
for (k, v) in cmake_vars.items()])
def expand_cmake_vars(name, src, dst, cmake_vars):
"""Expands #cmakedefine, #cmakedefine01, and CMake variables in a text file.
Args:
name: the name of the rule
src: the input of the rule
dst: the output of the rule
cmake_vars: a string containing the CMake variables, as generated by
cmake_var_string.
"""
expand_cmake_vars_tool = "@//third_party/llvm:expand_cmake_vars"
native.genrule(
name = name,
srcs = [src],
tools = [expand_cmake_vars_tool],
outs = [dst],
cmd = ("$(location {}) ".format(expand_cmake_vars_tool) + cmake_vars +
"< $< > $@")
)
| 37.472222
| 125
| 0.663949
|
4a08159ee30af3b17bd94b2e022badc7f719c09e
| 746
|
py
|
Python
|
TCMS/TCMS/urls.py
|
tumutech/Tumutech-Content-Management-System
|
fa96912f7c54b4cab22ae18d0959d39fb0e83003
|
[
"Apache-2.0"
] | null | null | null |
TCMS/TCMS/urls.py
|
tumutech/Tumutech-Content-Management-System
|
fa96912f7c54b4cab22ae18d0959d39fb0e83003
|
[
"Apache-2.0"
] | null | null | null |
TCMS/TCMS/urls.py
|
tumutech/Tumutech-Content-Management-System
|
fa96912f7c54b4cab22ae18d0959d39fb0e83003
|
[
"Apache-2.0"
] | null | null | null |
"""TCMS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 33.909091
| 77
| 0.707775
|
4a0816024630158833e5382991a6caa5dcf76bc0
| 3,818
|
py
|
Python
|
src/b2bit_mini_twitter/settings.py
|
viniciussslima/b2bit-mini-twitter
|
d37b6d1c3e8d92f5aeee8c8bfdde34b0e4f82e57
|
[
"MIT"
] | null | null | null |
src/b2bit_mini_twitter/settings.py
|
viniciussslima/b2bit-mini-twitter
|
d37b6d1c3e8d92f5aeee8c8bfdde34b0e4f82e57
|
[
"MIT"
] | null | null | null |
src/b2bit_mini_twitter/settings.py
|
viniciussslima/b2bit-mini-twitter
|
d37b6d1c3e8d92f5aeee8c8bfdde34b0e4f82e57
|
[
"MIT"
] | null | null | null |
"""
Django settings for b2bit_mini_twitter project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
from dotenv import load_dotenv
# Load Environment variables
load_dotenv(override=True)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework_simplejwt",
"authentication",
"post",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "b2bit_mini_twitter.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "b2bit_mini_twitter.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": os.getenv("db_name"),
"USER": os.getenv("db_user"),
"PASSWORD": os.getenv("db_password"),
"HOST": os.getenv("db_host"),
"PORT": os.getenv("db_port"),
}
}
AUTH_USER_MODEL = "authentication.User"
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
}
| 26.150685
| 91
| 0.702462
|
4a08164140918a4ed3a3b1c246f7841d1de26a72
| 15,796
|
py
|
Python
|
salt/modules/boto_cognitoidentity.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 1
|
2016-05-20T09:15:57.000Z
|
2016-05-20T09:15:57.000Z
|
salt/modules/boto_cognitoidentity.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 1
|
2020-10-21T21:38:49.000Z
|
2020-10-21T21:38:49.000Z
|
salt/modules/boto_cognitoidentity.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon CognitoIdentity
.. versionadded:: Boron
:configuration: This module accepts explicit CognitoIdentity credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
cognitoidentity.keyid: GKTADJGHEIQSXMKKRBJ08H
cognitoidentity.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
cognitoidentity.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. versionchanged:: 2015.8.0
All methods now return a dictionary. Create, delete, set, and
update methods return:
.. code-block:: yaml
created: true
or
.. code-block:: yaml
created: false
error:
message: error message
Request methods (e.g., `describe_identity_pools`) return:
.. code-block:: yaml
identity_pools:
- {...}
- {...}
or
.. code-block:: yaml
error:
message: error message
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
import logging
from distutils.version import LooseVersion as _LooseVersion # pylint: disable=import-error,no-name-in-module
# Import Salt libs
import salt.utils.boto3
import salt.utils.compat
import salt.utils
log = logging.getLogger(__name__)
# Import third party libs
# pylint: disable=import-error
try:
#pylint: disable=unused-import
import boto
import boto3
#pylint: enable=unused-import
from botocore.exceptions import ClientError
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error
def __virtual__():
'''
Only load if boto libraries exist and if boto libraries are greater than
a given version.
'''
required_boto_version = '2.8.0'
required_boto3_version = '1.2.1'
# the boto_cognitoidentity execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
if not HAS_BOTO:
return (False, 'The boto_cognitoidentity module could not be loaded: '
'boto libraries not found')
elif _LooseVersion(boto.__version__) < _LooseVersion(required_boto_version):
return (False, 'The boto_cognitoidentity module could not be loaded: '
'boto version {0} or later must be installed.'.format(required_boto_version))
elif _LooseVersion(boto3.__version__) < _LooseVersion(required_boto3_version):
return (False, 'The boto_cognitoidentity module could not be loaded: '
'boto version {0} or later must be installed.'.format(required_boto3_version))
else:
return True
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
__utils__['boto3.assign_funcs'](__name__, 'cognito-identity')
def _find_identity_pool_ids(name, pool_id, conn):
'''
Given identity pool name (or optionally a pool_id and name will be ignored),
find and return list of matching identity pool id's.
'''
ids = []
if pool_id is None:
for pools in salt.utils.boto3.paged_call(conn.list_identity_pools,
marker_flag='NextToken', marker_arg='NextToken', MaxResults=25):
for pool in pools['IdentityPools']:
if pool['IdentityPoolName'] == name:
ids.append(pool['IdentityPoolId'])
else:
ids.append(pool_id)
return ids
def describe_identity_pools(IdentityPoolName, IdentityPoolId=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an identity pool name, (optionally if an identity pool id is given,
the given name will be ignored)
Returns a list of matched identity pool name's pool properties
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.describe_identity_pools my_id_pool_name
salt myminion boto_cognitoidentity.describe_identity_pools '' IdentityPoolId=my_id_pool_id
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ids = _find_identity_pool_ids(IdentityPoolName, IdentityPoolId, conn)
if ids:
results = []
for pool_id in ids:
response = conn.describe_identity_pool(IdentityPoolId=pool_id)
response.pop('ResponseMetadata', None)
results.append(response)
return {'identity_pools': results}
else:
return {'identity_pools': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
def create_identity_pool(IdentityPoolName,
AllowUnauthenticatedIdentities=False,
SupportedLoginProviders=None,
DeveloperProviderName=None,
OpenIdConnectProviderARNs=None,
region=None, key=None, keyid=None, profile=None):
'''
Creates a new identity pool. All parameters except for IdentityPoolName is optional.
SupportedLoginProviders should be a dictionary mapping provider names to provider app
IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider ARNs.
Returns the created identity pool if successful
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.create_identity_pool my_id_pool_name \
DeveloperProviderName=custom_developer_provider
'''
SupportedLoginProviders = dict() if SupportedLoginProviders is None else SupportedLoginProviders
OpenIdConnectProviderARNs = list() if OpenIdConnectProviderARNs is None else OpenIdConnectProviderARNs
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
request_params = dict(IdentityPoolName=IdentityPoolName,
AllowUnauthenticatedIdentities=AllowUnauthenticatedIdentities,
SupportedLoginProviders=SupportedLoginProviders,
OpenIdConnectProviderARNs=OpenIdConnectProviderARNs)
if DeveloperProviderName:
request_params['DeveloperProviderName'] = DeveloperProviderName
response = conn.create_identity_pool(**request_params)
response.pop('ResponseMetadata', None)
return {'created': True, 'identity_pool': response}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
def delete_identity_pools(IdentityPoolName, IdentityPoolId=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an identity pool name, (optionally if an identity pool id is given,
the given name will be ignored)
Deletes all identity pools matching the given name, or the specific identity pool with
the given identity pool id.
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.delete_identity_pools my_id_pool_name
salt myminion boto_cognitoidentity.delete_identity_pools '' IdentityPoolId=my_id_pool_id
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ids = _find_identity_pool_ids(IdentityPoolName, IdentityPoolId, conn)
count = 0
if ids:
for pool_id in ids:
conn.delete_identity_pool(IdentityPoolId=pool_id)
count += 1
return {'deleted': True, 'count': count}
else:
return {'deleted': False, 'count': count}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
def get_identity_pool_roles(IdentityPoolName, IdentityPoolId=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an identity pool name, (optionally if an identity pool id if given,
the given name will be ignored)
Returns a list of matched identity pool name's associated roles
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.get_identity_pool_roles my_id_pool_name
salt myminion boto_cognitoidentity.get_identity_pool_roles '' IdentityPoolId=my_id_pool_id
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ids = _find_identity_pool_ids(IdentityPoolName, IdentityPoolId, conn)
if ids:
results = []
for pool_id in ids:
response = conn.get_identity_pool_roles(IdentityPoolId=pool_id)
response.pop('ResponseMetadata', None)
results.append(response)
return {'identity_pool_roles': results}
else:
return {'identity_pool_roles': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
def _get_role_arn(name, **conn_params):
'''
Helper function to turn a name into an arn string,
returns None if not able to resolve
'''
if name.startswith('arn:aws:iam'):
return name
role = __salt__['boto_iam.describe_role'](name, **conn_params)
rolearn = role.get('arn') if role else None
return rolearn
def set_identity_pool_roles(IdentityPoolId, AuthenticatedRole=None, UnauthenticatedRole=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an identity pool id, set the given AuthenticatedRole and UnauthenticatedRole (the Role
can be an iam arn, or a role name) If AuthenticatedRole or UnauthenticatedRole is not given,
the authenticated and/or the unauthenticated role associated previously with the pool will be
cleared.
Returns set True if successful, set False if unsuccessful with the associated errors.
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.set_identity_pool_roles my_id_pool_roles # this clears the roles
salt myminion boto_cognitoidentity.set_identity_pool_roles my_id_pool_id \
AuthenticatedRole=my_auth_role UnauthenticatedRole=my_unauth_role # this set both roles
salt myminion boto_cognitoidentity.set_identity_pool_roles my_id_pool_id \
AuthenticatedRole=my_auth_role # this will set the auth role and clear the unauth role
salt myminion boto_cognitoidentity.set_identity_pool_roles my_id_pool_id \
UnauthenticatedRole=my_unauth_role # this will set the unauth role and clear the auth role
'''
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
conn = _get_conn(**conn_params)
try:
if AuthenticatedRole:
role_arn = _get_role_arn(AuthenticatedRole, **conn_params)
if role_arn is None:
return {'set': False, 'error': 'invalid AuthenticatedRole {0}'.format(AuthenticatedRole)}
AuthenticatedRole = role_arn
if UnauthenticatedRole:
role_arn = _get_role_arn(UnauthenticatedRole, **conn_params)
if role_arn is None:
return {'set': False, 'error': 'invalid UnauthenticatedRole {0}'.format(UnauthenticatedRole)}
UnauthenticatedRole = role_arn
Roles = dict()
if AuthenticatedRole:
Roles['authenticated'] = AuthenticatedRole
if UnauthenticatedRole:
Roles['unauthenticated'] = UnauthenticatedRole
conn.set_identity_pool_roles(IdentityPoolId=IdentityPoolId, Roles=Roles)
return {'set': True, 'roles': Roles}
except ClientError as e:
return {'set': False, 'error': salt.utils.boto3.get_error(e)}
def update_identity_pool(IdentityPoolId,
IdentityPoolName=None,
AllowUnauthenticatedIdentities=False,
SupportedLoginProviders=None,
DeveloperProviderName=None,
OpenIdConnectProviderARNs=None,
region=None, key=None, keyid=None, profile=None):
'''
Updates the given IdentityPoolId's properties. All parameters except for IdentityPoolId,
is optional. SupportedLoginProviders should be a dictionary mapping provider names to
provider app IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider
ARNs.
To clear SupportedLoginProviders pass '{}'
To clear OpenIdConnectProviderARNs pass '[]'
boto3 api prevents DeveloperProviderName to be updated after it has been set for the first time.
Returns the updated identity pool if successful
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.update_identity_pool my_id_pool_id my_id_pool_name \
DeveloperProviderName=custom_developer_provider
'''
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
response = describe_identity_pools('', IdentityPoolId=IdentityPoolId, **conn_params)
error = response.get('error')
if error is None:
error = 'No matching pool' if response.get('identity_pools') is None else None
if error:
return {'updated': False, 'error': error}
id_pool = response.get('identity_pools')[0]
request_params = id_pool.copy()
# IdentityPoolName and AllowUnauthenticatedIdentities are required for the call to update_identity_pool
if IdentityPoolName is not None and IdentityPoolName != request_params.get('IdentityPoolName'):
request_params['IdentityPoolName'] = IdentityPoolName
if AllowUnauthenticatedIdentities != request_params.get('AllowUnauthenticatedIdentities'):
request_params['AllowUnauthenticatedIdentities'] = AllowUnauthenticatedIdentities
current_val = request_params.pop('SupportedLoginProviders', None)
if SupportedLoginProviders is not None and SupportedLoginProviders != current_val:
request_params['SupportedLoginProviders'] = SupportedLoginProviders
# we can only set DeveloperProviderName one time per AWS.
current_val = request_params.pop('DeveloperProviderName', None)
if current_val is None and DeveloperProviderName is not None:
request_params['DeveloperProviderName'] = DeveloperProviderName
current_val = request_params.pop('OpenIdConnectProviderARNs', None)
if OpenIdConnectProviderARNs is not None and OpenIdConnectProviderARNs != current_val:
request_params['OpenIdConnectProviderARNs'] = OpenIdConnectProviderARNs
conn = _get_conn(**conn_params)
try:
response = conn.update_identity_pool(**request_params)
response.pop('ResponseMetadata', None)
return {'updated': True, 'identity_pool': response}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
| 36.396313
| 109
| 0.688592
|
4a0816735a817c865b85d953b088eac53f39b985
| 5,987
|
py
|
Python
|
nonebot_plugin_epicfree/data_source.py
|
monsterxcn/nonebot_plugin_epicfree
|
5086cd043e486485bd9705e776965c397a4aa45f
|
[
"MIT"
] | 15
|
2021-08-16T13:16:52.000Z
|
2022-03-15T17:13:04.000Z
|
nonebot_plugin_epicfree/data_source.py
|
monsterxcn/nonebot_plugin_epicfree
|
5086cd043e486485bd9705e776965c397a4aa45f
|
[
"MIT"
] | 4
|
2021-08-17T01:52:13.000Z
|
2022-02-19T14:21:42.000Z
|
nonebot_plugin_epicfree/data_source.py
|
monsterxcn/nonebot_plugin_epicfree
|
5086cd043e486485bd9705e776965c397a4aa45f
|
[
"MIT"
] | 7
|
2021-09-19T12:10:34.000Z
|
2022-03-13T12:33:44.000Z
|
import json
import sys
from datetime import datetime
import nonebot
from httpx import AsyncClient
from nonebot.log import logger
resPath = nonebot.get_driver().config.resources_dir
if not resPath:
raise ValueError(f"请在环境变量中添加 resources_dir 参数,结尾带 / 且文件夹下需新建 epicfree 子文件夹")
# 写入与读取订阅信息
# method="w" 写入时返回新增订阅结果字符串
# method="r" 读取时返回订阅状态字典
async def subscribeHelper(method="r", subType="", subject=""):
try:
with open(f"{resPath}epicfree/status.json", "r", encoding="UTF-8") as f:
statusDict = json.load(f)
f.close()
except FileNotFoundError:
statusDict = {"群聊": [], "私聊": []}
with open(f"{resPath}epicfree/status.json", "w", encoding="UTF-8") as f:
json.dump(statusDict, f, ensure_ascii=False, indent=2)
f.close()
except Exception as e:
logger.error("获取 Epic 订阅 JSON 错误:" + str(sys.exc_info()[0]) + "\n" + str(e))
# 读取时,返回订阅状态字典
if method != "w":
return statusDict
# 写入时,将新的用户按类别写入至指定数组
try:
if subject in statusDict[subType]:
return f"{subType}已经订阅过 Epic 限免游戏资讯了哦!"
statusDict[subType].append(subject)
with open(f"{resPath}epicfree/status.json", "w", encoding="UTF-8") as f:
json.dump(statusDict, f, ensure_ascii=False, indent=2)
f.close()
return f"{subType}订阅 Epic 限免游戏资讯成功!"
except Exception as e:
logger.error("获取 Epic 订阅 JSON 错误:" + str(sys.exc_info()[0]) + "\n" + str(e))
return f"{subType}订阅 Epic 限免游戏资讯失败惹.."
# 获取所有 Epic Game Store 促销游戏
# 方法参考:RSSHub /epicgames 路由
# https://github.com/DIYgod/RSSHub/blob/master/lib/routes/epicgames/index.js
async def getEpicGame():
epic_url = "https://www.epicgames.com/store/backend/graphql-proxy"
headers = {
"Referer": "https://www.epicgames.com/store/zh-CN/",
"Content-Type": "application/json; charset=utf-8",
}
data = {
"query":
"query searchStoreQuery($allowCountries: String, $category: String, $count: Int, $country: String!, $keywords: String, $locale: String, $namespace: String, $sortBy: String, $sortDir: String, $start: Int, $tag: String, $withPrice: Boolean = false, $withPromotions: Boolean = false) {\n Catalog {\n searchStore(allowCountries: $allowCountries, category: $category, count: $count, country: $country, keywords: $keywords, locale: $locale, namespace: $namespace, sortBy: $sortBy, sortDir: $sortDir, start: $start, tag: $tag) {\n elements {\n title\n id\n namespace\n description\n effectiveDate\n keyImages {\n type\n url\n }\n seller {\n id\n name\n }\n productSlug\n urlSlug\n url\n items {\n id\n namespace\n }\n customAttributes {\n key\n value\n }\n categories {\n path\n }\n price(country: $country) @include(if: $withPrice) {\n totalPrice {\n discountPrice\n originalPrice\n voucherDiscount\n discount\n currencyCode\n currencyInfo {\n decimals\n }\n fmtPrice(locale: $locale) {\n originalPrice\n discountPrice\n intermediatePrice\n }\n }\n lineOffers {\n appliedRules {\n id\n endDate\n discountSetting {\n discountType\n }\n }\n }\n }\n promotions(category: $category) @include(if: $withPromotions) {\n promotionalOffers {\n promotionalOffers {\n startDate\n endDate\n discountSetting {\n discountType\n discountPercentage\n }\n }\n }\n upcomingPromotionalOffers {\n promotionalOffers {\n startDate\n endDate\n discountSetting {\n discountType\n discountPercentage\n }\n }\n }\n }\n }\n paging {\n count\n total\n }\n }\n }\n}\n",
"variables": {
"allowCountries": "CN",
"category": "freegames",
"count": 1000,
"country": "CN",
"locale": "zh-CN",
"sortBy": "effectiveDate",
"sortDir": "asc",
"withPrice": True,
"withPromotions": True
}
}
async with AsyncClient(proxies={"all://": None}) as client:
try:
res = await client.post(epic_url, headers=headers, json=data, timeout=10.0)
resJson = res.json()
games = resJson["data"]["Catalog"]["searchStore"]["elements"]
return games
except Exception as e:
logger.error("请求 Epic Store API 错误:" + str(sys.exc_info()[0]) + "\n" + str(e))
return None
# 获取 Epic Game Store 免费游戏信息
# 处理免费游戏的信息方法借鉴 pip 包 epicstore_api 示例
# https://github.com/SD4RK/epicstore_api/blob/master/examples/free_games_example.py
async def getEpicFree():
    """Build a push-message string for the game(s) currently free on the Epic Games Store.

    Returns a CQ-code formatted message describing the last free game found,
    or an error hint string when the store API is unreachable.
    Message format logic adapted from the epicstore_api package examples.
    """
    games = await getEpicGame()
    if not games:
        return "Epic 可能又抽风啦,请稍后再试("
    # Initialized up-front so the final `return msg` can never raise
    # NameError when every game is skipped or fails to parse (original bug).
    msg = ""
    for game in games:
        try:
            game_name = game["title"]
            game_corp = game["seller"]["name"]
            game_price = game["price"]["totalPrice"]["fmtPrice"]["originalPrice"]
            game_promotions = game["promotions"]["promotionalOffers"]
            upcoming_promotions = game["promotions"]["upcomingPromotionalOffers"]
            if not game_promotions and upcoming_promotions:
                continue  # promotion not live yet; skip
            # Keep the first Thumbnail image found; the original loop let any
            # later non-thumbnail image reset the value back to None.
            game_thumbnail = None
            for image in game["keyImages"]:
                if image["type"] == "Thumbnail":
                    game_thumbnail = image["url"]
                    break
            # Developer/publisher default to the seller and are overridden only
            # when the matching attribute exists; the original overwrote them on
            # every iteration, keeping only the last pair's result.
            game_dev = game_pub = game_corp
            for pair in game["customAttributes"]:
                if pair["key"] == "developerName":
                    game_dev = pair["value"]
                elif pair["key"] == "publisherName":
                    game_pub = pair["value"]
            game_desp = game["description"]
            # Strip the trailing "Z" so fromisoformat() accepts the timestamp.
            end_date_iso = game_promotions[0]["promotionalOffers"][0]["endDate"][:-1]
            end_date = datetime.fromisoformat(end_date_iso).strftime("%b.%d %H:%M")
            # The API response carries no store URL, so build it ourselves;
            # a few games may 404 — please report those.
            game_url = f"https://www.epicgames.com/store/zh-CN/p/{game['productSlug'].replace('/home', '')}"
            msg = f"[CQ:image,file={game_thumbnail}]\n\n" if game_thumbnail else ""
            msg += f"FREE now :: {game_name} ({game_price})\n\n{game_desp}\n\n"
            msg += f"游戏由 {game_pub} 发售," if game_dev == game_pub else f"游戏由 {game_dev} 开发、{game_pub} 出版,"
            msg += f"将在 UTC 时间 {end_date} 结束免费游玩,戳链接领取吧~\n{game_url}"
        except (TypeError, IndexError):
            pass  # malformed entry — skip silently, as before
        except Exception as e:
            logger.error("组织 Epic 订阅消息错误:" + str(sys.exc_info()[0]) + "\n" + str(e))
    # Return the CQ-code message (describes the last free game processed).
    return msg
| 49.891667
| 1,538
| 0.662268
|
4a081950a0457aa68ad584a6d3cdab4a51fe3836
| 14,888
|
py
|
Python
|
tests/test_build_js/test_build_js.py
|
acivgin1/sphinx-js
|
9c8afe4e2ea46c53916ae8278747722c84b52ced
|
[
"MIT"
] | 103
|
2018-11-30T06:05:40.000Z
|
2022-03-16T14:31:50.000Z
|
tests/test_build_js/test_build_js.py
|
acivgin1/sphinx-js
|
9c8afe4e2ea46c53916ae8278747722c84b52ced
|
[
"MIT"
] | 97
|
2018-12-04T09:40:33.000Z
|
2022-03-31T10:30:37.000Z
|
tests/test_build_js/test_build_js.py
|
acivgin1/sphinx-js
|
9c8afe4e2ea46c53916ae8278747722c84b52ced
|
[
"MIT"
] | 37
|
2018-11-26T15:36:05.000Z
|
2022-02-05T00:32:37.000Z
|
from tests.testing import SphinxBuildTestCase
class Tests(SphinxBuildTestCase):
    """Tests which require our big JS Sphinx tree to be built.
    Yes, it's too coupled.
    Many of these are renderer tests, but some indirectly test JS analysis.
    These latter are left over from when JS was the only supported language and
    had its assumptions coded into the renderers.
    """
    # Helpers `_file_contents_eq` / `_file_contents` come from the
    # SphinxBuildTestCase base class: they build the doc tree once and compare
    # a page's rendered *text* output against an exact expected string.
    def test_autofunction_minimal(self):
        """Make sure we render correctly and pull the params out of the JS code
        when only the function name is provided."""
        self._file_contents_eq(
            'autofunction_minimal',
            'linkDensity(node)' + DESCRIPTION + FIELDS)
    def test_autofunction_explicit(self):
        """Make sure any explicitly provided params override the ones from the
        code, and make sure any explicit arbitrary RST content gets
        preserved."""
        self._file_contents_eq(
            'autofunction_explicit',
            'linkDensity(snorko, borko[, forko])' + DESCRIPTION + FIELDS + CONTENT)
    def test_autofunction_short(self):
        """Make sure the ``:short-name:`` option works."""
        self._file_contents_eq(
            'autofunction_short',
            'someMethod(hi)\n\n   Here.\n')
    def test_autofunction_long(self):
        """Make sure instance methods get converted to dotted notation which
        indexes better in Sphinx."""
        self._file_contents_eq(
            'autofunction_long',
            'ContainingClass.someMethod(hi)\n\n   Here.\n')
    def test_autofunction_typedef(self):
        """Make sure @typedef uses can be documented with autofunction."""
        self._file_contents_eq(
            'autofunction_typedef',
            u'TypeDefinition()\n\n   Arguments:\n      * **width** (*Number*) -- width in pixels\n')
    def test_autofunction_callback(self):
        """Make sure @callback uses can be documented with autofunction."""
        self._file_contents_eq(
            'autofunction_callback',
            u'requestCallback(responseCode)\n\n   Some global callback\n\n   Arguments:\n      * **responseCode** (*number*) --\n')
    def test_autofunction_example(self):
        """Make sure @example tags can be documented with autofunction."""
        self._file_contents_eq(
            'autofunction_example',
            'exampleTag()\n\n'
            '   JSDoc example tag\n\n'
            '   **Examples:**\n\n'
            '      // This is the example.\n'
            '      exampleTag();\n')
    def test_autofunction_destructured_params(self):
        """Make sure that all documented params appears in the function
        definition."""
        self._file_contents_eq(
            'autofunction_destructured_params',
            u'destructuredParams(p1, p2)\n\n'
            '   Arguments:\n'
            '      * **p1** (*number*) --\n\n'
            '      * **p2** (*Object*) --\n\n'
            '      * **p2.foo** (*string*) --\n\n'
            '      * **p2.bar** (*string*) --\n')
    def test_autofunction_defaults_in_doclet(self):
        """Make sure param default values appear in the function definition,
        when defined in JSDoc."""
        self._file_contents_eq(
            'autofunction_defaults_doclet',
            'defaultsDocumentedInDoclet(func=() => 5, str="a string with \\" quote", strNum="42", strBool="true", num=5, nil=null)\n\n'
            '   Arguments:\n'
            '      * **func** (*function*) --\n\n'
            '      * **strNum** (*string*) --\n\n'
            '      * **strBool** (*string*) --\n')
    def test_autofunction_defaults_in_code(self):
        """Make sure param default values appear in the function definition,
        when defined in code."""
        self._file_contents_eq(
            'autofunction_defaults_code',
            'defaultsDocumentedInCode(num=5, str="true", bool=true, nil=null)\n')
    def test_autofunction_variadic(self):
        """Make sure variadic parameters are rendered as ellipses."""
        self._file_contents_eq(
            'autofunction_variadic',
            'variadicParameter(a, ...args)\n\n'
            '   Variadic parameter\n')
    def test_autofunction_deprecated(self):
        """Make sure @deprecated tags can be documented with autofunction."""
        self._file_contents_eq(
            'autofunction_deprecated',
            'deprecatedFunction()\n\n'
            '   Note:\n\n'
            '     Deprecated.\n\n'
            'deprecatedExplanatoryFunction()\n\n'
            '   Note:\n\n'
            "     Deprecated: don't use anymore\n")
    def test_autofunction_see(self):
        """Make sure @see tags work with autofunction."""
        self._file_contents_eq(
            'autofunction_see',
            'seeFunction()\n\n'
            '   See also:\n\n'
            '     * "DeprecatedClass"\n\n'
            '     * "deprecatedFunction"\n\n'
            '     * "DeprecatedAttribute"\n')
    def test_autofunction_static(self):
        """Make sure the static function gets its prefix ``static``."""
        self._file_contents_eq(
            'autofunction_static',
            'class SimpleClass()\n\n'
            '   Class doc.\n'
            '\n'
            '   static SimpleClass.noUseOfThis()\n'
            '\n'
            '      Static.\n')
    # --- autoclass directive behavior ---
    def test_autoclass(self):
        """Make sure classes show their class comment and constructor
        comment."""
        contents = self._file_contents('autoclass')
        assert 'Class doc.' in contents
        assert 'Constructor doc.' in contents
    def test_autoclass_members(self):
        """Make sure classes list their members if ``:members:`` is specified.
        Make sure it shows both functions and attributes and shows getters and
        setters as if they are attributes. Make sure it doesn't show private
        members.
        """
        self._file_contents_eq(
            'autoclass_members',
            'class ContainingClass(ho)\n\n'
            '   Class doc.\n'
            '\n'
            '   Constructor doc.\n'
            '\n'
            '   Arguments:\n'
            '      * **ho** -- A thing\n'
            '\n'
            '   ContainingClass.bar\n'
            '\n'
            '      Setting this also frobs the frobnicator.\n'
            '\n'
            '   ContainingClass.someVar\n'
            '\n'
            '      A var\n'
            '\n'
            '   ContainingClass.anotherMethod()\n'
            '\n'
            '      Another.\n'
            '\n'
            '   ContainingClass.someMethod(hi)\n'
            '\n'
            '      Here.\n'
            '\n'
            '   ContainingClass.yetAnotherMethod()\n'
            '\n'
            '      More.\n')
    def test_autoclass_members_list(self):
        """Make sure including a list of names after ``members`` limits it to
        those names and follows the order you specify."""
        self._file_contents_eq(
            'autoclass_members_list',
            'class ClosedClass()\n\n   Closed class.\n\n   ClosedClass.publical3()\n\n      Public thing 3.\n\n   ClosedClass.publical()\n\n      Public thing.\n')
    def test_autoclass_members_list_star(self):
        """Make sure including ``*`` in a list of names after
        ``members`` includes the rest of the names in the normal order
        at that point."""
        self._file_contents_eq(
            'autoclass_members_list_star',
            'class ContainingClass(ho)\n'
            '\n'
            '   Class doc.\n'
            '\n'
            '   Constructor doc.\n'
            '\n'
            '   Arguments:\n'
            '      * **ho** -- A thing\n'
            '\n'
            '   ContainingClass.bar\n'
            '\n'
            '      Setting this also frobs the frobnicator.\n'
            '\n'
            '   ContainingClass.someVar\n'
            '\n'
            '      A var\n'
            '\n'
            '   ContainingClass.anotherMethod()\n'
            '\n'
            '      Another.\n'
            '\n'
            '   ContainingClass.yetAnotherMethod()\n'
            '\n'
            '      More.\n'
            '\n'
            '   ContainingClass.someMethod(hi)\n'
            '\n'
            '      Here.\n')
    def test_autoclass_alphabetical(self):
        """Make sure members sort alphabetically when not otherwise specified."""
        self._file_contents_eq(
            'autoclass_alphabetical',
            'class NonAlphabetical()\n\n   Non-alphabetical class.\n\n   NonAlphabetical.a()\n\n      Fun a.\n\n   NonAlphabetical.z()\n\n      Fun z.\n')
    def test_autoclass_private_members(self):
        """Make sure classes list their private members if
        ``:private-members:`` is specified."""
        contents = self._file_contents('autoclass_private_members')
        assert 'secret()' in contents
    def test_autoclass_exclude_members(self):
        """Make sure ``exclude-members`` option actually excludes listed
        members."""
        contents = self._file_contents('autoclass_exclude_members')
        assert 'publical()' in contents
        assert 'publical2' not in contents
        assert 'publical3' not in contents
    def test_autoclass_example(self):
        """Make sure @example tags can be documented with autoclass."""
        self._file_contents_eq(
            'autoclass_example',
            'class ExampleClass()\n\n'
            '   JSDoc example tag for class\n\n'
            '   **Examples:**\n\n'
            '      // This is the example.\n'
            '      new ExampleClass();\n')
    def test_autoclass_deprecated(self):
        """Make sure @deprecated tags can be documented with autoclass."""
        self._file_contents_eq(
            'autoclass_deprecated',
            'class DeprecatedClass()\n\n'
            '   Note:\n\n'
            '     Deprecated.\n\n'
            'class DeprecatedExplanatoryClass()\n\n'
            '   Note:\n\n'
            "     Deprecated: don't use anymore\n")
    def test_autoclass_see(self):
        """Make sure @see tags work with autoclass."""
        self._file_contents_eq(
            'autoclass_see',
            'class SeeClass()\n\n'
            '   See also:\n\n'
            '     * "DeprecatedClass"\n\n'
            '     * "deprecatedFunction"\n\n'
            '     * "DeprecatedAttribute"\n')
    # --- autoattribute directive behavior ---
    def test_autoattribute(self):
        """Make sure ``autoattribute`` works."""
        self._file_contents_eq(
            'autoattribute',
            'ContainingClass.someVar\n\n   A var\n')
    def test_autoattribute_example(self):
        """Make sure @example tags can be documented with autoattribute."""
        self._file_contents_eq(
            'autoattribute_example',
            'ExampleAttribute\n\n'
            '   JSDoc example tag for attribute\n\n'
            '   **Examples:**\n\n'
            '      // This is the example.\n'
            '      console.log(ExampleAttribute);\n')
    def test_autoattribute_deprecated(self):
        """Make sure @deprecated tags can be documented with autoattribute."""
        self._file_contents_eq(
            'autoattribute_deprecated',
            'DeprecatedAttribute\n\n'
            '   Note:\n\n'
            '     Deprecated.\n\n'
            'DeprecatedExplanatoryAttribute\n\n'
            '   Note:\n\n'
            "     Deprecated: don't use anymore\n")
    def test_autoattribute_see(self):
        """Make sure @see tags work with autoattribute."""
        self._file_contents_eq(
            'autoattribute_see',
            'SeeAttribute\n\n'
            '   See also:\n\n'
            '     * "DeprecatedClass"\n\n'
            '     * "deprecatedFunction"\n\n'
            '     * "DeprecatedAttribute"\n')
    # --- misc rendering behavior ---
    def test_getter_setter(self):
        """Make sure ES6-style getters and setters can be documented."""
        self._file_contents_eq(
            'getter_setter',
            'ContainingClass.bar\n\n   Setting this also frobs the frobnicator.\n')
    def test_no_shadowing(self):
        """Make sure we can disambiguate objects of the same name."""
        self._file_contents_eq(
            'avoid_shadowing',
            'more_code.shadow()\n\n   Another thing named shadow, to threaten to shadow the one in\n   code.js\n')
    def test_restructuredtext_injection(self):
        """Make sure param names and types are escaped and cannot be
        interpreted as RestructuredText.
        Descriptions should not be escaped; it is a feature to be able to use
        RST markup there.
        """
        self._file_contents_eq(
            'injection',
            u'injection(a_, b)\n\n'
            '   Arguments:\n'
            '      * **a_** -- Snorf\n\n'
            '      * **b** (*type_*) -- >>Borf_<<\n\n'
            '   Returns:\n'
            '      **rtype_** -- >>Dorf_<<\n')
    def test_union_types(self):
        """Make sure union types render into RST non-wonkily.
        The field was rendering into text as this before::
            * **| Fnode fnodeA** (*Node*) --
        I don't know what RST was thinking, but it got sane again when we
        switched from " | " as the union separator back to "|".
        """
        assert '* **fnodeA** (*Node|Fnode*) --' in self._file_contents('union')
    def test_field_list_unwrapping(self):
        """Ensure the tails of field lists have line breaks and leading
        whitespace removed.
        Otherwise, the RST parser decides the field list is over, leading to
        mangled markup.
        """
        self._file_contents_eq(
            'unwrapped',
            'longDescriptions(a, b)\n'
            '\n'
            '   Once upon a time, there was a large bear named Sid. Sid wore green\n'
            '   pants with blue stripes and pink polka dots.\n'
            '\n'
            # Also assert that line breaks in the description are preserved:
            '   * List!\n'
            '\n'
            '   Arguments:\n'
            '      * **a** -- A is the first letter of the Roman alphabet. It is\n'
            '        used in such illustrious words as aardvark and artichoke.\n'
            '\n'
            '      * **b** -- Next param, which should be part of the same field\n'
            '        list\n')
DESCRIPTION = """
Return the ratio of the inline text length of the links in an
element to the inline text length of the entire element."""
FIELDS = u"""
Arguments:
* **node** (*Node*) -- Something of a single type
Throws:
**PartyError|FartyError** -- Something with multiple types and a
line that wraps
Returns:
**Number** -- What a thing
"""
# Oddly enough, the text renderer renders these bullets with a blank line
# between, but the HTML renderer does make them a single list.
CONTENT = """
Things are "neat".
Off the beat.
* Sweet
* Fleet
"""
| 36.760494
| 163
| 0.553936
|
4a0819f9caa46461f14c6841caf229c2c3c3d459
| 7,066
|
py
|
Python
|
src/wrappers/wrapper_graphmap.py
|
isovic/realsim
|
659cfdb2476453bc26d15d29c48c52c755b71805
|
[
"MIT"
] | null | null | null |
src/wrappers/wrapper_graphmap.py
|
isovic/realsim
|
659cfdb2476453bc26d15d29c48c52c755b71805
|
[
"MIT"
] | null | null | null |
src/wrappers/wrapper_graphmap.py
|
isovic/realsim
|
659cfdb2476453bc26d15d29c48c52c755b71805
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
import os
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
import sys
sys.path.append(SCRIPT_PATH + '/../src')
import subprocess
import multiprocessing
import basicdefines
# Where to clone GraphMap from, and where the built binary ends up locally.
ALIGNER_URL = 'https://github.com/isovic/graphmap.git'
ALIGNER_PATH = os.path.join(basicdefines.ALIGNERS_PATH_ROOT_ABS, 'graphmap/bin/Linux-x64')
# Binary name and the human-readable name used in log lines and output files.
BIN = 'graphmap'
MAPPER_NAME = 'GraphMap'
# Function 'run' should provide a standard interface for running a mapper. Given input parameters, it should run the
# alignment process, and convert any custom output results to the SAM format. Function should return a string with the
# path to the output file.
# reads_file Path to a FASTA/FASTQ file containing reads.
# reference_file Path to a reference genome FASTA file.
# machine_name A symbolic name to specify a set of parameters for a specific sequencing platform.
# output_path Folder to which the output will be placed to. Filename will be automatically generated according to the name of the mapper being run.
# output_suffix A custom suffix that can be added to the output filename.
def run(reads_file, reference_file, machine_name, output_path, output_suffix=''):
    """Align reads against a reference with GraphMap and produce a SAM file.

    Builds the reference index first if it is missing, then runs the aligner
    with a parameter preset selected by ``machine_name``.

    Args:
        reads_file: Path to a FASTA/FASTQ file containing reads.
        reference_file: Path to a reference genome FASTA file.
        machine_name: Symbolic name selecting a sequencing-platform or
            algorithm preset (e.g. 'illumina', 'pacbio', 'nanopore', ...).
        output_path: Folder for the generated output files.
        output_suffix: Optional suffix appended to the output filename.

    Returns:
        Path to the generated SAM file.
    """
    # Use half the logical CPUs, but never fewer than one thread:
    # the original `cpu_count() / 2` yields 0 on a single-core machine
    # (and a float on Python 3).
    num_threads = max(1, multiprocessing.cpu_count() // 2)
    machine = machine_name.lower()
    if machine in ('illumina', 'roche'):
        parameters = '-x illumina -v 5 -t %d -B 0 -b 3' % num_threads
    elif machine == 'pacbio':
        parameters = '-v 5 -t %d -B 0 -b 3' % num_threads
    elif machine == 'nanopore':
        parameters = '-v 5 -t %d -B 0 -b 3 -a anchor' % num_threads
    elif machine == 'nanoporecirc':
        parameters = '-v 5 -t %d -C -B 0 -b 3' % num_threads
    elif machine == 'myers':
        parameters = '-a myers -v 5 -t %d -B 0 -b 3' % num_threads
    elif machine == 'gotoh':
        parameters = '-a gotoh -v 5 -t %d -B 0 -b 3' % num_threads
    elif machine == 'anchor':
        parameters = '-a anchor -v 5 -t %d -B 0 -b 3' % num_threads
    elif machine == 'metagen':
        parameters = '-v 5 -t %d -C -B 0 -b 3 -Z' % num_threads
    elif machine == 'metagenanchor':
        parameters = '-a anchor -v 5 -t %d -C -B 0 -b 3 -Z' % num_threads
    elif machine == 'debug':
        # Single-threaded, verbose, with a fixed read picked out for debugging.
        parameters = '-B 0 -b 3 -F 0.05 -l 9 -A 12 -v 7 -y 31676 -n 1 -t 1'
    else:  # default preset
        parameters = '-v 5 -t %d' % num_threads

    if output_suffix != '':
        output_filename = '%s-%s' % (MAPPER_NAME, output_suffix)
    else:
        output_filename = MAPPER_NAME
    sam_file = '%s/%s.sam' % (output_path, output_filename)
    # memtime paths are kept for the (currently commented-out) measurement runs.
    memtime_file = '%s/%s.memtime' % (output_path, output_filename)
    memtime_file_index = '%s/%s-index.memtime' % (output_path, output_filename)

    # Build the reference index once; GraphMap writes it next to the FASTA.
    if not (os.path.exists(reference_file + '.gmidx') and os.path.exists(reference_file + '.gmidxsec')):
        sys.stderr.write('[%s wrapper] Generating index...\n' % (MAPPER_NAME))
        # command = '%s %s/%s -I -r %s' % (basicdefines.measure_command(memtime_file_index), ALIGNER_PATH, BIN, reference_file)
        # ATM doing it without measurements
        command = '%s/%s -I -r %s' % (ALIGNER_PATH, BIN, reference_file)
        sys.stderr.write('[%s wrapper] %s\n' % (MAPPER_NAME, command))
        subprocess.call(command, shell=True)
        sys.stderr.write('\n\n')
    else:
        sys.stderr.write('[%s wrapper] Reference index already exists. Continuing.\n' % (MAPPER_NAME))
    sys.stderr.flush()

    # Run the alignment process.
    sys.stderr.write('[%s wrapper] Running %s...\n' % (MAPPER_NAME, MAPPER_NAME))
    # command = '%s %s/%s %s -r %s -d %s -o %s' % (basicdefines.measure_command(memtime_file), ALIGNER_PATH, BIN, parameters, reference_file, reads_file, sam_file)
    # ATM not doing measurements
    command = '%s/%s %s -r %s -d %s -o %s' % (ALIGNER_PATH, BIN, parameters, reference_file, reads_file, sam_file)
    sys.stderr.write('[%s wrapper] %s\n' % (MAPPER_NAME, command))
    subprocess.call(command, shell=True)
    sys.stderr.write('\n\n')

    sys.stderr.write('[%s wrapper] %s wrapper script finished processing.\n' % (MAPPER_NAME, MAPPER_NAME))
    return sam_file
# This is a standard interface for setting up the aligner. It should assume that the aligner
# is not present localy, but needs to be retrieved, unpacked, compiled and set-up, without requireing
# root privileges.
def download_and_install():
    """Clone GraphMap, pin a known-good commit, and build it with make.

    Standard wrapper setup interface: retrieves, unpacks and compiles the
    aligner locally, without requiring root privileges.
    """
    sys.stderr.write('[%s wrapper] Started installation of %s.\n' % (MAPPER_NAME, MAPPER_NAME))
    sys.stderr.write('[%s wrapper] Cloning git repository.\n' % (MAPPER_NAME))
    command = 'cd %s; git clone %s' % (basicdefines.ALIGNERS_PATH_ROOT_ABS, ALIGNER_URL)
    # NOTE: the original passed shell='True' (a string, merely truthy);
    # the subprocess API expects the boolean True.
    subprocess.call(command, shell=True)
    sys.stderr.write('\n')
    sys.stderr.write('[%s wrapper] Checking out commit "4d04c1b511f35d232c92bcd8ece5369e55f95aef" for reproducibility purposes.\n' % (MAPPER_NAME))
    # command = 'cd %s; git checkout 47549fefed03a90cdd1079264eebac2132207333' % (ALIGNER_PATH)
    command = 'cd %s; git checkout 4d04c1b511f35d232c92bcd8ece5369e55f95aef' % (ALIGNER_PATH)
    subprocess.call(command, shell=True)
    sys.stderr.write('\n')
    sys.stderr.write('[%s wrapper] Running make.\n' % (MAPPER_NAME))
    command = 'cd %s; make' % (ALIGNER_PATH)
    sys.stderr.write('[%s wrapper] %s\n' % (MAPPER_NAME, command))
    subprocess.call(command, shell=True)
    sys.stderr.write('\n')
    sys.stderr.write('[%s wrapper] All instalation steps finished.\n' % (MAPPER_NAME))
    sys.stderr.write('\n')
def verbose_usage_and_exit():
    """Print command-line usage to stderr and terminate with exit code 0."""
    sys.stderr.write('Usage:\n')
    sys.stderr.write('\t%s mode [<reads_file> <reference_file> <machine_name> <output_path> [<output_suffix>]]\n' % sys.argv[0])
    sys.stderr.write('\n')
    # Fixed the original help line's typos ("Is ... ommitted").
    sys.stderr.write('\t- mode - either "run" or "install". If "install", other parameters can be omitted.\n')
    exit(0)
if __name__ == "__main__":
    # CLI entry point: first argument selects "install" or "run" mode.
    if (len(sys.argv) < 2 or len(sys.argv) > 7):
        verbose_usage_and_exit();
    if (sys.argv[1] == 'install'):
        download_and_install();
        exit(0);
    elif (sys.argv[1] == 'run'):
        # "run" needs at least reads, reference, machine name and output path.
        if (len(sys.argv) < 6):
            verbose_usage_and_exit();
        reads_file = sys.argv[2];
        reference_file = sys.argv[3];
        machine_name = sys.argv[4];
        output_path = sys.argv[5];
        output_suffix = '';
        if (len(sys.argv) == 7):
            output_suffix = sys.argv[6];
        run(reads_file, reference_file, machine_name, output_path, output_suffix);
    else:
        verbose_usage_and_exit();
| 39.47486
| 160
| 0.680583
|
4a081a15441b0ea1532d37c5a675df71f012f8ea
| 2,065
|
py
|
Python
|
app/arq/tasks/snapshot.py
|
ninoseki/uzen
|
93726f22f43902e17b22dd36142dac05171d0d84
|
[
"MIT"
] | 76
|
2020-02-27T06:36:27.000Z
|
2022-03-10T20:18:03.000Z
|
app/arq/tasks/snapshot.py
|
ninoseki/uzen
|
93726f22f43902e17b22dd36142dac05171d0d84
|
[
"MIT"
] | 33
|
2020-03-13T02:04:14.000Z
|
2022-03-04T02:06:11.000Z
|
app/arq/tasks/snapshot.py
|
ninoseki/uzen
|
93726f22f43902e17b22dd36142dac05171d0d84
|
[
"MIT"
] | 6
|
2020-03-17T16:42:25.000Z
|
2021-04-27T06:35:46.000Z
|
from typing import Optional, Union
from uuid import UUID
from app import models, schemas
from app.api.dependencies.arq import get_arq_redis_with_context
from app.arq.constants import ENRICH_SNAPSHOT_TASK_NAME
from app.arq.tasks.classes.enrichment import EnrichmentTasks
from app.arq.tasks.classes.match import MatchingTask
from app.arq.tasks.classes.screenshot import UploadScrenshotTask
from app.arq.tasks.classes.snapshot import UpdateProcessingTask
from app.core.exceptions import TakeSnapshotError
from app.services.browser import Browser
async def enrich_snapshot_task(
    ctx: dict, snapshot: models.Snapshot
) -> schemas.JobResultWrapper:
    """Arq task: post-process a saved snapshot.

    Runs enrichment, then matching, then clears the snapshot's processing
    state — sequentially, preserving the pipeline order.
    """
    await EnrichmentTasks.process(snapshot)
    await MatchingTask.process(snapshot)
    await UpdateProcessingTask.process(snapshot)
    return schemas.JobResultWrapper(result={"snapshot_id": snapshot.id}, error=None)
async def take_snapshot_task(
    ctx: dict,
    payload: schemas.CreateSnapshotPayload,
    api_key: Optional[Union[str, UUID]] = None,
) -> schemas.JobResultWrapper:
    """Arq task: capture a snapshot of ``payload.url``, persist it, and
    enqueue the enrichment follow-up job.

    Returns a JobResultWrapper holding the new snapshot id, or the error
    string when the capture failed.
    """
    https_errors_ignored = payload.ignore_https_errors or False
    browser = Browser(
        enable_har=payload.enable_har,
        ignore_https_errors=https_errors_ignored,
        timeout=payload.timeout,
        device_name=payload.device_name,
        headers=payload.headers,
        wait_until=payload.wait_until,
    )
    try:
        wrapper = await browser.take_snapshot(payload.url)
    except TakeSnapshotError as err:
        return schemas.JobResultWrapper(result=None, error=str(err))

    # Persist under the arq job id (if any) so callers can correlate them.
    job_id: Optional[str] = ctx.get("job_id")
    snapshot = await models.Snapshot.save_snapshot(
        wrapper, id=job_id, api_key=api_key, tag_names=payload.tags
    )

    # Upload the screenshot when the capture produced one.
    screenshot = wrapper.screenshot
    if screenshot is not None:
        UploadScrenshotTask.process(uuid=snapshot.id, screenshot=screenshot)

    # Hand off enrichment to a separate arq job.
    async with get_arq_redis_with_context() as arq_redis:
        await arq_redis.enqueue_job(ENRICH_SNAPSHOT_TASK_NAME, snapshot)

    return schemas.JobResultWrapper(result={"snapshot_id": snapshot.id}, error=None)
| 36.875
| 84
| 0.765133
|
4a081c1daf1b03994847f7f8d22298496566edec
| 2,921
|
py
|
Python
|
describe_graph.py
|
tongokongo/ln_neo4j
|
909eb5bf7254ce3d3fd9fd028b80c82ebffa371d
|
[
"MIT"
] | 2
|
2021-05-10T03:26:28.000Z
|
2022-02-07T08:09:42.000Z
|
describe_graph.py
|
tongokongo/ln_neo4j
|
909eb5bf7254ce3d3fd9fd028b80c82ebffa371d
|
[
"MIT"
] | 1
|
2019-06-08T01:24:18.000Z
|
2019-06-08T01:24:18.000Z
|
describe_graph.py
|
tongokongo/ln_neo4j
|
909eb5bf7254ce3d3fd9fd028b80c82ebffa371d
|
[
"MIT"
] | null | null | null |
import rpc_pb2 as ln
import rpc_pb2_grpc as lnrpc
import grpc
import os
import codecs
import json
import argparse
from response_parse.parse_response import response_parser
from neo.neo4jobj import Neo4J
#saving nodes to the neo4j
def nodesToNeo(nodes):
    """Persist every parsed node into Neo4j via the global `graph` handle."""
    print("Parsing nodes")
    for parsed_node in nodes:
        graph.saveSingleNode(parsed_node)
#saving edges to the noe4j
def edgesToNeo(edges):
    """Persist every parsed channel edge into Neo4j via the global `graph` handle."""
    print("Parsing edges")
    # The original guarded each iteration with `if count >= 0`, which is always
    # true for a counter starting at 0 — leftover debug scaffolding, removed.
    for edge in edges:
        graph.saveSingleChannel(edge)
#parsing terminal arguments using argparse module
parser = argparse.ArgumentParser(description='Python lightning network topology export to Neo4j')
parser.add_argument('--neo-address', type=str, dest='url', default="bolt://127.0.0.1:7687",
help='Connections string for the Neo4J server')
parser.add_argument('--neo-user', type=str, dest='user', default='neo4j', help='Neo4J Username')
parser.add_argument('--neo-password', type=str, dest='password', default='neo4j', help='Neo4J Password')
parser.add_argument('--clean-db', type=str, dest='clean_db', default='n', help='Delete all nodes and relations')
args = parser.parse_args()
# Due to updated ECDSA generated tls.cert we need to let gprc know that
# we need to use that cipher suite otherwise there will be a handhsake
# error when we communicate with the lnd rpc server.
os.environ["GRPC_SSL_CIPHER_SUITES"] = 'HIGH+ECDSA'
# Lnd cert is at ~/.lnd/tls.cert on Linux and
# ~/Library/Application Support/Lnd/tls.cert on Mac
cert = open(os.path.expanduser('~/.lnd/tls.cert'), 'rb').read()
creds = grpc.ssl_channel_credentials(cert)
# Raised message-size limits: DescribeGraph responses exceed gRPC's 4 MB default.
channel = grpc.secure_channel('localhost:10009', creds, options=[('grpc.max_send_message_length', 11154485), (
'grpc.max_receive_message_length', 11154485)])
stub = lnrpc.LightningStub(channel)
#macaroon path for linux: ~/.lnd/data/chain/bitcoin/mainnet/admin.macaroon
with open(os.path.expanduser('~/.lnd/data/chain/bitcoin/mainnet/admin.macaroon'), 'rb') as f:
    macaroon_bytes = f.read()
    macaroon = codecs.encode(macaroon_bytes, 'hex')
#create channel graph request
request = ln.ChannelGraphRequest(
    include_unannounced=False,
)
response = stub.DescribeGraph(request, metadata=[('macaroon', macaroon)])
#setting up neo4j connection
# NOTE(review): this local `url` is unused — the connection below uses args.url.
url = "bolt://127.0.0.1:7687"
print("Connecting...")
global graph
graph = Neo4J(args.url, args.user, args.password)
print("Connected")
#delete all nodes temporary solution TODO: make historical snapshots of a graph
if args.clean_db == "y":
    print("Cleaning the DB...")
    graph.deleteAll()
#parsing part of the script
parser = response_parser(response)
nodes = parser.parseNodes()
edges = parser.parseEdges()
print("There are " + str(len(nodes)) + " nodes and " + str(len(edges)) + " edges. \n")
nodesToNeo(nodes)
edgesToNeo(edges)
| 34.364706
| 112
| 0.717562
|
4a081d3a733c5c32ce6f2beba533e24d4d61eee2
| 1,608
|
py
|
Python
|
jp.atcoder/abc235/abc235_e/28546946.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc235/abc235_e/28546946.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc235/abc235_e/28546946.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import typing
class UnionFind:
    """Disjoint-set union with union by size and path compression.

    Internally ``__data[u]`` holds the parent of ``u``, or ``-size`` of the
    component when ``u`` is a root.
    """

    def __init__(self, n: int) -> None:
        """Create n singleton components (each stored as root with size 1)."""
        self.__data = [-1] * n

    def __len__(self) -> int:
        return len(self.__data)

    def find(self, u: int) -> int:
        """Return the root of u's component.

        Iterative (the original was recursive, which can exhaust the Python
        recursion limit on long parent chains before compression kicks in);
        compresses the path afterwards so later lookups are O(1) amortized.
        """
        d = self.__data
        root = u
        while d[root] >= 0:
            root = d[root]
        # Second pass: point every node on the path directly at the root.
        while d[u] >= 0:
            d[u], u = root, d[u]
        return root

    def unite(self, u: int, v: int) -> None:
        """Merge the components of u and v (no-op when already joined)."""
        u, v = self.find(u), self.find(v)
        if u == v:
            return
        d = self.__data
        # Sizes are stored negated, so a *greater* value means a smaller tree;
        # swap so u is the larger root and absorb v into it.
        if d[u] > d[v]:
            u, v = v, u
        d[u] += d[v]
        d[v] = u

    def size(self, u: int) -> int:
        """Return the size of u's component."""
        return -self.__data[self.find(u)]
def main() -> None:
    """Read the graph and queries from stdin and answer connectivity-by-weight queries.

    Kruskal-style pass over edges in increasing weight; for each node it
    records the minimum weight among its MST edges, then answers each query
    from those precomputed values.  (Contest solution — I/O format fixed.)
    """
    n, m, q = map(int, input().split())
    abc = []
    for _ in range(m):
        a, b, c = map(int, input().split())
        a -= 1
        b -= 1
        abc.append((a, b, c))
    # for each node, memoize the minimum MST-edge weight in advance.
    abc.sort(key=lambda e: e[2])
    uf = UnionFind(n)
    inf = 1 << 30  # sentinel: larger than any edge weight
    max_weight = [inf] * n
    for a, b, c in abc:
        if uf.find(a) == uf.find(b):
            continue  # edge would form a cycle — not in the MST
        uf.unite(a, b)
        max_weight[a] = min(max_weight[a], c)
        max_weight[b] = min(max_weight[b], c)
    res = []
    for _ in range(q):
        u, v, w = map(int, input().split())
        u -= 1
        v -= 1
        if u == v:
            res.append('No')
            continue
        # NOTE(review): 'Yes' iff the new weight-w edge would join the MST
        # before either endpoint's cheapest tree edge — matches AtCoder ABC235-E.
        if max_weight[u] > w or max_weight[v] > w:
            res.append('Yes')
        else:
            res.append('No')
    print(*res, sep='\n')
# Script entry: runs immediately on import/execution (no __main__ guard).
main()
| 22.027397
| 60
| 0.425995
|
4a081dfa3288c7325952ab7259a847971d7297f9
| 786
|
py
|
Python
|
Cryptography/Encryption_Decryption_Strings.py
|
ishkapoor2000/Cryptography_Practice
|
f9e1dac2828de30293934ed566991c015d9c8c2d
|
[
"MIT"
] | 1
|
2021-10-01T08:03:23.000Z
|
2021-10-01T08:03:23.000Z
|
Cryptography/Encryption_Decryption_Strings.py
|
ishkapoor2000/Cryptography_Practice
|
f9e1dac2828de30293934ed566991c015d9c8c2d
|
[
"MIT"
] | null | null | null |
Cryptography/Encryption_Decryption_Strings.py
|
ishkapoor2000/Cryptography_Practice
|
f9e1dac2828de30293934ed566991c015d9c8c2d
|
[
"MIT"
] | null | null | null |
"""
Created on Tue Aug 18 22:37:43 2020
@author: ISH KAPOOR
"""
from cryptography.fernet import Fernet
# Generate the key for encryption and decryption method
def gen_key():
    """Create a fresh Fernet key and persist it to ``my_key.key``."""
    new_key = Fernet.generate_key()
    with open("my_key.key", "wb") as key_file:
        key_file.write(new_key)
# Load my key from dir
def load_key():
    """Return the saved key bytes from ``my_key.key``.

    Uses a context manager so the file handle is closed deterministically
    (the original opened the file without ever closing it).
    """
    with open("my_key.key", "rb") as key_file:
        return key_file.read()
# Generate and write a new key
gen_key()
# Load the new key
key = load_key()
# Prompt for the plaintext and encode it to bytes (Fernet works on bytes).
my_msg = str(input("Enter the message to encrypt:\n"))
enc_msg = my_msg.encode()
# Initialize the Fernet class with the loaded key
f = Fernet(key)
encrypted_msg = f.encrypt(enc_msg)
print("\n\n", encrypted_msg)
# Decrypt the message again (round-trip demo; result is still bytes)
decrypted_msg = f.decrypt(encrypted_msg)
print("Decrypted message:\n\n", decrypted_msg)
| 20.153846
| 56
| 0.669211
|
4a081e134e1d8c056afeabe43381878487763c31
| 562
|
py
|
Python
|
examples/simple.py
|
gregaw/abcvoting
|
4405807191d67c0851ba049b98785a98a662311d
|
[
"MIT"
] | null | null | null |
examples/simple.py
|
gregaw/abcvoting
|
4405807191d67c0851ba049b98785a98a662311d
|
[
"MIT"
] | null | null | null |
examples/simple.py
|
gregaw/abcvoting
|
4405807191d67c0851ba049b98785a98a662311d
|
[
"MIT"
] | null | null | null |
"""
Very simple example (compute PAV)
"""
from __future__ import print_function
import sys
sys.path.insert(0, '..')
from abcvoting.preferences import Profile
from abcvoting import abcrules
# Build a 5-candidate approval profile with six voters' approval sets.
num_cand = 5
profile = Profile(num_cand)
profile.add_preferences([[0, 1, 2], [0, 1], [0, 1], [1, 2], [3, 4], [3, 4]])
committeesize = 3
print("Computing a committee of size", committeesize)
print("with the Proportional Approval Voting (PAV) rule")
print("given a", profile)
# compute_pav returns the winning committee(s) under PAV.
committees = abcrules.compute_pav(profile, committeesize)
print("\nOutput: " + str(committees))
| 26.761905
| 76
| 0.725979
|
4a081eaacf1f9822bc0e535cda1c78d0dc35f275
| 6,261
|
py
|
Python
|
examples/flask_receive_endpoint/receiver.py
|
Tas-sos/argo-messaging
|
629ca694d9118d8c6cde99273353f9d9c1e0bb24
|
[
"Apache-2.0"
] | 6
|
2017-10-17T15:23:19.000Z
|
2021-11-26T11:18:56.000Z
|
examples/flask_receive_endpoint/receiver.py
|
Tas-sos/argo-messaging
|
629ca694d9118d8c6cde99273353f9d9c1e0bb24
|
[
"Apache-2.0"
] | 113
|
2016-01-20T08:21:52.000Z
|
2022-02-27T20:31:03.000Z
|
examples/flask_receive_endpoint/receiver.py
|
Tas-sos/argo-messaging
|
629ca694d9118d8c6cde99273353f9d9c1e0bb24
|
[
"Apache-2.0"
] | 11
|
2015-12-22T06:09:22.000Z
|
2021-08-05T08:02:44.000Z
|
#!/usr/bin/env python
# Example of a remote endpoint used to receive push messages
# The endpoint is a simple flask app that by default listens to port 5000
# It receives push messages that are delivered with http POST to `host.remote.node:5000/receive_here`
# It dumps the message properties and the decoded payload to a local file `./flask_receiver.log`
#
# To run the example endpoint issue:
# $ export FLASK_APP=receiver.py
# $ flask run
#
# If you want the endpoint to support https issue:
# $ ./receiver.py --cert /path/to/cert --key /path/to/cert/key
#
# You can also specify the bind port with the -port argument, default is 5000
# Lastly, you can also specify which message format the endpoint should expect
# --single or --multiple
from flask import Flask
from flask import request
from flask import Response
import argparse
import json
from logging.config import dictConfig
import ssl
import flask_cors
from flask.logging import default_handler
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default',
'level': 'INFO'
},
'logfile': {
'class': 'logging.FileHandler',
'filename': 'flask_receiver.log',
'formatter': 'default',
'level': 'INFO'
}
},
'root': {
'level': 'INFO',
'handlers': ['wsgi', 'logfile']
}
})
VERIFICATION_HASH = ""
MESSAGE_FORMAT = ""
AUTHZ_HEADER = ""
app = Flask(__name__)
app.logger.removeHandler(default_handler)
@app.route('/receive_here', methods=['POST'])
def receive_msg():
# if there is an authz header provided, check it
if AUTHZ_HEADER != "":
print(request.headers.get("Authorization"))
if request.headers.get("Authorization") != AUTHZ_HEADER:
return "UNAUTHORIZED", 401
if MESSAGE_FORMAT is "single":
try:
data = json.loads(request.get_data())
data_json = json.dumps(data, indent=4)
if "message" not in data:
raise KeyError("message field missing from request data: {}".format(data_json))
if "subscription" not in data:
raise KeyError("subscription field missing from request data: {}".format(msg_json))
msg = data["message"]
msg_json = json.dumps(data, indent=4)
if "messageId" not in msg:
raise KeyError("messageId field missing from request message: {}".format(msg_json))
if "data" not in msg:
raise KeyError("data field missing from request message: {}".format(msg_json))
app.logger.info(data)
return 'Message received', 201
except Exception as e:
app.logger.error(e.message)
return e.message, 400
elif MESSAGE_FORMAT is "multi":
try:
data = json.loads(request.get_data())
data_json = json.dumps(data, indent=4)
if "messages" not in data:
raise KeyError("messages field missing from request data: {}".format(data_json))
messages = data["messages"]
for datum in messages:
msg_json = json.dumps(datum, indent=4)
if "message" not in datum:
raise KeyError("message field missing from request data: {}".format(msg_json))
if "subscription" not in datum:
raise KeyError("subscription field missing from request data: {}".format(msg_json))
msg = datum["message"]
if "messageId" not in msg:
raise KeyError("messageId field missing from request message: {}".format(msg_json))
if "data" not in msg:
raise KeyError("data field missing from request message: {}".format(msg_json))
app.logger.info(data)
return 'Messages received', 201
except Exception as e:
app.logger.error(e.message)
return e.message, 400
@app.route('/ams_verification_hash', methods=['GET'])
def return_verification_hash():
return Response(response=VERIFICATION_HASH, status=200, content_type="plain/text")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Simple flask endpoint for push subscriptions")
parser.add_argument(
"-cert", "--cert", metavar="STRING", help="Certificate location",
default="/etc/grid-security/hostcert.pem", dest="cert")
parser.add_argument(
"-key", "--key", metavar="STRING", help="Key location",
default="/etc/grid-security/hostkey.pem", dest="key")
parser.add_argument(
"-port", "--port", metavar="INTEGER", help="Bind port",
default=5000, type=int, dest="port")
parser.add_argument(
"-vh", "--verification-hash", metavar="STRING", help="Verification hash for the push endpoint",
required=True, dest="vhash")
parser.add_argument(
"-ah", "--authorization-header", metavar="STRING", help="Expected authorization header",
required=False, dest="authz")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--single", action="store_true", help="The endpoint should expect single message format",
dest="single_message")
group.add_argument("--multiple", action="store_true", help="The endpoint should expect multiple messages format",
dest="multi_message")
args = parser.parse_args()
flask_cors.CORS(app=app, methods=["OPTIONS", "HEAD", "POST"], allow_headers=["X-Requested-With", "Content-Type"])
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(args.cert, args.key)
VERIFICATION_HASH = args.vhash
AUTHZ_HEADER = args.authz
if args.single_message:
MESSAGE_FORMAT = "single"
if args.multi_message:
MESSAGE_FORMAT = "multi"
app.run(host='0.0.0.0', port=args.port, ssl_context=context, threaded=True, debug=True)
| 31.305
| 117
| 0.622744
|
4a081f3823600c1c83285969874201747ac77f42
| 8,313
|
py
|
Python
|
aiida_cusp/utils/defaults.py
|
astamminger/aiida_cusp
|
4a5a014fc90761ee8855cbe6305a8f565f9626a3
|
[
"MIT"
] | 2
|
2020-08-10T15:47:10.000Z
|
2022-03-14T12:29:43.000Z
|
aiida_cusp/utils/defaults.py
|
astamminger/aiida_cusp
|
4a5a014fc90761ee8855cbe6305a8f565f9626a3
|
[
"MIT"
] | 13
|
2020-07-10T16:22:05.000Z
|
2022-02-28T18:41:53.000Z
|
aiida_cusp/utils/defaults.py
|
astamminger/aiida_cusp
|
4a5a014fc90761ee8855cbe6305a8f565f9626a3
|
[
"MIT"
] | 2
|
2020-07-09T10:09:04.000Z
|
2020-08-10T15:47:54.000Z
|
# -*- coding: utf-8 -*-
from aiida_cusp.utils.decorators import classproperty
# FIXME: Decide what to do with the screened exchange (WFULLxxxx.tmp) and the
# diagonal elements of the screened exchange (Wxxxx.tmp) output files
# written for BSE calculations
class VaspDefaults(object):
"""
Collection of default values for VASP
"""
# map functionals contained in archive file names to internal string
@classproperty
def FUNCTIONAL_MAP(cls):
return dict({
# LDA type potentials
'potuspp_lda': 'lda_us',
'potpaw_lda': 'lda',
'potpaw_lda.52': 'lda_52',
'potpaw_lda.54': 'lda_54',
# PBE type potentials
'potpaw_pbe': 'pbe',
'potpaw_pbe.52': 'pbe_52',
'potpaw_pbe.54': 'pbe_54',
# PW91 type potentials
'potuspp_gga': 'pw91_us',
'potpaw_gga': 'pw91',
})
@classproperty
def FNAMES(cls):
# filenames for VASP input and output files
return dict({
# inputs
'potcar': 'POTCAR',
'incar': 'INCAR',
'poscar': 'POSCAR',
'kpoints': 'KPOINTS',
# outputs
'contcar': 'CONTCAR',
'chg': 'CHG',
'chgcar': 'CHGCAR',
'doscar': 'DOSCAR',
'eigenval': 'EIGENVAL',
'elfcar': 'ELFCAR',
'ibzkpt': 'IBZKPT',
'locpot': 'LOCPOT',
'oszicar': 'OSZICAR',
'outcar': 'OUTCAR',
'parchg': 'PARCHG',
'pcdat': 'PCDAT',
'procar': 'PROCAR',
'proout': 'PROOUT',
'report': 'REPORT',
'tmpcar': 'TMPCAR',
'vasprun': 'vasprun.xml',
'wavecar': 'WAVECAR',
'waveder': 'WAVEDER',
'xdatcar': 'XDATCAR',
'bsefatband': 'BSEFATBAND',
# outpts of bse-calculations
# 'W*.tmp',
# 'WFULL*.tmp',
})
class PluginDefaults(object):
# filenames for logging of stdin and stderr during AiiDA VASP calculations
@classproperty
def STDERR_FNAME(cls):
return 'aiida.err'
@classproperty
def STDOUT_FNAME(cls):
return 'aiida.out'
# default name used for the input file to the cstdn executable
@classproperty
def CSTDN_SPEC_FNAME(cls):
return 'cstdn_spec.yaml'
# default identifier prefix for neb-path node inputs
@classproperty
def NEB_NODE_PREFIX(cls):
return 'node_'
# expected format for neb-path node identifiers
@classproperty
def NEB_NODE_REGEX(cls):
import re
identifier = r"^{}[0-9]{{2}}$".format(cls.NEB_NODE_PREFIX)
return re.compile(identifier)
# default output namespace through which parsed calculation results
# are added to the calculation
@classproperty
def PARSER_OUTPUT_NAMESPACE(cls):
return "parsed_results"
class CustodianDefaults(object):
"""
Collection of default values for the custodian calculator comprising
default job options, handlers and corresponding handler options.
"""
# default name of the custodian logfile
@classproperty
def RUN_LOG_FNAME(cls):
return "run.log"
# path prefix for handler imports
@classproperty
def HANDLER_IMPORT_PATH(cls):
return 'custodian.vasp.handlers'
# import paths for the custodian jobs running VASP and VASP Neb calcs
@classproperty
def VASP_NEB_JOB_IMPORT_PATH(cls):
return 'custodian.vasp.jobs.VaspNEBJob'
@classproperty
def VASP_JOB_IMPORT_PATH(cls):
return 'custodian.vasp.jobs.VaspJob'
# default settings controlling regular VASP jobs run through custodian
@classproperty
def VASP_JOB_SETTINGS(cls):
return {
'vasp_cmd': None,
'output_file': PluginDefaults.STDOUT_FNAME,
'stderr_file': PluginDefaults.STDERR_FNAME,
'suffix': "",
'final': True,
'backup': True,
'auto_npar': False,
'auto_gamma': False,
'settings_override': None,
'gamma_vasp_cmd': None,
'copy_magmom': False,
'auto_continue': False,
}
# default settings controlling NEB VASP jobs run through custodian
@classproperty
def VASP_NEB_JOB_SETTINGS(cls):
return {
'vasp_cmd': None,
'output_file': PluginDefaults.STDOUT_FNAME,
'stderr_file': PluginDefaults.STDERR_FNAME,
'suffix': "",
'final': True,
'backup': True,
'auto_npar': False,
'auto_gamma': False,
'half_kpts': False,
'settings_override': None,
'gamma_vasp_cmd': None,
'auto_continue': False,
}
# default settings controlling the custodian executable
@classproperty
def CUSTODIAN_SETTINGS(cls):
return {
'max_errors_per_job': None,
'max_errors': 10,
'polling_time_step': 10,
'monitor_freq': 30,
'skip_over_errors': False,
'scratch_dir': None,
'gzipped_output': False,
'checkpoint': False,
'terminate_func': None,
'terminate_on_nonzero_returncode': False,
}
# custodian settings that may be altered by the user (settings not
# defined here won't be accepted when passed as input to the
# calculation's custodian.settings option!)
@classproperty
def MODIFIABLE_SETTINGS(cls):
return ['max_errors', 'polling_time_step', 'monitor_freq',
'skip_over_errors']
# dictionary of the used default settings for all VASP error handlers
# that may be used with this plugin
@classproperty
def ERROR_HANDLER_SETTINGS(cls):
return dict({
'AliasingErrorHandler': {
'output_filename': PluginDefaults.STDOUT_FNAME,
},
'DriftErrorHandler': {
'max_drift': None,
'to_average': 3,
'enaug_multiply': 2,
},
'FrozenJobErrorHandler': {
'output_filename': PluginDefaults.STDOUT_FNAME,
'timeout': 21600,
},
'LrfCommutatorHandler': {
'output_filename': PluginDefaults.STDERR_FNAME,
},
'MeshSymmetryErrorHandler': {
'output_filename': PluginDefaults.STDOUT_FNAME,
'output_vasprun': VaspDefaults.FNAMES['vasprun'],
},
'NonConvergingErrorHandler': {
'output_filename': VaspDefaults.FNAMES['oszicar'],
'nionic_steps': 10,
},
'PositiveEnergyErrorHandler': {
'output_filename': VaspDefaults.FNAMES['oszicar'],
},
'PotimErrorHandler': {
'input_filename': VaspDefaults.FNAMES['poscar'],
'output_filename': VaspDefaults.FNAMES['oszicar'],
'dE_threshold': 1.0,
},
'StdErrHandler': {
'output_filename': PluginDefaults.STDERR_FNAME,
},
'UnconvergedErrorHandler': {
'output_filename': VaspDefaults.FNAMES['vasprun'],
},
'VaspErrorHandler': {
'output_filename': PluginDefaults.STDOUT_FNAME,
'natoms_large_cell': 100,
'errors_subset_to_catch': None,
},
'WalltimeHandler': {
'wall_time': None,
'buffer_time': 300,
'electronic_step_stop': False,
},
}) # ERROR_HANDLER_SETTINGS
class VasprunParsingDefaults:
"""
Default settings used to parse vasprun.xml files
"""
# Defaults passed to the pymatgen.io.vasp.outputs.Vasprun parser
@classproperty
def PARSER_ARGS(cls):
return dict({
'ionic_step_skip': None,
'ionic_step_offset': 0,
'parse_dos': False,
'parse_eigen': False,
'parse_projected_eigen': False,
'occu_tol': 1.0E-8,
'exception_on_bad_xml': False,
})
| 31.850575
| 78
| 0.559124
|
4a081f3bcb99d6fb6391bb1314257381943e2997
| 533
|
py
|
Python
|
SmartMedApp/GUI/apps/PredictionApp/utils.py
|
SmartMedHSE/SmartMed
|
6026a667ef5bfb29561335a71e1988f81c0af19f
|
[
"Apache-2.0"
] | 1
|
2022-01-14T17:04:47.000Z
|
2022-01-14T17:04:47.000Z
|
SmartMedApp/GUI/apps/PredictionApp/utils.py
|
SmartMedHSE/SmartMed
|
6026a667ef5bfb29561335a71e1988f81c0af19f
|
[
"Apache-2.0"
] | 5
|
2021-11-14T17:18:32.000Z
|
2022-02-12T17:06:33.000Z
|
SmartMedApp/GUI/apps/PredictionApp/utils.py
|
SmartMedHSE/SmartMed
|
6026a667ef5bfb29561335a71e1988f81c0af19f
|
[
"Apache-2.0"
] | 3
|
2021-11-14T21:37:57.000Z
|
2021-11-24T10:43:24.000Z
|
import pandas as pd
import pathlib
def read_file(path):
ext = pathlib.Path(path).suffix
if ext == '.csv':
df = pd.read_csv(path)
if len(df.columns) <= 1:
df = pd.read_csv(path, sep=';')
elif ext == '.xlsx' or ext == '.xls':
df = pd.read_excel(path)
elif ext == '.tcv':
df = pd.read_excel(path, sep='\t')
else:
df = pd.DataFrame()
return df
def get_class_columns(path, num):
df = read_file(path)
return df.loc[:, df.nunique() < num].columns
| 19.035714
| 48
| 0.549719
|
4a081f9eff2ceb494959fb98449f1de892f8cbfd
| 5,746
|
py
|
Python
|
apps/messages/tests/test_views.py
|
storagebot/kitsune
|
613ba2ca09104f330ab77088b452391169096249
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T17:08:47.000Z
|
2019-10-05T11:37:02.000Z
|
apps/messages/tests/test_views.py
|
taliasman/kitsune
|
f8085205eef143011adb4c52d1f183da06c1c58e
|
[
"BSD-3-Clause"
] | null | null | null |
apps/messages/tests/test_views.py
|
taliasman/kitsune
|
f8085205eef143011adb4c52d1f183da06c1c58e
|
[
"BSD-3-Clause"
] | null | null | null |
from multidb.middleware import PINNING_COOKIE
from nose.tools import eq_
from messages.models import InboxMessage, OutboxMessage
from sumo.tests import TestCase, LocalizingClient
from sumo.urlresolvers import reverse
from users.tests import user
class ReadMessageTests(TestCase):
def setUp(self):
super(ReadMessageTests, self).setUp()
self.user1 = user(save=True)
self.user2 = user(save=True)
self.client.login(username=self.user1.username, password='testpass')
def test_mark_bulk_message_read(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not i.read
j = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not j.read
url = reverse('messages.bulk_action', locale='en-US')
resp = self.client.post(url,
{'id': [i.pk, j.pk], 'mark_read': True},
follow=True)
eq_(200, resp.status_code)
assert InboxMessage.uncached.get(pk=i.pk).read
assert InboxMessage.uncached.get(pk=j.pk).read
def test_mark_message_read(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not i.read
resp = self.client.get(reverse('messages.read', args=[i.pk]),
follow=True)
eq_(200, resp.status_code)
assert InboxMessage.uncached.get(pk=i.pk).read
assert PINNING_COOKIE in resp.cookies
def test_unread_does_not_pin(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo', read=True)
assert i.read
resp = self.client.get(reverse('messages.read', args=[i.pk]),
follow=True)
eq_(200, resp.status_code)
assert InboxMessage.uncached.get(pk=i.pk).read
assert PINNING_COOKIE not in resp.cookies
def test_mark_message_replied(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
assert not i.replied
self.client.post(reverse('messages.new', locale='en-US'),
{'to': self.user2.username, 'message': 'bar',
'in_reply_to': i.pk})
assert InboxMessage.uncached.get(pk=i.pk).replied
class DeleteMessageTests(TestCase):
def setUp(self):
super(DeleteMessageTests, self).setUp()
self.user1 = user(save=True)
self.user2 = user(save=True)
self.client.login(username=self.user1.username, password='testpass')
def test_delete_inbox_message(self):
i = InboxMessage.objects.create(sender=self.user2, to=self.user1,
message='foo')
eq_(1, InboxMessage.objects.count())
resp = self.client.post(reverse('messages.delete', args=[i.pk],
locale='en-US'),
{'confirmed': True},
follow=True)
eq_(200, resp.status_code)
eq_(0, InboxMessage.uncached.count())
def test_delete_many_message(self):
i = InboxMessage.objects.create(to=self.user1, sender=self.user2,
message='foo')
j = InboxMessage.objects.create(to=self.user1, sender=self.user2,
message='foo')
eq_(2, InboxMessage.objects.count())
url = reverse('messages.bulk_action', locale='en-US')
resp = self.client.post(url, {'id': [i.pk, j.pk],
'delete': True,
'confirmed': True},
follow=True)
eq_(200, resp.status_code)
eq_(0, InboxMessage.uncached.count())
def test_delete_outbox_message(self):
i = OutboxMessage.objects.create(sender=self.user1, message='foo')
i.to.add(self.user2)
eq_(1, OutboxMessage.objects.count())
resp = self.client.post(reverse('messages.delete_outbox', args=[i.pk],
locale='en-US'),
{'confirmed': True}, follow=True)
eq_(200, resp.status_code)
eq_(0, OutboxMessage.uncached.count())
class OutboxTests(TestCase):
client_class = LocalizingClient
def setUp(self):
super(OutboxTests, self).setUp()
self.user1 = user(save=True)
self.user2 = user(save=True)
self.client.login(username=self.user1.username, password='testpass')
def test_message_without_recipients(self):
OutboxMessage.objects.create(sender=self.user1, message='foo')
eq_(1, OutboxMessage.objects.count())
resp = self.client.post(reverse('messages.outbox'), follow=True)
eq_(200, resp.status_code)
def test_delete_many_outbox_message(self):
i = OutboxMessage.objects.create(sender=self.user1, message='foo')
i.to.add(self.user2)
j = OutboxMessage.objects.create(sender=self.user1, message='foo')
j.to.add(self.user2)
eq_(2, OutboxMessage.uncached.count())
url = reverse('messages.outbox_bulk_action', locale='en-US')
resp = self.client.post(url, {'id': [i.pk, j.pk],
'delete': True,
'confirmed': True},
follow=True)
eq_(200, resp.status_code)
eq_(0, OutboxMessage.uncached.count())
| 42.880597
| 78
| 0.567699
|
4a08201646af48efa5c2f46ef5387f94a3f48494
| 7,372
|
py
|
Python
|
pyprometheus/metrics.py
|
Lispython/pyprometheus
|
d364b0dd01597b001102b5c8157f289f0eb18f5a
|
[
"BSD-3-Clause"
] | 13
|
2017-02-26T13:36:27.000Z
|
2019-11-14T06:31:40.000Z
|
pyprometheus/metrics.py
|
Lispython/pyprometheus
|
d364b0dd01597b001102b5c8157f289f0eb18f5a
|
[
"BSD-3-Clause"
] | 8
|
2017-02-27T16:28:08.000Z
|
2019-01-24T16:50:07.000Z
|
pyprometheus/metrics.py
|
Lispython/pyprometheus
|
d364b0dd01597b001102b5c8157f289f0eb18f5a
|
[
"BSD-3-Clause"
] | 3
|
2018-10-24T11:18:47.000Z
|
2019-11-14T06:31:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyprometheus.metrics
~~~~~~~~~~~~~~~~~~~~
Prometheus instrumentation library for Python applications
:copyright: (c) 2017 by Alexandr Lispython.
:license: , see LICENSE for more details.
:github: http://github.com/Lispython/pyprometheus
"""
from pyprometheus.const import TYPES
from pyprometheus.utils import escape_str
from pyprometheus.values import (MetricValue, GaugeValue,
CounterValue, SummaryValue,
HistogramValue)
class BaseMetric(object):
value_class = MetricValue
NOT_ALLOWED_LABELS = set()
TYPE = "untyped"
PARENT_METHODS = set()
def __init__(self, name, doc, labels=[], registry=None):
self._name = name
self._doc = doc
self._labelnames = tuple(sorted(labels))
self.validate_labelnames(labels)
self._storage = None
if registry is not None:
self.add_to_registry(registry)
self._samples = {}
self._labels_cache = {}
def __repr__(self):
return u"<{0}[{1}]: {2} samples>".format(self.__class__.__name__, self._name, len(self._samples))
def get_proxy(self):
if self._labelnames:
raise RuntimeError("You need to use labels")
return self.value_class(self, label_values={})
def validate_labelnames(self, names):
for name in names:
if name in self.NOT_ALLOWED_LABELS:
raise RuntimeError("Label name {0} not allowed for {1}".format(name, self.__class__.__name__))
return True
@property
def name(self):
return self._name
@property
def doc(self):
return self._doc
@property
def label_names(self):
return self._labelnames
@property
def uid(self):
return "{0}-{1}".format(self._name, str(self._labelnames))
def add_to_registry(self, registry):
"""Add metric to registry
"""
registry.register(self)
self._storage = registry.storage
return self
def labels(self, *args, **kwargs):
if args and isinstance(args[0], dict):
label_values = self.value_class.prepare_labels(args[0])[0]
else:
label_values = self.value_class.prepare_labels(kwargs)[0]
return self._labels_cache.setdefault((label_values, self.value_class.TYPE),
self.value_class(self, label_values=label_values))
@property
def text_export_header(self):
"""
Format description lines for collector
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
"""
return "\n".join(["# HELP {name} {doc}",
"# TYPE {name} {metric_type}"]).format(
name=escape_str(self.name),
doc=escape_str(self.doc),
metric_type=self.TYPE)
def build_samples(self, items):
"""Build samples from objects
[((2, "metric_gauge_name", "", (("label1", "value3"), ("label2", "value4"))), 5.0)]
"""
for label_values, data in items:
self.add_sample(label_values, self.build_sample(label_values, data))
return self
def clear_samples(self):
self._samples.clear()
def build_sample(self, label_values, item):
"""Build value object from given data
"""
return self.value_class(self, label_values=label_values, value=item[0][-1])
def add_sample(self, label_values, value):
self._samples[tuple(sorted(label_values, key=lambda x: x[0]))] = value
def get_samples(self):
"""Get samples from storage
"""
return self._samples.values()
def __getattr__(self, name):
if name in self.PARENT_METHODS:
return getattr(self.get_proxy(), name)
raise AttributeError
# return super(BaseMetric, self).__getattr__(name)
class Gauge(BaseMetric):
TYPE = "gauge"
value_class = GaugeValue
PARENT_METHODS = set(("inc", "dec", "set", "get", "track_inprogress",
"set_to_current_time", "time", "value"))
class Counter(BaseMetric):
TYPE = "counter"
value_class = CounterValue
PARENT_METHODS = set(("inc", "get", "value"))
class Summary(BaseMetric):
TYPE = "summary"
DEFAULT_QUANTILES = (0, 0.25, 0.5, 0.75, 1)
value_class = SummaryValue
NOT_ALLOWED_LABELS = set("quantile")
PARENT_METHODS = set(("observe", "value", "time"))
def __init__(self, name, doc, labels=[], quantiles=False, registry=None):
self._quantiles = list(sorted(quantiles)) if quantiles else []
super(Summary, self).__init__(name, doc, labels, registry)
@property
def quantiles(self):
return self._quantiles
def build_sample(self, label_values, data):
subtypes = {
"sum": None,
"count": None,
"quantiles": [] if isinstance(self._quantiles, (list, tuple)) else None
}
for meta, value in data:
value_class = self.value_class.SUBTYPES[meta[2]]
if meta[0] == TYPES.SUMMARY_SUM:
subtypes["sum"] = value_class(self, label_values=label_values, value=value)
elif meta[0] == TYPES.SUMMARY_COUNTER:
subtypes["count"] = value_class(self, label_values=label_values, value=value)
elif meta[0] == TYPES.SUMMARY_QUANTILE:
quantile = dict(meta[3])["quantile"]
subtypes["quantiles"].append(
value_class(self, label_values=label_values, quantile=quantile, value=value))
return self.value_class(self, label_values=label_values, value=subtypes)
class Histogram(BaseMetric):
TYPE = "histogram"
DEFAULT_BUCKETS = (0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5,
0.75, 1.0, 2.5, 5.0, 7.5, 10.0, float("inf"))
NOT_ALLOWED_LABELS = set("le")
value_class = HistogramValue
PARENT_METHODS = set(("observe", "value", "time"))
def __init__(self, name, doc, labels=[], buckets=DEFAULT_BUCKETS, registry=None):
self._buckets = list(sorted(buckets)) if buckets else []
super(Histogram, self).__init__(name, doc, labels, registry)
@property
def buckets(self):
return self._buckets
def build_sample(self, label_values, data):
subtypes = {
"sum": None,
"count": None,
"buckets": [] if isinstance(self._buckets, (list, tuple)) else None
}
for meta, value in data:
value_class = self.value_class.SUBTYPES[meta[2]]
if meta[0] == TYPES.HISTOGRAM_SUM:
subtypes["sum"] = value_class(self, label_values=label_values, value=value)
elif meta[0] == TYPES.HISTOGRAM_COUNTER:
subtypes["count"] = value_class(self, label_values=label_values, value=value)
elif meta[0] == TYPES.HISTOGRAM_BUCKET:
bucket = dict(meta[3])["bucket"]
subtypes["buckets"].append(
value_class(self, label_values=label_values, bucket=bucket, value=value))
return self.value_class(self, label_values=label_values, value=subtypes)
| 30.97479
| 110
| 0.602821
|
4a0820f095dd98427f47ef022eccc12b8070e077
| 973
|
py
|
Python
|
bgpq3d/tests/test_cases.py
|
wolcomm/bgpq3d
|
689bf9a495ebf11749dcf5b3e4a2b70c90d12611
|
[
"Apache-2.0"
] | 2
|
2019-05-15T13:21:25.000Z
|
2020-04-26T19:28:31.000Z
|
bgpq3d/tests/test_cases.py
|
wolcomm/bgpq3d
|
689bf9a495ebf11749dcf5b3e4a2b70c90d12611
|
[
"Apache-2.0"
] | null | null | null |
bgpq3d/tests/test_cases.py
|
wolcomm/bgpq3d
|
689bf9a495ebf11749dcf5b3e4a2b70c90d12611
|
[
"Apache-2.0"
] | null | null | null |
import os
from unittest import TestCase
from whichcraft import which
from bgpq3d import parser, dispatch
class TestOutput(TestCase):
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bgpq3d-test.ini')
def test_01_dependency(self):
path = which('bgpq3')
self.assertIsInstance(path, str, msg="bgpq3 executable not found in PATH")
def test_02_autnum(self):
cli = ['-f', self.config_path, '--object', 'AS37271']
self.assertIsInstance(self._get_output(cli=cli), dict, msg="didn't get a dict object")
def test_03_as_set(self):
cli = ['-f', self.config_path, '--object', 'AS37271:AS-CUSTOMERS']
self.assertIsInstance(self._get_output(cli=cli), dict, msg="didn't get a dict object")
def _get_output(self, cli=None):
args = parser.Parser(args=cli).args
dispatcher = dispatch.Dispatcher(args=args, test=True)
output = dispatcher.dispatch()
return output
| 36.037037
| 94
| 0.68037
|
4a0821451d7d8917fb426d5e99dcecd2944a90ab
| 2,902
|
py
|
Python
|
validate_examples.py
|
blkmajik/hyperglass
|
c52a6f609843177671d38bcad59b8bd658f46b64
|
[
"BSD-3-Clause-Clear"
] | 298
|
2019-06-17T13:51:46.000Z
|
2021-06-23T18:09:51.000Z
|
validate_examples.py
|
blkmajik/hyperglass
|
c52a6f609843177671d38bcad59b8bd658f46b64
|
[
"BSD-3-Clause-Clear"
] | 137
|
2019-06-18T12:59:37.000Z
|
2021-06-19T05:50:58.000Z
|
validate_examples.py
|
blkmajik/hyperglass
|
c52a6f609843177671d38bcad59b8bd658f46b64
|
[
"BSD-3-Clause-Clear"
] | 42
|
2019-06-18T07:25:23.000Z
|
2021-06-18T17:40:20.000Z
|
"""Validate example files."""
# Standard Library
import re
import sys
from pathlib import Path
# Third Party
import yaml
# Project
from hyperglass.util import set_app_path
EXAMPLES = Path(__file__).parent.parent / "hyperglass" / "examples"
DEVICES = EXAMPLES / "devices.yaml"
COMMANDS = EXAMPLES / "commands.yaml"
MAIN = EXAMPLES / "hyperglass.yaml"
def _uncomment_files():
"""Uncomment out files."""
for file in (MAIN, COMMANDS):
output = []
with file.open("r") as f:
for line in f.readlines():
commented = re.compile(r"^(#\s*#?\s?).*$")
if re.match(commented, line):
output.append(re.sub(r"^#\s*#?\s?$", "", line))
else:
output.append(line)
with file.open("w") as f:
f.write("".join(output))
return True
def _comment_optional_files():
"""Comment out files."""
for file in (MAIN, COMMANDS):
output = []
with file.open("r") as f:
for line in f.readlines():
if not re.match(r"^(#\s*#?\s?).*$|(^\-{3})", line):
output.append("# " + line)
else:
output.append(line)
with file.open("w") as f:
f.write("".join(output))
return True
def _validate_devices():
# Project
from hyperglass.models.config.devices import Devices
with DEVICES.open() as raw:
devices_dict = yaml.safe_load(raw.read()) or {}
try:
Devices(devices_dict.get("routers", []))
except Exception as e:
raise ValueError(str(e))
return True
def _validate_commands():
# Project
from hyperglass.models.commands import Commands
with COMMANDS.open() as raw:
commands_dict = yaml.safe_load(raw.read()) or {}
try:
Commands.import_params(**commands_dict)
except Exception as e:
raise ValueError(str(e))
return True
def _validate_main():
# Project
from hyperglass.models.config.params import Params
with MAIN.open() as raw:
main_dict = yaml.safe_load(raw.read()) or {}
try:
Params(**main_dict)
except Exception as e:
raise
raise ValueError(str(e))
return True
def validate_all():
"""Validate all example configs against configuration models."""
_uncomment_files()
for validator in (_validate_main, _validate_commands, _validate_devices):
try:
validator()
except ValueError as e:
raise RuntimeError(str(e))
return True
if __name__ == "__main__":
set_app_path(required=True)
try:
all_passed = validate_all()
message = "All tests passed"
status = 0
except RuntimeError as e:
message = str(e)
status = 1
if status == 0:
_comment_optional_files()
print(message)
sys.exit(status)
| 25.017241
| 77
| 0.583735
|
4a082224b3851fe4bf5afdf58db88be944f8c1e4
| 7,990
|
py
|
Python
|
Reddit_algorithms/reddit_sentiment_vader.py
|
arshdeepdhillon/Sentiment-Analysis
|
9a91e31e7f19ea8537ec4a711e6d69b0e9acb480
|
[
"MIT"
] | null | null | null |
Reddit_algorithms/reddit_sentiment_vader.py
|
arshdeepdhillon/Sentiment-Analysis
|
9a91e31e7f19ea8537ec4a711e6d69b0e9acb480
|
[
"MIT"
] | 2
|
2019-02-14T11:07:35.000Z
|
2019-03-19T16:00:48.000Z
|
Reddit_algorithms/reddit_sentiment_vader.py
|
arshdeepdhillon/Sentiment-Analysis
|
9a91e31e7f19ea8537ec4a711e6d69b0e9acb480
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, '../extract_data')
import extract_reddit_comments as RDT
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import timeit
class Vader:
def __init__(self):
# file's name that will be genereated in the comments_parsed_path folder
self.comments_parsed_file_name = "output.txt"
# all comments from each file that resides in json_formatted_path folder is
# extracted and aggregated into one file under comments_parsed_path folder
self.comments_parsed_path = "comments_extracted/" + self.comments_parsed_file_name
# used to plot a chart
self.nPosCorrect = 0
self.nPosCount = 0
self.nNegCorrect = 0
self.nNegCount = 0
def getRedditData(self):
rd = RDT.Extract()
rd.getComments()
def VaderAnalysis(self):
self.getRedditData()
analyzer = SentimentIntensityAnalyzer()
compoundScore = 0.00 # accuracy is good when threshold is close to 0.09
# check for positive text
with open(self.comments_parsed_path, "r") as f:
#with open("yelp_positive.txt", "r") as f:
startP = timeit.default_timer()
for line in f:
analysis = analyzer.polarity_scores(line)
self.nPosCount += 1
self.nNegCount += 1
if analysis['compound'] > compoundScore:
self.nPosCorrect += 1
if analysis['compound'] <= compoundScore:
self.nNegCorrect += 1
stopP = timeit.default_timer()
# check for neutral text
#with open(self.comments_parsed_path, "r") as f:
#with open("yelp_negative.txt", "r") as f:
# startN = timeit.default_timer()
# for line in f:
# analysis = analyzer.polarity_scores(line)
# self.nNegCount += 1
# if analysis['compound'] < compoundScore:
# if analysis['compound'] <= 0:
# self.nNegCorrect += 1
#stopN = timeit.default_timer()
print("\nFinished in {:0.4f} sec".format(stopP-startP))
print("Positive " + self.percentage(self.nPosCorrect,self.nPosCount))
print("Negative " + self.percentage(self.nNegCorrect,self.nNegCount))
#print("F-score is ", '{0:.3g}'.format(self.evaluteModel(self.nPosCorrect,self.nPosCount,self.nNegCorrect,self.nNegCount)))
return(stopP-startP)
# uncomment the below line to view the result using pie chart
# self.plotData()
def percentage(self,nCorrect, nCounted):
return (" {:0.4f}% via {} samples".format(nCorrect/nCounted*100.0, nCounted))
def plotData(self):
# plotting data
import matplotlib.pyplot as plt
# declare variables
labels = 'Positive', 'Neutral'
sizes = [self.nPosCorrect, self.nNegCorrect]
colors = ['green', 'red']
# using matplotlib to plot the data
plt.pie(sizes, labels = labels, colors = colors, shadow = True, startangle = 90)
strg = str("Sentiment of {} positives and {} negatives").format(self.nPosCount,self.nNegCount)
plt.title(strg)
plt.show()
def evaluteModel(self,nPosCorrect,nPosCount,nNegCorrect,nNegCount):
"""
Purpose:
Calculates the f-score, the closer it is to 1 the better.
This method can be extented to further calcualted other measures.
Note:
tp = True Positive - actual and predicted values are same
fn = False Negative - actual was positive but we predicted negative
tn = True Negative - actual and predicted values are same
fp = False Positive - actual was negative but we predicted positive
Returns:
f-score: float
"""
tp = nPosCorrect
fn = nPosCount - nPosCorrect
tn = nNegCorrect
fp = nNegCount - nNegCorrect
"""
print("tp: ",nPosCount," ","fn: ", fn)
print("fp: ", fp," ", "tn: ",tn)
"""
precision = tp/(float(tp+fp))
recall = tp/(float(tp+fn))
result = 2 * precision * (recall/(precision + recall))
return(result)
# Run the analysis a few times and report the average wall-clock time.
total_time = 0.0
run_count = 1
for run_index in range(run_count):
    print("\nRun #{:}".format(run_index + 1))
    total_time += Vader().VaderAnalysis()
print("\nFinished with ave. time {:0.4f} sec".format(total_time / run_count))
#
#
#
#
#
#
# import sys
# sys.path.insert(0, '../extract_data')
# import extract_reddit_comments as RDT
#
# from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# import timeit
#
# class Vader:
#
# def __init__(self):
#
# # file's name that will be genereated in the comments_parsed_path folder
# self.comments_parsed_file_name = "output.txt"
#
# # all comments from each file that resides in json_formatted_path folder is
# # extracted and aggregated into one file under comments_parsed_path folder
# self.comments_parsed_path = "comments_extracted/" + self.comments_parsed_file_name
#
# self.nPosCorrect = 0
# self.nPosCount = 0
# self.nNegCorrect = 0
# self.nNegCount = 0
#
#
# def getRedditData(self):
# rd = RDT.Extract()
# rd.getComments()
#
# def VaderAnalysis(self):
# self.getRedditData()
# analyzer = SentimentIntensityAnalyzer()
# compoundScore = 0.09 # accuracy is good when threshold is close to 0.09
#
#
# with open(self.comments_parsed_path, "r") as f:
# # with open("positive.txt", "r") as f:
# startP = timeit.default_timer()
# for line in f:
#
# analysis = analyzer.polarity_scores(line)
# if not analysis['neg'] > compoundScore:
# if analysis['pos'] - analysis['neg'] > 0:
# self.nPosCorrect += 1
# self.nPosCount += 1
# stopP = timeit.default_timer()
#
#
# with open(self.comments_parsed_path, "r") as f:
# # with open("negative.txt", "r") as f:
# startN = timeit.default_timer()
# for line in f:
# analysis = analyzer.polarity_scores(line)
# if not analysis['pos'] > compoundScore:
# if analysis['pos'] - analysis['neg'] <= 0:
# self.nNegCorrect += 1
# self.nNegCount += 1
# stopN = timeit.default_timer()
#
#
# print("\nFinished in {:0.4f} sec".format(stopP-startP + stopP-startP))
# print("Positive " + self.percentage(self.nNegCorrect,self.nNegCount))
# print("Negative " + self.percentage(self.nPosCorrect,self.nPosCount))
# return(stopP-startP + stopP-startP)
# # uncomment the below line to view the result using pie chart
# # self.plotData()
#
#
# def percentage(self,nCorrect, nCounted):
# return ("Accuracy is {:0.4f}% via {} samples".format(nCorrect/nCounted*100.0, nCounted))
#
# def plotData(self):
# # plotting data
# import matplotlib.pyplot as plt
#
# # declare variables
# labels = 'Positive', 'Neutral'
# sizes = [self.nPosCorrect, self.nNegCorrect]
# colors = ['green', 'red']
#
# # using matplotlib to plot the data
# plt.pie(sizes, labels = labels, colors = colors, shadow = True, startangle = 90)
# strg = str("Sentiment of {} positives and {} negatives").format(self.nPosCount,self.nNegCount)
# plt.title(strg)
# plt.show()
#
#
#
# # run the analysis couple of time to get the average time
# totalTime = 0.0
# nRuns = 2
# for i in range(nRuns):
# print("\nRun #{:}".format(i+1))
# totalTime += Vader().VaderAnalysis()
#
# print("\nFinished with ave. time {:0.4f} sec".format(totalTime/nRuns))
| 32.745902
| 131
| 0.593617
|
4a08252e340e9b4a8e337bc06b957232884b243f
| 225
|
py
|
Python
|
analytics/admin.py
|
ayushkalani/delightchat
|
7b60ca16ccd1cf4005fc2833fa03256f11cd71c7
|
[
"MIT"
] | null | null | null |
analytics/admin.py
|
ayushkalani/delightchat
|
7b60ca16ccd1cf4005fc2833fa03256f11cd71c7
|
[
"MIT"
] | null | null | null |
analytics/admin.py
|
ayushkalani/delightchat
|
7b60ca16ccd1cf4005fc2833fa03256f11cd71c7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from analytics.models import Accounts, Users, Conversations, Channels

# Expose the analytics models in the Django admin site.
for model in (Accounts, Users, Conversations, Channels):
    admin.site.register(model)
| 32.142857
| 69
| 0.84
|
4a08257a61ba6fcbedb01e8c94b26cf5dcd749b7
| 10,729
|
py
|
Python
|
google/cloud/pubsub_v1/subscriber/message.py
|
renovate-bot/python-pubsub
|
bb25d755d70ba19e69d8a281be65f13eb994967d
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/pubsub_v1/subscriber/message.py
|
renovate-bot/python-pubsub
|
bb25d755d70ba19e69d8a281be65f13eb994967d
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/pubsub_v1/subscriber/message.py
|
renovate-bot/python-pubsub
|
bb25d755d70ba19e69d8a281be65f13eb994967d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import datetime as dt
import json
import math
import time
import typing
from typing import Optional
from google.cloud.pubsub_v1.subscriber._protocol import requests
if typing.TYPE_CHECKING: # pragma: NO COVER
import datetime
import queue
from google.cloud.pubsub_v1 import types
from google.protobuf.internal import containers
_MESSAGE_REPR = """\
Message {{
data: {!r}
ordering_key: {!r}
attributes: {}
}}"""
def _indent(lines: str, prefix: str = " ") -> str:
"""Indent some text.
Note that this is present as ``textwrap.indent``, but not in Python 2.
Args:
lines:
The newline delimited string to be indented.
prefix:
The prefix to indent each line with. Defaults to two spaces.
Returns:
The newly indented content.
"""
indented = []
for line in lines.split("\n"):
indented.append(prefix + line)
return "\n".join(indented)
class Message(object):
    """A representation of a single Pub/Sub message.

    The common way to interact with
    :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive
    them in callbacks on subscriptions; most users should never have a need
    to instantiate them by hand. (The exception to this is if you are
    implementing a custom subclass to
    :class:`~.pubsub_v1.subscriber._consumer.Consumer`.)

    Attributes:
        message_id:
            The message ID. In general, you should not need to use this directly.
        data:
            The data in the message. Note that this will be a :class:`bytes`,
            not a text string.
        attributes:
            The attributes sent along with the message. See :attr:`attributes` for more
            information on this type.
        publish_time:
            The time that this message was originally published.
    """

    def __init__(  # pytype: disable=module-attr
        self,
        message: "types.PubsubMessage._meta._pb",
        ack_id: str,
        delivery_attempt: int,
        request_queue: "queue.Queue",
    ):
        """Construct the Message.

        .. note::
            This class should not be constructed directly; it is the
            responsibility of :class:`BasePolicy` subclasses to do so.

        Args:
            message:
                The message received from Pub/Sub. For performance reasons it should be
                the raw protobuf message normally wrapped by
                :class:`~pubsub_v1.types.PubsubMessage`. A raw message can be obtained
                from a :class:`~pubsub_v1.types.PubsubMessage` instance through the
                latter's ``._pb`` attribute.
            ack_id:
                The ack_id received from Pub/Sub.
            delivery_attempt:
                The delivery attempt counter received from Pub/Sub if a DeadLetterPolicy
                is set on the subscription, and zero otherwise.
            request_queue:
                A queue provided by the policy that can accept requests; the policy is
                responsible for handling those requests.
        """
        self._message = message
        self._ack_id = ack_id
        # The server sends 0 when no DeadLetterPolicy is set; normalize to None.
        self._delivery_attempt = delivery_attempt if delivery_attempt > 0 else None
        self._request_queue = request_queue
        self.message_id = message.message_id
        # The instantiation time is the time that this message
        # was received. Tracking this provides us a way to be smart about
        # the default lease deadline.
        self._received_timestamp = time.time()
        # Store the message attributes directly to speed up attribute access, i.e.
        # to avoid two lookups if self._message.<attribute> pattern was used in
        # properties.
        self._attributes = message.attributes
        self._data = message.data
        self._publish_time = dt.datetime.fromtimestamp(
            message.publish_time.seconds + message.publish_time.nanos / 1e9,
            tz=dt.timezone.utc,
        )
        self._ordering_key = message.ordering_key
        self._size = message.ByteSize()

    def __repr__(self):
        # Get an abbreviated version of the data. Read the cached copy
        # (self._data) rather than going back through the protobuf message;
        # the cache exists precisely to avoid the slower proto lookup.
        abbv_data = self._data
        if len(abbv_data) > 50:
            abbv_data = abbv_data[:50] + b"..."
        pretty_attrs = json.dumps(
            dict(self.attributes), indent=2, separators=(",", ": "), sort_keys=True
        )
        pretty_attrs = _indent(pretty_attrs)
        # We don't actually want the first line indented.
        pretty_attrs = pretty_attrs.lstrip()
        return _MESSAGE_REPR.format(abbv_data, str(self.ordering_key), pretty_attrs)

    @property
    def attributes(self) -> "containers.ScalarMap":
        """Return the attributes of the underlying Pub/Sub Message.

        .. warning::
            A ``ScalarMap`` behaves slightly differently than a
            ``dict``. For a Pub / Sub message this is a ``string->string`` map.
            When trying to access a value via ``map['key']``, if the key is
            not in the map, then the default value for the string type will
            be returned, which is an empty string. It may be more intuitive
            to just cast the map to a ``dict`` or to one use ``map.get``.

        Returns:
            The message's attributes. This is a ``dict``-like object provided by
            ``google.protobuf``.
        """
        return self._attributes

    @property
    def data(self) -> bytes:
        """Return the data for the underlying Pub/Sub Message.

        Returns:
            The message data. This is always a bytestring; if you want a text string,
            call :meth:`bytes.decode`.
        """
        return self._data

    @property
    def publish_time(self) -> "datetime.datetime":
        """Return the time that the message was originally published.

        Returns:
            The date and time that the message was published.
        """
        return self._publish_time

    @property
    def ordering_key(self) -> str:
        """The ordering key used to publish the message."""
        return self._ordering_key

    @property
    def size(self) -> int:
        """Return the size of the underlying message, in bytes."""
        return self._size

    @property
    def ack_id(self) -> str:
        """The ID used to ack the message."""
        return self._ack_id

    @property
    def delivery_attempt(self) -> Optional[int]:
        """The delivery attempt counter is 1 + (the sum of number of NACKs
        and number of ack_deadline exceeds) for this message. It is set to None
        if a DeadLetterPolicy is not set on the subscription.

        A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline
        exceeds event is whenever a message is not acknowledged within
        ack_deadline. Note that ack_deadline is initially
        Subscription.ackDeadlineSeconds, but may get extended automatically by
        the client library.

        The first delivery of a given message will have this value as 1. The value
        is calculated at best effort and is approximate.

        Returns:
            The delivery attempt counter or ``None``.
        """
        return self._delivery_attempt

    def ack(self) -> None:
        """Acknowledge the given message.

        Acknowledging a message in Pub/Sub means that you are done
        with it, and it will not be delivered to this subscription again.
        You should avoid acknowledging messages until you have
        *finished* processing them, so that in the event of a failure,
        you receive the message again.

        .. warning::
            Acks in Pub/Sub are best effort. You should always
            ensure that your processing code is idempotent, as you may
            receive any given message more than once.
        """
        time_to_ack = math.ceil(time.time() - self._received_timestamp)
        self._request_queue.put(
            requests.AckRequest(
                ack_id=self._ack_id,
                byte_size=self.size,
                time_to_ack=time_to_ack,
                ordering_key=self.ordering_key,
            )
        )

    def drop(self) -> None:
        """Release the message from lease management.

        This informs the policy to no longer hold on to the lease for this
        message. Pub/Sub will re-deliver the message if it is not acknowledged
        before the existing lease expires.

        .. warning::
            For most use cases, the only reason to drop a message from
            lease management is on `ack` or `nack`; this library
            automatically drop()s the message on `ack` or `nack`. You probably
            do not want to call this method directly.
        """
        self._request_queue.put(
            requests.DropRequest(
                ack_id=self._ack_id, byte_size=self.size, ordering_key=self.ordering_key
            )
        )

    def modify_ack_deadline(self, seconds: int) -> None:
        """Resets the deadline for acknowledgement.

        New deadline will be the given value of seconds from now.

        The default implementation handles this for you; you should not need
        to manually deal with setting ack deadlines. The exception case is
        if you are implementing your own custom subclass of
        :class:`~.pubsub_v1.subcriber._consumer.Consumer`.

        Args:
            seconds:
                The number of seconds to set the lease deadline to. This should be
                between 0 and 600. Due to network latency, values below 10 are advised
                against.
        """
        self._request_queue.put(
            requests.ModAckRequest(ack_id=self._ack_id, seconds=seconds)
        )

    def nack(self) -> None:
        """Decline to acknowldge the given message.

        This will cause the message to be re-delivered to the subscription.
        """
        self._request_queue.put(
            requests.NackRequest(
                ack_id=self._ack_id, byte_size=self.size, ordering_key=self.ordering_key
            )
        )
| 36.124579
| 88
| 0.634169
|
4a08267afa6cbcc6650d9144de731b248cfa24ee
| 968
|
py
|
Python
|
ymir/backend/src/pymir-app/app/api/api_v1/endpoints/upload.py
|
under-chaos/ymir
|
83e98186b23429e6027b187cdade247f5f93e5de
|
[
"Apache-2.0"
] | 1
|
2022-01-12T03:12:47.000Z
|
2022-01-12T03:12:47.000Z
|
ymir/backend/src/pymir-app/app/api/api_v1/endpoints/upload.py
|
under-chaos/ymir
|
83e98186b23429e6027b187cdade247f5f93e5de
|
[
"Apache-2.0"
] | null | null | null |
ymir/backend/src/pymir-app/app/api/api_v1/endpoints/upload.py
|
under-chaos/ymir
|
83e98186b23429e6027b187cdade247f5f93e5de
|
[
"Apache-2.0"
] | null | null | null |
import random
import secrets
from typing import Any, List
from fastapi import (
APIRouter,
Depends,
File,
Form,
HTTPException,
Path,
Query,
UploadFile,
)
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
from app.api.errors.errors import (
AssetNotFound,
DatasetNotFound,
DuplicateDatasetError,
NoDatasetPermission,
WorkspaceNotFound,
)
from app.config import settings
from app.utils.files import host_file, md5_of_file
from app.utils.ymir_controller import ControllerClient
from app.utils.ymir_viz import VizClient
router = APIRouter()


@router.post(
    "/uploadfile/",
    response_model=schemas.Msg,
)
def upload(
    *,
    file: UploadFile = File(...),
    current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
    """Accept an uploaded file and return a URL that serves it.

    Requires an authenticated, active user (enforced by the dependency).
    """
    return {"result": host_file(file)}
| 20.166667
| 70
| 0.705579
|
4a0828e249846975ec604b17538584952548cf90
| 33,831
|
py
|
Python
|
upstox_api/api.py
|
sivaa/upstox-python
|
35b1509806456cf95484064d4471a7a3f98c63a2
|
[
"MIT"
] | 59
|
2017-07-16T18:53:46.000Z
|
2022-01-21T06:53:25.000Z
|
upstox_api/api.py
|
somsirsa/upstox-python
|
35b1509806456cf95484064d4471a7a3f98c63a2
|
[
"MIT"
] | 26
|
2017-06-18T10:41:50.000Z
|
2022-03-29T15:23:19.000Z
|
upstox_api/api.py
|
somsirsa/upstox-python
|
35b1509806456cf95484064d4471a7a3f98c63a2
|
[
"MIT"
] | 73
|
2017-09-12T14:11:11.000Z
|
2022-03-25T17:59:04.000Z
|
import json, os, future
from collections import OrderedDict
from upstox_api.utils import *
import websocket, threading
import logging
from datetime import date, datetime
import requests
from requests.auth import HTTPBasicAuth
from builtins import int
try:
from urllib.parse import urlencode
except:
from urllib import urlencode
# compatible import
from future.standard_library import install_aliases
install_aliases()
# Module-level caches of instrument master contracts, keyed by exchange
# token number. NOTE(review): population happens elsewhere in this module.
master_contracts_by_token = dict()
# The same master contracts, keyed by trading symbol.
master_contracts_by_symbol = dict()
class Session:
    """Session object to create and authenticate a session.

    Typical flow: construct with the ``api_key`` from the Developer
    Console, set the API secret and redirect URI, send the user to
    :meth:`get_login_url`, store the returned authorization code with
    :meth:`set_code`, then call :meth:`retrieve_access_token`.
    """

    # account and session variables
    api_key = None
    api_secret = None
    redirect_uri = None
    code = None
    # dictionary object to hold settings
    config = None

    def __init__(self, api_key):
        self.api_key = api_key
        # service_config.json ships with the package and holds the REST
        # host plus route templates.
        with open(os.path.join(os.path.dirname(__file__), 'service_config.json')) as data_file:
            self.config = json.load(data_file)

    def set_api_secret(self, api_secret):
        self.api_secret = api_secret

    def set_redirect_uri(self, redirect_uri):
        self.redirect_uri = redirect_uri

    def set_code(self, code):
        self.code = code

    def get_login_url(self):
        """Return the URL to log in to and retrieve an authorization code.

        ``api_key`` and ``redirect_uri`` have to be set.

        Raises:
            TypeError: if ``api_key`` or ``redirect_uri`` is ``None``.
        """
        # BUGFIX: the original used the Python-2 idiom ``raise (TypeError, msg)``,
        # which in Python 3 raises "exceptions must derive from BaseException"
        # and loses the intended message. Raise the exception properly.
        if self.api_key is None:
            raise TypeError('Value api_key cannot be None. Please go to the Developer Console to get this value')
        if self.redirect_uri is None:
            raise TypeError('Value redirect_uri cannot be None. Please go to the Developer Console to get this value')
        params = {'apiKey': self.api_key, 'redirect_uri': self.redirect_uri, 'response_type': 'code'}
        return self.config['host'] + self.config['routes']['authorize'] + '?' + urlencode(params)

    def retrieve_access_token(self):
        """Exchange the authorization code for an access token.

        Once you have the authorization code, call this to obtain the
        access_token. The access_token gives you full access to the API
        and is valid throughout the day.

        Raises:
            TypeError: if any of api_key, api_secret, redirect_uri or code is None.
            SystemError: if the token endpoint response contains no access_token.
        """
        if self.api_key is None:
            raise TypeError('Value api_key cannot be None. Please go to the Developer Console to get this value')
        if self.redirect_uri is None:
            raise TypeError('Value redirect_uri cannot be None. Please go to the Developer Console to get this value')
        if self.api_secret is None:
            raise TypeError('Value api_secret cannot be None. Please go to the Developer Console to get this value')
        if self.code is None:
            raise TypeError('Value code cannot be None. Please visit the login URL to generate a code')
        params = {'code': self.code, 'redirect_uri': self.redirect_uri, 'grant_type': 'authorization_code'}
        url = self.config['host'] + self.config['routes']['accessToken']
        headers = {"Content-Type": "application/json", "x-api-key": self.api_key}
        r = requests.post(url, auth=(self.api_key, self.api_secret), data=json.dumps(params), headers=headers)
        body = json.loads(r.text)
        if 'access_token' not in body:
            raise SystemError(body)
        return body['access_token']
class Upstox:
api_key = None
access_token = None
# dictionary object to hold settings
config = None
enabled_exchanges = None
products_enabled = None
websocket = None
on_order_update = None
on_trade_update = None
on_quote_update = None
on_error = None
on_disconnect = None
def _on_data (self, ws, message, data_type, continue_flag):
if data_type == websocket.ABNF.OPCODE_TEXT:
parsed_message = json.loads(message)
if is_status_2xx(parsed_message['code']):
# valid 200 status message
message = parsed_message['message']
data = parsed_message['data']
if message.lower() == 'order_update':
order_update = {
'quantity' : int(data['quantity']),
'exchange_order_id': data['exchange_order_id'],
'order_type': OrderType.parse(data['order_type']),
'status' : data['status'],
'transaction_type' : TransactionType.parse(data['transaction_type']),
'exchange' : data['exchange'],
'trigger_price' : float(data['trigger_price']),
'symbol' : data['symbol'],
'traded_quantity' : int(data['traded_quantity']),
'is_amo' : data['is_amo'],
'product' : ProductType.parse(data['product']),
'order_request_id' : data['order_request_id'],
'duration' : DurationType.parse(data['valid_date']),
'price' : float(data['price']),
'time_in_micro' : data['time_in_micro'],
'parent_order_id' : data['parent_order_id'],
'order_id' : data['order_id'],
'message' : data['message'],
'exchange_time' : data['exchange_time'],
'disclosed_quantity' : data['disclosed_quantity'],
'token' : data['token'],
'average_price' : float(data['average_price']),
'instrument' : None
}
try:
instrument = self.get_instrument_by_token(data['exchange'], data['token'])
order_update['instrument'] = instrument
except ValueError:
pass
if self.on_order_update:
self.on_order_update(order_update)
elif message.lower() == 'fill_report':
# {'data': {'exchange_time': '16-Jun-2017 12:41:20', 'token': 45578, 'traded_quantity': 40,
# 'order_id': '170616000000084', 'order_type': 'M', 'traded_price': 22998.45, 'trade_id': '1600',
# 'transaction_type': 'S', 'exchange_order_id': '1000000000005143',
# 'exchange': 'NSE_FO', 'product': 'I', 'time_in_micro': '0', 'symbol': 'BANKNIFTY17JUNFUT'},
# 'timestamp': '2017-06-16T12:41:20+05:30', 'status': 'OK', 'code': 200, 'message': 'fill_report'}
trade_update = {
'exchange_time': data['exchange_time'],
'token': data['token'],
'traded_quantity': int(data['traded_quantity']),
'order_id': data['order_id'],
'order_type': OrderType.parse(data['order_type']),
'traded_price': float(data['traded_price']),
'trade_id': data['trade_id'],
'transaction_type': TransactionType.parse(data['transaction_type']),
'exchange_order_id': data['exchange_order_id'],
'exchange': data['exchange'],
'product': ProductType.parse(data['product']),
'time_in_micro': data['time_in_micro'],
'symbol': data['symbol'],
'instrument': None
}
try:
instrument = self.get_instrument_by_token(data['exchange'], data['token'])
trade_update['instrument'] = instrument
except ValueError:
pass
if self.on_trade_update:
self.on_trade_update(trade_update)
else:
print("Unknown message: %s" % parsed_message)
else:
data = message.decode()
quotes = data.split(';')
ltp_quote_fields = ["timestamp", "exchange", "symbol", "ltp", "close"]
full_quote_fields = ["timestamp", "exchange", "symbol", "ltp", "close", "open", "high", "low", "vtt",
"atp", "oi", "spot_price", "total_buy_qty", "total_sell_qty", "lower_circuit",
"upper_circuit", "yearly_low", "yearly_high"]
full_quote_fields_indices = ["timestamp", "exchange", "symbol", "live_ltp", "live_open",
"live_high", "live_low", "live_close", "live_yearly_high",
"live_yearly_low"]
for quote in quotes:
quote_object = None
fields = quote.split(',')
for index, field in enumerate(fields):
if field == 'NaN' or field == '':
fields[index] = None
# convert timestamp to DateTime object
#fields[0] = datetime.fromtimestamp(float(fields[0])/1000.0)
# convert LTP and close to floats from string
try:
fields[3] = float(fields[3])
fields[4] = float(fields[4])
except ValueError:
pass
# check if LTP subscription
if len(fields) == 5:
quote_object = dict(zip(ltp_quote_fields, fields))
# check if full quote subscription for indices
elif len(fields) == 10:
quote_object = dict(zip(full_quote_fields_indices, fields))
# check if full quote subscription
elif len(fields) == 49 or len(fields) == 48:
# convert other string fields to floats or ints
for m in range (5, 12):
if fields[m] is not None:
fields[m] = float(fields[m])
for m in range (12, 14):
if fields[m] is not None:
fields[m] = int(fields[m])
for m in range (14, 18):
if fields[m] is not None:
fields[m] = float(fields[m])
quote_object = dict(zip(full_quote_fields, fields[:17]))
# Adding ltt or last traded time which comes as last field in full quote subscription
if len(fields) == 49:
quote_object["ltt"] = int(fields[48])
quote_object["bids"] = []
quote_object["asks"] = []
i = 18
j = 33
for h in range(1, 6):
quote_object["bids"].append({"quantity" : int(fields[i]), "price" : float(fields[i + 1]), "orders" : int(fields[i + 2])})
quote_object["asks"].append({"quantity" : int(fields[j]), "price" : float(fields[j + 1]), "orders" : int(fields[j + 2])})
i += 3
j += 3
if quote_object is None:
logging.warning('Quote object was not mapped to any subscription. Length: %s, Values: %s' % (str(len(fields)), quote))
continue
else:
# append instrument object
if self.get_instrument_by_symbol(fields[1], fields[2]) is not None:
quote_object["instrument"] = self.get_instrument_by_symbol(fields[1], fields[2])
if self.on_quote_update:
self.on_quote_update(quote_object)
def _on_error (self, ws, error):
if self.on_error:
self.on_error(ws, error)
def _on_close (self, ws):
if self.on_disconnect:
self.on_disconnect(ws)
def __init__(self, api_key, access_token):
""" logs in and gets enabled exchanges and products for user """
self.api_key = api_key
self.access_token = access_token
with open(os.path.join(os.path.dirname(__file__), 'service_config.json')) as data_file:
self.config = json.load(data_file)
profile = self.api_call_helper('profile', PyCurlVerbs.GET, None, None)
self.enabled_exchanges = []
for x in profile['exchanges_enabled']:
if x.lower() == 'nse_eq':
self.enabled_exchanges.append('nse_index')
if x.lower() == 'bse_eq':
self.enabled_exchanges.append('bse_index')
if x.lower() == 'mcx_fo':
self.enabled_exchanges.append('mcx_index')
self.enabled_exchanges.append(x.lower())
self.enabled_products = [x.lower() for x in profile['products_enabled']]
self.ws_thread = None
def get_socket_params(self):
return self.api_call_helper('socketParams', PyCurlVerbs.GET, None, None)
def start_websocket(self, run_in_background=False):
socket_params = {}
try:
socket_params = self.get_socket_params()
except requests.exceptions.HTTPError:
print ("Can't Access Socket Params")
ping_interval = 60
ping_timeout = 10
if 'pythonPingInterval' in socket_params.keys():
ping_interval = socket_params['pythonPingInterval']
if 'pythonPingTimeout' in socket_params.keys():
ping_timeout = socket_params['pythonPingTimeout']
url = self.config['socketEndpoint'].format(api_key=self.api_key, access_token=self.access_token)
self.websocket = websocket.WebSocketApp(url,
header={'Authorization: Bearer' + self.access_token},
on_data=self._on_data,
on_error=self._on_error,
on_close=self._on_close)
if run_in_background is True:
self.ws_thread = threading.Thread(target=self.websocket.run_forever)
self.ws_thread.daemon = True
self.ws_thread.start()
else:
self.websocket.run_forever(ping_interval=ping_interval, ping_timeout=ping_timeout)
def set_on_order_update(self, event_handler):
self.on_order_update = event_handler
def set_on_quote_update(self, event_handler):
self.on_quote_update = event_handler
def set_on_trade_update(self, event_handler):
self.on_trade_update = event_handler
def set_on_disconnect(self, event_handler):
self.on_disconnect = event_handler
def set_on_error(self, event_handler):
self.on_error = event_handler
def get_profile(self):
return self.api_call_helper('profile', PyCurlVerbs.GET, None, None)
def get_balance(self):
return self.api_call_helper('balance', PyCurlVerbs.GET, None, None)
def get_holdings(self):
return self.api_call_helper('holdings', PyCurlVerbs.GET, None, None)
def get_positions(self):
return self.api_call_helper('positions', PyCurlVerbs.GET, None, None)
def get_trade_book(self):
""" returns trade_book of a user """
trade_book = self.api_call_helper('tradeBook', PyCurlVerbs.GET, None, None)
for trade in trade_book:
for key in trade:
if key in Schema.schema_trade_book:
trade[key] = Schema.schema_trade_book[key](trade[key])
try:
instrument = self.get_instrument_by_token(trade['exchange'], trade['token'])
trade['instrument'] = instrument
except ValueError:
pass
return trade_book
def get_order_history(self, order_id=None):
""" leave order_id as None to get all entire order history """
if order_id is None:
order_history = self.api_call_helper('getOrders', PyCurlVerbs.GET, None, None);
else:
order_history = self.api_call_helper('getOrdersInfo', PyCurlVerbs.GET, {'order_id' : order_id}, None);
for order in order_history:
for key in order:
if key in Schema.schema_order_history:
order[key] = Schema.schema_order_history[key](order[key])
try:
instrument = self.get_instrument_by_token(order['exchange'], order['token'])
order['instrument'] = instrument
except ValueError:
pass
return order_history;
def get_trades(self, order_id):
""" get all trades of a particular order """
if not isinstance(order_id, int):
raise TypeError("Required parameter order_id not of type int")
return self.api_call_helper('tradesInfo', PyCurlVerbs.GET, {'order_id' : order_id}, None)
def logout(self):
return self.api_call_helper('logout', PyCurlVerbs.GET, None, None)
def get_exchanges(self):
return self.enabled_exchanges
def get_live_feed(self, instrument, live_feed_type):
""" get the current feed of an instrument """
if not isinstance(instrument, Instrument):
raise TypeError("Required parameter instrument not of type Instrument")
if LiveFeedType.parse(live_feed_type) is None:
raise TypeError("Required parameter live_feed_type not of type LiveFeedType")
return self.api_call_helper('liveFeed', PyCurlVerbs.GET, {'exchange': instrument.exchange,
'symbol' : instrument.symbol,
'type' : live_feed_type}
, None)
def get_ohlc(self, instrument, interval, start_date, end_date, download_as_csv = False):
""" get OHLC for an instrument """
if not isinstance(instrument, Instrument):
raise TypeError("Required parameter instrument not of type Instrument")
if OHLCInterval.parse(interval) is None:
raise TypeError("Required parameter interval not of type OHLCInterval")
if not isinstance(start_date, date):
raise TypeError("Required parameter start_date not of type date")
if not isinstance(end_date, date):
raise TypeError("Required parameter end_date not of type date")
output_format = 'json'
if download_as_csv is True:
output_format = 'csv'
ohlc = self.api_call_helper('OHLC', PyCurlVerbs.GET, {'exchange': instrument.exchange,
'symbol' : instrument.symbol,
'interval' : interval,
'start_date' : start_date.strftime('%d-%m-%Y'),
'end_date': end_date.strftime('%d-%m-%Y'),
'format' : output_format
}, None
)
return ohlc;
def place_order(self, transaction_type, instrument, quantity, order_type,
product_type, price = None, trigger_price = None,
disclosed_quantity = None, duration = None, stop_loss = None,
square_off = None, trailing_ticks = None):
""" placing an order, many fields are optional and are not required
for all order types
"""
if TransactionType.parse(transaction_type) is None:
raise TypeError("Required parameter transaction_type not of type TransactionType")
if not isinstance(instrument, Instrument):
raise TypeError("Required parameter instrument not of type Instrument")
if not isinstance(quantity, int):
raise TypeError("Required parameter quantity not of type int")
if OrderType.parse(order_type) is None:
raise TypeError("Required parameter order_type not of type OrderType")
if ProductType.parse(product_type) is None:
raise TypeError("Required parameter product_type not of type ProductType")
# construct order object after all required parameters are met
order = {'transaction_type': transaction_type, 'exchange': instrument.exchange,
'symbol': instrument.symbol,
'quantity': quantity, 'order_type': order_type, 'product': product_type}
if price is not None and not isinstance(price, float):
raise TypeError("Optional parameter price not of type float")
elif price is not None:
order['price'] = price
if trigger_price is not None and not isinstance(trigger_price, float):
raise TypeError("Optional parameter trigger_price not of type float")
elif trigger_price is not None:
order['trigger_price'] = trigger_price
if disclosed_quantity is not None and not isinstance(disclosed_quantity, int):
raise TypeError("Optional parameter disclosed_quantity not of type int")
elif disclosed_quantity is not None:
order['disclosed_quantity'] = disclosed_quantity
if duration is not None and DurationType.parse(duration) is None:
raise TypeError("Optional parameter duration not of type DurationType")
elif duration is not None:
order['duration'] = duration
if stop_loss is not None and not isinstance(stop_loss, float):
raise TypeError("Optional parameter stop_loss not of type float")
elif stop_loss is not None:
order['stoploss'] = stop_loss
if square_off is not None and not isinstance(square_off, float):
raise TypeError("Optional parameter square_off not of type float")
elif square_off is not None:
order['squareoff'] = square_off
if trailing_ticks is not None and not isinstance(trailing_ticks, int):
raise TypeError("Optional parameter trailing_ticks not of type int")
elif trailing_ticks is not None:
order['trailing_ticks'] = trailing_ticks
if product_type is ProductType.CoverOrder:
if not isinstance(trigger_price, float):
raise TypeError("Required parameter trigger_price not of type float")
if product_type is ProductType.OneCancelsOther:
if not isinstance(stop_loss, float):
raise TypeError("Required parameter stop_loss not of type float")
if not isinstance(square_off, float):
raise TypeError("Required parameter square_off not of type float")
return self.api_call_helper('placeOrder', PyCurlVerbs.POST, None, order)
def modify_order(self, order_id, quantity=None, order_type=None, price=None,
                 trigger_price=None, disclosed_quantity=None, duration=None):
    """Modify an existing order.

    Only ``order_id`` is required; pass an optional argument only when you
    want to change that attribute of the order. Each supplied value is
    type-checked and, if valid, copied into the request payload.
    """
    if not isinstance(order_id, int):
        raise TypeError("Required parameter order_id not of type int")
    # Request payload always carries the order id.
    payload = {'order_id': order_id}
    # (field name, supplied value, validity predicate, type name for message)
    # — evaluated in the same order as the original checks so the first
    # invalid argument raises the same TypeError.
    optional_fields = (
        ('quantity', quantity, lambda v: isinstance(v, int), 'int'),
        ('order_type', order_type, lambda v: OrderType.parse(v) is not None, 'OrderType'),
        ('price', price, lambda v: isinstance(v, float), 'float'),
        ('trigger_price', trigger_price, lambda v: isinstance(v, float), 'float'),
        ('disclosed_quantity', disclosed_quantity, lambda v: isinstance(v, int), 'int'),
        ('duration', duration, lambda v: DurationType.parse(v) is not None, 'DurationType'),
    )
    for field, value, is_valid, type_name in optional_fields:
        if value is None:
            continue
        if not is_valid(value):
            raise TypeError("Optional parameter %s not of type %s" % (field, type_name))
        payload[field] = value
    return self.api_call_helper('modifyOrder', PyCurlVerbs.PUT, {'order_id': order_id}, payload)
def cancel_order(self, order_id):
    """Cancel a single open order identified by ``order_id``.

    NOTE(review): the type check on ``order_id`` is commented out below, so
    any value is forwarded to the API unvalidated — confirm whether
    server-side validation is relied upon here.
    """
    # if not isinstance(order_id, int):
    # raise TypeError("Required parameter order_id not of type int")
    return self.api_call_helper('cancelOrder', PyCurlVerbs.DELETE, {'order_id' : order_id}, None)
def cancel_all_orders(self):
    """Cancel every open order for the account (endpoint takes no parameters)."""
    return self.api_call_helper('cancelAllOrders', PyCurlVerbs.DELETE, None, None)
def subscribe(self, instrument, live_feed_type):
    """Subscribe to the live feed of an instrument.

    Raises TypeError when ``instrument`` is not an Instrument or
    ``live_feed_type`` is not a recognized LiveFeedType value.
    """
    if not isinstance(instrument, Instrument):
        raise TypeError("Required parameter instrument not of type Instrument")
    if LiveFeedType.parse(live_feed_type) is None:
        raise TypeError("Required parameter live_feed_type not of type LiveFeedType")
    route_params = {
        'exchange': instrument.exchange,
        'symbol': instrument.symbol,
        'type': live_feed_type,
    }
    return self.api_call_helper('liveFeedSubscribe', PyCurlVerbs.GET, route_params, None)
def unsubscribe(self, instrument, live_feed_type):
    """Unsubscribe from the live feed of an instrument.

    Mirrors subscribe(): validates both arguments, then hits the
    unsubscribe route with the same parameters.
    """
    if not isinstance(instrument, Instrument):
        raise TypeError("Required parameter instrument not of type Instrument")
    if LiveFeedType.parse(live_feed_type) is None:
        raise TypeError("Required parameter live_feed_type not of type LiveFeedType")
    route_params = {
        'exchange': instrument.exchange,
        'symbol': instrument.symbol,
        'type': live_feed_type,
    }
    return self.api_call_helper('liveFeedUnsubscribe', PyCurlVerbs.GET, route_params, None)
def get_instrument_by_symbol(self, exchange, symbol):
    """Look up a single instrument by (exchange, symbol).

    Returns None (after logging a warning) when either the exchange has no
    cached master contract yet or the symbol is unknown on that exchange.
    """
    global master_contracts_by_symbol
    exchange = exchange.lower()
    symbol = symbol.lower()
    contracts = master_contracts_by_symbol.get(exchange)
    if contracts is None:
        # Master contract must be downloaded before lookups work.
        logging.warning("Cannot find exchange [%s] in master contract. "
                        "Please ensure you have called get_master_contract function first" % exchange)
        return None
    instrument = contracts.get(symbol)
    if instrument is None:
        logging.warning("Cannot find symbol [%s:%s] in master contract" % (exchange, symbol))
        return None
    return instrument
def search_instruments(self, exchange, symbol):
    """Return all instruments on ``exchange`` whose symbol contains ``symbol``.

    Substring match is case-insensitive (both sides lowered). Returns None
    (after logging a warning) when the exchange's master contract has not
    been downloaded yet; otherwise a (possibly empty) list of instruments.
    """
    global master_contracts_by_token
    exchange = exchange.lower()
    needle = symbol.lower()
    if exchange not in master_contracts_by_token:
        logging.warning(
            "Cannot find exchange [%s] in master contract. "
            "Please ensure you have called get_master_contract function first" % exchange)
        return None
    contracts = master_contracts_by_token[exchange]
    # Iteration order of the OrderedDict is preserved, so results come back
    # in master-contract order, same as the original append loop.
    return [instrument for instrument in contracts.values()
            if needle in instrument.symbol]
def get_instrument_by_token(self, exchange, token):
    """Look up a single instrument by (exchange, token).

    Returns None (after logging a warning) when the exchange's master
    contract is not cached or the token is unknown on that exchange.
    """
    global master_contracts_by_token
    exchange = exchange.lower()
    contracts = master_contracts_by_token.get(exchange)
    if contracts is None:
        logging.warning(
            "Cannot find exchange [%s] in master contract. "
            "Please ensure you have called get_master_contract function first" % exchange)
        return None
    if token not in contracts:
        logging.warning("Cannot find token [%s:%s] in master contracts" % (exchange, token))
        return None
    return contracts[token]
def get_master_contract(self, exchange):
    """Return all tradable contracts of an exchange.

    The result is an OrderedDict keyed by instrument token. Downloads happen
    at most once per exchange: results are cached in the module-level
    ``master_contracts_by_token`` / ``master_contracts_by_symbol`` dicts.

    Raises:
        ValueError: if ``exchange`` is not in ``self.enabled_exchanges``.
    """
    global master_contracts_by_token
    exchange = exchange.lower()
    if exchange in master_contracts_by_token:
        return master_contracts_by_token[exchange]
    if exchange not in self.enabled_exchanges:
        logging.warning('Invalid exchange value provided: [%s]' % (exchange))
        raise ValueError("Please provide a valid exchange [%s]" % ",".join(self.enabled_exchanges))
    logging.debug('Downloading master contracts for exchange: %s' % (exchange))
    body = self.api_call_helper('masterContract', PyCurlVerbs.GET, {'exchange': exchange}, None)
    master_contract_by_token = OrderedDict()
    master_contract_by_symbol = OrderedDict()
    # BUG FIX: the original compared strings with `is` / `is not`
    # (e.g. `item[7] is not u'0'`).  Identity is not equality: strings
    # produced by str.split are distinct objects, so the `'0'` check was
    # effectively always true and a strike price of '0' became 0.0 instead
    # of None (modern CPython also emits a SyntaxWarning for these).
    # All comparisons below now use ==/!=.
    count = 0
    for line in body:
        count += 1
        if count == 1:
            continue  # first row is the CSV header
        item = line.split(',')
        if item[1] != '':
            item[1] = int(item[1])       # token
        if item[2] != '':
            item[2] = int(item[2])       # parent token
        else:
            item[2] = None
        item[3] = item[3].lower()        # symbol, normalized to lower case
        if item[5] != '':
            item[5] = float(item[5])     # closing price
        else:
            item[5] = None
        if item[6] == '':
            item[6] = None               # expiry absent
        if item[7] != '' and item[7] != '0':
            item[7] = float(item[7])     # strike price ('0' means absent)
        else:
            item[7] = None
        if item[8] != '':
            item[8] = float(item[8])     # tick size
        else:
            item[8] = None
        if item[9] != '':
            item[9] = int(item[9])       # lot size
        else:
            item[9] = None
        if item[10] == '':
            item[10] = None              # instrument type absent
        if item[11] == '':
            item[11] = None              # ISIN absent
        instrument = Instrument(item[0], item[1], item[2], item[3], item[4],
                                item[5], item[6], item[7], item[8], item[9],
                                item[10], item[11])
        master_contract_by_token[item[1]] = instrument
        master_contract_by_symbol[item[3]] = instrument
    master_contracts_by_token[exchange] = master_contract_by_token
    master_contracts_by_symbol[exchange] = master_contract_by_symbol
    logging.debug('Downloading master contracts for exchange: %s... done' % (exchange))
    return master_contracts_by_token[exchange]
def api_call_helper(self, name, http_method, params, data):
    """Build the URL for route ``name``, perform the call, and unwrap the body.

    ``params`` (when given) fills placeholders in the route template. A
    non-200 HTTP status or a non-2xx application-level code in the JSON
    body both raise requests.HTTPError carrying the raw response text.
    Returns the ``data`` field of the JSON body on success.
    """
    url = self.config['host'] + self.config['routes'][name]
    if params is not None:
        url = url.format(**params)
    response = self.api_call(url, http_method, data)
    if response.status_code != 200:
        raise requests.HTTPError(response.text)
    body = json.loads(response.text)
    # The API wraps its own status code inside the JSON body.
    if not is_status_2xx(body['code']):
        raise requests.HTTPError(response.text)
    return body['data']
    # (fix: removed an unreachable bare `return` that followed the if/else)
def api_call(self, url, http_method, data):
    """Issue one authenticated HTTP request.

    Serializes ``data`` as JSON for POST/PUT. Returns the raw requests
    response, or None when ``http_method`` is not a recognized verb
    (matching the original fall-through behavior).
    """
    headers = {
        "Content-Type": "application/json",
        "x-api-key": self.api_key,
        "authorization": "Bearer " + self.access_token,
    }
    if http_method is PyCurlVerbs.POST:
        return requests.post(url, data=json.dumps(data), headers=headers)
    if http_method is PyCurlVerbs.DELETE:
        return requests.delete(url, headers=headers)
    if http_method is PyCurlVerbs.PUT:
        return requests.put(url, data=json.dumps(data), headers=headers)
    if http_method is PyCurlVerbs.GET:
        return requests.get(url, headers=headers)
    return None
| 42.715909
| 145
| 0.575921
|
4a08294da6fbf94f7bbc6c2d230a3525dfbe04d2
| 3,103
|
py
|
Python
|
src/appengine/handlers/testcase_detail/update_issue.py
|
fengjixuchui/clusterfuzz
|
ef89be3934936d1086b4a21bffca5506c8cb93be
|
[
"Apache-2.0"
] | null | null | null |
src/appengine/handlers/testcase_detail/update_issue.py
|
fengjixuchui/clusterfuzz
|
ef89be3934936d1086b4a21bffca5506c8cb93be
|
[
"Apache-2.0"
] | null | null | null |
src/appengine/handlers/testcase_detail/update_issue.py
|
fengjixuchui/clusterfuzz
|
ef89be3934936d1086b4a21bffca5506c8cb93be
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for updating issue."""
from datastore import data_handler
from handlers import base_handler
from handlers.testcase_detail import show
from libs import handler
from libs import helpers
from libs.issue_management import issue_filer
from libs.issue_management import issue_tracker_policy
class Handler(base_handler.Handler):
  """Handler that updates an issue associated with a testcase."""

  @staticmethod
  def update_issue(testcase, issue_id, needs_summary_update):
    """Associate (or update) an existing issue with the testcase.

    Raises EarlyExitException (HTTP 400) when the issue is already closed
    or the testcase is not a crash; otherwise updates labels (and
    optionally the summary), saves the issue with a fresh description
    comment, and links the issue id back onto the testcase.
    """
    issue_id = helpers.cast(issue_id, int,
                            'Issue ID (%s) is not a number!' % issue_id)
    issue_tracker = helpers.get_issue_tracker_for_testcase(testcase)
    issue = helpers.get_or_exit(lambda: issue_tracker.get_issue(issue_id),
                                'Issue (id=%d) is not found!' % issue_id,
                                'Failed to get the issue (id=%s).' % issue_id,
                                Exception)
    if not issue.is_open:
      raise helpers.EarlyExitException(
          ('The issue (%d) is already closed and further updates are not'
           ' allowed. Please file a new issue instead!') % issue_id, 400)
    if not testcase.is_crash():
      raise helpers.EarlyExitException(
          'This is not a crash testcase, so issue update is not applicable.',
          400)
    issue_comment = data_handler.get_issue_description(testcase,
                                                       helpers.get_user_email())
    if needs_summary_update:
      issue.title = data_handler.get_issue_summary(testcase)
    policy = issue_tracker_policy.get(issue_tracker.project)
    properties = policy.get_existing_issue_properties()
    for label in properties.labels:
      for result in issue_filer.apply_substitutions(label, testcase):
        issue.labels.add(result)
    issue.save(new_comment=issue_comment)
    testcase.bug_information = str(issue_id)
    testcase.put()
    data_handler.update_group_bug(testcase.group_id)
    # BUG FIX: log format was 'Updated issue %sd', which rendered a stray
    # trailing 'd' (e.g. "Updated issue 123d").
    helpers.log('Updated issue %s' % issue_id, helpers.MODIFY_OPERATION)

  @handler.post(handler.JSON, handler.JSON)
  @handler.require_csrf_token
  @handler.check_admin_access_if_oss_fuzz
  @handler.check_testcase_access
  def post(self, testcase):
    """Update an issue from request parameters and render the testcase."""
    issue_id = self.request.get('issueId')
    needs_summary_update = self.request.get('needsSummaryUpdate')
    self.update_issue(testcase, issue_id, needs_summary_update)
    self.render_json(show.get_testcase_detail(testcase))
| 38.308642
| 80
| 0.705769
|
4a082a03bd6b00fbe85696f403e4ab7b03dea98d
| 4,179
|
py
|
Python
|
tests/pytests/unit/states/test_mac_xattr.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/pytests/unit/states/test_mac_xattr.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/pytests/unit/states/test_mac_xattr.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
import pytest
import salt.states.mac_xattr as xattr
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    # Give the xattr state module empty loader globals; each test below
    # patches xattr.__salt__ with the mocks it needs.
    return {xattr: {}}
def test_exists_not():
    """
    xattr.exists writes a missing attribute and reports it under changes.
    """
    list_mock = MagicMock(return_value={"other.id": "value2"})
    write_mock = MagicMock()
    salt_funcs = {"xattr.list": list_mock, "xattr.write": write_mock}
    with patch("os.path.exists", return_value=True):
        with patch.dict(xattr.__salt__, salt_funcs):
            ret = xattr.exists("/path/to/file", ["key=value"])
    list_mock.assert_called_once_with("/path/to/file")
    write_mock.assert_called_once_with("/path/to/file", "key", "value", False)
    assert ret == {
        "changes": {"key": "value"},
        "comment": "",
        "name": "/path/to/file",
        "result": True,
    }
def test_exists_change():
    """
    xattr.exists rewrites an attribute whose current value differs.
    """
    list_mock = MagicMock(return_value={"key": "value"})
    write_mock = MagicMock()
    salt_funcs = {"xattr.list": list_mock, "xattr.write": write_mock}
    with patch("os.path.exists", return_value=True):
        with patch.dict(xattr.__salt__, salt_funcs):
            ret = xattr.exists("/path/to/file", ["key=other_value"])
    list_mock.assert_called_once_with("/path/to/file")
    write_mock.assert_called_once_with(
        "/path/to/file", "key", "other_value", False
    )
    assert ret == {
        "changes": {"key": "other_value"},
        "comment": "",
        "name": "/path/to/file",
        "result": True,
    }
def test_exists_already():
    """
    xattr.exists is a no-op when the attribute already has the value.
    """
    list_mock = MagicMock(return_value={"key": "value"})
    write_mock = MagicMock()
    salt_funcs = {"xattr.list": list_mock, "xattr.write": write_mock}
    with patch("os.path.exists", return_value=True):
        with patch.dict(xattr.__salt__, salt_funcs):
            ret = xattr.exists("/path/to/file", ["key=value"])
    list_mock.assert_called_once_with("/path/to/file")
    assert not write_mock.called
    assert ret == {
        "changes": {},
        "comment": "All values existed correctly.",
        "name": "/path/to/file",
        "result": True,
    }
def test_delete():
    """
    xattr.delete removes an existing attribute and reports it under changes.
    """
    list_mock = MagicMock(return_value={"key": "value2"})
    delete_mock = MagicMock()
    salt_funcs = {"xattr.list": list_mock, "xattr.delete": delete_mock}
    with patch("os.path.exists", return_value=True):
        with patch.dict(xattr.__salt__, salt_funcs):
            ret = xattr.delete("/path/to/file", ["key"])
    list_mock.assert_called_once_with("/path/to/file")
    delete_mock.assert_called_once_with("/path/to/file", "key")
    assert ret == {
        "changes": {"key": "delete"},
        "comment": "",
        "name": "/path/to/file",
        "result": True,
    }
def test_delete_not():
    """
    xattr.delete is a no-op when the attribute is already absent.
    """
    list_mock = MagicMock(return_value={"other.key": "value2"})
    delete_mock = MagicMock()
    salt_funcs = {"xattr.list": list_mock, "xattr.delete": delete_mock}
    with patch("os.path.exists", return_value=True):
        with patch.dict(xattr.__salt__, salt_funcs):
            ret = xattr.delete("/path/to/file", ["key"])
    list_mock.assert_called_once_with("/path/to/file")
    assert not delete_mock.called
    assert ret == {
        "changes": {},
        "comment": "All attributes were already deleted.",
        "name": "/path/to/file",
        "result": True,
    }
| 29.638298
| 86
| 0.5542
|
4a082a67256a138d953833524f9d6dd8cf7a7e8b
| 8,045
|
py
|
Python
|
py/test/selenium/webdriver/common/interactions_tests.py
|
akiellor/selenium
|
239490f9c5f3c7e7d4082bbe53c86eb5158d70a3
|
[
"Apache-2.0"
] | 1
|
2018-08-24T18:01:34.000Z
|
2018-08-24T18:01:34.000Z
|
py/test/selenium/webdriver/common/interactions_tests.py
|
akiellor/selenium
|
239490f9c5f3c7e7d4082bbe53c86eb5158d70a3
|
[
"Apache-2.0"
] | 1
|
2021-10-18T12:23:37.000Z
|
2021-10-18T12:23:37.000Z
|
py/test/selenium/webdriver/common/interactions_tests.py
|
akiellor/selenium
|
239490f9c5f3c7e7d4082bbe53c86eb5158d70a3
|
[
"Apache-2.0"
] | 2
|
2018-04-30T21:35:30.000Z
|
2021-05-14T08:11:46.000Z
|
#!/usr/bin/python
# Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for advanced user interactions."""
import os
import time
import unittest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.webdriver import WebDriver
class AdvancedUserInteractionTest(unittest.TestCase):
    """Tests for advanced user interactions via ActionChains.

    NOTE(review): `self.driver` and `self.webserver` are injected by the
    surrounding test harness that instantiates this case; they are not
    created here.
    """

    def performDragAndDropWithMouse(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("draggableLists")
        dragReporter = self.driver.find_element_by_id("dragging_reports")
        toDrag = self.driver.find_element_by_id("rightitem-3")
        dragInto = self.driver.find_element_by_id("sortable1")
        holdItem = ActionChains(self.driver).click_and_hold(toDrag)
        moveToSpecificItem = ActionChains(self.driver) \
            .move_to_element(self.driver.find_element_by_id("leftitem-4"))
        moveToOtherList = ActionChains(self.driver).move_to_element(dragInto)
        drop = ActionChains(self.driver).release(dragInto)
        self.assertEqual("Nothing happened.", dragReporter.text)
        holdItem.perform()
        moveToSpecificItem.perform()
        moveToOtherList.perform()
        self.assertEqual("Nothing happened. DragOut", dragReporter.text)
        drop.perform()

    def testDraggingElementWithMouseMovesItToAnotherList(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self.performDragAndDropWithMouse()
        dragInto = self.driver.find_element_by_id("sortable1")
        self.assertEqual(6, len(dragInto.find_elements_by_tag_name("li")))

    def _testDraggingElementWithMouseFiresEvents(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface.
        Disabled since this test doesn't work with HTMLUNIT.
        """
        self.performDragAndDropWithMouse()
        dragReporter = self.driver.find_element_by_id("dragging_reports")
        self.assertEqual("Nothing happened. DragOut DropIn RightItem 3", dragReporter.text)

    def _isElementAvailable(self, element_id):
        """Return True iff an element with the given id can be found.

        (Parameter renamed from `id` to avoid shadowing the builtin; fix:
        bare `except:` narrowed to `except Exception`.)
        """
        try:
            self.driver.find_element_by_id(element_id)
            return True
        except Exception:
            return False

    def testDragAndDrop(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("droppableItems")
        waitEndTime = time.time() + 15
        while (not self._isElementAvailable("draggable") and
               time.time() < waitEndTime):
            time.sleep(0.2)
        if not self._isElementAvailable("draggable"):
            # BUG FIX: was `raise "..."` — raising a plain string is a
            # TypeError at runtime; raise a real exception instead.
            raise WebDriverException(
                "Could not find draggable element after 15 seconds.")
        toDrag = self.driver.find_element_by_id("draggable")
        dropInto = self.driver.find_element_by_id("droppable")
        holdDrag = ActionChains(self.driver) \
            .click_and_hold(toDrag)
        move = ActionChains(self.driver) \
            .move_to_element(dropInto)
        drop = ActionChains(self.driver).release(dropInto)
        holdDrag.perform()
        move.perform()
        drop.perform()
        dropInto = self.driver.find_element_by_id("droppable")
        text = dropInto.find_element_by_tag_name("p").text
        self.assertEqual("Dropped!", text)

    def testDoubleClick(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        toDoubleClick = self.driver.find_element_by_id("doubleClickField")
        dblClick = ActionChains(self.driver) \
            .double_click(toDoubleClick)
        dblClick.perform()
        self.assertEqual("DoubleClicked", toDoubleClick.get_attribute('value'))

    def testContextClick(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        toContextClick = self.driver.find_element_by_id("doubleClickField")
        contextClick = ActionChains(self.driver) \
            .context_click(toContextClick)
        contextClick.perform()
        self.assertEqual("ContextClicked",
                         toContextClick.get_attribute('value'))

    def testMoveAndClick(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        toClick = self.driver.find_element_by_id("clickField")
        click = ActionChains(self.driver) \
            .move_to_element(toClick) \
            .click()
        click.perform()
        self.assertEqual("Clicked", toClick.get_attribute('value'))

    def testCannotMoveToANullLocator(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        try:
            move = ActionChains(self.driver) \
                .move_to_element(None)
            move.perform()
            self.fail("Shouldn't be allowed to click on null element.")
        except AttributeError:
            pass  # Expected.
        try:
            ActionChains(self.driver).click().perform()
            self.fail("Shouldn't be allowed to click without a context.")
        except WebDriverException:
            pass  # Expected.

    def _testClickingOnFormElements(self):
        """Copied from org.openqa.selenium.interactions.CombinedInputActionsTest.
        Disabled since this test doesn't work with HTMLUNIT.
        """
        self._loadPage("formSelectionPage")
        options = self.driver.find_elements_by_tag_name("option")
        selectThreeOptions = ActionChains(self.driver) \
            .click(options[1]) \
            .key_down(Keys.SHIFT) \
            .click(options[2]) \
            .click(options[3]) \
            .key_up(Keys.SHIFT)
        selectThreeOptions.perform()
        showButton = self.driver.find_element_by_name("showselected")
        showButton.click()
        resultElement = self.driver.find_element_by_id("result")
        self.assertEqual("roquefort parmigiano cheddar", resultElement.text)

    def testSelectingMultipleItems(self):
        """Copied from org.openqa.selenium.interactions.CombinedInputActionsTest."""
        self._loadPage("selectableItems")
        reportingElement = self.driver.find_element_by_id("infodiv")
        self.assertEqual("no info", reportingElement.text)
        listItems = self.driver.find_elements_by_tag_name("li")
        selectThreeItems = ActionChains(self.driver) \
            .key_down(Keys.CONTROL) \
            .click(listItems[1]) \
            .click(listItems[3]) \
            .click(listItems[5]) \
            .key_up(Keys.CONTROL)
        selectThreeItems.perform()
        self.assertEqual("#item2 #item4 #item6", reportingElement.text)
        # Now click on another element, make sure that's the only one selected.
        actionsBuilder = ActionChains(self.driver)
        actionsBuilder.click(listItems[6]).perform()
        self.assertEqual("#item7", reportingElement.text)

    def _pageURL(self, name):
        # Build a URL against the locally running fixture web server.
        return "http://localhost:%d/%s.html" % (self.webserver.port, name)

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| 39.243902
| 91
| 0.681914
|
4a082a6f37c6fd26ff8393f48c2f1cfc65b29949
| 3,427
|
py
|
Python
|
parlai/tasks/opensubtitles/build.py
|
ysglh/ParlAI
|
e0f16e9168839be12f72d3431b9819cf3d51fe10
|
[
"BSD-3-Clause"
] | 2
|
2017-09-30T23:23:44.000Z
|
2021-07-08T17:12:58.000Z
|
parlai/tasks/opensubtitles/build.py
|
ysglh/ParlAI
|
e0f16e9168839be12f72d3431b9819cf3d51fe10
|
[
"BSD-3-Clause"
] | null | null | null |
parlai/tasks/opensubtitles/build.py
|
ysglh/ParlAI
|
e0f16e9168839be12f72d3431b9819cf3d51fe10
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import gzip
import os
import re
def _regularize(sent):
sent = sent.replace('i>', '').replace('<', '').replace('>', '')
sent = re.sub(r'x[0-9|a-f][0-9|a-f]', ' ', sent)
sent = sent.replace('\\', '').replace('-', '')
sent = ' '.join(re.findall(r"[\w']+|[.,!?:;]", sent))
sent = sent.replace('. . .', '...')
return sent
def create_fb_format(inpath, outpath):
    """Convert gzipped OpenSubtitles XML under `inpath` into FB-dialog format.

    Writes train.txt / valid.txt / test.txt in `outpath`. Conversations are
    split by id: every conversation with id % 10 == 0 goes to test,
    id % 10 == 1 to valid, everything else to train.

    NOTE(review): gzip.open defaults to binary mode, so `str(line)` below
    yields the bytes repr ("b'...'"). The substring searches still work on
    that repr, but extracted words may carry escape artifacts — confirm
    this matches the intended output before changing it.
    """
    print('[building fbformat]')
    ftrain = open(os.path.join(outpath, 'train.txt'), 'w')
    fvalid = open(os.path.join(outpath, 'valid.txt'), 'w')
    ftest = open(os.path.join(outpath, 'test.txt'), 'w')
    conv_id = 0
    # find all the files.
    for root, _subfolder, files in os.walk(inpath):
        for f in files:
            if f.endswith('.gz'):
                dialog = ''
                conv_id = conv_id + 1
                with gzip.open(os.path.join(root, f), 'r') as f1:
                    words = ''
                    line_id = 1
                    turn_id = 1
                    for line in f1:
                        line = str(line)
                        if line.find('<s id="') != -1:
                            # New <s> sentence tag: flush the words collected
                            # for the previous sentence. Even turns start a
                            # new numbered line; odd turns complete it as the
                            # tab-separated response.
                            if len(words) > 0:
                                words = _regularize(words)
                                if (turn_id % 2) == 0:
                                    dialog += str(line_id) + ' ' + words
                                else:
                                    dialog += '\t' + words + '\n'
                                    line_id += 1
                                turn_id = turn_id + 1
                                words = ''
                        else:
                            # Accumulate the text of each <w> word token.
                            i1 = line.find('<w id="')
                            if i1 >= 0:
                                line = line[i1:]
                                word = line[line.find('>')+1:line.find('</w')]
                                words = words + ' ' + word.replace('\t', ' ')
                # Route the finished conversation to its split.
                handle = ftrain
                if (conv_id % 10) == 0:
                    handle = ftest
                if (conv_id % 10) == 1:
                    handle = fvalid
                handle.write(dialog + '\n')
    ftrain.close()
    fvalid.close()
    ftest.close()
def build(opt):
    """Download and build the OpenSubtitles data unless already built."""
    dpath = os.path.join(opt['datapath'], 'OpenSubtitles')
    version = '1'
    if build_data.built(dpath, version_string=version):
        # Current version already present; nothing to do.
        return
    print('[building data: ' + dpath + ']')
    if build_data.built(dpath):
        # An older version exists, so remove these outdated files.
        build_data.remove_dir(dpath)
    build_data.make_dir(dpath)
    # Download and unpack the corpus, then convert to FB dialog format.
    url = 'http://opus.lingfil.uu.se/download.php?f=OpenSubtitles/en.tar.gz'
    build_data.download(url, dpath, 'OpenSubtitles.tar.gz')
    build_data.untar(dpath, 'OpenSubtitles.tar.gz')
    create_fb_format(os.path.join(dpath, 'OpenSubtitles', 'en'), dpath)
    # Mark the data as built.
    build_data.mark_done(dpath, version_string=version)
| 38.077778
| 82
| 0.47972
|
4a082be498fb0521b7f3c8f4212c00d55d32951a
| 4,247
|
py
|
Python
|
pypureclient/flasharray/FA_2_10/models/qos.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_10/models/qos.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_10/models/qos.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_10 import models
class Qos(object):
    """
    Swagger-generated model of a volume's Quality-of-Service limits.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    swagger_types = {
        'bandwidth_limit': 'int',
        'iops_limit': 'int'
    }

    attribute_map = {
        'bandwidth_limit': 'bandwidth_limit',
        'iops_limit': 'iops_limit'
    }

    # No attributes are required at construction time.
    required_args = {
    }

    def __init__(
        self,
        bandwidth_limit=None,  # type: int
        iops_limit=None,  # type: int
    ):
        """
        Keyword args:
            bandwidth_limit (int): The maximum QoS bandwidth limit for the volume. Whenever throughput exceeds the bandwidth limit, throttling occurs. Measured in bytes per second. Maximum limit is 512 GB/s.
            iops_limit (int): The QoS IOPs limit for the volume.
        """
        # Only set attributes that were actually supplied; unset attributes
        # stay absent so hasattr()/to_dict() skip them.
        if bandwidth_limit is not None:
            self.bandwidth_limit = bandwidth_limit
        if iops_limit is not None:
            self.iops_limit = iops_limit

    def __setattr__(self, key, value):
        # Reject unknown attributes and range-check the known ones.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `Qos`".format(key))
        if key == "bandwidth_limit" and value is not None:
            # 549755813888 bytes/s == 512 GiB/s; 1048576 == 1 MiB/s.
            if value > 549755813888:
                raise ValueError("Invalid value for `bandwidth_limit`, value must be less than or equal to `549755813888`")
            if value < 1048576:
                raise ValueError("Invalid value for `bandwidth_limit`, must be a value greater than or equal to `1048576`")
        if key == "iops_limit" and value is not None:
            if value > 104857600:
                raise ValueError("Invalid value for `iops_limit`, value must be less than or equal to `104857600`")
            if value < 100:
                raise ValueError("Invalid value for `iops_limit`, must be a value greater than or equal to `100`")
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Property placeholder objects mark fields that were never set; they
        # surface as AttributeError so hasattr() reports False for them.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Recursively serialize nested models, lists and dicts.
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(Qos, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Qos):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 33.179688
| 207
| 0.569108
|
4a082cfec1654d038247007ca91dd9c8d10a0baa
| 817
|
py
|
Python
|
mxfusion/common/exceptions.py
|
JeremiasKnoblauch/MXFusion
|
af6223e9636b055d029d136dd7ae023b210b4560
|
[
"Apache-2.0"
] | 2
|
2019-05-31T09:50:47.000Z
|
2021-03-06T09:38:47.000Z
|
mxfusion/common/exceptions.py
|
JeremiasKnoblauch/MXFusion
|
af6223e9636b055d029d136dd7ae023b210b4560
|
[
"Apache-2.0"
] | null | null | null |
mxfusion/common/exceptions.py
|
JeremiasKnoblauch/MXFusion
|
af6223e9636b055d029d136dd7ae023b210b4560
|
[
"Apache-2.0"
] | 1
|
2019-05-30T09:39:46.000Z
|
2019-05-30T09:39:46.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
class ModelSpecificationError(Exception):
    """Error raised for problems in a model's specification.

    (Semantics defined by the call sites elsewhere in the package.)
    """
    pass
class InferenceError(Exception):
    """Error raised during inference.

    (Semantics defined by the call sites elsewhere in the package.)
    """
    pass
class SerializationError(Exception):
    """Error raised during serialization or deserialization.

    (Semantics defined by the call sites elsewhere in the package.)
    """
    pass
| 31.423077
| 80
| 0.664627
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.